From a6601a51eb725a4b0bee843c2861e88befdf0c97 Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Wed, 13 Mar 2013 13:35:47 +0000 Subject: [PATCH 0001/4704] Multi-node setup: Fix keystone host Fixes bug 1154587. Use KEYSTONE_AUTH_HOST instead of SERVICE_HOST when setting nova auth parameters. Also use KEYSTONE_AUTH_PROTOCOL instead of SERVICE_PROTOCOL. Change-Id: I54f7f31f6b795833b38968f6beea68e429f01d55 --- lib/nova | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/nova b/lib/nova index 13e20f87be..bdf5d22011 100644 --- a/lib/nova +++ b/lib/nova @@ -166,9 +166,9 @@ function configure_nova() { # Get the sample configuration file in place cp $NOVA_DIR/etc/nova/api-paste.ini $NOVA_CONF_DIR - iniset $NOVA_API_PASTE_INI filter:authtoken auth_host $SERVICE_HOST + iniset $NOVA_API_PASTE_INI filter:authtoken auth_host $KEYSTONE_AUTH_HOST if is_service_enabled tls-proxy; then - iniset $NOVA_API_PASTE_INI filter:authtoken auth_protocol $SERVICE_PROTOCOL + iniset $NOVA_API_PASTE_INI filter:authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL fi iniset $NOVA_API_PASTE_INI filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME iniset $NOVA_API_PASTE_INI filter:authtoken admin_user nova From b3236914f15261a60fae254361b1e65ccf94c583 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Sun, 17 Mar 2013 15:17:05 -0500 Subject: [PATCH 0002/4704] Mova nova configuration initialization * Move shared volume configuration from stack.sh to stackrc * Move Nova network and vnc/spice configuration settings from stack.sh into lib/nova * Rename NET_MAN to NETOWRK_MANAGER to match nova.conf attribute name Change-Id: I9bd2955def553499aa832eda1f0959afe494206a --- lib/nova | 98 ++++++++++++++++++++++++++++++++++++- lib/quantum | 2 +- stack.sh | 105 ---------------------------------------- stackrc | 5 ++ tools/build_bm_multi.sh | 2 +- 5 files changed, 104 insertions(+), 108 deletions(-) diff --git a/lib/nova b/lib/nova index 23346b7716..f0c83157d6 100644 --- a/lib/nova +++ b/lib/nova 
@@ -66,6 +66,59 @@ SCHEDULER=${SCHEDULER:-nova.scheduler.filter_scheduler.FilterScheduler} QEMU_CONF=/etc/libvirt/qemu.conf +# Nova Network Configuration +# -------------------------- + +# Set defaults according to the virt driver +if [ "$VIRT_DRIVER" = 'xenserver' ]; then + PUBLIC_INTERFACE_DEFAULT=eth3 + GUEST_INTERFACE_DEFAULT=eth1 + # Allow ``build_domU.sh`` to specify the flat network bridge via kernel args + FLAT_NETWORK_BRIDGE_DEFAULT=$(grep -o 'flat_network_bridge=[[:alnum:]]*' /proc/cmdline | cut -d= -f 2 | sort -u) +elif [ "$VIRT_DRIVER" = 'baremetal' ]; then + NETWORK_MANAGER=${NETWORK_MANAGER:-FlatManager} + PUBLIC_INTERFACE_DEFAULT=eth0 + FLAT_INTERFACE=${FLAT_INTERFACE:-eth0} + FLAT_NETWORK_BRIDGE_DEFAULT=br100 + STUB_NETWORK=${STUB_NETWORK:-False} +else + PUBLIC_INTERFACE_DEFAULT=br100 + GUEST_INTERFACE_DEFAULT=eth0 + FLAT_NETWORK_BRIDGE_DEFAULT=br100 +fi + +NETWORK_MANAGER=${NETWORK_MANAGER:-${NET_MAN:-FlatDHCPManager}} +PUBLIC_INTERFACE=${PUBLIC_INTERFACE:-$PUBLIC_INTERFACE_DEFAULT} +VLAN_INTERFACE=${VLAN_INTERFACE:-$GUEST_INTERFACE_DEFAULT} +FLAT_NETWORK_BRIDGE=${FLAT_NETWORK_BRIDGE:-$FLAT_NETWORK_BRIDGE_DEFAULT} +EC2_DMZ_HOST=${EC2_DMZ_HOST:-$SERVICE_HOST} + +# If you are using the FlatDHCP network mode on multiple hosts, set the +# ``FLAT_INTERFACE`` variable but make sure that the interface doesn't already +# have an IP or you risk breaking things. +# +# **DHCP Warning**: If your flat interface device uses DHCP, there will be a +# hiccup while the network is moved from the flat interface to the flat network +# bridge. This will happen when you launch your first instance. Upon launch +# you will lose all connectivity to the node, and the VM launch will probably +# fail. +# +# If you are running on a single node and don't need to access the VMs from +# devices other than that node, you can set ``FLAT_INTERFACE=`` +# This will stop nova from bridging any interfaces into ``FLAT_NETWORK_BRIDGE``. 
+FLAT_INTERFACE=${FLAT_INTERFACE-$GUEST_INTERFACE_DEFAULT} + +# ``MULTI_HOST`` is a mode where each compute node runs its own network node. This +# allows network operations and routing for a VM to occur on the server that is +# running the VM - removing a SPOF and bandwidth bottleneck. +MULTI_HOST=`trueorfalse False $MULTI_HOST` + +# Test floating pool and range are used for testing. They are defined +# here until the admin APIs can replace nova-manage +TEST_FLOATING_POOL=${TEST_FLOATING_POOL:-test} +TEST_FLOATING_RANGE=${TEST_FLOATING_RANGE:-192.168.253.0/29} + + # Entry Points # ------------ @@ -439,6 +492,49 @@ function create_nova_conf() { # Replace the first '=' with ' ' for iniset syntax iniset $NOVA_CONF DEFAULT ${I/=/ } done + + # All nova-compute workers need to know the vnc configuration options + # These settings don't hurt anything if n-xvnc and n-novnc are disabled + if is_service_enabled n-cpu; then + NOVNCPROXY_URL=${NOVNCPROXY_URL:-"http://$SERVICE_HOST:6080/vnc_auto.html"} + iniset $NOVA_CONF DEFAULT novncproxy_base_url "$NOVNCPROXY_URL" + XVPVNCPROXY_URL=${XVPVNCPROXY_URL:-"http://$SERVICE_HOST:6081/console"} + iniset $NOVA_CONF DEFAULT xvpvncproxy_base_url "$XVPVNCPROXY_URL" + SPICEHTML5PROXY_URL=${SPICEHTML5PROXY_URL:-"http://$SERVICE_HOST:6082/spice_auto.html"} + iniset $NOVA_CONF spice html5proxy_base_url "$SPICEHTML5PROXY_URL" + fi + if [ "$VIRT_DRIVER" = 'xenserver' ]; then + VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=169.254.0.1} + else + VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=127.0.0.1} + fi + + if is_service_enabled n-novnc || is_service_enabled n-xvnc ; then + # Address on which instance vncservers will listen on compute hosts. + # For multi-host, this should be the management ip of the compute host. 
+ VNCSERVER_LISTEN=${VNCSERVER_LISTEN=127.0.0.1} + iniset $NOVA_CONF DEFAULT vnc_enabled true + iniset $NOVA_CONF DEFAULT vncserver_listen "$VNCSERVER_LISTEN" + iniset $NOVA_CONF DEFAULT vncserver_proxyclient_address "$VNCSERVER_PROXYCLIENT_ADDRESS" + else + iniset $NOVA_CONF DEFAULT vnc_enabled false + fi + + if is_service_enabled n-spice; then + # Address on which instance spiceservers will listen on compute hosts. + # For multi-host, this should be the management ip of the compute host. + SPICESERVER_PROXYCLIENT_ADDRESS=${SPICESERVER_PROXYCLIENT_ADDRESS=127.0.0.1} + SPICESERVER_LISTEN=${SPICESERVER_LISTEN=127.0.0.1} + iniset $NOVA_CONF spice enabled true + iniset $NOVA_CONF spice server_listen "$SPICESERVER_LISTEN" + iniset $NOVA_CONF spice server_proxyclient_address "$SPICESERVER_PROXYCLIENT_ADDRESS" + else + iniset $NOVA_CONF spice enabled false + fi + + iniset $NOVA_CONF DEFAULT ec2_dmz_host "$EC2_DMZ_HOST" + iniset_rpc_backend nova $NOVA_CONF DEFAULT + iniset $NOVA_CONF DEFAULT glance_api_servers "$GLANCE_HOSTPORT" } # create_nova_cache_dir() - Part of the init_nova() process @@ -450,7 +546,7 @@ function create_nova_cache_dir() { } function create_nova_conf_nova_network() { - iniset $NOVA_CONF DEFAULT network_manager "nova.network.manager.$NET_MAN" + iniset $NOVA_CONF DEFAULT network_manager "nova.network.manager.$NETWORK_MANAGER" iniset $NOVA_CONF DEFAULT public_interface "$PUBLIC_INTERFACE" iniset $NOVA_CONF DEFAULT vlan_interface "$VLAN_INTERFACE" iniset $NOVA_CONF DEFAULT flat_network_bridge "$FLAT_NETWORK_BRIDGE" diff --git a/lib/quantum b/lib/quantum index 862ba8486d..6dae13fe3c 100644 --- a/lib/quantum +++ b/lib/quantum @@ -53,7 +53,7 @@ # that must be set in localrc for connectivity across hosts with # Quantum. # -# With Quantum networking the NET_MAN variable is ignored. +# With Quantum networking the NETWORK_MANAGER variable is ignored. 
# Save trace setting diff --git a/stack.sh b/stack.sh index 14bb1610f0..233474256e 100755 --- a/stack.sh +++ b/stack.sh @@ -278,11 +278,6 @@ SWIFT3_DIR=$DEST/swift3 # https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1023755 CINDER_SECURE_DELETE=`trueorfalse True $CINDER_SECURE_DELETE` -# Name of the LVM volume group to use/create for iscsi volumes -VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes} -VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-} -INSTANCE_NAME_PREFIX=${INSTANCE_NAME_PREFIX:-instance-} - # Generic helper to configure passwords function read_password { XTRACE=$(set +o | grep xtrace) @@ -326,64 +321,6 @@ function read_password { } -# Nova Network Configuration -# -------------------------- - -# FIXME: more documentation about why these are important options. Also -# we should make sure we use the same variable names as the option names. - -if [ "$VIRT_DRIVER" = 'xenserver' ]; then - PUBLIC_INTERFACE_DEFAULT=eth3 - # Allow ``build_domU.sh`` to specify the flat network bridge via kernel args - FLAT_NETWORK_BRIDGE_DEFAULT=$(grep -o 'flat_network_bridge=[[:alnum:]]*' /proc/cmdline | cut -d= -f 2 | sort -u) - GUEST_INTERFACE_DEFAULT=eth1 -elif [ "$VIRT_DRIVER" = 'baremetal' ]; then - PUBLIC_INTERFACE_DEFAULT=eth0 - FLAT_NETWORK_BRIDGE_DEFAULT=br100 - FLAT_INTERFACE=${FLAT_INTERFACE:-eth0} - FORCE_DHCP_RELEASE=${FORCE_DHCP_RELEASE:-False} - NET_MAN=${NET_MAN:-FlatManager} - STUB_NETWORK=${STUB_NETWORK:-False} -else - PUBLIC_INTERFACE_DEFAULT=br100 - FLAT_NETWORK_BRIDGE_DEFAULT=br100 - GUEST_INTERFACE_DEFAULT=eth0 -fi - -PUBLIC_INTERFACE=${PUBLIC_INTERFACE:-$PUBLIC_INTERFACE_DEFAULT} -NET_MAN=${NET_MAN:-FlatDHCPManager} -EC2_DMZ_HOST=${EC2_DMZ_HOST:-$SERVICE_HOST} -FLAT_NETWORK_BRIDGE=${FLAT_NETWORK_BRIDGE:-$FLAT_NETWORK_BRIDGE_DEFAULT} -VLAN_INTERFACE=${VLAN_INTERFACE:-$GUEST_INTERFACE_DEFAULT} -FORCE_DHCP_RELEASE=${FORCE_DHCP_RELEASE:-True} - -# Test floating pool and range are used for testing. 
They are defined -# here until the admin APIs can replace nova-manage -TEST_FLOATING_POOL=${TEST_FLOATING_POOL:-test} -TEST_FLOATING_RANGE=${TEST_FLOATING_RANGE:-192.168.253.0/29} - -# ``MULTI_HOST`` is a mode where each compute node runs its own network node. This -# allows network operations and routing for a VM to occur on the server that is -# running the VM - removing a SPOF and bandwidth bottleneck. -MULTI_HOST=`trueorfalse False $MULTI_HOST` - -# If you are using the FlatDHCP network mode on multiple hosts, set the -# ``FLAT_INTERFACE`` variable but make sure that the interface doesn't already -# have an IP or you risk breaking things. -# -# **DHCP Warning**: If your flat interface device uses DHCP, there will be a -# hiccup while the network is moved from the flat interface to the flat network -# bridge. This will happen when you launch your first instance. Upon launch -# you will lose all connectivity to the node, and the VM launch will probably -# fail. -# -# If you are running on a single node and don't need to access the VMs from -# devices other than that node, you can set ``FLAT_INTERFACE=`` -# This will stop nova from bridging any interfaces into ``FLAT_NETWORK_BRIDGE``. -FLAT_INTERFACE=${FLAT_INTERFACE-$GUEST_INTERFACE_DEFAULT} - -## FIXME(ja): should/can we check that FLAT_INTERFACE is sane? 
- # Database Configuration # ---------------------- @@ -980,48 +917,6 @@ if is_service_enabled nova; then elif is_service_enabled n-net; then create_nova_conf_nova_network fi - # All nova-compute workers need to know the vnc configuration options - # These settings don't hurt anything if n-xvnc and n-novnc are disabled - if is_service_enabled n-cpu; then - NOVNCPROXY_URL=${NOVNCPROXY_URL:-"http://$SERVICE_HOST:6080/vnc_auto.html"} - iniset $NOVA_CONF DEFAULT novncproxy_base_url "$NOVNCPROXY_URL" - XVPVNCPROXY_URL=${XVPVNCPROXY_URL:-"http://$SERVICE_HOST:6081/console"} - iniset $NOVA_CONF DEFAULT xvpvncproxy_base_url "$XVPVNCPROXY_URL" - SPICEHTML5PROXY_URL=${SPICEHTML5PROXY_URL:-"http://$SERVICE_HOST:6082/spice_auto.html"} - iniset $NOVA_CONF spice html5proxy_base_url "$SPICEHTML5PROXY_URL" - fi - if [ "$VIRT_DRIVER" = 'xenserver' ]; then - VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=169.254.0.1} - else - VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=127.0.0.1} - fi - - if is_service_enabled n-novnc || is_service_enabled n-xvnc ; then - # Address on which instance vncservers will listen on compute hosts. - # For multi-host, this should be the management ip of the compute host. - VNCSERVER_LISTEN=${VNCSERVER_LISTEN=127.0.0.1} - iniset $NOVA_CONF DEFAULT vnc_enabled true - iniset $NOVA_CONF DEFAULT vncserver_listen "$VNCSERVER_LISTEN" - iniset $NOVA_CONF DEFAULT vncserver_proxyclient_address "$VNCSERVER_PROXYCLIENT_ADDRESS" - else - iniset $NOVA_CONF DEFAULT vnc_enabled false - fi - - if is_service_enabled n-spice; then - # Address on which instance spiceservers will listen on compute hosts. - # For multi-host, this should be the management ip of the compute host. 
- SPICESERVER_PROXYCLIENT_ADDRESS=${SPICESERVER_PROXYCLIENT_ADDRESS=127.0.0.1} - SPICESERVER_LISTEN=${SPICESERVER_LISTEN=127.0.0.1} - iniset $NOVA_CONF spice enabled true - iniset $NOVA_CONF spice server_listen "$SPICESERVER_LISTEN" - iniset $NOVA_CONF spice server_proxyclient_address "$SPICESERVER_PROXYCLIENT_ADDRESS" - else - iniset $NOVA_CONF spice enabled false - fi - - iniset $NOVA_CONF DEFAULT ec2_dmz_host "$EC2_DMZ_HOST" - iniset_rpc_backend nova $NOVA_CONF DEFAULT - iniset $NOVA_CONF DEFAULT glance_api_servers "$GLANCE_HOSTPORT" # XenServer diff --git a/stackrc b/stackrc index d418a0ec5a..3da366d9e2 100644 --- a/stackrc +++ b/stackrc @@ -196,5 +196,10 @@ esac # 5Gb default volume backing file size VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-5130M} +# Name of the LVM volume group to use/create for iscsi volumes +VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes} +VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-} +INSTANCE_NAME_PREFIX=${INSTANCE_NAME_PREFIX:-instance-} + PRIVATE_NETWORK_NAME=${PRIVATE_NETWORK_NAME:-"private"} PUBLIC_NETWORK_NAME=${PUBLIC_NETWORK_NAME:-"nova"} diff --git a/tools/build_bm_multi.sh b/tools/build_bm_multi.sh index f1242ee4f0..52b9b4ea32 100755 --- a/tools/build_bm_multi.sh +++ b/tools/build_bm_multi.sh @@ -6,7 +6,7 @@ SHELL_AFTER_RUN=no # Variables common amongst all hosts in the cluster -COMMON_VARS="MYSQL_HOST=$HEAD_HOST RABBIT_HOST=$HEAD_HOST GLANCE_HOSTPORT=$HEAD_HOST:9292 NET_MAN=FlatDHCPManager FLAT_INTERFACE=eth0 FLOATING_RANGE=$FLOATING_RANGE MULTI_HOST=1 SHELL_AFTER_RUN=$SHELL_AFTER_RUN" +COMMON_VARS="MYSQL_HOST=$HEAD_HOST RABBIT_HOST=$HEAD_HOST GLANCE_HOSTPORT=$HEAD_HOST:9292 NETWORK_MANAGER=FlatDHCPManager FLAT_INTERFACE=eth0 FLOATING_RANGE=$FLOATING_RANGE MULTI_HOST=1 SHELL_AFTER_RUN=$SHELL_AFTER_RUN" # Helper to launch containers function run_bm { From 13aab25d3da100b494d61548654f29b1999d33ec Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Mon, 18 Mar 2013 18:55:09 +0100 Subject: [PATCH 0003/4704] Accept Quantums 
rootwrap.conf in etc/quantum/rootwrap.conf As part of the review request https://review.openstack.org/#/c/24615/ the Quantum rootwrap.conf is moving to etc/quantum subdir. Prefer the new location. Change-Id: I2a893c7b21e252543372854ba511f61cd1fde02f --- lib/quantum | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/lib/quantum b/lib/quantum index 862ba8486d..2ef883a544 100644 --- a/lib/quantum +++ b/lib/quantum @@ -569,7 +569,12 @@ function _quantum_setup_rootwrap() { sudo chown -R root:root $Q_CONF_ROOTWRAP_D sudo chmod 644 $Q_CONF_ROOTWRAP_D/* # Set up rootwrap.conf, pointing to $QUANTUM_CONF_DIR/rootwrap.d - sudo cp -p $QUANTUM_DIR/etc/rootwrap.conf $Q_RR_CONF_FILE + # location moved in newer versions, prefer new location + if test -r $QUANTUM_DIR/etc/quantum/rootwrap.conf; then + sudo cp -p $QUANTUM_DIR/etc/quantum/rootwrap.conf $Q_RR_CONF_FILE + else + sudo cp -p $QUANTUM_DIR/etc/rootwrap.conf $Q_RR_CONF_FILE + fi sudo sed -e "s:^filters_path=.*$:filters_path=$Q_CONF_ROOTWRAP_D:" -i $Q_RR_CONF_FILE sudo chown root:root $Q_RR_CONF_FILE sudo chmod 0644 $Q_RR_CONF_FILE From 9a3ba4b3e1a6286cd1ccaafdb2c1d9960c91da9c Mon Sep 17 00:00:00 2001 From: Tim Miller Date: Mon, 18 Mar 2013 18:08:27 -0700 Subject: [PATCH 0004/4704] Fix typo in baremetal header comment. Change-Id: I553c37581c92dcdc13e0d8dcdb9c8be7309deaa1 --- lib/baremetal | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/baremetal b/lib/baremetal index 57048a1aa8..5326dd1ff1 100644 --- a/lib/baremetal +++ b/lib/baremetal @@ -33,7 +33,7 @@ # baremetal driver uses that to push a disk image onto the node(s). # # Below we define various defaults which control the behavior of the -# baremetal compute service, and inform it of the hardware it will contorl. +# baremetal compute service, and inform it of the hardware it will control. 
# # Below that, various functions are defined, which are called by devstack # in the following order: @@ -395,7 +395,7 @@ function upload_baremetal_image() { ${KERNEL_ID:+--property kernel_id=$KERNEL_ID} \ ${RAMDISK_ID:+--property ramdisk_id=$RAMDISK_ID} < "${IMAGE}" - # override DEFAULT_IMAGE_NAME so that tempest can find the image + # override DEFAULT_IMAGE_NAME so that tempest can find the image # that we just uploaded in glance DEFAULT_IMAGE_NAME="${IMAGE_NAME%.img}" } From 0c2c3fc20e99372f3fb54390f023ce5657feefbf Mon Sep 17 00:00:00 2001 From: Brad Topol Date: Tue, 19 Mar 2013 03:01:30 -0500 Subject: [PATCH 0005/4704] Explicitly add cosine and inetorgperson schemas on Fedora Fixes Bug1156651 Change-Id: I957a8cdc562a887b0def7bc07c6bb434ce0a0437 --- lib/ldap | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/lib/ldap b/lib/ldap index 0a0d197df2..9d415c5a84 100644 --- a/lib/ldap +++ b/lib/ldap @@ -37,6 +37,12 @@ function install_ldap() { #update ldap olcdb sudo ldapmodify -Y EXTERNAL -H ldapi:/// -f $TMP_MGR_DIFF_FILE + # On fedora we need to manually add cosine and inetorgperson schemas + if is_fedora; then + sudo ldapadd -Y EXTERNAL -H ldapi:/// -f /etc/openldap/schema/cosine.ldif + sudo ldapadd -Y EXTERNAL -H ldapi:/// -f /etc/openldap/schema/inetorgperson.ldif + fi + # add our top level ldap nodes if ldapsearch -x -w $LDAP_PASSWORD -H ldap://localhost -D dc=Manager,dc=openstack,dc=org -x -b dc=openstack,dc=org | grep -q "Success" ; then printf "LDAP already configured for OpenStack\n" From 58ab929de63a3a3d7a615edff6c04044071cee5d Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Tue, 19 Mar 2013 15:39:47 -0500 Subject: [PATCH 0006/4704] Force Quantum processes to run in a subshell Force bash to evaluate the command line passed to screen_it so the commands are executed in a subshell. This seems to make a difference when using run_service() when the server process is owned by init. 
These Quantum services were the only ones in all of DevStack being spawned in this manner. This will allow the run_service() patch https://review.openstack.org/#/c/23148/ to succesfully be merged and the number of test failures due to screen should be reduced to 0. Change-Id: I97d562adda51d39a5e7fa1dc4d945d4a396201cd --- lib/quantum | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/lib/quantum b/lib/quantum index 862ba8486d..002d83dbee 100644 --- a/lib/quantum +++ b/lib/quantum @@ -365,13 +365,13 @@ function start_quantum_service_and_check() { # Start running processes, including screen function start_quantum_agents() { # Start up the quantum agents if enabled - screen_it q-agt "python $AGENT_BINARY --config-file $QUANTUM_CONF --config-file /$Q_PLUGIN_CONF_FILE" - screen_it q-dhcp "python $AGENT_DHCP_BINARY --config-file $QUANTUM_CONF --config-file=$Q_DHCP_CONF_FILE" - screen_it q-meta "python $AGENT_META_BINARY --config-file $QUANTUM_CONF --config-file=$Q_META_CONF_FILE" - screen_it q-l3 "python $AGENT_L3_BINARY --config-file $QUANTUM_CONF --config-file=$Q_L3_CONF_FILE" + screen_it q-agt "cd $QUANTUM_DIR && python $AGENT_BINARY --config-file $QUANTUM_CONF --config-file /$Q_PLUGIN_CONF_FILE" + screen_it q-dhcp "cd $QUANTUM_DIR && python $AGENT_DHCP_BINARY --config-file $QUANTUM_CONF --config-file=$Q_DHCP_CONF_FILE" + screen_it q-l3 "cd $QUANTUM_DIR && python $AGENT_L3_BINARY --config-file $QUANTUM_CONF --config-file=$Q_L3_CONF_FILE" + screen_it q-meta "cd $QUANTUM_DIR && python $AGENT_META_BINARY --config-file $QUANTUM_CONF --config-file=$Q_META_CONF_FILE" if is_service_enabled q-lbaas; then - screen_it q-lbaas "python $AGENT_LBAAS_BINARY --config-file $QUANTUM_CONF --config-file=$LBAAS_AGENT_CONF_FILENAME" + screen_it q-lbaas "cd $QUANTUM_DIR && python $AGENT_LBAAS_BINARY --config-file $QUANTUM_CONF --config-file=$LBAAS_AGENT_CONF_FILENAME" fi } From cf9c10d63a4b6f2a24dd335c9f14e6a4289172d8 Mon Sep 17 00:00:00 2001 From: mathieu-rohon 
Date: Mon, 18 Mar 2013 17:34:03 +0100 Subject: [PATCH 0007/4704] Enable multi-agent support even when q-svc is disabled, i.e. on node without q-svc in multi-agent architecture, state_path will be configured in quantum.conf so that agent doesn't crashs anymore Bug 1156685 Change-Id: I28625e4b606fa61c35179248321632d3d797c547 --- lib/quantum | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/quantum b/lib/quantum index 862ba8486d..399b0074a2 100644 --- a/lib/quantum +++ b/lib/quantum @@ -415,6 +415,7 @@ function _configure_quantum_common() { cp $QUANTUM_DIR/$Q_PLUGIN_CONF_FILE /$Q_PLUGIN_CONF_FILE iniset /$Q_PLUGIN_CONF_FILE DATABASE sql_connection `database_connection_url $Q_DB_NAME` + iniset $QUANTUM_CONF DEFAULT state_path $DATA_DIR/quantum _quantum_setup_rootwrap } @@ -534,7 +535,6 @@ function _configure_quantum_service() { iniset $QUANTUM_CONF DEFAULT verbose True iniset $QUANTUM_CONF DEFAULT debug True - iniset $QUANTUM_CONF DEFAULT state_path $DATA_DIR/quantum iniset $QUANTUM_CONF DEFAULT policy_file $Q_POLICY_FILE iniset $QUANTUM_CONF DEFAULT allow_overlapping_ips $Q_ALLOW_OVERLAPPING_IP From 681f3fddeca89dde1140b79e494aa9ff936273f7 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 27 Feb 2013 19:00:39 -0600 Subject: [PATCH 0008/4704] Add run_process() to start services without screen * USE_SCREEN defaults to True, set it to False to exec the services directly via bash. SCREEN_DEV is still supported until the CI scripts get updated. * The extra logging file descriptors are properly closed in the child process and stdout/stderr are redirected to the log files. * The screen_rc() call is still present; this means that stack-screenrc will have a complete record of what was started and rejoin-stack.sh may be able to re-create the setup under screen. * The python interpreter was unwilling to write to the log files without unbufering stdout by using PYTHONUNBUFFERED. This feels hackish and should be investigated further. 
Change-Id: I012ed049f2c8b185a2e6929d73edc29e167bc21f --- functions | 62 +++++++++++++++++++++++++++++++++++++++++++++++-------- stack.sh | 19 ++++++++++------- stackrc | 7 +++++-- 3 files changed, 69 insertions(+), 19 deletions(-) diff --git a/functions b/functions index b94c611446..d8b87d43ce 100644 --- a/functions +++ b/functions @@ -735,26 +735,69 @@ function restart_service() { } +# _run_process() is designed to be backgrounded by run_process() to simulate a +# fork. It includes the dirty work of closing extra filehandles and preparing log +# files to produce the same logs as screen_it(). The log filename is derived +# from the service name and global-and-now-misnamed SCREEN_LOGDIR +# _run_process service "command-line" +function _run_process() { + local service=$1 + local command="$2" + + # Undo logging redirections and close the extra descriptors + exec 1>&3 + exec 2>&3 + exec 3>&- + exec 6>&- + + if [[ -n ${SCREEN_LOGDIR} ]]; then + exec 1>&${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log 2>&1 + ln -sf ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${1}.log + + # TODO(dtroyer): Hack to get stdout from the Python interpreter for the logs. + export PYTHONUNBUFFERED=1 + fi + + exec /bin/bash -c "$command" + die "$service exec failure: $command" +} + + +# run_process() launches a child process that closes all file descriptors and +# then exec's the passed in command. This is meant to duplicate the semantics +# of screen_it() without screen. PIDs are written to +# $SERVICE_DIR/$SCREEN_NAME/$service.pid +# run_process service "command-line" +function run_process() { + local service=$1 + local command="$2" + + # Spawn the child process + _run_process "$service" "$command" & + echo $! 
+} + + # Helper to launch a service in a named screen # screen_it service "command-line" function screen_it { SCREEN_NAME=${SCREEN_NAME:-stack} SERVICE_DIR=${SERVICE_DIR:-${DEST}/status} - SCREEN_DEV=`trueorfalse True $SCREEN_DEV` + USE_SCREEN=$(trueorfalse True $USE_SCREEN) if is_service_enabled $1; then # Append the service to the screen rc file screen_rc "$1" "$2" - screen -S $SCREEN_NAME -X screen -t $1 + if [[ "$USE_SCREEN" = "True" ]]; then + screen -S $SCREEN_NAME -X screen -t $1 - if [[ -n ${SCREEN_LOGDIR} ]]; then - screen -S $SCREEN_NAME -p $1 -X logfile ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log - screen -S $SCREEN_NAME -p $1 -X log on - ln -sf ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${1}.log - fi + if [[ -n ${SCREEN_LOGDIR} ]]; then + screen -S $SCREEN_NAME -p $1 -X logfile ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log + screen -S $SCREEN_NAME -p $1 -X log on + ln -sf ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${1}.log + fi - if [[ "$SCREEN_DEV" = "True" ]]; then # sleep to allow bash to be ready to be send the command - we are # creating a new window in screen and then sends characters, so if # bash isn't running by the time we send the command, nothing happens @@ -763,7 +806,8 @@ function screen_it { NL=`echo -ne '\015'` screen -S $SCREEN_NAME -p $1 -X stuff "$2 || touch \"$SERVICE_DIR/$SCREEN_NAME/$1.failure\"$NL" else - screen -S $SCREEN_NAME -p $1 -X exec /bin/bash -c "$2 || touch \"$SERVICE_DIR/$SCREEN_NAME/$1.failure\"" + # Spawn directly without screen + run_process "$1" "$2" >$SERVICE_DIR/$SCREEN_NAME/$service.pid fi fi } diff --git a/stack.sh b/stack.sh index a4106e51e8..3fab488bff 100755 --- a/stack.sh +++ b/stack.sh @@ -824,8 +824,17 @@ fi # Configure screen # ---------------- -if [ -z "$SCREEN_HARDSTATUS" ]; then - SCREEN_HARDSTATUS='%{= .} %-Lw%{= .}%> %n%f %t*%{= .}%+Lw%< %-=%{g}(%{d}%H/%l%{g})' +USE_SCREEN=$(trueorfalse True $USE_SCREEN) +if [[ 
"$USE_SCREEN" == "True" ]]; then + # Create a new named screen to run processes in + screen -d -m -S $SCREEN_NAME -t shell -s /bin/bash + sleep 1 + + # Set a reasonable status bar + if [ -z "$SCREEN_HARDSTATUS" ]; then + SCREEN_HARDSTATUS='%{= .} %-Lw%{= .}%> %n%f %t*%{= .}%+Lw%< %-=%{g}(%{d}%H/%l%{g})' + fi + screen -r $SCREEN_NAME -X hardstatus alwayslastline "$SCREEN_HARDSTATUS" fi # Clear screen rc file @@ -834,12 +843,6 @@ if [[ -e $SCREENRC ]]; then echo -n > $SCREENRC fi -# Create a new named screen to run processes in -screen -d -m -S $SCREEN_NAME -t shell -s /bin/bash -sleep 1 - -# Set a reasonable status bar -screen -r $SCREEN_NAME -X hardstatus alwayslastline "$SCREEN_HARDSTATUS" # Initialize the directory for service status check init_service_check diff --git a/stackrc b/stackrc index 008bc9c6b8..5b473c4763 100644 --- a/stackrc +++ b/stackrc @@ -30,8 +30,8 @@ NOVA_ENABLED_APIS=ec2,osapi_compute,metadata # stuffing text into the screen windows so that a developer can use # ctrl-c, up-arrow, enter to restart the service. Starting services # this way is slightly unreliable, and a bit slower, so this can -# be disabled for automated testing by setting this value to false. -SCREEN_DEV=True +# be disabled for automated testing by setting this value to False. +USE_SCREEN=True # Repositories # ------------ @@ -198,3 +198,6 @@ VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-5130M} PRIVATE_NETWORK_NAME=${PRIVATE_NETWORK_NAME:-"private"} PUBLIC_NETWORK_NAME=${PUBLIC_NETWORK_NAME:-"nova"} + +# Compatibility until it's eradicated from CI +USE_SCREEN=${SCREEN_DEV:-$USE_SCREEN} From b7490da972c673960c800e3803c2a568bb7a43b6 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 18 Mar 2013 16:07:56 -0500 Subject: [PATCH 0009/4704] Clean up stack.sh config * Clean up interactive configuration * Complete moving initialization of service-specific varialbes into the service lib/* files. 
* Cosmetic cleanups Change-Id: Iea14359bd224dd5533201d4c7cb1437d5382c4d1 --- lib/cinder | 5 +++ lib/keystone | 3 ++ lib/nova | 3 ++ lib/swift | 7 ++++ stack.sh | 100 +++++++++++++++++++++++++-------------------------- stackrc | 4 +++ 6 files changed, 71 insertions(+), 51 deletions(-) diff --git a/lib/cinder b/lib/cinder index b3e1904de0..7688ad9903 100644 --- a/lib/cinder +++ b/lib/cinder @@ -53,6 +53,11 @@ fi # Support for multi lvm backend configuration (default is no support) CINDER_MULTI_LVM_BACKEND=$(trueorfalse False $CINDER_MULTI_LVM_BACKEND) +# Should cinder perform secure deletion of volumes? +# Defaults to true, can be set to False to avoid this bug when testing: +# https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1023755 +CINDER_SECURE_DELETE=`trueorfalse True $CINDER_SECURE_DELETE` + # Name of the lvm volume groups to use/create for iscsi volumes # VOLUME_GROUP2 is used only if CINDER_MULTI_LVM_BACKEND = True VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes} diff --git a/lib/keystone b/lib/keystone index 17e0866fec..805cb6f045 100644 --- a/lib/keystone +++ b/lib/keystone @@ -59,6 +59,9 @@ KEYSTONE_SERVICE_PORT=${KEYSTONE_SERVICE_PORT:-5000} KEYSTONE_SERVICE_PORT_INT=${KEYSTONE_SERVICE_PORT_INT:-5001} KEYSTONE_SERVICE_PROTOCOL=${KEYSTONE_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} +# Set the tenant for service accounts in Keystone +SERVICE_TENANT_NAME=${SERVICE_TENANT_NAME:-service} + # Entry Points # ------------ diff --git a/lib/nova b/lib/nova index f0c83157d6..d96ed4f2dd 100644 --- a/lib/nova +++ b/lib/nova @@ -65,6 +65,9 @@ SCHEDULER=${SCHEDULER:-nova.scheduler.filter_scheduler.FilterScheduler} QEMU_CONF=/etc/libvirt/qemu.conf +NOVNC_DIR=$DEST/noVNC +SPICE_DIR=$DEST/spice-html5 + # Nova Network Configuration # -------------------------- diff --git a/lib/swift b/lib/swift index 2c87d21f6a..d50b554169 100644 --- a/lib/swift +++ b/lib/swift @@ -28,6 +28,7 @@ set +o xtrace SWIFT_DIR=$DEST/swift SWIFTCLIENT_DIR=$DEST/python-swiftclient 
SWIFT_AUTH_CACHE_DIR=${SWIFT_AUTH_CACHE_DIR:-/var/cache/swift} +SWIFT3_DIR=$DEST/swift3 # TODO: add logging to different location. @@ -40,6 +41,12 @@ SWIFT_DATA_DIR=${SWIFT_DATA_DIR:-${DATA_DIR}/swift} # TODO(dtroyer): remove SWIFT_CONFIG_DIR after cutting stable/grizzly SWIFT_CONF_DIR=${SWIFT_CONF_DIR:-${SWIFT_CONFIG_DIR:-/etc/swift}} +if is_service_enabled s-proxy && is_service_enabled swift3; then + # If we are using swift3, we can default the s3 port to swift instead + # of nova-objectstore + S3_SERVICE_PORT=${S3_SERVICE_PORT:-8080} +fi + # DevStack will create a loop-back disk formatted as XFS to store the # swift data. Set ``SWIFT_LOOPBACK_DISK_SIZE`` to the disk size in # kilobytes. diff --git a/stack.sh b/stack.sh index 9a87a5f16c..cfce6be121 100755 --- a/stack.sh +++ b/stack.sh @@ -269,14 +269,12 @@ source $TOP_DIR/lib/ldap # Set the destination directories for OpenStack projects HORIZON_DIR=$DEST/horizon OPENSTACKCLIENT_DIR=$DEST/python-openstackclient -NOVNC_DIR=$DEST/noVNC -SPICE_DIR=$DEST/spice-html5 -SWIFT3_DIR=$DEST/swift3 -# Should cinder perform secure deletion of volumes? 
-# Defaults to true, can be set to False to avoid this bug when testing: -# https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1023755 -CINDER_SECURE_DELETE=`trueorfalse True $CINDER_SECURE_DELETE` + +# Interactive Configuration +# ------------------------- + +# Do all interactive config up front before the logging spew begins # Generic helper to configure passwords function read_password { @@ -322,7 +320,6 @@ function read_password { # Database Configuration -# ---------------------- # To select between database backends, add the following to ``localrc``: # @@ -335,8 +332,7 @@ function read_password { initialize_database_backends && echo "Using $DATABASE_TYPE database backend" || echo "No database enabled" -# RabbitMQ or Qpid -# -------------------------- +# Queue Configuration # Rabbit connection info if is_service_enabled rabbit; then @@ -344,53 +340,45 @@ if is_service_enabled rabbit; then read_password RABBIT_PASSWORD "ENTER A PASSWORD TO USE FOR RABBIT." fi -if is_service_enabled s-proxy; then - # If we are using swift3, we can default the s3 port to swift instead - # of nova-objectstore - if is_service_enabled swift3;then - S3_SERVICE_PORT=${S3_SERVICE_PORT:-8080} - fi - # We only ask for Swift Hash if we have enabled swift service. - # ``SWIFT_HASH`` is a random unique string for a swift cluster that - # can never change. - read_password SWIFT_HASH "ENTER A RANDOM SWIFT HASH." -fi - -# Set default port for nova-objectstore -S3_SERVICE_PORT=${S3_SERVICE_PORT:-3333} - # Keystone -# -------- -# The ``SERVICE_TOKEN`` is used to bootstrap the Keystone database. It is -# just a string and is not a 'real' Keystone token. -read_password SERVICE_TOKEN "ENTER A SERVICE_TOKEN TO USE FOR THE SERVICE ADMIN TOKEN." -# Services authenticate to Identity with servicename/``SERVICE_PASSWORD`` -read_password SERVICE_PASSWORD "ENTER A SERVICE_PASSWORD TO USE FOR THE SERVICE AUTHENTICATION." 
-# Horizon currently truncates usernames and passwords at 20 characters -read_password ADMIN_PASSWORD "ENTER A PASSWORD TO USE FOR HORIZON AND KEYSTONE (20 CHARS OR LESS)." -# Keystone can now optionally install OpenLDAP by adding ldap to the list -# of enabled services in the localrc file (e.g. ENABLED_SERVICES=key,ldap). -# If OpenLDAP has already been installed but you need to clear out -# the Keystone contents of LDAP set KEYSTONE_CLEAR_LDAP to yes -# (e.g. KEYSTONE_CLEAR_LDAP=yes ) in the localrc file. To enable the -# Keystone Identity Driver (keystone.identity.backends.ldap.Identity) -# set KEYSTONE_IDENTITY_BACKEND to ldap (e.g. KEYSTONE_IDENTITY_BACKEND=ldap) -# in the localrc file. +if is_service_enabled key; then + # The ``SERVICE_TOKEN`` is used to bootstrap the Keystone database. It is + # just a string and is not a 'real' Keystone token. + read_password SERVICE_TOKEN "ENTER A SERVICE_TOKEN TO USE FOR THE SERVICE ADMIN TOKEN." + # Services authenticate to Identity with servicename/``SERVICE_PASSWORD`` + read_password SERVICE_PASSWORD "ENTER A SERVICE_PASSWORD TO USE FOR THE SERVICE AUTHENTICATION." + # Horizon currently truncates usernames and passwords at 20 characters + read_password ADMIN_PASSWORD "ENTER A PASSWORD TO USE FOR HORIZON AND KEYSTONE (20 CHARS OR LESS)." + + # Keystone can now optionally install OpenLDAP by enabling the ``ldap`` + # service in ``localrc`` (e.g. ``enable_service ldap``). + # To clean out the Keystone contents in OpenLDAP set ``KEYSTONE_CLEAR_LDAP`` + # to ``yes`` (e.g. ``KEYSTONE_CLEAR_LDAP=yes``) in ``localrc``. To enable the + # Keystone Identity Driver (``keystone.identity.backends.ldap.Identity``) + # set ``KEYSTONE_IDENTITY_BACKEND`` to ``ldap`` (e.g. + # ``KEYSTONE_IDENTITY_BACKEND=ldap``) in ``localrc``. 
+ + # only request ldap password if the service is enabled + if is_service_enabled ldap; then + read_password LDAP_PASSWORD "ENTER A PASSWORD TO USE FOR LDAP" + fi +fi -# only request ldap password if the service is enabled -if is_service_enabled ldap; then - read_password LDAP_PASSWORD "ENTER A PASSWORD TO USE FOR LDAP" -fi +# Swift -# Set the tenant for service accounts in Keystone -SERVICE_TENANT_NAME=${SERVICE_TENANT_NAME:-service} +if is_service_enabled s-proxy; then + # We only ask for Swift Hash if we have enabled swift service. + # ``SWIFT_HASH`` is a random unique string for a swift cluster that + # can never change. + read_password SWIFT_HASH "ENTER A RANDOM SWIFT HASH." +fi -# Log files -# --------- +# Configure logging +# ----------------- # Draw a spinner so the user knows something is happening function spinner() { @@ -638,14 +626,15 @@ fi echo_summary "Configuring OpenStack projects" -# Set up our checkouts so they are installed into python path -# allowing ``import nova`` or ``import glance.client`` +# Set up our checkouts so they are installed in the python path configure_keystoneclient configure_novaclient setup_develop $OPENSTACKCLIENT_DIR + if is_service_enabled key g-api n-api s-proxy; then configure_keystone fi + if is_service_enabled s-proxy; then configure_swift configure_swiftclient @@ -653,6 +642,7 @@ if is_service_enabled s-proxy; then setup_develop $SWIFT3_DIR fi fi + if is_service_enabled g-api n-api; then configure_glance fi @@ -666,17 +656,21 @@ if is_service_enabled nova; then cleanup_nova configure_nova fi + if is_service_enabled horizon; then configure_horizon fi + if is_service_enabled quantum; then setup_quantumclient setup_quantum fi + if is_service_enabled heat; then configure_heat configure_heatclient fi + if is_service_enabled cinder; then configure_cinder fi @@ -698,6 +692,7 @@ if is_service_enabled tls-proxy; then # don't be naive and add to existing line! 
fi + # Syslog # ------ @@ -992,6 +987,7 @@ if is_service_enabled nova && is_baremetal; then fi fi + # Launch Services # =============== @@ -1081,6 +1077,7 @@ if is_service_enabled heat; then start_heat fi + # Create account rc files # ======================= @@ -1191,6 +1188,7 @@ fi # Check the status of running services service_check + # Fin # === diff --git a/stackrc b/stackrc index 19674edf10..34ccfa2169 100644 --- a/stackrc +++ b/stackrc @@ -201,6 +201,10 @@ VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes} VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-} INSTANCE_NAME_PREFIX=${INSTANCE_NAME_PREFIX:-instance-} +# Set default port for nova-objectstore +S3_SERVICE_PORT=${S3_SERVICE_PORT:-3333} + +# Common network names PRIVATE_NETWORK_NAME=${PRIVATE_NETWORK_NAME:-"private"} PUBLIC_NETWORK_NAME=${PUBLIC_NETWORK_NAME:-"nova"} From 3452f8eb8663a8cfc5733784d918b59947630cf8 Mon Sep 17 00:00:00 2001 From: Akihiro MOTOKI Date: Thu, 21 Mar 2013 14:11:27 +0900 Subject: [PATCH 0010/4704] Support Quantum security group Adds Q_USE_SECGROUP flag for quantum security group - Added has_quantum_plugin_security_group method for each plugin. - Set NOVA_VIF_DRIVER to the hybrid VIF driver for plugins with iptables based security group support. - Specifying device_owner type on debug port in lib/quantum and quantum-adv-test.sh. 
This change makes apply quantum security group fro debug port Change-Id: Ifd155798912247d85a9765ef73a2186b929237b4 --- exercises/quantum-adv-test.sh | 2 +- lib/quantum | 16 ++++++++++++++-- lib/quantum_plugins/README.md | 2 ++ lib/quantum_plugins/bigswitch_floodlight | 5 +++++ lib/quantum_plugins/brocade | 5 +++++ lib/quantum_plugins/linuxbridge | 10 ++++++++++ lib/quantum_plugins/nicira | 5 +++++ lib/quantum_plugins/openvswitch | 7 ++++++- lib/quantum_plugins/ovs_base | 18 ++++++++++++++++++ lib/quantum_plugins/ryu | 9 ++++++++- 10 files changed, 74 insertions(+), 5 deletions(-) diff --git a/exercises/quantum-adv-test.sh b/exercises/quantum-adv-test.sh index 5c4b16ea5e..a1fb2ad03c 100755 --- a/exercises/quantum-adv-test.sh +++ b/exercises/quantum-adv-test.sh @@ -235,7 +235,7 @@ function create_network { source $TOP_DIR/openrc $TENANT $TENANT local NET_ID=$(quantum net-create --tenant_id $TENANT_ID $NET_NAME $EXTRA| grep ' id ' | awk '{print $4}' ) quantum subnet-create --ip_version 4 --tenant_id $TENANT_ID --gateway $GATEWAY $NET_ID $CIDR - quantum-debug probe-create $NET_ID + quantum-debug probe-create --device-owner compute $NET_ID source $TOP_DIR/openrc demo demo } diff --git a/lib/quantum b/lib/quantum index 09cde6417d..efdd43d071 100644 --- a/lib/quantum +++ b/lib/quantum @@ -181,6 +181,13 @@ source $TOP_DIR/lib/quantum_plugins/$Q_PLUGIN # Hardcoding for 1 service plugin for now source $TOP_DIR/lib/quantum_plugins/agent_loadbalancer +# Use security group or not +if has_quantum_plugin_security_group; then + Q_USE_SECGROUP=${Q_USE_SECGROUP:-True} +else + Q_USE_SECGROUP=False +fi + # Entry Points # ------------ @@ -222,6 +229,11 @@ function create_nova_conf_quantum() { iniset $NOVA_CONF DEFAULT quantum_admin_tenant_name "$SERVICE_TENANT_NAME" iniset $NOVA_CONF DEFAULT quantum_url "http://$Q_HOST:$Q_PORT" + if [[ "$Q_USE_SECGROUP" == "True" ]]; then + LIBVIRT_FIREWALL_DRIVER=nova.virt.firewall.NoopFirewallDriver + iniset $NOVA_CONF DEFAULT security_group_api 
quantum + fi + # set NOVA_VIF_DRIVER and optionally set options in nova_conf quantum_plugin_create_nova_conf @@ -646,9 +658,9 @@ function delete_probe() { function setup_quantum_debug() { if [[ "$Q_USE_DEBUG_COMMAND" == "True" ]]; then public_net_id=`_get_net_id $PUBLIC_NETWORK_NAME` - quantum-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-create $public_net_id + quantum-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-create --device-owner compute $public_net_id private_net_id=`_get_net_id $PRIVATE_NETWORK_NAME` - quantum-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-create $private_net_id + quantum-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-create --device-owner compute $private_net_id fi } diff --git a/lib/quantum_plugins/README.md b/lib/quantum_plugins/README.md index 5411de00c7..05bfb85125 100644 --- a/lib/quantum_plugins/README.md +++ b/lib/quantum_plugins/README.md @@ -32,3 +32,5 @@ functions * ``quantum_plugin_configure_plugin_agent`` * ``quantum_plugin_configure_service`` * ``quantum_plugin_setup_interface_driver`` +* ``has_quantum_plugin_security_group``: + return 0 if the plugin support quantum security group otherwise return 1 diff --git a/lib/quantum_plugins/bigswitch_floodlight b/lib/quantum_plugins/bigswitch_floodlight index 7d3fd9675c..4857f49569 100644 --- a/lib/quantum_plugins/bigswitch_floodlight +++ b/lib/quantum_plugins/bigswitch_floodlight @@ -51,5 +51,10 @@ function quantum_plugin_setup_interface_driver() { iniset $conf_file DEFAULT interface_driver quantum.agent.linux.interface.OVSInterfaceDriver } +function has_quantum_plugin_security_group() { + # 1 means False here + return 1 +} + # Restore xtrace $MY_XTRACE diff --git a/lib/quantum_plugins/brocade b/lib/quantum_plugins/brocade index ac911439a3..6e26ad7842 100644 --- a/lib/quantum_plugins/brocade +++ b/lib/quantum_plugins/brocade @@ 
-45,5 +45,10 @@ function quantum_plugin_setup_interface_driver() { iniset $conf_file DEFAULT interface_driver quantum.agent.linux.interface.BridgeInterfaceDriver } +function has_quantum_plugin_security_group() { + # 0 means True here + return 0 +} + # Restore xtrace $BRCD_XTRACE diff --git a/lib/quantum_plugins/linuxbridge b/lib/quantum_plugins/linuxbridge index 11bc585fe9..324e255231 100644 --- a/lib/quantum_plugins/linuxbridge +++ b/lib/quantum_plugins/linuxbridge @@ -48,6 +48,11 @@ function quantum_plugin_configure_plugin_agent() { if [[ "$LB_INTERFACE_MAPPINGS" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE LINUX_BRIDGE physical_interface_mappings $LB_INTERFACE_MAPPINGS fi + if [[ "$Q_USE_SECGROUP" == "True" ]]; then + iniset /$Q_PLUGIN_CONF_FILE SECURITYGROUP firewall_driver quantum.agent.linux.iptables_firewall.IptablesFirewallDriver + else + iniset /$Q_PLUGIN_CONF_FILE SECURITYGROUP firewall_driver quantum.agent.firewall.NoopFirewallDriver + fi AGENT_BINARY="$QUANTUM_DIR/bin/quantum-linuxbridge-agent" } @@ -76,5 +81,10 @@ function quantum_plugin_setup_interface_driver() { iniset $conf_file DEFAULT interface_driver quantum.agent.linux.interface.BridgeInterfaceDriver } +function has_quantum_plugin_security_group() { + # 0 means True here + return 0 +} + # Restore xtrace $MY_XTRACE diff --git a/lib/quantum_plugins/nicira b/lib/quantum_plugins/nicira index 8c150b11f5..6eefb022ec 100644 --- a/lib/quantum_plugins/nicira +++ b/lib/quantum_plugins/nicira @@ -141,5 +141,10 @@ function quantum_plugin_setup_interface_driver() { iniset $conf_file DEFAULT interface_driver quantum.agent.linux.interface.OVSInterfaceDriver } +function has_quantum_plugin_security_group() { + # 0 means True here + return 0 +} + # Restore xtrace $MY_XTRACE diff --git a/lib/quantum_plugins/openvswitch b/lib/quantum_plugins/openvswitch index dda1239cb9..ab16483452 100644 --- a/lib/quantum_plugins/openvswitch +++ b/lib/quantum_plugins/openvswitch @@ -8,7 +8,7 @@ set +o xtrace source 
$TOP_DIR/lib/quantum_plugins/ovs_base function quantum_plugin_create_nova_conf() { - NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"} + _quantum_ovs_base_configure_nova_vif_driver if [ "$VIRT_DRIVER" = 'xenserver' ]; then iniset $NOVA_CONF DEFAULT xenapi_vif_driver nova.virt.xenapi.vif.XenAPIOpenVswitchDriver iniset $NOVA_CONF DEFAULT xenapi_ovs_integration_bridge $FLAT_NETWORK_BRIDGE @@ -43,6 +43,7 @@ function quantum_plugin_configure_plugin_agent() { # Setup integration bridge OVS_BRIDGE=${OVS_BRIDGE:-br-int} _quantum_ovs_base_setup_bridge $OVS_BRIDGE + _quantum_ovs_base_configure_firewall_driver # Setup agent for tunneling if [[ "$OVS_ENABLE_TUNNELING" = "True" ]]; then @@ -139,5 +140,9 @@ function quantum_plugin_setup_interface_driver() { iniset $conf_file DEFAULT interface_driver quantum.agent.linux.interface.OVSInterfaceDriver } +function has_quantum_plugin_security_group() { + return 0 +} + # Restore xtrace $MY_XTRACE diff --git a/lib/quantum_plugins/ovs_base b/lib/quantum_plugins/ovs_base index ab988d9f62..2ada0dbf5a 100644 --- a/lib/quantum_plugins/ovs_base +++ b/lib/quantum_plugins/ovs_base @@ -39,6 +39,14 @@ function _quantum_ovs_base_configure_debug_command() { iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE } +function _quantum_ovs_base_configure_firewall_driver() { + if [[ "$Q_USE_SECGROUP" == "True" ]]; then + iniset /$Q_PLUGIN_CONF_FILE SECURITYGROUP firewall_driver quantum.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver + else + iniset /$Q_PLUGIN_CONF_FILE SECURITYGROUP firewall_driver quantum.agent.firewall.NoopFirewallDriver + fi +} + function _quantum_ovs_base_configure_l3_agent() { iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE @@ -48,5 +56,15 @@ function _quantum_ovs_base_configure_l3_agent() { sudo ip addr flush dev $PUBLIC_BRIDGE } +function _quantum_ovs_base_configure_nova_vif_driver() { + # The hybrid VIF driver needs to be specified 
when Quantum Security Group + # is enabled (until vif_security attributes are supported in VIF extension) + if [[ "$Q_USE_SECGROUP" == "True" ]]; then + NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver"} + else + NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"} + fi +} + # Restore xtrace $MY_XTRACE diff --git a/lib/quantum_plugins/ryu b/lib/quantum_plugins/ryu index d1d7382c4b..113923235c 100644 --- a/lib/quantum_plugins/ryu +++ b/lib/quantum_plugins/ryu @@ -9,7 +9,7 @@ source $TOP_DIR/lib/quantum_plugins/ovs_base source $TOP_DIR/lib/quantum_thirdparty/ryu # for configuration value function quantum_plugin_create_nova_conf() { - NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver"} + _quantum_ovs_base_configure_nova_vif_driver iniset $NOVA_CONF DEFAULT libvirt_ovs_integration_bridge "$OVS_BRIDGE" } @@ -52,6 +52,8 @@ function quantum_plugin_configure_plugin_agent() { fi iniset /$Q_PLUGIN_CONF_FILE OVS integration_bridge $OVS_BRIDGE AGENT_BINARY="$QUANTUM_DIR/quantum/plugins/ryu/agent/ryu_quantum_agent.py" + + _quantum_ovs_base_configure_firewall_driver } function quantum_plugin_configure_service() { @@ -64,5 +66,10 @@ function quantum_plugin_setup_interface_driver() { iniset $conf_file DEFAULT ovs_use_veth True } +function has_quantum_plugin_security_group() { + # 0 means True here + return 0 +} + # Restore xtrace $MY_XTRACE From b93b74ca021abb6d82a24ea04f760cdfa0e49474 Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Thu, 21 Mar 2013 21:25:05 -0400 Subject: [PATCH 0011/4704] Fix FLAT_INTERFACE not working add a missing colon Fiex LP# 1158308 Change-Id: Ia873788fd5dd17be3c2942057168fbfddb32c14f --- lib/nova | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/nova b/lib/nova index f0c83157d6..6aa98bcc6c 100644 --- a/lib/nova +++ b/lib/nova @@ -106,7 +106,7 @@ EC2_DMZ_HOST=${EC2_DMZ_HOST:-$SERVICE_HOST} # If you are running on a single node 
and don't need to access the VMs from # devices other than that node, you can set ``FLAT_INTERFACE=`` # This will stop nova from bridging any interfaces into ``FLAT_NETWORK_BRIDGE``. -FLAT_INTERFACE=${FLAT_INTERFACE-$GUEST_INTERFACE_DEFAULT} +FLAT_INTERFACE=${FLAT_INTERFACE:-$GUEST_INTERFACE_DEFAULT} # ``MULTI_HOST`` is a mode where each compute node runs its own network node. This # allows network operations and routing for a VM to occur on the server that is From f85fa089bb76d57b5df85507d5b4163a9e5b0733 Mon Sep 17 00:00:00 2001 From: Akihiro MOTOKI Date: Sun, 13 Jan 2013 05:01:08 +0900 Subject: [PATCH 0012/4704] Add Quantum NEC OpenFlow plugin support NEC OpenFlow plugin supports several OpenFlow controllers. This commit also adds a third party script to support Trema Sliceable Switch, one of open source OpenFlow controllers. Change-Id: I5dddf04a25bc275c3119d751ca79b482e1875902 --- files/apts/trema | 15 +++++ lib/quantum_plugins/nec | 122 +++++++++++++++++++++++++++++++++++ lib/quantum_thirdparty/trema | 113 ++++++++++++++++++++++++++++++++ 3 files changed, 250 insertions(+) create mode 100644 files/apts/trema create mode 100644 lib/quantum_plugins/nec create mode 100644 lib/quantum_thirdparty/trema diff --git a/files/apts/trema b/files/apts/trema new file mode 100644 index 0000000000..e33ccd3004 --- /dev/null +++ b/files/apts/trema @@ -0,0 +1,15 @@ +# Trema +gcc +make +ruby1.8 +rubygems1.8 +ruby1.8-dev +libpcap-dev +libsqlite3-dev + +# Sliceable Switch +sqlite3 +libdbi-perl +libdbd-sqlite3-perl +apache2 +libjson-perl diff --git a/lib/quantum_plugins/nec b/lib/quantum_plugins/nec new file mode 100644 index 0000000000..f61f50bba5 --- /dev/null +++ b/lib/quantum_plugins/nec @@ -0,0 +1,122 @@ +# Quantum NEC OpenFlow plugin +# --------------------------- + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + +# Configuration parameters +OFC_HOST=${OFC_HOST:-127.0.0.1} +OFC_PORT=${OFC_PORT:-8888} + +OFC_API_HOST=${OFC_API_HOST:-$OFC_HOST} 
+OFC_API_PORT=${OFC_API_PORT:-$OFC_PORT} +OFC_OFP_HOST=${OFC_OFP_HOST:-$OFC_HOST} +OFC_OFP_PORT=${OFC_OFP_PORT:-6633} +OFC_DRIVER=${OFC_DRIVER:-trema} +OFC_RETRY_MAX=${OFC_RETRY_MAX:-0} +OFC_RETRY_INTERVAL=${OFC_RETRY_INTERVAL:-1} + +OVS_BRIDGE=${OVS_BRIDGE:-br-int} + +# Main logic +# --------------------------- + +source $TOP_DIR/lib/quantum_plugins/ovs_base + +function quantum_plugin_create_nova_conf() { + _quantum_ovs_base_configure_nova_vif_driver +} + +function quantum_plugin_install_agent_packages() { + # SKIP_OVS_INSTALL is useful when we want to use Open vSwitch whose + # version is different from the version provided by the distribution. + if [[ "$SKIP_OVS_INSTALL" = "True" ]]; then + echo "You need to install Open vSwitch manually." + return + fi + _quantum_ovs_base_install_agent_packages +} + +function quantum_plugin_configure_common() { + Q_PLUGIN_CONF_PATH=etc/quantum/plugins/nec + Q_PLUGIN_CONF_FILENAME=nec.ini + Q_DB_NAME="quantum_nec" + Q_PLUGIN_CLASS="quantum.plugins.nec.nec_plugin.NECPluginV2" +} + +function quantum_plugin_configure_debug_command() { + _quantum_ovs_base_configure_debug_command +} + +function quantum_plugin_configure_dhcp_agent() { + : +} + +function quantum_plugin_configure_l3_agent() { + _quantum_ovs_base_configure_l3_agent +} + +function quantum_plugin_configure_plugin_agent() { + if [[ "$SKIP_OVS_BRIDGE_SETUP" = "True" ]]; then + return + fi + # Set up integration bridge + _quantum_ovs_base_setup_bridge $OVS_BRIDGE + sudo ovs-vsctl --no-wait set-controller $OVS_BRIDGE tcp:$OFC_OFP_HOST:$OFC_OFP_PORT + # Generate datapath ID from HOST_IP + local dpid=$(printf "0x%07d%03d%03d%03d\n" ${HOST_IP//./ }) + sudo ovs-vsctl --no-wait set Bridge $OVS_BRIDGE other-config:datapath-id=$dpid + sudo ovs-vsctl --no-wait set-fail-mode $OVS_BRIDGE secure + if [ -n "$OVS_INTERFACE" ]; then + sudo ovs-vsctl --no-wait -- --may-exist add-port $OVS_BRIDGE $OVS_INTERFACE + fi + _quantum_setup_ovs_tunnels $OVS_BRIDGE + 
AGENT_BINARY="$QUANTUM_DIR/bin/quantum-nec-agent" + + _quantum_ovs_base_configure_firewall_driver +} + +function quantum_plugin_configure_service() { + iniset $QUANTUM_CONF DEFAULT api_extensions_path quantum/plugins/nec/extensions/ + iniset /$Q_PLUGIN_CONF_FILE OFC host $OFC_API_HOST + iniset /$Q_PLUGIN_CONF_FILE OFC port $OFC_API_PORT + iniset /$Q_PLUGIN_CONF_FILE OFC driver $OFC_DRIVER + iniset /$Q_PLUGIN_CONF_FILE OFC api_retry_max OFC_RETRY_MAX + iniset /$Q_PLUGIN_CONF_FILE OFC api_retry_interval OFC_RETRY_INTERVAL +} + +function quantum_plugin_setup_interface_driver() { + local conf_file=$1 + iniset $conf_file DEFAULT interface_driver quantum.agent.linux.interface.OVSInterfaceDriver + iniset $conf_file DEFAULT ovs_use_veth True +} + +# Utility functions +# --------------------------- + +# Setup OVS tunnel manually +function _quantum_setup_ovs_tunnels() { + local bridge=$1 + local id=0 + GRE_LOCAL_IP=${GRE_LOCAL_IP:-$HOST_IP} + if [ -n "$GRE_REMOTE_IPS" ]; then + for ip in ${GRE_REMOTE_IPS//:/ } + do + if [[ "$ip" == "$GRE_LOCAL_IP" ]]; then + continue + fi + sudo ovs-vsctl --no-wait add-port $bridge gre$id -- \ + set Interface gre$id type=gre options:remote_ip=$ip + id=`expr $id + 1` + done + fi +} + +function has_quantum_plugin_security_group() { + # 0 means True here + return 0 +} + +# Restore xtrace +$MY_XTRACE diff --git a/lib/quantum_thirdparty/trema b/lib/quantum_thirdparty/trema new file mode 100644 index 0000000000..09dc46bd83 --- /dev/null +++ b/lib/quantum_thirdparty/trema @@ -0,0 +1,113 @@ +# Trema Sliceable Switch +# ---------------------- + +# Trema is a Full-Stack OpenFlow Framework in Ruby and C +# https://github.com/trema/trema +# +# Trema Sliceable Switch is an OpenFlow controller which provides +# virtual layer-2 network slices. 
+# https://github.com/trema/apps/wiki + +# Trema Sliceable Switch (OpenFlow Controller) +TREMA_APPS_REPO=${TREMA_APPS_REPO:-https://github.com/trema/apps.git} +TREMA_APPS_BRANCH=${TREMA_APPS_BRANCH:-master} + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + +TREMA_DIR=${TREMA_DIR:-$DEST/trema} +TREMA_SS_DIR="$TREMA_DIR/apps/sliceable_switch" + +TREMA_DATA_DIR=${TREMA_DATA_DIR:-$DATA_DIR/trema} +TREMA_SS_ETC_DIR=$TREMA_DATA_DIR/sliceable_switch/etc +TREMA_SS_DB_DIR=$TREMA_DATA_DIR/sliceable_switch/db +TREMA_SS_SCRIPT_DIR=$TREMA_DATA_DIR/sliceable_switch/script +TREMA_TMP_DIR=$TREMA_DATA_DIR/trema + +TREMA_LOG_LEVEL=${TREMA_LOG_LEVEL:-info} + +TREMA_SS_CONFIG=$TREMA_SS_ETC_DIR/sliceable.conf +TREMA_SS_APACHE_CONFIG=/etc/apache2/sites-available/sliceable_switch + +# configure_trema - Set config files, create data dirs, etc +function configure_trema() { + # prepare dir + for d in $TREMA_SS_ETC_DIR $TREMA_SS_DB_DIR $TREMA_SS_SCRIPT_DIR; do + sudo mkdir -p $d + sudo chown -R `whoami` $d + done + sudo mkdir -p $TREMA_TMP_DIR +} + +# init_trema - Initialize databases, etc. 
+function init_trema() { + local _pwd=$(pwd) + + # Initialize databases for Sliceable Switch + cd $TREMA_SS_DIR + rm -f filter.db slice.db + ./create_tables.sh + mv filter.db slice.db $TREMA_SS_DB_DIR + # Make sure that apache cgi has write access to the databases + sudo chown -R www-data.www-data $TREMA_SS_DB_DIR + cd $_pwd + + # Setup HTTP Server for sliceable_switch + cp $TREMA_SS_DIR/{Slice.pm,Filter.pm,config.cgi} $TREMA_SS_SCRIPT_DIR + sed -i -e "s|/home/sliceable_switch/db|$TREMA_SS_DB_DIR|" \ + $TREMA_SS_SCRIPT_DIR/config.cgi + + sudo cp $TREMA_SS_DIR/apache/sliceable_switch $TREMA_SS_APACHE_CONFIG + sudo sed -i -e "s|/home/sliceable_switch/script|$TREMA_SS_SCRIPT_DIR|" \ + $TREMA_SS_APACHE_CONFIG + sudo a2enmod rewrite actions + sudo a2ensite sliceable_switch + + cp $TREMA_SS_DIR/sliceable_switch_null.conf $TREMA_SS_CONFIG + sed -i -e "s|^\$apps_dir.*$|\$apps_dir = \"$TREMA_DIR/apps\"|" \ + -e "s|^\$db_dir.*$|\$db_dir = \"$TREMA_SS_DB_DIR\"|" \ + $TREMA_SS_CONFIG +} + +function gem_install() { + [[ "$OFFLINE" = "True" ]] && return + [ -n "$RUBYGEMS_CMD" ] || get_gem_command + + local pkg=$1 + $RUBYGEMS_CMD list | grep "^${pkg} " && return + sudo $RUBYGEMS_CMD install $pkg +} + +function get_gem_command() { + # Trema requires ruby 1.8, so gem1.8 is checked first + RUBYGEMS_CMD=$(which gem1.8 || which gem) + if [ -z "$RUBYGEMS_CMD" ]; then + echo "Warning: ruby gems command not found." 
+ fi +} + +function install_trema() { + # Trema + gem_install trema + # Sliceable Switch + git_clone $TREMA_APPS_REPO $TREMA_DIR/apps $TREMA_APPS_BRANCH + make -C $TREMA_DIR/apps/topology + make -C $TREMA_DIR/apps/flow_manager + make -C $TREMA_DIR/apps/sliceable_switch +} + +function start_trema() { + # APACHE_NAME is defined in init_horizon (in lib/horizon) + restart_service $APACHE_NAME + + sudo LOGGING_LEVEL=$TREMA_LOG_LEVEL TREMA_TMP=$TREMA_TMP_DIR \ + trema run -d -c $TREMA_SS_CONFIG +} + +function stop_trema() { + sudo TREMA_TMP=$TREMA_TMP_DIR trema killall +} + +# Restore xtrace +$MY_XTRACE From 1f11f9ac8f644b8bc6981f462af46fe4de816d40 Mon Sep 17 00:00:00 2001 From: Oleg Bondarev Date: Mon, 25 Mar 2013 05:34:23 -0700 Subject: [PATCH 0013/4704] Add comments on how to run devstack with Quantum Fixes bug 1077826 Change-Id: I224db4e00e3c937f3106e9a099b79de789be1fc1 --- README.md | 48 ++++++++++++++++++++++++++++++++++++++++++++++++ stackrc | 13 ++++++++++++- 2 files changed, 60 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index a738554d3f..d8538c2583 100644 --- a/README.md +++ b/README.md @@ -103,3 +103,51 @@ If you only want to do some testing of a real normal swift cluster with multiple If you are enabling `swift3` in `ENABLED_SERVICES` devstack will install the swift3 middleware emulation. Swift will be configured to act as a S3 endpoint for Keystone so effectively replacing the `nova-objectstore`. Only Swift proxy server is launched in the screen session all other services are started in background and managed by `swift-init` tool. 
+ +# Quantum + +Basic Setup + +In order to enable Quantum a single node setup, you'll need the following settings in your `localrc` : + + disable_service n-net + enable_service q-svc + enable_service q-agt + enable_service q-dhcp + enable_service q-l3 + enable_service q-meta + enable_service quantum + # Optional, to enable tempest configuration as part of devstack + enable_service tempest + +Then run stack.sh as normal. + +If tempest has been successfully configured, a basic set of smoke tests can be run as follows: + + $ cd /opt/stack/tempest + $ nosetests tempest/tests/network/test_network_basic_ops.py + +Multi-Node Setup + +A more interesting setup involves running multiple compute nodes, with Quantum networks connecting VMs on different compute nodes. +You should run at least one "controller node", which should have a `stackrc` that includes at least: + + disable_service n-net + enable_service q-svc + enable_service q-agt + enable_service q-dhcp + enable_service q-l3 + enable_service q-meta + enable_service quantum + +You likely want to change your `localrc` to run a scheduler that will balance VMs across hosts: + + SCHEDULER=nova.scheduler.simple.SimpleScheduler + +You can then run many compute nodes, each of which should have a `stackrc` which includes the following, with the IP address of the above controller node: + + ENABLED_SERVICES=n-cpu,rabbit,g-api,quantum,q-agt + SERVICE_HOST=[IP of controller node] + MYSQL_HOST=$SERVICE_HOST + RABBIT_HOST=$SERVICE_HOST + Q_HOST=$SERVICE_HOST diff --git a/stackrc b/stackrc index 34ccfa2169..f35cdc07fb 100644 --- a/stackrc +++ b/stackrc @@ -20,7 +20,18 @@ fi # screen tabs. To change the default list, use the ``enable_service`` and # ``disable_service`` functions in ``localrc``. 
# For example, to enable Swift add this to ``localrc``: -# enable_service swift +# enable_service swift +# In order to enable Quantum (a single node setup) add the following +# settings in `` localrc``: +# disable_service n-net +# enable_service q-svc +# enable_service q-agt +# enable_service q-dhcp +# enable_service q-l3 +# enable_service q-meta +# enable_service quantum +# # Optional, to enable tempest configuration as part of devstack +# enable_service tempest ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,n-cond,cinder,c-sch,c-api,c-vol,n-sch,n-novnc,n-xvnc,n-cauth,horizon,rabbit,tempest,mysql # Set the default Nova APIs to enable From 57e3da9b760260bba020d4b65f9db29a339ec02a Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Fri, 22 Mar 2013 16:34:05 +0000 Subject: [PATCH 0014/4704] xenapi: Extract plugin installation functions This change extracts the plugin installation functions, and covers the extracted functions with tests. Use: ./test_funtions.sh run_tests to run the tests. 
Change-Id: I1d78d9e8cc4d52ee2df83d07e4c74dda4805f21a --- tools/xen/functions | 55 ++++++++++++++ tools/xen/install_os_domU.sh | 41 +++-------- tools/xen/mocks | 59 +++++++++++++++ tools/xen/test_functions.sh | 134 +++++++++++++++++++++++++++++++++++ 4 files changed, 259 insertions(+), 30 deletions(-) create mode 100644 tools/xen/functions create mode 100644 tools/xen/mocks create mode 100755 tools/xen/test_functions.sh diff --git a/tools/xen/functions b/tools/xen/functions new file mode 100644 index 0000000000..5b4a661acd --- /dev/null +++ b/tools/xen/functions @@ -0,0 +1,55 @@ +#!/bin/bash + +function xapi_plugin_location { + for PLUGIN_DIR in "/etc/xapi.d/plugins/" "/usr/lib/xcp/plugins/" + do + if [ -d $PLUGIN_DIR ] + then + echo $PLUGIN_DIR + return 0 + fi + done + return 1 +} + +function zip_snapshot_location { + echo $1 | sed "s:\.git$::;s:$:/zipball/$2:g" +} + +function create_directory_for_kernels { + mkdir -p "/boot/guest" +} + +function extract_remote_zipball { + local ZIPBALL_URL=$1 + + local LOCAL_ZIPBALL=$(mktemp) + local EXTRACTED_FILES=$(mktemp -d) + + ( + wget -nv $ZIPBALL_URL -O $LOCAL_ZIPBALL --no-check-certificate + unzip -q -o $LOCAL_ZIPBALL -d $EXTRACTED_FILES + rm -f $LOCAL_ZIPBALL + ) >&2 + + echo "$EXTRACTED_FILES" +} + +function find_xapi_plugins_dir { + find $1 -path '*/xapi.d/plugins' -type d -print +} + +function install_xapi_plugins_from_zipball { + local XAPI_PLUGIN_DIR + local EXTRACTED_FILES + local EXTRACTED_PLUGINS_DIR + + XAPI_PLUGIN_DIR=$(xapi_plugin_location) + + EXTRACTED_FILES=$(extract_remote_zipball $1) + EXTRACTED_PLUGINS_DIR=$(find_xapi_plugins_dir $EXTRACTED_FILES) + + cp -pr $EXTRACTED_PLUGINS_DIR/* $XAPI_PLUGIN_DIR + rm -rf $EXTRACTED_FILES + chmod a+x ${XAPI_PLUGIN_DIR}* +} diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh index 0c0e1e2440..7c3b839209 100755 --- a/tools/xen/install_os_domU.sh +++ b/tools/xen/install_os_domU.sh @@ -28,6 +28,9 @@ THIS_DIR=$(cd $(dirname "$0") && pwd) # Include 
onexit commands . $THIS_DIR/scripts/on_exit.sh +# xapi functions +. $THIS_DIR/functions + # # Get Settings @@ -43,48 +46,26 @@ xe_min() xe "$cmd" --minimal "$@" } - # # Prepare Dom0 # including installing XenAPI plugins # cd $THIS_DIR -if [ -f ./master ] -then - rm -rf ./master - rm -rf ./nova -fi -# get nova -NOVA_ZIPBALL_URL=${NOVA_ZIPBALL_URL:-$(echo $NOVA_REPO | sed "s:\.git$::;s:$:/zipball/$NOVA_BRANCH:g")} -wget -nv $NOVA_ZIPBALL_URL -O nova-zipball --no-check-certificate -unzip -q -o nova-zipball -d ./nova +# Install plugins -# install xapi plugins -XAPI_PLUGIN_DIR=/etc/xapi.d/plugins/ -if [ ! -d $XAPI_PLUGIN_DIR ]; then - # the following is needed when using xcp-xapi - XAPI_PLUGIN_DIR=/usr/lib/xcp/plugins/ -fi -cp -pr ./nova/*/plugins/xenserver/xenapi/etc/xapi.d/plugins/* $XAPI_PLUGIN_DIR +## Nova plugins +NOVA_ZIPBALL_URL=${NOVA_ZIPBALL_URL:-$(zip_snapshot_location $NOVA_REPO $NOVA_BRANCH)} +install_xapi_plugins_from_zipball $NOVA_ZIPBALL_URL -# Install the netwrap xapi plugin to support agent control of dom0 networking +## Install the netwrap xapi plugin to support agent control of dom0 networking if [[ "$ENABLED_SERVICES" =~ "q-agt" && "$Q_PLUGIN" = "openvswitch" ]]; then - if [ -f ./quantum ]; then - rm -rf ./quantum - fi - # get quantum - QUANTUM_ZIPBALL_URL=${QUANTUM_ZIPBALL_URL:-$(echo $QUANTUM_REPO | sed "s:\.git$::;s:$:/zipball/$QUANTUM_BRANCH:g")} - wget -nv $QUANTUM_ZIPBALL_URL -O quantum-zipball --no-check-certificate - unzip -q -o quantum-zipball -d ./quantum - cp -pr ./quantum/*/quantum/plugins/openvswitch/agent/xenapi/etc/xapi.d/plugins/* $XAPI_PLUGIN_DIR + QUANTUM_ZIPBALL_URL=${QUANTUM_ZIPBALL_URL:-$(zip_snapshot_location $QUANTUM_REPO $QUANTUM_BRANCH)} + install_xapi_plugins_from_zipball $QUANTUM_ZIPBALL_URL fi -chmod a+x ${XAPI_PLUGIN_DIR}* - -mkdir -p /boot/guest - +create_directory_for_kernels # # Configure Networking diff --git a/tools/xen/mocks b/tools/xen/mocks new file mode 100644 index 0000000000..b00655873c --- /dev/null +++ 
b/tools/xen/mocks @@ -0,0 +1,59 @@ +#!/bin/bash + +test ! -e "$LIST_OF_ACTIONS" && { + echo "Mocking is not set up properly." + echo "LIST_OF_ACTIONS should point to an existing file." + exit 1 +} + +test ! -e "$LIST_OF_DIRECTORIES" && { + echo "Mocking is not set up properly." + echo "LIST_OF_DIRECTORIES should point to an existing file." + exit 1 +} + +function mktemp { + if test "${1:-}" = "-d"; + then + echo "tempdir" + else + echo "tempfile" + fi +} + +function wget { + echo "wget $@" >> $LIST_OF_ACTIONS +} + +function mkdir { + if test "${1:-}" = "-p"; + then + echo "$2" >> $LIST_OF_DIRECTORIES + fi +} + +function unzip { + echo "Random rubbish from unzip" + echo "unzip $@" >> $LIST_OF_ACTIONS +} + +function rm { + echo "rm $@" >> $LIST_OF_ACTIONS +} + +function [ { + if test "${1:-}" = "-d"; + then + echo "[ $@" >> $LIST_OF_ACTIONS + for directory in $(cat $LIST_OF_DIRECTORIES) + do + if test "$directory" = "$2" + then + return 0 + fi + done + return 1 + fi + echo "Mock test does not implement the requested function" + exit 1 +} diff --git a/tools/xen/test_functions.sh b/tools/xen/test_functions.sh new file mode 100755 index 0000000000..6817ec3956 --- /dev/null +++ b/tools/xen/test_functions.sh @@ -0,0 +1,134 @@ +#!/bin/bash + +# Tests for functions. +# +# The tests are sourcing the mocks file to mock out various functions. The +# mocking-out always happens in a sub-shell, thus it does not have impact on +# the functions defined here. + +# To run the tests, please run: +# +# ./test_functions.sh run_tests +# +# To only print out the discovered test functions, run: +# +# ./test_functions.sh + +. 
functions + +# Setup +function before_each_test { + LIST_OF_DIRECTORIES=$(mktemp) + truncate -s 0 $LIST_OF_DIRECTORIES + + LIST_OF_ACTIONS=$(mktemp) + truncate -s 0 $LIST_OF_ACTIONS +} + +# Teardown +function after_each_test { + rm -f $LIST_OF_DIRECTORIES + rm -f $LIST_OF_ACTIONS +} + +# Helpers +function given_directory_exists { + echo "$1" >> $LIST_OF_DIRECTORIES +} + +function assert_directory_exists { + grep "$1" $LIST_OF_DIRECTORIES +} + +function assert_previous_command_failed { + [ "$?" != "0" ] || exit 1 +} + +# Tests +function test_plugin_directory_on_xenserver { + given_directory_exists "/etc/xapi.d/plugins/" + + PLUGDIR=$(. mocks && xapi_plugin_location) + + [ "/etc/xapi.d/plugins/" = "$PLUGDIR" ] +} + +function test_plugin_directory_on_xcp { + given_directory_exists "/usr/lib/xcp/plugins/" + + PLUGDIR=$(. mocks && xapi_plugin_location) + + [ "/usr/lib/xcp/plugins/" = "$PLUGDIR" ] +} + +function test_no_plugin_directory_found { + set +e + + local IGNORE + IGNORE=$(. mocks && xapi_plugin_location) + + assert_previous_command_failed + + grep "[ -d /etc/xapi.d/plugins/ ]" $LIST_OF_ACTIONS + grep "[ -d /usr/lib/xcp/plugins/ ]" $LIST_OF_ACTIONS +} + +function test_zip_snapshot_location { + diff \ + <(zip_snapshot_location "https://github.com/openstack/nova.git" "master") \ + <(echo "https://github.com/openstack/nova/zipball/master") +} + +function test_create_directory_for_kernels { + (. mocks && create_directory_for_kernels) + + assert_directory_exists "/boot/guest" +} + +function test_extract_remote_zipball { + local RESULT=$(. 
mocks && extract_remote_zipball "someurl") + + diff <(cat $LIST_OF_ACTIONS) - << EOF +wget -nv someurl -O tempfile --no-check-certificate +unzip -q -o tempfile -d tempdir +rm -f tempfile +EOF + + [ "$RESULT" = "tempdir" ] +} + +function test_find_nova_plugins { + local tmpdir=$(mktemp -d) + + mkdir -p "$tmpdir/blah/blah/u/xapi.d/plugins" + + [ "$tmpdir/blah/blah/u/xapi.d/plugins" = $(find_xapi_plugins_dir $tmpdir) ] + + rm -rf $tmpdir +} + +# Test runner +[ "$1" = "" ] && { + grep -e "^function *test_" $0 | cut -d" " -f2 +} + +[ "$1" = "run_tests" ] && { + for testname in $($0) + do + echo "$testname" + before_each_test + ( + set -eux + $testname + ) + if [ "$?" != "0" ] + then + echo "FAIL" + exit 1 + else + echo "PASS" + fi + + after_each_test + done +} From cf9eef859791d7ce6b809808d756c36ece5550e7 Mon Sep 17 00:00:00 2001 From: Doug Hellmann Date: Mon, 25 Mar 2013 19:34:48 -0400 Subject: [PATCH 0015/4704] Make sure the NOVA_INSTANCES_PATH is created Under some conditions when NOVA_STATE_PATH is set the NOVA_INSTANCES_PATH directory under it is removed but then not correctly recreated if the parent directory is owned by root instead of the current user running devstack. This change fixes that problem by creating the NOVA_INSTANCES_PATH as root, and then chowning it to the correct user. Change-Id: I7fb724787b5e915bae9bd058454d2aa38991364e Signed-off-by: Doug Hellmann --- lib/nova | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/nova b/lib/nova index 9632a8c5e9..4449f81674 100644 --- a/lib/nova +++ b/lib/nova @@ -355,7 +355,8 @@ EOF" # ---------------- # Nova stores each instance in its own directory. - mkdir -p $NOVA_INSTANCES_PATH + sudo mkdir -p $NOVA_INSTANCES_PATH + sudo chown -R $STACK_USER $NOVA_INSTANCES_PATH # You can specify a different disk to be mounted and used for backing the # virtual machines. 
If there is a partition labeled nova-instances we From 640f1e4c8d0da6de70e358f5bad28fb1b0306e0d Mon Sep 17 00:00:00 2001 From: Aaron Rosen Date: Tue, 26 Mar 2013 16:52:53 -0700 Subject: [PATCH 0016/4704] Change default PUBLIC_NETWORK_NAME/DEFAULT_FLOATING_POOL name This patch changes the default network name/floating pool from nova to pubic as the name public makes more sense than nova for a public network. Change-Id: I4153750c08d1510dbad3051681e8c142a79fe1a1 --- exercises/floating_ips.sh | 2 +- stackrc | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh index b4e1c423e6..ad11a6b793 100755 --- a/exercises/floating_ips.sh +++ b/exercises/floating_ips.sh @@ -48,7 +48,7 @@ DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami} SECGROUP=${SECGROUP:-test_secgroup} # Default floating IP pool name -DEFAULT_FLOATING_POOL=${DEFAULT_FLOATING_POOL:-nova} +DEFAULT_FLOATING_POOL=${DEFAULT_FLOATING_POOL:-public} # Additional floating IP pool and range TEST_FLOATING_POOL=${TEST_FLOATING_POOL:-test} diff --git a/stackrc b/stackrc index f35cdc07fb..5f8b6ef8a5 100644 --- a/stackrc +++ b/stackrc @@ -217,7 +217,7 @@ S3_SERVICE_PORT=${S3_SERVICE_PORT:-3333} # Common network names PRIVATE_NETWORK_NAME=${PRIVATE_NETWORK_NAME:-"private"} -PUBLIC_NETWORK_NAME=${PUBLIC_NETWORK_NAME:-"nova"} +PUBLIC_NETWORK_NAME=${PUBLIC_NETWORK_NAME:-"public"} # Compatibility until it's eradicated from CI USE_SCREEN=${SCREEN_DEV:-$USE_SCREEN} From 0680204b1f437f140d6b4ef8f81e587dce4cb17b Mon Sep 17 00:00:00 2001 From: Jonathan Michalon Date: Thu, 21 Mar 2013 14:29:58 +0100 Subject: [PATCH 0017/4704] Add support for iso files as glance images Change-Id: Id1731c1a0a579fab44bb3944d2369b3feecf15d2 --- functions | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/functions b/functions index fe5054739d..15901662b8 100644 --- a/functions +++ b/functions @@ -1148,6 +1148,12 @@ function upload_image() { DISK_FORMAT=qcow2 CONTAINER_FORMAT=bare ;; + 
*.iso) + IMAGE="$FILES/${IMAGE_FNAME}" + IMAGE_NAME=$(basename "$IMAGE" ".iso") + DISK_FORMAT=iso + CONTAINER_FORMAT=bare + ;; *) echo "Do not know what to do with $IMAGE_FNAME"; false;; esac From e542883c898de40702ae3506f042ba84040f1f71 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Wed, 27 Mar 2013 23:40:59 +0100 Subject: [PATCH 0018/4704] Only start zeromq-receiver when we're using zeromq. Change-Id: Ibd3e6a74f1098e330be0fcbbb83b9188e51202ad --- stack.sh | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index cfce6be121..0407c30df6 100755 --- a/stack.sh +++ b/stack.sh @@ -1017,7 +1017,10 @@ if is_service_enabled key && is_service_enabled swift3 && is_service_enabled nov iniset $NOVA_CONF DEFAULT s3_affix_tenant "True" fi -screen_it zeromq "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-rpc-zmq-receiver" +if is_service_enabled zeromq; then + echo_summary "Starting zermomq receiver" + screen_it zeromq "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-rpc-zmq-receiver" +fi # Launch the nova-api and wait for it to answer before continuing if is_service_enabled n-api; then From 7104ab406a30e20dbeffdbdcc5820cc508d8cab2 Mon Sep 17 00:00:00 2001 From: Akihiro MOTOKI Date: Wed, 27 Mar 2013 19:47:11 +0900 Subject: [PATCH 0019/4704] Use example settings in horizon repo as local_settings.py The current horizon_settings.py in devstack is out-of-date and we tend to forget to update this file. This commit changes devstack to use the example settings in horizon repo. 
Change-Id: I0bb6af21a806a72ed59f31b094dd21da85ca335e --- files/horizon_settings.py | 169 -------------------------------------- lib/horizon | 6 +- 2 files changed, 5 insertions(+), 170 deletions(-) delete mode 100644 files/horizon_settings.py diff --git a/files/horizon_settings.py b/files/horizon_settings.py deleted file mode 100644 index ce92e2c9e2..0000000000 --- a/files/horizon_settings.py +++ /dev/null @@ -1,169 +0,0 @@ -import os - -from django.utils.translation import ugettext_lazy as _ - -DEBUG = True -TEMPLATE_DEBUG = DEBUG -PROD = False -USE_SSL = False - -# Set SSL proxy settings: -# For Django 1.4+ pass this header from the proxy after terminating the SSL, -# and don't forget to strip it from the client's request. -# For more information see: -# https://docs.djangoproject.com/en/1.4/ref/settings/#secure-proxy-ssl-header -# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https') - -# Specify a regular expression to validate user passwords. -# HORIZON_CONFIG = { -# "password_validator": { -# "regex": '.*', -# "help_text": _("Your password does not meet the requirements.") -# }, -# 'help_url': "http://docs.openstack.org" -# } - -LOCAL_PATH = os.path.dirname(os.path.abspath(__file__)) - -# FIXME: We need to change this to mysql, instead of sqlite. -DATABASES = { - 'default': { - 'ENGINE': 'django.db.backends.sqlite3', - 'NAME': os.path.join(LOCAL_PATH, 'dashboard_openstack.sqlite3'), - 'TEST_NAME': os.path.join(LOCAL_PATH, 'test.sqlite3'), - }, -} - -# Set custom secret key: -# You can either set it to a specific value or you can let horizion generate a -# default secret key that is unique on this machine, e.i. regardless of the -# amount of Python WSGI workers (if used behind Apache+mod_wsgi): However, there -# may be situations where you would want to set this explicitly, e.g. when -# multiple dashboard instances are distributed on different machines (usually -# behind a load-balancer). 
Either you have to make sure that a session gets all -# requests routed to the same dashboard instance or you set the same SECRET_KEY -# for all of them. -from horizon.utils import secret_key -SECRET_KEY = secret_key.generate_or_read_from_file(os.path.join(LOCAL_PATH, '.secret_key_store')) - -# We recommend you use memcached for development; otherwise after every reload -# of the django development server, you will have to login again. To use -# memcached set CACHE_BACKED to something like 'memcached://127.0.0.1:11211/' -CACHE_BACKEND = 'dummy://' -SESSION_ENGINE = 'django.contrib.sessions.backends.cached_db' - -# Send email to the console by default -EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' -# Or send them to /dev/null -#EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend' - -# django-mailer uses a different settings attribute -MAILER_EMAIL_BACKEND = EMAIL_BACKEND - -# Configure these for your outgoing email host -# EMAIL_HOST = 'smtp.my-company.com' -# EMAIL_PORT = 25 -# EMAIL_HOST_USER = 'djangomail' -# EMAIL_HOST_PASSWORD = 'top-secret!' - -# For multiple regions uncomment this configuration, and add (endpoint, title). -# AVAILABLE_REGIONS = [ -# ('http://cluster1.example.com:5000/v2.0', 'cluster1'), -# ('http://cluster2.example.com:5000/v2.0', 'cluster2'), -# ] - -OPENSTACK_HOST = "127.0.0.1" -OPENSTACK_KEYSTONE_URL = "http://%s:5000/v2.0" % OPENSTACK_HOST -OPENSTACK_KEYSTONE_DEFAULT_ROLE = "Member" - -# Disable SSL certificate checks (useful for self-signed certificates): -# OPENSTACK_SSL_NO_VERIFY = True - -HORIZON_CONFIG = { - 'dashboards': ('project', 'admin', 'settings',), - 'default_dashboard': 'project', -} - -# The OPENSTACK_KEYSTONE_BACKEND settings can be used to identify the -# capabilities of the auth backend for Keystone. -# If Keystone has been configured to use LDAP as the auth backend then set -# can_edit_user to False and name to 'ldap'. 
-# -# TODO(tres): Remove these once Keystone has an API to identify auth backend. -OPENSTACK_KEYSTONE_BACKEND = { - 'name': 'native', - 'can_edit_user': True -} - -OPENSTACK_HYPERVISOR_FEATURES = { - 'can_set_mount_point': True -} - -# OPENSTACK_ENDPOINT_TYPE specifies the endpoint type to use for the endpoints -# in the Keystone service catalog. Use this setting when Horizon is running -# external to the OpenStack environment. The default is 'internalURL'. -#OPENSTACK_ENDPOINT_TYPE = "publicURL" - -# The number of objects (Swift containers/objects or images) to display -# on a single page before providing a paging element (a "more" link) -# to paginate results. -API_RESULT_LIMIT = 1000 -API_RESULT_PAGE_SIZE = 20 - -SWIFT_PAGINATE_LIMIT = 100 - -# The timezone of the server. This should correspond with the timezone -# of your entire OpenStack installation, and hopefully be in UTC. -TIME_ZONE = "UTC" - -#LOGGING = { -# 'version': 1, -# # When set to True this will disable all logging except -# # for loggers specified in this configuration dictionary. Note that -# # if nothing is specified here and disable_existing_loggers is True, -# # django.db.backends will still log unless it is disabled explicitly. -# 'disable_existing_loggers': False, -# 'handlers': { -# 'null': { -# 'level': 'DEBUG', -# 'class': 'django.utils.log.NullHandler', -# }, -# 'console': { -# # Set the level to "DEBUG" for verbose output logging. -# 'level': 'INFO', -# 'class': 'logging.StreamHandler', -# }, -# }, -# 'loggers': { -# # Logging from django.db.backends is VERY verbose, send to null -# # by default. 
-# 'django.db.backends': { -# 'handlers': ['null'], -# 'propagate': False, -# }, -# 'horizon': { -# 'handlers': ['console'], -# 'propagate': False, -# }, -# 'openstack_dashboard': { -# 'handlers': ['console'], -# 'propagate': False, -# }, -# 'novaclient': { -# 'handlers': ['console'], -# 'propagate': False, -# }, -# 'keystoneclient': { -# 'handlers': ['console'], -# 'propagate': False, -# }, -# 'glanceclient': { -# 'handlers': ['console'], -# 'propagate': False, -# }, -# 'nose.plugins.manager': { -# 'handlers': ['console'], -# 'propagate': False, -# } -# } -#} diff --git a/lib/horizon b/lib/horizon index 9c96b58e6f..bc739ed521 100644 --- a/lib/horizon +++ b/lib/horizon @@ -29,6 +29,10 @@ set +o xtrace # Set up default directories HORIZON_DIR=$DEST/horizon +# local_settings.py is used to customize Dashboard settings. +# The example file in Horizon repo is used by default. +HORIZON_SETTINGS=${HORIZON_SETTINGS:-$HORIZON_DIR/openstack_dashboard/local/local_settings.py.example} + # Allow overriding the default Apache user and group, default to # current user and his default group. APACHE_USER=${APACHE_USER:-$USER} @@ -77,7 +81,7 @@ function init_horizon() { # ``local_settings.py`` is used to override horizon default settings. local_settings=$HORIZON_DIR/openstack_dashboard/local/local_settings.py - cp $FILES/horizon_settings.py $local_settings + cp $HORIZON_SETTINGS $local_settings # enable loadbalancer dashboard in case service is enabled if is_service_enabled q-lbaas; then From c0fad2b6a1729e7b1e7c6f892d6cac9d4a086433 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Thu, 28 Mar 2013 12:22:25 -0700 Subject: [PATCH 0020/4704] Don't clobber all iSCSI connections during nova cleanup The existing cleanup_nova() code inadvertently logs out of all iSCSI connections on the machine because it does not pass the target name as the -T parameter. 
This patch changes it to step through the targets that match the pattern, logging out of each individually, and following up with a delete op to cleanup the database. Change-Id: I26c14acbe0e8de18f0e3bf7ad83b37379503e199 --- lib/nova | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/lib/nova b/lib/nova index 4449f81674..46eeea49d4 100644 --- a/lib/nova +++ b/lib/nova @@ -156,8 +156,11 @@ function cleanup_nova() { fi # Logout and delete iscsi sessions - sudo iscsiadm --mode node | grep $VOLUME_NAME_PREFIX | cut -d " " -f2 | xargs sudo iscsiadm --mode node --logout || true - sudo iscsiadm --mode node | grep $VOLUME_NAME_PREFIX | cut -d " " -f2 | sudo iscsiadm --mode node --op delete || true + tgts=$(sudo iscsiadm --mode node | grep $VOLUME_NAME_PREFIX | cut -d ' ' -f2) + for target in $tgts; do + sudo iscsiadm --mode node -T $target --logout || true + done + sudo iscsiadm --mode node --op delete || true # Clean out the instances directory. sudo rm -rf $NOVA_INSTANCES_PATH/* From c07112a203aa9a25d2abaf497a740abd908e5919 Mon Sep 17 00:00:00 2001 From: Akihiro MOTOKI Date: Fri, 22 Mar 2013 16:35:38 +0900 Subject: [PATCH 0021/4704] Allow a plugin specific interface_driver config in q-lbaas Previously interface_driver for q-lbaas is hardcoded and q-lbaas does not work when quantum plugin requires additional configuration (like ovs_use_veth=True). This commit set up interface_driver using quantum_plugin_setup_interface_driver function. 
This commit also moves a script related Quantum service plugin to lib/quantum_plugins/services/ Change-Id: Iad11c1b31071dfb580843be18be78c83feb91f10 --- lib/quantum | 2 +- .../{ => services}/agent_loadbalancer | 13 +++++-------- 2 files changed, 6 insertions(+), 9 deletions(-) rename lib/quantum_plugins/{ => services}/agent_loadbalancer (60%) diff --git a/lib/quantum b/lib/quantum index efdd43d071..b088202f73 100644 --- a/lib/quantum +++ b/lib/quantum @@ -179,7 +179,7 @@ source $TOP_DIR/lib/quantum_plugins/$Q_PLUGIN # Agent loadbalancer service plugin functions # ------------------------------------------- # Hardcoding for 1 service plugin for now -source $TOP_DIR/lib/quantum_plugins/agent_loadbalancer +source $TOP_DIR/lib/quantum_plugins/services/agent_loadbalancer # Use security group or not if has_quantum_plugin_security_group; then diff --git a/lib/quantum_plugins/agent_loadbalancer b/lib/quantum_plugins/services/agent_loadbalancer similarity index 60% rename from lib/quantum_plugins/agent_loadbalancer rename to lib/quantum_plugins/services/agent_loadbalancer index 87e7aaaf36..b6528b0e84 100644 --- a/lib/quantum_plugins/agent_loadbalancer +++ b/lib/quantum_plugins/services/agent_loadbalancer @@ -7,6 +7,7 @@ set +o xtrace AGENT_LBAAS_BINARY="$QUANTUM_DIR/bin/quantum-lbaas-agent" +AGENT_LBAAS_PLUGIN=quantum.plugins.services.agent_loadbalancer.plugin.LoadBalancerPlugin function quantum_agent_lbaas_install_agent_packages() { if is_ubuntu || is_fedora; then @@ -19,9 +20,9 @@ function quantum_agent_lbaas_install_agent_packages() { function quantum_agent_lbaas_configure_common() { if [[ $Q_SERVICE_PLUGIN_CLASSES == '' ]]; then - Q_SERVICE_PLUGIN_CLASSES="quantum.plugins.services.agent_loadbalancer.plugin.LoadBalancerPlugin" + Q_SERVICE_PLUGIN_CLASSES=$AGENT_LBAAS_PLUGIN else - Q_SERVICE_PLUGIN_CLASSES="$Q_SERVICE_PLUGIN_CLASSES,quantum.plugins.services.agent_loadbalancer.plugin.LoadBalancerPlugin" + 
Q_SERVICE_PLUGIN_CLASSES="$Q_SERVICE_PLUGIN_CLASSES,$AGENT_LBAAS_PLUGIN" fi } @@ -31,13 +32,9 @@ function quantum_agent_lbaas_configure_agent() { LBAAS_AGENT_CONF_FILENAME="$LBAAS_AGENT_CONF_PATH/lbaas_agent.ini" - cp $QUANTUM_DIR/etc/lbaas_agent.ini /$LBAAS_AGENT_CONF_FILENAME + cp $QUANTUM_DIR/etc/lbaas_agent.ini $LBAAS_AGENT_CONF_FILENAME - if [[ $Q_PLUGIN == 'linuxbridge' || $Q_PLUGIN == 'brocade' ]]; then - iniset $LBAAS_AGENT_CONF_FILENAME DEFAULT interface_driver "quantum.agent.linux.interface.BridgeInterfaceDriver" - else - iniset $LBAAS_AGENT_CONF_FILENAME DEFAULT interface_driver "quantum.agent.linux.interface.OVSInterfaceDriver" - fi + quantum_plugin_setup_interface_driver $LBAAS_AGENT_CONF_FILENAME if is_fedora; then iniset $LBAAS_AGENT_CONF_FILENAME DEFAULT user_group "nobody" From 3ac95355bc4240ff04f997609a9aba93c22a6b45 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Fri, 29 Mar 2013 10:15:36 -0500 Subject: [PATCH 0022/4704] Correctly source .stackenv Specifically to recall HOST_IP and SERVICE_HOST for the current DevStack configuration. 
Bug 930274 Change-Id: I299d94101ab93faccc88917503409e0afaa0a523 --- openrc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/openrc b/openrc index 3ef44fd151..8af28543fb 100644 --- a/openrc +++ b/openrc @@ -27,8 +27,8 @@ source $RC_DIR/functions source $RC_DIR/stackrc # Load the last env variables if available -if [[ -r $TOP_DIR/.stackenv ]]; then - source $TOP_DIR/.stackenv +if [[ -r $RC_DIR/.stackenv ]]; then + source $RC_DIR/.stackenv fi # Get some necessary configuration From e9e80f9bfaed608cbff8249cab52561bb3d67a4d Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Fri, 29 Mar 2013 10:22:53 -0500 Subject: [PATCH 0023/4704] Clean up Nova API prereq Bug 980966 Change-Id: If5bbcc094b54a2de96151d69653e57e0e540d6de --- files/apts/n-api | 1 - 1 file changed, 1 deletion(-) diff --git a/files/apts/n-api b/files/apts/n-api index ad943ffdf8..0f08daace3 100644 --- a/files/apts/n-api +++ b/files/apts/n-api @@ -1,2 +1 @@ -gcc # temporary because this pulls in glance to get the client without running the glance prereqs python-dateutil From c77b932e16398eebb810501973f6c8a706a3ba58 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Fri, 29 Mar 2013 10:51:01 -0500 Subject: [PATCH 0024/4704] Move glace's swift config to lib/glance Change-Id: Icbb355c15bfffe17725ea5cc64cfa5e76c1e74e6 --- lib/glance | 9 +++++++++ stack.sh | 10 ---------- 2 files changed, 9 insertions(+), 10 deletions(-) diff --git a/lib/glance b/lib/glance index edf6982a63..e9d05622f8 100644 --- a/lib/glance +++ b/lib/glance @@ -116,6 +116,15 @@ function configure_glance() { iniset_rpc_backend glance $GLANCE_API_CONF DEFAULT iniset $GLANCE_API_CONF keystone_authtoken signing_dir $GLANCE_AUTH_CACHE_DIR/api + # Store the images in swift if enabled. 
+ if is_service_enabled s-proxy; then + iniset $GLANCE_API_CONF DEFAULT default_store swift + iniset $GLANCE_API_CONF DEFAULT swift_store_auth_address $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ + iniset $GLANCE_API_CONF DEFAULT swift_store_user $SERVICE_TENANT_NAME:glance + iniset $GLANCE_API_CONF DEFAULT swift_store_key $SERVICE_PASSWORD + iniset $GLANCE_API_CONF DEFAULT swift_store_create_container_on_put True + fi + cp -p $GLANCE_DIR/etc/glance-registry-paste.ini $GLANCE_REGISTRY_PASTE_INI cp -p $GLANCE_DIR/etc/glance-api-paste.ini $GLANCE_API_PASTE_INI diff --git a/stack.sh b/stack.sh index 0407c30df6..baf44b000e 100755 --- a/stack.sh +++ b/stack.sh @@ -826,17 +826,7 @@ fi if is_service_enabled g-reg; then echo_summary "Configuring Glance" - init_glance - - # Store the images in swift if enabled. - if is_service_enabled s-proxy; then - iniset $GLANCE_API_CONF DEFAULT default_store swift - iniset $GLANCE_API_CONF DEFAULT swift_store_auth_address $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ - iniset $GLANCE_API_CONF DEFAULT swift_store_user $SERVICE_TENANT_NAME:glance - iniset $GLANCE_API_CONF DEFAULT swift_store_key $SERVICE_PASSWORD - iniset $GLANCE_API_CONF DEFAULT swift_store_create_container_on_put True - fi fi From 1c6c1125199f1d987848bb1e3522b90358a4531b Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 27 Mar 2013 17:40:53 -0500 Subject: [PATCH 0025/4704] Split disk creation out of configure_swift() Grenade needs to be able to configure Swift without initializing the backing disk files. Move it into create_swift_disk() and call from init_swift(). Also move start of n-obj (if swift3 is not enabled) to start_nova(). 
Change-Id: Id33939182d84eeff10ba4139eeced2c1bc532f0f --- lib/nova | 5 +++ lib/swift | 104 +++++++++++++++++++++++++++++------------------------- stack.sh | 6 ---- 3 files changed, 61 insertions(+), 54 deletions(-) diff --git a/lib/nova b/lib/nova index 4449f81674..8f74897cca 100644 --- a/lib/nova +++ b/lib/nova @@ -660,6 +660,11 @@ function start_nova() { screen_it n-xvnc "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-xvpvncproxy --config-file $NOVA_CONF" screen_it n-spice "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-spicehtml5proxy --config-file $NOVA_CONF --web $SPICE_DIR" screen_it n-cauth "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-consoleauth" + + # Starting the nova-objectstore only if swift3 service is not enabled. + # Swift will act as s3 objectstore. + is_service_enabled swift3 || \ + screen_it n-obj "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-objectstore" } # stop_nova() - Stop running processes (non-screen) diff --git a/lib/swift b/lib/swift index d50b554169..783ec7503a 100644 --- a/lib/swift +++ b/lib/swift @@ -111,54 +111,8 @@ function configure_swift() { # Make sure to kill all swift processes first swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true - # First do a bit of setup by creating the directories and - # changing the permissions so we can run it as our user. - - USER_GROUP=$(id -g) - sudo mkdir -p ${SWIFT_DATA_DIR}/{drives,cache,run,logs} - sudo chown -R $USER:${USER_GROUP} ${SWIFT_DATA_DIR} - - # Create a loopback disk and format it to XFS. 
- if [[ -e ${SWIFT_DATA_DIR}/drives/images/swift.img ]]; then - if egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then - sudo umount ${SWIFT_DATA_DIR}/drives/sdb1 - sudo rm -f ${SWIFT_DATA_DIR}/drives/images/swift.img - fi - fi - - mkdir -p ${SWIFT_DATA_DIR}/drives/images - sudo touch ${SWIFT_DATA_DIR}/drives/images/swift.img - sudo chown $USER: ${SWIFT_DATA_DIR}/drives/images/swift.img - - dd if=/dev/zero of=${SWIFT_DATA_DIR}/drives/images/swift.img \ - bs=1024 count=0 seek=${SWIFT_LOOPBACK_DISK_SIZE} - - # Make a fresh XFS filesystem - mkfs.xfs -f -i size=1024 ${SWIFT_DATA_DIR}/drives/images/swift.img - - # Mount the disk with mount options to make it as efficient as possible - mkdir -p ${SWIFT_DATA_DIR}/drives/sdb1 - if ! egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then - sudo mount -t xfs -o loop,noatime,nodiratime,nobarrier,logbufs=8 \ - ${SWIFT_DATA_DIR}/drives/images/swift.img ${SWIFT_DATA_DIR}/drives/sdb1 - fi - - # Create a link to the above mount and - # create all of the directories needed to emulate a few different servers - for node_number in ${SWIFT_REPLICAS_SEQ}; do - sudo ln -sf ${SWIFT_DATA_DIR}/drives/sdb1/$node_number ${SWIFT_DATA_DIR}/$node_number; - drive=${SWIFT_DATA_DIR}/drives/sdb1/${node_number} - node=${SWIFT_DATA_DIR}/${node_number}/node - node_device=${node}/sdb1 - [[ -d $node ]] && continue - [[ -d $drive ]] && continue - sudo install -o ${USER} -g $USER_GROUP -d $drive - sudo install -o ${USER} -g $USER_GROUP -d $node_device - sudo chown -R $USER: ${node} - done - - sudo mkdir -p ${SWIFT_CONF_DIR}/{object,container,account}-server - sudo chown -R $USER: ${SWIFT_CONF_DIR} + sudo mkdir -p ${SWIFT_CONF_DIR}/{object,container,account}-server + sudo chown -R $USER: ${SWIFT_CONF_DIR} if [[ "$SWIFT_CONF_DIR" != "/etc/swift" ]]; then # Some swift tools are hard-coded to use ``/etc/swift`` and are apparently not going to be fixed. 
@@ -332,12 +286,66 @@ function configure_swiftclient() { setup_develop $SWIFTCLIENT_DIR } +# create_swift_disk - Create Swift backing disk +function create_swift_disk() { + local node_number + + # First do a bit of setup by creating the directories and + # changing the permissions so we can run it as our user. + + USER_GROUP=$(id -g) + sudo mkdir -p ${SWIFT_DATA_DIR}/{drives,cache,run,logs} + sudo chown -R $USER:${USER_GROUP} ${SWIFT_DATA_DIR} + + # Create a loopback disk and format it to XFS. + if [[ -e ${SWIFT_DATA_DIR}/drives/images/swift.img ]]; then + if egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then + sudo umount ${SWIFT_DATA_DIR}/drives/sdb1 + sudo rm -f ${SWIFT_DATA_DIR}/drives/images/swift.img + fi + fi + + mkdir -p ${SWIFT_DATA_DIR}/drives/images + sudo touch ${SWIFT_DATA_DIR}/drives/images/swift.img + sudo chown $USER: ${SWIFT_DATA_DIR}/drives/images/swift.img + + dd if=/dev/zero of=${SWIFT_DATA_DIR}/drives/images/swift.img \ + bs=1024 count=0 seek=${SWIFT_LOOPBACK_DISK_SIZE} + + # Make a fresh XFS filesystem + mkfs.xfs -f -i size=1024 ${SWIFT_DATA_DIR}/drives/images/swift.img + + # Mount the disk with mount options to make it as efficient as possible + mkdir -p ${SWIFT_DATA_DIR}/drives/sdb1 + if ! 
egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then + sudo mount -t xfs -o loop,noatime,nodiratime,nobarrier,logbufs=8 \ + ${SWIFT_DATA_DIR}/drives/images/swift.img ${SWIFT_DATA_DIR}/drives/sdb1 + fi + + # Create a link to the above mount and + # create all of the directories needed to emulate a few different servers + for node_number in ${SWIFT_REPLICAS_SEQ}; do + sudo ln -sf ${SWIFT_DATA_DIR}/drives/sdb1/$node_number ${SWIFT_DATA_DIR}/$node_number; + drive=${SWIFT_DATA_DIR}/drives/sdb1/${node_number} + node=${SWIFT_DATA_DIR}/${node_number}/node + node_device=${node}/sdb1 + [[ -d $node ]] && continue + [[ -d $drive ]] && continue + sudo install -o ${USER} -g $USER_GROUP -d $drive + sudo install -o ${USER} -g $USER_GROUP -d $node_device + sudo chown -R $USER: ${node} + done +} + # init_swift() - Initialize rings function init_swift() { local node_number # Make sure to kill all swift processes first swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true + # Forcibly re-create the backing filesystem + create_swift_disk + # This is where we create three different rings for swift with # different object servers binding on different ports. pushd ${SWIFT_CONF_DIR} >/dev/null && { diff --git a/stack.sh b/stack.sh index cfce6be121..9034864d3a 100755 --- a/stack.sh +++ b/stack.sh @@ -1062,12 +1062,6 @@ if is_service_enabled ceilometer; then start_ceilometer fi -# Starting the nova-objectstore only if swift3 service is not enabled. -# Swift will act as s3 objectstore. -is_service_enabled swift3 || \ - screen_it n-obj "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-objectstore" - - # Configure and launch heat engine, api and metadata if is_service_enabled heat; then # Initialize heat, including replacing nova flavors From 584d90ec56e18cbb9c0f15fe6af35504c02ea4bd Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Fri, 29 Mar 2013 14:34:53 -0400 Subject: [PATCH 0026/4704] add emacs shell-script tagging for files that don't start with a #! 
or end in .sh, the added tags are nice for emacs users to automatically switch to the right mode. Change-Id: If4b93e106191bc744ccad8420cef20e751cdf902 --- functions | 2 +- lib/baremetal | 4 ++++ lib/ceilometer | 4 ++++ lib/cinder | 4 ++++ lib/database | 4 ++++ lib/databases/mysql | 4 ++++ lib/databases/postgresql | 4 ++++ lib/glance | 4 ++++ lib/heat | 4 ++++ lib/horizon | 4 ++++ lib/keystone | 4 ++++ lib/ldap | 4 ++++ lib/nova | 4 ++++ lib/quantum | 4 ++++ lib/rpc_backend | 4 ++++ lib/swift | 4 ++++ lib/tempest | 4 ++++ lib/template | 4 ++++ lib/tls | 4 ++++ stackrc | 4 ++++ 20 files changed, 77 insertions(+), 1 deletion(-) diff --git a/functions b/functions index 15901662b8..0bbf477fa9 100644 --- a/functions +++ b/functions @@ -1356,5 +1356,5 @@ $XTRACE # Local variables: -# -*- mode: Shell-script -*- +# mode: shell-script # End: diff --git a/lib/baremetal b/lib/baremetal index 5326dd1ff1..24cce9f7dd 100644 --- a/lib/baremetal +++ b/lib/baremetal @@ -434,3 +434,7 @@ function add_baremetal_node() { # Restore xtrace $XTRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/ceilometer b/lib/ceilometer index d90694c513..58cafd1d79 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -138,3 +138,7 @@ function stop_ceilometer() { # Restore xtrace $XTRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/cinder b/lib/cinder index 7688ad9903..7afd69bdc8 100644 --- a/lib/cinder +++ b/lib/cinder @@ -431,3 +431,7 @@ function stop_cinder() { # Restore xtrace $XTRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/database b/lib/database index 79b77a267b..cbe886f5c8 100644 --- a/lib/database +++ b/lib/database @@ -114,3 +114,7 @@ function database_connection_url { # Restore xtrace $XTRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/databases/mysql b/lib/databases/mysql index 0633ab046e..30450b1caf 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -139,3 +139,7 @@ function 
database_connection_url_mysql { # Restore xtrace $MY_XTRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/databases/postgresql b/lib/databases/postgresql index efc206fa27..b64de2c95e 100644 --- a/lib/databases/postgresql +++ b/lib/databases/postgresql @@ -90,3 +90,7 @@ function database_connection_url_postgresql { # Restore xtrace $PG_XTRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/glance b/lib/glance index e9d05622f8..aa101e10da 100644 --- a/lib/glance +++ b/lib/glance @@ -206,3 +206,7 @@ function stop_glance() { # Restore xtrace $XTRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/heat b/lib/heat index 56d6f396ff..88535c352e 100644 --- a/lib/heat +++ b/lib/heat @@ -184,3 +184,7 @@ function stop_heat() { # Restore xtrace $XTRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/horizon b/lib/horizon index bc739ed521..b63e1f8b01 100644 --- a/lib/horizon +++ b/lib/horizon @@ -178,3 +178,7 @@ function stop_horizon() { # Restore xtrace $XTRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/keystone b/lib/keystone index 805cb6f045..875d537d8b 100644 --- a/lib/keystone +++ b/lib/keystone @@ -346,3 +346,7 @@ function stop_keystone() { # Restore xtrace $XTRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/ldap b/lib/ldap index 9d415c5a84..53f68377b5 100644 --- a/lib/ldap +++ b/lib/ldap @@ -76,3 +76,7 @@ function clear_ldap_state() { # Restore xtrace $XTRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/nova b/lib/nova index 61c05a11cf..91fe0280c0 100644 --- a/lib/nova +++ b/lib/nova @@ -680,3 +680,7 @@ function stop_nova() { # Restore xtrace $XTRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/quantum b/lib/quantum index efdd43d071..61b89f7b0e 100644 --- a/lib/quantum +++ b/lib/quantum @@ -761,3 +761,7 @@ function stop_quantum_third_party() { # Restore xtrace $XTRACE + +# Local variables: 
+# mode: shell-script +# End: diff --git a/lib/rpc_backend b/lib/rpc_backend index bbd51f0cdf..d08cb01f06 100644 --- a/lib/rpc_backend +++ b/lib/rpc_backend @@ -159,3 +159,7 @@ function qpid_is_supported() { # Restore xtrace $XTRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/swift b/lib/swift index 783ec7503a..5931acc405 100644 --- a/lib/swift +++ b/lib/swift @@ -429,3 +429,7 @@ function stop_swift() { # Restore xtrace $XTRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/tempest b/lib/tempest index 85e643e474..c1dc3a33ea 100644 --- a/lib/tempest +++ b/lib/tempest @@ -294,3 +294,7 @@ function init_tempest() { # Restore xtrace $XTRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/template b/lib/template index 02de5cef37..72904fe1c6 100644 --- a/lib/template +++ b/lib/template @@ -78,3 +78,7 @@ function stop_XXXX() { # Restore xtrace $XTRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/tls b/lib/tls index 202edeffbf..fb8f4b9be6 100644 --- a/lib/tls +++ b/lib/tls @@ -316,3 +316,7 @@ function start_tls_proxy() { stud $STUD_PROTO -f $f_host,$f_port -b $b_host,$b_port $DEVSTACK_CERT 2>/dev/null } + +# Local variables: +# mode: shell-script +# End: diff --git a/stackrc b/stackrc index f35cdc07fb..5a4c580536 100644 --- a/stackrc +++ b/stackrc @@ -221,3 +221,7 @@ PUBLIC_NETWORK_NAME=${PUBLIC_NETWORK_NAME:-"nova"} # Compatibility until it's eradicated from CI USE_SCREEN=${SCREEN_DEV:-$USE_SCREEN} + +# Local variables: +# mode: shell-script +# End: From 1151d6ff6116ffa86eb49221cdd949380a4d7423 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Fri, 29 Mar 2013 14:06:52 -0500 Subject: [PATCH 0027/4704] Make Swift *_PORT_BASE configurable DevStack's Swift port defaults changed in the Grizzly cycle, we need to put it back to complete the upgrade without going through the ring build process. 
Change-Id: I30c09839690ad7ba9bb3fdffa3f05aedfc47559e --- lib/swift | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/swift b/lib/swift index 783ec7503a..d01f6871e0 100644 --- a/lib/swift +++ b/lib/swift @@ -79,9 +79,9 @@ SWIFT_REPLICAS_SEQ=$(seq ${SWIFT_REPLICAS}) # Port bases used in port number calclution for the service "nodes" # The specified port number will be used, the additinal ports calculated by # base_port + node_num * 10 -OBJECT_PORT_BASE=6013 -CONTAINER_PORT_BASE=6011 -ACCOUNT_PORT_BASE=6012 +OBJECT_PORT_BASE=${OBJECT_PORT_BASE:-6013} +CONTAINER_PORT_BASE=${CONTAINER_PORT_BASE:-6011} +ACCOUNT_PORT_BASE=${ACCOUNT_PORT_BASE:-6012} # Entry Points From e4f0cd7eed3981086b4a0db967501fffa6a07c1e Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Mon, 1 Apr 2013 15:56:07 -0400 Subject: [PATCH 0028/4704] refactor the install/configure split configure remains just to generate configs, install now gets the setup_develop in addition to the git clone. This lets use remove configure_glanceclient as a function Change-Id: I68e3e3973d15dc0b4f534662a4f57a9f38f69784 --- lib/glance | 9 ++------- stack.sh | 4 ---- 2 files changed, 2 insertions(+), 11 deletions(-) diff --git a/lib/glance b/lib/glance index aa101e10da..3376400035 100644 --- a/lib/glance +++ b/lib/glance @@ -62,15 +62,8 @@ function cleanup_glance() { sudo rm -rf $GLANCE_CACHE_DIR $GLANCE_IMAGE_DIR $GLANCE_AUTH_CACHE_DIR } -# configure_glanceclient() - Set config files, create data dirs, etc -function configure_glanceclient() { - setup_develop $GLANCECLIENT_DIR -} - # configure_glance() - Set config files, create data dirs, etc function configure_glance() { - setup_develop $GLANCE_DIR - if [[ ! 
-d $GLANCE_CONF_DIR ]]; then sudo mkdir -p $GLANCE_CONF_DIR fi @@ -180,11 +173,13 @@ function init_glance() { # install_glanceclient() - Collect source and prepare function install_glanceclient() { git_clone $GLANCECLIENT_REPO $GLANCECLIENT_DIR $GLANCECLIENT_BRANCH + setup_develop $GLANCECLIENT_DIR } # install_glance() - Collect source and prepare function install_glance() { git_clone $GLANCE_REPO $GLANCE_DIR $GLANCE_BRANCH + setup_develop $GLANCE_DIR } # start_glance() - Start running processes, including screen diff --git a/stack.sh b/stack.sh index 7d0dd9b6f7..e2ef8f1955 100755 --- a/stack.sh +++ b/stack.sh @@ -647,10 +647,6 @@ if is_service_enabled g-api n-api; then configure_glance fi -# Do this _after_ glance is installed to override the old binary -# TODO(dtroyer): figure out when this is no longer necessary -configure_glanceclient - if is_service_enabled nova; then # First clean up old instances cleanup_nova From 4bf9d7a74a076ddb11f76b948aae8718d8ec7448 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Mon, 1 Apr 2013 16:41:39 -0400 Subject: [PATCH 0029/4704] change the install/configure split make setup_develop part of install so that configure is only about config file setup. Change-Id: I1ed58011e41c550d3ab2ba33b38cfe16adb3dde4 --- lib/cinder | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/cinder b/lib/cinder index 7afd69bdc8..710d94219d 100644 --- a/lib/cinder +++ b/lib/cinder @@ -123,9 +123,6 @@ function cleanup_cinder() { # configure_cinder() - Set config files, create data dirs, etc function configure_cinder() { - setup_develop $CINDER_DIR - setup_develop $CINDERCLIENT_DIR - if [[ ! 
-d $CINDER_CONF_DIR ]]; then sudo mkdir -p $CINDER_CONF_DIR fi @@ -367,7 +364,10 @@ function init_cinder() { # install_cinder() - Collect source and prepare function install_cinder() { git_clone $CINDER_REPO $CINDER_DIR $CINDER_BRANCH + setup_develop $CINDER_DIR + git_clone $CINDERCLIENT_REPO $CINDERCLIENT_DIR $CINDERCLIENT_BRANCH + setup_develop $CINDERCLIENT_DIR } # apply config.d approach (e.g. Oneiric does not have this) From 1b4b4be78c5f1254bebfb58624d1ef8c2a09531f Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Mon, 1 Apr 2013 16:44:31 -0400 Subject: [PATCH 0030/4704] change configure/install split make it so setup_develop happens in install instead of configure to ensure that we can handle config file generation by itself. Change-Id: I4801d7a0bc6642de2db5b78df1750666895f0aa3 --- lib/keystone | 9 ++------- stack.sh | 1 - 2 files changed, 2 insertions(+), 8 deletions(-) diff --git a/lib/keystone b/lib/keystone index 875d537d8b..0fbc7d709a 100644 --- a/lib/keystone +++ b/lib/keystone @@ -75,15 +75,8 @@ function cleanup_keystone() { : } -# configure_keystoneclient() - Set config files, create data dirs, etc -function configure_keystoneclient() { - setup_develop $KEYSTONECLIENT_DIR -} - # configure_keystone() - Set config files, create data dirs, etc function configure_keystone() { - setup_develop $KEYSTONE_DIR - if [[ ! 
-d $KEYSTONE_CONF_DIR ]]; then sudo mkdir -p $KEYSTONE_CONF_DIR fi @@ -305,6 +298,7 @@ function init_keystone() { # install_keystoneclient() - Collect source and prepare function install_keystoneclient() { git_clone $KEYSTONECLIENT_REPO $KEYSTONECLIENT_DIR $KEYSTONECLIENT_BRANCH + setup_develop $KEYSTONECLIENT_DIR } # install_keystone() - Collect source and prepare @@ -314,6 +308,7 @@ function install_keystone() { install_ldap fi git_clone $KEYSTONE_REPO $KEYSTONE_DIR $KEYSTONE_BRANCH + setup_develop $KEYSTONE_DIR } # start_keystone() - Start running processes, including screen diff --git a/stack.sh b/stack.sh index e2ef8f1955..e6654501b6 100755 --- a/stack.sh +++ b/stack.sh @@ -627,7 +627,6 @@ fi echo_summary "Configuring OpenStack projects" # Set up our checkouts so they are installed in the python path -configure_keystoneclient configure_novaclient setup_develop $OPENSTACKCLIENT_DIR From 2c678ccf632ed17fde609c210dcb698e08f5395c Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Wed, 20 Mar 2013 13:00:44 +1100 Subject: [PATCH 0031/4704] Use sudo to remove packages yum and others need root to remove packages Change-Id: I556505636c41296f9188c248dff13b85616c667c --- functions | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/functions b/functions index 0bbf477fa9..edc4bf9aef 100644 --- a/functions +++ b/functions @@ -781,9 +781,9 @@ function uninstall_package() { if is_ubuntu; then apt_get purge "$@" elif is_fedora; then - yum remove -y "$@" + sudo yum remove -y "$@" elif is_suse; then - rpm -e "$@" + sudo rpm -e "$@" else exit_distro_not_supported "uninstalling packages" fi From 253a1a351b13fd434c9246e7994fb669b1691d20 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 1 Apr 2013 18:23:22 -0500 Subject: [PATCH 0032/4704] Move setup_develop call Move setup_develop() call into the install_XXX() function as the distinction has no real purpose. This eliminates the configure_XXXclient() function for Nova and Swift. 
Also create install_cinderclient() as we will need it to handle dependencies in a followup patch. Chasing down another #$^%&&^^^$%%#$ prettytable version pin Change-Id: I61d761bc57a31caf2ed0ade326ab236eb2bb7358 --- lib/cinder | 3 +++ lib/nova | 9 ++------- lib/swift | 10 ++-------- stack.sh | 5 ++--- 4 files changed, 9 insertions(+), 18 deletions(-) diff --git a/lib/cinder b/lib/cinder index 710d94219d..deace68277 100644 --- a/lib/cinder +++ b/lib/cinder @@ -365,7 +365,10 @@ function init_cinder() { function install_cinder() { git_clone $CINDER_REPO $CINDER_DIR $CINDER_BRANCH setup_develop $CINDER_DIR +} +# install_cinderclient() - Collect source and prepare +function install_cinderclient() { git_clone $CINDERCLIENT_REPO $CINDERCLIENT_DIR $CINDERCLIENT_BRANCH setup_develop $CINDERCLIENT_DIR } diff --git a/lib/nova b/lib/nova index 91fe0280c0..8d045b5aa9 100644 --- a/lib/nova +++ b/lib/nova @@ -169,11 +169,6 @@ function cleanup_nova() { sudo rm -rf $NOVA_STATE_PATH $NOVA_AUTH_CACHE_DIR } -# configure_novaclient() - Set config files, create data dirs, etc -function configure_novaclient() { - setup_develop $NOVACLIENT_DIR -} - # configure_nova_rootwrap() - configure Nova's rootwrap function configure_nova_rootwrap() { # Deploy new rootwrap filters files (owned by root). @@ -204,8 +199,6 @@ function configure_nova_rootwrap() { # configure_nova() - Set config files, create data dirs, etc function configure_nova() { - setup_develop $NOVA_DIR - # Put config files in ``/etc/nova`` for everyone to find if [[ ! 
-d $NOVA_CONF_DIR ]]; then sudo mkdir -p $NOVA_CONF_DIR @@ -597,6 +590,7 @@ function init_nova() { # install_novaclient() - Collect source and prepare function install_novaclient() { git_clone $NOVACLIENT_REPO $NOVACLIENT_DIR $NOVACLIENT_BRANCH + setup_develop $NOVACLIENT_DIR } # install_nova() - Collect source and prepare @@ -627,6 +621,7 @@ function install_nova() { fi git_clone $NOVA_REPO $NOVA_DIR $NOVA_BRANCH + setup_develop $NOVA_DIR } # start_nova_api() - Start the API process ahead of other things diff --git a/lib/swift b/lib/swift index 3e7ee20b1d..eb57477ed7 100644 --- a/lib/swift +++ b/lib/swift @@ -106,8 +106,6 @@ function configure_swift() { local swift_node_config local swift_log_dir - setup_develop $SWIFT_DIR - # Make sure to kill all swift processes first swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true @@ -281,11 +279,6 @@ EOF tee /etc/rsyslog.d/10-swift.conf } -# configure_swiftclient() - Set config files, create data dirs, etc -function configure_swiftclient() { - setup_develop $SWIFTCLIENT_DIR -} - # create_swift_disk - Create Swift backing disk function create_swift_disk() { local node_number @@ -374,13 +367,14 @@ function init_swift() { function install_swift() { git_clone $SWIFT_REPO $SWIFT_DIR $SWIFT_BRANCH + setup_develop $SWIFT_DIR } function install_swiftclient() { git_clone $SWIFTCLIENT_REPO $SWIFTCLIENT_DIR $SWIFTCLIENT_BRANCH + setup_develop $SWIFTCLIENT_DIR } - # start_swift() - Start running processes, including screen function start_swift() { # (re)start rsyslog diff --git a/stack.sh b/stack.sh index e6654501b6..1010b4e031 100755 --- a/stack.sh +++ b/stack.sh @@ -564,9 +564,11 @@ echo_summary "Installing OpenStack project source" # Grab clients first install_keystoneclient install_glanceclient +install_cinderclient install_novaclient # Check out the client libs that are used most git_clone $OPENSTACKCLIENT_REPO $OPENSTACKCLIENT_DIR $OPENSTACKCLIENT_BRANCH +setup_develop $OPENSTACKCLIENT_DIR # glance, swift middleware and 
nova api needs keystone middleware if is_service_enabled key g-api n-api s-proxy; then @@ -627,8 +629,6 @@ fi echo_summary "Configuring OpenStack projects" # Set up our checkouts so they are installed in the python path -configure_novaclient -setup_develop $OPENSTACKCLIENT_DIR if is_service_enabled key g-api n-api s-proxy; then configure_keystone @@ -636,7 +636,6 @@ fi if is_service_enabled s-proxy; then configure_swift - configure_swiftclient if is_service_enabled swift3; then setup_develop $SWIFT3_DIR fi From bd461a36927ba025b7523c005c6abd228ec8c302 Mon Sep 17 00:00:00 2001 From: Lianhao Lu Date: Tue, 2 Apr 2013 15:59:33 +0800 Subject: [PATCH 0033/4704] Remove unused option for ceilometer. This fixed the bug #1163158. Change-Id: Ib93e5b26d1f1309edf2baa5c147337db1411eee5 --- lib/ceilometer | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/ceilometer b/lib/ceilometer index 58cafd1d79..f7d14d547f 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -68,7 +68,6 @@ function configure_ceilometer() { iniset $CEILOMETER_CONF DEFAULT notification_topics 'notifications,glance_notifications' iniset $CEILOMETER_CONF DEFAULT verbose True - iniset $CEILOMETER_CONF DEFAULT `database_connection_url nova` # Install the policy file for the API server cp $CEILOMETER_DIR/etc/ceilometer/policy.json $CEILOMETER_CONF_DIR From 4b1dbb5dabb6a30097030c0a625830a17c5c065f Mon Sep 17 00:00:00 2001 From: Zang MingJie Date: Fri, 29 Mar 2013 14:06:19 -0400 Subject: [PATCH 0034/4704] Keystone doesn't depend on bcrypt any more see https://github.com/openstack/keystone/commit/48f2f650c8b622b55e67610081336055ec9a2c8e#keystone/common/utils.py Change-Id: I3038a96deb9ca319224c07f549c893d24707be0d --- files/apts/keystone | 1 - files/rpms-suse/keystone | 1 - 2 files changed, 2 deletions(-) diff --git a/files/apts/keystone b/files/apts/keystone index ce536bfc81..c98409faaf 100644 --- a/files/apts/keystone +++ b/files/apts/keystone @@ -13,4 +13,3 @@ python-greenlet python-routes libldap2-dev libsasl2-dev 
-python-bcrypt diff --git a/files/rpms-suse/keystone b/files/rpms-suse/keystone index b3c876ad77..7d9a7bfe9b 100644 --- a/files/rpms-suse/keystone +++ b/files/rpms-suse/keystone @@ -12,6 +12,5 @@ python-setuptools # instead of python-distribute; dist:sle11sp2 python-greenlet python-lxml python-mysql -python-py-bcrypt python-pysqlite sqlite3 From a81dcaa62466d6f1260895fd08d1de607f34da21 Mon Sep 17 00:00:00 2001 From: Maru Newby Date: Tue, 26 Mar 2013 00:15:34 -0400 Subject: [PATCH 0035/4704] Increase flexibility of stackrc repo config. * Offline use of devstack previously required defining individual repo overrides. This change maintains support for individual overrides while adding the ability to override GIT_BASE. Change-Id: I4ca8b8e69c0fb05a7c9cf2fd4643eac2f7643aa9 --- stackrc | 112 ++++++++++++++++++++++++++++---------------------------- 1 file changed, 56 insertions(+), 56 deletions(-) diff --git a/stackrc b/stackrc index 7c4fa68c30..3a17e4aa54 100644 --- a/stackrc +++ b/stackrc @@ -44,112 +44,117 @@ NOVA_ENABLED_APIS=ec2,osapi_compute,metadata # be disabled for automated testing by setting this value to False. 
USE_SCREEN=True +# allow local overrides of env variables, including repo config +if [ -f $RC_DIR/localrc ]; then + source $RC_DIR/localrc +fi + # Repositories # ------------ # Base GIT Repo URL # Another option is http://review.openstack.org/p -GIT_BASE=https://github.com +GIT_BASE=${GIT_BASE:-https://github.com} # metering service -CEILOMETER_REPO=${GIT_BASE}/openstack/ceilometer.git -CEILOMETER_BRANCH=master +CEILOMETER_REPO=${CEILOMETER_REPO:-${GIT_BASE}/openstack/ceilometer.git} +CEILOMETER_BRANCH=${CEILOMETER_BRANCH:-master} # ceilometer client library -CEILOMETERCLIENT_REPO=${GIT_BASE}/openstack/python-ceilometerclient.git -CEILOMETERCLIENT_BRANCH=master +CEILOMETERCLIENT_REPO=${CEILOMETERCLIENT_REPO:-${GIT_BASE}/openstack/python-ceilometerclient.git} +CEILOMETERCLIENT_BRANCH=${CEILOMETERCLIENT_BRANCH:-master} # volume service -CINDER_REPO=${GIT_BASE}/openstack/cinder.git -CINDER_BRANCH=master +CINDER_REPO=${CINDER_REPO:-${GIT_BASE}/openstack/cinder.git} +CINDER_BRANCH=${CINDER_BRANCH:-master} # volume client -CINDERCLIENT_REPO=${GIT_BASE}/openstack/python-cinderclient.git -CINDERCLIENT_BRANCH=master +CINDERCLIENT_REPO=${CINDERCLIENT_REPO:-${GIT_BASE}/openstack/python-cinderclient.git} +CINDERCLIENT_BRANCH=${CINDERCLIENT_BRANCH:-master} # compute service -NOVA_REPO=${GIT_BASE}/openstack/nova.git -NOVA_BRANCH=master +NOVA_REPO=${NOVA_REPO:-${GIT_BASE}/openstack/nova.git} +NOVA_BRANCH=${NOVA_BRANCH:-master} # storage service -SWIFT_REPO=${GIT_BASE}/openstack/swift.git -SWIFT_BRANCH=master -SWIFT3_REPO=${GIT_BASE}/fujita/swift3.git -SWIFT3_BRANCH=master +SWIFT_REPO=${SWIFT_REPO:-${GIT_BASE}/openstack/swift.git} +SWIFT_BRANCH=${SWIFT_BRANCH:-master} +SWIFT3_REPO=${SWIFT3_REPO:-${GIT_BASE}/fujita/swift3.git} +SWIFT3_BRANCH=${SWIFT3_BRANCH:-master} # python swift client library -SWIFTCLIENT_REPO=${GIT_BASE}/openstack/python-swiftclient.git -SWIFTCLIENT_BRANCH=master +SWIFTCLIENT_REPO=${SWIFTCLIENT_REPO:-${GIT_BASE}/openstack/python-swiftclient.git} 
+SWIFTCLIENT_BRANCH=${SWIFTCLIENT_BRANCH:-master} # image catalog service -GLANCE_REPO=${GIT_BASE}/openstack/glance.git -GLANCE_BRANCH=master +GLANCE_REPO=${GLANCE_REPO:-${GIT_BASE}/openstack/glance.git} +GLANCE_BRANCH=${GLANCE_BRANCH:-master} # python glance client library -GLANCECLIENT_REPO=${GIT_BASE}/openstack/python-glanceclient.git -GLANCECLIENT_BRANCH=master +GLANCECLIENT_REPO=${GLANCECLIENT_REPO:-${GIT_BASE}/openstack/python-glanceclient.git} +GLANCECLIENT_BRANCH=${GLANCECLIENT_BRANCH:-master} # unified auth system (manages accounts/tokens) -KEYSTONE_REPO=${GIT_BASE}/openstack/keystone.git -KEYSTONE_BRANCH=master +KEYSTONE_REPO=${KEYSTONE_REPO:-${GIT_BASE}/openstack/keystone.git} +KEYSTONE_BRANCH=${KEYSTONE_BRANCH:-master} # a websockets/html5 or flash powered VNC console for vm instances -NOVNC_REPO=${GIT_BASE}/kanaka/noVNC.git -NOVNC_BRANCH=master +NOVNC_REPO=${NOVNC_REPO:-${GIT_BASE}/kanaka/noVNC.git} +NOVNC_BRANCH=${NOVNC_BRANCH:-master} # a websockets/html5 or flash powered SPICE console for vm instances -SPICE_REPO=http://anongit.freedesktop.org/git/spice/spice-html5.git -SPICE_BRANCH=master +SPICE_REPO=${SPICE_REPO:-http://anongit.freedesktop.org/git/spice/spice-html5.git} +SPICE_BRANCH=${SPICE_BRANCH:-master} # django powered web control panel for openstack -HORIZON_REPO=${GIT_BASE}/openstack/horizon.git -HORIZON_BRANCH=master +HORIZON_REPO=${HORIZON_REPO:-${GIT_BASE}/openstack/horizon.git} +HORIZON_BRANCH=${HORIZON_BRANCH:-master} # python client library to nova that horizon (and others) use -NOVACLIENT_REPO=${GIT_BASE}/openstack/python-novaclient.git -NOVACLIENT_BRANCH=master +NOVACLIENT_REPO=${NOVACLIENT_REPO:-${GIT_BASE}/openstack/python-novaclient.git} +NOVACLIENT_BRANCH=${NOVACLIENT_BRANCH:-master} # consolidated openstack python client -OPENSTACKCLIENT_REPO=${GIT_BASE}/openstack/python-openstackclient.git -OPENSTACKCLIENT_BRANCH=master +OPENSTACKCLIENT_REPO=${OPENSTACKCLIENT_REPO:-${GIT_BASE}/openstack/python-openstackclient.git} 
+OPENSTACKCLIENT_BRANCH=${OPENSTACKCLIENT_BRANCH:-master} # python keystone client library to nova that horizon uses -KEYSTONECLIENT_REPO=${GIT_BASE}/openstack/python-keystoneclient.git -KEYSTONECLIENT_BRANCH=master +KEYSTONECLIENT_REPO=${KEYSTONECLIENT_REPO:-${GIT_BASE}/openstack/python-keystoneclient.git} +KEYSTONECLIENT_BRANCH=${KEYSTONECLIENT_BRANCH:-master} # quantum service -QUANTUM_REPO=${GIT_BASE}/openstack/quantum.git -QUANTUM_BRANCH=master +QUANTUM_REPO=${QUANTUM_REPO:-${GIT_BASE}/openstack/quantum.git} +QUANTUM_BRANCH=${QUANTUM_BRANCH:-master} # quantum client -QUANTUMCLIENT_REPO=${GIT_BASE}/openstack/python-quantumclient.git -QUANTUMCLIENT_BRANCH=master +QUANTUMCLIENT_REPO=${QUANTUMCLIENT_REPO:-${GIT_BASE}/openstack/python-quantumclient.git} +QUANTUMCLIENT_BRANCH=${QUANTUMCLIENT_BRANCH:-master} # Tempest test suite -TEMPEST_REPO=${GIT_BASE}/openstack/tempest.git -TEMPEST_BRANCH=master +TEMPEST_REPO=${TEMPEST_REPO:-${GIT_BASE}/openstack/tempest.git} +TEMPEST_BRANCH=${TEMPEST_BRANCH:-master} # heat service -HEAT_REPO=${GIT_BASE}/openstack/heat.git -HEAT_BRANCH=master +HEAT_REPO=${HEAT_REPO:-${GIT_BASE}/openstack/heat.git} +HEAT_BRANCH=${HEAT_BRANCH:-master} # python heat client library -HEATCLIENT_REPO=${GIT_BASE}/openstack/python-heatclient.git -HEATCLIENT_BRANCH=master +HEATCLIENT_REPO=${HEATCLIENT_REPO:-${GIT_BASE}/openstack/python-heatclient.git} +HEATCLIENT_BRANCH=${HEATCLIENT_BRANCH:-master} # ryu service -RYU_REPO=${GIT_BASE}/osrg/ryu.git -RYU_BRANCH=master +RYU_REPO=${RYU_REPO:-${GIT_BASE}/osrg/ryu.git} +RYU_BRANCH=${RYU_BRANCH:-master} # diskimage-builder -BM_IMAGE_BUILD_REPO=${GIT_BASE}/stackforge/diskimage-builder.git -BM_IMAGE_BUILD_BRANCH=master +BM_IMAGE_BUILD_REPO=${BM_IMAGE_BUILD_REPO:-${GIT_BASE}/stackforge/diskimage-builder.git} +BM_IMAGE_BUILD_BRANCH=${BM_IMAGE_BUILD_BRANCH:-master} # bm_poseur # Used to simulate a hardware environment for baremetal # Only used if BM_USE_FAKE_ENV is set -BM_POSEUR_REPO=${GIT_BASE}/tripleo/bm_poseur.git 
-BM_POSEUR_BRANCH=master +BM_POSEUR_REPO=${BM_POSEUR_REPO:-${GIT_BASE}/tripleo/bm_poseur.git} +BM_POSEUR_BRANCH=${BM_POSEUR_BRANCH:-master} # Nova hypervisor configuration. We default to libvirt with **kvm** but will @@ -158,11 +163,6 @@ BM_POSEUR_BRANCH=master VIRT_DRIVER=${VIRT_DRIVER:-libvirt} LIBVIRT_TYPE=${LIBVIRT_TYPE:-kvm} -# allow local overrides of env variables -if [ -f $RC_DIR/localrc ]; then - source $RC_DIR/localrc -fi - # Specify a comma-separated list of UEC images to download and install into glance. # supported urls here are: # * "uec-style" images: From fe51a90005fbea4bacb660907d4f7dd3a1c7f0f1 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 1 Apr 2013 15:48:44 -0500 Subject: [PATCH 0036/4704] Refactor source installation Clean up and organize the download and configuration of source repositories. Change-Id: Iaf38c0aed0a83ebf9a5da3505fe92c9fa200ac1e --- lib/quantum | 12 +----- stack.sh | 110 ++++++++++++++++++++-------------------------------- 2 files changed, 44 insertions(+), 78 deletions(-) diff --git a/lib/quantum b/lib/quantum index 9ad15386f1..68c0539874 100644 --- a/lib/quantum +++ b/lib/quantum @@ -11,8 +11,6 @@ # install_quantumclient # install_quantum_agent_packages # install_quantum_third_party -# setup_quantum -# setup_quantumclient # configure_quantum # init_quantum # configure_quantum_third_party @@ -343,11 +341,13 @@ function init_quantum() { # install_quantum() - Collect source and prepare function install_quantum() { git_clone $QUANTUM_REPO $QUANTUM_DIR $QUANTUM_BRANCH + setup_develop $QUANTUM_DIR } # install_quantumclient() - Collect source and prepare function install_quantumclient() { git_clone $QUANTUMCLIENT_REPO $QUANTUMCLIENT_DIR $QUANTUMCLIENT_BRANCH + setup_develop $QUANTUMCLIENT_DIR } # install_quantum_agent_packages() - Collect source and prepare @@ -356,14 +356,6 @@ function install_quantum_agent_packages() { quantum_plugin_install_agent_packages } -function setup_quantum() { - setup_develop $QUANTUM_DIR -} - 
-function setup_quantumclient() { - setup_develop $QUANTUMCLIENT_DIR -} - # Start running processes, including screen function start_quantum_service_and_check() { # Start the Quantum service diff --git a/stack.sh b/stack.sh index 1010b4e031..62309dc1ae 100755 --- a/stack.sh +++ b/stack.sh @@ -556,117 +556,99 @@ if [[ $TRACK_DEPENDS = True ]] ; then fi -# Check Out Source -# ---------------- +# Check Out and Install Source +# ---------------------------- echo_summary "Installing OpenStack project source" -# Grab clients first +# Install clients libraries install_keystoneclient install_glanceclient install_cinderclient install_novaclient -# Check out the client libs that are used most +if is_service_enabled swift glance; then + install_swiftclient +fi +if is_service_enabled quantum nova; then + install_quantumclient +fi + git_clone $OPENSTACKCLIENT_REPO $OPENSTACKCLIENT_DIR $OPENSTACKCLIENT_BRANCH setup_develop $OPENSTACKCLIENT_DIR -# glance, swift middleware and nova api needs keystone middleware -if is_service_enabled key g-api n-api s-proxy; then - # unified auth system (manages accounts/tokens) +if is_service_enabled key; then install_keystone + configure_keystone fi if is_service_enabled s-proxy; then - install_swiftclient install_swift + configure_swift + if is_service_enabled swift3; then # swift3 middleware to provide S3 emulation to Swift git_clone $SWIFT3_REPO $SWIFT3_DIR $SWIFT3_BRANCH + setup_develop $SWIFT3_DIR fi fi if is_service_enabled g-api n-api; then # image catalog service install_glance + configure_glance +fi + +if is_service_enabled cinder; then + install_cinder + configure_cinder +fi + +if is_service_enabled quantum; then + install_quantum + install_quantum_third_party fi + if is_service_enabled nova; then # compute service install_nova + cleanup_nova + configure_nova fi + if is_service_enabled n-novnc; then # a websockets/html5 or flash powered VNC console for vm instances git_clone $NOVNC_REPO $NOVNC_DIR $NOVNC_BRANCH fi + if 
is_service_enabled n-spice; then # a websockets/html5 or flash powered SPICE console for vm instances git_clone $SPICE_REPO $SPICE_DIR $SPICE_BRANCH fi + if is_service_enabled horizon; then # dashboard install_horizon + configure_horizon fi -if is_service_enabled quantum; then - install_quantum - install_quantumclient - install_quantum_third_party -fi -if is_service_enabled heat; then - install_heat - install_heatclient -fi -if is_service_enabled cinder; then - install_cinder -fi + if is_service_enabled ceilometer; then install_ceilometerclient install_ceilometer fi - -# Initialization -# ============== - -echo_summary "Configuring OpenStack projects" - -# Set up our checkouts so they are installed in the python path - -if is_service_enabled key g-api n-api s-proxy; then - configure_keystone -fi - -if is_service_enabled s-proxy; then - configure_swift - if is_service_enabled swift3; then - setup_develop $SWIFT3_DIR - fi -fi - -if is_service_enabled g-api n-api; then - configure_glance -fi - -if is_service_enabled nova; then - # First clean up old instances - cleanup_nova - configure_nova -fi - -if is_service_enabled horizon; then - configure_horizon -fi - -if is_service_enabled quantum; then - setup_quantumclient - setup_quantum -fi - if is_service_enabled heat; then + install_heat + install_heatclient configure_heat configure_heatclient fi -if is_service_enabled cinder; then - configure_cinder +if is_service_enabled tls-proxy; then + configure_CA + init_CA + init_cert + # Add name to /etc/hosts + # don't be naive and add to existing line! fi if [[ $TRACK_DEPENDS = True ]] ; then @@ -678,14 +660,6 @@ if [[ $TRACK_DEPENDS = True ]] ; then exit 0 fi -if is_service_enabled tls-proxy; then - configure_CA - init_CA - init_cert - # Add name to /etc/hosts - # don't be naive and add to existing line! 
-fi - # Syslog # ------ From ba04583cb7ef827829b746ae4b9b70b4e9f05157 Mon Sep 17 00:00:00 2001 From: Andy Chong Date: Wed, 3 Apr 2013 15:04:50 +0800 Subject: [PATCH 0037/4704] Add network block device package to support LXC rootfs mount on it. This fix the bug 970645. Change-Id: If9fde90ac71cdaaff0d006a4df1a325f6f656744 --- files/apts/n-cpu | 1 + 1 file changed, 1 insertion(+) diff --git a/files/apts/n-cpu b/files/apts/n-cpu index ad2d6d710b..ef281cad0e 100644 --- a/files/apts/n-cpu +++ b/files/apts/n-cpu @@ -1,4 +1,5 @@ # Stuff for diablo volumes +nbd-client lvm2 open-iscsi open-iscsi-utils From fe586b1cbe3fcd62e14027c576c1140d94b9a8fb Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Thu, 28 Mar 2013 15:02:27 +0000 Subject: [PATCH 0038/4704] xenapi: /boot/guest should point to local SR Fixes bug 1037516 This patch creates a directory os-guest-kernels inside the local SR, and sets up /boot/guest to be a symlink to that directory. This way OpenStack won't pollute Dom0's filesystem. Change-Id: If8dfe24355bd782a401fed0f2c4b423efd9c11ba --- tools/xen/functions | 22 ++++++++--- tools/xen/mocks | 26 +++++++++++++ tools/xen/test_functions.sh | 74 ++++++++++++++++++++++++++++++++++++- 3 files changed, 115 insertions(+), 7 deletions(-) diff --git a/tools/xen/functions b/tools/xen/functions index 5b4a661acd..a7d779841f 100644 --- a/tools/xen/functions +++ b/tools/xen/functions @@ -1,10 +1,8 @@ #!/bin/bash function xapi_plugin_location { - for PLUGIN_DIR in "/etc/xapi.d/plugins/" "/usr/lib/xcp/plugins/" - do - if [ -d $PLUGIN_DIR ] - then + for PLUGIN_DIR in "/etc/xapi.d/plugins/" "/usr/lib/xcp/plugins/"; do + if [ -d $PLUGIN_DIR ]; then echo $PLUGIN_DIR return 0 fi @@ -17,7 +15,13 @@ function zip_snapshot_location { } function create_directory_for_kernels { - mkdir -p "/boot/guest" + if [ -d "/boot/guest" ]; then + echo "INFO: /boot/guest directory already exists, using that" >&2 + else + local LOCALPATH="$(get_local_sr_path)/os-guest-kernels" + mkdir -p $LOCALPATH + ln -s 
$LOCALPATH /boot/guest + fi } function extract_remote_zipball { @@ -53,3 +57,11 @@ function install_xapi_plugins_from_zipball { rm -rf $EXTRACTED_FILES chmod a+x ${XAPI_PLUGIN_DIR}* } + +function get_local_sr { + xe sr-list name-label="Local storage" --minimal +} + +function get_local_sr_path { + echo "/var/run/sr-mount/$(get_local_sr)" +} diff --git a/tools/xen/mocks b/tools/xen/mocks index b00655873c..6da6acbba7 100644 --- a/tools/xen/mocks +++ b/tools/xen/mocks @@ -12,6 +12,18 @@ test ! -e "$LIST_OF_DIRECTORIES" && { exit 1 } +test ! -e "$XE_RESPONSE" && { + echo "Mocking is not set up properly." + echo "XE_RESPONSE should point to an existing file." + exit 1 +} + +test ! -e "$XE_CALLS" && { + echo "Mocking is not set up properly." + echo "XE_CALLS should point to an existing file." + exit 1 +} + function mktemp { if test "${1:-}" = "-d"; then @@ -41,6 +53,10 @@ function rm { echo "rm $@" >> $LIST_OF_ACTIONS } +function ln { + echo "ln $@" >> $LIST_OF_ACTIONS +} + function [ { if test "${1:-}" = "-d"; then @@ -57,3 +73,13 @@ function [ { echo "Mock test does not implement the requested function" exit 1 } + +function xe { + cat $XE_RESPONSE + { + for i in $(seq "$#") + do + eval "echo \"\$$i\"" + done + } >> $XE_CALLS +} diff --git a/tools/xen/test_functions.sh b/tools/xen/test_functions.sh index 6817ec3956..62393ca2eb 100755 --- a/tools/xen/test_functions.sh +++ b/tools/xen/test_functions.sh @@ -23,15 +23,27 @@ function before_each_test { LIST_OF_ACTIONS=$(mktemp) truncate -s 0 $LIST_OF_ACTIONS + + XE_RESPONSE=$(mktemp) + truncate -s 0 $XE_RESPONSE + + XE_CALLS=$(mktemp) + truncate -s 0 $XE_CALLS } # Teardown function after_each_test { rm -f $LIST_OF_DIRECTORIES rm -f $LIST_OF_ACTIONS + rm -f $XE_RESPONSE + rm -f $XE_CALLS } # Helpers +function setup_xe_response { + echo "$1" > $XE_RESPONSE +} + function given_directory_exists { echo "$1" >> $LIST_OF_DIRECTORIES } @@ -44,6 +56,30 @@ function assert_previous_command_failed { [ "$?" 
!= "0" ] || exit 1 } +function assert_xe_min { + grep -qe "^--minimal\$" $XE_CALLS +} + +function assert_xe_param { + grep -qe "^$1\$" $XE_CALLS +} + +function mock_out { + local FNNAME="$1" + local OUTPUT="$2" + + . <(cat << EOF +function $FNNAME { + echo "$OUTPUT" +} +EOF +) +} + +function assert_symlink { + grep -qe "^ln -s $2 $1\$" $LIST_OF_ACTIONS +} + # Tests function test_plugin_directory_on_xenserver { given_directory_exists "/etc/xapi.d/plugins/" @@ -80,9 +116,26 @@ function test_zip_snapshot_location { } function test_create_directory_for_kernels { - (. mocks && create_directory_for_kernels) + ( + . mocks + mock_out get_local_sr uuid1 + create_directory_for_kernels + ) + + assert_directory_exists "/var/run/sr-mount/uuid1/os-guest-kernels" + assert_symlink "/boot/guest" "/var/run/sr-mount/uuid1/os-guest-kernels" +} + +function test_create_directory_for_kernels_existing_dir { + ( + . mocks + given_directory_exists "/boot/guest" + create_directory_for_kernels + ) - assert_directory_exists "/boot/guest" + diff -u $LIST_OF_ACTIONS - << EOF +[ -d /boot/guest ] +EOF } function test_extract_remote_zipball { @@ -107,6 +160,23 @@ function test_find_nova_plugins { rm -rf $tmpdir } +function test_get_local_sr { + setup_xe_response "uuid123" + + local RESULT=$(. 
mocks && get_local_sr) + + [ "$RESULT" == "uuid123" ] + + assert_xe_min + assert_xe_param "sr-list" "name-label=Local storage" +} + +function test_get_local_sr_path { + local RESULT=$(mock_out get_local_sr "uuid1" && get_local_sr_path) + + [ "/var/run/sr-mount/uuid1" == "$RESULT" ] +} + # Test runner [ "$1" = "" ] && { grep -e "^function *test_" $0 | cut -d" " -f2 From 7a8d852759ccc01fac590226c7c1bc4884ba36fd Mon Sep 17 00:00:00 2001 From: "Baodong (Robert) Li" Date: Thu, 21 Mar 2013 06:16:55 -0700 Subject: [PATCH 0039/4704] Support multiple config file options on quantum-server command line fix bug #1156831 Change-Id: Id05929c3114bc385c09fc25179cc15a358d38189 --- lib/quantum | 34 +++++++++++++++++++++++++++++++--- 1 file changed, 31 insertions(+), 3 deletions(-) diff --git a/lib/quantum b/lib/quantum index 68c0539874..e2a0d5349f 100644 --- a/lib/quantum +++ b/lib/quantum @@ -92,6 +92,9 @@ Q_ALLOW_OVERLAPPING_IP=${Q_ALLOW_OVERLAPPING_IP:-True} Q_USE_DEBUG_COMMAND=${Q_USE_DEBUG_COMMAND:-False} # The name of the default q-l3 router Q_ROUTER_NAME=${Q_ROUTER_NAME:-router1} +# List of config file names in addition to the main plugin config file +# See _configure_quantum_common() for details about setting it up +declare -a Q_PLUGIN_EXTRA_CONF_FILES if is_service_enabled quantum; then Q_RR_CONF_FILE=$QUANTUM_CONF_DIR/rootwrap.conf @@ -358,8 +361,14 @@ function install_quantum_agent_packages() { # Start running processes, including screen function start_quantum_service_and_check() { + # build config-file options + local cfg_file + local CFG_FILE_OPTIONS="--config-file $QUANTUM_CONF --config-file /$Q_PLUGIN_CONF_FILE" + for cfg_file in ${Q_PLUGIN_EXTRA_CONF_FILES[@]}; do + CFG_FILE_OPTIONS+=" --config-file /$cfg_file" + done # Start the Quantum service - screen_it q-svc "cd $QUANTUM_DIR && python $QUANTUM_DIR/bin/quantum-server --config-file $QUANTUM_CONF --config-file /$Q_PLUGIN_CONF_FILE" + screen_it q-svc "cd $QUANTUM_DIR && python $QUANTUM_DIR/bin/quantum-server 
$CFG_FILE_OPTIONS" echo "Waiting for Quantum to start..." if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- http://$Q_HOST:$Q_PORT; do sleep 1; done"; then die $LINENO "Quantum did not start" @@ -405,8 +414,11 @@ function _configure_quantum_common() { cp $QUANTUM_DIR/etc/quantum.conf $QUANTUM_CONF - # set plugin-specific variables - # Q_PLUGIN_CONF_PATH, Q_PLUGIN_CONF_FILENAME, Q_DB_NAME, Q_PLUGIN_CLASS + # Set plugin-specific variables Q_DB_NAME, Q_PLUGIN_CLASS. + # For main plugin config file, set Q_PLUGIN_CONF_PATH, Q_PLUGIN_CONF_FILENAME. + # For addition plugin config files, set Q_PLUGIN_EXTRA_CONF_PATH, + # Q_PLUGIN_EXTRA_CONF_FILES. For example: + # Q_PLUGIN_EXTRA_CONF_FILES=(file1, file2) quantum_plugin_configure_common if [[ $Q_PLUGIN_CONF_PATH == '' || $Q_PLUGIN_CONF_FILENAME == '' || $Q_PLUGIN_CLASS == '' ]]; then @@ -421,6 +433,22 @@ function _configure_quantum_common() { iniset /$Q_PLUGIN_CONF_FILE DATABASE sql_connection `database_connection_url $Q_DB_NAME` iniset $QUANTUM_CONF DEFAULT state_path $DATA_DIR/quantum + # If addition config files are set, make sure their path name is set as well + if [[ ${#Q_PLUGIN_EXTRA_CONF_FILES[@]} > 0 && $Q_PLUGIN_EXTRA_CONF_PATH == '' ]]; then + die $LINENO "Quantum additional plugin config not set.. 
exiting" + fi + + # If additional config files exist, copy them over to quantum configuration + # directory + if [[ $Q_PLUGIN_EXTRA_CONF_PATH != '' ]]; then + mkdir -p /$Q_PLUGIN_EXTRA_CONF_PATH + local f + for (( f=0; $f < ${#Q_PLUGIN_EXTRA_CONF_FILES[@]}; f+=1 )); do + Q_PLUGIN_EXTRA_CONF_FILES[$f]=$Q_PLUGIN_EXTRA_CONF_PATH/${Q_PLUGIN_EXTRA_CONF_FILES[$f]} + cp $QUANTUM_DIR/${Q_PLUGIN_EXTRA_CONF_FILES[$f]} /${Q_PLUGIN_EXTRA_CONF_FILES[$f]} + done + fi + _quantum_setup_rootwrap } From aacb01dc03b31591c2af205a68e559ff95392850 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Wed, 3 Apr 2013 09:31:20 -0700 Subject: [PATCH 0040/4704] Remove variable substituion to %NUM% to correctly invoke local command Referencing %NUM% must be a mistake, because with it networks are not deleted properly. See other commands within quantum-adv-test.sh as an example. Fixes bug #1164018 Change-Id: Ib222d82a1b8187167606b2543bb74c28b8465b5a --- exercises/quantum-adv-test.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/exercises/quantum-adv-test.sh b/exercises/quantum-adv-test.sh index a1fb2ad03c..fbb1b779f2 100755 --- a/exercises/quantum-adv-test.sh +++ b/exercises/quantum-adv-test.sh @@ -330,7 +330,7 @@ function delete_network { } function delete_networks { - foreach_tenant_net 'delete_network ${%TENANT%_NAME} ${%NUM%}' + foreach_tenant_net 'delete_network ${%TENANT%_NAME} %NUM%' #TODO(nati) add secuirty group check after it is implemented # source $TOP_DIR/openrc demo1 demo1 # nova secgroup-delete-rule default icmp -1 -1 0.0.0.0/0 From d857f4b4a4af2ed468c219d24da9d98416aff5d1 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Wed, 20 Mar 2013 14:51:06 +1100 Subject: [PATCH 0041/4704] Add rhel to DISTRO match Match RHEL and CentOS and provide them as a DISTRO prefix of "rhel" Change-Id: Ida19130ce9499d4d1fd2360fadc6b870b24bbc95 --- functions | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/functions b/functions index edc4bf9aef..445af5fa57 
100644 --- a/functions +++ b/functions @@ -408,6 +408,9 @@ function GetDistro() { else DISTRO="sle${os_RELEASE}sp${os_UPDATE}" fi + elif [[ "$os_VENDOR" =~ (Red Hat) || "$os_VENDOR" =~ (CentOS) ]]; then + # Drop the . release as we assume it's compatible + DISTRO="rhel${os_RELEASE::1}" else # Catch-all for now is Vendor + Release + Update DISTRO="$os_VENDOR-$os_RELEASE.$os_UPDATE" @@ -440,7 +443,6 @@ function is_fedora { [ "$os_VENDOR" = "Fedora" ] || [ "$os_VENDOR" = "Red Hat" ] || [ "$os_VENDOR" = "CentOS" ] } - # Determine if current distribution is a SUSE-based distribution # (openSUSE, SLE). # is_suse From d8246c2abb0ac32a0038ba65d2c18d7f81370328 Mon Sep 17 00:00:00 2001 From: Vincent Untz Date: Thu, 4 Apr 2013 09:22:02 +0200 Subject: [PATCH 0042/4704] Update dependencies on openSUSE openSUSE 12.3 comes with the DHCP lease utilities in the dnsmasq-utils package. Also drop the gcc dependency in n-api (like it was done for debian, see e9e80f9) Change-Id: Id045674946d7856f6364c2357f3e126b05651461 --- files/rpms-suse/n-api | 1 - files/rpms-suse/nova | 2 +- files/rpms-suse/quantum | 2 +- 3 files changed, 2 insertions(+), 3 deletions(-) diff --git a/files/rpms-suse/n-api b/files/rpms-suse/n-api index ad943ffdf8..0f08daace3 100644 --- a/files/rpms-suse/n-api +++ b/files/rpms-suse/n-api @@ -1,2 +1 @@ -gcc # temporary because this pulls in glance to get the client without running the glance prereqs python-dateutil diff --git a/files/rpms-suse/nova b/files/rpms-suse/nova index 04af7f3110..8a28e7d222 100644 --- a/files/rpms-suse/nova +++ b/files/rpms-suse/nova @@ -1,6 +1,6 @@ curl -# Note: we need to package dhcp_release in dnsmasq! dnsmasq +dnsmasq-utils # dist:opensuse-12.3 ebtables gawk genisoimage # required for config_drive diff --git a/files/rpms-suse/quantum b/files/rpms-suse/quantum index 068c15c22f..aadb156732 100644 --- a/files/rpms-suse/quantum +++ b/files/rpms-suse/quantum @@ -1,5 +1,5 @@ -# Note: we need to package dhcp_release in dnsmasq! 
dnsmasq +dnsmasq-utils # dist:opensuse-12.3 ebtables iptables iputils From 896eb666e8825b25ce86d3293b13aa5cff4ad781 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Fri, 5 Apr 2013 15:02:01 -0500 Subject: [PATCH 0043/4704] Add err()/err_if_not_set() * err() and err_if_not_set() do error-like reporting without aborting the script * die_if_not_set() now properly dies * add is_running() from Grenade Change-Id: I38b88112415a3c07e35bbc2dc65ad839c4d63fce --- functions | 73 ++++++++++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 61 insertions(+), 12 deletions(-) diff --git a/functions b/functions index 445af5fa57..95ae239be4 100644 --- a/functions +++ b/functions @@ -57,32 +57,64 @@ function cp_it { # die $LINENO "message" function die() { local exitcode=$? + set +o xtrace + local line=$1; shift if [ $exitcode == 0 ]; then exitcode=1 fi + err $line "$*" + exit $exitcode +} + + +# Checks an environment variable is not set or has length 0 OR if the +# exit code is non-zero and prints "message" and exits +# NOTE: env-var is the variable name without a '$' +# die_if_not_set $LINENO env-var "message" +function die_if_not_set() { + local exitcode=$? + FXTRACE=$(set +o | grep xtrace) + set +o xtrace + local line=$1; shift + local evar=$1; shift + if ! is_set $evar || [ $exitcode != 0 ]; then + die $line "$*" + fi + $FXTRACE +} + + +# Prints line number and "message" in error format +# err $LINENO "message" +function err() { + local exitcode=$? 
+ errXTRACE=$(set +o | grep xtrace) set +o xtrace local msg="[ERROR] $0:$1 $2" echo $msg 1>&2; if [[ -n ${SCREEN_LOGDIR} ]]; then echo $msg >> "${SCREEN_LOGDIR}/error.log" fi - exit $exitcode + $errXTRACE + return $exitcode } # Checks an environment variable is not set or has length 0 OR if the -# exit code is non-zero and prints "message" and exits +# exit code is non-zero and prints "message" # NOTE: env-var is the variable name without a '$' -# die_if_not_set $LINENO env-var "message" -function die_if_not_set() { - ( - local exitcode=$? - set +o xtrace - local evar=$2; shift - if ! is_set $evar || [ $exitcode != 0 ]; then - die $@ - fi - ) +# err_if_not_set $LINENO env-var "message" +function err_if_not_set() { + local exitcode=$? + errinsXTRACE=$(set +o | grep xtrace) + set +o xtrace + local line=$1; shift + local evar=$1; shift + if ! is_set $evar || [ $exitcode != 0 ]; then + err $line "$*" + fi + $errinsXTRACE + return $exitcode } @@ -538,6 +570,7 @@ function inicomment() { sed -i -e "/^\[$section\]/,/^\[.*\]/ s|^\($option[ \t]*=.*$\)|#\1|" "$file" } + # Uncomment an option in an INI file # iniuncomment config-file section option function iniuncomment() { @@ -559,6 +592,7 @@ function iniget() { echo ${line#*=} } + # Determinate is the given option present in the INI file # ini_has_option config-file section option function ini_has_option() { @@ -570,6 +604,7 @@ function ini_has_option() { [ -n "$line" ] } + # Set an option in an INI file # iniset config-file section option value function iniset() { @@ -592,6 +627,7 @@ $option = $value fi } + # Get a multiple line option from an INI file # iniget_multiline config-file section option function iniget_multiline() { @@ -603,6 +639,7 @@ function iniget_multiline() { echo ${values} } + # Set a multiple line option in an INI file # iniset_multiline config-file section option value1 value2 valu3 ... 
function iniset_multiline() { @@ -632,6 +669,7 @@ $option = $v done } + # Append a new option in an ini file without replacing the old value # iniadd config-file section option value1 value2 value3 ... function iniadd() { @@ -643,6 +681,17 @@ function iniadd() { iniset_multiline $file $section $option $values } +# Find out if a process exists by partial name. +# is_running name +function is_running() { + local name=$1 + ps auxw | grep -v grep | grep ${name} > /dev/null + RC=$? + # some times I really hate bash reverse binary logic + return $RC +} + + # is_service_enabled() checks if the service(s) specified as arguments are # enabled by the user in ``ENABLED_SERVICES``. # From c4193b22f3ef30c9975e53562c02173c638f5631 Mon Sep 17 00:00:00 2001 From: Arata Notsu Date: Mon, 8 Apr 2013 15:25:30 +0900 Subject: [PATCH 0044/4704] Use baremetal-interface-add, not baremetal-add-interface Since nova doesn't have such subcommand. Change-Id: I3c99de38f959418f82d45078c7784ba6ab60c383 --- lib/baremetal | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/baremetal b/lib/baremetal index 24cce9f7dd..17a967fe92 100644 --- a/lib/baremetal +++ b/lib/baremetal @@ -427,7 +427,7 @@ function add_baremetal_node() { "$mac_1" \ | grep ' id ' | get_field 2 ) [ $? -eq 0 ] || [ "$id" ] || die "Error adding baremetal node" - id2=$(nova baremetal-add-interface "$id" "$mac_2" ) + id2=$(nova baremetal-interface-add "$id" "$mac_2" ) [ $? -eq 0 ] || [ "$id2" ] || die "Error adding interface to barmetal node $id" } From 4a8496eca628f03944cd2abdce38bb6c5f521bdf Mon Sep 17 00:00:00 2001 From: Derek Morton Date: Mon, 8 Apr 2013 23:46:08 -0500 Subject: [PATCH 0045/4704] Linux Mint support This patch allows DevStack installation on Linux Mint. Installations on Linux Mint previously failed since its distribution ID did not match any of the package installation methods and defaulted to using rpm packages. 
Linux Mint is a Ubuntu derivative and such does not have native support for rpm packages. This change adds "LinuxMint" to the distribution matching options along with Debian and Ubuntu. Change-Id: Ice6a201cabe07373a9c1354699777835addeac53 Fixes: bug #1166422 --- functions | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/functions b/functions index 445af5fa57..9fdc391c5c 100644 --- a/functions +++ b/functions @@ -299,7 +299,7 @@ GetOSVersion() { os_RELEASE=$(lsb_release -r -s) os_UPDATE="" os_PACKAGE="rpm" - if [[ "Debian,Ubuntu" =~ $os_VENDOR ]]; then + if [[ "Debian,Ubuntu,LinuxMint" =~ $os_VENDOR ]]; then os_PACKAGE="deb" elif [[ "SUSE LINUX" =~ $os_VENDOR ]]; then lsb_release -d -s | grep -q openSUSE From a44d5ed1353745258fec1a0e2c9a778d1c1f77d9 Mon Sep 17 00:00:00 2001 From: Matthieu Huin Date: Mon, 8 Apr 2013 13:31:54 +0200 Subject: [PATCH 0046/4704] Set swift3 before s3token in proxy server middleware pipeline swift3 and s3token middlewares were not set in correct order in /etc/swift/proxy-server.conf when the swift3 service is enabled with keystone. swift3 must be set before s3token. Fixes: bug #1166123 Change-Id: I97c051af95cbdc26ccfe9753c5719394e1875dde --- lib/swift | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/swift b/lib/swift index eb57477ed7..ca9c37333d 100644 --- a/lib/swift +++ b/lib/swift @@ -169,7 +169,7 @@ function configure_swift() { # configured keystone it will configure swift with it. if is_service_enabled key;then if is_service_enabled swift3;then - swift_pipeline=" s3token swift3 " + swift_pipeline=" swift3 s3token " fi swift_pipeline+=" authtoken keystoneauth " else From 3ebb01adfe05326a96418bc97e6674a250601ccd Mon Sep 17 00:00:00 2001 From: mathieu-rohon Date: Tue, 9 Apr 2013 15:09:07 +0200 Subject: [PATCH 0047/4704] Enable debug logging in q-agt output when only q-agt is used, without q-svc, typically in a multi-host config, q-agt wasn't logging debug output. 
This change enable debug output, as it's done for q-l3, q-dhcp or q-meta agent. Change-Id: I1f49cf9994fa9cfbaa166dac8d4e713ba99c5674 Fixes: bug #1166775 --- lib/quantum | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/quantum b/lib/quantum index e2a0d5349f..b995a91162 100644 --- a/lib/quantum +++ b/lib/quantum @@ -538,6 +538,8 @@ function _configure_quantum_plugin_agent() { # Specify the default root helper prior to agent configuration to # ensure that an agent's configuration can override the default iniset /$Q_PLUGIN_CONF_FILE AGENT root_helper "$Q_RR_COMMAND" + iniset $QUANTUM_CONF DEFAULT verbose True + iniset $QUANTUM_CONF DEFAULT debug True # Configure agent for plugin quantum_plugin_configure_plugin_agent From cc6b4435458b5db6aed17631e4789c43d21ee8e5 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 8 Apr 2013 15:38:03 -0500 Subject: [PATCH 0048/4704] Formatting cleanups, doc updates and whatnot Change-Id: Ica8298353be22f947c8e8a03d8dc29ded9cb26dd --- HACKING.rst | 21 ++++++++++++------ README.md | 6 ++++-- exercise.sh | 8 +++---- exercises/client-args.sh | 13 ++++++------ exercises/client-env.sh | 13 ++++++------ functions | 6 +++--- lib/baremetal | 1 + lib/ceilometer | 9 ++++++-- lib/cinder | 5 +++++ lib/database | 10 ++++++++- lib/databases/mysql | 6 ++++++ lib/databases/postgresql | 6 ++++++ lib/glance | 5 +++-- lib/heat | 8 ++++++- lib/horizon | 4 ++++ lib/keystone | 4 ++-- lib/ldap | 10 ++++++--- lib/nova | 9 ++++---- lib/quantum | 9 ++++---- lib/rpc_backend | 6 ++++-- lib/swift | 4 ++-- lib/tempest | 5 +++-- lib/tls | 4 ++++ stack.sh | 46 +++++++++++++++++++++------------------- stackrc | 2 +- 25 files changed, 145 insertions(+), 75 deletions(-) diff --git a/HACKING.rst b/HACKING.rst index 6ad8c7e638..77194a3d41 100644 --- a/HACKING.rst +++ b/HACKING.rst @@ -7,8 +7,8 @@ General DevStack is written in POSIX shell script. 
This choice was made because it best illustrates the configuration steps that this implementation takes -on setting up and interacting with OpenStack components. DevStack specifies -BASH and is compatible with Bash 3. +on setting up and interacting with OpenStack components. DevStack specifically +uses Bash and is compatible with Bash 3. DevStack's official repository is located on GitHub at https://github.com/openstack-dev/devstack.git. Besides the master branch that @@ -30,9 +30,17 @@ work for DevStack's use cases. There is a subscript ``functions`` that contains generally useful shell functions and is used by a number of the scripts in DevStack. +The ``lib`` directory contains sub-scripts for projects or packages that ``stack.sh`` +sources to perform much of the work related to those projects. These sub-scripts +contain configuration defaults and functions to configure, start and stop the project +or package. These variables and functions are also used by related projects, +such as Grenade, to manage a DevStack installation. + A number of additional scripts can be found in the ``tools`` directory that may -be useful in setting up special-case uses of DevStack. These include: bare metal -deployment, ramdisk deployment and Jenkins integration. +be useful in supporting DevStack installations. Of particular note are ``info.sh`` +to collect and report information about the installed system, and ``instal_prereqs.sh`` +that handles installation of the prerequisite packages for DevStack. It is +suitable, for example, to pre-load a system for making a snapshot. Scripts @@ -63,8 +71,8 @@ configuration of the user environment:: source $TOP_DIR/openrc ``stack.sh`` is a rather large monolithic script that flows through from beginning -to end. The process of breaking it down into project-level sub-scripts is nearly -complete and should make ``stack.sh`` easier to read and manage. +to end. 
It has been broken down into project-specific subscripts (as noted above) +located in ``lib`` to make ``stack.sh`` more manageable and to promote code reuse. These library sub-scripts have a number of fixed entry points, some of which may just be stubs. These entry points will be called by ``stack.sh`` in the @@ -112,6 +120,7 @@ Also, variable declarations in ``stackrc`` do NOT allow overriding (the form ``FOO=${FOO:-baz}``); if they did then they can already be changed in ``localrc`` and can stay in the project file. + Documentation ------------- diff --git a/README.md b/README.md index d8538c2583..6570a14351 100644 --- a/README.md +++ b/README.md @@ -120,14 +120,16 @@ In order to enable Quantum a single node setup, you'll need the following settin # Optional, to enable tempest configuration as part of devstack enable_service tempest -Then run stack.sh as normal. +Then run `stack.sh` as normal. + +# Tempest If tempest has been successfully configured, a basic set of smoke tests can be run as follows: $ cd /opt/stack/tempest $ nosetests tempest/tests/network/test_network_basic_ops.py -Multi-Node Setup +# Multi-Node Setup A more interesting setup involves running multiple compute nodes, with Quantum networks connecting VMs on different compute nodes. 
You should run at least one "controller node", which should have a `stackrc` that includes at least: diff --git a/exercise.sh b/exercise.sh index 3516738549..ce694fba66 100755 --- a/exercise.sh +++ b/exercise.sh @@ -23,7 +23,7 @@ basenames=${RUN_EXERCISES:-""} EXERCISE_DIR=$TOP_DIR/exercises -if [ -z "${basenames}" ] ; then +if [[ -z "${basenames}" ]]; then # Locate the scripts we should run basenames=$(for b in `ls $EXERCISE_DIR/*.sh`; do basename $b .sh; done) else @@ -38,7 +38,7 @@ skips="" # Loop over each possible script (by basename) for script in $basenames; do - if [[ ,$SKIP_EXERCISES, =~ ,$script, ]] ; then + if [[ ,$SKIP_EXERCISES, =~ ,$script, ]]; then skips="$skips $script" else echo "=====================================================================" @@ -48,7 +48,7 @@ for script in $basenames; do exitcode=$? if [[ $exitcode == 55 ]]; then skips="$skips $script" - elif [[ $exitcode -ne 0 ]] ; then + elif [[ $exitcode -ne 0 ]]; then failures="$failures $script" else passes="$passes $script" @@ -69,6 +69,6 @@ for script in $failures; do done echo "=====================================================================" -if [ -n "$failures" ] ; then +if [[ -n "$failures" ]]; then exit 1 fi diff --git a/exercises/client-args.sh b/exercises/client-args.sh index 1e92500701..28f4123863 100755 --- a/exercises/client-args.sh +++ b/exercises/client-args.sh @@ -67,7 +67,7 @@ RETURN=0 # Keystone client # --------------- if [[ "$ENABLED_SERVICES" =~ "key" ]]; then - if [[ "$SKIP_EXERCISES" =~ "key" ]] ; then + if [[ "$SKIP_EXERCISES" =~ "key" ]]; then STATUS_KEYSTONE="Skipped" else echo -e "\nTest Keystone" @@ -84,7 +84,7 @@ fi # ----------- if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then - if [[ "$SKIP_EXERCISES" =~ "n-api" ]] ; then + if [[ "$SKIP_EXERCISES" =~ "n-api" ]]; then STATUS_NOVA="Skipped" STATUS_EC2="Skipped" else @@ -103,7 +103,7 @@ fi # ------------- if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then - if [[ "$SKIP_EXERCISES" =~ "c-api" ]] ; then + if [[ 
"$SKIP_EXERCISES" =~ "c-api" ]]; then STATUS_CINDER="Skipped" else echo -e "\nTest Cinder" @@ -120,7 +120,7 @@ fi # ------------- if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then - if [[ "$SKIP_EXERCISES" =~ "g-api" ]] ; then + if [[ "$SKIP_EXERCISES" =~ "g-api" ]]; then STATUS_GLANCE="Skipped" else echo -e "\nTest Glance" @@ -137,7 +137,7 @@ fi # ------------ if [[ "$ENABLED_SERVICES" =~ "swift" || "$ENABLED_SERVICES" =~ "s-proxy" ]]; then - if [[ "$SKIP_EXERCISES" =~ "swift" ]] ; then + if [[ "$SKIP_EXERCISES" =~ "swift" ]]; then STATUS_SWIFT="Skipped" else echo -e "\nTest Swift" @@ -152,8 +152,9 @@ fi set +o xtrace + # Results -# ------- +# ======= function report() { if [[ -n "$2" ]]; then diff --git a/exercises/client-env.sh b/exercises/client-env.sh index dd8e56e22f..147fdfcfea 100755 --- a/exercises/client-env.sh +++ b/exercises/client-env.sh @@ -60,7 +60,7 @@ RETURN=0 # Keystone client # --------------- if [[ "$ENABLED_SERVICES" =~ "key" ]]; then - if [[ "$SKIP_EXERCISES" =~ "key" ]] ; then + if [[ "$SKIP_EXERCISES" =~ "key" ]]; then STATUS_KEYSTONE="Skipped" else echo -e "\nTest Keystone" @@ -77,7 +77,7 @@ fi # ----------- if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then - if [[ "$SKIP_EXERCISES" =~ "n-api" ]] ; then + if [[ "$SKIP_EXERCISES" =~ "n-api" ]]; then STATUS_NOVA="Skipped" STATUS_EC2="Skipped" else @@ -111,7 +111,7 @@ fi # ------------- if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then - if [[ "$SKIP_EXERCISES" =~ "c-api" ]] ; then + if [[ "$SKIP_EXERCISES" =~ "c-api" ]]; then STATUS_CINDER="Skipped" else echo -e "\nTest Cinder" @@ -128,7 +128,7 @@ fi # ------------- if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then - if [[ "$SKIP_EXERCISES" =~ "g-api" ]] ; then + if [[ "$SKIP_EXERCISES" =~ "g-api" ]]; then STATUS_GLANCE="Skipped" else echo -e "\nTest Glance" @@ -146,7 +146,7 @@ fi if [[ "$ENABLED_SERVICES" =~ "swift" || "$ENABLED_SERVICES" =~ "s-proxy" ]]; then - if [[ "$SKIP_EXERCISES" =~ "swift" ]] ; then + if [[ "$SKIP_EXERCISES" =~ "swift" ]]; then 
STATUS_SWIFT="Skipped" else echo -e "\nTest Swift" @@ -161,8 +161,9 @@ fi set +o xtrace + # Results -# ------- +# ======= function report() { if [[ -n "$2" ]]; then diff --git a/functions b/functions index 9fdc391c5c..f6226176ba 100644 --- a/functions +++ b/functions @@ -830,7 +830,7 @@ function pip_install { if [[ -z "$os_PACKAGE" ]]; then GetOSVersion fi - if [[ $TRACK_DEPENDS = True ]] ; then + if [[ $TRACK_DEPENDS = True ]]; then source $DEST/.venv/bin/activate CMD_PIP=$DEST/.venv/bin/pip SUDO_PIP="env" @@ -1005,7 +1005,7 @@ function service_check() { # Uses globals ``TRACK_DEPENDES``, ``*_proxy` # setup_develop directory function setup_develop() { - if [[ $TRACK_DEPENDS = True ]] ; then + if [[ $TRACK_DEPENDS = True ]]; then SUDO_CMD="env" else SUDO_CMD="sudo" @@ -1288,7 +1288,7 @@ function _ssh_check_novanet() { local DEFAULT_INSTANCE_USER=$4 local ACTIVE_TIMEOUT=$5 local probe_cmd="" - if ! timeout $ACTIVE_TIMEOUT sh -c "while ! ssh -o StrictHostKeyChecking=no -i $KEY_FILE ${DEFAULT_INSTANCE_USER}@$FLOATING_IP echo success ; do sleep 1; done"; then + if ! timeout $ACTIVE_TIMEOUT sh -c "while ! ssh -o StrictHostKeyChecking=no -i $KEY_FILE ${DEFAULT_INSTANCE_USER}@$FLOATING_IP echo success; do sleep 1; done"; then die $LINENO "server didn't become ssh-able!" 
fi } diff --git a/lib/baremetal b/lib/baremetal index 17a967fe92..8658c3aa17 100644 --- a/lib/baremetal +++ b/lib/baremetal @@ -53,6 +53,7 @@ XTRACE=$(set +o | grep xtrace) set +o xtrace + # Sub-driver settings # ------------------- diff --git a/lib/ceilometer b/lib/ceilometer index f7d14d547f..6b110cbb0c 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -37,12 +37,16 @@ CEILOMETER_API_LOG_DIR=/var/log/ceilometer-api CEILOMETER_AUTH_CACHE_DIR=${CEILOMETER_AUTH_CACHE_DIR:-/var/cache/ceilometer} # Support potential entry-points console scripts -if [ -d $CEILOMETER_DIR/bin ] ; then +if [[ -d $CEILOMETER_DIR/bin ]]; then CEILOMETER_BIN_DIR=$CEILOMETER_DIR/bin else CEILOMETER_BIN_DIR=$(get_python_exec_prefix) fi + +# Functions +# --------- + # cleanup_ceilometer() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_ceilometer() { @@ -93,7 +97,7 @@ function configure_ceilometer() { } function configure_mongodb() { - if is_fedora ; then + if is_fedora; then # ensure smallfiles selected to minimize freespace requirements sudo sed -i '/--smallfiles/!s/OPTIONS=\"/OPTIONS=\"--smallfiles /' /etc/sysconfig/mongod @@ -135,6 +139,7 @@ function stop_ceilometer() { done } + # Restore xtrace $XTRACE diff --git a/lib/cinder b/lib/cinder index deace68277..d621e69a68 100644 --- a/lib/cinder +++ b/lib/cinder @@ -64,6 +64,10 @@ VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes} VOLUME_GROUP2=${VOLUME_GROUP2:-stack-volumes2} VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-} + +# Functions +# --------- + # _clean_volume_group removes all cinder volumes from the specified volume group # _clean_volume_group $VOLUME_GROUP $VOLUME_NAME_PREFIX function _clean_volume_group() { @@ -432,6 +436,7 @@ function stop_cinder() { fi } + # Restore xtrace $XTRACE diff --git a/lib/database b/lib/database index cbe886f5c8..e63d5e240d 100644 --- a/lib/database +++ b/lib/database @@ -20,14 +20,18 @@ XTRACE=$(set +o | grep xtrace) set +o 
xtrace + # Register a database backend # $1 The name of the database backend +# This is required to be defined before the specific database scripts are sourced function register_database { [ -z "$DATABASE_BACKENDS" ] && DATABASE_BACKENDS=$1 || DATABASE_BACKENDS+=" $1" } # Sourcing the database libs sets DATABASE_BACKENDS with the available list -for f in $TOP_DIR/lib/databases/*; do source $f; done +for f in $TOP_DIR/lib/databases/*; do + source $f; +done # ``DATABASE_BACKENDS`` now contains a list of the supported databases # Look in ``ENABLED_SERVICES`` to see if one has been selected @@ -42,6 +46,9 @@ done # This is not an error as multi-node installs will do this on the compute nodes +# Functions +# --------- + # Get rid of everything enough to cleanly change database backends function cleanup_database { cleanup_database_$DATABASE_TYPE @@ -112,6 +119,7 @@ function database_connection_url { database_connection_url_$DATABASE_TYPE $var $db } + # Restore xtrace $XTRACE diff --git a/lib/databases/mysql b/lib/databases/mysql index 30450b1caf..056aec46ba 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -8,8 +8,13 @@ MY_XTRACE=$(set +o | grep xtrace) set +o xtrace + register_database mysql + +# Functions +# --------- + # Get rid of everything enough to cleanly change database backends function cleanup_database_mysql { if is_ubuntu; then @@ -137,6 +142,7 @@ function database_connection_url_mysql { echo "$BASE_SQL_CONN/$db?charset=utf8" } + # Restore xtrace $MY_XTRACE diff --git a/lib/databases/postgresql b/lib/databases/postgresql index b64de2c95e..b173772170 100644 --- a/lib/databases/postgresql +++ b/lib/databases/postgresql @@ -8,8 +8,13 @@ PG_XTRACE=$(set +o | grep xtrace) set +o xtrace + register_database postgresql + +# Functions +# --------- + # Get rid of everything enough to cleanly change database backends function cleanup_database_postgresql { stop_service postgresql @@ -88,6 +93,7 @@ function database_connection_url_postgresql { echo 
"$BASE_SQL_CONN/$db?client_encoding=utf8" } + # Restore xtrace $PG_XTRACE diff --git a/lib/glance b/lib/glance index 3376400035..583f879555 100644 --- a/lib/glance +++ b/lib/glance @@ -51,8 +51,8 @@ fi GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$SERVICE_HOST:9292} -# Entry Points -# ------------ +# Functions +# --------- # cleanup_glance() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up @@ -199,6 +199,7 @@ function stop_glance() { screen -S $SCREEN_NAME -p g-reg -X kill } + # Restore xtrace $XTRACE diff --git a/lib/heat b/lib/heat index 88535c352e..32c0182c01 100644 --- a/lib/heat +++ b/lib/heat @@ -25,9 +25,14 @@ set +o xtrace # Defaults # -------- + +# set up default directories HEAT_DIR=$DEST/heat HEATCLIENT_DIR=$DEST/python-heatclient -# set up default directories + + +# Functions +# --------- # cleanup_heat() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up @@ -182,6 +187,7 @@ function stop_heat() { done } + # Restore xtrace $XTRACE diff --git a/lib/horizon b/lib/horizon index b63e1f8b01..94aac5c979 100644 --- a/lib/horizon +++ b/lib/horizon @@ -38,6 +38,10 @@ HORIZON_SETTINGS=${HORIZON_SETTINGS:-$HORIZON_DIR/openstack_dashboard/local/loca APACHE_USER=${APACHE_USER:-$USER} APACHE_GROUP=${APACHE_GROUP:-$(id -gn $APACHE_USER)} + +# Functions +# --------- + # utility method of setting python option function _horizon_config_set() { local file=$1 diff --git a/lib/keystone b/lib/keystone index 0fbc7d709a..6bf4d9fde4 100644 --- a/lib/keystone +++ b/lib/keystone @@ -63,8 +63,8 @@ KEYSTONE_SERVICE_PROTOCOL=${KEYSTONE_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} SERVICE_TENANT_NAME=${SERVICE_TENANT_NAME:-service} -# Entry Points -# ------------ +# Functions +# --------- # cleanup_keystone() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up diff --git a/lib/ldap b/lib/ldap index 53f68377b5..89b31b2c25 
100644 --- a/lib/ldap +++ b/lib/ldap @@ -1,13 +1,17 @@ # lib/ldap # Functions to control the installation and configuration of **ldap** -# ``stack.sh`` calls the entry points in this order: -# +# ``lib/keystone`` calls the entry points in this order: +# install_ldap() # Save trace setting XTRACE=$(set +o | grep xtrace) set +o xtrace + +# Functions +# --------- + # install_ldap # install_ldap() - Collect source and prepare function install_ldap() { @@ -44,7 +48,7 @@ function install_ldap() { fi # add our top level ldap nodes - if ldapsearch -x -w $LDAP_PASSWORD -H ldap://localhost -D dc=Manager,dc=openstack,dc=org -x -b dc=openstack,dc=org | grep -q "Success" ; then + if ldapsearch -x -w $LDAP_PASSWORD -H ldap://localhost -D dc=Manager,dc=openstack,dc=org -x -b dc=openstack,dc=org | grep -q "Success"; then printf "LDAP already configured for OpenStack\n" if [[ "$KEYSTONE_CLEAR_LDAP" == "yes" ]]; then # clear LDAP state diff --git a/lib/nova b/lib/nova index 8d045b5aa9..ea73badb58 100644 --- a/lib/nova +++ b/lib/nova @@ -122,8 +122,8 @@ TEST_FLOATING_POOL=${TEST_FLOATING_POOL:-test} TEST_FLOATING_RANGE=${TEST_FLOATING_RANGE:-192.168.253.0/29} -# Entry Points -# ------------ +# Functions +# --------- function add_nova_opt { echo "$1" >>$NOVA_CONF @@ -276,7 +276,7 @@ function configure_nova() { configure_baremetal_nova_dirs fi - if is_service_enabled quantum && is_quantum_ovs_base_plugin && ! sudo grep -q '^cgroup_device_acl' $QEMU_CONF ; then + if is_service_enabled quantum && is_quantum_ovs_base_plugin && ! sudo grep -q '^cgroup_device_acl' $QEMU_CONF; then # Add /dev/net/tun to cgroup_device_acls, needed for type=ethernet interfaces cat </dev/null } + # Local variables: # mode: shell-script # End: diff --git a/stack.sh b/stack.sh index 62309dc1ae..8c92ea6cf9 100755 --- a/stack.sh +++ b/stack.sh @@ -12,7 +12,7 @@ # developer install. 
# To keep this script simple we assume you are running on a recent **Ubuntu** -# (11.10 Oneiric or newer) or **Fedora** (F16 or newer) machine. It +# (12.04 Precise or newer) or **Fedora** (F16 or newer) machine. It # should work in a VM or physical server. Additionally we put the list of # ``apt`` and ``rpm`` dependencies and other configuration files in this repo. @@ -51,8 +51,8 @@ GetDistro # be overwritten by a DevStack update. # # DevStack distributes ``stackrc`` which contains locations for the OpenStack -# repositories and branches to configure. ``stackrc`` sources ``localrc`` to -# allow you to safely override those settings. +# repositories, branches to configure, and other configuration defaults. +# ``stackrc`` sources ``localrc`` to allow you to safely override those settings. if [[ ! -r $TOP_DIR/stackrc ]]; then log_error $LINENO "missing $TOP_DIR/stackrc - did you grab more than just stack.sh?" @@ -78,6 +78,19 @@ if [[ -r $TOP_DIR/.stackenv ]]; then rm $TOP_DIR/.stackenv fi +# ``stack.sh`` keeps the list of ``apt`` and ``rpm`` dependencies and config +# templates and other useful files in the ``files`` subdirectory +FILES=$TOP_DIR/files +if [ ! -d $FILES ]; then + log_error $LINENO "missing devstack/files" +fi + +# ``stack.sh`` keeps function libraries here +# Make sure ``$TOP_DIR/lib`` directory is present +if [ ! -d $TOP_DIR/lib ]; then + log_error $LINENO "missing devstack/lib" +fi + # Import common services (database, message queue) configuration source $TOP_DIR/lib/database source $TOP_DIR/lib/rpc_backend @@ -100,21 +113,9 @@ fi # and the specified rpc backend is available on your platform. check_rpc_backend -# ``stack.sh`` keeps function libraries here -# Make sure ``$TOP_DIR/lib`` directory is present -if [ ! 
-d $TOP_DIR/lib ]; then - log_error $LINENO "missing devstack/lib" -fi - -# ``stack.sh`` keeps the list of ``apt`` and ``rpm`` dependencies and config -# templates and other useful files in the ``files`` subdirectory -FILES=$TOP_DIR/files -if [ ! -d $FILES ]; then - log_error $LINENO "missing devstack/files" -fi - SCREEN_NAME=${SCREEN_NAME:-stack} # Check to see if we are already running DevStack +# Note that this may fail if USE_SCREEN=False if type -p screen >/dev/null && screen -ls | egrep -q "[0-9].$SCREEN_NAME"; then echo "You are already running a stack.sh session." echo "To rejoin this session type 'screen -x stack'." @@ -230,6 +231,8 @@ fi # Allow the use of an alternate hostname (such as localhost/127.0.0.1) for service endpoints. SERVICE_HOST=${SERVICE_HOST:-$HOST_IP} + +# Allow the use of an alternate protocol (such as https) for service endpoints SERVICE_PROTOCOL=${SERVICE_PROTOCOL:-http} # Configure services to use syslog instead of writing to individual log files @@ -241,7 +244,6 @@ SYSLOG_PORT=${SYSLOG_PORT:-516} SYSSTAT_FILE=${SYSSTAT_FILE:-"sysstat.dat"} SYSSTAT_INTERVAL=${SYSSTAT_INTERVAL:-"1"} - # Use color for logging output (only available if syslog is not used) LOG_COLOR=`trueorfalse True $LOG_COLOR` @@ -267,7 +269,6 @@ source $TOP_DIR/lib/baremetal source $TOP_DIR/lib/ldap # Set the destination directories for OpenStack projects -HORIZON_DIR=$DEST/horizon OPENSTACKCLIENT_DIR=$DEST/python-openstackclient @@ -545,7 +546,7 @@ fi TRACK_DEPENDS=${TRACK_DEPENDS:-False} # Install python packages into a virtualenv so that we can track them -if [[ $TRACK_DEPENDS = True ]] ; then +if [[ $TRACK_DEPENDS = True ]]; then echo_summary "Installing Python packages into a virtualenv $DEST/.venv" install_package python-virtualenv @@ -651,9 +652,9 @@ if is_service_enabled tls-proxy; then # don't be naive and add to existing line! 
fi -if [[ $TRACK_DEPENDS = True ]] ; then +if [[ $TRACK_DEPENDS = True ]]; then $DEST/.venv/bin/pip freeze > $DEST/requires-post-pip - if ! diff -Nru $DEST/requires-pre-pip $DEST/requires-post-pip > $DEST/requires.diff ; then + if ! diff -Nru $DEST/requires-pre-pip $DEST/requires-post-pip > $DEST/requires.diff; then cat $DEST/requires.diff fi echo "Ran stack.sh in depend tracking mode, bailing out now" @@ -719,10 +720,10 @@ if [[ -e $SCREENRC ]]; then echo -n > $SCREENRC fi - # Initialize the directory for service status check init_service_check + # Kick off Sysstat # ------------------------ # run sysstat if it is enabled, this has to be early as daemon @@ -735,6 +736,7 @@ if is_service_enabled sysstat;then fi fi + # Keystone # -------- diff --git a/stackrc b/stackrc index 3a17e4aa54..c55e8dc78e 100644 --- a/stackrc +++ b/stackrc @@ -49,6 +49,7 @@ if [ -f $RC_DIR/localrc ]; then source $RC_DIR/localrc fi + # Repositories # ------------ @@ -156,7 +157,6 @@ BM_IMAGE_BUILD_BRANCH=${BM_IMAGE_BUILD_BRANCH:-master} BM_POSEUR_REPO=${BM_POSEUR_REPO:-${GIT_BASE}/tripleo/bm_poseur.git} BM_POSEUR_BRANCH=${BM_POSEUR_BRANCH:-master} - # Nova hypervisor configuration. We default to libvirt with **kvm** but will # drop back to **qemu** if we are unable to load the kvm module. ``stack.sh`` can # also install an **LXC** or **OpenVZ** based system. 
From 4e971118ff410407ead7b577cc31bf7341555746 Mon Sep 17 00:00:00 2001 From: Jason Dunsmore Date: Wed, 10 Apr 2013 10:17:40 -0500 Subject: [PATCH 0049/4704] Make sure custom grep options don't get in the way Change-Id: I3c8fa21793906d80790c31ed02f585eff35a64a5 --- stack.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/stack.sh b/stack.sh index 8c92ea6cf9..497e8a1b71 100755 --- a/stack.sh +++ b/stack.sh @@ -18,6 +18,9 @@ # Learn more and get the most recent version at http://devstack.org +# Make sure custom grep options don't get in the way +unset GREP_OPTIONS + # Keep track of the devstack directory TOP_DIR=$(cd $(dirname "$0") && pwd) From 623a0a58f6db70dd563c951bd601c18e6a1eb524 Mon Sep 17 00:00:00 2001 From: Vincent Untz Date: Thu, 11 Apr 2013 08:41:27 +0200 Subject: [PATCH 0050/4704] Do not install mysql if mariadb is installed on openSUSE mariadb and mysql are conflicting on a package level, but are compatible for our needs. So if mariadb is already installed, do not try to install mysql. Change-Id: I3aa991c1c4691df3e3f4798505668da3ab908998 --- lib/databases/mysql | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/databases/mysql b/lib/databases/mysql index 056aec46ba..086fe86287 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -131,7 +131,9 @@ EOF if is_ubuntu || is_fedora; then install_package mysql-server elif is_suse; then - install_package mysql-community-server + if ! is_package_installed mariadb; then + install_package mysql-community-server + fi else exit_distro_not_supported "mysql installation" fi From 18225d92c0ef8d56dc7df3177eaed4860d97604b Mon Sep 17 00:00:00 2001 From: Steve Baker Date: Sun, 14 Apr 2013 12:48:41 -0700 Subject: [PATCH 0051/4704] link /etc/tgt/stack.d to cinder volumes directory This creates /etc/tgt/stack.d as a symlink to the cinder volumes directory and includes it in /etc/tgt/targets.conf in a similar way to /etc/tgt/conf.d targets.conf only gets appended to when the symlink is created. 
Any old /etc/tgt/conf.d/stack.conf is deleted Change-Id: Ifb6412f6860ceb28b724644fb9b618638f552102 Fixes: bug #1072121 --- lib/cinder | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/lib/cinder b/lib/cinder index deace68277..71210eb9ef 100644 --- a/lib/cinder +++ b/lib/cinder @@ -373,21 +373,20 @@ function install_cinderclient() { setup_develop $CINDERCLIENT_DIR } -# apply config.d approach (e.g. Oneiric does not have this) +# apply config.d approach for cinder volumes directory function _configure_tgt_for_config_d() { - if [[ ! -d /etc/tgt/conf.d/ ]]; then - sudo mkdir -p /etc/tgt/conf.d - echo "include /etc/tgt/conf.d/*.conf" | sudo tee -a /etc/tgt/targets.conf + if [[ ! -d /etc/tgt/stack.d/ ]]; then + sudo ln -sf $CINDER_STATE_PATH/volumes /etc/tgt/stack.d + echo "include /etc/tgt/stack.d/*" | sudo tee -a /etc/tgt/targets.conf fi } # start_cinder() - Start running processes, including screen function start_cinder() { if is_service_enabled c-vol; then + # Delete any old stack.conf + sudo rm -f /etc/tgt/conf.d/stack.conf _configure_tgt_for_config_d - if [[ ! -f /etc/tgt/conf.d/stack.conf ]]; then - echo "include $CINDER_STATE_PATH/volumes/*" | sudo tee /etc/tgt/conf.d/stack.conf - fi if is_ubuntu; then # tgt in oneiric doesn't restart properly if tgtd isn't running # do it in two steps From 627d9c77e538bdcf972ff1c7d48dafd966b44d74 Mon Sep 17 00:00:00 2001 From: Sunil Thaha Date: Wed, 10 Apr 2013 14:11:44 +1000 Subject: [PATCH 0052/4704] Fixes running horizon on fedora 18 and above Fixes bug: 1167066 Summary: 1. Starting with fedora 18 the httpd.conf has AllowOverride none Require all denied which requires you to explicitly permit access to web content directories in other blocks 2. 
Install nodejs on fedora 18 and above Change-Id: I487a7a74bad6627d32c1081dcbe48630a704a106 --- files/apache-horizon.template | 1 + lib/horizon | 11 +++++++++++ 2 files changed, 12 insertions(+) diff --git a/files/apache-horizon.template b/files/apache-horizon.template index fb98471bb7..af880c4f51 100644 --- a/files/apache-horizon.template +++ b/files/apache-horizon.template @@ -17,6 +17,7 @@ Options Indexes FollowSymLinks MultiViews + %HORIZON_REQUIRE% AllowOverride None Order allow,deny allow from all diff --git a/lib/horizon b/lib/horizon index 94aac5c979..05bf6d3915 100644 --- a/lib/horizon +++ b/lib/horizon @@ -102,6 +102,7 @@ function init_horizon() { sudo mkdir -p $HORIZON_DIR/.blackhole + HORIZON_REQUIRE='' if is_ubuntu; then APACHE_NAME=apache2 APACHE_CONF=sites-available/horizon @@ -115,6 +116,12 @@ function init_horizon() { elif is_fedora; then APACHE_NAME=httpd APACHE_CONF=conf.d/horizon.conf + + if [[ "$os_RELEASE" -ge "18" ]]; then + # fedora 18 has Require all denied in its httpd.conf + # and requires explicit Require all granted + HORIZON_REQUIRE='Require all granted' + fi sudo sed '/^Listen/s/^.*$/Listen 0.0.0.0:80/' -i /etc/httpd/conf/httpd.conf elif is_suse; then APACHE_NAME=apache2 @@ -132,6 +139,7 @@ function init_horizon() { s,%HORIZON_DIR%,$HORIZON_DIR,g; s,%APACHE_NAME%,$APACHE_NAME,g; s,%DEST%,$DEST,g; + s,%HORIZON_REQUIRE%,$HORIZON_REQUIRE,g; \" $FILES/apache-horizon.template >/etc/$APACHE_NAME/$APACHE_CONF" } @@ -156,6 +164,9 @@ function install_horizon() { if [[ ! 
-e "/usr/bin/node" ]]; then install_package nodejs-legacy fi + elif is_fedora && [[ "$os_RELEASE" -ge "18" ]]; then + # fedora 18 and higher gets nodejs + install_package nodejs fi git_clone $HORIZON_REPO $HORIZON_DIR $HORIZON_BRANCH $HORIZON_TAG From 43a655c312de0c2f9bd0b053f3e0a0c5ef64476e Mon Sep 17 00:00:00 2001 From: Edgar Magana Date: Wed, 17 Apr 2013 15:11:04 -0700 Subject: [PATCH 0053/4704] Parameterized configuration variables for PLUMgrid plugin Fixes bug 1171028 Change-Id: Ie60ef8903001913996e265917c449bdce1e5aae9 --- lib/quantum_plugins/plumgrid | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/lib/quantum_plugins/plumgrid b/lib/quantum_plugins/plumgrid index b49aa92af2..912aa7ed80 100644 --- a/lib/quantum_plugins/plumgrid +++ b/lib/quantum_plugins/plumgrid @@ -25,8 +25,10 @@ function quantum_plugin_configure_common() { } function quantum_plugin_configure_service() { - iniset /$Q_PLUGIN_CONF_FILE PLUMgridNOS nos_server localhost - iniset /$Q_PLUGIN_CONF_FILE PLUMgridNOS nos_server_port 7766 + PLUMGRID_NOS_IP=${PLUMGRID_NOS_IP:-localhost} + PLUMGRID_NOS_PORT=${PLUMGRID_NOS_PORT:-7766} + iniset /$Q_PLUGIN_CONF_FILE PLUMgridNOS nos_server $PLUMGRID_NOS_IP + iniset /$Q_PLUGIN_CONF_FILE PLUMgridNOS nos_server_port $PLUMGRID_NOS_PORT } function quantum_plugin_configure_debug_command() { From aa8242970dc46da60d95ecbd3e8ee207409ff82f Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Tue, 2 Apr 2013 13:42:16 +1100 Subject: [PATCH 0054/4704] Add a generic post-prereq phase This generic extra phase is handy for fixing up things like python modules after pip is installed or other distro specific requirements. 
Change-Id: I4a68b830cd98c6f93191dac1edd7de2a0381feaa --- stack.sh | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/stack.sh b/stack.sh index 497e8a1b71..93b7281a84 100755 --- a/stack.sh +++ b/stack.sh @@ -538,6 +538,12 @@ source $TOP_DIR/tools/install_prereqs.sh install_rpc_backend +# a place for distro-specific post-prereq workarounds +if [[ -f $TOP_DIR/tools/${DISTRO}/post-prereq.sh ]]; then + echo_summary "Running ${DISTRO} extra prereq tasks" + source $TOP_DIR/tools/${DISTRO}/post-prereq.sh +fi + if is_service_enabled $DATABASE_BACKENDS; then install_database fi From d67dd87bfb59e602f564abbdbe9dee8cc8214a9e Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Thu, 11 Apr 2013 11:14:36 +1000 Subject: [PATCH 0055/4704] Skip pip mirrors for RHEL The RHEL6 version of pip doesn't have support for mirrors, so skip asking for them. Change-Id: Iaf2900067bb4b41f88d8fe82ea16b6b53d6bcc60 --- functions | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/functions b/functions index 88e4a62595..02c2b3a9c3 100644 --- a/functions +++ b/functions @@ -887,9 +887,18 @@ function pip_install { SUDO_PIP="sudo" CMD_PIP=$(get_pip_command) fi + + if [[ is_fedora && $DISTRO =~ (rhel6) ]]; then + # RHEL6 pip by default doesn't have this (was introduced + # around 0.8.1 or so) + PIP_USE_MIRRORS=${PIP_USE_MIRRORS:-False} + else + PIP_USE_MIRRORS=${PIP_USE_MIRRORS:-True} + fi if [[ "$PIP_USE_MIRRORS" != "False" ]]; then PIP_MIRROR_OPT="--use-mirrors" fi + $SUDO_PIP PIP_DOWNLOAD_CACHE=${PIP_DOWNLOAD_CACHE:-/var/cache/pip} \ HTTP_PROXY=$http_proxy \ HTTPS_PROXY=$https_proxy \ From 2578565d55c6ebee538b9d960da195f1dab0670d Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Thu, 11 Apr 2013 11:15:57 +1000 Subject: [PATCH 0056/4704] Add rhel6 as an allowed distro Add RHEL6 based distributions as a valid install target Change-Id: I89fb68d18a0d2079493be93b230162679123881d --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 
497e8a1b71..0461c49d2c 100755 --- a/stack.sh +++ b/stack.sh @@ -105,7 +105,7 @@ disable_negated_services # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -if [[ ! ${DISTRO} =~ (oneiric|precise|quantal|raring|f16|f17|f18|opensuse-12.2) ]]; then +if [[ ! ${DISTRO} =~ (oneiric|precise|quantal|raring|f16|f17|f18|opensuse-12.2|rhel6) ]]; then echo "WARNING: this script has not been tested on $DISTRO" if [[ "$FORCE" != "yes" ]]; then die $LINENO "If you wish to run this script anyway run with FORCE=yes" From c2fc5f890ee5cdc7f4f3957b61d4237bd1abda38 Mon Sep 17 00:00:00 2001 From: Chris Yeoh Date: Mon, 22 Apr 2013 10:33:07 +0930 Subject: [PATCH 0057/4704] Fix attach volume detect in euca test Fixes test for detecting when a volume has been successfully attached to an instance to not just rely on the state being in-use, but also be marked as "attached". The attachment state will be displayed when https://review.openstack.org/#/c/27098/ goes through. The attachment state is not currently displayed by euca-describe-volumes because of the extraneous data returned by the API as per bug #1074901. In the meantime the "attaching" status of the volume itself, rather than the attachment state, suffices for the test to work properly, but in the long term this will disappear as it is not a valid ec2 API state and volumes will move straight to the in-use state. Fixes bug #1170548 Change-Id: Id38f37e1a0efc991f60da35145f809d98b5f41cd --- exercises/euca.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/exercises/euca.sh b/exercises/euca.sh index 50d4744e69..d704279431 100755 --- a/exercises/euca.sh +++ b/exercises/euca.sh @@ -96,7 +96,7 @@ if [[ "$ENABLED_SERVICES" =~ "c-vol" ]]; then # Attach volume to an instance euca-attach-volume -i $INSTANCE -d $ATTACH_DEVICE $VOLUME || \ die $LINENO "Failure attaching volume $VOLUME to $INSTANCE" - if ! 
timeout $ACTIVE_TIMEOUT sh -c "while ! euca-describe-volumes $VOLUME | grep -q in-use; do sleep 1; done"; then + if ! timeout $ACTIVE_TIMEOUT sh -c "while ! euca-describe-volumes $VOLUME | grep -A 1 in-use | grep -q attach; do sleep 1; done"; then die $LINENO "Could not attach $VOLUME to $INSTANCE" fi From cd26151261425ab81e678a6f6b16011ab4ca8b2f Mon Sep 17 00:00:00 2001 From: Eric Harney Date: Wed, 10 Apr 2013 12:48:09 -0400 Subject: [PATCH 0058/4704] Add support for GlusterFS Cinder driver The GlusterFS volume driver can be used with Cinder by setting the following in localrc: CINDER_DRIVER=glusterfs CINDER_GLUSTERFS_SHARES="127.0.0.1:/vol1;127.0.0.1:/vol2" Shares are : and separated by semicolons. Change-Id: Iaa105233c9fce8d8fda0a9ea447e045b8d010db7 --- lib/cinder | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/lib/cinder b/lib/cinder index d621e69a68..ead471b3f7 100644 --- a/lib/cinder +++ b/lib/cinder @@ -234,6 +234,19 @@ function configure_cinder() { ) elif [ "$CINDER_DRIVER" == "sheepdog" ]; then iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.sheepdog.SheepdogDriver" + elif [ "$CINDER_DRIVER" == "glusterfs" ]; then + # To use glusterfs, set the following in localrc: + # CINDER_DRIVER=glusterfs + # CINDER_GLUSTERFS_SHARES="127.0.0.1:/vol1;127.0.0.1:/vol2" + # Shares are : and separated by semicolons. + + iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.glusterfs.GlusterfsDriver" + iniset $CINDER_CONF DEFAULT glusterfs_shares_config "$CINDER_CONF_DIR/glusterfs_shares" + touch $CINDER_CONF_DIR/glusterfs_shares + if [ ! 
-z "$CINDER_GLUSTERFS_SHARES" ]; then + CINDER_GLUSTERFS_SHARES=$(echo $CINDER_GLUSTERFS_SHARES | tr ";" "\n") + echo "$CINDER_GLUSTERFS_SHARES" > $CINDER_CONF_DIR/glusterfs_shares + fi fi } From 64dd03dd78b420a7983e2e36fb8ffb234c12e859 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Thu, 11 Apr 2013 12:01:09 +1000 Subject: [PATCH 0059/4704] Modify RPM lists for RHEL6 Modifications to the RPM list to make devstack work on RHEL6. Makes various packages only install on Fedora distros; generally letting pip install the missing dependencies on RHEL. Additionally the Qpid package name is different in RHEL6 to Fedora. Also a small re-write of the config file is required to avoid authentication issues. Change-Id: If497099d27aa9cd80e1c1cee1aff2ed6b076d309 --- files/rpms/glance | 2 +- files/rpms/horizon | 4 ++-- files/rpms/keystone | 12 +++++++----- files/rpms/nova | 8 +++++--- files/rpms/quantum | 8 +++++--- files/rpms/ryu | 2 +- files/rpms/swift | 4 ++-- lib/rpc_backend | 17 +++++++++++++++-- 8 files changed, 38 insertions(+), 19 deletions(-) diff --git a/files/rpms/glance b/files/rpms/glance index eff6c2c038..34e3f9ada1 100644 --- a/files/rpms/glance +++ b/files/rpms/glance @@ -3,7 +3,7 @@ python-argparse python-devel python-eventlet python-greenlet -python-paste-deploy +python-paste-deploy #dist:f16,f17,f18 python-routes python-sqlalchemy python-wsgiref diff --git a/files/rpms/horizon b/files/rpms/horizon index 12f75ba5a8..151e7e21af 100644 --- a/files/rpms/horizon +++ b/files/rpms/horizon @@ -17,8 +17,8 @@ python-migrate python-mox python-netaddr python-nose -python-paste -python-paste-deploy +python-paste #dist:f16,f17,f18 +python-paste-deploy #dist:f16,f17,f18 python-pep8 python-routes python-sphinx diff --git a/files/rpms/keystone b/files/rpms/keystone index 59868c7f2f..078adf7718 100644 --- a/files/rpms/keystone +++ b/files/rpms/keystone @@ -1,11 +1,13 @@ python-greenlet -python-lxml -python-paste -python-paste-deploy -python-paste-script +python-lxml 
#dist:f16,f17,f18 +python-paste #dist:f16,f17,f18 +python-paste-deploy #dist:f16,f17,f18 +python-paste-script #dist:f16,f17,f18 python-routes -python-setuptools +python-setuptools #dist:f16,f17,f18 python-sqlalchemy python-sqlite2 python-webob sqlite + +# Deps installed via pip for RHEL \ No newline at end of file diff --git a/files/rpms/nova b/files/rpms/nova index 7ff926b230..b8c8528c20 100644 --- a/files/rpms/nova +++ b/files/rpms/nova @@ -28,9 +28,11 @@ python-lockfile python-migrate python-mox python-netaddr -python-paramiko -python-paste -python-paste-deploy +python-paramiko # dist:f16,f17,f18 +# ^ on RHEL, brings in python-crypto which conflicts with version from +# pip we need +python-paste # dist:f16,f17,f18 +python-paste-deploy # dist:f16,f17,f18 python-qpid python-routes python-sqlalchemy diff --git a/files/rpms/quantum b/files/rpms/quantum index 05398fcf85..450e39cdc8 100644 --- a/files/rpms/quantum +++ b/files/rpms/quantum @@ -10,14 +10,16 @@ python-greenlet python-iso8601 python-kombu python-netaddr -python-paste -python-paste-deploy +#rhel6 gets via pip +python-paste # dist:f16,f17,f18 +python-paste-deploy # dist:f16,f17,f18 python-qpid python-routes python-sqlalchemy python-suds rabbitmq-server # NOPRIME -qpid-cpp-server-daemon # NOPRIME +qpid-cpp-server-daemon # NOPRIME dist:f16,f17,f18 +qpid-cpp-server # NOPRIME dist:rhel6 sqlite sudo vconfig diff --git a/files/rpms/ryu b/files/rpms/ryu index 4a4fc523b5..7cf3bd7f0b 100644 --- a/files/rpms/ryu +++ b/files/rpms/ryu @@ -1,5 +1,5 @@ -python-setuptools python-gevent python-gflags python-netifaces +python-setuptools #dist:f16,f17,f18 python-sphinx diff --git a/files/rpms/swift b/files/rpms/swift index ce41ceb8e2..1b36e34eab 100644 --- a/files/rpms/swift +++ b/files/rpms/swift @@ -8,8 +8,8 @@ python-eventlet python-greenlet python-netifaces python-nose -python-paste-deploy -python-setuptools +python-paste-deploy # dist:f16,f17,f18 +python-setuptools # dist:f16,f17,f18 python-simplejson python-webob 
pyxattr diff --git a/lib/rpc_backend b/lib/rpc_backend index 7d165a47b1..1edea15524 100644 --- a/lib/rpc_backend +++ b/lib/rpc_backend @@ -57,7 +57,11 @@ function cleanup_rpc_backend { fi elif is_service_enabled qpid; then if is_fedora; then - uninstall_package qpid-cpp-server-daemon + if [[ $DISTRO =~ (rhel6) ]]; then + uninstall_package qpid-cpp-server + else + uninstall_package qpid-cpp-server-daemon + fi elif is_ubuntu; then uninstall_package qpidd else @@ -87,7 +91,16 @@ function install_rpc_backend() { rm -f "$tfile" elif is_service_enabled qpid; then if is_fedora; then - install_package qpid-cpp-server-daemon + if [[ $DISTRO =~ (rhel6) ]]; then + install_package qpid-cpp-server + + # RHEL6 leaves "auth=yes" in /etc/qpidd.conf, it needs to + # be no or you get GSS authentication errors as it + # attempts to default to this. + sudo sed -i.bak 's/^auth=yes$/auth=no/' /etc/qpidd.conf + else + install_package qpid-cpp-server-daemon + fi elif is_ubuntu; then install_package qpidd sudo sed -i '/PLAIN/!s/mech_list: /mech_list: PLAIN /' /etc/sasl2/qpidd.conf From 1dcbed37b463a115d161955d3dc03cf25fa27b0e Mon Sep 17 00:00:00 2001 From: Tiago Mello Date: Sun, 21 Apr 2013 14:35:22 -0400 Subject: [PATCH 0060/4704] Adds PowerVM support for the VIRT_DRIVER conf Change-Id: I95d0f1417bd29196da281e8d76ecbdb1d407851f --- stack.sh | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/stack.sh b/stack.sh index 497e8a1b71..e7c1babb87 100755 --- a/stack.sh +++ b/stack.sh @@ -929,6 +929,25 @@ if is_service_enabled nova; then iniset $NOVA_CONF baremetal ${I/=/ } done + # PowerVM + # ------- + + elif [ "$VIRT_DRIVER" = 'powervm' ]; then + echo_summary "Using PowerVM driver" + POWERVM_MGR_TYPE=${POWERVM_MGR_TYPE:-"ivm"} + POWERVM_MGR_HOST=${POWERVM_MGR_HOST:-"powervm.host"} + POWERVM_MGR_USER=${POWERVM_MGR_USER:-"padmin"} + POWERVM_MGR_PASSWD=${POWERVM_MGR_PASSWD:-"password"} + POWERVM_IMG_REMOTE_PATH=${POWERVM_IMG_REMOTE_PATH:-"/tmp"} + 
POWERVM_IMG_LOCAL_PATH=${POWERVM_IMG_LOCAL_PATH:-"/tmp"} + iniset $NOVA_CONF DEFAULT compute_driver nova.virt.powervm.PowerVMDriver + iniset $NOVA_CONF DEFAULT powervm_mgr_type $POWERVM_MGR_TYPE + iniset $NOVA_CONF DEFAULT powervm_mgr $POWERVM_MGR_HOST + iniset $NOVA_CONF DEFAULT powervm_mgr_user $POWERVM_MGR_USER + iniset $NOVA_CONF DEFAULT powervm_mgr_passwd $POWERVM_MGR_PASSWD + iniset $NOVA_CONF DEFAULT powervm_img_remote_path $POWERVM_IMG_REMOTE_PATH + iniset $NOVA_CONF DEFAULT powervm_img_local_path $POWERVM_IMG_LOCAL_PATH + # Default # ------- From 5fe933399f57a2753d1f7615534b707160d6d497 Mon Sep 17 00:00:00 2001 From: Vincent Untz Date: Tue, 23 Apr 2013 10:04:12 +0200 Subject: [PATCH 0061/4704] Install polkit on Fedora/openSUSE when using nova We clearly depend on polkit. It's usually already installed, but for people using JeOS images, it's not. Change-Id: Ieccd0ff569f7d4541833b98232aeebb36b3493cb --- files/rpms-suse/nova | 1 + files/rpms/nova | 1 + 2 files changed, 2 insertions(+) diff --git a/files/rpms-suse/nova b/files/rpms-suse/nova index 04af7f3110..a3fd4799c0 100644 --- a/files/rpms-suse/nova +++ b/files/rpms-suse/nova @@ -15,6 +15,7 @@ libvirt-python libxml2-python mysql-community-server # NOPRIME parted +polkit python-M2Crypto python-m2crypto # dist:sle11sp2 python-Paste diff --git a/files/rpms/nova b/files/rpms/nova index 7ff926b230..9d8891d97d 100644 --- a/files/rpms/nova +++ b/files/rpms/nova @@ -15,6 +15,7 @@ numpy # needed by websockify for spice console m2crypto mysql-server # NOPRIME parted +polkit python-boto python-carrot python-cheetah From 9d2647a93f8788b22bc1d22046d40165bfba0013 Mon Sep 17 00:00:00 2001 From: rahmu Date: Wed, 24 Apr 2013 10:40:07 +0200 Subject: [PATCH 0062/4704] Set the port of S3_URL to 8080 with swift3 enabled Fixes: bug #1171608 The value of the port defaults to 3333 which is the default for nova-objectstore. 
Change-Id: If87df6a765bcfc4d467628411dceec72d5af412e --- stack.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 497e8a1b71..bcf359272e 100755 --- a/stack.sh +++ b/stack.sh @@ -589,8 +589,10 @@ if is_service_enabled s-proxy; then install_swift configure_swift + # swift3 middleware to provide S3 emulation to Swift if is_service_enabled swift3; then - # swift3 middleware to provide S3 emulation to Swift + # replace the nova-objectstore port by the swift port + S3_SERVICE_PORT=8080 git_clone $SWIFT3_REPO $SWIFT3_DIR $SWIFT3_BRANCH setup_develop $SWIFT3_DIR fi From f661658ed1e719964b96002f958c016a2db635db Mon Sep 17 00:00:00 2001 From: Jian Wen Date: Thu, 25 Apr 2013 15:33:10 +0800 Subject: [PATCH 0063/4704] Remove redundant arping package from apts/nova Nova is dependent on arping because 'arping -U' is used. Arping package doesn't provide 'arping -U'. Let iputils-arping do the arping job. Change-Id: Iad4cbf823881de4656fb6a4e32285cf2a49b1d2a --- files/apts/nova | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/files/apts/nova b/files/apts/nova index f4615c41ce..298e25feee 100644 --- a/files/apts/nova +++ b/files/apts/nova @@ -2,8 +2,7 @@ dnsmasq-base dnsmasq-utils # for dhcp_release only available in dist:oneiric,precise,quantal kpartx parted -arping # only available in dist:natty -iputils-arping # only available in dist:oneiric +iputils-arping mysql-server # NOPRIME python-mysqldb python-xattr # needed for glance which is needed for nova --- this shouldn't be here From 76ca9a9e235c9f57eaecb387dc22d02e241de402 Mon Sep 17 00:00:00 2001 From: Ralf Haferkamp Date: Fri, 12 Apr 2013 12:39:58 +0200 Subject: [PATCH 0064/4704] Fix nova compute failing to start on openSUSE 12.3 The syntax of polkit authorization rules is different now. Using the same code as for Fedora 18 now. 
Change-Id: I4f66d8fc65c90a309aab478a4df35c77c7669314 --- lib/nova | 44 +++++++++++++++++++++++--------------------- 1 file changed, 23 insertions(+), 21 deletions(-) diff --git a/lib/nova b/lib/nova index ea73badb58..f28094e7a1 100644 --- a/lib/nova +++ b/lib/nova @@ -296,22 +296,8 @@ EOF - if is_fedora; then - # Starting with fedora 18 enable stack-user to virsh -c qemu:///system - # by creating a policy-kit rule for stack-user - if [[ "$os_RELEASE" -ge "18" ]]; then - rules_dir=/etc/polkit-1/rules.d - sudo mkdir -p $rules_dir - sudo bash -c "cat < $rules_dir/50-libvirt-$STACK_USER.rules -polkit.addRule(function(action, subject) { - if (action.id == 'org.libvirt.unix.manage' && - subject.user == '"$STACK_USER"') { - return polkit.Result.YES; - } -}); -EOF" - unset rules_dir - else + if is_fedora || is_suse; then + if is_fedora && [[ "$os_RELEASE" -le "17" ]]; then sudo bash -c 'cat </etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla [libvirt Management Access] Identity=unix-group:libvirtd @@ -320,11 +306,11 @@ ResultAny=yes ResultInactive=yes ResultActive=yes EOF' - fi - elif is_suse; then - # Work around the fact that polkit-default-privs overrules pklas - # with 'unix-group:$group'. - sudo bash -c "cat </etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla + elif is_suse && [[ $os_RELEASE = 12.2 || "$os_VENDOR" = "SUSE LINUX" ]]; then + # openSUSE < 12.3 or SLE + # Work around the fact that polkit-default-privs overrules pklas + # with 'unix-group:$group'. 
+ sudo bash -c "cat </etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla [libvirt Management Access] Identity=unix-user:$USER Action=org.libvirt.unix.manage @@ -332,6 +318,22 @@ ResultAny=yes ResultInactive=yes ResultActive=yes EOF" + else + # Starting with fedora 18 and opensuse-12.3 enable stack-user to + # virsh -c qemu:///system by creating a policy-kit rule for + # stack-user using the new Javascript syntax + rules_dir=/etc/polkit-1/rules.d + sudo mkdir -p $rules_dir + sudo bash -c "cat < $rules_dir/50-libvirt-$STACK_USER.rules +polkit.addRule(function(action, subject) { + if (action.id == 'org.libvirt.unix.manage' && + subject.user == '"$STACK_USER"') { + return polkit.Result.YES; + } +}); +EOF" + unset rules_dir + fi fi # The user that nova runs as needs to be member of **libvirtd** group otherwise From c32490838a2bbbda4ab383f573dedfc484ca9cf9 Mon Sep 17 00:00:00 2001 From: Steve Baker Date: Tue, 9 Apr 2013 13:41:47 +1200 Subject: [PATCH 0065/4704] Move auth_token config to .conf, key cache directories auth_token configuration can now be read from the conf files rather than the paste.ini files. A key cache directory has been created for each of the 3 API services under /var/cache/heat This is the devstack change relating to Heat Blueprint: keystone-middleware This is related to this committed change: https://review.openstack.org/#/c/26351/ Devstack users will find Heat to be broken until this corresponding change is approved. 
Change-Id: If6f77f86a3eeb08a58b516725bd806e39ccedb50 --- lib/heat | 61 +++++++++++++++++++++++++++++++++++--------------------- stack.sh | 1 + 2 files changed, 39 insertions(+), 23 deletions(-) diff --git a/lib/heat b/lib/heat index 32c0182c01..c6e936f3e8 100644 --- a/lib/heat +++ b/lib/heat @@ -29,6 +29,7 @@ set +o xtrace # set up default directories HEAT_DIR=$DEST/heat HEATCLIENT_DIR=$DEST/python-heatclient +HEAT_AUTH_CACHE_DIR=${HEAT_AUTH_CACHE_DIR:-/var/cache/heat} # Functions @@ -37,8 +38,7 @@ HEATCLIENT_DIR=$DEST/python-heatclient # cleanup_heat() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_heat() { - # This function intentionally left blank - : + sudo rm -rf $HEAT_AUTH_CACHE_DIR } # configure_heatclient() - Set config files, create data dirs, etc @@ -73,18 +73,19 @@ function configure_heat() { iniset $HEAT_API_CFN_CONF DEFAULT use_syslog $SYSLOG iniset $HEAT_API_CFN_CONF DEFAULT bind_host $HEAT_API_CFN_HOST iniset $HEAT_API_CFN_CONF DEFAULT bind_port $HEAT_API_CFN_PORT + iniset $HEAT_API_CFN_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST + iniset $HEAT_API_CFN_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT + iniset $HEAT_API_CFN_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL + iniset $HEAT_API_CFN_CONF keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0 + iniset $HEAT_API_CFN_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME + iniset $HEAT_API_CFN_CONF keystone_authtoken admin_user heat + iniset $HEAT_API_CFN_CONF keystone_authtoken admin_password $SERVICE_PASSWORD + iniset $HEAT_API_CFN_CONF keystone_authtoken signing_dir $HEAT_AUTH_CACHE_DIR/api-cfn iniset_rpc_backend heat $HEAT_API_CFN_CONF DEFAULT HEAT_API_CFN_PASTE_INI=$HEAT_CONF_DIR/heat-api-cfn-paste.ini cp $HEAT_DIR/etc/heat/heat-api-cfn-paste.ini $HEAT_API_CFN_PASTE_INI - iniset $HEAT_API_CFN_PASTE_INI filter:authtoken 
auth_host $KEYSTONE_AUTH_HOST - iniset $HEAT_API_CFN_PASTE_INI filter:authtoken auth_port $KEYSTONE_AUTH_PORT - iniset $HEAT_API_CFN_PASTE_INI filter:authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL - iniset $HEAT_API_CFN_PASTE_INI filter:authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0 - iniset $HEAT_API_CFN_PASTE_INI filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME - iniset $HEAT_API_CFN_PASTE_INI filter:authtoken admin_user heat - iniset $HEAT_API_CFN_PASTE_INI filter:authtoken admin_password $SERVICE_PASSWORD iniset $HEAT_API_CFN_PASTE_INI filter:ec2authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0 iniset $HEAT_API_CFN_PASTE_INI filter:ec2authtoken keystone_ec2_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ec2tokens @@ -96,18 +97,19 @@ function configure_heat() { iniset $HEAT_API_CONF DEFAULT use_syslog $SYSLOG iniset $HEAT_API_CONF DEFAULT bind_host $HEAT_API_HOST iniset $HEAT_API_CONF DEFAULT bind_port $HEAT_API_PORT + iniset $HEAT_API_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST + iniset $HEAT_API_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT + iniset $HEAT_API_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL + iniset $HEAT_API_CONF keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0 + iniset $HEAT_API_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME + iniset $HEAT_API_CONF keystone_authtoken admin_user heat + iniset $HEAT_API_CONF keystone_authtoken admin_password $SERVICE_PASSWORD + iniset $HEAT_API_CONF keystone_authtoken signing_dir $HEAT_AUTH_CACHE_DIR/api iniset_rpc_backend heat $HEAT_API_CONF DEFAULT HEAT_API_PASTE_INI=$HEAT_CONF_DIR/heat-api-paste.ini cp $HEAT_DIR/etc/heat/heat-api-paste.ini $HEAT_API_PASTE_INI - iniset $HEAT_API_PASTE_INI filter:authtoken auth_host $KEYSTONE_AUTH_HOST - iniset 
$HEAT_API_PASTE_INI filter:authtoken auth_port $KEYSTONE_AUTH_PORT - iniset $HEAT_API_PASTE_INI filter:authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL - iniset $HEAT_API_PASTE_INI filter:authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0 - iniset $HEAT_API_PASTE_INI filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME - iniset $HEAT_API_PASTE_INI filter:authtoken admin_user heat - iniset $HEAT_API_PASTE_INI filter:authtoken admin_password $SERVICE_PASSWORD iniset $HEAT_API_PASTE_INI filter:ec2authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0 iniset $HEAT_API_PASTE_INI filter:ec2authtoken keystone_ec2_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ec2tokens @@ -135,18 +137,19 @@ function configure_heat() { iniset $HEAT_API_CW_CONF DEFAULT use_syslog $SYSLOG iniset $HEAT_API_CW_CONF DEFAULT bind_host $HEAT_API_CW_HOST iniset $HEAT_API_CW_CONF DEFAULT bind_port $HEAT_API_CW_PORT + iniset $HEAT_API_CW_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST + iniset $HEAT_API_CW_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT + iniset $HEAT_API_CW_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL + iniset $HEAT_API_CW_CONF keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0 + iniset $HEAT_API_CW_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME + iniset $HEAT_API_CW_CONF keystone_authtoken admin_user heat + iniset $HEAT_API_CW_CONF keystone_authtoken admin_password $SERVICE_PASSWORD + iniset $HEAT_API_CW_CONF keystone_authtoken signing_dir $HEAT_AUTH_CACHE_DIR/api-cloudwatch iniset_rpc_backend heat $HEAT_API_CW_CONF DEFAULT HEAT_API_CW_PASTE_INI=$HEAT_CONF_DIR/heat-api-cloudwatch-paste.ini cp $HEAT_DIR/etc/heat/heat-api-cloudwatch-paste.ini $HEAT_API_CW_PASTE_INI - iniset $HEAT_API_CW_PASTE_INI filter:authtoken auth_host $KEYSTONE_AUTH_HOST - iniset 
$HEAT_API_CW_PASTE_INI filter:authtoken auth_port $KEYSTONE_AUTH_PORT - iniset $HEAT_API_CW_PASTE_INI filter:authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL - iniset $HEAT_API_CW_PASTE_INI filter:authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0 - iniset $HEAT_API_CW_PASTE_INI filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME - iniset $HEAT_API_CW_PASTE_INI filter:authtoken admin_user heat - iniset $HEAT_API_CW_PASTE_INI filter:authtoken admin_password $SERVICE_PASSWORD iniset $HEAT_API_CW_PASTE_INI filter:ec2authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0 iniset $HEAT_API_CW_PASTE_INI filter:ec2authtoken keystone_ec2_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ec2tokens } @@ -159,6 +162,18 @@ function init_heat() { $HEAT_DIR/bin/heat-db-setup $os_PACKAGE -r $DATABASE_PASSWORD $HEAT_DIR/tools/nova_create_flavors.sh + create_heat_cache_dir +} + +# create_heat_cache_dir() - Part of the init_heat() process +function create_heat_cache_dir() { + # Create cache dirs + sudo mkdir -p $HEAT_AUTH_CACHE_DIR/api + sudo chown $STACK_USER $HEAT_AUTH_CACHE_DIR/api + sudo mkdir -p $HEAT_AUTH_CACHE_DIR/api-cfn + sudo chown $STACK_USER $HEAT_AUTH_CACHE_DIR/api-cfn + sudo mkdir -p $HEAT_AUTH_CACHE_DIR/api-cloudwatch + sudo chown $STACK_USER $HEAT_AUTH_CACHE_DIR/api-cloudwatch } # install_heatclient() - Collect source and prepare diff --git a/stack.sh b/stack.sh index 497e8a1b71..90e607e728 100755 --- a/stack.sh +++ b/stack.sh @@ -643,6 +643,7 @@ fi if is_service_enabled heat; then install_heat install_heatclient + cleanup_heat configure_heat configure_heatclient fi From fd1ebe7cba2388803e37b8f55475ffff50028506 Mon Sep 17 00:00:00 2001 From: Steve Baker Date: Sat, 13 Apr 2013 09:49:14 +1000 Subject: [PATCH 0066/4704] Move ec2authtoken config from paste.ini to conf Nothing now needs to be configured in paste.ini on launch This change relates 
to these committed Heat changes: https://review.openstack.org/#/c/27382/ https://review.openstack.org/#/c/27382/ Change-Id: Ia29dee8e32cd3d43d1b9200dd94de70e02a54bfc --- lib/heat | 19 ++++++------------- 1 file changed, 6 insertions(+), 13 deletions(-) diff --git a/lib/heat b/lib/heat index c6e936f3e8..b9d0ee5cd3 100644 --- a/lib/heat +++ b/lib/heat @@ -81,14 +81,11 @@ function configure_heat() { iniset $HEAT_API_CFN_CONF keystone_authtoken admin_user heat iniset $HEAT_API_CFN_CONF keystone_authtoken admin_password $SERVICE_PASSWORD iniset $HEAT_API_CFN_CONF keystone_authtoken signing_dir $HEAT_AUTH_CACHE_DIR/api-cfn + iniset $HEAT_API_CFN_CONF ec2authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0 + iniset $HEAT_API_CFN_CONF ec2authtoken keystone_ec2_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ec2tokens iniset_rpc_backend heat $HEAT_API_CFN_CONF DEFAULT - HEAT_API_CFN_PASTE_INI=$HEAT_CONF_DIR/heat-api-cfn-paste.ini - cp $HEAT_DIR/etc/heat/heat-api-cfn-paste.ini $HEAT_API_CFN_PASTE_INI - iniset $HEAT_API_CFN_PASTE_INI filter:ec2authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0 - iniset $HEAT_API_CFN_PASTE_INI filter:ec2authtoken keystone_ec2_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ec2tokens - # OpenStack API HEAT_API_CONF=$HEAT_CONF_DIR/heat-api.conf cp $HEAT_DIR/etc/heat/heat-api.conf $HEAT_API_CONF @@ -105,13 +102,11 @@ function configure_heat() { iniset $HEAT_API_CONF keystone_authtoken admin_user heat iniset $HEAT_API_CONF keystone_authtoken admin_password $SERVICE_PASSWORD iniset $HEAT_API_CONF keystone_authtoken signing_dir $HEAT_AUTH_CACHE_DIR/api + iniset $HEAT_API_CONF ec2authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0 + iniset $HEAT_API_CONF ec2authtoken keystone_ec2_uri 
$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ec2tokens iniset_rpc_backend heat $HEAT_API_CONF DEFAULT - HEAT_API_PASTE_INI=$HEAT_CONF_DIR/heat-api-paste.ini - cp $HEAT_DIR/etc/heat/heat-api-paste.ini $HEAT_API_PASTE_INI - iniset $HEAT_API_PASTE_INI filter:ec2authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0 - iniset $HEAT_API_PASTE_INI filter:ec2authtoken keystone_ec2_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ec2tokens # engine HEAT_ENGINE_CONF=$HEAT_CONF_DIR/heat-engine.conf @@ -145,13 +140,11 @@ function configure_heat() { iniset $HEAT_API_CW_CONF keystone_authtoken admin_user heat iniset $HEAT_API_CW_CONF keystone_authtoken admin_password $SERVICE_PASSWORD iniset $HEAT_API_CW_CONF keystone_authtoken signing_dir $HEAT_AUTH_CACHE_DIR/api-cloudwatch + iniset $HEAT_API_CW_CONF ec2authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0 + iniset $HEAT_API_CW_CONF ec2authtoken keystone_ec2_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ec2tokens iniset_rpc_backend heat $HEAT_API_CW_CONF DEFAULT - HEAT_API_CW_PASTE_INI=$HEAT_CONF_DIR/heat-api-cloudwatch-paste.ini - cp $HEAT_DIR/etc/heat/heat-api-cloudwatch-paste.ini $HEAT_API_CW_PASTE_INI - iniset $HEAT_API_CW_PASTE_INI filter:ec2authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0 - iniset $HEAT_API_CW_PASTE_INI filter:ec2authtoken keystone_ec2_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ec2tokens } # init_heat() - Initialize database From d5cd79b16bbb4bae6da7dadfcbb7a4c2520a7045 Mon Sep 17 00:00:00 2001 From: Steve Baker Date: Sat, 13 Apr 2013 11:14:41 -0700 Subject: [PATCH 0067/4704] Install api-paste.ini and policy.json This is related to this Heat change, and Heat on Devstack will not work while only one of these changes is 
applied: Consolidated api-paste.ini file https://review.openstack.org/#/c/27384/ Change-Id: Iba02b56173f796de5348289247233c8765f83ac2 --- lib/heat | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/lib/heat b/lib/heat index b9d0ee5cd3..cd0a204f22 100644 --- a/lib/heat +++ b/lib/heat @@ -64,6 +64,11 @@ function configure_heat() { HEAT_API_CW_PORT=${HEAT_API_CW_PORT:-8003} HEAT_API_HOST=${HEAT_API_HOST:-$SERVICE_HOST} HEAT_API_PORT=${HEAT_API_PORT:-8004} + HEAT_API_PASTE_FILE=$HEAT_CONF_DIR/api-paste.ini + HEAT_POLICY_FILE=$HEAT_CONF_DIR/policy.json + + cp $HEAT_DIR/etc/heat/api-paste.ini $HEAT_API_PASTE_FILE + cp $HEAT_DIR/etc/heat/policy.json $HEAT_POLICY_FILE # Cloudformation API HEAT_API_CFN_CONF=$HEAT_CONF_DIR/heat-api-cfn.conf From a418af9503d6e5d7b767d6840efd6eb7ca199e13 Mon Sep 17 00:00:00 2001 From: hartsocks Date: Wed, 24 Apr 2013 14:49:56 -0700 Subject: [PATCH 0068/4704] fix support for VMware vCenter Driver Change-Id: Iedd26dbb89731f49718604eb09eb84b3e0b648c0 --- stack.sh | 12 ++++++++++++ stackrc | 2 ++ 2 files changed, 14 insertions(+) diff --git a/stack.sh b/stack.sh index e7c1babb87..726f1bd37e 100755 --- a/stack.sh +++ b/stack.sh @@ -948,6 +948,18 @@ if is_service_enabled nova; then iniset $NOVA_CONF DEFAULT powervm_img_remote_path $POWERVM_IMG_REMOTE_PATH iniset $NOVA_CONF DEFAULT powervm_img_local_path $POWERVM_IMG_LOCAL_PATH + # vSphere API + # ------- + + elif [ "$VIRT_DRIVER" = 'vsphere' ]; then + echo_summary "Using VMware vCenter driver" + iniset $NOVA_CONF DEFAULT compute_driver "vmwareapi.VMwareVCDriver" + VMWAREAPI_USER=${VMWAREAPI_USER:-"root"} + iniset $NOVA_CONF DEFAULT vmwareapi_host_ip "$VMWAREAPI_IP" + iniset $NOVA_CONF DEFAULT vmwareapi_host_username "$VMWAREAPI_USER" + iniset $NOVA_CONF DEFAULT vmwareapi_host_password "$VMWAREAPI_PASSWORD" + iniset $NOVA_CONF DEFAULT vmwareapi_cluster_name "$VMWAREAPI_CLUSTER" + # Default # ------- diff --git a/stackrc b/stackrc index c55e8dc78e..f99eab1852 100644 --- a/stackrc +++ b/stackrc @@ 
-199,6 +199,8 @@ case "$VIRT_DRIVER" in IMAGE_URLS=${IMAGE_URLS:-"http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-uec.tar.gz"};; esac ;; + vsphere) + IMAGE_URLS="";; *) # otherwise, use the uec style image (with kernel, ramdisk, disk) DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.1-x86_64-uec} IMAGE_URLS=${IMAGE_URLS:-"http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-uec.tar.gz"};; From 7a7a4667386424b949a9e4e1c65683d71a1161fe Mon Sep 17 00:00:00 2001 From: Matthieu Huin Date: Mon, 15 Apr 2013 17:13:41 +0200 Subject: [PATCH 0069/4704] Removes "RPC not enabled" error message when no backend is needed When no service needing a RPC backend is activated, no error message should appear if a RPC backend is not installed. A simple check is done on the services installation files to see which services need to initialize a RPC backend at some point; if none of these services are in ENABLED_SERVICES then the error message is skipped. Change-Id: I4e47e0c675c74775b4ea53a00848ac1d777f0125 Fixes: bug #1167338 --- lib/rpc_backend | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/lib/rpc_backend b/lib/rpc_backend index 1edea15524..3c485e42c7 100644 --- a/lib/rpc_backend +++ b/lib/rpc_backend @@ -21,9 +21,22 @@ set +o xtrace # Functions # --------- + # Make sure we only have one rpc backend enabled. # Also check the specified rpc backend is available on your platform. function check_rpc_backend() { + local rpc_needed=1 + # We rely on the fact that filenames in lib/* match the service names + # that can be passed as arguments to is_service_enabled. + # We check for a call to iniset_rpc_backend in these files, meaning + # the service needs a backend. + rpc_candidates=$(grep -rl iniset_rpc_backend . 
| awk -F/ '{print $NF}') + for c in ${rpc_candidates}; do + if is_service_enabled $c; then + rpc_needed=0 + break + fi + done local rpc_backend_cnt=0 for svc in qpid zeromq rabbit; do is_service_enabled $svc && @@ -33,7 +46,7 @@ function check_rpc_backend() { echo "ERROR: only one rpc backend may be enabled," echo " set only one of 'rabbit', 'qpid', 'zeromq'" echo " via ENABLED_SERVICES." - elif [ "$rpc_backend_cnt" == 0 ]; then + elif [ "$rpc_backend_cnt" == 0 ] && [ "$rpc_needed" == 0 ]; then echo "ERROR: at least one rpc backend must be enabled," echo " set one of 'rabbit', 'qpid', 'zeromq'" echo " via ENABLED_SERVICES." From e88b3a4d67f84d660f3043dbf7cc33e946458e5e Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Fri, 26 Apr 2013 19:49:15 +1000 Subject: [PATCH 0070/4704] Remove unused post-prereq phase This phase was going to be part of RHEL6 support, but is no longer necessary Change-Id: I8614710b35db018dcf9d906e0af70e4f6b8c7191 --- stack.sh | 6 ------ 1 file changed, 6 deletions(-) diff --git a/stack.sh b/stack.sh index f248b6eeec..0979bc5008 100755 --- a/stack.sh +++ b/stack.sh @@ -538,12 +538,6 @@ source $TOP_DIR/tools/install_prereqs.sh install_rpc_backend -# a place for distro-specific post-prereq workarounds -if [[ -f $TOP_DIR/tools/${DISTRO}/post-prereq.sh ]]; then - echo_summary "Running ${DISTRO} extra prereq tasks" - source $TOP_DIR/tools/${DISTRO}/post-prereq.sh -fi - if is_service_enabled $DATABASE_BACKENDS; then install_database fi From 7919d851a9e532c9d92c82a067e68cf95b6770c8 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Fri, 26 Apr 2013 11:28:29 +1000 Subject: [PATCH 0071/4704] RHEL6 support rpms Install some rpms required for operation on RHEL6. Additionally, remove some system packages that interfere with pip installs. 
Change-Id: I273ce59d7bf066e73d524f61b8ad048599101dab --- files/rpms/general | 14 ++++++++++++++ files/rpms/glance | 1 + stack.sh | 38 ++++++++++++++++++++++++++++++++++++-- tools/install_prereqs.sh | 3 ++- 4 files changed, 53 insertions(+), 3 deletions(-) diff --git a/files/rpms/general b/files/rpms/general index fc3412ba4b..764b602da0 100644 --- a/files/rpms/general +++ b/files/rpms/general @@ -1,14 +1,19 @@ bridge-utils curl +dbus euca2ools # only for testing client +gcc # dist:rhel6 [2] git-core openssh-server openssl +libxml2-devel # dist:rhel6 [2] +libxslt-devel # dist:rhel6 [2] psmisc pylint python-netaddr python-pep8 python-pip +python-prettytable # dist:rhel6 [1] python-unittest2 python-virtualenv screen @@ -16,3 +21,12 @@ tar tcpdump unzip wget + +# [1] : some of installed tools have unversioned dependencies on this, +# but others have versioned (<=0.7). So if a later version (0.7.1) +# gets installed in response to an unversioned dependency, it breaks. +# This pre-installs a compatible 0.6(ish) version from RHEL + +# [2] : RHEL6 rpm versions of python-lxml is old, and has to be +# removed. Several tools rely on it, so we install the dependencies +# pip needs to build it here (see tools/install_prereqs.sh) \ No newline at end of file diff --git a/files/rpms/glance b/files/rpms/glance index 34e3f9ada1..097cf3f7e0 100644 --- a/files/rpms/glance +++ b/files/rpms/glance @@ -1,3 +1,4 @@ +gcc libxml2-devel python-argparse python-devel diff --git a/stack.sh b/stack.sh index e192588c94..d534b87ddb 100755 --- a/stack.sh +++ b/stack.sh @@ -525,7 +525,6 @@ failed() { # an error. It is also useful for following along as the install occurs. 
set -o xtrace - # Install Packages # ================ @@ -546,6 +545,42 @@ if is_service_enabled q-agt; then install_quantum_agent_packages fi +# +# System-specific preconfigure +# ============================ + +if [[ is_fedora && $DISTRO =~ (rhel6) ]]; then + # An old version (2.0.1) of python-crypto is probably installed on + # a fresh system, via the dependency chain + # cas->python-paramiko->python-crypto (related to anaconda). + # Unfortunately, "pip uninstall pycrypto" will remove the + # .egg-info file for this rpm-installed version, but leave most of + # the actual library files behind in /usr/lib64/python2.6/Crypto. + # When later "pip install pycrypto" happens, the built library + # will be installed over these existing files; the result is a + # useless mess of old, rpm-packaged files and pip-installed files. + # Unsurprisingly, the end result is it doesn't work. Thus we have + # to get rid of it now so that any packages that pip-install + # pycrypto get a "clean slate". + # (note, we have to be careful about other RPM packages specified + # pulling in python-crypto as well. That's why RHEL6 doesn't + # install python-paramiko packages for example...) + uninstall_package python-crypto + + # A similar thing happens for python-lxml (a dependency of + # ipa-client, an auditing thing we don't care about). We have the + # build-dependencies the lxml pip-install will need (gcc, + # libxml2-dev & libxslt-dev) in the "general" rpm lists + uninstall_package python-lxml + + # If the dbus rpm was installed by the devstack rpm dependencies + # then you may hit a bug where the uuid isn't generated because + # the service was never started (PR#598200), causing issues for + # Nova stopping later on complaining that + # '/var/lib/dbus/machine-id' doesn't exist. 
+ sudo service messagebus restart +fi + TRACK_DEPENDS=${TRACK_DEPENDS:-False} # Install python packages into a virtualenv so that we can track them @@ -559,7 +594,6 @@ if [[ $TRACK_DEPENDS = True ]]; then $DEST/.venv/bin/pip freeze > $DEST/requires-pre-pip fi - # Check Out and Install Source # ---------------------------- diff --git a/tools/install_prereqs.sh b/tools/install_prereqs.sh index 7c4386f903..68f11ce35e 100755 --- a/tools/install_prereqs.sh +++ b/tools/install_prereqs.sh @@ -42,7 +42,8 @@ NOW=$(date "+%s") LAST_RUN=$(head -1 $PREREQ_RERUN_MARKER 2>/dev/null || echo "0") DELTA=$(($NOW - $LAST_RUN)) if [[ $DELTA -lt $PREREQ_RERUN_SECONDS && -z "$FORCE_PREREQ" ]]; then - echo "Re-run time has not expired ($(($PREREQ_RERUN_SECONDS - $DELTA)) seconds remaining); exiting..." + echo "Re-run time has not expired ($(($PREREQ_RERUN_SECONDS - $DELTA)) seconds remaining) " + echo "and FORCE_PREREQ not set; exiting..." return 0 fi From ad43b3957ee8082c1e27568de1070edb81734181 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Thu, 11 Apr 2013 11:13:09 +1000 Subject: [PATCH 0072/4704] Grab upstream nodejs for RHEL6 RHEL6 has no nodejs in main packages or in EPEL. The easiest way is to just install the upstream binary version which works fine for the very minimal usage by lesscss. Change-Id: Ia35e7dbaf4c7add43797d6b7d0c846bab1cf0cb0 --- lib/horizon | 47 +++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 43 insertions(+), 4 deletions(-) diff --git a/lib/horizon b/lib/horizon index 05bf6d3915..3d8b3e6d1f 100644 --- a/lib/horizon +++ b/lib/horizon @@ -61,16 +61,47 @@ function _horizon_config_set() { fi } +# Basic install of upstream nodejs for platforms that want it +function install_nodejs() { + if [[ $(which node) ]]; then + echo "You already appear to have nodejs, skipping install" + return + fi + + # There are several node deployment scripts; one may be more + # appropriate at some future point, but for now direct download is + # the simplest way. 
The version barely matters for lesscss which + # doesn't use anything fancy. + local ver=0.10.1 + local nodejs=node-v${ver}-linux-x64 + local tar=$nodejs.tar.gz + local nodejs_url=http://nodejs.org/dist/v${ver}/${tar} + + curl -Ss ${nodejs_url} | tar -C ${DEST} -xz + if [ $? -ne 0 ]; then + echo "*** Download of nodejs failed" + return 1 + fi + + # /usr/bin so it gets found in the PATH available to horizon + sudo ln -s $DEST/$nodejs/bin/node /usr/bin/node +} + # Entry Points # ------------ # cleanup_horizon() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_horizon() { - # kill instances (nova) - # delete image files (glance) - # This function intentionally left blank - : + + if [[ is_fedora && $DISTRO =~ (rhel6) ]]; then + # if the /usr/bin/node link looks like it's pointing into $DEST, + # then we installed it via install_nodejs + if [[ $(readlink -f /usr/bin/node) =~ ($DEST) ]]; then + sudo rm /usr/bin/node + fi + fi + } # configure_horizon() - Set config files, create data dirs, etc @@ -159,6 +190,14 @@ function install_horizon() { exit_distro_not_supported "apache installation" fi + if [[ is_fedora && $DISTRO =~ (rhel6) ]]; then + # RHEL6 currently has no native way to get nodejs, so we do a + # basic install here (see cleanup_horizon too). + # TODO: does nova have a better way that we can limit + # requirement of site-wide nodejs install? + install_nodejs + fi + # NOTE(sdague) quantal changed the name of the node binary if is_ubuntu; then if [[ ! -e "/usr/bin/node" ]]; then From 0729d06fae1ee005d553350b729b233256032590 Mon Sep 17 00:00:00 2001 From: Tal Kain Date: Mon, 22 Apr 2013 17:50:27 +0300 Subject: [PATCH 0073/4704] lib/database: Fixed a bug in database_connection_url The changes from https://review.openstack.org/#/c/23364/ simplified the usage of database_connection_url_mysql and database_connection_url_postgresql without making the proper changes to database_connection_url. 
Fixes: bug #1167668 Signed-off-by: Tal Kain Change-Id: I5115d123ad794f2eb2e144b76932031af5248f26 --- lib/database | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/lib/database b/lib/database index e63d5e240d..442ed56fbe 100644 --- a/lib/database +++ b/lib/database @@ -38,7 +38,7 @@ done for db in $DATABASE_BACKENDS; do # Set the type for the rest of the backend to use if is_service_enabled $db; then - # Set this now for the rest of the database funtions + # Set this now for the rest of the database functions DATABASE_TYPE=$db fi done @@ -110,13 +110,11 @@ function configure_database { configure_database_$DATABASE_TYPE } -# Generate an SQLAlchemy connection URL and store it in a variable -# $1 The variable name in which to store the connection URL -# $2 The name of the database +# Generate an SQLAlchemy connection URL and output it using echo +# $1 The name of the database function database_connection_url { - local var=$1 - local db=$2 - database_connection_url_$DATABASE_TYPE $var $db + local db=$1 + database_connection_url_$DATABASE_TYPE $db } From 2634be00b862b3577d6f40c72486339fbc27c937 Mon Sep 17 00:00:00 2001 From: Bob Ball Date: Mon, 29 Apr 2013 23:35:57 +0100 Subject: [PATCH 0074/4704] Correcting comment for HOST_IP_IFACE Change-Id: I4c3438dc1168401c193841976cf8ba8534b33679 Fixes: bug #1174455 --- tools/xen/README.md | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/tools/xen/README.md b/tools/xen/README.md index 1cd45cff55..3fadc7839c 100644 --- a/tools/xen/README.md +++ b/tools/xen/README.md @@ -57,8 +57,12 @@ Of course, use real passwords if this machine is exposed. MULTI_HOST=1 # Give extra time for boot ACTIVE_TIMEOUT=45 - # Interface on which you would like to access services - HOST_IP_IFACE=ethX + # Host Interface, i.e. the interface on the nova vm you want to expose the + # services on. 
Usually eth2 (management network) or eth3 (public network) and + # not eth0 (private network with XenServer host) or eth1 (VM traffic network) + # This is also used as the interface for the Ubuntu install + # The default is eth3. + # HOST_IP_IFACE=eth3 # First time Ubuntu network install params NETINSTALLIP="dhcp" NAMESERVERS="" From d7150e9bbce54479028c5c744d5648ae530925ea Mon Sep 17 00:00:00 2001 From: Maru Newby Date: Tue, 23 Apr 2013 06:16:11 +0000 Subject: [PATCH 0075/4704] Add configurable use of veths with Quantum+OVS. * This patch adds the ability to configure use of veths with OVS via the Q_OVS_USE_VETH variable. The use of veths with OVS is required to support namespaces on RHEL. * Supports bug 1171727 Change-Id: I5f557d659684ead99a3e5e2b4df787699d9d3f05 --- lib/quantum | 7 +++++++ lib/quantum_plugins/services/agent_loadbalancer | 5 +++++ 2 files changed, 12 insertions(+) diff --git a/lib/quantum b/lib/quantum index 96ccf206c9..293ef3af3c 100644 --- a/lib/quantum +++ b/lib/quantum @@ -82,6 +82,8 @@ Q_ADMIN_USERNAME=${Q_ADMIN_USERNAME:-quantum} Q_AUTH_STRATEGY=${Q_AUTH_STRATEGY:-keystone} # Use namespace or not Q_USE_NAMESPACE=${Q_USE_NAMESPACE:-True} +# RHEL's support for namespaces requires using veths with ovs +Q_OVS_USE_VETH=${Q_OVS_USE_VETH:-False} Q_USE_ROOTWRAP=${Q_USE_ROOTWRAP:-True} # Meta data IP Q_META_DATA_IP=${Q_META_DATA_IP:-$HOST_IP} @@ -665,6 +667,11 @@ function _quantum_commentout_keystone_authtoken() { } function _quantum_setup_interface_driver() { + + # ovs_use_veth needs to be set before the plugin configuration + # occurs to allow plugins to override the setting. 
+ iniset $1 DEFAULT ovs_use_veth $Q_OVS_USE_VETH + quantum_plugin_setup_interface_driver $1 } diff --git a/lib/quantum_plugins/services/agent_loadbalancer b/lib/quantum_plugins/services/agent_loadbalancer index b6528b0e84..ee3faa5bb0 100644 --- a/lib/quantum_plugins/services/agent_loadbalancer +++ b/lib/quantum_plugins/services/agent_loadbalancer @@ -34,6 +34,11 @@ function quantum_agent_lbaas_configure_agent() { cp $QUANTUM_DIR/etc/lbaas_agent.ini $LBAAS_AGENT_CONF_FILENAME + iniset $LBAAS_AGENT_CONF_FILENAME DEFAULT use_namespaces $Q_USE_NAMESPACE + # ovs_use_veth needs to be set before the plugin configuration + # occurs to allow plugins to override the setting. + iniset $LBAAS_AGENT_CONF_FILENAME DEFAULT ovs_use_veth $Q_OVS_USE_VETH + quantum_plugin_setup_interface_driver $LBAAS_AGENT_CONF_FILENAME if is_fedora; then From a8f7a62f133976af32736fd892f64b56787ebe3c Mon Sep 17 00:00:00 2001 From: Maru Newby Date: Wed, 1 May 2013 20:48:54 +0000 Subject: [PATCH 0076/4704] Add XAPI config to quantum rootwrap for XS/XCP. * Supports blueprint xenapi-ovs Change-Id: I902458ec26cd07e94fe50bb3648efa75658ccba3 --- lib/quantum_plugins/openvswitch | 11 +++++++---- stack.sh | 2 -- stackrc | 4 ++++ 3 files changed, 11 insertions(+), 6 deletions(-) diff --git a/lib/quantum_plugins/openvswitch b/lib/quantum_plugins/openvswitch index ab16483452..f8512cf927 100644 --- a/lib/quantum_plugins/openvswitch +++ b/lib/quantum_plugins/openvswitch @@ -72,10 +72,13 @@ function quantum_plugin_configure_plugin_agent() { AGENT_BINARY="$QUANTUM_DIR/bin/quantum-openvswitch-agent" if [ "$VIRT_DRIVER" = 'xenserver' ]; then - # Nova will always be installed along with quantum for a domU - # devstack install, so it should be safe to rely on nova.conf - # for xenapi configuration. 
- Q_RR_DOM0_COMMAND="$QUANTUM_DIR/bin/quantum-rootwrap-xen-dom0 $NOVA_CONF" + Q_RR_DOM0_COMMAND="$QUANTUM_DIR/bin/quantum-rootwrap-xen-dom0 $Q_RR_CONF_FILE" + + # For now, duplicate the xen configuration already found in nova.conf + iniset $Q_RR_CONF_FILE XENAPI xenapi_connection_url "$XENAPI_CONNECTION_URL" + iniset $Q_RR_CONF_FILE XENAPI xenapi_connection_username "$XENAPI_USER" + iniset $Q_RR_CONF_FILE XENAPI xenapi_connection_password "$XENAPI_PASSWORD" + # Under XS/XCP, the ovs agent needs to target the dom0 # integration bridge. This is enabled by using a root wrapper # that executes commands on dom0 via a XenAPI plugin. diff --git a/stack.sh b/stack.sh index d534b87ddb..d43c9487c7 100755 --- a/stack.sh +++ b/stack.sh @@ -924,8 +924,6 @@ if is_service_enabled nova; then echo_summary "Using XenServer virtualization driver" read_password XENAPI_PASSWORD "ENTER A PASSWORD TO USE FOR XEN." iniset $NOVA_CONF DEFAULT compute_driver "xenapi.XenAPIDriver" - XENAPI_CONNECTION_URL=${XENAPI_CONNECTION_URL:-"http://169.254.0.1"} - XENAPI_USER=${XENAPI_USER:-"root"} iniset $NOVA_CONF DEFAULT xenapi_connection_url "$XENAPI_CONNECTION_URL" iniset $NOVA_CONF DEFAULT xenapi_connection_username "$XENAPI_USER" iniset $NOVA_CONF DEFAULT xenapi_connection_password "$XENAPI_PASSWORD" diff --git a/stackrc b/stackrc index f99eab1852..6dcb462cc7 100644 --- a/stackrc +++ b/stackrc @@ -224,6 +224,10 @@ PUBLIC_NETWORK_NAME=${PUBLIC_NETWORK_NAME:-"public"} # Compatibility until it's eradicated from CI USE_SCREEN=${SCREEN_DEV:-$USE_SCREEN} +# Xen config common to nova and quantum +XENAPI_CONNECTION_URL=${XENAPI_CONNECTION_URL:-"http://169.254.0.1"} +XENAPI_USER=${XENAPI_USER:-"root"} + # Local variables: # mode: shell-script # End: From f91d6d21ba64906aa2d8b0ba67f60333908e710f Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Wed, 1 May 2013 13:26:01 +1000 Subject: [PATCH 0077/4704] Pre-install hgtools for RHEL6 See the inline comment; pre-installing this package works around [1] [1] 
https://bugzilla.redhat.com/show_bug.cgi?id=924038 Change-Id: Id021b44ac9d8aa28286828ccfd6515b7850bad16 --- stack.sh | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/stack.sh b/stack.sh index d534b87ddb..47fccd83a4 100755 --- a/stack.sh +++ b/stack.sh @@ -579,6 +579,17 @@ if [[ is_fedora && $DISTRO =~ (rhel6) ]]; then # Nova stopping later on complaining that # '/var/lib/dbus/machine-id' doesn't exist. sudo service messagebus restart + + # In setup.py, a "setup_requires" package is supposed to + # transient. However there is a bug with rhel6 distribute where + # setup_requires packages can register entry points that aren't + # cleared out properly after the setup-phase; the end result is + # installation failures (bz#924038). Thus we pre-install the + # problem package here; this way the setup_requires dependency is + # already satisfied and it will not need to be installed + # transiently, meaning we avoid the issue of it not being cleaned + # out properly. Note we do this before the track-depends below. 
+ pip_install hgtools fi TRACK_DEPENDS=${TRACK_DEPENDS:-False} From b85f9d73c1be03ebcd6ce55eb654e53c2fc4837c Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Thu, 2 May 2013 12:02:22 +1000 Subject: [PATCH 0078/4704] Ensure correct polkit config written for RHEL6 Ensure the correct polkit config is written for RHEL6, or nova-cpu will fail to start with libvirt permission errors Change-Id: I871ef4e03157883db15be41222b7338765fbb843 --- lib/nova | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/nova b/lib/nova index f28094e7a1..3a59681ba6 100644 --- a/lib/nova +++ b/lib/nova @@ -297,7 +297,7 @@ EOF if is_fedora || is_suse; then - if is_fedora && [[ "$os_RELEASE" -le "17" ]]; then + if is_fedora && [[ $DISTRO =~ (rhel6) || "$os_RELEASE" -le "17" ]]; then sudo bash -c 'cat </etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla [libvirt Management Access] Identity=unix-group:libvirtd From a43fbf647fa5b75c208e465d93c986db21eb6369 Mon Sep 17 00:00:00 2001 From: Chuck Short Date: Thu, 2 May 2013 09:28:54 -0500 Subject: [PATCH 0079/4704] Add saucy support Add support for "Saucy Salamander" Change-Id: Ic7dfef2903634b17304243917fa3d8389a93b0c0 Signed-off-by: Chuck Short --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index d534b87ddb..096c203df2 100755 --- a/stack.sh +++ b/stack.sh @@ -105,7 +105,7 @@ disable_negated_services # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -if [[ ! ${DISTRO} =~ (oneiric|precise|quantal|raring|f16|f17|f18|opensuse-12.2|rhel6) ]]; then +if [[ ! 
${DISTRO} =~ (oneiric|precise|quantal|raring|saucy|f16|f17|f18|opensuse-12.2|rhel6) ]]; then echo "WARNING: this script has not been tested on $DISTRO" if [[ "$FORCE" != "yes" ]]; then die $LINENO "If you wish to run this script anyway run with FORCE=yes" From de0898a02c453a0fbff5119cc74b37a1e336d28d Mon Sep 17 00:00:00 2001 From: Steve Baker Date: Wed, 8 May 2013 11:37:26 +1200 Subject: [PATCH 0080/4704] Do not run heat/tools/nova_create_flavors.sh This script takes a long time to run which will affect tempest run times. Instead of running this, example templates will be modified to align with the default heat flavors. Change-Id: I588b1da9f5a02de3bf64ac8011d75c7d5432ef26 --- lib/heat | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/heat b/lib/heat index cd0a204f22..0c95ebb517 100644 --- a/lib/heat +++ b/lib/heat @@ -159,7 +159,6 @@ function init_heat() { recreate_database heat utf8 $HEAT_DIR/bin/heat-db-setup $os_PACKAGE -r $DATABASE_PASSWORD - $HEAT_DIR/tools/nova_create_flavors.sh create_heat_cache_dir } From 0488edda8a34b0be6693cafdf506cfc8185a2a83 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Thu, 11 Apr 2013 12:04:36 +1000 Subject: [PATCH 0081/4704] Basic check for homedir permissions Several guides suggest using data directories under your homedir, rather than the default /opt area. This is fine, but on RHEL6 and similar distros homedirs are very restrictive 0700 permissions which doesn't allow things like httpd to pass through to serve up files. Even though stack.sh is taking over the host, changing permissions automatically is not a nice idea. So we just warn when it looks like this is happening. 
Change-Id: I9cd70e7fe90638a2a5c3b8fd94756afacac7c7be --- functions | 29 +++++++++++++++++++++++++++++ stack.sh | 3 +++ 2 files changed, 32 insertions(+) diff --git a/functions b/functions index 02c2b3a9c3..fdb532f713 100644 --- a/functions +++ b/functions @@ -1411,6 +1411,35 @@ function get_pip_command() { fi } +# Path permissions sanity check +# check_path_perm_sanity path +function check_path_perm_sanity() { + # Ensure no element of the path has 0700 permissions, which is very + # likely to cause issues for daemons. Inspired by default 0700 + # homedir permissions on RHEL and common practice of making DEST in + # the stack user's homedir. + + local real_path=$(readlink -f $1) + local rebuilt_path="" + for i in $(echo ${real_path} | tr "/" " "); do + rebuilt_path=$rebuilt_path"/"$i + + if [[ $(stat -c '%a' ${rebuilt_path}) = 700 ]]; then + echo "*** DEST path element" + echo "*** ${rebuilt_path}" + echo "*** appears to have 0700 permissions." + echo "*** This is very likely to cause fatal issues for devstack daemons." + + if [[ -n "$SKIP_PATH_SANITY" ]]; then + return + else + echo "*** Set SKIP_PATH_SANITY to skip this check" + die $LINENO "Invalid path permissions" + fi + fi + done +} + # Restore xtrace $XTRACE diff --git a/stack.sh b/stack.sh index 32a7d747ba..56ced5f67f 100755 --- a/stack.sh +++ b/stack.sh @@ -199,6 +199,9 @@ fi sudo mkdir -p $DEST sudo chown -R $STACK_USER $DEST +# a basic test for $DEST path permissions (fatal on error unless skipped) +check_path_perm_sanity ${DEST} + # Set ``OFFLINE`` to ``True`` to configure ``stack.sh`` to run cleanly without # Internet access. ``stack.sh`` must have been previously run with Internet # access to install prerequisites and fetch repositories. From 2832f2801200d8c14449463c855e68ddb684a375 Mon Sep 17 00:00:00 2001 From: zhhuabj Date: Wed, 8 May 2013 18:43:26 +0800 Subject: [PATCH 0082/4704] Set MYSQL_HOST parameter to better support multi-node mode. 
When deploying openstack with multi-node mode, mysql needs to be started listening on its real IP, not localhost; but devstack always uses localhost to recreate databases, which will lead to the errors below. Fix bug 1177735 Change-Id: I50284f469a998d023a41b4796f1dc775bb52e710 --- lib/databases/mysql | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/databases/mysql b/lib/databases/mysql index 056aec46ba..211d797d2b 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -36,8 +36,8 @@ function cleanup_database_mysql { function recreate_database_mysql { local db=$1 local charset=$2 - mysql -u$DATABASE_USER -p$DATABASE_PASSWORD -e "DROP DATABASE IF EXISTS $db;" - mysql -u$DATABASE_USER -p$DATABASE_PASSWORD -e "CREATE DATABASE $db CHARACTER SET $charset;" + mysql -u$DATABASE_USER -p$DATABASE_PASSWORD -h$MYSQL_HOST -e "DROP DATABASE IF EXISTS $db;" + mysql -u$DATABASE_USER -p$DATABASE_PASSWORD -h$MYSQL_HOST -e "CREATE DATABASE $db CHARACTER SET $charset;" } function configure_database_mysql { From 5e159496b88772a20102b646d02ed4cc13b15f7a Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Wed, 8 May 2013 14:29:52 -0400 Subject: [PATCH 0083/4704] Add pbr to devstack. It's being used in all of the core projects, so we should install it at the start so we can ensure that we don't break everything all at once. 
Change-Id: I326d724264803e88315ee9e40f4634836baf6e0b --- stack.sh | 5 +++++ stackrc | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/stack.sh b/stack.sh index 32a7d747ba..63a87b9cdf 100755 --- a/stack.sh +++ b/stack.sh @@ -273,6 +273,7 @@ source $TOP_DIR/lib/ldap # Set the destination directories for OpenStack projects OPENSTACKCLIENT_DIR=$DEST/python-openstackclient +PBR_DIR=$DEST/pbr # Interactive Configuration @@ -610,6 +611,10 @@ fi echo_summary "Installing OpenStack project source" +# Install pbr +git_clone $PBR_REPO $PBR_DIR $PBR_BRANCH +setup_develop $PBR_DIR + # Install clients libraries install_keystoneclient install_glanceclient diff --git a/stackrc b/stackrc index f99eab1852..aaf17d1a86 100644 --- a/stackrc +++ b/stackrc @@ -157,6 +157,11 @@ BM_IMAGE_BUILD_BRANCH=${BM_IMAGE_BUILD_BRANCH:-master} BM_POSEUR_REPO=${BM_POSEUR_REPO:-${GIT_BASE}/tripleo/bm_poseur.git} BM_POSEUR_BRANCH=${BM_POSEUR_BRANCH:-master} +# pbr +# Used to drive the setuptools configs +PBR_REPO=${PBR_REPO:-${GIT_BASE}/openstack-dev/pbr.git} +PBR_BRANCH=${PBR_BRANCH:-master} + # Nova hypervisor configuration. We default to libvirt with **kvm** but will # drop back to **qemu** if we are unable to load the kvm module. ``stack.sh`` can # also install an **LXC** or **OpenVZ** based system. From 2ae6acfe38db1142a4a93471bc15d642e40c3db6 Mon Sep 17 00:00:00 2001 From: Jordan Pittier Date: Wed, 8 May 2013 19:14:29 +0200 Subject: [PATCH 0084/4704] Stop quantum-ns-metadata-proxy process on unstack Change function stop_quantum() in lib/quantum to also kill quantum-ns-metadata-proxy process Fix Bug #1175658 Change-Id: I9e8f8437817cc645ab15eecfea1a134ea5ac13f2 --- lib/quantum | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/quantum b/lib/quantum index 293ef3af3c..dfd73e99f4 100644 --- a/lib/quantum +++ b/lib/quantum @@ -397,6 +397,10 @@ function stop_quantum() { pid=$(ps aux | awk '/[d]nsmasq.+interface=(tap|ns-)/ { print $2 }') [ ! 
-z "$pid" ] && sudo kill -9 $pid fi + if is_service_enabled q-meta; then + pid=$(ps aux | awk '/quantum-ns-metadata-proxy/ { print $2 }') + [ ! -z "$pid" ] && sudo kill -9 $pid + fi } # cleanup_quantum() - Remove residual data files, anything left over from previous From b2ef890db3d78b24f9da2f4dd80502165c669ad0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=89milien=20Macchi?= Date: Sat, 4 May 2013 00:48:20 +0200 Subject: [PATCH 0085/4704] Add Debian OS support in DevStack Fix bug #1176216 Change-Id: Ia94c332f02a921a936db249dc3b4a7ae4eff0400 --- files/apts/n-cpu | 2 +- functions | 12 ++++++++---- lib/baremetal | 2 +- lib/ceilometer | 2 +- lib/nova | 18 +++++++++--------- stack.sh | 11 ++++++++++- stackrc | 5 +++++ 7 files changed, 35 insertions(+), 17 deletions(-) diff --git a/files/apts/n-cpu b/files/apts/n-cpu index ef281cad0e..88e0144079 100644 --- a/files/apts/n-cpu +++ b/files/apts/n-cpu @@ -2,7 +2,7 @@ nbd-client lvm2 open-iscsi -open-iscsi-utils +open-iscsi-utils # Deprecated since quantal dist:lucid,oneiric,precise genisoimage sysfsutils sg3-utils diff --git a/functions b/functions index 02c2b3a9c3..0b2710ca77 100644 --- a/functions +++ b/functions @@ -380,6 +380,12 @@ GetOSVersion() { os_VENDOR="" done os_PACKAGE="rpm" + # If lsb_release is not installed, we should be able to detect Debian OS + elif [[ -f /etc/debian_version ]] && [[ $(cat /proc/version) =~ "Debian" ]]; then + os_VENDOR="Debian" + os_PACKAGE="deb" + os_CODENAME=$(awk '/VERSION=/' /etc/os-release | sed 's/VERSION=//' | sed -r 's/\"|\(|\)//g' | awk '{print $2}') + os_RELEASE=$(awk '/VERSION_ID=/' /etc/os-release | sed 's/VERSION_ID=//' | sed 's/\"//g') fi export os_VENDOR os_RELEASE os_UPDATE os_PACKAGE os_CODENAME } @@ -425,8 +431,8 @@ function git_update_remote_branch() { # Sets ``DISTRO`` from the ``os_*`` values function GetDistro() { GetOSVersion - if [[ "$os_VENDOR" =~ (Ubuntu) ]]; then - # 'Everyone' refers to Ubuntu releases by the code name adjective + if [[ "$os_VENDOR" =~ (Ubuntu) 
|| "$os_VENDOR" =~ (Debian) ]]; then + # 'Everyone' refers to Ubuntu / Debian releases by the code name adjective DISTRO=$os_CODENAME elif [[ "$os_VENDOR" =~ (Fedora) ]]; then # For Fedora, just use 'f' and the release @@ -459,11 +465,9 @@ function is_ubuntu { if [[ -z "$os_PACKAGE" ]]; then GetOSVersion fi - [ "$os_PACKAGE" = "deb" ] } - # Determine if current distribution is a Fedora-based distribution # (Fedora, RHEL, CentOS). # is_fedora diff --git a/lib/baremetal b/lib/baremetal index 8658c3aa17..bed3c093c3 100644 --- a/lib/baremetal +++ b/lib/baremetal @@ -204,7 +204,7 @@ function configure_baremetal_nova_dirs() { sudo mkdir -p /tftpboot sudo mkdir -p /tftpboot/pxelinux.cfg sudo cp /usr/lib/syslinux/pxelinux.0 /tftpboot/ - sudo chown -R $STACK_USER:libvirtd /tftpboot + sudo chown -R $STACK_USER:$LIBVIRT_GROUP /tftpboot # ensure $NOVA_STATE_PATH/baremetal is prepared sudo mkdir -p $NOVA_STATE_PATH/baremetal diff --git a/lib/ceilometer b/lib/ceilometer index 6b110cbb0c..1c289fd471 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -125,7 +125,7 @@ function install_ceilometerclient() { # start_ceilometer() - Start running processes, including screen function start_ceilometer() { - screen_it ceilometer-acompute "cd $CEILOMETER_DIR && sg libvirtd \"$CEILOMETER_BIN_DIR/ceilometer-agent-compute --config-file $CEILOMETER_CONF\"" + screen_it ceilometer-acompute "cd $CEILOMETER_DIR && sg $LIBVIRT_GROUP \"$CEILOMETER_BIN_DIR/ceilometer-agent-compute --config-file $CEILOMETER_CONF\"" screen_it ceilometer-acentral "cd $CEILOMETER_DIR && $CEILOMETER_BIN_DIR/ceilometer-agent-central --config-file $CEILOMETER_CONF" screen_it ceilometer-collector "cd $CEILOMETER_DIR && $CEILOMETER_BIN_DIR/ceilometer-collector --config-file $CEILOMETER_CONF" screen_it ceilometer-api "cd $CEILOMETER_DIR && $CEILOMETER_BIN_DIR/ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF" diff --git a/lib/nova b/lib/nova index 3a59681ba6..6fa1db4fdb 100644 --- a/lib/nova 
+++ b/lib/nova @@ -298,14 +298,14 @@ EOF if is_fedora || is_suse; then if is_fedora && [[ $DISTRO =~ (rhel6) || "$os_RELEASE" -le "17" ]]; then - sudo bash -c 'cat </etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla + sudo bash -c "cat </etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla [libvirt Management Access] -Identity=unix-group:libvirtd +Identity=unix-group:$LIBVIRT_GROUP Action=org.libvirt.unix.manage ResultAny=yes ResultInactive=yes ResultActive=yes -EOF' +EOF" elif is_suse && [[ $os_RELEASE = 12.2 || "$os_VENDOR" = "SUSE LINUX" ]]; then # openSUSE < 12.3 or SLE # Work around the fact that polkit-default-privs overrules pklas @@ -338,10 +338,10 @@ EOF" # The user that nova runs as needs to be member of **libvirtd** group otherwise # nova-compute will be unable to use libvirt. - if ! getent group libvirtd >/dev/null; then - sudo groupadd libvirtd + if ! getent group $LIBVIRT_GROUP >/dev/null; then + sudo groupadd $LIBVIRT_GROUP fi - add_user_to_group $STACK_USER libvirtd + add_user_to_group $STACK_USER $LIBVIRT_GROUP # libvirt detects various settings on startup, as we potentially changed # the system configuration (modules, filesystems), we need to restart @@ -648,11 +648,11 @@ function start_nova_api() { # start_nova() - Start running processes, including screen function start_nova() { - # The group **libvirtd** is added to the current user in this script. - # Use 'sg' to execute nova-compute as a member of the **libvirtd** group. + # The group **$LIBVIRT_GROUP** is added to the current user in this script. + # Use 'sg' to execute nova-compute as a member of the **$LIBVIRT_GROUP** group. 
# ``screen_it`` checks ``is_service_enabled``, it is not needed here screen_it n-cond "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-conductor" - screen_it n-cpu "cd $NOVA_DIR && sg libvirtd $NOVA_BIN_DIR/nova-compute" + screen_it n-cpu "cd $NOVA_DIR && sg $LIBVIRT_GROUP $NOVA_BIN_DIR/nova-compute" screen_it n-crt "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cert" screen_it n-net "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-network" screen_it n-sch "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-scheduler" diff --git a/stack.sh b/stack.sh index 32a7d747ba..40a068f722 100755 --- a/stack.sh +++ b/stack.sh @@ -32,6 +32,15 @@ source $TOP_DIR/functions # and ``DISTRO`` GetDistro +# Some dependencies are not available in Debian Wheezy official +# repositories. However, it's possible to run OpenStack from gplhost +# repository. +if [[ "$os_VENDOR" =~ (Debian) ]]; then + echo 'deb http://archive.gplhost.com/debian grizzly main' | sudo tee /etc/apt/sources.list.d/gplhost_wheezy-backports.list + echo 'deb http://archive.gplhost.com/debian grizzly-backports main' | sudo tee -a /etc/apt/sources.list.d/gplhost_wheezy-backports.list + apt_get update + apt_get install --force-yes gplhost-archive-keyring +fi # Global Settings # =============== @@ -105,7 +114,7 @@ disable_negated_services # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -if [[ ! ${DISTRO} =~ (oneiric|precise|quantal|raring|saucy|f16|f17|f18|opensuse-12.2|rhel6) ]]; then +if [[ ! 
${DISTRO} =~ (oneiric|precise|quantal|raring|saucy|7.0|wheezy|sid|testing|jessie|f16|f17|f18|opensuse-12.2|rhel6) ]]; then echo "WARNING: this script has not been tested on $DISTRO" if [[ "$FORCE" != "yes" ]]; then die $LINENO "If you wish to run this script anyway run with FORCE=yes" diff --git a/stackrc b/stackrc index f99eab1852..6d6f7bf19c 100644 --- a/stackrc +++ b/stackrc @@ -162,6 +162,11 @@ BM_POSEUR_BRANCH=${BM_POSEUR_BRANCH:-master} # also install an **LXC** or **OpenVZ** based system. VIRT_DRIVER=${VIRT_DRIVER:-libvirt} LIBVIRT_TYPE=${LIBVIRT_TYPE:-kvm} +if [[ "$os_VENDOR" =~ (Debian) ]]; then + LIBVIRT_GROUP=libvirt +else + LIBVIRT_GROUP=libvirtd +fi # Specify a comma-separated list of UEC images to download and install into glance. # supported urls here are: From 5595fdc2ac1437eb669a50aad1861a3ef6f69750 Mon Sep 17 00:00:00 2001 From: zhhuabj Date: Wed, 8 May 2013 18:27:20 +0800 Subject: [PATCH 0086/4704] Update the package name of qpid in the fedora platform The package name of qpid in default yum repository is qpid-cpp-server, not qpid-cpp-server-daemon. 
Fix bug 1177731 Change-Id: I4412029966583f5ef5a5a4cc80e7fdc4771c8eca --- files/rpms/nova | 2 +- files/rpms/quantum | 3 +-- lib/rpc_backend | 11 ++--------- 3 files changed, 4 insertions(+), 12 deletions(-) diff --git a/files/rpms/nova b/files/rpms/nova index 328e7d6bed..c74f3963d5 100644 --- a/files/rpms/nova +++ b/files/rpms/nova @@ -40,7 +40,7 @@ python-sqlalchemy python-suds python-tempita rabbitmq-server # NOPRIME -qpid-cpp-server-daemon # NOPRIME +qpid-cpp-server # NOPRIME sqlite sudo vconfig diff --git a/files/rpms/quantum b/files/rpms/quantum index 450e39cdc8..32c6f626dc 100644 --- a/files/rpms/quantum +++ b/files/rpms/quantum @@ -18,8 +18,7 @@ python-routes python-sqlalchemy python-suds rabbitmq-server # NOPRIME -qpid-cpp-server-daemon # NOPRIME dist:f16,f17,f18 -qpid-cpp-server # NOPRIME dist:rhel6 +qpid-cpp-server # NOPRIME sqlite sudo vconfig diff --git a/lib/rpc_backend b/lib/rpc_backend index 3c485e42c7..27d3ba3364 100644 --- a/lib/rpc_backend +++ b/lib/rpc_backend @@ -70,11 +70,7 @@ function cleanup_rpc_backend { fi elif is_service_enabled qpid; then if is_fedora; then - if [[ $DISTRO =~ (rhel6) ]]; then - uninstall_package qpid-cpp-server - else - uninstall_package qpid-cpp-server-daemon - fi + uninstall_package qpid-cpp-server elif is_ubuntu; then uninstall_package qpidd else @@ -104,15 +100,12 @@ function install_rpc_backend() { rm -f "$tfile" elif is_service_enabled qpid; then if is_fedora; then + install_package qpid-cpp-server if [[ $DISTRO =~ (rhel6) ]]; then - install_package qpid-cpp-server - # RHEL6 leaves "auth=yes" in /etc/qpidd.conf, it needs to # be no or you get GSS authentication errors as it # attempts to default to this. 
sudo sed -i.bak 's/^auth=yes$/auth=no/' /etc/qpidd.conf - else - install_package qpid-cpp-server-daemon fi elif is_ubuntu; then install_package qpidd From 7c025fedc32c48f4fe7c4d3edc6c2d3674f69d86 Mon Sep 17 00:00:00 2001 From: armando-migliaccio Date: Wed, 8 May 2013 11:33:07 -0700 Subject: [PATCH 0087/4704] Check quantum status dependent on the plugin There are Quantum plugins that do not require the use of an agent. This patch relaxes the constraint in quantum-adv-test.sh by adding the ability to customize the logic that verifies whether the test can run or not, depending on the Quantum plugin of your choice. Fixes bug #1177904 Change-Id: I6e2c17f43d9e1475b2eb175cceb5107a83f9aa74 --- exercises/quantum-adv-test.sh | 9 ++++----- lib/quantum_plugins/README.md | 2 ++ lib/quantum_plugins/bigswitch_floodlight | 4 ++++ lib/quantum_plugins/brocade | 4 ++++ lib/quantum_plugins/linuxbridge | 4 ++++ lib/quantum_plugins/nec | 4 ++++ lib/quantum_plugins/nicira | 4 ++++ lib/quantum_plugins/openvswitch | 4 ++++ lib/quantum_plugins/plumgrid | 3 +++ lib/quantum_plugins/ryu | 4 ++++ 10 files changed, 37 insertions(+), 5 deletions(-) diff --git a/exercises/quantum-adv-test.sh b/exercises/quantum-adv-test.sh index fbb1b779f2..34f4f62312 100755 --- a/exercises/quantum-adv-test.sh +++ b/exercises/quantum-adv-test.sh @@ -43,13 +43,12 @@ source $TOP_DIR/functions # Import configuration source $TOP_DIR/openrc -# If quantum is not enabled we exit with exitcode 55 which mean -# exercise is skipped. -is_service_enabled quantum && is_service_enabled q-agt && is_service_enabled q-dhcp || exit 55 - -# Import quantum fucntions +# Import quantum functions source $TOP_DIR/lib/quantum +# If quantum is not enabled we exit with exitcode 55, which means exercise is skipped. 
+quantum_plugin_check_adv_test_requirements || exit 55 + # Import exercise configuration source $TOP_DIR/exerciserc diff --git a/lib/quantum_plugins/README.md b/lib/quantum_plugins/README.md index 05bfb85125..e8299409cb 100644 --- a/lib/quantum_plugins/README.md +++ b/lib/quantum_plugins/README.md @@ -34,3 +34,5 @@ functions * ``quantum_plugin_setup_interface_driver`` * ``has_quantum_plugin_security_group``: return 0 if the plugin support quantum security group otherwise return 1 +* ``quantum_plugin_check_adv_test_requirements``: + return 0 if requirements are satisfied otherwise return 1 diff --git a/lib/quantum_plugins/bigswitch_floodlight b/lib/quantum_plugins/bigswitch_floodlight index 4857f49569..edee0eb748 100644 --- a/lib/quantum_plugins/bigswitch_floodlight +++ b/lib/quantum_plugins/bigswitch_floodlight @@ -56,5 +56,9 @@ function has_quantum_plugin_security_group() { return 1 } +function quantum_plugin_check_adv_test_requirements() { + is_service_enabled q-agt && is_service_enabled q-dhcp && return 0 +} + # Restore xtrace $MY_XTRACE diff --git a/lib/quantum_plugins/brocade b/lib/quantum_plugins/brocade index 6e26ad7842..fc86debb90 100644 --- a/lib/quantum_plugins/brocade +++ b/lib/quantum_plugins/brocade @@ -50,5 +50,9 @@ function has_quantum_plugin_security_group() { return 0 } +function quantum_plugin_check_adv_test_requirements() { + is_service_enabled q-agt && is_service_enabled q-dhcp && return 0 +} + # Restore xtrace $BRCD_XTRACE diff --git a/lib/quantum_plugins/linuxbridge b/lib/quantum_plugins/linuxbridge index 324e255231..b4b52e9c57 100644 --- a/lib/quantum_plugins/linuxbridge +++ b/lib/quantum_plugins/linuxbridge @@ -86,5 +86,9 @@ function has_quantum_plugin_security_group() { return 0 } +function quantum_plugin_check_adv_test_requirements() { + is_service_enabled q-agt && is_service_enabled q-dhcp && return 0 +} + # Restore xtrace $MY_XTRACE diff --git a/lib/quantum_plugins/nec b/lib/quantum_plugins/nec index f61f50bba5..4a2a49767a 100644 --- 
a/lib/quantum_plugins/nec +++ b/lib/quantum_plugins/nec @@ -118,5 +118,9 @@ function has_quantum_plugin_security_group() { return 0 } +function quantum_plugin_check_adv_test_requirements() { + is_service_enabled q-agt && is_service_enabled q-dhcp && return 0 +} + # Restore xtrace $MY_XTRACE diff --git a/lib/quantum_plugins/nicira b/lib/quantum_plugins/nicira index 6eefb022ec..305c3bfa7d 100644 --- a/lib/quantum_plugins/nicira +++ b/lib/quantum_plugins/nicira @@ -146,5 +146,9 @@ function has_quantum_plugin_security_group() { return 0 } +function quantum_plugin_check_adv_test_requirements() { + is_service_enabled q-dhcp && return 0 +} + # Restore xtrace $MY_XTRACE diff --git a/lib/quantum_plugins/openvswitch b/lib/quantum_plugins/openvswitch index ab16483452..6293257c96 100644 --- a/lib/quantum_plugins/openvswitch +++ b/lib/quantum_plugins/openvswitch @@ -144,5 +144,9 @@ function has_quantum_plugin_security_group() { return 0 } +function quantum_plugin_check_adv_test_requirements() { + is_service_enabled q-agt && is_service_enabled q-dhcp && return 0 +} + # Restore xtrace $MY_XTRACE diff --git a/lib/quantum_plugins/plumgrid b/lib/quantum_plugins/plumgrid index 912aa7ed80..14567104ed 100644 --- a/lib/quantum_plugins/plumgrid +++ b/lib/quantum_plugins/plumgrid @@ -35,5 +35,8 @@ function quantum_plugin_configure_debug_command() { : } +function quantum_plugin_check_adv_test_requirements() { + is_service_enabled q-agt && is_service_enabled q-dhcp && return 0 +} # Restore xtrace $MY_XTRACE diff --git a/lib/quantum_plugins/ryu b/lib/quantum_plugins/ryu index 113923235c..1b039dc950 100644 --- a/lib/quantum_plugins/ryu +++ b/lib/quantum_plugins/ryu @@ -71,5 +71,9 @@ function has_quantum_plugin_security_group() { return 0 } +function quantum_plugin_check_adv_test_requirements() { + is_service_enabled q-agt && is_service_enabled q-dhcp && return 0 +} + # Restore xtrace $MY_XTRACE From cabc60cc0ec0c8af68e059fb136bbf3b59bfbd2f Mon Sep 17 00:00:00 2001 From: armando-migliaccio 
Date: Thu, 9 May 2013 11:33:16 -0700 Subject: [PATCH 0088/4704] Add third-party support for Quantum NVP plugin This patch leverages the third-party mechanism provided by DevStack to customize the configuration of DevStack instances when working with the Quantum NVP plugin. This is useful in dev/test scenarios, where connectivity between the DevStack VM and the NVP Gateway is required. Supports blueprint nvp-third-party-support Change-Id: I3f5afa5de1219f491e37c8b9b28370855d6b017c --- lib/quantum_thirdparty/nicira | 52 +++++++++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) create mode 100644 lib/quantum_thirdparty/nicira diff --git a/lib/quantum_thirdparty/nicira b/lib/quantum_thirdparty/nicira new file mode 100644 index 0000000000..5a20934a1b --- /dev/null +++ b/lib/quantum_thirdparty/nicira @@ -0,0 +1,52 @@ +# Nicira NVP +# ---------- + +# This third-party addition can be used to configure connectivity between a DevStack instance +# and an NVP Gateway in dev/test environments. In order to use this correctly, the following +# env variables need to be set (e.g. in your localrc file): +# +# * enable_service nicira --> to execute this third-party addition +# * PUBLIC_BRIDGE --> bridge used for external connectivity, typically br-ex +# * NVP_GATEWAY_NETWORK_INTERFACE --> interface used to communicate with the NVP Gateway +# * NVP_GATEWAY_NETWORK_CIDR --> CIDR to configure br-ex, e.g. 172.24.4.211/24 + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + +# This is the interface that connects the Devstack instance +# to an network that allows it to talk to the gateway for +# testing purposes +NVP_GATEWAY_NETWORK_INTERFACE=${NVP_GATEWAY_NETWORK_INTERFACE:-eth2} + +function configure_nicira() { + : +} + +function init_nicira() { + die_if_not_set $LINENO NVP_GATEWAY_NETWORK_CIDR "Please, specify CIDR for the gateway network interface." 
+ # Make sure the interface is up, but not configured + sudo ifconfig $NVP_GATEWAY_NETWORK_INTERFACE up + sudo ip addr flush $NVP_GATEWAY_NETWORK_INTERFACE + # Use the PUBLIC Bridge to route traffic to the NVP gateway + # NOTE(armando-migliaccio): if running in a nested environment this will work + # only with mac learning enabled, portsecurity and security profiles disabled + sudo ovs-vsctl -- --may-exist add-port $PUBLIC_BRIDGE $NVP_GATEWAY_NETWORK_INTERFACE + nvp_gw_net_if_mac=$(ip link show $NVP_GATEWAY_NETWORK_INTERFACE | awk '/ether/ {print $2}') + sudo ifconfig $PUBLIC_BRIDGE $NVP_GATEWAY_NETWORK_CIDR hw ether $nvp_gw_net_if_mac +} + +function install_nicira() { + : +} + +function start_nicira() { + : +} + +function stop_nicira() { + : +} + +# Restore xtrace +$MY_XTRACE From 053dafe6dee084d0caf5cf49836263c00166066c Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Fri, 3 May 2013 13:08:52 +1000 Subject: [PATCH 0089/4704] Disable selinux for RHEL6 selinux locks down httpd which causes various issues for Horizon working from git checkouts. Dealing with selinux is more a deployment than a development issue, so we just disable it for devstack. Change-Id: I1e61f34e812360dafacd3b3288e66bc9cc5f1648 Fixes: 1175444 --- stack.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/stack.sh b/stack.sh index 5a6945df90..e5b2b2703d 100755 --- a/stack.sh +++ b/stack.sh @@ -560,6 +560,10 @@ fi # ============================ if [[ is_fedora && $DISTRO =~ (rhel6) ]]; then + # Avoid having to configure selinux to allow things like httpd to + # access horizion files or run binaries like nodejs (LP#1175444) + sudo setenforce 0 + # An old version (2.0.1) of python-crypto is probably installed on # a fresh system, via the dependency chain # cas->python-paramiko->python-crypto (related to anaconda). 
From 1e4587ef99cef970ef881ebf2ee2e65f9e939f34 Mon Sep 17 00:00:00 2001 From: JordanP Date: Wed, 8 May 2013 22:19:59 +0200 Subject: [PATCH 0090/4704] Clean up horizon apache logs on unstack It feels like the right thing to do. Moreover it will avoid displaying horizon errors from a previous devstack run that would otherwise show up because of the tail -f in start_horizon() Change-Id: Id1ab272e60e0733a4feef3b85029f438f2273cb0 --- lib/horizon | 31 ++++++++++++++++++------------- 1 file changed, 18 insertions(+), 13 deletions(-) diff --git a/lib/horizon b/lib/horizon index 3d8b3e6d1f..1ee530ecff 100644 --- a/lib/horizon +++ b/lib/horizon @@ -38,6 +38,18 @@ HORIZON_SETTINGS=${HORIZON_SETTINGS:-$HORIZON_DIR/openstack_dashboard/local/loca APACHE_USER=${APACHE_USER:-$USER} APACHE_GROUP=${APACHE_GROUP:-$(id -gn $APACHE_USER)} +# Set up service name and configuration path +if is_ubuntu; then + APACHE_NAME=apache2 + APACHE_CONF=sites-available/horizon +elif is_fedora; then + APACHE_NAME=httpd + APACHE_CONF=conf.d/horizon.conf +elif is_suse; then + APACHE_NAME=apache2 + APACHE_CONF=vhosts.d/horizon.conf +fi + # Functions # --------- @@ -135,8 +147,6 @@ function init_horizon() { HORIZON_REQUIRE='' if is_ubuntu; then - APACHE_NAME=apache2 - APACHE_CONF=sites-available/horizon # Clean up the old config name sudo rm -f /etc/apache2/sites-enabled/000-default # Be a good citizen and use the distro tools here @@ -145,9 +155,6 @@ function init_horizon() { # WSGI isn't enabled by default, enable it sudo a2enmod wsgi elif is_fedora; then - APACHE_NAME=httpd - APACHE_CONF=conf.d/horizon.conf - if [[ "$os_RELEASE" -ge "18" ]]; then # fedora 18 has Require all denied in its httpd.conf # and requires explicit Require all granted @@ -155,14 +162,16 @@ function init_horizon() { fi sudo sed '/^Listen/s/^.*$/Listen 0.0.0.0:80/' -i /etc/httpd/conf/httpd.conf elif is_suse; then - APACHE_NAME=apache2 - APACHE_CONF=vhosts.d/horizon.conf # WSGI isn't enabled by default, enable it sudo a2enmod wsgi else 
exit_distro_not_supported "apache configuration" fi + # Remove old log files that could mess with how devstack detects whether Horizon + # has been successfully started (see start_horizon() and functions::screen_it()) + sudo rm -f /var/log/$APACHE_NAME/horizon_* + # Configure apache to run horizon sudo sh -c "sed -e \" s,%USER%,$APACHE_USER,g; @@ -219,12 +228,8 @@ function start_horizon() { # stop_horizon() - Stop running processes (non-screen) function stop_horizon() { - if is_ubuntu; then - stop_service apache2 - elif is_fedora; then - stop_service httpd - elif is_suse; then - stop_service apache2 + if [ -n "$APACHE_NAME" ]; then + stop_service $APACHE_NAME else exit_distro_not_supported "apache configuration" fi From 701eb61931d95d00c3618216ed7998d7c74e0837 Mon Sep 17 00:00:00 2001 From: Gordon Chung Date: Fri, 10 May 2013 10:45:50 -0400 Subject: [PATCH 0091/4704] ceilometer setup incorrectly sets notification_topics drop glance_notifications from notification_topics in ceilometer setup Change-Id: Ib18b4193af9df925b4ce4dbe3faf4c6a756ed03f Fixes:Bug #1178704 --- lib/ceilometer | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ceilometer b/lib/ceilometer index 1c289fd471..b165a8f444 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -70,7 +70,7 @@ function configure_ceilometer() { iniset_rpc_backend ceilometer $CEILOMETER_CONF DEFAULT - iniset $CEILOMETER_CONF DEFAULT notification_topics 'notifications,glance_notifications' + iniset $CEILOMETER_CONF DEFAULT notification_topics 'notifications' iniset $CEILOMETER_CONF DEFAULT verbose True # Install the policy file for the API server From 0b3804bff7899211d3a80de5f7f22d3bc616f3bc Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Tue, 7 May 2013 16:58:17 +0100 Subject: [PATCH 0092/4704] xenapi - cleanup Add error on unitialised variable to the scripts. This way some issues were identified. Also modify README with fresh variables. 
The patch includes: - Add SWIFT_HASH to the README - Add XENAPI_CONNECTION_URL to the README - Add VNCSERVER_PROXYCLIENT_ADDRESS to the README - Introduce UBUNTU_INST_IFACE which is the OpenStack VM interface used for the netinstall. It defaults to eth3. Previously this parameter was set by the combination of HOST_IP_IFACE and the undocumented NETINSTALL_IP - get rid of NETINSTALL_IP - xenrc includes CLEAN_TEMPLATES - xenrc no longer tries to change directory - remove chrooting from prepare_guest.sh (STAGING_DIR was always / ) - remove DO_TGZ variable from prepare_guest.sh - use arguments to call prepare_guest.sh, instead of env vars - Fix backslash escaping in prepare_guest_template.sh NOTE: networking is about to be addressed in a separate change. Related to blueprint xenapi-devstack-cleanup Change-Id: Ie9a75321c7f41cc9a0cc051398d1e6ec2c88adfa --- tools/xen/README.md | 56 ++++++++-------- tools/xen/install_os_domU.sh | 9 +-- tools/xen/prepare_guest.sh | 68 +++++++++----------- tools/xen/prepare_guest_template.sh | 10 +-- tools/xen/scripts/install_ubuntu_template.sh | 7 +- tools/xen/xenrc | 11 ++-- 6 files changed, 75 insertions(+), 86 deletions(-) diff --git a/tools/xen/README.md b/tools/xen/README.md index 3fadc7839c..258d7a32a7 100644 --- a/tools/xen/README.md +++ b/tools/xen/README.md @@ -1,5 +1,4 @@ -Getting Started With XenServer 5.6 and Devstack -=============================================== +# Getting Started With XenServer 5.6 and Devstack The purpose of the code in this directory it to help developers bootstrap a XenServer 5.6 (or greater) + Openstack development environment. This file gives some pointers on how to get started. @@ -9,8 +8,7 @@ The Openstack services are configured to run within a "privileged" virtual machine on the Xenserver host (called OS domU). The VM uses the XAPI toolstack to communicate with the host. -Step 1: Install Xenserver ------------------------- +## Step 1: Install Xenserver Install XenServer 5.6+ on a clean box. 
You can get XenServer by signing up for an account on citrix.com, and then visiting: https://www.citrix.com/English/ss/downloads/details.asp?downloadId=2311504&productId=683148 @@ -25,16 +23,14 @@ getting started (Settings like this have been used with a laptop + cheap wifi ro * XenServer Gateway: 192.168.1.1 * XenServer DNS: 192.168.1.1 -Step 2: Download devstack --------------------------- +## Step 2: Download devstack On your XenServer host, run the following commands as root: wget --no-check-certificate https://github.com/openstack-dev/devstack/zipball/master unzip -o master -d ./devstack cd devstack/*/ -Step 3: Configure your localrc inside the devstack directory ------------------------------------------------------------- +## Step 3: Configure your localrc inside the devstack directory Devstack uses a localrc for user-specific configuration. Note that the XENAPI_PASSWORD must be your dom0 root password. Of course, use real passwords if this machine is exposed. @@ -43,12 +39,18 @@ Of course, use real passwords if this machine is exposed. MYSQL_PASSWORD=my_super_secret SERVICE_TOKEN=my_super_secret ADMIN_PASSWORD=my_super_secret - SERVICE_PASSWORD=$ADMIN_PASSWORD + SERVICE_PASSWORD=my_super_secret RABBIT_PASSWORD=my_super_secret - # This is the password for your guest (for both stack and root users) + SWIFT_HASH="66a3d6b56c1f479c8b4e70ab5c2000f5" + # This is the password for the OpenStack VM (for both stack and root users) GUEST_PASSWORD=my_super_secret + + # XenAPI parameters # IMPORTANT: The following must be set to your dom0 root password! - XENAPI_PASSWORD=my_super_secret + XENAPI_PASSWORD=my_xenserver_root_password + XENAPI_CONNECTION_URL="http://address_of_your_xenserver" + VNCSERVER_PROXYCLIENT_ADDRESS=address_of_your_xenserver + # Do not download the usual images yet! IMAGE_URLS="" # Explicitly set virt driver here @@ -60,34 +62,32 @@ Of course, use real passwords if this machine is exposed. # Host Interface, i.e. 
the interface on the nova vm you want to expose the # services on. Usually eth2 (management network) or eth3 (public network) and # not eth0 (private network with XenServer host) or eth1 (VM traffic network) - # This is also used as the interface for the Ubuntu install # The default is eth3. # HOST_IP_IFACE=eth3 + + # Settings for netinstalling Ubuntu + # UBUNTU_INST_RELEASE=precise + # First time Ubuntu network install params - NETINSTALLIP="dhcp" - NAMESERVERS="" - NETMASK="" - GATEWAY="" + # UBUNTU_INST_IFACE="eth3" + # UBUNTU_INST_IP="dhcp" EOF -Step 4: Run ./install_os_domU.sh from the tools/xen directory -------------------------------------------------------------- -cd tools/xen -./install_os_domU.sh +## Step 4: Run `./install_os_domU.sh` from the `tools/xen` directory -Once this script finishes executing, log into the VM (openstack domU) -that it installed and tail the run.sh.log file. You will need to wait -until it run.sh has finished executing. + cd tools/xen + ./install_os_domU.sh +Once this script finishes executing, log into the VM (openstack domU) that it +installed and tail the run.sh.log file. You will need to wait until it run.sh +has finished executing. -Step 5: Do cloudy stuff! --------------------------- +## Step 5: Do cloudy stuff! * Play with horizon * Play with the CLI * Log bugs to devstack and core projects, and submit fixes! 
-Step 6: Run from snapshot -------------------------- +## Step 6: Run from snapshot If you want to quicky re-run devstack from a clean state, using the same settings you used in your previous run, -you can revert the DomU to the snapshot called "before_first_boot" +you can revert the DomU to the snapshot called `before_first_boot` diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh index 7c3b839209..bcea939932 100755 --- a/tools/xen/install_os_domU.sh +++ b/tools/xen/install_os_domU.sh @@ -1,15 +1,13 @@ #!/bin/bash -# This script is a level script -# It must be run on a XenServer or XCP machine +# This script must be run on a XenServer or XCP machine # # It creates a DomU VM that runs OpenStack services # # For more details see: README.md -# Exit on errors set -o errexit -# Echo commands +set -o nounset set -o xtrace # Abort if localrc is not set @@ -31,13 +29,12 @@ THIS_DIR=$(cd $(dirname "$0") && pwd) # xapi functions . $THIS_DIR/functions - # # Get Settings # # Source params - override xenrc params in your localrc to suit your taste -source xenrc +source $THIS_DIR/xenrc xe_min() { diff --git a/tools/xen/prepare_guest.sh b/tools/xen/prepare_guest.sh index fe52445424..0e112263e2 100755 --- a/tools/xen/prepare_guest.sh +++ b/tools/xen/prepare_guest.sh @@ -10,54 +10,51 @@ # creating the user called "stack", # and shuts down the VM to signal the script has completed -set -x -# Echo commands +set -o errexit +set -o nounset set -o xtrace # Configurable nuggets -GUEST_PASSWORD=${GUEST_PASSWORD:-secrete} -STAGING_DIR=${STAGING_DIR:-stage} -DO_TGZ=${DO_TGZ:-1} -XS_TOOLS_PATH=${XS_TOOLS_PATH:-"/root/xs-tools.deb"} -STACK_USER=${STACK_USER:-stack} +GUEST_PASSWORD="$1" +XS_TOOLS_PATH="$2" +STACK_USER="$3" # Install basics -chroot $STAGING_DIR apt-get update -chroot $STAGING_DIR apt-get install -y cracklib-runtime curl wget ssh openssh-server tcpdump ethtool -chroot $STAGING_DIR apt-get install -y curl wget ssh openssh-server python-pip git vim-nox sudo 
-chroot $STAGING_DIR pip install xenapi +apt-get update +apt-get install -y cracklib-runtime curl wget ssh openssh-server tcpdump ethtool +apt-get install -y curl wget ssh openssh-server python-pip git vim-nox sudo +pip install xenapi # Install XenServer guest utilities -cp $XS_TOOLS_PATH ${STAGING_DIR}${XS_TOOLS_PATH} -chroot $STAGING_DIR dpkg -i $XS_TOOLS_PATH -chroot $STAGING_DIR update-rc.d -f xe-linux-distribution remove -chroot $STAGING_DIR update-rc.d xe-linux-distribution defaults +dpkg -i $XS_TOOLS_PATH +update-rc.d -f xe-linux-distribution remove +update-rc.d xe-linux-distribution defaults # Make a small cracklib dictionary, so that passwd still works, but we don't # have the big dictionary. -mkdir -p $STAGING_DIR/usr/share/cracklib -echo a | chroot $STAGING_DIR cracklib-packer +mkdir -p /usr/share/cracklib +echo a | cracklib-packer # Make /etc/shadow, and set the root password -chroot $STAGING_DIR "pwconv" -echo "root:$GUEST_PASSWORD" | chroot $STAGING_DIR chpasswd +pwconv +echo "root:$GUEST_PASSWORD" | chpasswd # Put the VPX into UTC. 
-rm -f $STAGING_DIR/etc/localtime +rm -f /etc/localtime # Add stack user -chroot $STAGING_DIR groupadd libvirtd -chroot $STAGING_DIR useradd $STACK_USER -s /bin/bash -d /opt/stack -G libvirtd -echo $STACK_USER:$GUEST_PASSWORD | chroot $STAGING_DIR chpasswd -echo "$STACK_USER ALL=(ALL) NOPASSWD: ALL" >> $STAGING_DIR/etc/sudoers +groupadd libvirtd +useradd $STACK_USER -s /bin/bash -d /opt/stack -G libvirtd +echo $STACK_USER:$GUEST_PASSWORD | chpasswd +echo "$STACK_USER ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers # Give ownership of /opt/stack to stack user -chroot $STAGING_DIR chown -R $STACK_USER /opt/stack +chown -R $STACK_USER /opt/stack # Make our ip address hostnames look nice at the command prompt -echo "export PS1='${debian_chroot:+($debian_chroot)}\\u@\\H:\\w\\$ '" >> $STAGING_DIR/opt/stack/.bashrc -echo "export PS1='${debian_chroot:+($debian_chroot)}\\u@\\H:\\w\\$ '" >> $STAGING_DIR/root/.bashrc -echo "export PS1='${debian_chroot:+($debian_chroot)}\\u@\\H:\\w\\$ '" >> $STAGING_DIR/etc/profile +echo "export PS1='${debian_chroot:+($debian_chroot)}\\u@\\H:\\w\\$ '" >> /opt/stack/.bashrc +echo "export PS1='${debian_chroot:+($debian_chroot)}\\u@\\H:\\w\\$ '" >> /root/.bashrc +echo "export PS1='${debian_chroot:+($debian_chroot)}\\u@\\H:\\w\\$ '" >> /etc/profile function setup_vimrc { if [ ! 
-e $1 ]; then @@ -72,20 +69,15 @@ EOF } # Setup simple .vimrcs -setup_vimrc $STAGING_DIR/root/.vimrc -setup_vimrc $STAGING_DIR/opt/stack/.vimrc - -if [ "$DO_TGZ" = "1" ]; then - # Compress - rm -f stage.tgz - tar cfz stage.tgz stage -fi +setup_vimrc /root/.vimrc +setup_vimrc /opt/stack/.vimrc # remove self from local.rc # so this script is not run again rm -rf /etc/rc.local -mv /etc/rc.local.preparebackup /etc/rc.local -cp $STAGING_DIR/etc/rc.local $STAGING_DIR/etc/rc.local.backup + +# Restore rc.local file +cp /etc/rc.local.preparebackup /etc/rc.local # shutdown to notify we are done shutdown -h now diff --git a/tools/xen/prepare_guest_template.sh b/tools/xen/prepare_guest_template.sh index 19bd2f841a..6ea6f6321d 100755 --- a/tools/xen/prepare_guest_template.sh +++ b/tools/xen/prepare_guest_template.sh @@ -15,9 +15,8 @@ # The resultant image is started by install_os_domU.sh, # and once the VM has shutdown, build_xva.sh is run -# Exit on errors set -o errexit -# Echo commands +set -o nounset set -o xtrace # This directory @@ -75,7 +74,8 @@ cp $STAGING_DIR/etc/rc.local $STAGING_DIR/etc/rc.local.preparebackup # run prepare_guest.sh on boot cat <$STAGING_DIR/etc/rc.local -GUEST_PASSWORD=$GUEST_PASSWORD STAGING_DIR=/ \ - DO_TGZ=0 XS_TOOLS_PATH=$XS_TOOLS_PATH \ - bash /opt/stack/prepare_guest.sh > /opt/stack/prepare_guest.log 2>&1 +#!/bin/sh -e +bash /opt/stack/prepare_guest.sh \\ + "$GUEST_PASSWORD" "$XS_TOOLS_PATH" "$STACK_USER" \\ + > /opt/stack/prepare_guest.log 2>&1 EOF diff --git a/tools/xen/scripts/install_ubuntu_template.sh b/tools/xen/scripts/install_ubuntu_template.sh index 43b6decd90..00cabb038d 100755 --- a/tools/xen/scripts/install_ubuntu_template.sh +++ b/tools/xen/scripts/install_ubuntu_template.sh @@ -7,9 +7,8 @@ # Based on a script by: David Markey # -# Exit on errors set -o errexit -# Echo commands +set -o nounset set -o xtrace # This directory @@ -54,11 +53,11 @@ disk_size=$(($OSDOMU_VDI_GB * 1024 * 1024 * 1024)) pvargs="-- quiet console=hvc0 
partman/default_filesystem=ext3 \ console-setup/ask_detect=false locale=${UBUNTU_INST_LOCALE} \ keyboard-configuration/layoutcode=${UBUNTU_INST_KEYBOARD} \ -netcfg/choose_interface=${HOST_IP_IFACE} \ +netcfg/choose_interface=${UBUNTU_INST_IFACE} \ netcfg/get_hostname=os netcfg/get_domain=os auto \ url=${preseed_url}" -if [ "$NETINSTALLIP" != "dhcp" ]; then +if [ "$UBUNTU_INST_IP" != "dhcp" ]; then netcfgargs="netcfg/disable_autoconfig=true \ netcfg/get_nameservers=${UBUNTU_INST_NAMESERVERS} \ netcfg/get_ipaddress=${UBUNTU_INST_IP} \ diff --git a/tools/xen/xenrc b/tools/xen/xenrc index e4d8ac9161..1956623a81 100644 --- a/tools/xen/xenrc +++ b/tools/xen/xenrc @@ -8,6 +8,9 @@ # Name of this guest GUEST_NAME=${GUEST_NAME:-DevStackOSDomU} +# Template cleanup +CLEAN_TEMPLATES=${CLEAN_TEMPLATES:-false} + # Size of image VDI_MB=${VDI_MB:-5000} OSDOMU_MEM_MB=1024 @@ -19,7 +22,6 @@ GUEST_PASSWORD=${GUEST_PASSWORD:-secrete} # Host Interface, i.e. the interface on the nova vm you want to expose the # services on. Usually eth2 (management network) or eth3 (public network) and # not eth0 (private network with XenServer host) or eth1 (VM traffic network) -# This is also used as the interface for the Ubuntu install HOST_IP_IFACE=${HOST_IP_IFACE:-eth3} # @@ -65,12 +67,11 @@ UBUNTU_INST_ARCH="amd64" UBUNTU_INST_REPOSITORY="http://archive.ubuntu.net/ubuntu" UBUNTU_INST_LOCALE="en_US" UBUNTU_INST_KEYBOARD="us" -# network configuration for HOST_IP_IFACE during install +# network configuration for ubuntu netinstall +UBUNTU_INST_IFACE="eth3" UBUNTU_INST_IP="dhcp" UBUNTU_INST_NAMESERVERS="" UBUNTU_INST_NETMASK="" UBUNTU_INST_GATEWAY="" -# Load stackrc defaults -# then override with settings from localrc -cd ../.. 
&& source ./stackrc && cd $TOP_DIR +source ../../stackrc From 8644676846baa3025e8a071852d43c64ef2e12b0 Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Sun, 12 May 2013 18:34:29 +0100 Subject: [PATCH 0093/4704] xenapi - use management network to reach OS VM Devstack used the HOST_IP_IFACE to reach the OpenStack VM through ssh. This patch changes this behavior, so that the IP address of the interface connected to the management network will be used. Related to blueprint xenapi-devstack-cleanup Change-Id: I7f34d973870792d60a33ea512901d9b0d422150b --- tools/xen/functions | 28 ++++++++++++++++ tools/xen/install_os_domU.sh | 63 +++++++++++------------------------- 2 files changed, 46 insertions(+), 45 deletions(-) diff --git a/tools/xen/functions b/tools/xen/functions index a7d779841f..26ddb8be49 100644 --- a/tools/xen/functions +++ b/tools/xen/functions @@ -65,3 +65,31 @@ function get_local_sr { function get_local_sr_path { echo "/var/run/sr-mount/$(get_local_sr)" } + +function find_ip_by_name() { + local guest_name="$1" + local interface="$2" + + local period=10 + local max_tries=10 + local i=0 + + while true; do + if [ $i -ge $max_tries ]; then + echo "Timeout: ip address for interface $interface of $guest_name" + exit 11 + fi + + ipaddress=$(xe vm-list --minimal \ + name-label=$guest_name \ + params=networks | sed -ne "s,^.*${interface}/ip: \([0-9.]*\).*\$,\1,p") + + if [ -z "$ipaddress" ]; then + sleep $period + ((i++)) + else + echo $ipaddress + break + fi + done +} diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh index 7c3b839209..8d469391a0 100755 --- a/tools/xen/install_os_domU.sh +++ b/tools/xen/install_os_domU.sh @@ -313,53 +313,26 @@ xe vm-snapshot vm="$GUEST_NAME" new-name-label="$SNAME_FIRST_BOOT" # xe vm-start vm="$GUEST_NAME" - -# -# Find IP and optionally wait for stack.sh to complete -# - -function find_ip_by_name() { - local guest_name="$1" - local interface="$2" - local period=10 - max_tries=10 - i=0 - while true - do - if [ $i 
-ge $max_tries ]; then - echo "Timed out waiting for devstack ip address" - exit 11 - fi - - devstackip=$(xe vm-list --minimal \ - name-label=$guest_name \ - params=networks | sed -ne "s,^.*${interface}/ip: \([0-9.]*\).*\$,\1,p") - if [ -z "$devstackip" ] - then - sleep $period - ((i++)) - else - echo $devstackip - break - fi - done -} - function ssh_no_check() { ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no "$@" } -# Note the XenServer needs to be on the chosen -# network, so XenServer can access Glance API +# Get hold of the Management IP of OpenStack VM +OS_VM_MANAGEMENT_ADDRESS=$MGT_IP +if [ $OS_VM_MANAGEMENT_ADDRESS == "dhcp" ]; then + OS_VM_MANAGEMENT_ADDRESS=$(find_ip_by_name $GUEST_NAME 2) +fi + +# Get hold of the Service IP of OpenStack VM if [ $HOST_IP_IFACE == "eth2" ]; then - DOMU_IP=$MGT_IP + OS_VM_SERVICES_ADDRESS=$MGT_IP if [ $MGT_IP == "dhcp" ]; then - DOMU_IP=$(find_ip_by_name $GUEST_NAME 2) + OS_VM_SERVICES_ADDRESS=$(find_ip_by_name $GUEST_NAME 2) fi else - DOMU_IP=$PUB_IP + OS_VM_SERVICES_ADDRESS=$PUB_IP if [ $PUB_IP == "dhcp" ]; then - DOMU_IP=$(find_ip_by_name $GUEST_NAME 3) + OS_VM_SERVICES_ADDRESS=$(find_ip_by_name $GUEST_NAME 3) fi fi @@ -371,11 +344,11 @@ if [ "$WAIT_TILL_LAUNCH" = "1" ] && [ -e ~/.ssh/id_rsa.pub ] && [ "$COPYENV" = echo "VM Launched - Waiting for startup script" # wait for log to appear - while ! ssh_no_check -q stack@$DOMU_IP "[ -e run.sh.log ]"; do + while ! ssh_no_check -q stack@$OS_VM_MANAGEMENT_ADDRESS "[ -e run.sh.log ]"; do sleep 10 done echo -n "Running" - while [ `ssh_no_check -q stack@$DOMU_IP pgrep -c run.sh` -ge 1 ] + while [ `ssh_no_check -q stack@$OS_VM_MANAGEMENT_ADDRESS pgrep -c run.sh` -ge 1 ] do sleep 10 echo -n "." 
@@ -384,17 +357,17 @@ if [ "$WAIT_TILL_LAUNCH" = "1" ] && [ -e ~/.ssh/id_rsa.pub ] && [ "$COPYENV" = set -x # output the run.sh.log - ssh_no_check -q stack@$DOMU_IP 'cat run.sh.log' + ssh_no_check -q stack@$OS_VM_MANAGEMENT_ADDRESS 'cat run.sh.log' # Fail if the expected text is not found - ssh_no_check -q stack@$DOMU_IP 'cat run.sh.log' | grep -q 'stack.sh completed in' + ssh_no_check -q stack@$OS_VM_MANAGEMENT_ADDRESS 'cat run.sh.log' | grep -q 'stack.sh completed in' set +x echo "################################################################################" echo "" echo "All Finished!" echo "You can visit the OpenStack Dashboard" - echo "at http://$DOMU_IP, and contact other services at the usual ports." + echo "at http://$OS_VM_SERVICES_ADDRESS, and contact other services at the usual ports." else set +x echo "################################################################################" @@ -403,9 +376,9 @@ else echo "Now, you can monitor the progress of the stack.sh installation by " echo "tailing /opt/stack/run.sh.log from within your domU." echo "" - echo "ssh into your domU now: 'ssh stack@$DOMU_IP' using your password" + echo "ssh into your domU now: 'ssh stack@$OS_VM_MANAGEMENT_ADDRESS' using your password" echo "and then do: 'tail -f /opt/stack/run.sh.log'" echo "" echo "When the script completes, you can then visit the OpenStack Dashboard" - echo "at http://$DOMU_IP, and contact other services at the usual ports." + echo "at http://$OS_VM_SERVICES_ADDRESS, and contact other services at the usual ports." fi From fb2a3ae3cabe4fae3f6d9bc0d600807ff39e5e78 Mon Sep 17 00:00:00 2001 From: Kieran Spear Date: Mon, 11 Mar 2013 23:55:49 +0000 Subject: [PATCH 0094/4704] Basic cells support Adds support for running a region and child cell within a single devstack environment. README.md has been updated with some info on getting started. Rebased/updated from initial work by Andrew Laski . 
Change-Id: Ic181da2180ccaa51df7efc9d66f7ccb820aac19b --- README.md | 20 ++++++++++++++++ lib/nova | 64 ++++++++++++++++++++++++++++++++++++++++--------- lib/rpc_backend | 7 ++++++ stack.sh | 13 +++++++--- 4 files changed, 90 insertions(+), 14 deletions(-) diff --git a/README.md b/README.md index 6570a14351..1987db850e 100644 --- a/README.md +++ b/README.md @@ -153,3 +153,23 @@ You can then run many compute nodes, each of which should have a `stackrc` which MYSQL_HOST=$SERVICE_HOST RABBIT_HOST=$SERVICE_HOST Q_HOST=$SERVICE_HOST + +# Cells + +Cells is a new scaling option with a full spec at http://wiki.openstack.org/blueprint-nova-compute-cells. + +To setup a cells environment add the following to your `localrc`: + + enable_service n-cell + enable_service n-api-meta + MULTI_HOST=True + + # The following have not been tested with cells, they may or may not work. + disable_service n-obj + disable_service cinder + disable_service c-sch + disable_service c-api + disable_service c-vol + disable_service n-xvnc + +Be aware that there are some features currently missing in cells, one notable one being security groups. 
diff --git a/lib/nova b/lib/nova index 6fa1db4fdb..6fc0c7917a 100644 --- a/lib/nova +++ b/lib/nova @@ -37,6 +37,9 @@ NOVA_AUTH_CACHE_DIR=${NOVA_AUTH_CACHE_DIR:-/var/cache/nova} NOVA_CONF_DIR=/etc/nova NOVA_CONF=$NOVA_CONF_DIR/nova.conf +NOVA_CELLS_CONF=$NOVA_CONF_DIR/nova-cells.conf +NOVA_CELLS_DB=${NOVA_CELLS_DB:-nova_cell} + NOVA_API_PASTE_INI=${NOVA_API_PASTE_INI:-$NOVA_CONF_DIR/api-paste.ini} # Public facing bits @@ -125,10 +128,6 @@ TEST_FLOATING_RANGE=${TEST_FLOATING_RANGE:-192.168.253.0/29} # Functions # --------- -function add_nova_opt { - echo "$1" >>$NOVA_CONF -} - # Helper to clean iptables rules function clean_iptables() { # Delete rules @@ -415,7 +414,6 @@ function create_nova_conf() { # (Re)create ``nova.conf`` rm -f $NOVA_CONF - add_nova_opt "[DEFAULT]" iniset $NOVA_CONF DEFAULT verbose "True" iniset $NOVA_CONF DEFAULT debug "True" iniset $NOVA_CONF DEFAULT auth_strategy "keystone" @@ -539,6 +537,32 @@ function create_nova_conf() { iniset $NOVA_CONF DEFAULT glance_api_servers "$GLANCE_HOSTPORT" } +function init_nova_cells() { + if is_service_enabled n-cell; then + cp $NOVA_CONF $NOVA_CELLS_CONF + iniset $NOVA_CELLS_CONF DEFAULT sql_connection `database_connection_url $NOVA_CELLS_DB` + iniset $NOVA_CELLS_CONF DEFAULT rabbit_virtual_host child_cell + iniset $NOVA_CELLS_CONF DEFAULT dhcpbridge_flagfile $NOVA_CELLS_CONF + iniset $NOVA_CELLS_CONF cells enable True + iniset $NOVA_CELLS_CONF cells name child + + iniset $NOVA_CONF DEFAULT scheduler_topic cells + iniset $NOVA_CONF DEFAULT compute_api_class nova.compute.cells_api.ComputeCellsAPI + iniset $NOVA_CONF cells enable True + iniset $NOVA_CONF cells name region + + if is_service_enabled n-api-meta; then + NOVA_ENABLED_APIS=$(echo $NOVA_ENABLED_APIS | sed "s/,metadata//") + iniset $NOVA_CONF DEFAULT enabled_apis $NOVA_ENABLED_APIS + iniset $NOVA_CELLS_CONF DEFAULT enabled_apis metadata + fi + + $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CELLS_CONF db sync + $NOVA_BIN_DIR/nova-manage --config-file 
$NOVA_CELLS_CONF cell create --name=region --cell_type=parent --username=guest --hostname=$RABBIT_HOST --port=5672 --password=$RABBIT_PASSWORD --virtual_host=/ --woffset=0 --wscale=1 + $NOVA_BIN_DIR/nova-manage cell create --name=child --cell_type=child --username=guest --hostname=$RABBIT_HOST --port=5672 --password=$RABBIT_PASSWORD --virtual_host=child_cell --woffset=0 --wscale=1 + fi +} + # create_nova_cache_dir() - Part of the init_nova() process function create_nova_cache_dir() { # Create cache dir @@ -578,6 +602,10 @@ function init_nova() { # Migrate nova database $NOVA_BIN_DIR/nova-manage db sync + if is_service_enabled n-cell; then + recreate_database $NOVA_CELLS_DB latin1 + fi + # (Re)create nova baremetal database if is_baremetal; then recreate_database nova_bm latin1 @@ -648,14 +676,26 @@ function start_nova_api() { # start_nova() - Start running processes, including screen function start_nova() { - # The group **$LIBVIRT_GROUP** is added to the current user in this script. - # Use 'sg' to execute nova-compute as a member of the **$LIBVIRT_GROUP** group. + NOVA_CONF_BOTTOM=$NOVA_CONF + # ``screen_it`` checks ``is_service_enabled``, it is not needed here screen_it n-cond "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-conductor" - screen_it n-cpu "cd $NOVA_DIR && sg $LIBVIRT_GROUP $NOVA_BIN_DIR/nova-compute" + + if is_service_enabled n-cell; then + NOVA_CONF_BOTTOM=$NOVA_CELLS_CONF + screen_it n-cond "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-conductor --config-file $NOVA_CELLS_CONF" + screen_it n-cell "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cells --config-file $NOVA_CONF" + screen_it n-cell "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cells --config-file $NOVA_CELLS_CONF" + fi + + # The group **$LIBVIRT_GROUP** is added to the current user in this script. + # Use 'sg' to execute nova-compute as a member of the **$LIBVIRT_GROUP** group. 
+ screen_it n-cpu "cd $NOVA_DIR && sg $LIBVIRT_GROUP \"$NOVA_BIN_DIR/nova-compute --config-file $NOVA_CONF_BOTTOM\"" screen_it n-crt "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cert" - screen_it n-net "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-network" - screen_it n-sch "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-scheduler" + screen_it n-net "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-network --config-file $NOVA_CONF_BOTTOM" + screen_it n-sch "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-scheduler --config-file $NOVA_CONF_BOTTOM" + screen_it n-api-meta "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-api-metadata --config-file $NOVA_CONF_BOTTOM" + screen_it n-novnc "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-novncproxy --config-file $NOVA_CONF --web $NOVNC_DIR" screen_it n-xvnc "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-xvpvncproxy --config-file $NOVA_CONF" screen_it n-spice "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-spicehtml5proxy --config-file $NOVA_CONF --web $SPICE_DIR" @@ -670,7 +710,9 @@ function start_nova() { # stop_nova() - Stop running processes (non-screen) function stop_nova() { # Kill the nova screen windows - for serv in n-api n-cpu n-crt n-net n-sch n-novnc n-xvnc n-cauth n-cond n-spice; do + # Some services are listed here twice since more than one instance + # of a service may be running in certain configs. 
+ for serv in n-api n-cpu n-crt n-net n-sch n-novnc n-xvnc n-cauth n-spice n-cond n-cond n-cell n-cell n-api-meta; do screen -S $SCREEN_NAME -p $serv -X kill done } diff --git a/lib/rpc_backend b/lib/rpc_backend index 27d3ba3364..6b334a98ad 100644 --- a/lib/rpc_backend +++ b/lib/rpc_backend @@ -138,6 +138,13 @@ function restart_rpc_backend() { fi # change the rabbit password since the default is "guest" sudo rabbitmqctl change_password guest $RABBIT_PASSWORD + if is_service_enabled n-cell; then + # Add partitioned access for the child cell + if [ -z `sudo rabbitmqctl list_vhosts | grep child_cell` ]; then + sudo rabbitmqctl add_vhost child_cell + sudo rabbitmqctl set_permissions -p child_cell guest ".*" ".*" ".*" + fi + fi elif is_service_enabled qpid; then echo_summary "Starting qpid" restart_service qpidd diff --git a/stack.sh b/stack.sh index c0a314d029..37abd6da50 100755 --- a/stack.sh +++ b/stack.sh @@ -1034,6 +1034,8 @@ if is_service_enabled nova; then LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.libvirt.firewall.IptablesFirewallDriver"} iniset $NOVA_CONF DEFAULT firewall_driver "$LIBVIRT_FIREWALL_DRIVER" fi + + init_nova_cells fi # Extra things to prepare nova for baremetal, before nova starts @@ -1094,14 +1096,19 @@ if is_service_enabled q-svc; then create_quantum_initial_network setup_quantum_debug elif is_service_enabled $DATABASE_BACKENDS && is_service_enabled n-net; then + NM_CONF=${NOVA_CONF} + if is_service_enabled n-cell; then + NM_CONF=${NOVA_CELLS_CONF} + fi + # Create a small network - $NOVA_BIN_DIR/nova-manage network create "$PRIVATE_NETWORK_NAME" $FIXED_RANGE 1 $FIXED_NETWORK_SIZE $NETWORK_CREATE_ARGS + $NOVA_BIN_DIR/nova-manage --config-file $NM_CONF network create "$PRIVATE_NETWORK_NAME" $FIXED_RANGE 1 $FIXED_NETWORK_SIZE $NETWORK_CREATE_ARGS # Create some floating ips - $NOVA_BIN_DIR/nova-manage floating create $FLOATING_RANGE --pool=$PUBLIC_NETWORK_NAME + $NOVA_BIN_DIR/nova-manage --config-file $NM_CONF floating create 
$FLOATING_RANGE --pool=$PUBLIC_NETWORK_NAME # Create a second pool - $NOVA_BIN_DIR/nova-manage floating create --ip_range=$TEST_FLOATING_RANGE --pool=$TEST_FLOATING_POOL + $NOVA_BIN_DIR/nova-manage --config-file $NM_CONF floating create --ip_range=$TEST_FLOATING_RANGE --pool=$TEST_FLOATING_POOL fi if is_service_enabled quantum; then From 78a53d92c734e6921a555313a0163bd3ca7afa31 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 9 May 2013 17:20:31 -0700 Subject: [PATCH 0095/4704] Only search the lib dir for rpc_backend The grep in rpc_backend uses . to search all files. Unfortunately the current directory when the command is called is not the lib directory, so change it to search the lib directory instead. Without this fix the command is forced to grep all files under devstack which could be very slow if one has downloaded large images. Fixes bug 1174718 Change-Id: Ic8a59885cd07e5d6f2e734e80824762f9ac12996 --- lib/rpc_backend | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/rpc_backend b/lib/rpc_backend index 27d3ba3364..1fb1f21599 100644 --- a/lib/rpc_backend +++ b/lib/rpc_backend @@ -30,7 +30,7 @@ function check_rpc_backend() { # that can be passed as arguments to is_service_enabled. # We check for a call to iniset_rpc_backend in these files, meaning # the service needs a backend. - rpc_candidates=$(grep -rl iniset_rpc_backend . | awk -F/ '{print $NF}') + rpc_candidates=$(grep -rl iniset_rpc_backend $TOP_DIR/lib/ | awk -F/ '{print $NF}') for c in ${rpc_candidates}; do if is_service_enabled $c; then rpc_needed=0 From d3740f70b52763d547d2a1d861829e6f853775bc Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Thu, 9 May 2013 15:02:21 +0100 Subject: [PATCH 0096/4704] xenapi: Ubuntu installation repository/proxy fix Get rid of UBUNTU_INST_REPOSITORY, use UBUNTU_INST_HTTP_HOSTNAME and UBUNTU_INST_HTTP_DIRECTORY instead. User can also specify UBUNTU_INST_HTTP_PROXY to utilize a proxy for the OpenStack VM installation. 
The answer file will be edited to contain the specified values. Also get rid of the magic, undocumented MIRROR variable. This is related to blueprint xenapi-devstack-cleanup Change-Id: Ic9fc564c4ad0f43e2e536854335ebe14791d0255 --- tools/xen/install_os_domU.sh | 11 ++++++----- tools/xen/scripts/install_ubuntu_template.sh | 7 ++++++- tools/xen/xenrc | 4 +++- 3 files changed, 15 insertions(+), 7 deletions(-) diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh index bcea939932..0e194fe973 100755 --- a/tools/xen/install_os_domU.sh +++ b/tools/xen/install_os_domU.sh @@ -250,11 +250,12 @@ if [ -z "$templateuuid" ]; then mkdir -p $HTTP_SERVER_LOCATION fi cp -f $THIS_DIR/devstackubuntupreseed.cfg $HTTP_SERVER_LOCATION - MIRROR=${MIRROR:-""} - if [ -n "$MIRROR" ]; then - sed -e "s,d-i mirror/http/hostname string .*,d-i mirror/http/hostname string $MIRROR," \ - -i "${HTTP_SERVER_LOCATION}/devstackubuntupreseed.cfg" - fi + + sed \ + -e "s,\(d-i mirror/http/hostname string\).*,\1 $UBUNTU_INST_HTTP_HOSTNAME,g" \ + -e "s,\(d-i mirror/http/directory string\).*,\1 $UBUNTU_INST_HTTP_DIRECTORY,g" \ + -e "s,\(d-i mirror/http/proxy string\).*,\1 $UBUNTU_INST_HTTP_PROXY,g" \ + -i "${HTTP_SERVER_LOCATION}/devstackubuntupreseed.cfg" fi # Update the template diff --git a/tools/xen/scripts/install_ubuntu_template.sh b/tools/xen/scripts/install_ubuntu_template.sh index 00cabb038d..b7a8eff952 100755 --- a/tools/xen/scripts/install_ubuntu_template.sh +++ b/tools/xen/scripts/install_ubuntu_template.sh @@ -69,11 +69,16 @@ fi xe template-param-set uuid=$new_uuid \ other-config:install-methods=http \ - other-config:install-repository="$UBUNTU_INST_REPOSITORY" \ + other-config:install-repository="http://${UBUNTU_INST_HTTP_HOSTNAME}${UBUNTU_INST_HTTP_DIRECTORY}" \ PV-args="$pvargs" \ other-config:debian-release="$UBUNTU_INST_RELEASE" \ other-config:default_template=true \ other-config:disks='' \ other-config:install-arch="$UBUNTU_INST_ARCH" +if ! 
[ -z "$UBUNTU_INST_HTTP_PROXY" ]; then + xe template-param-set uuid=$new_uuid \ + other-config:install-proxy="$UBUNTU_INST_HTTP_PROXY" +fi + echo "Ubuntu template installed uuid:$new_uuid" diff --git a/tools/xen/xenrc b/tools/xen/xenrc index 1956623a81..e50f954715 100644 --- a/tools/xen/xenrc +++ b/tools/xen/xenrc @@ -64,7 +64,9 @@ UBUNTU_INST_TEMPLATE_NAME="Ubuntu 11.10 (64-bit) for DevStack" # XenServer 6.1 and later or XCP 1.6 or later # 11.10 is only really supported with XenServer 6.0.2 and later UBUNTU_INST_ARCH="amd64" -UBUNTU_INST_REPOSITORY="http://archive.ubuntu.net/ubuntu" +UBUNTU_INST_HTTP_HOSTNAME="archive.ubuntu.net" +UBUNTU_INST_HTTP_DIRECTORY="/ubuntu" +UBUNTU_INST_HTTP_PROXY="" UBUNTU_INST_LOCALE="en_US" UBUNTU_INST_KEYBOARD="us" # network configuration for ubuntu netinstall From eda5579e1ce2aa77f0eed71881780da585c77cfe Mon Sep 17 00:00:00 2001 From: Shengjie Min Date: Mon, 13 May 2013 18:00:57 +0100 Subject: [PATCH 0097/4704] fix ceilometer ENABLE_SERVICES comment - keep it aligned with the Ceilometer doc Change-Id: Ib1ca0cfbeafc61723e5d4fe79da6355db7d76f0c Fixes: bug #1179590 --- lib/ceilometer | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ceilometer b/lib/ceilometer index 1c289fd471..0f0265dda0 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -1,8 +1,8 @@ # lib/ceilometer # Install and start **Ceilometer** service -# To enable, add the following to localrc -# ENABLED_SERVICES+=ceilometer-acompute,ceilometer-acentral,ceilometer-collector,ceilometer-api +# To enable Ceilometer services, add the following to localrc +# enable_service ceilometer-acompute ceilometer-acentral ceilometer-collector ceilometer-api # Dependencies: # - functions From 73695d0ea490c4c7a1158957dd5a85586cfa0933 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Tue, 14 May 2013 15:01:01 +0000 Subject: [PATCH 0098/4704] Cleanup openrc. 
- Cleanup openrc, removing the sourcing of functions (which was failing for zsh) and removing the *_DEBUG comment since those variables have been removed from the clients. Change-Id: Ie2e6fb1e770403c4ef3463a850e8151bd312614c --- openrc | 7 ------- 1 file changed, 7 deletions(-) diff --git a/openrc b/openrc index 8af28543fb..2d5d48aea3 100644 --- a/openrc +++ b/openrc @@ -20,9 +20,6 @@ fi # Find the other rc files RC_DIR=$(cd $(dirname "$BASH_SOURCE") && pwd) -# Import common functions -source $RC_DIR/functions - # Load local configuration source $RC_DIR/stackrc @@ -80,7 +77,3 @@ export OS_CACERT=$INT_CA_DIR/ca-chain.pem export NOVA_VERSION=${NOVA_VERSION:-1.1} # In the future this will change names: export COMPUTE_API_VERSION=${COMPUTE_API_VERSION:-$NOVA_VERSION} - -# set log level to DEBUG (helps debug issues) -# export KEYSTONECLIENT_DEBUG=1 -# export NOVACLIENT_DEBUG=1 From b1802db8078b61fe3ee4d3d6239d069123638932 Mon Sep 17 00:00:00 2001 From: Maru Newby Date: Wed, 15 May 2013 19:44:10 +0000 Subject: [PATCH 0099/4704] Do not run selinux commands if it is disabled. * Execution of the setenforce command when selinux was not enabled was resulting in a non-zero exit status, which halted devstack.
* Addresses bug 1175444 Change-Id: I7f6492dea7c52d153e755cda826d6fabd53f3771 --- stack.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 7ec25cf9a0..5dea00097a 100755 --- a/stack.sh +++ b/stack.sh @@ -565,7 +565,9 @@ fi if [[ is_fedora && $DISTRO =~ (rhel6) ]]; then # Avoid having to configure selinux to allow things like httpd to # access horizion files or run binaries like nodejs (LP#1175444) - sudo setenforce 0 + if selinuxenabled; then + sudo setenforce 0 + fi # An old version (2.0.1) of python-crypto is probably installed on # a fresh system, via the dependency chain From 337bd8176fff19f22a5cc8e2ce256ca6b574198b Mon Sep 17 00:00:00 2001 From: Bob Ball Date: Thu, 16 May 2013 14:27:01 +0100 Subject: [PATCH 0100/4704] Move hypervisor-specific code into a case statement Change-Id: Id799506f180ac81d493f49de140cc079338430aa --- stackrc | 29 ++++++++++++++++++----------- 1 file changed, 18 insertions(+), 11 deletions(-) diff --git a/stackrc b/stackrc index 871c8a1bfa..edf5a824f3 100644 --- a/stackrc +++ b/stackrc @@ -164,14 +164,25 @@ PBR_BRANCH=${PBR_BRANCH:-master} # Nova hypervisor configuration. We default to libvirt with **kvm** but will # drop back to **qemu** if we are unable to load the kvm module. ``stack.sh`` can -# also install an **LXC** or **OpenVZ** based system. +# also install an **LXC**, **OpenVZ** or **XenAPI** based system. 
VIRT_DRIVER=${VIRT_DRIVER:-libvirt} -LIBVIRT_TYPE=${LIBVIRT_TYPE:-kvm} -if [[ "$os_VENDOR" =~ (Debian) ]]; then - LIBVIRT_GROUP=libvirt -else - LIBVIRT_GROUP=libvirtd -fi +case "$VIRT_DRIVER" in + libvirt) + LIBVIRT_TYPE=${LIBVIRT_TYPE:-kvm} + if [[ "$os_VENDOR" =~ (Debian) ]]; then + LIBVIRT_GROUP=libvirt + else + LIBVIRT_GROUP=libvirtd + fi + ;; + xenserver) + # Xen config common to nova and quantum + XENAPI_CONNECTION_URL=${XENAPI_CONNECTION_URL:-"http://169.254.0.1"} + XENAPI_USER=${XENAPI_USER:-"root"} + ;; + *) + ;; +esac # Specify a comma-separated list of UEC images to download and install into glance. # supported urls here are: @@ -234,10 +245,6 @@ PUBLIC_NETWORK_NAME=${PUBLIC_NETWORK_NAME:-"public"} # Compatibility until it's eradicated from CI USE_SCREEN=${SCREEN_DEV:-$USE_SCREEN} -# Xen config common to nova and quantum -XENAPI_CONNECTION_URL=${XENAPI_CONNECTION_URL:-"http://169.254.0.1"} -XENAPI_USER=${XENAPI_USER:-"root"} - # Local variables: # mode: shell-script # End: From f35ff72b77d479d43c1ede6b9f691ae54a2c60a1 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Thu, 16 May 2013 16:31:12 -0400 Subject: [PATCH 0101/4704] add support for changing cinder perodic_interval As discovered in tempest, when you create and delete volumes quickly, and have a small volume storage pool, you can get cinder scheduler to think you are "out of space" when you are not. This is because cinder scheduler updates free space on it's periodic job, which defaults to 60 seconds. We need control over that value for the devstack gate otherwise we regularly get overruns if we run too many volume tests quickly. Work around for bug 1180976 Expect that this will get removed later if cinder gets a more efficient way to update the scheduler for freed resources. Also, don't be completely stupid about setting defaults.... 
Change-Id: I20e52e66fcc94b224476cdd14c88bd6981b4e617 --- lib/cinder | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/lib/cinder b/lib/cinder index 82e7454423..7e9c2ba6e5 100644 --- a/lib/cinder +++ b/lib/cinder @@ -58,6 +58,14 @@ CINDER_MULTI_LVM_BACKEND=$(trueorfalse False $CINDER_MULTI_LVM_BACKEND) # https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1023755 CINDER_SECURE_DELETE=`trueorfalse True $CINDER_SECURE_DELETE` +# Cinder reports allocations back to the scheduler on periodic intervals +# it turns out we can get an "out of space" issue when we run tests too +# quickly just because cinder didn't realize we'd freed up resources. +# Make this configurable so that devstack-gate/tempest can set it to +# less than the 60 second default +# https://bugs.launchpad.net/cinder/+bug/1180976 +CINDER_PERIODIC_INTERVAL=${CINDER_PERIODIC_INTERVAL:-60} + # Name of the lvm volume groups to use/create for iscsi volumes # VOLUME_GROUP2 is used only if CINDER_MULTI_LVM_BACKEND = True VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes} @@ -197,6 +205,7 @@ function configure_cinder() { iniset $CINDER_CONF DEFAULT rootwrap_config "$CINDER_CONF_DIR/rootwrap.conf" iniset $CINDER_CONF DEFAULT osapi_volume_extension cinder.api.contrib.standard_extensions iniset $CINDER_CONF DEFAULT state_path $CINDER_STATE_PATH + iniset $CINDER_CONF DEFAULT periodic_interval $CINDER_PERIODIC_INTERVAL if is_service_enabled tls-proxy; then # Set the service port for a proxy to take the original From d52008acd48dbff6fcee01a159e9f65bf142b714 Mon Sep 17 00:00:00 2001 From: "Daniel P. Berrange" Date: Mon, 20 May 2013 15:03:43 +0100 Subject: [PATCH 0102/4704] Use --managed-save flag to virsh undefine If a virtual machine has got a managed save image, it is not possible to delete it using a plain 'virsh undefine' command. While Nova doesn't use 'managed save', the libvirt-guests init script or systemd service may have created one if the user rebooted their host. 
Thus devstack should pass the --managed-save flag to virsh to ensure the VM's removal. Change-Id: Id9b072a6dceeb4e179d2b6a25bbdfb559c299e95 Signed-off-by: Daniel P. Berrange --- lib/nova | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/nova b/lib/nova index 6fc0c7917a..9fc4ded7b6 100644 --- a/lib/nova +++ b/lib/nova @@ -151,7 +151,7 @@ function cleanup_nova() { instances=`sudo virsh list --all | grep $INSTANCE_NAME_PREFIX | sed "s/.*\($INSTANCE_NAME_PREFIX[0-9a-fA-F]*\).*/\1/g"` if [ ! "$instances" = "" ]; then echo $instances | xargs -n1 sudo virsh destroy || true - echo $instances | xargs -n1 sudo virsh undefine || true + echo $instances | xargs -n1 sudo virsh undefine --managed-save || true fi # Logout and delete iscsi sessions From 70e6b7d14c159045f9e648c34f265c6cee196c71 Mon Sep 17 00:00:00 2001 From: Maru Newby Date: Thu, 16 May 2013 10:57:17 +0000 Subject: [PATCH 0103/4704] Avoid installing python-nose for RHEL6. * python-nose on RHEL isn't new enough to support Tempest.
Change-Id: I98107bf664a12f6252f74b4511e3228d511ff855 --- files/rpms/horizon | 3 ++- files/rpms/swift | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/files/rpms/horizon b/files/rpms/horizon index 151e7e21af..d50482ea1d 100644 --- a/files/rpms/horizon +++ b/files/rpms/horizon @@ -16,7 +16,8 @@ python-kombu python-migrate python-mox python-netaddr -python-nose +# RHEL6's python-nose is incompatible with Tempest +python-nose #dist:f16,f17,f18 python-paste #dist:f16,f17,f18 python-paste-deploy #dist:f16,f17,f18 python-pep8 diff --git a/files/rpms/swift b/files/rpms/swift index 1b36e34eab..c626d8e3e0 100644 --- a/files/rpms/swift +++ b/files/rpms/swift @@ -7,7 +7,8 @@ python-devel python-eventlet python-greenlet python-netifaces -python-nose +# RHEL6's python-nose is incompatible with Tempest +python-nose # dist:f16,f17,f18 python-paste-deploy # dist:f16,f17,f18 python-setuptools # dist:f16,f17,f18 python-simplejson From eb2da5dfb8c1014fb973bb21428af5b8ed5d8848 Mon Sep 17 00:00:00 2001 From: Maru Newby Date: Wed, 15 May 2013 21:28:29 +0000 Subject: [PATCH 0104/4704] Ensure Open vSwitch can be installed on RHEL6. * RHEL6 does not include Open vSwitch in the default repos, but it is available via the RDO repo. This patch automatically configures the RDO repo for RHEL6. * The openvswitch package is now listed as an rpm dependency to ensure it can be cached by a pre-built CI environment. 
Change-Id: I8f93f53039cca4ff29bcb91720be7bb047b3ed8a --- files/rpms/quantum | 1 + stack.sh | 17 +++++++++++++++++ 2 files changed, 18 insertions(+) diff --git a/files/rpms/quantum b/files/rpms/quantum index 32c6f626dc..8827d5aa1f 100644 --- a/files/rpms/quantum +++ b/files/rpms/quantum @@ -4,6 +4,7 @@ ebtables iptables iputils mysql-server # NOPRIME +openvswitch # NOPRIME python-boto python-eventlet python-greenlet diff --git a/stack.sh b/stack.sh index 5dea00097a..3a454d8d4b 100755 --- a/stack.sh +++ b/stack.sh @@ -32,6 +32,12 @@ source $TOP_DIR/functions # and ``DISTRO`` GetDistro + +# Configure non-default repos +# =========================== + +# Repo configuration needs to occur before package installation. + # Some dependencies are not available in Debian Wheezy official # repositories. However, it's possible to run OpenStack from gplhost # repository. @@ -42,6 +48,17 @@ if [[ "$os_VENDOR" =~ (Debian) ]]; then apt_get install --force-yes gplhost-archive-keyring fi +# Installing Open vSwitch on RHEL6 requires enabling the RDO repo. +RHEL6_RDO_REPO_RPM=${RHEL6_RDO_REPO_RPM:-"http://rdo.fedorapeople.org/openstack/openstack-grizzly/rdo-release-grizzly-3.noarch.rpm"} +RHEL6_RDO_REPO_ID=${RHEL6_RDO_REPO_ID:-"openstack-grizzly"} +if [[ is_fedora && $DISTRO =~ (rhel6) ]]; then + if ! yum repolist enabled $RHEL6_RDO_REPO_ID | grep -q $RHEL6_RDO_REPO_ID; then + echo "RDO repo not detected; installing" + yum_install $RHEL6_RDO_REPO_RPM + fi +fi + + # Global Settings # =============== From f28c9c759599500db11960266b9c6f61f150f2f1 Mon Sep 17 00:00:00 2001 From: Jian Wen Date: Tue, 21 May 2013 14:16:56 +0800 Subject: [PATCH 0105/4704] use SERVICE_HOST intead of HOST_IP for quantum services The other services, like nova, glance and cinder, use SERVICE_HOST instead of HOST_IP to configure their server listening addresses. This is more friendly to multi-node quantum deployment. 
Change-Id: Ibee22ef59d532e8f7004d8e4582015d303d15404 --- lib/quantum | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/quantum b/lib/quantum index dfd73e99f4..12702be52a 100644 --- a/lib/quantum +++ b/lib/quantum @@ -75,7 +75,7 @@ Q_PLUGIN=${Q_PLUGIN:-openvswitch} # Default Quantum Port Q_PORT=${Q_PORT:-9696} # Default Quantum Host -Q_HOST=${Q_HOST:-$HOST_IP} +Q_HOST=${Q_HOST:-$SERVICE_HOST} # Default admin username Q_ADMIN_USERNAME=${Q_ADMIN_USERNAME:-quantum} # Default auth strategy @@ -86,7 +86,7 @@ Q_USE_NAMESPACE=${Q_USE_NAMESPACE:-True} Q_OVS_USE_VETH=${Q_OVS_USE_VETH:-False} Q_USE_ROOTWRAP=${Q_USE_ROOTWRAP:-True} # Meta data IP -Q_META_DATA_IP=${Q_META_DATA_IP:-$HOST_IP} +Q_META_DATA_IP=${Q_META_DATA_IP:-$SERVICE_HOST} # Allow Overlapping IP among subnets Q_ALLOW_OVERLAPPING_IP=${Q_ALLOW_OVERLAPPING_IP:-True} # Use quantum-debug command From 64539924db3744d2483a1da7231cc801e332e180 Mon Sep 17 00:00:00 2001 From: Akihiro MOTOKI Date: Sat, 27 Apr 2013 00:23:27 +0900 Subject: [PATCH 0106/4704] Remove temporary logic for Quantum auth_token transition When Quantum auth_token configuration were moved to quantum.conf, the temporary logic to disable auth_token in api-paste.ini was added to avoid gating test failures of patches based on the version with api-paste.ini auth_token configurations. This temporary logic is no longer needed. Change-Id: I539301a338d5ada01c83af5e9cf203849a67c6c1 --- lib/quantum | 19 ------------------- 1 file changed, 19 deletions(-) diff --git a/lib/quantum b/lib/quantum index dfd73e99f4..21baaf8c17 100644 --- a/lib/quantum +++ b/lib/quantum @@ -581,10 +581,6 @@ function _configure_quantum_service() { iniset $QUANTUM_CONF DEFAULT auth_strategy $Q_AUTH_STRATEGY _quantum_setup_keystone $QUANTUM_CONF keystone_authtoken - # Comment out keystone authtoken configuration in api-paste.ini - # It is required to avoid any breakage in Quantum where the sample - # api-paste.ini has authtoken configurations. 
- _quantum_commentout_keystone_authtoken $Q_API_PASTE_FILE filter:authtoken # Configure plugin quantum_plugin_configure_service @@ -655,21 +651,6 @@ function _quantum_setup_keystone() { rm -f $QUANTUM_AUTH_CACHE_DIR/* } -function _quantum_commentout_keystone_authtoken() { - local conf_file=$1 - local section=$2 - - inicomment $conf_file $section auth_host - inicomment $conf_file $section auth_port - inicomment $conf_file $section auth_protocol - inicomment $conf_file $section auth_url - - inicomment $conf_file $section admin_tenant_name - inicomment $conf_file $section admin_user - inicomment $conf_file $section admin_password - inicomment $conf_file $section signing_dir -} - function _quantum_setup_interface_driver() { # ovs_use_veth needs to be set before the plugin configuration From 614202fc40923a60f83d7108ea66ce07846de324 Mon Sep 17 00:00:00 2001 From: JordanP Date: Thu, 16 May 2013 11:16:13 +0200 Subject: [PATCH 0107/4704] On unstack : clean up network namespaces created by quantum Also removes OVS ports Fix Bug #1156837 Change-Id: Id4178c0a12a3ada76403a57a736a9c7a908d6a4a --- lib/quantum | 10 ++++++++-- lib/quantum_plugins/nec | 2 -- lib/quantum_plugins/nicira | 1 - lib/quantum_plugins/openvswitch | 1 - lib/quantum_plugins/ovs_base | 15 +++++++++++++++ lib/quantum_plugins/ryu | 1 - lib/quantum_thirdparty/bigswitch_floodlight | 1 - unstack.sh | 1 + 8 files changed, 24 insertions(+), 8 deletions(-) diff --git a/lib/quantum b/lib/quantum index dfd73e99f4..4d6793c510 100644 --- a/lib/quantum +++ b/lib/quantum @@ -406,7 +406,14 @@ function stop_quantum() { # cleanup_quantum() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_quantum() { - : + if is_quantum_ovs_base_plugin; then + quantum_ovs_base_cleanup + fi + + # delete all namespaces created by quantum + for ns in $(sudo ip netns list | grep -o -e qdhcp-[0-9a-f\-]* -e qrouter-[0-9a-f\-]*); do + sudo ip netns delete ${ns} + done } # 
_configure_quantum_common() @@ -502,7 +509,6 @@ function _configure_quantum_l3_agent() { # for l3-agent, only use per tenant router if we have namespaces Q_L3_ROUTER_PER_TENANT=$Q_USE_NAMESPACE AGENT_L3_BINARY="$QUANTUM_DIR/bin/quantum-l3-agent" - PUBLIC_BRIDGE=${PUBLIC_BRIDGE:-br-ex} Q_L3_CONF_FILE=$QUANTUM_CONF_DIR/l3_agent.ini cp $QUANTUM_DIR/etc/l3_agent.ini $Q_L3_CONF_FILE diff --git a/lib/quantum_plugins/nec b/lib/quantum_plugins/nec index 4a2a49767a..411f5a4b01 100644 --- a/lib/quantum_plugins/nec +++ b/lib/quantum_plugins/nec @@ -17,8 +17,6 @@ OFC_DRIVER=${OFC_DRIVER:-trema} OFC_RETRY_MAX=${OFC_RETRY_MAX:-0} OFC_RETRY_INTERVAL=${OFC_RETRY_INTERVAL:-1} -OVS_BRIDGE=${OVS_BRIDGE:-br-int} - # Main logic # --------------------------- diff --git a/lib/quantum_plugins/nicira b/lib/quantum_plugins/nicira index 305c3bfa7d..fc06b55713 100644 --- a/lib/quantum_plugins/nicira +++ b/lib/quantum_plugins/nicira @@ -8,7 +8,6 @@ set +o xtrace source $TOP_DIR/lib/quantum_plugins/ovs_base function setup_integration_bridge() { - OVS_BRIDGE=${OVS_BRIDGE:-br-int} _quantum_ovs_base_setup_bridge $OVS_BRIDGE # Set manager to NVP controller (1st of list) if [[ "$NVP_CONTROLLERS" != "" ]]; then diff --git a/lib/quantum_plugins/openvswitch b/lib/quantum_plugins/openvswitch index dda9e61129..374172014b 100644 --- a/lib/quantum_plugins/openvswitch +++ b/lib/quantum_plugins/openvswitch @@ -41,7 +41,6 @@ function quantum_plugin_configure_l3_agent() { function quantum_plugin_configure_plugin_agent() { # Setup integration bridge - OVS_BRIDGE=${OVS_BRIDGE:-br-int} _quantum_ovs_base_setup_bridge $OVS_BRIDGE _quantum_ovs_base_configure_firewall_driver diff --git a/lib/quantum_plugins/ovs_base b/lib/quantum_plugins/ovs_base index 2ada0dbf5a..a5e03acd51 100644 --- a/lib/quantum_plugins/ovs_base +++ b/lib/quantum_plugins/ovs_base @@ -5,6 +5,9 @@ MY_XTRACE=$(set +o | grep xtrace) set +o xtrace +OVS_BRIDGE=${OVS_BRIDGE:-br-int} +PUBLIC_BRIDGE=${PUBLIC_BRIDGE:-br-ex} + function 
is_quantum_ovs_base_plugin() { # Yes, we use OVS. return 0 @@ -17,6 +20,18 @@ function _quantum_ovs_base_setup_bridge() { sudo ovs-vsctl --no-wait br-set-external-id $bridge bridge-id $bridge } +function quantum_ovs_base_cleanup() { + # remove all OVS ports that look like Quantum created ports + for port in $(sudo ovs-vsctl list port | grep -o -e tap[0-9a-f\-]* -e q[rg]-[0-9a-f\-]*); do + sudo ovs-vsctl del-port ${port} + done + + # remove all OVS bridges created by Quantum + for bridge in $(sudo ovs-vsctl list-br | grep -o -e ${OVS_BRIDGE} -e ${PUBLIC_BRIDGE}); do + sudo ovs-vsctl del-br ${bridge} + done +} + function _quantum_ovs_base_install_agent_packages() { local kernel_version # Install deps diff --git a/lib/quantum_plugins/ryu b/lib/quantum_plugins/ryu index 1b039dc950..c482747759 100644 --- a/lib/quantum_plugins/ryu +++ b/lib/quantum_plugins/ryu @@ -45,7 +45,6 @@ function quantum_plugin_configure_l3_agent() { function quantum_plugin_configure_plugin_agent() { # Set up integration bridge - OVS_BRIDGE=${OVS_BRIDGE:-br-int} _quantum_ovs_base_setup_bridge $OVS_BRIDGE if [ -n "$RYU_INTERNAL_INTERFACE" ]; then sudo ovs-vsctl --no-wait -- --may-exist add-port $OVS_BRIDGE $RYU_INTERNAL_INTERFACE diff --git a/lib/quantum_thirdparty/bigswitch_floodlight b/lib/quantum_thirdparty/bigswitch_floodlight index 60e39248c4..385bd0d9b0 100644 --- a/lib/quantum_thirdparty/bigswitch_floodlight +++ b/lib/quantum_thirdparty/bigswitch_floodlight @@ -7,7 +7,6 @@ set +o xtrace BS_FL_CONTROLLERS_PORT=${BS_FL_CONTROLLERS_PORT:-localhost:80} BS_FL_OF_PORT=${BS_FL_OF_PORT:-6633} -OVS_BRIDGE=${OVS_BRIDGE:-br-int} function configure_bigswitch_floodlight() { : diff --git a/unstack.sh b/unstack.sh index 3ac29857f8..d1d03494de 100755 --- a/unstack.sh +++ b/unstack.sh @@ -109,4 +109,5 @@ fi if is_service_enabled quantum; then stop_quantum stop_quantum_third_party + cleanup_quantum fi From 2fce0a973de0fb77212b4309421b432b14f039b2 Mon Sep 17 00:00:00 2001 From: Ben Nemec Date: Wed, 22 May 2013 
21:13:11 +0000 Subject: [PATCH 0108/4704] Fix screenrc for nova-compute When devstack creates stack-screenrc, it includes unescaped " characters that cause failures starting nova-compute. This fix changes the " to ' so there isn't a conflict. Fixes bug 1183114 Change-Id: I7830879d56f1ac20950aace46dd3b72d209986ce --- lib/nova | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/nova b/lib/nova index 9fc4ded7b6..c38f50c801 100644 --- a/lib/nova +++ b/lib/nova @@ -690,7 +690,7 @@ function start_nova() { # The group **$LIBVIRT_GROUP** is added to the current user in this script. # Use 'sg' to execute nova-compute as a member of the **$LIBVIRT_GROUP** group. - screen_it n-cpu "cd $NOVA_DIR && sg $LIBVIRT_GROUP \"$NOVA_BIN_DIR/nova-compute --config-file $NOVA_CONF_BOTTOM\"" + screen_it n-cpu "cd $NOVA_DIR && sg $LIBVIRT_GROUP '$NOVA_BIN_DIR/nova-compute --config-file $NOVA_CONF_BOTTOM'" screen_it n-crt "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cert" screen_it n-net "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-network --config-file $NOVA_CONF_BOTTOM" screen_it n-sch "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-scheduler --config-file $NOVA_CONF_BOTTOM" From e6f2ee508a44b1008ac226b86a68e22dc5ea7ac5 Mon Sep 17 00:00:00 2001 From: Jiajun Liu Date: Tue, 14 May 2013 09:48:15 +0000 Subject: [PATCH 0109/4704] add firewall driver if we use quantum security group fixes bug 1179820 we must set firewall driver if we want to use quantum security group because quantum will disable security group if we do not change the default firewall driver. Currently devstack will not change the default firewall driver if we are just running the quantum server on a node, which will cause nova to be unable to retrieve security group information.
Change-Id: Ie274325decbf252630a237ed3d6ee3136eb259fe --- lib/quantum_plugins/linuxbridge | 5 +++++ lib/quantum_plugins/nec | 2 ++ lib/quantum_plugins/openvswitch | 2 ++ lib/quantum_plugins/ryu | 2 ++ 4 files changed, 11 insertions(+) diff --git a/lib/quantum_plugins/linuxbridge b/lib/quantum_plugins/linuxbridge index 324e255231..cc4040bccd 100644 --- a/lib/quantum_plugins/linuxbridge +++ b/lib/quantum_plugins/linuxbridge @@ -74,6 +74,11 @@ function quantum_plugin_configure_service() { if [[ "$LB_VLAN_RANGES" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE VLANS network_vlan_ranges $LB_VLAN_RANGES fi + if [[ "$Q_USE_SECGROUP" == "True" ]]; then + iniset /$Q_PLUGIN_CONF_FILE SECURITYGROUP firewall_driver quantum.agent.linux.iptables_firewall.IptablesFirewallDriver + else + iniset /$Q_PLUGIN_CONF_FILE SECURITYGROUP firewall_driver quantum.agent.firewall.NoopFirewallDriver + fi } function quantum_plugin_setup_interface_driver() { diff --git a/lib/quantum_plugins/nec b/lib/quantum_plugins/nec index f61f50bba5..158c4c7991 100644 --- a/lib/quantum_plugins/nec +++ b/lib/quantum_plugins/nec @@ -84,6 +84,8 @@ function quantum_plugin_configure_service() { iniset /$Q_PLUGIN_CONF_FILE OFC driver $OFC_DRIVER iniset /$Q_PLUGIN_CONF_FILE OFC api_retry_max OFC_RETRY_MAX iniset /$Q_PLUGIN_CONF_FILE OFC api_retry_interval OFC_RETRY_INTERVAL + + _quantum_ovs_base_configure_firewall_driver } function quantum_plugin_setup_interface_driver() { diff --git a/lib/quantum_plugins/openvswitch b/lib/quantum_plugins/openvswitch index ab16483452..f56c09ea6b 100644 --- a/lib/quantum_plugins/openvswitch +++ b/lib/quantum_plugins/openvswitch @@ -133,6 +133,8 @@ function quantum_plugin_configure_service() { if [[ $OVS_ENABLE_TUNNELING = "True" ]]; then iniset /$Q_PLUGIN_CONF_FILE OVS enable_tunneling True fi + + _quantum_ovs_base_configure_firewall_driver } function quantum_plugin_setup_interface_driver() { diff --git a/lib/quantum_plugins/ryu b/lib/quantum_plugins/ryu index 113923235c..3dc9f1282d 100644 
--- a/lib/quantum_plugins/ryu +++ b/lib/quantum_plugins/ryu @@ -58,6 +58,8 @@ function quantum_plugin_configure_plugin_agent() { function quantum_plugin_configure_service() { iniset /$Q_PLUGIN_CONF_FILE OVS openflow_rest_api $RYU_API_HOST:$RYU_API_PORT + + _quantum_ovs_base_configure_firewall_driver } function quantum_plugin_setup_interface_driver() { From 09d20513e96fe42a2ed8bc15c22bdc5764de2e1b Mon Sep 17 00:00:00 2001 From: Jiajun Liu Date: Thu, 23 May 2013 09:12:04 +0000 Subject: [PATCH 0110/4704] Create .mailmap file Use .mailmap file to coalesce commits by the same person using multiple names and/or email addresses Change-Id: I00c060d62a4671ca6ce0a5e04ac3c4f144ed671e --- .mailmap | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 .mailmap diff --git a/.mailmap b/.mailmap new file mode 100644 index 0000000000..d5ad780b38 --- /dev/null +++ b/.mailmap @@ -0,0 +1,4 @@ +# Format is: +# +# +Jiajun Liu From 3759c65677cafffd2762178efa1b7948d2a6dea6 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Fri, 24 May 2013 06:41:18 -0400 Subject: [PATCH 0111/4704] add myself to AUTHORS realized that apparently I'd never done that, assuming it was auto generated like other projects. Oops. Change-Id: Ib3374e1eed54fb723da8afc006f83d42432307aa --- AUTHORS | 1 + 1 file changed, 1 insertion(+) diff --git a/AUTHORS b/AUTHORS index 718a76095c..6b6a85fee4 100644 --- a/AUTHORS +++ b/AUTHORS @@ -37,6 +37,7 @@ Matt Joyce Osamu Habuka Russell Bryant Scott Moser +Sean Dague Sumit Naiksatam Thierry Carrez Todd Willey From 9e326779278a410366e913b65b9d0d9145fe53f9 Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Wed, 8 May 2013 16:42:22 +0100 Subject: [PATCH 0112/4704] xenapi - cleanup network configuration The Xenserver/XCP part of devstack was configuring the hypervisor's connectivity to the outer world, by adding VLANs, and physical interfaces to the virtual networks. It added a lot of complexity, and made it hard to get started with XenServer. 
This patch removes that extra complexity, so it is left as an exercise for the user. Related to blueprint blueprint xenapi-devstack-cleanup Change-Id: If3367335c3da8621d0afe1f6cae77511fbdbb3e2 --- tools/xen/README.md | 87 ++++++++++++++++----- tools/xen/functions | 81 +++++++++++++++++++ tools/xen/install_os_domU.sh | 117 +++++++--------------------- tools/xen/scripts/install-os-vpx.sh | 2 +- tools/xen/xenrc | 35 ++++----- 5 files changed, 193 insertions(+), 129 deletions(-) diff --git a/tools/xen/README.md b/tools/xen/README.md index 258d7a32a7..8f0c10d0d7 100644 --- a/tools/xen/README.md +++ b/tools/xen/README.md @@ -8,6 +8,34 @@ The Openstack services are configured to run within a "privileged" virtual machine on the Xenserver host (called OS domU). The VM uses the XAPI toolstack to communicate with the host. +The provided localrc helps to build a basic environment. +The requirements are: + - An internet-enabled network with a DHCP server on it + - XenServer box plugged in to the same network +This network will be used as the OpenStack management network. The VM Network +and the Public Network will not be connected to any physical interfaces, only +new virtual networks will be created by the `install_os_domU.sh` script. + +Steps to follow: + - Install XenServer + - Download Devstack to XenServer + - Customise `localrc` + - Start `install_os_domU.sh` script + +The `install_os_domU.sh` script will: + - Setup XenAPI plugins + - Create the named networks, if they don't exist + - Install an Ubuntu Virtual Machine, with 4 network interfaces: + - eth0 - internal xapi interface + - eth1 - VM interface, connected to `VM_BRIDGE_OR_NET_NAME` defaults to + `"OpenStack VM Network"`. + - eth2 - Management interface, connected to `MGT_BRIDGE_OR_NET_NAME`, + defaults to `xenbr0`, XenServer's bridge associated with the Hypervisors + `eth0`. + - eth3 - Public interface, connected to `PUB_BRIDGE_OR_NET_NAME` defaults to + `"OpenStack Public Network"`. 
+ - Start devstack inside the created OpenStack VM + ## Step 1: Install Xenserver Install XenServer 5.6+ on a clean box. You can get XenServer by signing up for an account on citrix.com, and then visiting: @@ -15,13 +43,12 @@ https://www.citrix.com/English/ss/downloads/details.asp?downloadId=2311504&produ For details on installation, see: http://wiki.openstack.org/XenServer/Install -Here are some sample Xenserver network settings for when you are just -getting started (Settings like this have been used with a laptop + cheap wifi router): - -* XenServer Host IP: 192.168.1.10 -* XenServer Netmask: 255.255.255.0 -* XenServer Gateway: 192.168.1.1 -* XenServer DNS: 192.168.1.1 +The XenServer IP configuration depends on your local network setup. If you are +using dhcp, make a reservation for XenServer, so its IP address won't change +over time. Make a note of the XenServer's IP address, as it has to be specified +in `localrc`. The other option is to manually specify the IP setup for the +XenServer box. Please make sure, that a gateway and a nameserver is configured, +as `install_os_domU.sh` will connect to github.com to get source-code snapshots. ## Step 2: Download devstack On your XenServer host, run the following commands as root: @@ -32,45 +59,63 @@ On your XenServer host, run the following commands as root: ## Step 3: Configure your localrc inside the devstack directory Devstack uses a localrc for user-specific configuration. Note that -the XENAPI_PASSWORD must be your dom0 root password. +the `XENAPI_PASSWORD` must be your dom0 root password. Of course, use real passwords if this machine is exposed. cat > ./localrc <&2 << EOF +ERROR: Multiple networks found matching name-label to "$bridge_or_net_name" +please review your XenServer network configuration / localrc file. 
+EOF + exit 1 + fi + else + _create_new_network "$bridge_or_net_name" + fi + fi +} + +function bridge_for() { + local bridge_or_net_name + bridge_or_net_name=$1 + + if _bridge_exists "$bridge_or_net_name"; then + echo "$bridge_or_net_name" + else + xe network-list name-label="$bridge_or_net_name" params=bridge --minimal + fi +} + +function xenapi_ip_on() { + local bridge_or_net_name + bridge_or_net_name=$1 + + ifconfig $(bridge_for "$bridge_or_net_name") | grep "inet addr" | cut -d ":" -f2 | sed "s/ .*//" +} + +function xenapi_is_listening_on() { + local bridge_or_net_name + bridge_or_net_name=$1 + + ! [ -z $(xenapi_ip_on "$bridge_or_net_name") ] +} + +function parameter_is_specified() { + local parameter_name + parameter_name=$1 + + compgen -v | grep "$parameter_name" +} diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh index 0d5e31eebd..161d7e774d 100755 --- a/tools/xen/install_os_domU.sh +++ b/tools/xen/install_os_domU.sh @@ -67,97 +67,28 @@ create_directory_for_kernels # # Configure Networking # - -# Helper to create networks -# Uses echo trickery to return network uuid -function create_network() { - br=$1 - dev=$2 - vlan=$3 - netname=$4 - if [ -z $br ] - then - pif=$(xe_min pif-list device=$dev VLAN=$vlan) - if [ -z $pif ] - then - net=$(xe network-create name-label=$netname) - else - net=$(xe_min network-list PIF-uuids=$pif) - fi - echo $net - return 0 - fi - if [ ! $(xe_min network-list params=bridge | grep -w --only-matching $br) ] - then - echo "Specified bridge $br does not exist" - echo "If you wish to use defaults, please keep the bridge name empty" - exit 1 - else - net=$(xe_min network-list bridge=$br) - echo $net - fi -} - -function errorcheck() { - rc=$? 
- if [ $rc -ne 0 ] - then - exit $rc - fi -} - -# Create host, vm, mgmt, pub networks on XenServer -VM_NET=$(create_network "$VM_BR" "$VM_DEV" "$VM_VLAN" "vmbr") -errorcheck -MGT_NET=$(create_network "$MGT_BR" "$MGT_DEV" "$MGT_VLAN" "mgtbr") -errorcheck -PUB_NET=$(create_network "$PUB_BR" "$PUB_DEV" "$PUB_VLAN" "pubbr") -errorcheck - -# Helper to create vlans -function create_vlan() { - dev=$1 - vlan=$2 - net=$3 - # VLAN -1 refers to no VLAN (physical network) - if [ $vlan -eq -1 ] - then - return - fi - if [ -z $(xe_min vlan-list tag=$vlan) ] - then - pif=$(xe_min pif-list network-uuid=$net) - # We created a brand new network this time - if [ -z $pif ] - then - pif=$(xe_min pif-list device=$dev VLAN=-1) - xe vlan-create pif-uuid=$pif vlan=$vlan network-uuid=$net - else - echo "VLAN does not exist but PIF attached to this network" - echo "How did we reach here?" - exit 1 - fi - fi -} - -# Create vlans for vm and management -create_vlan $PUB_DEV $PUB_VLAN $PUB_NET -create_vlan $VM_DEV $VM_VLAN $VM_NET -create_vlan $MGT_DEV $MGT_VLAN $MGT_NET - -# Get final bridge names -if [ -z $VM_BR ]; then - VM_BR=$(xe_min network-list uuid=$VM_NET params=bridge) -fi -if [ -z $MGT_BR ]; then - MGT_BR=$(xe_min network-list uuid=$MGT_NET params=bridge) +setup_network "$VM_BRIDGE_OR_NET_NAME" +setup_network "$MGT_BRIDGE_OR_NET_NAME" +setup_network "$PUB_BRIDGE_OR_NET_NAME" + +if parameter_is_specified "FLAT_NETWORK_BRIDGE"; then + cat >&2 << EOF +ERROR: FLAT_NETWORK_BRIDGE is specified in localrc file +This is considered as an error, as its value will be derived from the +VM_BRIDGE_OR_NET_NAME variable's value. +EOF + exit 1 fi -if [ -z $PUB_BR ]; then - PUB_BR=$(xe_min network-list uuid=$PUB_NET params=bridge) + +if ! xenapi_is_listening_on "$MGT_BRIDGE_OR_NET_NAME"; then + cat >&2 << EOF +ERROR: XenAPI does not have an assigned IP address on the management network. +please review your XenServer network configuration / localrc file. 
+EOF + exit 1 fi -# dom0 ip, XenAPI is assumed to be listening -HOST_IP=${HOST_IP:-`ifconfig xenbr0 | grep "inet addr" | cut -d ":" -f2 | sed "s/ .*//"`} +HOST_IP=$(xenapi_ip_on "$MGT_BRIDGE_OR_NET_NAME") # Set up ip forwarding, but skip on xcp-xapi if [ -a /etc/sysconfig/network ]; then @@ -263,7 +194,15 @@ if [ -z "$templateuuid" ]; then # create a new VM with the given template # creating the correct VIFs and metadata - $THIS_DIR/scripts/install-os-vpx.sh -t "$UBUNTU_INST_TEMPLATE_NAME" -v $VM_BR -m $MGT_BR -p $PUB_BR -l $GUEST_NAME -r $OSDOMU_MEM_MB -k "flat_network_bridge=${VM_BR}" + FLAT_NETWORK_BRIDGE=$(bridge_for "$VM_BRIDGE_OR_NET_NAME") + $THIS_DIR/scripts/install-os-vpx.sh \ + -t "$UBUNTU_INST_TEMPLATE_NAME" \ + -v "$VM_BRIDGE_OR_NET_NAME" \ + -m "$MGT_BRIDGE_OR_NET_NAME" \ + -p "$PUB_BRIDGE_OR_NET_NAME" \ + -l "$GUEST_NAME" \ + -r "$OSDOMU_MEM_MB" \ + -k "flat_network_bridge=${FLAT_NETWORK_BRIDGE}" # wait for install to finish wait_for_VM_to_halt diff --git a/tools/xen/scripts/install-os-vpx.sh b/tools/xen/scripts/install-os-vpx.sh index 241296bd87..6105a1ea5e 100755 --- a/tools/xen/scripts/install-os-vpx.sh +++ b/tools/xen/scripts/install-os-vpx.sh @@ -235,7 +235,7 @@ create_gi_vif() create_vm_vif() { local v="$1" - echo "Installing management interface on $BRIDGE_V." + echo "Installing VM interface on $BRIDGE_V." local out_network_uuid=$(find_network "$BRIDGE_V") create_vif "$v" "$out_network_uuid" "1" >/dev/null } diff --git a/tools/xen/xenrc b/tools/xen/xenrc index e50f954715..7aaafd219d 100644 --- a/tools/xen/xenrc +++ b/tools/xen/xenrc @@ -16,6 +16,16 @@ VDI_MB=${VDI_MB:-5000} OSDOMU_MEM_MB=1024 OSDOMU_VDI_GB=8 +# Network mapping. Specify bridge names or network names. Network names may +# differ across localised versions of XenServer. If a given bridge/network +# was not found, a new network will be created with the specified name. + +# The management network is specified by the bridge name. 
xenbr0 is usually +# the name of the bridge of the network associated with the hypervisor's eth0. +MGT_BRIDGE_OR_NET_NAME="xenbr0" +VM_BRIDGE_OR_NET_NAME="OpenStack VM Network" +PUB_BRIDGE_OR_NET_NAME="OpenStack Public Network" + # VM Password GUEST_PASSWORD=${GUEST_PASSWORD:-secrete} @@ -28,28 +38,17 @@ HOST_IP_IFACE=${HOST_IP_IFACE:-eth3} # Our nova host's network info # -# A host-only ip that let's the interface come up, otherwise unused -VM_IP=${VM_IP:-10.255.255.255} +# Management network MGT_IP=${MGT_IP:-172.16.100.55} -PUB_IP=${PUB_IP:-192.168.1.55} - -# Public network -PUB_NETMASK=${PUB_NETMASK:-255.255.255.0} -PUB_BR=${PUB_BR:-"xenbr0"} -PUB_VLAN=${PUB_VLAN:--1} -PUB_DEV=${PUB_DEV:-eth0} +MGT_NETMASK=${MGT_NETMASK:-255.255.255.0} -# VM network params +# VM Network +VM_IP=${VM_IP:-10.255.255.255} VM_NETMASK=${VM_NETMASK:-255.255.255.0} -VM_BR=${VM_BR:-""} -VM_VLAN=${VM_VLAN:-100} -VM_DEV=${VM_DEV:-eth0} -# MGMT network params -MGT_NETMASK=${MGT_NETMASK:-255.255.255.0} -MGT_BR=${MGT_BR:-""} -MGT_VLAN=${MGT_VLAN:-101} -MGT_DEV=${MGT_DEV:-eth0} +# Public network +PUB_IP=${PUB_IP:-192.168.1.55} +PUB_NETMASK=${PUB_NETMASK:-255.255.255.0} # Decide if you should enable eth0, # the guest installer network From 767c6dfa0ee42c4803e28a88e62276b2b0edf7f6 Mon Sep 17 00:00:00 2001 From: Eric Windisch Date: Thu, 23 May 2013 11:55:40 -0400 Subject: [PATCH 0113/4704] Create usable IPC directory for ZeroMQ By default, the zeromq driver looks for /var/run/openstack which needs to be created and given the right permissions. It is easier and just as safe in the case of devstack to just use mktemp to establish a temporary working directory. 
Change-Id: I4cec33e49d2b042a244420fb40d83d476e4971cd --- lib/rpc_backend | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/rpc_backend b/lib/rpc_backend index fc439ecfb3..16882192ea 100644 --- a/lib/rpc_backend +++ b/lib/rpc_backend @@ -86,6 +86,10 @@ function cleanup_rpc_backend { else exit_distro_not_supported "zeromq installation" fi + + # Necessary directory for socket location. + sudo mkdir -p /var/run/openstack + sudo chown $STACK_USER /var/run/openstack fi } From 800bf387b370e00436953e8a0076c5127e616b0f Mon Sep 17 00:00:00 2001 From: Eric Windisch Date: Fri, 24 May 2013 11:21:11 -0400 Subject: [PATCH 0114/4704] ZeroMQ driver to default to redis matchmaker Switching to the redis matchmaker. The localhost matchmaker does not presently work. Also, the localhost matchmaker could not work for multi-host setups. Change-Id: I81a26b9af78328e360a18b87371c619e194365d2 --- README.md | 1 + lib/rpc_backend | 20 ++++++++++++++------ 2 files changed, 15 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 1987db850e..8573638c41 100644 --- a/README.md +++ b/README.md @@ -153,6 +153,7 @@ You can then run many compute nodes, each of which should have a `stackrc` which MYSQL_HOST=$SERVICE_HOST RABBIT_HOST=$SERVICE_HOST Q_HOST=$SERVICE_HOST + MATCHMAKER_REDIS_HOST=$SERVICE_HOST # Cells diff --git a/lib/rpc_backend b/lib/rpc_backend index fc439ecfb3..e64d098d3d 100644 --- a/lib/rpc_backend +++ b/lib/rpc_backend @@ -78,11 +78,11 @@ function cleanup_rpc_backend { fi elif is_service_enabled zeromq; then if is_fedora; then - uninstall_package zeromq python-zmq + uninstall_package zeromq python-zmq redis elif is_ubuntu; then - uninstall_package libzmq1 python-zmq + uninstall_package libzmq1 python-zmq redis-server elif is_suse; then - uninstall_package libzmq1 python-pyzmq + uninstall_package libzmq1 python-pyzmq redis else exit_distro_not_supported "zeromq installation" fi @@ -115,12 +115,15 @@ function install_rpc_backend() { exit_distro_not_supported 
"qpid installation" fi elif is_service_enabled zeromq; then + # NOTE(ewindisch): Redis is not strictly necessary + # but there is a matchmaker driver that works + # really well & out of the box for multi-node. if is_fedora; then - install_package zeromq python-zmq + install_package zeromq python-zmq redis elif is_ubuntu; then - install_package libzmq1 python-zmq + install_package libzmq1 python-zmq redis-server elif is_suse; then - install_package libzmq1 python-pyzmq + install_package libzmq1 python-pyzmq redis else exit_distro_not_supported "zeromq installation" fi @@ -158,6 +161,11 @@ function iniset_rpc_backend() { local section=$3 if is_service_enabled zeromq; then iniset $file $section rpc_backend ${package}.openstack.common.rpc.impl_zmq + iniset $file $section rpc_zmq_matchmaker \ + ${package}.openstack.common.rpc.matchmaker_redis.MatchMakerRedis + # Set MATCHMAKER_REDIS_HOST if running multi-node. + MATCHMAKER_REDIS_HOST=${MATCHMAKER_REDIS_HOST:-127.0.0.1} + iniset $file matchmaker_redis host $MATCHMAKER_REDIS_HOST elif is_service_enabled qpid; then iniset $file $section rpc_backend ${package}.openstack.common.rpc.impl_qpid if is_ubuntu; then From 7164fc9bc06534e3c54ce9b7784676bda5f757b6 Mon Sep 17 00:00:00 2001 From: Jian Wen Date: Sat, 25 May 2013 00:43:44 +0800 Subject: [PATCH 0115/4704] Add myself to AUTHORS and add my email addresses to .mailmap Change-Id: I5922421cfb5ef0929f6375089efcd1dc1224e30a --- .mailmap | 1 + AUTHORS | 1 + 2 files changed, 2 insertions(+) diff --git a/.mailmap b/.mailmap index d5ad780b38..a49875d22d 100644 --- a/.mailmap +++ b/.mailmap @@ -2,3 +2,4 @@ # # Jiajun Liu +Jian Wen \ No newline at end of file diff --git a/AUTHORS b/AUTHORS index 6b6a85fee4..c6b40d8203 100644 --- a/AUTHORS +++ b/AUTHORS @@ -26,6 +26,7 @@ James E. 
Blair Jason Cannavale Jay Pipes Jesse Andrews +Jian Wen Joe Gordon Johannes Erdfelt John Postlethwait From f652e0fb6d46dd39cf9932234a04571c7ede3110 Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Tue, 21 May 2013 18:12:48 +0100 Subject: [PATCH 0116/4704] xenapi - quantum: L2 agents for dom0 and domU This patch makes it easier to support the all-in-one installation of XenServer with Quantum: - separate agents for domU and dom0 openvswitches. The domU agent's name is "q-domua", and the dom0 agent is called "q-agt" - create a physnet1 physical network to connect those openvswitches Note: This patch depends on the quantum patch: https://review.openstack.org/15022 For a diagram, see: http://goo.gl/BuAdg Change-Id: Iff851438553528b9051edd380ecbb2c0415cbf78 --- lib/nova | 5 +++- lib/quantum | 5 ++++ lib/quantum_plugins/openvswitch | 50 +++++++++++++++++---------------- tools/xen/functions | 26 +++++++++++++++++ tools/xen/install_os_domU.sh | 21 ++++++++++++-- tools/xen/xenrc | 1 + 6 files changed, 81 insertions(+), 27 deletions(-) diff --git a/lib/nova b/lib/nova index 9fc4ded7b6..ea932a31d9 100644 --- a/lib/nova +++ b/lib/nova @@ -80,7 +80,10 @@ if [ "$VIRT_DRIVER" = 'xenserver' ]; then PUBLIC_INTERFACE_DEFAULT=eth3 GUEST_INTERFACE_DEFAULT=eth1 # Allow ``build_domU.sh`` to specify the flat network bridge via kernel args - FLAT_NETWORK_BRIDGE_DEFAULT=$(grep -o 'flat_network_bridge=[[:alnum:]]*' /proc/cmdline | cut -d= -f 2 | sort -u) + FLAT_NETWORK_BRIDGE_DEFAULT=$(sed -e 's/.* flat_network_bridge=\([[:alnum:]]*\).*$/\1/g' /proc/cmdline) + if is_service_enabled quantum; then + XEN_INTEGRATION_BRIDGE=$(sed -e 's/.* xen_integration_bridge=\([[:alnum:]]*\).*$/\1/g' /proc/cmdline) + fi elif [ "$VIRT_DRIVER" = 'baremetal' ]; then NETWORK_MANAGER=${NETWORK_MANAGER:-FlatManager} PUBLIC_INTERFACE_DEFAULT=eth0 diff --git a/lib/quantum b/lib/quantum index c36a743518..d85c6483aa 100644 --- a/lib/quantum +++ b/lib/quantum @@ -386,6 +386,11 @@ function start_quantum_agents() { 
screen_it q-l3 "cd $QUANTUM_DIR && python $AGENT_L3_BINARY --config-file $QUANTUM_CONF --config-file=$Q_L3_CONF_FILE" screen_it q-meta "cd $QUANTUM_DIR && python $AGENT_META_BINARY --config-file $QUANTUM_CONF --config-file=$Q_META_CONF_FILE" + if [ "$VIRT_DRIVER" = 'xenserver' ]; then + # For XenServer, start an agent for the domU openvswitch + screen_it q-domua "cd $QUANTUM_DIR && python $AGENT_BINARY --config-file $QUANTUM_CONF --config-file /$Q_PLUGIN_CONF_FILE.domU" + fi + if is_service_enabled q-lbaas; then screen_it q-lbaas "cd $QUANTUM_DIR && python $AGENT_LBAAS_BINARY --config-file $QUANTUM_CONF --config-file=$LBAAS_AGENT_CONF_FILENAME" fi diff --git a/lib/quantum_plugins/openvswitch b/lib/quantum_plugins/openvswitch index 374172014b..7b9835df9f 100644 --- a/lib/quantum_plugins/openvswitch +++ b/lib/quantum_plugins/openvswitch @@ -11,7 +11,9 @@ function quantum_plugin_create_nova_conf() { _quantum_ovs_base_configure_nova_vif_driver if [ "$VIRT_DRIVER" = 'xenserver' ]; then iniset $NOVA_CONF DEFAULT xenapi_vif_driver nova.virt.xenapi.vif.XenAPIOpenVswitchDriver - iniset $NOVA_CONF DEFAULT xenapi_ovs_integration_bridge $FLAT_NETWORK_BRIDGE + iniset $NOVA_CONF DEFAULT xenapi_ovs_integration_bridge $XEN_INTEGRATION_BRIDGE + # Disable nova's firewall so that it does not conflict with quantum + iniset $NOVA_CONF DEFAULT firewall_driver nova.virt.firewall.NoopFirewallDriver fi } @@ -71,6 +73,10 @@ function quantum_plugin_configure_plugin_agent() { AGENT_BINARY="$QUANTUM_DIR/bin/quantum-openvswitch-agent" if [ "$VIRT_DRIVER" = 'xenserver' ]; then + # Make a copy of our config for domU + sudo cp /$Q_PLUGIN_CONF_FILE "/$Q_PLUGIN_CONF_FILE.domu" + + # Deal with Dom0's L2 Agent: Q_RR_DOM0_COMMAND="$QUANTUM_DIR/bin/quantum-rootwrap-xen-dom0 $Q_RR_CONF_FILE" # For now, duplicate the xen configuration already found in nova.conf @@ -83,29 +89,25 @@ function quantum_plugin_configure_plugin_agent() { # that executes commands on dom0 via a XenAPI plugin. 
iniset /$Q_PLUGIN_CONF_FILE AGENT root_helper "$Q_RR_DOM0_COMMAND" - # FLAT_NETWORK_BRIDGE is the dom0 integration bridge. To - # ensure the bridge lacks direct connectivity, set - # VM_VLAN=-1;VM_DEV=invalid in localrc - iniset /$Q_PLUGIN_CONF_FILE OVS integration_bridge $FLAT_NETWORK_BRIDGE - - # The ovs agent needs to ensure that the ports associated with - # a given network share the same local vlan tag. On - # single-node XS/XCP, this requires monitoring both the dom0 - # bridge, where VM's are attached, and the domU bridge, where - # dhcp servers are attached. - if is_service_enabled q-dhcp; then - iniset /$Q_PLUGIN_CONF_FILE OVS domu_integration_bridge $OVS_BRIDGE - # DomU will use the regular rootwrap - iniset /$Q_PLUGIN_CONF_FILE AGENT domu_root_helper "$Q_RR_COMMAND" - # Plug the vm interface into the domU integration bridge. - sudo ip addr flush dev $GUEST_INTERFACE_DEFAULT - sudo ip link set $OVS_BRIDGE up - # Assign the VM IP only if it has been set explicitly - if [[ "$VM_IP" != "" ]]; then - sudo ip addr add $VM_IP dev $OVS_BRIDGE - fi - sudo ovs-vsctl add-port $OVS_BRIDGE $GUEST_INTERFACE_DEFAULT - fi + # Set "physical" mapping + iniset /$Q_PLUGIN_CONF_FILE OVS bridge_mappings "physnet1:$FLAT_NETWORK_BRIDGE" + + # XEN_INTEGRATION_BRIDGE is the integration bridge in dom0 + iniset /$Q_PLUGIN_CONF_FILE OVS integration_bridge $XEN_INTEGRATION_BRIDGE + + # Set up domU's L2 agent: + + # Create a bridge "br-$GUEST_INTERFACE_DEFAULT" + sudo ovs-vsctl --no-wait -- --may-exist add-br "br-$GUEST_INTERFACE_DEFAULT" + # Add $GUEST_INTERFACE_DEFAULT to that bridge + sudo ovs-vsctl add-port "br-$GUEST_INTERFACE_DEFAULT" $GUEST_INTERFACE_DEFAULT + + # Set bridge mappings to "physnet1:br-$GUEST_INTERFACE_DEFAULT" + iniset "/$Q_PLUGIN_CONF_FILE.domU" OVS bridge_mappings "physnet1:br-$GUEST_INTERFACE_DEFAULT" + # Set integration bridge to domU's + iniset "/$Q_PLUGIN_CONF_FILE.domU" OVS integration_bridge $OVS_BRIDGE + # Set root wrap + iniset 
"/$Q_PLUGIN_CONF_FILE.domU" AGENT root_helper "$Q_RR_COMMAND" fi } diff --git a/tools/xen/functions b/tools/xen/functions index c6e484d03e..3458263206 100644 --- a/tools/xen/functions +++ b/tools/xen/functions @@ -123,6 +123,32 @@ function _bridge_exists() { ! [ -z $(xe network-list bridge="$bridge" --minimal) ] } +function _network_uuid() { + local bridge_or_net_name + bridge_or_net_name=$1 + + if _bridge_exists "$bridge_or_net_name"; then + xe network-list bridge="$bridge_or_net_name" --minimal + else + xe network-list name-label="$bridge_or_net_name" --minimal + fi +} + +function add_interface() { + local vm_name + local bridge_or_network_name + + vm_name="$1" + bridge_or_network_name="$2" + device_number="$3" + + local vm + local net + + vm=$(xe vm-list name-label="$vm_name" --minimal) + net=$(_network_uuid "$bridge_or_network_name") + xe vif-create network-uuid=$net vm-uuid=$vm device=$device_number +} function setup_network() { local bridge_or_net_name diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh index 161d7e774d..8b2a687119 100755 --- a/tools/xen/install_os_domU.sh +++ b/tools/xen/install_os_domU.sh @@ -71,6 +71,12 @@ setup_network "$VM_BRIDGE_OR_NET_NAME" setup_network "$MGT_BRIDGE_OR_NET_NAME" setup_network "$PUB_BRIDGE_OR_NET_NAME" +# With quantum, one more network is required, which is internal to the +# hypervisor, and used by the VMs +if is_service_enabled quantum; then + setup_network "$XEN_INT_BRIDGE_OR_NET_NAME" +fi + if parameter_is_specified "FLAT_NETWORK_BRIDGE"; then cat >&2 << EOF ERROR: FLAT_NETWORK_BRIDGE is specified in localrc file @@ -195,6 +201,12 @@ if [ -z "$templateuuid" ]; then # create a new VM with the given template # creating the correct VIFs and metadata FLAT_NETWORK_BRIDGE=$(bridge_for "$VM_BRIDGE_OR_NET_NAME") + + KERNEL_PARAMS_FOR_QUANTUM="" + if is_service_enabled quantum; then + XEN_INTEGRATION_BRIDGE=$(bridge_for "$XEN_INT_BRIDGE_OR_NET_NAME") + 
KERNEL_PARAMS_FOR_QUANTUM="xen_integration_bridge=${XEN_INTEGRATION_BRIDGE}" + fi $THIS_DIR/scripts/install-os-vpx.sh \ -t "$UBUNTU_INST_TEMPLATE_NAME" \ -v "$VM_BRIDGE_OR_NET_NAME" \ @@ -202,7 +214,7 @@ if [ -z "$templateuuid" ]; then -p "$PUB_BRIDGE_OR_NET_NAME" \ -l "$GUEST_NAME" \ -r "$OSDOMU_MEM_MB" \ - -k "flat_network_bridge=${FLAT_NETWORK_BRIDGE}" + -k "flat_network_bridge=${FLAT_NETWORK_BRIDGE} ${KERNEL_PARAMS_FOR_QUANTUM}" # wait for install to finish wait_for_VM_to_halt @@ -240,11 +252,16 @@ fi # $THIS_DIR/build_xva.sh "$GUEST_NAME" +# Attach a network interface for the integration network (so that the bridge +# is created by XenServer). This is required for Quantum. +if is_service_enabled quantum; then + add_interface "$GUEST_NAME" "$XEN_INT_BRIDGE_OR_NET_NAME" "4" +fi + # create a snapshot before the first boot # to allow a quick re-run with the same settings xe vm-snapshot vm="$GUEST_NAME" new-name-label="$SNAME_FIRST_BOOT" - # # Run DevStack VM # diff --git a/tools/xen/xenrc b/tools/xen/xenrc index 7aaafd219d..0ed3a6a7e5 100644 --- a/tools/xen/xenrc +++ b/tools/xen/xenrc @@ -25,6 +25,7 @@ OSDOMU_VDI_GB=8 MGT_BRIDGE_OR_NET_NAME="xenbr0" VM_BRIDGE_OR_NET_NAME="OpenStack VM Network" PUB_BRIDGE_OR_NET_NAME="OpenStack Public Network" +XEN_INT_BRIDGE_OR_NET_NAME="OpenStack VM Integration Network" # VM Password GUEST_PASSWORD=${GUEST_PASSWORD:-secrete} From 4a9f26bf2081f6d3643f4d4e30434024b8724887 Mon Sep 17 00:00:00 2001 From: Maru Newby Date: Wed, 22 May 2013 05:50:21 +0000 Subject: [PATCH 0117/4704] Install nose 1.1 from EPEL for RHEL. * It's not possible to prevent python-nose (which is not compatible with Tempest) from being installed since so many os packages dependent on it. * This patch installs python-nose1.1 from EPEL and adds a /usr/local/bin symlink for nosetests1.1 so that invocations for Tempest will work as expected. 
Change-Id: I756d1ca5eda127dcdd2efb047c8e14dd344f710d --- files/rpms/horizon | 3 +-- files/rpms/swift | 3 +-- stack.sh | 9 +++++++++ 3 files changed, 11 insertions(+), 4 deletions(-) diff --git a/files/rpms/horizon b/files/rpms/horizon index d50482ea1d..151e7e21af 100644 --- a/files/rpms/horizon +++ b/files/rpms/horizon @@ -16,8 +16,7 @@ python-kombu python-migrate python-mox python-netaddr -# RHEL6's python-nose is incompatible with Tempest -python-nose #dist:f16,f17,f18 +python-nose python-paste #dist:f16,f17,f18 python-paste-deploy #dist:f16,f17,f18 python-pep8 diff --git a/files/rpms/swift b/files/rpms/swift index c626d8e3e0..1b36e34eab 100644 --- a/files/rpms/swift +++ b/files/rpms/swift @@ -7,8 +7,7 @@ python-devel python-eventlet python-greenlet python-netifaces -# RHEL6's python-nose is incompatible with Tempest -python-nose # dist:f16,f17,f18 +python-nose python-paste-deploy # dist:f16,f17,f18 python-setuptools # dist:f16,f17,f18 python-simplejson diff --git a/stack.sh b/stack.sh index 5dea00097a..16533df6f4 100755 --- a/stack.sh +++ b/stack.sh @@ -609,6 +609,15 @@ if [[ is_fedora && $DISTRO =~ (rhel6) ]]; then # transiently, meaning we avoid the issue of it not being cleaned # out properly. Note we do this before the track-depends below. pip_install hgtools + + # The version of python-nose in the RHEL6 repo is incompatible + # with Tempest. As a workaround: + + # Install nose 1.1 (Tempest-compatible) from EPEL + install_package python-nose1.1 + # Add a symlink for the new nosetests to allow tox for Tempest to + # work unmolested. 
+ sudo ln -sf /usr/bin/nosetests1.1 /usr/local/bin/nosetests fi TRACK_DEPENDS=${TRACK_DEPENDS:-False} From 7033829d71084a6183a0c6432e748ddd3e48a6ea Mon Sep 17 00:00:00 2001 From: Kieran Spear Date: Tue, 28 May 2013 11:31:31 +1000 Subject: [PATCH 0118/4704] Add h-api to suggested ENABLED_SERVICES for Heat Currently lib/heat says: To enable, add the following to localrc ENABLED_SERVICES+=,heat,h-api-cfn,h-api-cw,h-eng Once the stack is up, `heat list` fails because the API server isn't running. This commit adds h-api to that list. Also make sure h-api is killed in stop_heat(). Change-Id: I2e818bb343680b3778f9277c23c766f784d28887 --- lib/heat | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/heat b/lib/heat index 0c95ebb517..4d2f84e1c4 100644 --- a/lib/heat +++ b/lib/heat @@ -2,7 +2,7 @@ # Install and start **Heat** service # To enable, add the following to localrc -# ENABLED_SERVICES+=,heat,h-api-cfn,h-api-cw,h-eng +# ENABLED_SERVICES+=,heat,h-api,h-api-cfn,h-api-cw,h-eng # Dependencies: # - functions @@ -193,8 +193,8 @@ function start_heat() { # stop_heat() - Stop running processes function stop_heat() { - # Kill the cinder screen windows - for serv in h-eng h-api-cfn h-api-cw; do + # Kill the screen windows + for serv in h-eng h-api h-api-cfn h-api-cw; do screen -S $SCREEN_NAME -p $serv -X kill done } From f02be85cc8096a458f03d18fb92fd36f5cedbc12 Mon Sep 17 00:00:00 2001 From: Jiajun Liu Date: Wed, 22 May 2013 08:55:25 +0000 Subject: [PATCH 0119/4704] specify session name on perform rejoin stack rejoin-stack.sh would not specify screen session name which will lead unstack.sh unable to kill the screen session started by rejoin-stack.sh after hypervisor rebooted. So specify the session name if rejoin-stack.sh need to start a new sreen session. 
fixes bug 1182806 Change-Id: I68635887aa82fa63f3667e0f090ea2836eec7b68 --- rejoin-stack.sh | 4 +++- stack.sh | 1 - stackrc | 3 +++ 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/rejoin-stack.sh b/rejoin-stack.sh index a82c73cb3b..c452694f9f 100755 --- a/rejoin-stack.sh +++ b/rejoin-stack.sh @@ -5,13 +5,15 @@ TOP_DIR=`dirname $0` +source $TOP_DIR/stackrc + # if screenrc exists, run screen if [[ -e $TOP_DIR/stack-screenrc ]]; then if screen -ls | egrep -q "[0-9].stack"; then echo "Attaching to already started screen session.." exec screen -r stack fi - exec screen -c $TOP_DIR/stack-screenrc + exec screen -c $TOP_DIR/stack-screenrc -S $SCREEN_NAME fi echo "Couldn't find $TOP_DIR/stack-screenrc file; have you run stack.sh yet?" diff --git a/stack.sh b/stack.sh index 5dea00097a..d7367fb0a4 100755 --- a/stack.sh +++ b/stack.sh @@ -125,7 +125,6 @@ fi # and the specified rpc backend is available on your platform. check_rpc_backend -SCREEN_NAME=${SCREEN_NAME:-stack} # Check to see if we are already running DevStack # Note that this may fail if USE_SCREEN=False if type -p screen >/dev/null && screen -ls | egrep -q "[0-9].$SCREEN_NAME"; then diff --git a/stackrc b/stackrc index edf5a824f3..2ac564cb8e 100644 --- a/stackrc +++ b/stackrc @@ -245,6 +245,9 @@ PUBLIC_NETWORK_NAME=${PUBLIC_NETWORK_NAME:-"public"} # Compatibility until it's eradicated from CI USE_SCREEN=${SCREEN_DEV:-$USE_SCREEN} +# Set default screen name +SCREEN_NAME=${SCREEN_NAME:-stack} + # Local variables: # mode: shell-script # End: From a25fa9db838db84d242a8c1c4dfc5bbf1231c5e0 Mon Sep 17 00:00:00 2001 From: armando-migliaccio Date: Mon, 15 Apr 2013 13:46:35 -0700 Subject: [PATCH 0120/4704] Changes to NVP plugin configuration file Tweak devstack support to the nicira plugin in order to reflect changes made in: https://review.openstack.org/#/c/26427 This complements fixes for bug #1121605 Change-Id: I221466fb40412ceb457d3e888767291a76176aa6 --- lib/quantum_plugins/nicira | 87 
++++++++++++++++---------------------- 1 file changed, 36 insertions(+), 51 deletions(-) diff --git a/lib/quantum_plugins/nicira b/lib/quantum_plugins/nicira index fc06b55713..7795eed8aa 100644 --- a/lib/quantum_plugins/nicira +++ b/lib/quantum_plugins/nicira @@ -14,9 +14,6 @@ function setup_integration_bridge() { # Get the first controller controllers=(${NVP_CONTROLLERS//,/ }) OVS_MGR_IP=${controllers[0]} - elif [[ "$NVP_CONTROLLER_CONNECTION" != "" ]]; then - conn=(${NVP_CONTROLLER_CONNECTION//\:/ }) - OVS_MGR_IP=${conn[0]} else die $LINENO "Error - No controller specified. Unable to set a manager for OVS" fi @@ -83,55 +80,43 @@ function quantum_plugin_configure_service() { iniset /$Q_PLUGIN_CONF_FILE NVP concurrent_connections $CONCURRENT_CONNECTIONS fi - if [[ "$DEFAULT_CLUSTER" != "" ]]; then - # Make name shorter for sake of readability - DC=$DEFAULT_CLUSTER - if [[ "$DEFAULT_TZ_UUID" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE "CLUSTER:$DC" default_tz_uuid $DEFAULT_TZ_UUID - else - die $LINENO "The nicira plugin won't work without a default transport zone." - fi - if [[ "$DEFAULT_L3_GW_SVC_UUID" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE "CLUSTER:$DC" default_l3_gw_service_uuid $DEFAULT_L3_GW_SVC_UUID - Q_L3_ENABLED=True - Q_L3_ROUTER_PER_TENANT=True - iniset /$Q_PLUGIN_CONF_FILE NVP enable_metadata_access_network True - else - echo "WARNING - No l3 gw service enabled. 
You will not be able to use the L3 API extension" - fi - if [[ "$DEFAULT_L2_GW_SVC_UUID" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE "CLUSTER:$DC" default_l2_gw_service_uuid $DEFAULT_L2_GW_SVC_UUID - fi - # NVP_CONTROLLERS must be a comma separated string - if [[ "$NVP_CONTROLLERS" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE "CLUSTER:$DC" nvp_controllers $NVP_CONTROLLERS - elif [[ "$NVP_CONTROLLER_CONNECTION" != "" ]]; then - # Only 1 controller can be specified in this case - iniset /$Q_PLUGIN_CONF_FILE "CLUSTER:$DC" nvp_controller_connection $NVP_CONTROLLER_CONNECTION - else - die $LINENO "The nicira plugin needs at least an NVP controller." - fi - if [[ "$NVP_USER" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE "CLUSTER:$DC" nvp_user $NVP_USER - fi - if [[ "$NVP_PASSWORD" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE "CLUSTER:$DC" nvp_password $NVP_PASSWORD - fi - if [[ "$NVP_REQ_TIMEOUT" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE "CLUSTER:$DC" req_timeout $NVP_REQ_TIMEOUT - fi - if [[ "$NVP_HTTP_TIMEOUT" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE "CLUSTER:$DC" http_timeout $NVP_HTTP_TIMEOUT - fi - if [[ "$NVP_RETRIES" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE "CLUSTER:$DC" retries $NVP_RETRIES - fi - if [[ "$NVP_REDIRECTS" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE "CLUSTER:$DC" redirects $NVP_REDIRECTS - fi + if [[ "$DEFAULT_TZ_UUID" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE DEFAULT default_tz_uuid $DEFAULT_TZ_UUID else - echo "ERROR - Default cluster not configured. Quantum will not start" - exit 1 + die $LINENO "The nicira plugin won't work without a default transport zone." 
+ fi + if [[ "$DEFAULT_L3_GW_SVC_UUID" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE DEFAULT default_l3_gw_service_uuid $DEFAULT_L3_GW_SVC_UUID + Q_L3_ENABLED=True + Q_L3_ROUTER_PER_TENANT=True + iniset /$Q_PLUGIN_CONF_FILE NVP enable_metadata_access_network True + fi + if [[ "$DEFAULT_L2_GW_SVC_UUID" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE DEFAULT default_l2_gw_service_uuid $DEFAULT_L2_GW_SVC_UUID + fi + # NVP_CONTROLLERS must be a comma separated string + if [[ "$NVP_CONTROLLERS" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE DEFAULT nvp_controllers $NVP_CONTROLLERS + else + die $LINENO "The nicira plugin needs at least an NVP controller." + fi + if [[ "$NVP_USER" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE DEFAULT nvp_user $NVP_USER + fi + if [[ "$NVP_PASSWORD" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE DEFAULT nvp_password $NVP_PASSWORD + fi + if [[ "$NVP_REQ_TIMEOUT" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE DEFAULT req_timeout $NVP_REQ_TIMEOUT + fi + if [[ "$NVP_HTTP_TIMEOUT" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE DEFAULT http_timeout $NVP_HTTP_TIMEOUT + fi + if [[ "$NVP_RETRIES" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE DEFAULT retries $NVP_RETRIES + fi + if [[ "$NVP_REDIRECTS" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE DEFAULT redirects $NVP_REDIRECTS fi } From 2bda6cfabf5e60f8bb74bb22f3c895ca108dff3c Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Thu, 23 May 2013 09:25:10 +1000 Subject: [PATCH 0121/4704] Install nodejs for RHEL/Fedora nodejs is required for Horizon (ships a copy of lessc internally). I couldn't find nodejs in the rpm list, despite it being installed in lib/horizon. This adds to the rpm list as NOPRIME and retains the old install point. 
RHEL6 has nodejs in EPEL now, so we remove the old hack that installed an upstream version and add it to the install mentioned previously Change-Id: Ib3ee2f7d77e22666c9055c8528288c01b46a95d3 --- files/rpms/horizon | 1 + lib/horizon | 46 ++-------------------------------------------- 2 files changed, 3 insertions(+), 44 deletions(-) diff --git a/files/rpms/horizon b/files/rpms/horizon index 151e7e21af..cf16cdbeac 100644 --- a/files/rpms/horizon +++ b/files/rpms/horizon @@ -3,6 +3,7 @@ django-registration gcc httpd # NOPRIME mod_wsgi # NOPRIME +nodejs # NOPRIME pylint python-anyjson python-BeautifulSoup diff --git a/lib/horizon b/lib/horizon index 1ee530ecff..ab1139996a 100644 --- a/lib/horizon +++ b/lib/horizon @@ -73,31 +73,6 @@ function _horizon_config_set() { fi } -# Basic install of upstream nodejs for platforms that want it -function install_nodejs() { - if [[ $(which node) ]]; then - echo "You already appear to have nodejs, skipping install" - return - fi - - # There are several node deployment scripts; one may be more - # appropriate at some future point, but for now direct download is - # the simplest way. The version barely matters for lesscss which - # doesn't use anything fancy. - local ver=0.10.1 - local nodejs=node-v${ver}-linux-x64 - local tar=$nodejs.tar.gz - local nodejs_url=http://nodejs.org/dist/v${ver}/${tar} - - curl -Ss ${nodejs_url} | tar -C ${DEST} -xz - if [ $? 
-ne 0 ]; then - echo "*** Download of nodejs failed" - return 1 - fi - - # /usr/bin so it gets found in the PATH available to horizon - sudo ln -s $DEST/$nodejs/bin/node /usr/bin/node -} # Entry Points # ------------ @@ -105,15 +80,7 @@ function install_nodejs() { # cleanup_horizon() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_horizon() { - - if [[ is_fedora && $DISTRO =~ (rhel6) ]]; then - # if the /usr/bin/node link looks like it's pointing into $DEST, - # then we installed it via install_nodejs - if [[ $(readlink -f /usr/bin/node) =~ ($DEST) ]]; then - sudo rm /usr/bin/node - fi - fi - + : } # configure_horizon() - Set config files, create data dirs, etc @@ -199,21 +166,12 @@ function install_horizon() { exit_distro_not_supported "apache installation" fi - if [[ is_fedora && $DISTRO =~ (rhel6) ]]; then - # RHEL6 currently has no native way to get nodejs, so we do a - # basic install here (see cleanup_horizon too). - # TODO: does nova have a better way that we can limit - # requirement of site-wide nodejs install? - install_nodejs - fi - # NOTE(sdague) quantal changed the name of the node binary if is_ubuntu; then if [[ ! -e "/usr/bin/node" ]]; then install_package nodejs-legacy fi - elif is_fedora && [[ "$os_RELEASE" -ge "18" ]]; then - # fedora 18 and higher gets nodejs + elif is_fedora && [[ $DISTRO =~ (rhel6) || "$os_RELEASE" -ge "18" ]]; then install_package nodejs fi From e4859f0bd3aee92ccc588614925ed697d1ba06d2 Mon Sep 17 00:00:00 2001 From: cloudnull Date: Tue, 28 May 2013 14:10:58 -0500 Subject: [PATCH 0122/4704] Set variables in rsyslog.conf to disable rate-limiting. Per Bug https://bugs.launchpad.net/openstack-ci/+bug/1024487, this will modify the rate-limiting setting for rsyslog.conf. If rate-limiting is being used the addition will set it to 0 otherwise the addition will add the variables to the configuration file and set them to 0. 
Implements: Variables "$SystemLogRateLimitBurst 0", and "$SystemLogRateLimitInterval 0" in "/etc/rsyslog.conf". These changes are persuent to what has been outlined in the rsyslog docs as found here : http://www.rsyslog.com/tag/SystemLogRateLimitInterval/ Updated commit implements changes in code and placement post code review. Recent change implements the "SystemLogRateLimitBurst" in first if statement, which was "SystemLogRateLimitInterval" and wrong. Fixes: Bug 1024487 Change-Id: I2637889cbe9e5e87bbfc0f1bb5047abae34d953c --- stack.sh | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/stack.sh b/stack.sh index 5dea00097a..ce7ab57454 100755 --- a/stack.sh +++ b/stack.sh @@ -754,6 +754,22 @@ EOF EOF sudo mv /tmp/90-stack-s.conf /etc/rsyslog.d fi + + RSYSLOGCONF="/etc/rsyslog.conf" + if [ -f $RSYSLOGCONF ]; then + sudo cp -b $RSYSLOGCONF $RSYSLOGCONF.bak + if [[ $(grep '$SystemLogRateLimitBurst' $RSYSLOGCONF) ]]; then + sudo sed -i 's/$SystemLogRateLimitBurst\ .*/$SystemLogRateLimitBurst\ 0/' $RSYSLOGCONF + else + sudo sed -i '$ i $SystemLogRateLimitBurst\ 0' $RSYSLOGCONF + fi + if [[ $(grep '$SystemLogRateLimitInterval' $RSYSLOGCONF) ]]; then + sudo sed -i 's/$SystemLogRateLimitInterval\ .*/$SystemLogRateLimitInterval\ 0/' $RSYSLOGCONF + else + sudo sed -i '$ i $SystemLogRateLimitInterval\ 0' $RSYSLOGCONF + fi + fi + echo_summary "Starting rsyslog" restart_service rsyslog fi From 796cf890ffe2add16bddc8fa59cb73a51e8f3b50 Mon Sep 17 00:00:00 2001 From: Emanuele Rocca Date: Sat, 25 May 2013 23:48:33 +0200 Subject: [PATCH 0123/4704] Add missing .debs required by Debian dnsmasq-utils is available on all Debian-based supported distros. Debian Wheezy and Jessie need qemu to be installed in order for the resulting OpenStack environment to be fully functional. 
Change-Id: I910968f60ca1ed5c300a1fa599fb480d65591145 --- files/apts/nova | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/files/apts/nova b/files/apts/nova index 298e25feee..c24333c3db 100644 --- a/files/apts/nova +++ b/files/apts/nova @@ -1,5 +1,5 @@ dnsmasq-base -dnsmasq-utils # for dhcp_release only available in dist:oneiric,precise,quantal +dnsmasq-utils # for dhcp_release kpartx parted iputils-arping @@ -13,6 +13,7 @@ ebtables sqlite3 sudo kvm +qemu # dist:wheezy,jessie libvirt-bin # NOPRIME libjs-jquery-tablesorter # Needed for coverage html reports vlan From 535a8148ccfc0ff99d58ba9242950811d14d2935 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Wed, 15 May 2013 09:25:27 +1000 Subject: [PATCH 0124/4704] Install EPEL for rhel6 if not detected Simple check to see if EPEL repo is enabled for RHEL6 and, if not, automated install of the repo RPM. Also adds an additional sanity check which checking for pip. In the prior case of EPEL being disabled, a RHEL6 host may not have access to python-pip. Although this shouldn't happen, its a good sanity check. Change-Id: I4e8a4dda9475b75d071534d4eef469198502a048 --- functions | 4 ++++ stack.sh | 15 +++++++++++++-- 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/functions b/functions index 669fa69bd5..dfde7dc551 100644 --- a/functions +++ b/functions @@ -1413,6 +1413,10 @@ function get_pip_command() { else which pip fi + + if [ $? -ne 0 ]; then + die $LINENO "Unable to find pip; cannot continue" + fi } # Path permissions sanity check diff --git a/stack.sh b/stack.sh index 9a7f2ab693..99c630c9da 100755 --- a/stack.sh +++ b/stack.sh @@ -51,13 +51,24 @@ fi # Installing Open vSwitch on RHEL6 requires enabling the RDO repo. 
RHEL6_RDO_REPO_RPM=${RHEL6_RDO_REPO_RPM:-"http://rdo.fedorapeople.org/openstack/openstack-grizzly/rdo-release-grizzly-3.noarch.rpm"} RHEL6_RDO_REPO_ID=${RHEL6_RDO_REPO_ID:-"openstack-grizzly"} +# RHEL6 requires EPEL for many Open Stack dependencies +RHEL6_EPEL_RPM=${RHEL6_EPEL_RPM:-"http://dl.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm"} + if [[ is_fedora && $DISTRO =~ (rhel6) ]]; then + if ! yum repolist enabled $RHEL6_RDO_REPO_ID | grep -q $RHEL6_RDO_REPO_ID; then echo "RDO repo not detected; installing" - yum_install $RHEL6_RDO_REPO_RPM + yum_install $RHEL6_RDO_REPO_RPM || \ + die $LINENO "Error installing RDO repo, cannot continue" fi -fi + if ! yum repolist enabled epel | grep -q 'epel'; then + echo "EPEL not detected; installing" + yum_install ${RHEL6_EPEL_RPM} || \ + die $LINENO "Error installing EPEL repo, cannot continue" + fi + +fi # Global Settings # =============== From 8ff33ce75f1ea4ede96cdbf7cda983900119e6d2 Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Thu, 30 May 2013 13:26:58 +0100 Subject: [PATCH 0125/4704] xenapi: kernel_cmdline moved out from template The kernel's cmdline was set during the initial devstack machine installation. Thus, during second runs, the kernel's cmdline was not updated. This patch extracts append_kernel_cmdline, and configures domU's kernel cmdline every time. As some networking parameters are passed through the kernel cmdline, this patch makes it possible to change the network configuration, even if a cached devstack exists. 
Related to blueprint xenapi-devstack-cleanup Change-Id: I3b7175f4e83326c3e28825ac50625f6bd2a9a029 --- tools/xen/functions | 30 ++++++++++++++++++++++++++--- tools/xen/install_os_domU.sh | 21 ++++++++++---------- tools/xen/scripts/install-os-vpx.sh | 28 ++------------------------- 3 files changed, 40 insertions(+), 39 deletions(-) diff --git a/tools/xen/functions b/tools/xen/functions index 3458263206..ebfd4835a2 100644 --- a/tools/xen/functions +++ b/tools/xen/functions @@ -94,6 +94,14 @@ function find_ip_by_name() { done } +function _vm_uuid() { + local vm_name_label + + vm_name_label="$1" + + xe vm-list name-label="$vm_name_label" --minimal +} + function _create_new_network() { local name_label name_label=$1 @@ -135,17 +143,17 @@ function _network_uuid() { } function add_interface() { - local vm_name + local vm_name_label local bridge_or_network_name - vm_name="$1" + vm_name_label="$1" bridge_or_network_name="$2" device_number="$3" local vm local net - vm=$(xe vm-list name-label="$vm_name" --minimal) + vm=$(_vm_uuid "$vm_name_label") net=$(_network_uuid "$bridge_or_network_name") xe vif-create network-uuid=$net vm-uuid=$vm device=$device_number } @@ -200,3 +208,19 @@ function parameter_is_specified() { compgen -v | grep "$parameter_name" } + +function append_kernel_cmdline() +{ + local vm_name_label + local kernel_args + + vm_name_label="$1" + kernel_args="$2" + + local vm + local pv_args + + vm=$(_vm_uuid "$vm_name_label") + pv_args=$(xe vm-param-get param-name=PV-args uuid=$vm) + xe vm-param-set PV-args="$pv_args $kernel_args" uuid=$vm +} diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh index 8b2a687119..a744869288 100755 --- a/tools/xen/install_os_domU.sh +++ b/tools/xen/install_os_domU.sh @@ -200,21 +200,13 @@ if [ -z "$templateuuid" ]; then # create a new VM with the given template # creating the correct VIFs and metadata - FLAT_NETWORK_BRIDGE=$(bridge_for "$VM_BRIDGE_OR_NET_NAME") - - KERNEL_PARAMS_FOR_QUANTUM="" - if 
is_service_enabled quantum; then - XEN_INTEGRATION_BRIDGE=$(bridge_for "$XEN_INT_BRIDGE_OR_NET_NAME") - KERNEL_PARAMS_FOR_QUANTUM="xen_integration_bridge=${XEN_INTEGRATION_BRIDGE}" - fi $THIS_DIR/scripts/install-os-vpx.sh \ -t "$UBUNTU_INST_TEMPLATE_NAME" \ -v "$VM_BRIDGE_OR_NET_NAME" \ -m "$MGT_BRIDGE_OR_NET_NAME" \ -p "$PUB_BRIDGE_OR_NET_NAME" \ -l "$GUEST_NAME" \ - -r "$OSDOMU_MEM_MB" \ - -k "flat_network_bridge=${FLAT_NETWORK_BRIDGE} ${KERNEL_PARAMS_FOR_QUANTUM}" + -r "$OSDOMU_MEM_MB" # wait for install to finish wait_for_VM_to_halt @@ -253,11 +245,20 @@ fi $THIS_DIR/build_xva.sh "$GUEST_NAME" # Attach a network interface for the integration network (so that the bridge -# is created by XenServer). This is required for Quantum. +# is created by XenServer). This is required for Quantum. Also pass that as a +# kernel parameter for DomU if is_service_enabled quantum; then add_interface "$GUEST_NAME" "$XEN_INT_BRIDGE_OR_NET_NAME" "4" + + XEN_INTEGRATION_BRIDGE=$(bridge_for "$XEN_INT_BRIDGE_OR_NET_NAME") + append_kernel_cmdline \ + "$GUEST_NAME" \ + "xen_integration_bridge=${XEN_INTEGRATION_BRIDGE}" fi +FLAT_NETWORK_BRIDGE=$(bridge_for "$VM_BRIDGE_OR_NET_NAME") +append_kernel_cmdline "$GUEST_NAME" "flat_network_bridge=${FLAT_NETWORK_BRIDGE}" + # create a snapshot before the first boot # to allow a quick re-run with the same settings xe vm-snapshot vm="$GUEST_NAME" new-name-label="$SNAME_FIRST_BOOT" diff --git a/tools/xen/scripts/install-os-vpx.sh b/tools/xen/scripts/install-os-vpx.sh index 6105a1ea5e..c82f8702ba 100755 --- a/tools/xen/scripts/install-os-vpx.sh +++ b/tools/xen/scripts/install-os-vpx.sh @@ -25,7 +25,6 @@ NAME="XenServer OpenStack VPX" DATA_VDI_SIZE="500MiB" BRIDGE_M= BRIDGE_P= -KERNEL_PARAMS= VPX_FILE=os-vpx.xva AS_TEMPLATE= FROM_TEMPLATE= @@ -38,7 +37,7 @@ usage() cat << EOF Usage: $0 [-f FILE_PATH] [-d DISK_SIZE] [-v BRIDGE_NAME] [-m BRIDGE_NAME] [-p BRIDGE_NAME] - [-k PARAMS] [-r RAM] [-i|-c] [-w] [-b] [-l NAME_LABEL] [-t TEMPLATE_NW_INSTALL] + [-r 
RAM] [-i|-c] [-w] [-b] [-l NAME_LABEL] [-t TEMPLATE_NW_INSTALL] Installs XenServer OpenStack VPX. @@ -57,7 +56,6 @@ cat << EOF Defaults to xenbr0. -v bridge Specifies the bridge for the vm network -p bridge Specifies the bridge for the externally facing network. - -k params Specifies kernel parameters. -r MiB Specifies RAM used by the VPX, in MiB. By default it will take the value from the XVA. -l name Specifies the name label for the VM. @@ -81,15 +79,12 @@ cat << EOF using the default for management traffic: install-os-vpx.sh -m xapi4 - Create a VPX that automatically becomes the master: - install-os-vpx.sh -k geppetto_master=true - EOF } get_params() { - while getopts "hicwbf:d:v:m:p:k:r:l:t:" OPTION; + while getopts "hicwbf:d:v:m:p:r:l:t:" OPTION; do case $OPTION in h) usage @@ -119,9 +114,6 @@ get_params() p) BRIDGE_P=$OPTARG ;; - k) - KERNEL_PARAMS=$OPTARG - ;; r) RAM=$OPTARG ;; @@ -328,20 +320,6 @@ create_data_disk() } -set_kernel_params() -{ - local v="$1" - local args=$KERNEL_PARAMS - if [ "$args" != "" ] - then - echo "Passing Geppetto args to VPX: $args." - pvargs=$(xe vm-param-get param-name=PV-args uuid="$v") - args="$pvargs $args" - xe vm-param-set PV-args="$args" uuid="$v" - fi -} - - set_memory() { local v="$1" @@ -367,7 +345,6 @@ set_auto_start() set_all() { local v="$1" - set_kernel_params "$v" set_memory "$v" set_auto_start "$v" label_system_disk "$v" @@ -430,7 +407,6 @@ then create_vm_vif "$vm_uuid" create_management_vif "$vm_uuid" create_public_vif "$vm_uuid" - set_kernel_params "$vm_uuid" xe vm-param-set other-config:os-vpx=true uuid="$vm_uuid" xe vm-param-set actions-after-reboot=Destroy uuid="$vm_uuid" set_memory "$vm_uuid" From 951a48df3cc72c52700d72d7c21173eb95daa0cf Mon Sep 17 00:00:00 2001 From: Chris Yeoh Date: Fri, 31 May 2013 21:37:25 +0930 Subject: [PATCH 0126/4704] Enable Nova v3 API The Nova v3 API will be disabled by default after 31164 goes through. 
This change explicitly enables the v3 API in the nova config file Change-Id: I8b4c64b4718fc96d7a928c6fa3a0e8716c7edba1 --- lib/nova | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/nova b/lib/nova index c38f50c801..d5c5dbb509 100644 --- a/lib/nova +++ b/lib/nova @@ -436,6 +436,7 @@ function create_nova_conf() { iniset $NOVA_CONF DEFAULT libvirt_type "$LIBVIRT_TYPE" iniset $NOVA_CONF DEFAULT libvirt_cpu_mode "none" iniset $NOVA_CONF DEFAULT instance_name_template "${INSTANCE_NAME_PREFIX}%08x" + iniset $NOVA_CONF DEFAULT osapi_v3_enabled "True" if is_service_enabled n-api; then iniset $NOVA_CONF DEFAULT enabled_apis "$NOVA_ENABLED_APIS" From 8f3af42f568a03c185b23942eb1c47f0b926d98a Mon Sep 17 00:00:00 2001 From: Gordon Chung Date: Fri, 31 May 2013 08:58:30 -0400 Subject: [PATCH 0127/4704] ceilometer does not set db in conf file Set [database]connection option to mongodb rather than sqlite as mongodb is the only fully supported db. Change-Id: I2b9fbc65de4476593d456a782dbc7e6540a75141 Fixes: bug #1186240 --- lib/ceilometer | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/ceilometer b/lib/ceilometer index 90a18845c6..50060a78e0 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -91,6 +91,8 @@ function configure_ceilometer() { iniset $CEILOMETER_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME iniset $CEILOMETER_CONF keystone_authtoken signing_dir $CEILOMETER_AUTH_CACHE_DIR + iniset $CEILOMETER_CONF database connection mongodb://localhost:27017/ceilometer + configure_mongodb cleanup_ceilometer From f05f11ef01e7645ecc719c8ed7b682c79e29c512 Mon Sep 17 00:00:00 2001 From: Shiv Haris Date: Fri, 31 May 2013 19:04:32 -0700 Subject: [PATCH 0128/4704] Turn on l3 agent and external network bridge for Brocade plugin Fixes bug: 1186467 Change-Id: I534a6f415ab14b2f4d75f244bbe79582ae2046bb --- lib/quantum_plugins/brocade | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/lib/quantum_plugins/brocade b/lib/quantum_plugins/brocade index 
fc86debb90..52ce3dbc18 100644 --- a/lib/quantum_plugins/brocade +++ b/lib/quantum_plugins/brocade @@ -25,15 +25,16 @@ function quantum_plugin_configure_common() { } function quantum_plugin_configure_debug_command() { - : + iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT external_network_bridge } function quantum_plugin_configure_dhcp_agent() { - : + iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_agent_manager quantum.agent.dhcp_agent.DhcpAgentWithStateReport } function quantum_plugin_configure_l3_agent() { - : + iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge + iniset $Q_L3_CONF_FILE DEFAULT l3_agent_manager quantum.agent.l3_agent.L3NATAgentWithStateReport } function quantum_plugin_configure_plugin_agent() { From ebdd61d1363cd8b99ee00d6d87e08e2f6e33d87e Mon Sep 17 00:00:00 2001 From: Jeff Peeler Date: Sat, 1 Jun 2013 00:54:47 -0400 Subject: [PATCH 0129/4704] Functions are required to parse localrc This is essentially a revert of 73695d0ea490c4c7a1158957dd5a85586cfa0933. Since stackrc is responsible for sourcing localrc, the required functions are necessary (like for enable_service). fixes bug 1186488 Change-Id: Iad90f802e77b94416821008c294c1a2ede8a4729 --- openrc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/openrc b/openrc index 2d5d48aea3..f1026a50a4 100644 --- a/openrc +++ b/openrc @@ -20,6 +20,9 @@ fi # Find the other rc files RC_DIR=$(cd $(dirname "$BASH_SOURCE") && pwd) +# Import common functions +source $RC_DIR/functions + # Load local configuration source $RC_DIR/stackrc From e32b4853c59daee7ea947d7a6039f3d2089b9485 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=89milien=20Macchi?= Date: Sun, 2 Jun 2013 15:58:14 +0200 Subject: [PATCH 0130/4704] Add a new dependency for n-api msgpack-python is missing as a dependency in Ubuntu. 
Fix Bug #1186739 Change-Id: I9e6ccc27970fb9fef2352a3d9864f325160a6e19 --- files/apts/n-api | 1 + 1 file changed, 1 insertion(+) diff --git a/files/apts/n-api b/files/apts/n-api index 0f08daace3..e0e5e7f5b3 100644 --- a/files/apts/n-api +++ b/files/apts/n-api @@ -1 +1,2 @@ python-dateutil +msgpack-python From 1b4d91b71e84a185adb59261f95c7cc0b3727100 Mon Sep 17 00:00:00 2001 From: Chris Yeoh Date: Mon, 3 Jun 2013 16:23:01 +0930 Subject: [PATCH 0131/4704] Moves Nova v3 API enable config to group https://review.openstack.org/#/c/29487/ moves the Nova v3 API enable flag into its own config group. And renames it to just "enable". This changeset follows that change. As we do not yet gate on any Nova V3 API tests we don't need to keep the old setting around. Change-Id: If1592cd7b9aad24b6d010870a5fb6bcc60efb8ac --- lib/nova | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/nova b/lib/nova index 809f56c648..bb94affc82 100644 --- a/lib/nova +++ b/lib/nova @@ -439,7 +439,7 @@ function create_nova_conf() { iniset $NOVA_CONF DEFAULT libvirt_type "$LIBVIRT_TYPE" iniset $NOVA_CONF DEFAULT libvirt_cpu_mode "none" iniset $NOVA_CONF DEFAULT instance_name_template "${INSTANCE_NAME_PREFIX}%08x" - iniset $NOVA_CONF DEFAULT osapi_v3_enabled "True" + iniset $NOVA_CONF osapi_v3 enabled "True" if is_service_enabled n-api; then iniset $NOVA_CONF DEFAULT enabled_apis "$NOVA_ENABLED_APIS" From 26824b054623dfda64ffe9617bcf5df835c7c433 Mon Sep 17 00:00:00 2001 From: Steve Baker Date: Tue, 4 Jun 2013 13:15:04 +1200 Subject: [PATCH 0132/4704] Run heat tempest tests if heat is enabled in devstack. 
Change-Id: I6a5d1bae99982a8f0201885abd1203e1a8d78dcc --- lib/tempest | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/lib/tempest b/lib/tempest index e59737bb2b..a259ee977b 100644 --- a/lib/tempest +++ b/lib/tempest @@ -255,6 +255,11 @@ function configure_tempest() { iniset $TEMPEST_CONF boto http_socket_timeout 30 iniset $TEMPEST_CONF boto ssh_user ${DEFAULT_INSTANCE_USER:-cirros} + # orchestration + if is_service_enabled heat; then + iniset $TEMPEST_CONF orchestration heat_available "True" + fi + echo "Created tempest configuration file:" cat $TEMPEST_CONF From 51c90b858d5106a6a3fe57aa417e1fb8faea414d Mon Sep 17 00:00:00 2001 From: JordanP Date: Thu, 23 May 2013 10:27:51 +0200 Subject: [PATCH 0133/4704] On unstack, cleanup LVM setup for Cinder Change-Id: I4f01dd65a4e5dd0dff33ea7c0024fb75fe83834b --- clean.sh | 2 +- lib/cinder | 41 +++++++++++++++++++++++++++-------------- 2 files changed, 28 insertions(+), 15 deletions(-) diff --git a/clean.sh b/clean.sh index cf24f278b8..92466c1038 100755 --- a/clean.sh +++ b/clean.sh @@ -61,7 +61,7 @@ cleanup_nova cleanup_quantum cleanup_swift -# cinder doesn't clean up the volume group as it might be used elsewhere... +# cinder doesn't always clean up the volume group as it might be used elsewhere... 
# clean it up if it is a loop device VG_DEV=$(sudo losetup -j $DATA_DIR/${VOLUME_GROUP}-backing-file | awk -F':' '/backing-file/ { print $1}') if [[ -n "$VG_DEV" ]]; then diff --git a/lib/cinder b/lib/cinder index 7e9c2ba6e5..93cbc1704b 100644 --- a/lib/cinder +++ b/lib/cinder @@ -67,27 +67,43 @@ CINDER_SECURE_DELETE=`trueorfalse True $CINDER_SECURE_DELETE` CINDER_PERIODIC_INTERVAL=${CINDER_PERIODIC_INTERVAL:-60} # Name of the lvm volume groups to use/create for iscsi volumes -# VOLUME_GROUP2 is used only if CINDER_MULTI_LVM_BACKEND = True VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes} +VOLUME_BACKING_FILE=${VOLUME_BACKING_FILE:-$DATA_DIR/${VOLUME_GROUP}-backing-file} + +# VOLUME_GROUP2 is used only if CINDER_MULTI_LVM_BACKEND = True VOLUME_GROUP2=${VOLUME_GROUP2:-stack-volumes2} +VOLUME_BACKING_FILE2=${VOLUME_BACKING_FILE2:-$DATA_DIR/${VOLUME_GROUP2}-backing-file} + VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-} # Functions # --------- - -# _clean_volume_group removes all cinder volumes from the specified volume group -# _clean_volume_group $VOLUME_GROUP $VOLUME_NAME_PREFIX -function _clean_volume_group() { +# _cleanup_lvm removes all cinder volumes and the backing file of the +# volume group used by cinder +# _cleanup_lvm $VOLUME_GROUP $VOLUME_NAME_PREFIX +function _cleanup_lvm() { local vg=$1 - local vg_prefix=$2 + local lv_prefix=$2 + # Clean out existing volumes for lv in `sudo lvs --noheadings -o lv_name $vg`; do - # vg_prefix prefixes the LVs we want - if [[ "${lv#$vg_prefix}" != "$lv" ]]; then + # lv_prefix prefixes the LVs we want + if [[ "${lv#$lv_prefix}" != "$lv" ]]; then sudo lvremove -f $vg/$lv fi done + + # if there is no logical volume left, it's safe to attempt a cleanup + # of the backing file + if [ -z "`sudo lvs --noheadings -o lv_name $vg`" ]; then + # if the backing physical device is a loop device, it was probably setup by devstack + VG_DEV=$(sudo losetup -j $DATA_DIR/${vg}-backing-file | awk -F':' '/backing-file/ { print $1}') + if [[ -n 
"$VG_DEV" ]]; then + sudo losetup -d $VG_DEV + rm -f $DATA_DIR/${vg}-backing-file + fi + fi } # cleanup_cinder() - Remove residual data files, anything left over from previous @@ -127,9 +143,10 @@ function cleanup_cinder() { fi # Campsite rule: leave behind a volume group at least as clean as we found it - _clean_volume_group $VOLUME_GROUP $VOLUME_NAME_PREFIX + _cleanup_lvm $VOLUME_GROUP $VOLUME_NAME_PREFIX + if [ "$CINDER_MULTI_LVM_BACKEND" = "True" ]; then - _clean_volume_group $VOLUME_GROUP2 $VOLUME_NAME_PREFIX + _cleanup_lvm $VOLUME_GROUP2 $VOLUME_NAME_PREFIX fi } @@ -318,8 +335,6 @@ create_cinder_volume_group() { # ``/opt/stack/data``. if ! sudo vgs $VOLUME_GROUP; then - VOLUME_BACKING_FILE=${VOLUME_BACKING_FILE:-$DATA_DIR/${VOLUME_GROUP}-backing-file} - # Only create if the file doesn't already exists [[ -f $VOLUME_BACKING_FILE ]] || truncate -s $VOLUME_BACKING_FILE_SIZE $VOLUME_BACKING_FILE @@ -334,8 +349,6 @@ create_cinder_volume_group() { #set up the second volume if CINDER_MULTI_LVM_BACKEND is enabled if ! 
sudo vgs $VOLUME_GROUP2; then - VOLUME_BACKING_FILE2=${VOLUME_BACKING_FILE2:-$DATA_DIR/${VOLUME_GROUP2}-backing-file} - # Only create if the file doesn't already exists [[ -f $VOLUME_BACKING_FILE2 ]] || truncate -s $VOLUME_BACKING_FILE_SIZE $VOLUME_BACKING_FILE2 From 58f9cf761a5f4b7968de2286797e2e4d2284fff8 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Tue, 4 Jun 2013 12:51:54 -0500 Subject: [PATCH 0134/4704] Check for .stackenv * Skip sourcing .stackenv if it doesn't exist * Remove .stackenv at end Change-Id: Icc2e8e4ea6fada5f9d0b906fedacdbe2c1b3b320 --- clean.sh | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/clean.sh b/clean.sh index cf24f278b8..ffc462c6f3 100755 --- a/clean.sh +++ b/clean.sh @@ -19,7 +19,9 @@ source $TOP_DIR/functions source $TOP_DIR/stackrc # Get the variables that are set in stack.sh -source $TOP_DIR/.stackenv +if [[ -r $TOP_DIR/.stackenv ]]; then + source $TOP_DIR/.stackenv +fi # Determine what system we are running on. This provides ``os_VENDOR``, # ``os_RELEASE``, ``os_UPDATE``, ``os_PACKAGE``, ``os_CODENAME`` @@ -88,4 +90,4 @@ cleanup_database # FIXED_IP_ADDR in br100 # Clean up files -#rm -f .stackenv +rm -f $TOP_DIR/.stackenv From 1216b9fae9d8b93c9387147ab2b5bce07af7daa4 Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Tue, 4 Jun 2013 18:55:06 +0000 Subject: [PATCH 0135/4704] Use service role instead of admin role for service users Instead of using a full admin role to validate tokens just use the service role. Change where possible, some services use the service user for more then just token validation. 
Fixes bug 1153789 Change-Id: I0801475b62a7b025fdd871f52d8606aa614d1a32 --- files/keystone_data.sh | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/files/keystone_data.sh b/files/keystone_data.sh index 72b5b1e9c8..a1875e183b 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -5,9 +5,9 @@ # Tenant User Roles # ------------------------------------------------------------------ # service glance admin -# service swift admin # if enabled -# service heat admin # if enabled -# service ceilometer admin # if enabled +# service swift service # if enabled +# service heat service # if enabled +# service ceilometer service # if enabled # Tempest Only: # alt_demo alt_demo Member # @@ -47,6 +47,8 @@ MEMBER_ROLE=$(keystone role-list | awk "/ Member / { print \$2 }") # but ResellerAdmin is needed for a user to act as any tenant. The name of this # role is also configurable in swift-proxy.conf RESELLER_ROLE=$(get_id keystone role-create --name=ResellerAdmin) +# Service role, so service users do not have to be admins +SERVICE_ROLE=$(get_id keystone role-create --name=service) # Services @@ -70,7 +72,7 @@ if [[ "$ENABLED_SERVICES" =~ "heat" ]]; then --email=heat@example.com) keystone user-role-add --tenant_id $SERVICE_TENANT \ --user_id $HEAT_USER \ - --role_id $ADMIN_ROLE + --role_id $SERVICE_ROLE # heat_stack_user role is for users created by Heat keystone role-create --name heat_stack_user if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then @@ -133,7 +135,7 @@ if [[ "$ENABLED_SERVICES" =~ "swift" || "$ENABLED_SERVICES" =~ "s-proxy" ]]; the keystone user-role-add \ --tenant_id $SERVICE_TENANT \ --user_id $SWIFT_USER \ - --role_id $ADMIN_ROLE + --role_id $SERVICE_ROLE if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then SWIFT_SERVICE=$(get_id keystone service-create \ --name=swift \ @@ -155,7 +157,7 @@ if [[ "$ENABLED_SERVICES" =~ "ceilometer" ]]; then --email=ceilometer@example.com) keystone user-role-add --tenant_id $SERVICE_TENANT \ 
--user_id $CEILOMETER_USER \ - --role_id $ADMIN_ROLE + --role_id $SERVICE_ROLE # Ceilometer needs ResellerAdmin role to access swift account stats. keystone user-role-add --tenant_id $SERVICE_TENANT \ --user_id $CEILOMETER_USER \ From 32a348df5140d0546fa8866f569601aa8478a9d9 Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Wed, 5 Jun 2013 15:31:09 -0700 Subject: [PATCH 0136/4704] Update my mailmap Change-Id: Ia5e9b543ad1c9e21a722cf739d1469e63e2c005c --- .mailmap | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.mailmap b/.mailmap index a49875d22d..29be995ef8 100644 --- a/.mailmap +++ b/.mailmap @@ -2,4 +2,5 @@ # # Jiajun Liu -Jian Wen \ No newline at end of file +Jian Wen +Joe Gordon From 00fd79d324283882c23059f1e12906d413b0a64f Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Thu, 6 Jun 2013 11:19:16 +1000 Subject: [PATCH 0137/4704] Use nova bindir of /usr/bin on Fedora Fedora (and RHEL) like to install things in /usr/bin, e.g. 01:30:42 Creating /usr/lib/python2.7/site-packages/nova.egg-link (link to .) 01:30:42 Adding nova 2013.2.a1031.gfc5137d to easy-install.pth file 01:30:42 Installing nova-dhcpbridge script to /usr/bin ... The default nova/paths.py defaults to /usr/local/... which causes quite a few errors when stressing various parts of nova and tools aren't found. 
Change-Id: Iaa93af96ddfb4deb6d16ea1dcac25ae6ed1e317d --- lib/nova | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/lib/nova b/lib/nova index 809f56c648..be526cfdb8 100644 --- a/lib/nova +++ b/lib/nova @@ -441,6 +441,12 @@ function create_nova_conf() { iniset $NOVA_CONF DEFAULT instance_name_template "${INSTANCE_NAME_PREFIX}%08x" iniset $NOVA_CONF DEFAULT osapi_v3_enabled "True" + if is_fedora; then + # nova defaults to /usr/local/bin, but fedora pip likes to + # install things in /usr/bin + iniset $NOVA_CONF DEFAULT bindir "/usr/bin" + fi + if is_service_enabled n-api; then iniset $NOVA_CONF DEFAULT enabled_apis "$NOVA_ENABLED_APIS" if is_service_enabled tls-proxy; then From 5ecfd48c519749b745c9e3c311bf4385f7cfd0a5 Mon Sep 17 00:00:00 2001 From: Gordon Chung Date: Thu, 6 Jun 2013 00:25:16 -0400 Subject: [PATCH 0138/4704] ceilometer switch to setuptools console scripts ceilometer has dropped bin scripts and switched to console scripts blueprint setuptools-console-scripts Change-Id: If6dff10471e351fe3640a60cc45fef719f671997 --- lib/ceilometer | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/lib/ceilometer b/lib/ceilometer index 50060a78e0..bd4ab0f2dd 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -127,10 +127,10 @@ function install_ceilometerclient() { # start_ceilometer() - Start running processes, including screen function start_ceilometer() { - screen_it ceilometer-acompute "cd $CEILOMETER_DIR && sg $LIBVIRT_GROUP \"$CEILOMETER_BIN_DIR/ceilometer-agent-compute --config-file $CEILOMETER_CONF\"" - screen_it ceilometer-acentral "cd $CEILOMETER_DIR && $CEILOMETER_BIN_DIR/ceilometer-agent-central --config-file $CEILOMETER_CONF" - screen_it ceilometer-collector "cd $CEILOMETER_DIR && $CEILOMETER_BIN_DIR/ceilometer-collector --config-file $CEILOMETER_CONF" - screen_it ceilometer-api "cd $CEILOMETER_DIR && $CEILOMETER_BIN_DIR/ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF" + screen_it 
ceilometer-acompute "sg $LIBVIRT_GROUP \"ceilometer-agent-compute --config-file $CEILOMETER_CONF\"" + screen_it ceilometer-acentral "ceilometer-agent-central --config-file $CEILOMETER_CONF" + screen_it ceilometer-collector "ceilometer-collector --config-file $CEILOMETER_CONF" + screen_it ceilometer-api "ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF" } # stop_ceilometer() - Stop running processes From 63a71a231638b8ed3390d04b986b8db177e4ab13 Mon Sep 17 00:00:00 2001 From: Adalberto Medeiros Date: Thu, 6 Jun 2013 08:46:04 -0400 Subject: [PATCH 0139/4704] Ensure flavor_ref_alt is different from flavor_ref For tempest, server resize tests needs different values for flavors to pass. Ensure different values in tempest.conf for flavor_ref_alt and flavor_ref. Change-Id: I2f8ee92967abc767637e334ae483b540dd7d5b78 Fix: bug 1187768 --- lib/tempest | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/lib/tempest b/lib/tempest index e59737bb2b..1a9cad2862 100644 --- a/lib/tempest +++ b/lib/tempest @@ -166,9 +166,15 @@ function configure_tempest() { fi flavor_ref=${flavors[0]} flavor_ref_alt=$flavor_ref - if [[ $num_flavors -gt 1 ]]; then - flavor_ref_alt=${flavors[1]} - fi + + # ensure flavor_ref and flavor_ref_alt have different values + # some resize instance in tempest tests depends on this. + for f in ${flavors[@]:1}; do + if [[ $f -ne $flavor_ref ]]; then + flavor_ref_alt=$f + break + fi + done fi if [ "$Q_USE_NAMESPACE" != "False" ]; then From baf37ea81720982050eceea2b1b1e9bbdf6f0c94 Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Wed, 5 Jun 2013 15:18:44 -0700 Subject: [PATCH 0140/4704] Add Fake virt driver support nova fake virt driver can be used to simulate running arbitrarily large number of VMs. When the fake virt driver is selected, all other arbitrary limits are disabled (quotas, scheduler limitations) as well. 
The fake virt driver can be selected by adding the following line to your localrc VIRT_DRIVER=fake This setting can be used to do simulated large scale (as defined by number of VMs) testing (such as boot 500 VMs at once) inside a single devstack instance. Change-Id: I000c3cba107cd91bef11c4d5325e1ce7308faa6c --- stack.sh | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/stack.sh b/stack.sh index 5dea00097a..28b8cb97d3 100755 --- a/stack.sh +++ b/stack.sh @@ -1029,6 +1029,27 @@ if is_service_enabled nova; then iniset $NOVA_CONF DEFAULT vmwareapi_host_password "$VMWAREAPI_PASSWORD" iniset $NOVA_CONF DEFAULT vmwareapi_cluster_name "$VMWAREAPI_CLUSTER" + # fake + # ----- + + elif [ "$VIRT_DRIVER" = 'fake' ]; then + echo_summary "Using fake Virt driver" + iniset $NOVA_CONF DEFAULT compute_driver "nova.virt.fake.FakeDriver" + # Disable arbitrary limits + iniset $NOVA_CONF DEFAULT quota_instances -1 + iniset $NOVA_CONF DEFAULT quota_cores -1 + iniset $NOVA_CONF DEFAULT quota_ram -1 + iniset $NOVA_CONF DEFAULT quota_floating_ips -1 + iniset $NOVA_CONF DEFAULT quota_fixed_ips -1 + iniset $NOVA_CONF DEFAULT quota_metadata_items -1 + iniset $NOVA_CONF DEFAULT quota_injected_files -1 + iniset $NOVA_CONF DEFAULT quota_injected_file_path_bytes -1 + iniset $NOVA_CONF DEFAULT quota_security_groups -1 + iniset $NOVA_CONF DEFAULT quota_security_group_rules -1 + iniset $NOVA_CONF DEFAULT quota_key_pairs -1 + iniset $NOVA_CONF DEFAULT scheduler_default_filters "RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter" + + # Default # ------- From 88317adc0ad98f197a571cb34615f8b135463369 Mon Sep 17 00:00:00 2001 From: Doug Hellmann Date: Thu, 6 Jun 2013 15:42:13 -0400 Subject: [PATCH 0141/4704] Source functions in rejoin-stack.sh stackrc loads localrc and localrc can use functions like enable_service and disable_service. Make sure the functions are defined before loading stackrc. 
Change-Id: Ied7bbe0d228f252902cc491b8db0738c5071f06b Signed-off-by: Doug Hellmann --- rejoin-stack.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/rejoin-stack.sh b/rejoin-stack.sh index c452694f9f..65ba7214fa 100755 --- a/rejoin-stack.sh +++ b/rejoin-stack.sh @@ -5,6 +5,10 @@ TOP_DIR=`dirname $0` +# Import common functions in case the localrc (loaded via stackrc) +# uses them. +source $TOP_DIR/functions + source $TOP_DIR/stackrc # if screenrc exists, run screen From 5747b8017577ea7b8f41c1456aeb91ee13d000e1 Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Thu, 6 Jun 2013 17:37:56 -0700 Subject: [PATCH 0142/4704] Don't install pep8 from packages We have migrated to flake8 and most distros don't have the version of pep8 that flake8 (and hacking) use. Instead of installing pep8 from packages it should be installed from pip Change-Id: I86e4f8316f3cdc27303bfd039a78c0d6202ce321 --- files/apts/general | 1 - files/apts/horizon | 1 - files/rpms-suse/general | 1 - files/rpms-suse/horizon | 1 - files/rpms/general | 3 +-- files/rpms/horizon | 1 - 6 files changed, 1 insertion(+), 7 deletions(-) diff --git a/files/apts/general b/files/apts/general index a1fcf3cb61..ec6dd0db4b 100644 --- a/files/apts/general +++ b/files/apts/general @@ -1,5 +1,4 @@ bridge-utils -pep8 pylint python-pip screen diff --git a/files/apts/horizon b/files/apts/horizon index 2c2faf1a21..e1ce85f7d5 100644 --- a/files/apts/horizon +++ b/files/apts/horizon @@ -11,7 +11,6 @@ python-sqlalchemy python-webob python-kombu pylint -pep8 python-eventlet python-nose python-sphinx diff --git a/files/rpms-suse/general b/files/rpms-suse/general index b8ceeb7aab..93711ff784 100644 --- a/files/rpms-suse/general +++ b/files/rpms-suse/general @@ -8,7 +8,6 @@ openssl psmisc python-cmd2 # dist:opensuse-12.3 python-netaddr -python-pep8 python-pip python-pylint python-unittest2 diff --git a/files/rpms-suse/horizon b/files/rpms-suse/horizon index 7e46ffe0f7..405fb7ac56 100644 --- a/files/rpms-suse/horizon +++ 
b/files/rpms-suse/horizon @@ -17,7 +17,6 @@ python-kombu python-mox python-netaddr python-nose -python-pep8 python-pylint python-sqlalchemy-migrate python-xattr diff --git a/files/rpms/general b/files/rpms/general index 764b602da0..d6abae99df 100644 --- a/files/rpms/general +++ b/files/rpms/general @@ -11,7 +11,6 @@ libxslt-devel # dist:rhel6 [2] psmisc pylint python-netaddr -python-pep8 python-pip python-prettytable # dist:rhel6 [1] python-unittest2 @@ -29,4 +28,4 @@ wget # [2] : RHEL6 rpm versions of python-lxml is old, and has to be # removed. Several tools rely on it, so we install the dependencies -# pip needs to build it here (see tools/install_prereqs.sh) \ No newline at end of file +# pip needs to build it here (see tools/install_prereqs.sh) diff --git a/files/rpms/horizon b/files/rpms/horizon index cf16cdbeac..e27888a4af 100644 --- a/files/rpms/horizon +++ b/files/rpms/horizon @@ -20,7 +20,6 @@ python-netaddr python-nose python-paste #dist:f16,f17,f18 python-paste-deploy #dist:f16,f17,f18 -python-pep8 python-routes python-sphinx python-sqlalchemy From c9b0f1abaac6410b0a02a76423a9e923f2dda112 Mon Sep 17 00:00:00 2001 From: Bob Kukura Date: Tue, 23 Apr 2013 16:28:24 -0400 Subject: [PATCH 0143/4704] Support for the ml2 quantum plugin. Support is added for configuring devstack to use quantum's Modular L2 plugin, ml2. See https://review.openstack.org/#/c/20105/ and the blueprint for details. Either the openvswitch or linuxbridge L2 agent can be used with the ml2 plugin (or both in combination on different nodes). Code configurating devstack for these L2 agents has been moved into separate files so it can be shared between ml2 and the existing monolithic plugins. To use ml2 in devstack, set Q_PLUGIN=ml2 in localrc. By default it will use the openvswitch L2 agent. To use ml2 with the linuxbridge agent, also set Q_AGENT=linuxbridge in localrc. The local, flat, and vlan network types are supported with either agent. 
Support for openvswitch's gre network type is not yet implemented in ml2. Note that ml2 does not yet return a useful value for binding:vif_type, so nova's GenricVIFDriver cannot currently be used. If using the linuxbridge agent, work around this by setting NOVA_VIF_DRIVER=nova.virt.libvirt.vif.LinuxBridgeVIFDriver in localrc. Implements: blueprint modular-l2 Change-Id: Ib3ed039d25295cf1ab268536cec408af8cd3795a --- lib/quantum_plugins/linuxbridge | 53 ------------ lib/quantum_plugins/linuxbridge_agent | 62 ++++++++++++++ lib/quantum_plugins/ml2 | 62 ++++++++++++++ lib/quantum_plugins/openvswitch | 110 +----------------------- lib/quantum_plugins/openvswitch_agent | 117 ++++++++++++++++++++++++++ 5 files changed, 243 insertions(+), 161 deletions(-) create mode 100644 lib/quantum_plugins/linuxbridge_agent create mode 100644 lib/quantum_plugins/ml2 create mode 100644 lib/quantum_plugins/openvswitch_agent diff --git a/lib/quantum_plugins/linuxbridge b/lib/quantum_plugins/linuxbridge index 980df5fedb..71832f1427 100644 --- a/lib/quantum_plugins/linuxbridge +++ b/lib/quantum_plugins/linuxbridge @@ -5,19 +5,6 @@ MY_XTRACE=$(set +o | grep xtrace) set +o xtrace -function is_quantum_ovs_base_plugin() { - # linuxbridge doesn't use OVS - return 1 -} - -function quantum_plugin_create_nova_conf() { - NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"} -} - -function quantum_plugin_install_agent_packages() { - install_package bridge-utils -} - function quantum_plugin_configure_common() { Q_PLUGIN_CONF_PATH=etc/quantum/plugins/linuxbridge Q_PLUGIN_CONF_FILENAME=linuxbridge_conf.ini @@ -25,37 +12,6 @@ function quantum_plugin_configure_common() { Q_PLUGIN_CLASS="quantum.plugins.linuxbridge.lb_quantum_plugin.LinuxBridgePluginV2" } -function quantum_plugin_configure_debug_command() { - iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT external_network_bridge -} - -function quantum_plugin_configure_dhcp_agent() { - iniset $Q_DHCP_CONF_FILE DEFAULT 
dhcp_agent_manager quantum.agent.dhcp_agent.DhcpAgentWithStateReport -} - -function quantum_plugin_configure_l3_agent() { - iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge - iniset $Q_L3_CONF_FILE DEFAULT l3_agent_manager quantum.agent.l3_agent.L3NATAgentWithStateReport -} - -function quantum_plugin_configure_plugin_agent() { - # Setup physical network interface mappings. Override - # ``LB_VLAN_RANGES`` and ``LB_INTERFACE_MAPPINGS`` in ``localrc`` for more - # complex physical network configurations. - if [[ "$LB_INTERFACE_MAPPINGS" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]] && [[ "$LB_PHYSICAL_INTERFACE" != "" ]]; then - LB_INTERFACE_MAPPINGS=$PHYSICAL_NETWORK:$LB_PHYSICAL_INTERFACE - fi - if [[ "$LB_INTERFACE_MAPPINGS" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE LINUX_BRIDGE physical_interface_mappings $LB_INTERFACE_MAPPINGS - fi - if [[ "$Q_USE_SECGROUP" == "True" ]]; then - iniset /$Q_PLUGIN_CONF_FILE SECURITYGROUP firewall_driver quantum.agent.linux.iptables_firewall.IptablesFirewallDriver - else - iniset /$Q_PLUGIN_CONF_FILE SECURITYGROUP firewall_driver quantum.agent.firewall.NoopFirewallDriver - fi - AGENT_BINARY="$QUANTUM_DIR/bin/quantum-linuxbridge-agent" -} - function quantum_plugin_configure_service() { if [[ "$ENABLE_TENANT_VLANS" = "True" ]]; then iniset /$Q_PLUGIN_CONF_FILE VLANS tenant_network_type vlan @@ -81,19 +37,10 @@ function quantum_plugin_configure_service() { fi } -function quantum_plugin_setup_interface_driver() { - local conf_file=$1 - iniset $conf_file DEFAULT interface_driver quantum.agent.linux.interface.BridgeInterfaceDriver -} - function has_quantum_plugin_security_group() { # 0 means True here return 0 } -function quantum_plugin_check_adv_test_requirements() { - is_service_enabled q-agt && is_service_enabled q-dhcp && return 0 -} - # Restore xtrace $MY_XTRACE diff --git a/lib/quantum_plugins/linuxbridge_agent b/lib/quantum_plugins/linuxbridge_agent new file mode 100644 index 0000000000..1e83275723 --- /dev/null +++ 
b/lib/quantum_plugins/linuxbridge_agent @@ -0,0 +1,62 @@ +# Quantum Linux Bridge L2 agent +# ----------------------------- + +# Save trace setting +PLUGIN_XTRACE=$(set +o | grep xtrace) +set +o xtrace + +function is_quantum_ovs_base_plugin() { + # linuxbridge doesn't use OVS + return 1 +} + +function quantum_plugin_create_nova_conf() { + NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"} +} + +function quantum_plugin_install_agent_packages() { + install_package bridge-utils +} + +function quantum_plugin_configure_debug_command() { + iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT external_network_bridge +} + +function quantum_plugin_configure_dhcp_agent() { + iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_agent_manager quantum.agent.dhcp_agent.DhcpAgentWithStateReport +} + +function quantum_plugin_configure_l3_agent() { + iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge + iniset $Q_L3_CONF_FILE DEFAULT l3_agent_manager quantum.agent.l3_agent.L3NATAgentWithStateReport +} + +function quantum_plugin_configure_plugin_agent() { + # Setup physical network interface mappings. Override + # ``LB_VLAN_RANGES`` and ``LB_INTERFACE_MAPPINGS`` in ``localrc`` for more + # complex physical network configurations. 
+ if [[ "$LB_INTERFACE_MAPPINGS" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]] && [[ "$LB_PHYSICAL_INTERFACE" != "" ]]; then + LB_INTERFACE_MAPPINGS=$PHYSICAL_NETWORK:$LB_PHYSICAL_INTERFACE + fi + if [[ "$LB_INTERFACE_MAPPINGS" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE LINUX_BRIDGE physical_interface_mappings $LB_INTERFACE_MAPPINGS + fi + if [[ "$Q_USE_SECGROUP" == "True" ]]; then + iniset /$Q_PLUGIN_CONF_FILE SECURITYGROUP firewall_driver quantum.agent.linux.iptables_firewall.IptablesFirewallDriver + else + iniset /$Q_PLUGIN_CONF_FILE SECURITYGROUP firewall_driver quantum.agent.firewall.NoopFirewallDriver + fi + AGENT_BINARY="$QUANTUM_DIR/bin/quantum-linuxbridge-agent" +} + +function quantum_plugin_setup_interface_driver() { + local conf_file=$1 + iniset $conf_file DEFAULT interface_driver quantum.agent.linux.interface.BridgeInterfaceDriver +} + +function quantum_plugin_check_adv_test_requirements() { + is_service_enabled q-agt && is_service_enabled q-dhcp && return 0 +} + +# Restore xtrace +$PLUGIN_XTRACE diff --git a/lib/quantum_plugins/ml2 b/lib/quantum_plugins/ml2 new file mode 100644 index 0000000000..ae8fe6c997 --- /dev/null +++ b/lib/quantum_plugins/ml2 @@ -0,0 +1,62 @@ +# Quantum Modular Layer 2 plugin +# ------------------------------ + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + +# Default openvswitch L2 agent +Q_AGENT=${Q_AGENT:-openvswitch} +source $TOP_DIR/lib/quantum_plugins/${Q_AGENT}_agent + +function quantum_plugin_configure_common() { + Q_PLUGIN_CONF_PATH=etc/quantum/plugins/ml2 + Q_PLUGIN_CONF_FILENAME=ml2_conf.ini + Q_DB_NAME="quantum_ml2" + Q_PLUGIN_CLASS="quantum.plugins.ml2.plugin.Ml2Plugin" +} + +function quantum_plugin_configure_service() { + if [[ "$ENABLE_TENANT_TUNNELS" = "True" ]]; then + iniset /$Q_PLUGIN_CONF_FILE ml2 tenant_network_types gre + iniset /$Q_PLUGIN_CONF_FILE ml2_type_gre tunnel_id_ranges $TENANT_TUNNEL_RANGES + elif [[ "$ENABLE_TENANT_VLANS" = "True" ]]; then + iniset /$Q_PLUGIN_CONF_FILE 
ml2 tenant_network_types vlan + else + echo "WARNING - The ml2 plugin is using local tenant networks, with no connectivity between hosts." + fi + + # Override ``ML2_VLAN_RANGES`` and any needed agent configuration + # variables in ``localrc`` for more complex physical network + # configurations. + if [[ "$ML2_VLAN_RANGES" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]]; then + ML2_VLAN_RANGES=$PHYSICAL_NETWORK + if [[ "$TENANT_VLAN_RANGE" != "" ]]; then + ML2_VLAN_RANGES=$ML2_VLAN_RANGES:$TENANT_VLAN_RANGE + fi + fi + if [[ "$ML2_VLAN_RANGES" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE ml2_type_vlan network_vlan_ranges $ML2_VLAN_RANGES + fi + + # REVISIT(rkukura): Setting firewall_driver here for + # quantum.agent.securitygroups_rpc.is_firewall_enabled() which is + # used in the server, in case no L2 agent is configured on the + # server's node. If an L2 agent is configured, this will get + # overridden with the correct driver. The ml2 plugin should + # instead use its own config variable to indicate whether security + # groups is enabled, and that will need to be set here instead. 
+ if [[ "$Q_USE_SECGROUP" == "True" ]]; then + iniset /$Q_PLUGIN_CONF_FILE SECURITYGROUP firewall_driver quantum.agent.not.a.real.FirewallDriver + else + iniset /$Q_PLUGIN_CONF_FILE SECURITYGROUP firewall_driver quantum.agent.firewall.NoopFirewallDriver + fi + +} + +function has_quantum_plugin_security_group() { + return 0 +} + +# Restore xtrace +$MY_XTRACE diff --git a/lib/quantum_plugins/openvswitch b/lib/quantum_plugins/openvswitch index d5d4f102f6..cd29c199a1 100644 --- a/lib/quantum_plugins/openvswitch +++ b/lib/quantum_plugins/openvswitch @@ -1,25 +1,11 @@ -# Quantum Open vSwtich plugin +# Quantum Open vSwitch plugin # --------------------------- # Save trace setting MY_XTRACE=$(set +o | grep xtrace) set +o xtrace -source $TOP_DIR/lib/quantum_plugins/ovs_base - -function quantum_plugin_create_nova_conf() { - _quantum_ovs_base_configure_nova_vif_driver - if [ "$VIRT_DRIVER" = 'xenserver' ]; then - iniset $NOVA_CONF DEFAULT xenapi_vif_driver nova.virt.xenapi.vif.XenAPIOpenVswitchDriver - iniset $NOVA_CONF DEFAULT xenapi_ovs_integration_bridge $XEN_INTEGRATION_BRIDGE - # Disable nova's firewall so that it does not conflict with quantum - iniset $NOVA_CONF DEFAULT firewall_driver nova.virt.firewall.NoopFirewallDriver - fi -} - -function quantum_plugin_install_agent_packages() { - _quantum_ovs_base_install_agent_packages -} +source $TOP_DIR/lib/quantum_plugins/openvswitch_agent function quantum_plugin_configure_common() { Q_PLUGIN_CONF_PATH=etc/quantum/plugins/openvswitch @@ -28,89 +14,6 @@ function quantum_plugin_configure_common() { Q_PLUGIN_CLASS="quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPluginV2" } -function quantum_plugin_configure_debug_command() { - _quantum_ovs_base_configure_debug_command -} - -function quantum_plugin_configure_dhcp_agent() { - iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_agent_manager quantum.agent.dhcp_agent.DhcpAgentWithStateReport -} - -function quantum_plugin_configure_l3_agent() { - _quantum_ovs_base_configure_l3_agent - 
iniset $Q_L3_CONF_FILE DEFAULT l3_agent_manager quantum.agent.l3_agent.L3NATAgentWithStateReport -} - -function quantum_plugin_configure_plugin_agent() { - # Setup integration bridge - _quantum_ovs_base_setup_bridge $OVS_BRIDGE - _quantum_ovs_base_configure_firewall_driver - - # Setup agent for tunneling - if [[ "$OVS_ENABLE_TUNNELING" = "True" ]]; then - # Verify tunnels are supported - # REVISIT - also check kernel module support for GRE and patch ports - OVS_VERSION=`ovs-vsctl --version | head -n 1 | awk '{print $4;}'` - if [ $OVS_VERSION \< "1.4" ] && ! is_service_enabled q-svc ; then - die $LINENO "You are running OVS version $OVS_VERSION. OVS 1.4+ is required for tunneling between multiple hosts." - fi - iniset /$Q_PLUGIN_CONF_FILE OVS enable_tunneling True - iniset /$Q_PLUGIN_CONF_FILE OVS local_ip $HOST_IP - fi - - # Setup physical network bridge mappings. Override - # ``OVS_VLAN_RANGES`` and ``OVS_BRIDGE_MAPPINGS`` in ``localrc`` for more - # complex physical network configurations. 
- if [[ "$OVS_BRIDGE_MAPPINGS" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]] && [[ "$OVS_PHYSICAL_BRIDGE" != "" ]]; then - OVS_BRIDGE_MAPPINGS=$PHYSICAL_NETWORK:$OVS_PHYSICAL_BRIDGE - - # Configure bridge manually with physical interface as port for multi-node - sudo ovs-vsctl --no-wait -- --may-exist add-br $OVS_PHYSICAL_BRIDGE - fi - if [[ "$OVS_BRIDGE_MAPPINGS" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE OVS bridge_mappings $OVS_BRIDGE_MAPPINGS - fi - AGENT_BINARY="$QUANTUM_DIR/bin/quantum-openvswitch-agent" - - if [ "$VIRT_DRIVER" = 'xenserver' ]; then - # Make a copy of our config for domU - sudo cp /$Q_PLUGIN_CONF_FILE "/$Q_PLUGIN_CONF_FILE.domu" - - # Deal with Dom0's L2 Agent: - Q_RR_DOM0_COMMAND="$QUANTUM_DIR/bin/quantum-rootwrap-xen-dom0 $Q_RR_CONF_FILE" - - # For now, duplicate the xen configuration already found in nova.conf - iniset $Q_RR_CONF_FILE XENAPI xenapi_connection_url "$XENAPI_CONNECTION_URL" - iniset $Q_RR_CONF_FILE XENAPI xenapi_connection_username "$XENAPI_USER" - iniset $Q_RR_CONF_FILE XENAPI xenapi_connection_password "$XENAPI_PASSWORD" - - # Under XS/XCP, the ovs agent needs to target the dom0 - # integration bridge. This is enabled by using a root wrapper - # that executes commands on dom0 via a XenAPI plugin. 
- iniset /$Q_PLUGIN_CONF_FILE AGENT root_helper "$Q_RR_DOM0_COMMAND" - - # Set "physical" mapping - iniset /$Q_PLUGIN_CONF_FILE OVS bridge_mappings "physnet1:$FLAT_NETWORK_BRIDGE" - - # XEN_INTEGRATION_BRIDGE is the integration bridge in dom0 - iniset /$Q_PLUGIN_CONF_FILE OVS integration_bridge $XEN_INTEGRATION_BRIDGE - - # Set up domU's L2 agent: - - # Create a bridge "br-$GUEST_INTERFACE_DEFAULT" - sudo ovs-vsctl --no-wait -- --may-exist add-br "br-$GUEST_INTERFACE_DEFAULT" - # Add $GUEST_INTERFACE_DEFAULT to that bridge - sudo ovs-vsctl add-port "br-$GUEST_INTERFACE_DEFAULT" $GUEST_INTERFACE_DEFAULT - - # Set bridge mappings to "physnet1:br-$GUEST_INTERFACE_DEFAULT" - iniset "/$Q_PLUGIN_CONF_FILE.domU" OVS bridge_mappings "physnet1:br-$GUEST_INTERFACE_DEFAULT" - # Set integration bridge to domU's - iniset "/$Q_PLUGIN_CONF_FILE.domU" OVS integration_bridge $OVS_BRIDGE - # Set root wrap - iniset "/$Q_PLUGIN_CONF_FILE.domU" AGENT root_helper "$Q_RR_COMMAND" - fi -} - function quantum_plugin_configure_service() { if [[ "$ENABLE_TENANT_TUNNELS" = "True" ]]; then iniset /$Q_PLUGIN_CONF_FILE OVS tenant_network_type gre @@ -141,18 +44,9 @@ function quantum_plugin_configure_service() { _quantum_ovs_base_configure_firewall_driver } -function quantum_plugin_setup_interface_driver() { - local conf_file=$1 - iniset $conf_file DEFAULT interface_driver quantum.agent.linux.interface.OVSInterfaceDriver -} - function has_quantum_plugin_security_group() { return 0 } -function quantum_plugin_check_adv_test_requirements() { - is_service_enabled q-agt && is_service_enabled q-dhcp && return 0 -} - # Restore xtrace $MY_XTRACE diff --git a/lib/quantum_plugins/openvswitch_agent b/lib/quantum_plugins/openvswitch_agent new file mode 100644 index 0000000000..87f5e97adc --- /dev/null +++ b/lib/quantum_plugins/openvswitch_agent @@ -0,0 +1,117 @@ +# Quantum Open vSwitch L2 agent +# ----------------------------- + +# Save trace setting +PLUGIN_XTRACE=$(set +o | grep xtrace) +set +o xtrace + 
+source $TOP_DIR/lib/quantum_plugins/ovs_base + +function quantum_plugin_create_nova_conf() { + _quantum_ovs_base_configure_nova_vif_driver + if [ "$VIRT_DRIVER" = 'xenserver' ]; then + iniset $NOVA_CONF DEFAULT xenapi_vif_driver nova.virt.xenapi.vif.XenAPIOpenVswitchDriver + iniset $NOVA_CONF DEFAULT xenapi_ovs_integration_bridge $XEN_INTEGRATION_BRIDGE + # Disable nova's firewall so that it does not conflict with quantum + iniset $NOVA_CONF DEFAULT firewall_driver nova.virt.firewall.NoopFirewallDriver + fi +} + +function quantum_plugin_install_agent_packages() { + _quantum_ovs_base_install_agent_packages +} + +function quantum_plugin_configure_debug_command() { + _quantum_ovs_base_configure_debug_command +} + +function quantum_plugin_configure_dhcp_agent() { + iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_agent_manager quantum.agent.dhcp_agent.DhcpAgentWithStateReport +} + +function quantum_plugin_configure_l3_agent() { + _quantum_ovs_base_configure_l3_agent + iniset $Q_L3_CONF_FILE DEFAULT l3_agent_manager quantum.agent.l3_agent.L3NATAgentWithStateReport +} + +function quantum_plugin_configure_plugin_agent() { + # Setup integration bridge + _quantum_ovs_base_setup_bridge $OVS_BRIDGE + _quantum_ovs_base_configure_firewall_driver + + # Setup agent for tunneling + if [[ "$OVS_ENABLE_TUNNELING" = "True" ]]; then + # Verify tunnels are supported + # REVISIT - also check kernel module support for GRE and patch ports + OVS_VERSION=`ovs-vsctl --version | head -n 1 | awk '{print $4;}'` + if [ $OVS_VERSION \< "1.4" ] && ! is_service_enabled q-svc ; then + die $LINENO "You are running OVS version $OVS_VERSION. OVS 1.4+ is required for tunneling between multiple hosts." + fi + iniset /$Q_PLUGIN_CONF_FILE OVS enable_tunneling True + iniset /$Q_PLUGIN_CONF_FILE OVS local_ip $HOST_IP + fi + + # Setup physical network bridge mappings. Override + # ``OVS_VLAN_RANGES`` and ``OVS_BRIDGE_MAPPINGS`` in ``localrc`` for more + # complex physical network configurations. 
+ if [[ "$OVS_BRIDGE_MAPPINGS" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]] && [[ "$OVS_PHYSICAL_BRIDGE" != "" ]]; then + OVS_BRIDGE_MAPPINGS=$PHYSICAL_NETWORK:$OVS_PHYSICAL_BRIDGE + + # Configure bridge manually with physical interface as port for multi-node + sudo ovs-vsctl --no-wait -- --may-exist add-br $OVS_PHYSICAL_BRIDGE + fi + if [[ "$OVS_BRIDGE_MAPPINGS" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE OVS bridge_mappings $OVS_BRIDGE_MAPPINGS + fi + AGENT_BINARY="$QUANTUM_DIR/bin/quantum-openvswitch-agent" + + if [ "$VIRT_DRIVER" = 'xenserver' ]; then + # Make a copy of our config for domU + sudo cp /$Q_PLUGIN_CONF_FILE "/$Q_PLUGIN_CONF_FILE.domu" + + # Deal with Dom0's L2 Agent: + Q_RR_DOM0_COMMAND="$QUANTUM_DIR/bin/quantum-rootwrap-xen-dom0 $Q_RR_CONF_FILE" + + # For now, duplicate the xen configuration already found in nova.conf + iniset $Q_RR_CONF_FILE XENAPI xenapi_connection_url "$XENAPI_CONNECTION_URL" + iniset $Q_RR_CONF_FILE XENAPI xenapi_connection_username "$XENAPI_USER" + iniset $Q_RR_CONF_FILE XENAPI xenapi_connection_password "$XENAPI_PASSWORD" + + # Under XS/XCP, the ovs agent needs to target the dom0 + # integration bridge. This is enabled by using a root wrapper + # that executes commands on dom0 via a XenAPI plugin. 
+ iniset /$Q_PLUGIN_CONF_FILE AGENT root_helper "$Q_RR_DOM0_COMMAND" + + # Set "physical" mapping + iniset /$Q_PLUGIN_CONF_FILE OVS bridge_mappings "physnet1:$FLAT_NETWORK_BRIDGE" + + # XEN_INTEGRATION_BRIDGE is the integration bridge in dom0 + iniset /$Q_PLUGIN_CONF_FILE OVS integration_bridge $XEN_INTEGRATION_BRIDGE + + # Set up domU's L2 agent: + + # Create a bridge "br-$GUEST_INTERFACE_DEFAULT" + sudo ovs-vsctl --no-wait -- --may-exist add-br "br-$GUEST_INTERFACE_DEFAULT" + # Add $GUEST_INTERFACE_DEFAULT to that bridge + sudo ovs-vsctl add-port "br-$GUEST_INTERFACE_DEFAULT" $GUEST_INTERFACE_DEFAULT + + # Set bridge mappings to "physnet1:br-$GUEST_INTERFACE_DEFAULT" + iniset "/$Q_PLUGIN_CONF_FILE.domU" OVS bridge_mappings "physnet1:br-$GUEST_INTERFACE_DEFAULT" + # Set integration bridge to domU's + iniset "/$Q_PLUGIN_CONF_FILE.domU" OVS integration_bridge $OVS_BRIDGE + # Set root wrap + iniset "/$Q_PLUGIN_CONF_FILE.domU" AGENT root_helper "$Q_RR_COMMAND" + fi +} + +function quantum_plugin_setup_interface_driver() { + local conf_file=$1 + iniset $conf_file DEFAULT interface_driver quantum.agent.linux.interface.OVSInterfaceDriver +} + +function quantum_plugin_check_adv_test_requirements() { + is_service_enabled q-agt && is_service_enabled q-dhcp && return 0 +} + +# Restore xtrace +$PLUGIN_XTRACE From b1e49bfdf7f33b52e7aa14e5dfbb11c3328d3e6f Mon Sep 17 00:00:00 2001 From: Bob Ball Date: Thu, 30 May 2013 16:47:19 +0100 Subject: [PATCH 0144/4704] Make dependency on libvirt dynamic Fixes bug 1184960 Change-Id: If702807d9ae326bf216a2e076ce61062045d7c6b --- files/apts/nova | 6 +- files/rpms-suse/nova | 6 +- files/rpms/nova | 4 +- lib/nova | 192 +++++++++++++++++++++++-------------------- 4 files changed, 111 insertions(+), 97 deletions(-) diff --git a/files/apts/nova b/files/apts/nova index c24333c3db..6a7ef74c59 100644 --- a/files/apts/nova +++ b/files/apts/nova @@ -12,8 +12,8 @@ iptables ebtables sqlite3 sudo -kvm -qemu # dist:wheezy,jessie +kvm # NOPRIME +qemu # 
dist:wheezy,jessie NOPRIME libvirt-bin # NOPRIME libjs-jquery-tablesorter # Needed for coverage html reports vlan @@ -27,7 +27,7 @@ python-paste python-migrate python-gflags python-greenlet -python-libvirt +python-libvirt # NOPRIME python-libxml2 python-routes python-netaddr diff --git a/files/rpms-suse/nova b/files/rpms-suse/nova index a3fd4799c0..1be24a824e 100644 --- a/files/rpms-suse/nova +++ b/files/rpms-suse/nova @@ -7,11 +7,11 @@ genisoimage # required for config_drive iptables iputils kpartx -kvm +kvm # NOPRIME # qemu as fallback if kvm cannot be used -qemu +qemu # NOPRIME libvirt # NOPRIME -libvirt-python +libvirt-python # NOPRIME libxml2-python mysql-community-server # NOPRIME parted diff --git a/files/rpms/nova b/files/rpms/nova index c74f3963d5..f50d93f883 100644 --- a/files/rpms/nova +++ b/files/rpms/nova @@ -7,9 +7,9 @@ genisoimage # required for config_drive iptables iputils kpartx -kvm +kvm # NOPRIME libvirt-bin # NOPRIME -libvirt-python +libvirt-python # NOPRIME libxml2-python numpy # needed by websockify for spice console m2crypto diff --git a/lib/nova b/lib/nova index be526cfdb8..ae9da237c7 100644 --- a/lib/nova +++ b/lib/nova @@ -237,37 +237,39 @@ function configure_nova() { # Force IP forwarding on, just on case sudo sysctl -w net.ipv4.ip_forward=1 - # Attempt to load modules: network block device - used to manage qcow images - sudo modprobe nbd || true - - # Check for kvm (hardware based virtualization). If unable to initialize - # kvm, we drop back to the slower emulation mode (qemu). Note: many systems - # come with hardware virtualization disabled in BIOS. - if [[ "$LIBVIRT_TYPE" == "kvm" ]]; then - sudo modprobe kvm || true - if [ ! 
-e /dev/kvm ]; then - echo "WARNING: Switching to QEMU" - LIBVIRT_TYPE=qemu - if which selinuxenabled 2>&1 > /dev/null && selinuxenabled; then - # https://bugzilla.redhat.com/show_bug.cgi?id=753589 - sudo setsebool virt_use_execmem on + if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then + # Attempt to load modules: network block device - used to manage qcow images + sudo modprobe nbd || true + + # Check for kvm (hardware based virtualization). If unable to initialize + # kvm, we drop back to the slower emulation mode (qemu). Note: many systems + # come with hardware virtualization disabled in BIOS. + if [[ "$LIBVIRT_TYPE" == "kvm" ]]; then + sudo modprobe kvm || true + if [ ! -e /dev/kvm ]; then + echo "WARNING: Switching to QEMU" + LIBVIRT_TYPE=qemu + if which selinuxenabled 2>&1 > /dev/null && selinuxenabled; then + # https://bugzilla.redhat.com/show_bug.cgi?id=753589 + sudo setsebool virt_use_execmem on + fi fi fi - fi - # Install and configure **LXC** if specified. LXC is another approach to - # splitting a system into many smaller parts. LXC uses cgroups and chroot - # to simulate multiple systems. - if [[ "$LIBVIRT_TYPE" == "lxc" ]]; then - if is_ubuntu; then - if [[ ! "$DISTRO" > natty ]]; then - cgline="none /cgroup cgroup cpuacct,memory,devices,cpu,freezer,blkio 0 0" - sudo mkdir -p /cgroup - if ! grep -q cgroup /etc/fstab; then - echo "$cgline" | sudo tee -a /etc/fstab - fi - if ! mount -n | grep -q cgroup; then - sudo mount /cgroup + # Install and configure **LXC** if specified. LXC is another approach to + # splitting a system into many smaller parts. LXC uses cgroups and chroot + # to simulate multiple systems. + if [[ "$LIBVIRT_TYPE" == "lxc" ]]; then + if is_ubuntu; then + if [[ ! "$DISTRO" > natty ]]; then + cgline="none /cgroup cgroup cpuacct,memory,devices,cpu,freezer,blkio 0 0" + sudo mkdir -p /cgroup + if ! grep -q cgroup /etc/fstab; then + echo "$cgline" | sudo tee -a /etc/fstab + fi + if ! 
mount -n | grep -q cgroup; then + sudo mount /cgroup + fi fi fi fi @@ -278,9 +280,10 @@ function configure_nova() { configure_baremetal_nova_dirs fi - if is_service_enabled quantum && is_quantum_ovs_base_plugin && ! sudo grep -q '^cgroup_device_acl' $QEMU_CONF; then - # Add /dev/net/tun to cgroup_device_acls, needed for type=ethernet interfaces - cat </etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla + if is_fedora || is_suse; then + if is_fedora && [[ $DISTRO =~ (rhel6) || "$os_RELEASE" -le "17" ]]; then + sudo bash -c "cat </etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla [libvirt Management Access] Identity=unix-group:$LIBVIRT_GROUP Action=org.libvirt.unix.manage @@ -308,11 +309,11 @@ ResultAny=yes ResultInactive=yes ResultActive=yes EOF" - elif is_suse && [[ $os_RELEASE = 12.2 || "$os_VENDOR" = "SUSE LINUX" ]]; then - # openSUSE < 12.3 or SLE - # Work around the fact that polkit-default-privs overrules pklas - # with 'unix-group:$group'. - sudo bash -c "cat </etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla + elif is_suse && [[ $os_RELEASE = 12.2 || "$os_VENDOR" = "SUSE LINUX" ]]; then + # openSUSE < 12.3 or SLE + # Work around the fact that polkit-default-privs overrules pklas + # with 'unix-group:$group'. 
+ sudo bash -c "cat </etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla [libvirt Management Access] Identity=unix-user:$USER Action=org.libvirt.unix.manage @@ -320,13 +321,13 @@ ResultAny=yes ResultInactive=yes ResultActive=yes EOF" - else - # Starting with fedora 18 and opensuse-12.3 enable stack-user to - # virsh -c qemu:///system by creating a policy-kit rule for - # stack-user using the new Javascript syntax - rules_dir=/etc/polkit-1/rules.d - sudo mkdir -p $rules_dir - sudo bash -c "cat < $rules_dir/50-libvirt-$STACK_USER.rules + else + # Starting with fedora 18 and opensuse-12.3 enable stack-user to + # virsh -c qemu:///system by creating a policy-kit rule for + # stack-user using the new Javascript syntax + rules_dir=/etc/polkit-1/rules.d + sudo mkdir -p $rules_dir + sudo bash -c "cat < $rules_dir/50-libvirt-$STACK_USER.rules polkit.addRule(function(action, subject) { if (action.id == 'org.libvirt.unix.manage' && subject.user == '"$STACK_USER"') { @@ -334,21 +335,22 @@ polkit.addRule(function(action, subject) { } }); EOF" - unset rules_dir + unset rules_dir + fi fi - fi - # The user that nova runs as needs to be member of **libvirtd** group otherwise - # nova-compute will be unable to use libvirt. - if ! getent group $LIBVIRT_GROUP >/dev/null; then - sudo groupadd $LIBVIRT_GROUP - fi - add_user_to_group $STACK_USER $LIBVIRT_GROUP + # The user that nova runs as needs to be member of **libvirtd** group otherwise + # nova-compute will be unable to use libvirt. + if ! getent group $LIBVIRT_GROUP >/dev/null; then + sudo groupadd $LIBVIRT_GROUP + fi + add_user_to_group $STACK_USER $LIBVIRT_GROUP - # libvirt detects various settings on startup, as we potentially changed - # the system configuration (modules, filesystems), we need to restart - # libvirt to detect those changes. 
- restart_service $LIBVIRT_DAEMON + # libvirt detects various settings on startup, as we potentially changed + # the system configuration (modules, filesystems), we need to restart + # libvirt to detect those changes. + restart_service $LIBVIRT_DAEMON + fi # Instance Storage @@ -436,8 +438,10 @@ function create_nova_conf() { if is_baremetal; then iniset $NOVA_CONF baremetal sql_connection `database_connection_url nova_bm` fi - iniset $NOVA_CONF DEFAULT libvirt_type "$LIBVIRT_TYPE" - iniset $NOVA_CONF DEFAULT libvirt_cpu_mode "none" + if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then + iniset $NOVA_CONF DEFAULT libvirt_type "$LIBVIRT_TYPE" + iniset $NOVA_CONF DEFAULT libvirt_cpu_mode "none" + fi iniset $NOVA_CONF DEFAULT instance_name_template "${INSTANCE_NAME_PREFIX}%08x" iniset $NOVA_CONF DEFAULT osapi_v3_enabled "True" @@ -636,26 +640,32 @@ function install_novaclient() { # install_nova() - Collect source and prepare function install_nova() { if is_service_enabled n-cpu; then - if is_ubuntu; then - install_package libvirt-bin - elif is_fedora || is_suse; then - install_package libvirt - else - exit_distro_not_supported "libvirt installation" - fi - - # Install and configure **LXC** if specified. LXC is another approach to - # splitting a system into many smaller parts. LXC uses cgroups and chroot - # to simulate multiple systems. - if [[ "$LIBVIRT_TYPE" == "lxc" ]]; then + if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then if is_ubuntu; then - if [[ "$DISTRO" > natty ]]; then - install_package cgroup-lite - fi + install_package kvm + install_package libvirt-bin + install_package python-libvirt + elif is_fedora || is_suse; then + install_package kvm + install_package libvirt + install_package libvirt-python else - ### FIXME(dtroyer): figure this out - echo "RPM-based cgroup not implemented yet" - yum_install libcgroup-tools + exit_distro_not_supported "libvirt installation" + fi + + # Install and configure **LXC** if specified. 
LXC is another approach to + # splitting a system into many smaller parts. LXC uses cgroups and chroot + # to simulate multiple systems. + if [[ "$LIBVIRT_TYPE" == "lxc" ]]; then + if is_ubuntu; then + if [[ "$DISTRO" > natty ]]; then + install_package cgroup-lite + fi + else + ### FIXME(dtroyer): figure this out + echo "RPM-based cgroup not implemented yet" + yum_install libcgroup-tools + fi fi fi fi @@ -698,9 +708,13 @@ function start_nova() { screen_it n-cell "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cells --config-file $NOVA_CELLS_CONF" fi - # The group **$LIBVIRT_GROUP** is added to the current user in this script. - # Use 'sg' to execute nova-compute as a member of the **$LIBVIRT_GROUP** group. - screen_it n-cpu "cd $NOVA_DIR && sg $LIBVIRT_GROUP '$NOVA_BIN_DIR/nova-compute --config-file $NOVA_CONF_BOTTOM'" + if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then + # The group **$LIBVIRT_GROUP** is added to the current user in this script. + # Use 'sg' to execute nova-compute as a member of the **$LIBVIRT_GROUP** group. + screen_it n-cpu "cd $NOVA_DIR && sg $LIBVIRT_GROUP '$NOVA_BIN_DIR/nova-compute --config-file $NOVA_CONF_BOTTOM'" + else + screen_it n-cpu "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-compute --config-file $NOVA_CONF_BOTTOM" + fi screen_it n-crt "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cert" screen_it n-net "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-network --config-file $NOVA_CONF_BOTTOM" screen_it n-sch "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-scheduler --config-file $NOVA_CONF_BOTTOM" From ebfac64d2a53b46e597b7b6a30787c4e276afb93 Mon Sep 17 00:00:00 2001 From: Kyle Mestery Date: Fri, 17 May 2013 15:20:56 -0500 Subject: [PATCH 0145/4704] Add support for setting extra networking configuration options. Add support for generically setting arguments for OpenStack Networking for both the server and agent. This is done using Q_SRV_EXTRA_OPTS for the server, which let you place settings in the "OVS" or "LINUX_BRIDGE" sections of the plugin configuration file. 
For the agent, two variables are defined: Q_AGENT_EXTRA_AGENT_OPTS lets you set items in the "AGENT" section of the plugin configuration file, and Q_AGENT_EXTRA_SRV_OPTS lets you set things in the "OVS" or "LINUX_BRIDGE" sections of the plugin configuration file. Implements blueprint devstack-quantum-config. Change-Id: I9ba44b21231f88cda1bcc5e3389c7875f03d8145 --- README.md | 13 +++++++++++++ lib/quantum | 4 ++++ lib/quantum_plugins/linuxbridge | 8 ++++++++ lib/quantum_plugins/linuxbridge_agent | 14 ++++++++++++++ lib/quantum_plugins/openvswitch | 8 ++++++++ lib/quantum_plugins/openvswitch_agent | 14 ++++++++++++++ 6 files changed, 61 insertions(+) diff --git a/README.md b/README.md index 8573638c41..905a54d5fc 100644 --- a/README.md +++ b/README.md @@ -122,6 +122,19 @@ In order to enable Quantum a single node setup, you'll need the following settin Then run `stack.sh` as normal. +devstack supports adding specific Quantum configuration flags to both the Open vSwitch and LinuxBridge plugin configuration files. To make use of this feature, the following variables are defined and can be configured in your `localrc` file: + + Variable Name Plugin Config File Section Modified + ------------------------------------------------------------------------------------- + Q_SRV_EXTRA_OPTS `OVS` (for Open Vswitch) or `LINUX_BRIDGE` (for LinuxBridge) + Q_AGENT_EXTRA_AGENT_OPTS AGENT + Q_AGENT_EXTRA_SRV_OPTS `OVS` (for Open Vswitch) or `LINUX_BRIDGE` (for LinuxBridge) + +An example of using the variables in your `localrc` is below: + + Q_AGENT_EXTRA_AGENT_OPTS=(tunnel_type=vxlan vxlan_udp_port=8472) + Q_SRV_EXTRA_OPTS=(tenant_network_type=vxlan) + # Tempest If tempest has been successfully configured, a basic set of smoke tests can be run as follows: diff --git a/lib/quantum b/lib/quantum index d85c6483aa..e231566d5a 100644 --- a/lib/quantum +++ b/lib/quantum @@ -52,6 +52,10 @@ # Quantum. # # With Quantum networking the NETWORK_MANAGER variable is ignored. 
+# +# To enable specific configuration options for either the Open vSwitch or +# LinuxBridge plugin, please see the top level README file under the +# Quantum section. # Save trace setting XTRACE=$(set +o | grep xtrace) diff --git a/lib/quantum_plugins/linuxbridge b/lib/quantum_plugins/linuxbridge index 71832f1427..dffa32b7bf 100644 --- a/lib/quantum_plugins/linuxbridge +++ b/lib/quantum_plugins/linuxbridge @@ -35,6 +35,14 @@ function quantum_plugin_configure_service() { else iniset /$Q_PLUGIN_CONF_FILE SECURITYGROUP firewall_driver quantum.agent.firewall.NoopFirewallDriver fi + + # Define extra "LINUX_BRIDGE" configuration options when q-svc is configured by defining + # the array ``Q_SRV_EXTRA_OPTS``. + # For Example: ``Q_SRV_EXTRA_OPTS=(foo=true bar=2)`` + for I in "${Q_SRV_EXTRA_OPTS[@]}"; do + # Replace the first '=' with ' ' for iniset syntax + iniset /$Q_PLUGIN_CONF_FILE LINUX_BRIDGE ${I/=/ } + done } function has_quantum_plugin_security_group() { diff --git a/lib/quantum_plugins/linuxbridge_agent b/lib/quantum_plugins/linuxbridge_agent index 1e83275723..7855cd0eb1 100644 --- a/lib/quantum_plugins/linuxbridge_agent +++ b/lib/quantum_plugins/linuxbridge_agent @@ -47,6 +47,20 @@ function quantum_plugin_configure_plugin_agent() { iniset /$Q_PLUGIN_CONF_FILE SECURITYGROUP firewall_driver quantum.agent.firewall.NoopFirewallDriver fi AGENT_BINARY="$QUANTUM_DIR/bin/quantum-linuxbridge-agent" + # Define extra "AGENT" configuration options when q-agt is configured by defining + # the array ``Q_AGENT_EXTRA_AGENT_OPTS``. + # For Example: ``Q_AGENT_EXTRA_AGENT_OPTS=(foo=true bar=2)`` + for I in "${Q_AGENT_EXTRA_AGENT_OPTS[@]}"; do + # Replace the first '=' with ' ' for iniset syntax + iniset /$Q_PLUGIN_CONF_FILE AGENT ${I/=/ } + done + # Define extra "LINUX_BRIDGE" configuration options when q-agt is configured by defining + # the array ``Q_AGENT_EXTRA_SRV_OPTS``. 
+ # For Example: ``Q_AGENT_EXTRA_SRV_OPTS=(foo=true bar=2)`` + for I in "${Q_AGENT_EXTRA_SRV_OPTS[@]}"; do + # Replace the first '=' with ' ' for iniset syntax + iniset /$Q_PLUGIN_CONF_FILE LINUX_BRIDGE ${I/=/ } + done } function quantum_plugin_setup_interface_driver() { diff --git a/lib/quantum_plugins/openvswitch b/lib/quantum_plugins/openvswitch index cd29c199a1..e53db8aaa3 100644 --- a/lib/quantum_plugins/openvswitch +++ b/lib/quantum_plugins/openvswitch @@ -42,6 +42,14 @@ function quantum_plugin_configure_service() { fi _quantum_ovs_base_configure_firewall_driver + + # Define extra "OVS" configuration options when q-svc is configured by defining + # the array ``Q_SRV_EXTRA_OPTS``. + # For Example: ``Q_SRV_EXTRA_OPTS=(foo=true bar=2)`` + for I in "${Q_SRV_EXTRA_OPTS[@]}"; do + # Replace the first '=' with ' ' for iniset syntax + iniset /$Q_PLUGIN_CONF_FILE OVS ${I/=/ } + done } function has_quantum_plugin_security_group() { diff --git a/lib/quantum_plugins/openvswitch_agent b/lib/quantum_plugins/openvswitch_agent index 87f5e97adc..ee761edf66 100644 --- a/lib/quantum_plugins/openvswitch_agent +++ b/lib/quantum_plugins/openvswitch_agent @@ -102,6 +102,20 @@ function quantum_plugin_configure_plugin_agent() { # Set root wrap iniset "/$Q_PLUGIN_CONF_FILE.domU" AGENT root_helper "$Q_RR_COMMAND" fi + # Define extra "AGENT" configuration options when q-agt is configured by defining + # defining the array ``Q_AGENT_EXTRA_AGENT_OPTS``. + # For Example: ``Q_AGENT_EXTRA_AGENT_OPTS=(foo=true bar=2)`` + for I in "${Q_AGENT_EXTRA_AGENT_OPTS[@]}"; do + # Replace the first '=' with ' ' for iniset syntax + iniset /$Q_PLUGIN_CONF_FILE AGENT ${I/=/ } + done + # Define extra "OVS" configuration options when q-agt is configured by defining + # defining the array ``Q_AGENT_EXTRA_SRV_OPTS``. 
+ # For Example: ``Q_AGENT_EXTRA_SRV_OPTS=(foo=true bar=2)`` + for I in "${Q_AGENT_EXTRA_SRV_OPTS[@]}"; do + # Replace the first '=' with ' ' for iniset syntax + iniset /$Q_PLUGIN_CONF_FILE OVS ${I/=/ } + done } function quantum_plugin_setup_interface_driver() { From bae0233cf991ea18104120841a80ec4e3e932223 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Thu, 9 May 2013 09:24:49 +0200 Subject: [PATCH 0146/4704] Decrease the tempest build interval Decreasing the build interval leads to faster state change detection, so it will speed up the gating jobs. It may increase the possibility of a "hidden" issues causes indeterministic test results. Change-Id: Ida5e7296e4ea53d761e0efef34376699337d8377 --- lib/tempest | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index a259ee977b..aa66ebb872 100644 --- a/lib/tempest +++ b/lib/tempest @@ -44,7 +44,7 @@ TEMPEST_CONF=$TEMPEST_CONF_DIR/tempest.conf NOVA_SOURCE_DIR=$DEST/nova -BUILD_INTERVAL=3 +BUILD_INTERVAL=1 BUILD_TIMEOUT=400 From 18b0906e1fa6510087c7455c7dee652e5b5842ef Mon Sep 17 00:00:00 2001 From: Jeremy Stanley Date: Mon, 10 Jun 2013 00:23:38 +0000 Subject: [PATCH 0147/4704] Correct InvalidInstanceID.NotFound test. * exercises/euca.sh: The match filtering InvalidInstanceID.NotFound is cleaned up to eliminate numerous leaning toothpicks, and now also actually works. The previous version was incorporating literal square brackets into the pattern where I think a character list match may have been intended instead, but was not actually necessary anyway. Change-Id: I765a20000e7b6ba6deadce8cad3bbc9559fcdd0d --- exercises/euca.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/exercises/euca.sh b/exercises/euca.sh index d704279431..16b5f8e402 100755 --- a/exercises/euca.sh +++ b/exercises/euca.sh @@ -162,7 +162,7 @@ euca-terminate-instances $INSTANCE || \ # case changed with bug/836978. 
Requesting the status of an invalid instance # will now return an error message including the instance id, so we need to # filter that out. -if ! timeout $TERMINATE_TIMEOUT sh -c "while euca-describe-instances $INSTANCE | grep -ve \"\\\(InstanceNotFound\\\|InvalidInstanceID\[.\]NotFound\\\)\" | grep -q $INSTANCE; do sleep 1; done"; then +if ! timeout $TERMINATE_TIMEOUT sh -c "while euca-describe-instances $INSTANCE | grep -ve '\(InstanceNotFound\|InvalidInstanceID\.NotFound\)' | grep -q $INSTANCE; do sleep 1; done"; then die $LINENO "server didn't terminate within $TERMINATE_TIMEOUT seconds" fi From eeaa41a1ce5668698b86ad12c4ce37fc13bd637d Mon Sep 17 00:00:00 2001 From: Giulio Fidente Date: Fri, 7 Jun 2013 17:11:30 +0200 Subject: [PATCH 0148/4704] Adds openssl-devel to the RPMs dependencies pyOpenSSL (required by glanceclient) fails to build if openssl header files are missing. Change-Id: I3ebce749ee9ca88cbbecc6b7676a086d82670f4d Fixes: bug #1188650 --- files/rpms/general | 1 + 1 file changed, 1 insertion(+) diff --git a/files/rpms/general b/files/rpms/general index 764b602da0..2a1ee4b68b 100644 --- a/files/rpms/general +++ b/files/rpms/general @@ -6,6 +6,7 @@ gcc # dist:rhel6 [2] git-core openssh-server openssl +openssl-devel # to rebuild pyOpenSSL if needed libxml2-devel # dist:rhel6 [2] libxslt-devel # dist:rhel6 [2] psmisc From 1a6d4492e25365ec2947e93d5d69507088281415 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 3 Jun 2013 16:47:36 -0500 Subject: [PATCH 0149/4704] Periodic formatting cleanup This is the semi-irregular comment and docs cleanup. No functional changes should be here although some code is moved in a small attempt to sort functions and get things where they need to be. 
Change-Id: Ib4a3e2590c6fbd016c391acc7aef6421e91c0dca --- functions | 136 ++++++++++++++++++++---------------- lib/horizon | 12 +++- lib/keystone | 21 +----- lib/nova | 4 +- lib/quantum | 52 +++++++------- stack.sh | 189 +++++++++++++++++++++++++-------------------------- stackrc | 100 ++++++++++++++------------- 7 files changed, 260 insertions(+), 254 deletions(-) diff --git a/functions b/functions index dfde7dc551..1257024838 100644 --- a/functions +++ b/functions @@ -200,6 +200,7 @@ function _get_package_dir() { echo "$pkg_dir" } + # get_packages() collects a list of package names of any type from the # prerequisite files in ``files/{apts|rpms}``. The list is intended # to be passed to a package installer such as apt or yum. @@ -390,42 +391,6 @@ GetOSVersion() { export os_VENDOR os_RELEASE os_UPDATE os_PACKAGE os_CODENAME } -# git update using reference as a branch. -# git_update_branch ref -function git_update_branch() { - - GIT_BRANCH=$1 - - git checkout -f origin/$GIT_BRANCH - # a local branch might not exist - git branch -D $GIT_BRANCH || true - git checkout -b $GIT_BRANCH -} - - -# git update using reference as a tag. Be careful editing source at that repo -# as working copy will be in a detached mode -# git_update_tag ref -function git_update_tag() { - - GIT_TAG=$1 - - git tag -d $GIT_TAG - # fetching given tag only - git fetch origin tag $GIT_TAG - git checkout -f $GIT_TAG -} - - -# git update using reference as a branch. -# git_update_remote_branch ref -function git_update_remote_branch() { - - GIT_BRANCH=$1 - - git checkout -b $GIT_BRANCH -t origin/$GIT_BRANCH -} - # Translate the OS version values into common nomenclature # Sets ``DISTRO`` from the ``os_*`` values @@ -457,19 +422,8 @@ function GetDistro() { } -# Determine if current distribution is an Ubuntu-based distribution. -# It will also detect non-Ubuntu but Debian-based distros; this is not an issue -# since Debian and Ubuntu should be compatible. 
-# is_ubuntu -function is_ubuntu { - if [[ -z "$os_PACKAGE" ]]; then - GetOSVersion - fi - [ "$os_PACKAGE" = "deb" ] -} - # Determine if current distribution is a Fedora-based distribution -# (Fedora, RHEL, CentOS). +# (Fedora, RHEL, CentOS, etc). # is_fedora function is_fedora { if [[ -z "$os_VENDOR" ]]; then @@ -479,6 +433,7 @@ function is_fedora { [ "$os_VENDOR" = "Fedora" ] || [ "$os_VENDOR" = "Red Hat" ] || [ "$os_VENDOR" = "CentOS" ] } + # Determine if current distribution is a SUSE-based distribution # (openSUSE, SLE). # is_suse @@ -491,6 +446,17 @@ function is_suse { } +# Determine if current distribution is an Ubuntu-based distribution +# It will also detect non-Ubuntu but Debian-based distros +# is_ubuntu +function is_ubuntu { + if [[ -z "$os_PACKAGE" ]]; then + GetOSVersion + fi + [ "$os_PACKAGE" = "deb" ] +} + + # Exit after outputting a message about the distribution not being supported. # exit_distro_not_supported [optional-string-telling-what-is-missing] function exit_distro_not_supported { @@ -565,6 +531,43 @@ function git_clone { } +# git update using reference as a branch. +# git_update_branch ref +function git_update_branch() { + + GIT_BRANCH=$1 + + git checkout -f origin/$GIT_BRANCH + # a local branch might not exist + git branch -D $GIT_BRANCH || true + git checkout -b $GIT_BRANCH +} + + +# git update using reference as a branch. +# git_update_remote_branch ref +function git_update_remote_branch() { + + GIT_BRANCH=$1 + + git checkout -b $GIT_BRANCH -t origin/$GIT_BRANCH +} + + +# git update using reference as a tag. 
Be careful editing source at that repo +# as working copy will be in a detached mode +# git_update_tag ref +function git_update_tag() { + + GIT_TAG=$1 + + git tag -d $GIT_TAG + # fetching given tag only + git fetch origin tag $GIT_TAG + git checkout -f $GIT_TAG +} + + # Comment an option in an INI file # inicomment config-file section option function inicomment() { @@ -1020,6 +1023,7 @@ function screen_rc { fi } + # Helper to remove the *.failure files under $SERVICE_DIR/$SCREEN_NAME # This is used for service_check when all the screen_it are called finished # init_service_check @@ -1034,6 +1038,7 @@ function init_service_check() { rm -f "$SERVICE_DIR/$SCREEN_NAME"/*.failure } + # Helper to get the status of each running service # service_check function service_check() { @@ -1062,6 +1067,7 @@ function service_check() { fi } + # ``pip install`` the dependencies of the package before ``setup.py develop`` # so pip and not distutils processes the dependency chain # Uses globals ``TRACK_DEPENDES``, ``*_proxy` @@ -1242,6 +1248,7 @@ function upload_image() { fi } + # Set the database backend to use # When called from stackrc/localrc DATABASE_BACKENDS has not been # initialized yet, just save the configuration selection and call back later @@ -1259,6 +1266,7 @@ function use_database { fi } + # Toggle enable/disable_service for services that must run exclusive of each other # $1 The name of a variable containing a space-separated list of services # $2 The name of a variable in which to store the enabled service's name @@ -1275,6 +1283,7 @@ function use_exclusive_service { return 0 } + # Wait for an HTTP server to start answering requests # wait_for_service timeout url function wait_for_service() { @@ -1283,6 +1292,7 @@ function wait_for_service() { timeout $timeout sh -c "while ! 
http_proxy= https_proxy= curl -s $url >/dev/null; do sleep 1; done" } + # Wrapper for ``yum`` to set proxy environment variables # Uses globals ``OFFLINE``, ``*_proxy` # yum_install package [package ...] @@ -1295,8 +1305,21 @@ function yum_install() { yum install -y "$@" } + +# zypper wrapper to set arguments correctly +# zypper_install package [package ...] +function zypper_install() { + [[ "$OFFLINE" = "True" ]] && return + local sudo="sudo" + [[ "$(id -u)" = "0" ]] && sudo="env" + $sudo http_proxy=$http_proxy https_proxy=$https_proxy \ + zypper --non-interactive install --auto-agree-with-licenses "$@" +} + + # ping check # Uses globals ``ENABLED_SERVICES`` +# ping_check from-net ip boot-timeout expected function ping_check() { if is_service_enabled quantum; then _ping_check_quantum "$1" $2 $3 $4 @@ -1333,8 +1356,10 @@ function _ping_check_novanet() { fi } + # ssh check +# ssh_check net-name key-file floating-ip default-user active-timeout function ssh_check() { if is_service_enabled quantum; then _ssh_check_quantum "$1" $2 $3 $4 $5 @@ -1356,17 +1381,6 @@ function _ssh_check_novanet() { } -# zypper wrapper to set arguments correctly -# zypper_install package [package ...] -function zypper_install() { - [[ "$OFFLINE" = "True" ]] && return - local sudo="sudo" - [[ "$(id -u)" = "0" ]] && sudo="env" - $sudo http_proxy=$http_proxy https_proxy=$https_proxy \ - zypper --non-interactive install --auto-agree-with-licenses "$@" -} - - # Add a user to a group. # add_user_to_group user group function add_user_to_group() { @@ -1396,6 +1410,7 @@ function get_python_exec_prefix() { fi } + # Get the location of the $module-rootwrap executables, where module is cinder # or nova. # get_rootwrap_location module @@ -1405,6 +1420,7 @@ function get_rootwrap_location() { echo "$(get_python_exec_prefix)/$module-rootwrap" } + # Get the path to the pip command. 
# get_pip_command function get_pip_command() { @@ -1419,6 +1435,7 @@ function get_pip_command() { fi } + # Path permissions sanity check # check_path_perm_sanity path function check_path_perm_sanity() { @@ -1448,6 +1465,7 @@ function check_path_perm_sanity() { done } + # Restore xtrace $XTRACE diff --git a/lib/horizon b/lib/horizon index ab1139996a..0cc250ed6f 100644 --- a/lib/horizon +++ b/lib/horizon @@ -74,13 +74,20 @@ function _horizon_config_set() { } + # Entry Points # ------------ # cleanup_horizon() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_horizon() { - : + if [[ is_fedora && $DISTRO =~ (rhel6) ]]; then + # If ``/usr/bin/node`` points into ``$DEST`` + # we installed it via ``install_nodejs`` + if [[ $(readlink -f /usr/bin/node) =~ ($DEST) ]]; then + sudo rm /usr/bin/node + fi + fi } # configure_horizon() - Set config files, create data dirs, etc @@ -111,7 +118,6 @@ function init_horizon() { # Create an empty directory that apache uses as docroot sudo mkdir -p $HORIZON_DIR/.blackhole - HORIZON_REQUIRE='' if is_ubuntu; then # Clean up the old config name @@ -148,7 +154,6 @@ function init_horizon() { s,%DEST%,$DEST,g; s,%HORIZON_REQUIRE%,$HORIZON_REQUIRE,g; \" $FILES/apache-horizon.template >/etc/$APACHE_NAME/$APACHE_CONF" - } # install_horizon() - Collect source and prepare @@ -193,6 +198,7 @@ function stop_horizon() { fi } + # Restore xtrace $XTRACE diff --git a/lib/keystone b/lib/keystone index 6bf4d9fde4..2edd137dbb 100644 --- a/lib/keystone +++ b/lib/keystone @@ -178,7 +178,6 @@ function configure_keystone() { cp $KEYSTONE_DIR/etc/logging.conf.sample $KEYSTONE_CONF_DIR/logging.conf iniset $KEYSTONE_CONF_DIR/logging.conf logger_root level "DEBUG" iniset $KEYSTONE_CONF_DIR/logging.conf logger_root handlers "devel,production" - } # create_keystone_accounts() - Sets up common required keystone accounts @@ -254,25 +253,6 @@ create_keystone_accounts() { --adminurl 
"$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v2.0" \ --internalurl "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0" fi - - # TODO(dtroyer): This is part of a series of changes...remove these when - # complete if they are really unused -# KEYSTONEADMIN_ROLE=$(keystone role-create \ -# --name KeystoneAdmin \ -# | grep " id " | get_field 2) -# KEYSTONESERVICE_ROLE=$(keystone role-create \ -# --name KeystoneServiceAdmin \ -# | grep " id " | get_field 2) - - # TODO(termie): these two might be dubious -# keystone user-role-add \ -# --user_id $ADMIN_USER \ -# --role_id $KEYSTONEADMIN_ROLE \ -# --tenant_id $ADMIN_TENANT -# keystone user-role-add \ -# --user_id $ADMIN_USER \ -# --role_id $KEYSTONESERVICE_ROLE \ -# --tenant_id $ADMIN_TENANT } # init_keystone() - Initialize databases, etc. @@ -339,6 +319,7 @@ function stop_keystone() { screen -S $SCREEN_NAME -p key -X kill } + # Restore xtrace $XTRACE diff --git a/lib/nova b/lib/nova index 508ed7838c..cac6330cc2 100644 --- a/lib/nova +++ b/lib/nova @@ -300,7 +300,7 @@ EOF fi if is_fedora || is_suse; then - if is_fedora && [[ $DISTRO =~ (rhel6) || "$os_RELEASE" -le "17" ]]; then + if is_fedora && [[ $DISTRO =~ (rhel6) || "$os_RELEASE" -le "17" ]]; then sudo bash -c "cat </etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla [libvirt Management Access] Identity=unix-group:$LIBVIRT_GROUP @@ -352,7 +352,6 @@ EOF" restart_service $LIBVIRT_DAEMON fi - # Instance Storage # ---------------- @@ -494,7 +493,6 @@ function create_nova_conf() { iniset_multiline $NOVA_CONF DEFAULT notification_driver "nova.openstack.common.notifier.rpc_notifier" "ceilometer.compute.nova_notifier" fi - # Provide some transition from ``EXTRA_FLAGS`` to ``EXTRA_OPTS`` if [[ -z "$EXTRA_OPTS" && -n "$EXTRA_FLAGS" ]]; then EXTRA_OPTS=$EXTRA_FLAGS diff --git a/lib/quantum b/lib/quantum index d85c6483aa..15c8f19179 100644 --- a/lib/quantum +++ b/lib/quantum @@ -112,18 +112,18 @@ if 
is_service_enabled quantum; then # The following variables control the Quantum openvswitch and # linuxbridge plugins' allocation of tenant networks and # availability of provider networks. If these are not configured - # in localrc, tenant networks will be local to the host (with no + # in ``localrc``, tenant networks will be local to the host (with no # remote connectivity), and no physical resources will be # available for the allocation of provider networks. # To use GRE tunnels for tenant networks, set to True in - # localrc. GRE tunnels are only supported by the openvswitch + # ``localrc``. GRE tunnels are only supported by the openvswitch # plugin, and currently only on Ubuntu. ENABLE_TENANT_TUNNELS=${ENABLE_TENANT_TUNNELS:-False} # If using GRE tunnels for tenant networks, specify the range of # tunnel IDs from which tenant networks are allocated. Can be - # overriden in localrc in necesssary. + # overriden in ``localrc`` in necesssary. TENANT_TUNNEL_RANGES=${TENANT_TUNNEL_RANGE:-1:1000} # To use VLANs for tenant networks, set to True in localrc. VLANs @@ -131,7 +131,7 @@ if is_service_enabled quantum; then # requiring additional configuration described below. ENABLE_TENANT_VLANS=${ENABLE_TENANT_VLANS:-False} - # If using VLANs for tenant networks, set in localrc to specify + # If using VLANs for tenant networks, set in ``localrc`` to specify # the range of VLAN VIDs from which tenant networks are # allocated. An external network switch must be configured to # trunk these VLANs between hosts for multi-host connectivity. 
@@ -140,16 +140,16 @@ if is_service_enabled quantum; then TENANT_VLAN_RANGE=${TENANT_VLAN_RANGE:-} # If using VLANs for tenant networks, or if using flat or VLAN - # provider networks, set in localrc to the name of the physical - # network, and also configure OVS_PHYSICAL_BRIDGE for the - # openvswitch agent or LB_PHYSICAL_INTERFACE for the linuxbridge + # provider networks, set in ``localrc`` to the name of the physical + # network, and also configure ``OVS_PHYSICAL_BRIDGE`` for the + # openvswitch agent or ``LB_PHYSICAL_INTERFACE`` for the linuxbridge # agent, as described below. # # Example: ``PHYSICAL_NETWORK=default`` PHYSICAL_NETWORK=${PHYSICAL_NETWORK:-} # With the openvswitch plugin, if using VLANs for tenant networks, - # or if using flat or VLAN provider networks, set in localrc to + # or if using flat or VLAN provider networks, set in ``localrc`` to # the name of the OVS bridge to use for the physical network. The # bridge will be created if it does not already exist, but a # physical interface must be manually added to the bridge as a @@ -159,28 +159,29 @@ if is_service_enabled quantum; then OVS_PHYSICAL_BRIDGE=${OVS_PHYSICAL_BRIDGE:-} # With the linuxbridge plugin, if using VLANs for tenant networks, - # or if using flat or VLAN provider networks, set in localrc to + # or if using flat or VLAN provider networks, set in ``localrc`` to # the name of the network interface to use for the physical # network. # # Example: ``LB_PHYSICAL_INTERFACE=eth1`` LB_PHYSICAL_INTERFACE=${LB_PHYSICAL_INTERFACE:-} - # With the openvswitch plugin, set to True in localrc to enable + # With the openvswitch plugin, set to True in ``localrc`` to enable # provider GRE tunnels when ``ENABLE_TENANT_TUNNELS`` is False. # # Example: ``OVS_ENABLE_TUNNELING=True`` OVS_ENABLE_TUNNELING=${OVS_ENABLE_TUNNELING:-$ENABLE_TENANT_TUNNELS} fi - # Quantum plugin specific functions # --------------------------------- -# Please refer to lib/quantum_plugins/README.md for details. 
+ +# Please refer to ``lib/quantum_plugins/README.md`` for details. source $TOP_DIR/lib/quantum_plugins/$Q_PLUGIN # Agent loadbalancer service plugin functions # ------------------------------------------- + # Hardcoding for 1 service plugin for now source $TOP_DIR/lib/quantum_plugins/services/agent_loadbalancer @@ -191,7 +192,6 @@ else Q_USE_SECGROUP=False fi - # Functions # --------- @@ -423,7 +423,7 @@ function cleanup_quantum() { # _configure_quantum_common() # Set common config for all quantum server and agents. -# This MUST be called before other _configure_quantum_* functions. +# This MUST be called before other ``_configure_quantum_*`` functions. function _configure_quantum_common() { # Put config files in ``QUANTUM_CONF_DIR`` for everyone to find if [[ ! -d $QUANTUM_CONF_DIR ]]; then @@ -433,11 +433,11 @@ function _configure_quantum_common() { cp $QUANTUM_DIR/etc/quantum.conf $QUANTUM_CONF - # Set plugin-specific variables Q_DB_NAME, Q_PLUGIN_CLASS. - # For main plugin config file, set Q_PLUGIN_CONF_PATH, Q_PLUGIN_CONF_FILENAME. - # For addition plugin config files, set Q_PLUGIN_EXTRA_CONF_PATH, - # Q_PLUGIN_EXTRA_CONF_FILES. For example: - # Q_PLUGIN_EXTRA_CONF_FILES=(file1, file2) + # Set plugin-specific variables ``Q_DB_NAME``, ``Q_PLUGIN_CLASS``. + # For main plugin config file, set ``Q_PLUGIN_CONF_PATH``, ``Q_PLUGIN_CONF_FILENAME``. + # For addition plugin config files, set ``Q_PLUGIN_EXTRA_CONF_PATH``, + # ``Q_PLUGIN_EXTRA_CONF_FILES``. 
For example: + # ``Q_PLUGIN_EXTRA_CONF_FILES=(file1, file2)`` quantum_plugin_configure_common if [[ $Q_PLUGIN_CONF_PATH == '' || $Q_PLUGIN_CONF_FILENAME == '' || $Q_PLUGIN_CLASS == '' ]]; then @@ -543,8 +543,7 @@ function _configure_quantum_metadata_agent() { _quantum_setup_keystone $Q_META_CONF_FILE DEFAULT set_auth_url } -function _configure_quantum_lbaas() -{ +function _configure_quantum_lbaas() { quantum_agent_lbaas_install_agent_packages quantum_agent_lbaas_configure_common quantum_agent_lbaas_configure_agent @@ -606,17 +605,17 @@ function _quantum_setup_rootwrap() { return fi # Deploy new rootwrap filters files (owned by root). - # Wipe any existing rootwrap.d files first + # Wipe any existing ``rootwrap.d`` files first Q_CONF_ROOTWRAP_D=$QUANTUM_CONF_DIR/rootwrap.d if [[ -d $Q_CONF_ROOTWRAP_D ]]; then sudo rm -rf $Q_CONF_ROOTWRAP_D fi - # Deploy filters to $QUANTUM_CONF_DIR/rootwrap.d + # Deploy filters to ``$QUANTUM_CONF_DIR/rootwrap.d`` mkdir -p -m 755 $Q_CONF_ROOTWRAP_D cp -pr $QUANTUM_DIR/etc/quantum/rootwrap.d/* $Q_CONF_ROOTWRAP_D/ sudo chown -R root:root $Q_CONF_ROOTWRAP_D sudo chmod 644 $Q_CONF_ROOTWRAP_D/* - # Set up rootwrap.conf, pointing to $QUANTUM_CONF_DIR/rootwrap.d + # Set up ``rootwrap.conf``, pointing to ``$QUANTUM_CONF_DIR/rootwrap.d`` # location moved in newer versions, prefer new location if test -r $QUANTUM_DIR/etc/quantum/rootwrap.conf; then sudo cp -p $QUANTUM_DIR/etc/quantum/rootwrap.conf $Q_RR_CONF_FILE @@ -626,7 +625,7 @@ function _quantum_setup_rootwrap() { sudo sed -e "s:^filters_path=.*$:filters_path=$Q_CONF_ROOTWRAP_D:" -i $Q_RR_CONF_FILE sudo chown root:root $Q_RR_CONF_FILE sudo chmod 0644 $Q_RR_CONF_FILE - # Specify rootwrap.conf as first parameter to quantum-rootwrap + # Specify ``rootwrap.conf`` as first parameter to quantum-rootwrap ROOTWRAP_SUDOER_CMD="$QUANTUM_ROOTWRAP $Q_RR_CONF_FILE *" # Set up the rootwrap sudoers for quantum @@ -743,7 +742,8 @@ function _ssh_check_quantum() { # Quantum 3rd party programs 
#--------------------------- -# please refer to lib/quantum_thirdparty/README.md for details + +# please refer to ``lib/quantum_thirdparty/README.md`` for details QUANTUM_THIRD_PARTIES="" for f in $TOP_DIR/lib/quantum_thirdparty/*; do third_party=$(basename $f) diff --git a/stack.sh b/stack.sh index 1e61a3f19a..24a3d5f32c 100755 --- a/stack.sh +++ b/stack.sh @@ -3,7 +3,7 @@ # ``stack.sh`` is an opinionated OpenStack developer installation. It # installs and configures various combinations of **Ceilometer**, **Cinder**, # **Glance**, **Heat**, **Horizon**, **Keystone**, **Nova**, **Quantum** -# and **Swift** +# and **Swift**. # This script allows you to specify configuration options of what git # repositories to use, enabled services, network configuration and various @@ -12,9 +12,11 @@ # developer install. # To keep this script simple we assume you are running on a recent **Ubuntu** -# (12.04 Precise or newer) or **Fedora** (F16 or newer) machine. It -# should work in a VM or physical server. Additionally we put the list of -# ``apt`` and ``rpm`` dependencies and other configuration files in this repo. +# (12.04 Precise or newer) or **Fedora** (F16 or newer) machine. (It may work +# on other platforms but support for those platforms is left to those who added +# them to DevStack.) It should work in a VM or physical server. Additionally +# we maintain a list of ``apt`` and ``rpm`` dependencies and other configuration +# files in this repo. # Learn more and get the most recent version at http://devstack.org @@ -33,55 +35,20 @@ source $TOP_DIR/functions GetDistro -# Configure non-default repos -# =========================== - -# Repo configuration needs to occur before package installation. - -# Some dependencies are not available in Debian Wheezy official -# repositories. However, it's possible to run OpenStack from gplhost -# repository. 
-if [[ "$os_VENDOR" =~ (Debian) ]]; then - echo 'deb http://archive.gplhost.com/debian grizzly main' | sudo tee /etc/apt/sources.list.d/gplhost_wheezy-backports.list - echo 'deb http://archive.gplhost.com/debian grizzly-backports main' | sudo tee -a /etc/apt/sources.list.d/gplhost_wheezy-backports.list - apt_get update - apt_get install --force-yes gplhost-archive-keyring -fi - -# Installing Open vSwitch on RHEL6 requires enabling the RDO repo. -RHEL6_RDO_REPO_RPM=${RHEL6_RDO_REPO_RPM:-"http://rdo.fedorapeople.org/openstack/openstack-grizzly/rdo-release-grizzly-3.noarch.rpm"} -RHEL6_RDO_REPO_ID=${RHEL6_RDO_REPO_ID:-"openstack-grizzly"} -# RHEL6 requires EPEL for many Open Stack dependencies -RHEL6_EPEL_RPM=${RHEL6_EPEL_RPM:-"http://dl.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm"} - -if [[ is_fedora && $DISTRO =~ (rhel6) ]]; then - - if ! yum repolist enabled $RHEL6_RDO_REPO_ID | grep -q $RHEL6_RDO_REPO_ID; then - echo "RDO repo not detected; installing" - yum_install $RHEL6_RDO_REPO_RPM || \ - die $LINENO "Error installing RDO repo, cannot continue" - fi - - if ! yum repolist enabled epel | grep -q 'epel'; then - echo "EPEL not detected; installing" - yum_install ${RHEL6_EPEL_RPM} || \ - die $LINENO "Error installing EPEL repo, cannot continue" - fi - -fi - # Global Settings # =============== -# ``stack.sh`` is customizable through setting environment variables. If you -# want to override a setting you can set and export it:: +# ``stack.sh`` is customizable by setting environment variables. 
Override a +# default setting via export:: # # export DATABASE_PASSWORD=anothersecret # ./stack.sh # -# You can also pass options on a single line ``DATABASE_PASSWORD=simple ./stack.sh`` +# or by setting the variable on the command line:: # -# Additionally, you can put any local variables into a ``localrc`` file:: +# DATABASE_PASSWORD=simple ./stack.sh +# +# Persistent variables can be placed in a ``localrc`` file:: # # DATABASE_PASSWORD=anothersecret # DATABASE_USER=hellaroot @@ -166,6 +133,41 @@ fi VERBOSE=$(trueorfalse True $VERBOSE) +# Additional repos +# ================ + +# Some distros need to add repos beyond the defaults provided by the vendor +# to pick up required packages. + +# The Debian Wheezy official repositories do not contain all required packages, +# add gplhost repository. +if [[ "$os_VENDOR" =~ (Debian) ]]; then + echo 'deb http://archive.gplhost.com/debian grizzly main' | sudo tee /etc/apt/sources.list.d/gplhost_wheezy-backports.list + echo 'deb http://archive.gplhost.com/debian grizzly-backports main' | sudo tee -a /etc/apt/sources.list.d/gplhost_wheezy-backports.list + apt_get update + apt_get install --force-yes gplhost-archive-keyring +fi + +if [[ is_fedora && $DISTRO =~ (rhel6) ]]; then + # Installing Open vSwitch on RHEL6 requires enabling the RDO repo. + RHEL6_RDO_REPO_RPM=${RHEL6_RDO_REPO_RPM:-"http://rdo.fedorapeople.org/openstack/openstack-grizzly/rdo-release-grizzly-3.noarch.rpm"} + RHEL6_RDO_REPO_ID=${RHEL6_RDO_REPO_ID:-"openstack-grizzly"} + if ! yum repolist enabled $RHEL6_RDO_REPO_ID | grep -q $RHEL6_RDO_REPO_ID; then + echo "RDO repo not detected; installing" + yum_install $RHEL6_RDO_REPO_RPM || \ + die $LINENO "Error installing RDO repo, cannot continue" + fi + + # RHEL6 requires EPEL for many Open Stack dependencies + RHEL6_EPEL_RPM=${RHEL6_EPEL_RPM:-"http://dl.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm"} + if ! 
yum repolist enabled epel | grep -q 'epel'; then + echo "EPEL not detected; installing" + yum_install ${RHEL6_EPEL_RPM} || \ + die $LINENO "Error installing EPEL repo, cannot continue" + fi +fi + + # root Access # ----------- @@ -296,7 +298,7 @@ SERVICE_TIMEOUT=${SERVICE_TIMEOUT:-60} # Configure Projects # ================== -# Get project function libraries +# Source project function libraries source $TOP_DIR/lib/tls source $TOP_DIR/lib/horizon source $TOP_DIR/lib/keystone @@ -310,7 +312,7 @@ source $TOP_DIR/lib/quantum source $TOP_DIR/lib/baremetal source $TOP_DIR/lib/ldap -# Set the destination directories for OpenStack projects +# Set the destination directories for other OpenStack projects OPENSTACKCLIENT_DIR=$DEST/python-openstackclient PBR_DIR=$DEST/pbr @@ -565,6 +567,7 @@ failed() { # an error. It is also useful for following along as the install occurs. set -o xtrace + # Install Packages # ================ @@ -585,61 +588,51 @@ if is_service_enabled q-agt; then install_quantum_agent_packages fi -# + # System-specific preconfigure # ============================ if [[ is_fedora && $DISTRO =~ (rhel6) ]]; then - # Avoid having to configure selinux to allow things like httpd to - # access horizion files or run binaries like nodejs (LP#1175444) + # Disable selinux to avoid configuring to allow Apache access + # to Horizon files or run nodejs (LP#1175444) if selinuxenabled; then sudo setenforce 0 fi - # An old version (2.0.1) of python-crypto is probably installed on - # a fresh system, via the dependency chain - # cas->python-paramiko->python-crypto (related to anaconda). - # Unfortunately, "pip uninstall pycrypto" will remove the - # .egg-info file for this rpm-installed version, but leave most of - # the actual library files behind in /usr/lib64/python2.6/Crypto. - # When later "pip install pycrypto" happens, the built library - # will be installed over these existing files; the result is a - # useless mess of old, rpm-packaged files and pip-installed files. 
- # Unsurprisingly, the end result is it doesn't work. Thus we have - # to get rid of it now so that any packages that pip-install - # pycrypto get a "clean slate". - # (note, we have to be careful about other RPM packages specified - # pulling in python-crypto as well. That's why RHEL6 doesn't - # install python-paramiko packages for example...) + # An old version of ``python-crypto`` (2.0.1) may be installed on a + # fresh system via Anaconda and the dependency chain + # ``cas`` -> ``python-paramiko`` -> ``python-crypto``. + # ``pip uninstall pycrypto`` will remove the packaged ``.egg-info`` file + # but leave most of the actual library files behind in ``/usr/lib64/python2.6/Crypto``. + # Later ``pip install pycrypto`` will install over the packaged files resulting + # in a useless mess of old, rpm-packaged files and pip-installed files. + # Remove the package so that ``pip install python-crypto`` installs cleanly. + # Note: other RPM packages may require ``python-crypto`` as well. For example, + # RHEL6 does not install ``python-paramiko packages``. uninstall_package python-crypto - # A similar thing happens for python-lxml (a dependency of - # ipa-client, an auditing thing we don't care about). We have the - # build-dependencies the lxml pip-install will need (gcc, - # libxml2-dev & libxslt-dev) in the "general" rpm lists + # A similar situation occurs with ``python-lxml``, which is required by + # ``ipa-client``, an auditing package we don't care about. The + # build-dependencies needed for ``pip install lxml`` (``gcc``, + # ``libxml2-dev`` and ``libxslt-dev``) are present in ``files/rpms/general``. uninstall_package python-lxml - # If the dbus rpm was installed by the devstack rpm dependencies - # then you may hit a bug where the uuid isn't generated because - # the service was never started (PR#598200), causing issues for - # Nova stopping later on complaining that - # '/var/lib/dbus/machine-id' doesn't exist. 
+ # If the ``dbus`` package was installed by DevStack dependencies the + uuid may not be generated because the service was never started (PR#598200), + causing Nova to stop later on complaining that ``/var/lib/dbus/machine-id`` + does not exist. sudo service messagebus restart - # In setup.py, a "setup_requires" package is supposed to - transient. However there is a bug with rhel6 distribute where - setup_requires packages can register entry points that aren't - cleared out properly after the setup-phase; the end result is - installation failures (bz#924038). Thus we pre-install the - problem package here; this way the setup_requires dependency is - already satisfied and it will not need to be installed - transiently, meaning we avoid the issue of it not being cleaned - out properly. Note we do this before the track-depends below. + ``setup.py`` contains a ``setup_requires`` package that is supposed + to be transient. However, RHEL6 distribute has a bug where + ``setup_requires`` registers entry points that are not cleaned + out properly after the setup-phase resulting in installation failures + (bz#924038). Pre-install the problem package so the ``setup_requires`` + dependency is satisfied and it will not be installed transiently. + Note we do this before the track-depends below. pip_install hgtools - # The version of python-nose in the RHEL6 repo is incompatible - with Tempest. As a workaround: - + # RHEL6's version of ``python-nose`` is incompatible with Tempest. # Install nose 1.1 (Tempest-compatible) from EPEL install_package python-nose1.1 # Add a symlink for the new nosetests to allow tox for Tempest to @@ -850,10 +843,10 @@ fi init_service_check -# Kick off Sysstat -# ------------------------ -# run sysstat if it is enabled, this has to be early as daemon -# startup is one of the things to track. +# Sysstat +# ------- + +# If enabled, sysstat has to start early to track OpenStack service startup.
if is_service_enabled sysstat;then if [[ -n ${SCREEN_LOGDIR} ]]; then screen_it sysstat "sar -o $SCREEN_LOGDIR/$SYSSTAT_FILE $SYSSTAT_INTERVAL" @@ -967,7 +960,7 @@ if is_service_enabled n-net q-dhcp; then rm -rf ${NOVA_STATE_PATH}/networks sudo mkdir -p ${NOVA_STATE_PATH}/networks sudo chown -R ${USER} ${NOVA_STATE_PATH}/networks - # Force IP forwarding on, just on case + # Force IP forwarding on, just in case sudo sysctl -w net.ipv4.ip_forward=1 fi @@ -1018,6 +1011,7 @@ if is_service_enabled nova; then XEN_FIREWALL_DRIVER=${XEN_FIREWALL_DRIVER:-"nova.virt.firewall.IptablesFirewallDriver"} iniset $NOVA_CONF DEFAULT firewall_driver "$XEN_FIREWALL_DRIVER" + # OpenVZ # ------ @@ -1028,6 +1022,7 @@ if is_service_enabled nova; then LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.libvirt.firewall.IptablesFirewallDriver"} iniset $NOVA_CONF DEFAULT firewall_driver "$LIBVIRT_FIREWALL_DRIVER" + # Bare Metal # ---------- @@ -1050,6 +1045,7 @@ if is_service_enabled nova; then iniset $NOVA_CONF baremetal ${I/=/ } done + # PowerVM # ------- @@ -1069,8 +1065,9 @@ if is_service_enabled nova; then iniset $NOVA_CONF DEFAULT powervm_img_remote_path $POWERVM_IMG_REMOTE_PATH iniset $NOVA_CONF DEFAULT powervm_img_local_path $POWERVM_IMG_LOCAL_PATH + # vSphere API - # ------- + # ----------- elif [ "$VIRT_DRIVER" = 'vsphere' ]; then echo_summary "Using VMware vCenter driver" @@ -1081,8 +1078,9 @@ if is_service_enabled nova; then iniset $NOVA_CONF DEFAULT vmwareapi_host_password "$VMWAREAPI_PASSWORD" iniset $NOVA_CONF DEFAULT vmwareapi_cluster_name "$VMWAREAPI_CLUSTER" + # fake - # ----- + # ---- elif [ "$VIRT_DRIVER" = 'fake' ]; then echo_summary "Using fake Virt driver" @@ -1102,8 +1100,8 @@ if is_service_enabled nova; then iniset $NOVA_CONF DEFAULT scheduler_default_filters "RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter" - # Default - # ------- + # Default libvirt + # --------------- else echo_summary "Using libvirt 
virtualization driver" @@ -1296,7 +1294,6 @@ if is_service_enabled nova && is_baremetal; then screen_it baremetal "nova-baremetal-deploy-helper" fi - # Save some values we generated for later use CURRENT_RUN_TIME=$(date "+$TIMESTAMP_FORMAT") echo "# $CURRENT_RUN_TIME" >$TOP_DIR/.stackenv diff --git a/stackrc b/stackrc index 2ac564cb8e..ef39710035 100644 --- a/stackrc +++ b/stackrc @@ -73,20 +73,6 @@ CINDER_BRANCH=${CINDER_BRANCH:-master} CINDERCLIENT_REPO=${CINDERCLIENT_REPO:-${GIT_BASE}/openstack/python-cinderclient.git} CINDERCLIENT_BRANCH=${CINDERCLIENT_BRANCH:-master} -# compute service -NOVA_REPO=${NOVA_REPO:-${GIT_BASE}/openstack/nova.git} -NOVA_BRANCH=${NOVA_BRANCH:-master} - -# storage service -SWIFT_REPO=${SWIFT_REPO:-${GIT_BASE}/openstack/swift.git} -SWIFT_BRANCH=${SWIFT_BRANCH:-master} -SWIFT3_REPO=${SWIFT3_REPO:-${GIT_BASE}/fujita/swift3.git} -SWIFT3_BRANCH=${SWIFT3_BRANCH:-master} - -# python swift client library -SWIFTCLIENT_REPO=${SWIFTCLIENT_REPO:-${GIT_BASE}/openstack/python-swiftclient.git} -SWIFTCLIENT_BRANCH=${SWIFTCLIENT_BRANCH:-master} - # image catalog service GLANCE_REPO=${GLANCE_REPO:-${GIT_BASE}/openstack/glance.git} GLANCE_BRANCH=${GLANCE_BRANCH:-master} @@ -95,22 +81,30 @@ GLANCE_BRANCH=${GLANCE_BRANCH:-master} GLANCECLIENT_REPO=${GLANCECLIENT_REPO:-${GIT_BASE}/openstack/python-glanceclient.git} GLANCECLIENT_BRANCH=${GLANCECLIENT_BRANCH:-master} -# unified auth system (manages accounts/tokens) -KEYSTONE_REPO=${KEYSTONE_REPO:-${GIT_BASE}/openstack/keystone.git} -KEYSTONE_BRANCH=${KEYSTONE_BRANCH:-master} - -# a websockets/html5 or flash powered VNC console for vm instances -NOVNC_REPO=${NOVNC_REPO:-${GIT_BASE}/kanaka/noVNC.git} -NOVNC_BRANCH=${NOVNC_BRANCH:-master} +# heat service +HEAT_REPO=${HEAT_REPO:-${GIT_BASE}/openstack/heat.git} +HEAT_BRANCH=${HEAT_BRANCH:-master} -# a websockets/html5 or flash powered SPICE console for vm instances -SPICE_REPO=${SPICE_REPO:-http://anongit.freedesktop.org/git/spice/spice-html5.git} 
-SPICE_BRANCH=${SPICE_BRANCH:-master} +# python heat client library +HEATCLIENT_REPO=${HEATCLIENT_REPO:-${GIT_BASE}/openstack/python-heatclient.git} +HEATCLIENT_BRANCH=${HEATCLIENT_BRANCH:-master} # django powered web control panel for openstack HORIZON_REPO=${HORIZON_REPO:-${GIT_BASE}/openstack/horizon.git} HORIZON_BRANCH=${HORIZON_BRANCH:-master} +# unified auth system (manages accounts/tokens) +KEYSTONE_REPO=${KEYSTONE_REPO:-${GIT_BASE}/openstack/keystone.git} +KEYSTONE_BRANCH=${KEYSTONE_BRANCH:-master} + +# python keystone client library to nova that horizon uses +KEYSTONECLIENT_REPO=${KEYSTONECLIENT_REPO:-${GIT_BASE}/openstack/python-keystoneclient.git} +KEYSTONECLIENT_BRANCH=${KEYSTONECLIENT_BRANCH:-master} + +# compute service +NOVA_REPO=${NOVA_REPO:-${GIT_BASE}/openstack/nova.git} +NOVA_BRANCH=${NOVA_BRANCH:-master} + # python client library to nova that horizon (and others) use NOVACLIENT_REPO=${NOVACLIENT_REPO:-${GIT_BASE}/openstack/python-novaclient.git} NOVACLIENT_BRANCH=${NOVACLIENT_BRANCH:-master} @@ -119,9 +113,9 @@ NOVACLIENT_BRANCH=${NOVACLIENT_BRANCH:-master} OPENSTACKCLIENT_REPO=${OPENSTACKCLIENT_REPO:-${GIT_BASE}/openstack/python-openstackclient.git} OPENSTACKCLIENT_BRANCH=${OPENSTACKCLIENT_BRANCH:-master} -# python keystone client library to nova that horizon uses -KEYSTONECLIENT_REPO=${KEYSTONECLIENT_REPO:-${GIT_BASE}/openstack/python-keystoneclient.git} -KEYSTONECLIENT_BRANCH=${KEYSTONECLIENT_BRANCH:-master} +# pbr drives the setuptools configs +PBR_REPO=${PBR_REPO:-${GIT_BASE}/openstack-dev/pbr.git} +PBR_BRANCH=${PBR_BRANCH:-master} # quantum service QUANTUM_REPO=${QUANTUM_REPO:-${GIT_BASE}/openstack/quantum.git} @@ -131,21 +125,20 @@ QUANTUM_BRANCH=${QUANTUM_BRANCH:-master} QUANTUMCLIENT_REPO=${QUANTUMCLIENT_REPO:-${GIT_BASE}/openstack/python-quantumclient.git} QUANTUMCLIENT_BRANCH=${QUANTUMCLIENT_BRANCH:-master} +# storage service +SWIFT_REPO=${SWIFT_REPO:-${GIT_BASE}/openstack/swift.git} +SWIFT_BRANCH=${SWIFT_BRANCH:-master} 
+SWIFT3_REPO=${SWIFT3_REPO:-${GIT_BASE}/fujita/swift3.git} +SWIFT3_BRANCH=${SWIFT3_BRANCH:-master} + +# python swift client library +SWIFTCLIENT_REPO=${SWIFTCLIENT_REPO:-${GIT_BASE}/openstack/python-swiftclient.git} +SWIFTCLIENT_BRANCH=${SWIFTCLIENT_BRANCH:-master} + # Tempest test suite TEMPEST_REPO=${TEMPEST_REPO:-${GIT_BASE}/openstack/tempest.git} TEMPEST_BRANCH=${TEMPEST_BRANCH:-master} -# heat service -HEAT_REPO=${HEAT_REPO:-${GIT_BASE}/openstack/heat.git} -HEAT_BRANCH=${HEAT_BRANCH:-master} - -# python heat client library -HEATCLIENT_REPO=${HEATCLIENT_REPO:-${GIT_BASE}/openstack/python-heatclient.git} -HEATCLIENT_BRANCH=${HEATCLIENT_BRANCH:-master} - -# ryu service -RYU_REPO=${RYU_REPO:-${GIT_BASE}/osrg/ryu.git} -RYU_BRANCH=${RYU_BRANCH:-master} # diskimage-builder BM_IMAGE_BUILD_REPO=${BM_IMAGE_BUILD_REPO:-${GIT_BASE}/stackforge/diskimage-builder.git} @@ -157,10 +150,18 @@ BM_IMAGE_BUILD_BRANCH=${BM_IMAGE_BUILD_BRANCH:-master} BM_POSEUR_REPO=${BM_POSEUR_REPO:-${GIT_BASE}/tripleo/bm_poseur.git} BM_POSEUR_BRANCH=${BM_POSEUR_BRANCH:-master} -# pbr -# Used to drive the setuptools configs -PBR_REPO=${PBR_REPO:-${GIT_BASE}/openstack-dev/pbr.git} -PBR_BRANCH=${PBR_BRANCH:-master} +# a websockets/html5 or flash powered VNC console for vm instances +NOVNC_REPO=${NOVNC_REPO:-${GIT_BASE}/kanaka/noVNC.git} +NOVNC_BRANCH=${NOVNC_BRANCH:-master} + +# ryu service +RYU_REPO=${RYU_REPO:-${GIT_BASE}/osrg/ryu.git} +RYU_BRANCH=${RYU_BRANCH:-master} + +# a websockets/html5 or flash powered SPICE console for vm instances +SPICE_REPO=${SPICE_REPO:-http://anongit.freedesktop.org/git/spice/spice-html5.git} +SPICE_BRANCH=${SPICE_BRANCH:-master} + # Nova hypervisor configuration. We default to libvirt with **kvm** but will # drop back to **qemu** if we are unable to load the kvm module. ``stack.sh`` can @@ -184,18 +185,22 @@ case "$VIRT_DRIVER" in ;; esac -# Specify a comma-separated list of UEC images to download and install into glance. 
-# supported urls here are: + +# Images +# ------ + +# Specify a comma-separated list of images to download and install into glance. +# Supported urls here are: # * "uec-style" images: # If the file ends in .tar.gz, uncompress the tarball and and select the first # .img file inside it as the image. If present, use "*-vmlinuz*" as the kernel # and "*-initrd*" as the ramdisk -# example: http://cloud-images.ubuntu.com/releases/oneiric/release/ubuntu-11.10-server-cloudimg-amd64.tar.gz +# example: http://cloud-images.ubuntu.com/releases/precise/release/ubuntu-12.04-server-cloudimg-amd64.tar.gz # * disk image (*.img,*.img.gz) # if file ends in .img, then it will be uploaded and registered as a to # glance as a disk image. If it ends in .gz, it is uncompressed first. # example: -# http://cloud-images.ubuntu.com/releases/oneiric/release/ubuntu-11.10-server-cloudimg-armel-disk1.img +# http://cloud-images.ubuntu.com/releases/precise/release/ubuntu-12.04-server-cloudimg-armel-disk1.img # http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-rootfs.img.gz # * OpenVZ image: # OpenVZ uses its own format of image, and does not support UEC style images @@ -222,11 +227,12 @@ case "$VIRT_DRIVER" in ;; vsphere) IMAGE_URLS="";; - *) # otherwise, use the uec style image (with kernel, ramdisk, disk) + *) # Default to Cirros with kernel, ramdisk and disk image DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.1-x86_64-uec} IMAGE_URLS=${IMAGE_URLS:-"http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-uec.tar.gz"};; esac + # 5Gb default volume backing file size VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-5130M} From 174c751d051b3f6d349da70b0121f27ce6e1856e Mon Sep 17 00:00:00 2001 From: armando-migliaccio Date: Tue, 11 Jun 2013 14:09:08 -0700 Subject: [PATCH 0150/4704] Install agent packages when running the Nicira plugin. This is necessary because currently the plugin uses the dhcp amongst other agents. 
Fixes bug #1190057 Change-Id: If9e79de2cf547076e0cac7ac0f71c9fb7fc2758f --- lib/quantum_plugins/nicira | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/quantum_plugins/nicira b/lib/quantum_plugins/nicira index 7795eed8aa..d274723fc1 100644 --- a/lib/quantum_plugins/nicira +++ b/lib/quantum_plugins/nicira @@ -34,8 +34,8 @@ function quantum_plugin_create_nova_conf() { } function quantum_plugin_install_agent_packages() { - # Nicira Plugin does not run q-agt - : + # Nicira Plugin does not run q-agt, but it currently needs dhcp and metadata agents + _quantum_ovs_base_install_agent_packages } function quantum_plugin_configure_common() { From fb430899a6629ebd14abb09d31916a9d289e7af7 Mon Sep 17 00:00:00 2001 From: armando-migliaccio Date: Tue, 11 Jun 2013 15:22:58 -0700 Subject: [PATCH 0151/4704] quantum-debug configuration fails on missing PUBLIC_BRIDGE The Nicira plugin does not require an L3 agent, however the PUBLIC_BRIDGE is needed by the quantum-debug tool as well as nicira third_party configuration. Fixes bug #1190062 Change-Id: Ie60f8f340e034a35a65e54518838c1d751a481a8 --- lib/quantum_plugins/nicira | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/quantum_plugins/nicira b/lib/quantum_plugins/nicira index 7795eed8aa..f055866f6e 100644 --- a/lib/quantum_plugins/nicira +++ b/lib/quantum_plugins/nicira @@ -46,7 +46,7 @@ function quantum_plugin_configure_common() { } function quantum_plugin_configure_debug_command() { - : + sudo ovs-vsctl --no-wait -- --may-exist add-br $PUBLIC_BRIDGE } function quantum_plugin_configure_dhcp_agent() { From 97e1bd032345395dfae7291882e77398f008d2ca Mon Sep 17 00:00:00 2001 From: Giulio Fidente Date: Tue, 4 Jun 2013 05:33:52 +0200 Subject: [PATCH 0152/4704] Enables the multibackend Cinder tests in tempest when useful. The change adds in lib/tempest an optional dependency on the var $CINDER_MULTI_LVM_BACKEND which, if set, enables the multibackend tests in the tempest config. 
Change-Id: Iccafd00f5adabbbc2309fa72664bf29440744d91 --- lib/cinder | 2 +- lib/tempest | 18 +++++++++++++----- 2 files changed, 14 insertions(+), 6 deletions(-) diff --git a/lib/cinder b/lib/cinder index 7e9c2ba6e5..6636397e18 100644 --- a/lib/cinder +++ b/lib/cinder @@ -194,7 +194,7 @@ function configure_cinder() { iniset $CINDER_CONF lvmdriver-1 volume_backend_name LVM_iSCSI iniset $CINDER_CONF lvmdriver-2 volume_group $VOLUME_GROUP2 iniset $CINDER_CONF lvmdriver-2 volume_driver cinder.volume.drivers.lvm.LVMISCSIDriver - iniset $CINDER_CONF lvmdriver-2 volume_backend_name LVM_iSCSI + iniset $CINDER_CONF lvmdriver-2 volume_backend_name LVM_iSCSI_2 else iniset $CINDER_CONF DEFAULT volume_group $VOLUME_GROUP iniset $CINDER_CONF DEFAULT volume_name_template ${VOLUME_NAME_PREFIX}%s diff --git a/lib/tempest b/lib/tempest index aa66ebb872..8018166777 100644 --- a/lib/tempest +++ b/lib/tempest @@ -23,6 +23,7 @@ # ``USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION`` # ``DEFAULT_INSTANCE_TYPE`` # ``DEFAULT_INSTANCE_USER`` +# ``CINDER_MULTI_LVM_BACKEND`` # ``stack.sh`` calls the entry points in this order: # # install_tempest @@ -234,11 +235,10 @@ function configure_tempest() { iniset $TEMPEST_CONF whitebox path_to_private_key $TEMPEST_DIR/id_rsa iniset $TEMPEST_CONF whitebox db_uri $BASE_SQL_CONN/nova - - # compute admin + # Compute admin iniset $TEMPEST_CONF "compute-admin" password "$password" # DEPRECATED - # network + # Network if is_service_enabled quantum; then iniset $TEMPEST_CONF network quantum_available "True" fi @@ -247,7 +247,7 @@ function configure_tempest() { iniset $TEMPEST_CONF network public_network_id "$public_network_id" iniset $TEMPEST_CONF network public_router_id "$public_router_id" - #boto + # boto iniset $TEMPEST_CONF boto ec2_url "http://$SERVICE_HOST:8773/services/Cloud" iniset $TEMPEST_CONF boto s3_url "http://$SERVICE_HOST:${S3_SERVICE_PORT:-3333}" iniset $TEMPEST_CONF boto s3_materials_path "$BOTO_MATERIALS_PATH" @@ -255,11 +255,19 @@ function 
configure_tempest() { iniset $TEMPEST_CONF boto http_socket_timeout 30 iniset $TEMPEST_CONF boto ssh_user ${DEFAULT_INSTANCE_USER:-cirros} - # orchestration + # Orchestration if is_service_enabled heat; then iniset $TEMPEST_CONF orchestration heat_available "True" fi + # Volume + CINDER_MULTI_LVM_BACKEND=$(trueorfalse False $CINDER_MULTI_LVM_BACKEND) + if [ $CINDER_MULTI_LVM_BACKEND == "True "]; then + iniset $TEMPEST_CONF volume multi_backend_enabled "True" + iniset $TEMPEST_CONF volume backend1_name "LVM_iSCSI" + iniset $TEMPEST_CONF volume backend2_name "LVM_iSCSI_2" + fi + echo "Created tempest configuration file:" cat $TEMPEST_CONF From c3771456323126b03116bc58ec1579ba888ca132 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Thu, 13 Jun 2013 14:23:37 -0400 Subject: [PATCH 0153/4704] allow overwriting of http_image in tempest.conf the default http_image variable is an upstream cirros url in tempest. However, in the gate this can cause flakey failures if that has connection issues. Ensure that we can override this from devstack-gate to be something we are sure is going to work. Partial fix for bug #1190623 Change-Id: Ie06fc231725c235f5ee8fd17fc87d3305bc845a3 --- lib/tempest | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/lib/tempest b/lib/tempest index 8018166777..ac9c8b4490 100644 --- a/lib/tempest +++ b/lib/tempest @@ -206,6 +206,13 @@ function configure_tempest() { iniset $TEMPEST_CONF identity alt_tenant_name $ALT_TENANT_NAME iniset $TEMPEST_CONF identity admin_password "$password" + # Image + # for the gate we want to be able to override this variable so we aren't + # doing an HTTP fetch over the wide internet for this test + if [[ ! 
-z "$TEMPEST_HTTP_IMAGE" ]]; then + iniset $TEMPEST_CONF image http_image $TEMPEST_HTTP_IMAGE + fi + # Compute iniset $TEMPEST_CONF compute change_password_available False # Note(nati) current tempest don't create network for each tenant From b7196174ddc0205c6b15325b957df2f393ca4861 Mon Sep 17 00:00:00 2001 From: Eugene Nikanorov Date: Thu, 13 Jun 2013 22:11:42 +0400 Subject: [PATCH 0154/4704] Change path to loadbalancer plugin LoadbalancerPlugin path needs to be changed to reflect renaming agent_loadbalancer to loadbalancer and moving services directory one level up. Change-Id: Ib2ece411b5fb84575673a66421eb474888dd8f43 --- lib/quantum | 2 +- .../services/{agent_loadbalancer => loadbalancer} | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) rename lib/quantum_plugins/services/{agent_loadbalancer => loadbalancer} (81%) diff --git a/lib/quantum b/lib/quantum index 1a826f8545..977a835f31 100644 --- a/lib/quantum +++ b/lib/quantum @@ -187,7 +187,7 @@ source $TOP_DIR/lib/quantum_plugins/$Q_PLUGIN # ------------------------------------------- # Hardcoding for 1 service plugin for now -source $TOP_DIR/lib/quantum_plugins/services/agent_loadbalancer +source $TOP_DIR/lib/quantum_plugins/services/loadbalancer # Use security group or not if has_quantum_plugin_security_group; then diff --git a/lib/quantum_plugins/services/agent_loadbalancer b/lib/quantum_plugins/services/loadbalancer similarity index 81% rename from lib/quantum_plugins/services/agent_loadbalancer rename to lib/quantum_plugins/services/loadbalancer index ee3faa5bb0..ac8501fa0d 100644 --- a/lib/quantum_plugins/services/agent_loadbalancer +++ b/lib/quantum_plugins/services/loadbalancer @@ -7,7 +7,7 @@ set +o xtrace AGENT_LBAAS_BINARY="$QUANTUM_DIR/bin/quantum-lbaas-agent" -AGENT_LBAAS_PLUGIN=quantum.plugins.services.agent_loadbalancer.plugin.LoadBalancerPlugin +LBAAS_PLUGIN=quantum.services.loadbalancer.plugin.LoadBalancerPlugin function quantum_agent_lbaas_install_agent_packages() { if is_ubuntu || 
is_fedora; then @@ -20,14 +20,14 @@ function quantum_agent_lbaas_install_agent_packages() { function quantum_agent_lbaas_configure_common() { if [[ $Q_SERVICE_PLUGIN_CLASSES == '' ]]; then - Q_SERVICE_PLUGIN_CLASSES=$AGENT_LBAAS_PLUGIN + Q_SERVICE_PLUGIN_CLASSES=$LBAAS_PLUGIN else - Q_SERVICE_PLUGIN_CLASSES="$Q_SERVICE_PLUGIN_CLASSES,$AGENT_LBAAS_PLUGIN" + Q_SERVICE_PLUGIN_CLASSES="$Q_SERVICE_PLUGIN_CLASSES,$LBAAS_PLUGIN" fi } function quantum_agent_lbaas_configure_agent() { - LBAAS_AGENT_CONF_PATH=/etc/quantum/plugins/services/agent_loadbalancer + LBAAS_AGENT_CONF_PATH=/etc/quantum/services/loadbalancer/haproxy mkdir -p $LBAAS_AGENT_CONF_PATH LBAAS_AGENT_CONF_FILENAME="$LBAAS_AGENT_CONF_PATH/lbaas_agent.ini" From 51a3f1f6369d27193daa6132fd8bcf6ba1972bd2 Mon Sep 17 00:00:00 2001 From: Kyle Mestery Date: Thu, 13 Jun 2013 11:47:56 +0000 Subject: [PATCH 0155/4704] Fix the OVS version check to work with upstream master versions of OVS. This patch adds two functions to check version strings in the toplevel functions file. The openvswitch_agent then uses these to compare versions when checking for tunneling support. The tunneling version check now also takes into account upstream master versions of Open vSwitch, which the previous version check always failed on. Fixes bug #1190734 Change-Id: I0102fb57f8ce5529169025efa21a0996ad68bef1 --- functions | 54 +++++++++++++++++++++++++++ lib/quantum_plugins/openvswitch_agent | 4 +- 2 files changed, 56 insertions(+), 2 deletions(-) diff --git a/functions b/functions index 1257024838..8aba10da5a 100644 --- a/functions +++ b/functions @@ -1466,6 +1466,60 @@ function check_path_perm_sanity() { } +# This function recursively compares versions, and is not meant to be +# called by anything other than vercmp_numbers below. This function does +# not work with alphabetic versions. 
+# +# _vercmp_r sep ver1 ver2 +function _vercmp_r { + typeset sep + typeset -a ver1=() ver2=() + sep=$1; shift + ver1=("${@:1:sep}") + ver2=("${@:sep+1}") + + if ((ver1 > ver2)); then + echo 1; return 0 + elif ((ver2 > ver1)); then + echo -1; return 0 + fi + + if ((sep <= 1)); then + echo 0; return 0 + fi + + _vercmp_r $((sep-1)) "${ver1[@]:1}" "${ver2[@]:1}" +} + + +# This function compares two versions and is meant to be called by +# external callers. Please note the function assumes non-alphabetic +# versions. For example, this will work: +# +# vercmp_numbers 1.10 1.4 +# +# The above will return "1", as 1.10 is greater than 1.4. +# +# vercmp_numbers 5.2 6.4 +# +# The above will return "-1", as 5.2 is less than 6.4. +# +# vercmp_numbers 4.0 4.0 +# +# The above will return "0", as the versions are equal. +# +# vercmp_numbers ver1 ver2 +vercmp_numbers() { + typeset v1=$1 v2=$2 sep + typeset -a ver1 ver2 + + IFS=. read -ra ver1 <<< "$v1" + IFS=. read -ra ver2 <<< "$v2" + + _vercmp_r "${#ver1[@]}" "${ver1[@]}" "${ver2[@]}" +} + + # Restore xtrace $XTRACE diff --git a/lib/quantum_plugins/openvswitch_agent b/lib/quantum_plugins/openvswitch_agent index ee761edf66..7e83428a20 100644 --- a/lib/quantum_plugins/openvswitch_agent +++ b/lib/quantum_plugins/openvswitch_agent @@ -43,8 +43,8 @@ function quantum_plugin_configure_plugin_agent() { if [[ "$OVS_ENABLE_TUNNELING" = "True" ]]; then # Verify tunnels are supported # REVISIT - also check kernel module support for GRE and patch ports - OVS_VERSION=`ovs-vsctl --version | head -n 1 | awk '{print $4;}'` - if [ $OVS_VERSION \< "1.4" ] && ! is_service_enabled q-svc ; then + OVS_VERSION=`ovs-vsctl --version | head -n 1 | grep -E -o "[0-9]+\.[0-9]+"` + if [ `vercmp_numbers "$OVS_VERSION" "1.4"` -lt "0" ] && ! is_service_enabled q-svc ; then die $LINENO "You are running OVS version $OVS_VERSION. OVS 1.4+ is required for tunneling between multiple hosts." 
fi iniset /$Q_PLUGIN_CONF_FILE OVS enable_tunneling True From 8498487e552880d8630b5ce3a2d2d4c641e664cd Mon Sep 17 00:00:00 2001 From: armando-migliaccio Date: Tue, 11 Jun 2013 13:50:59 -0700 Subject: [PATCH 0156/4704] Install agent packages when running agents like DHCP or L3 This is because these agents rely on quantum agent packages as much as the L2 agent. Fixes bug #1190050 Change-Id: I8b0c5667f86b22e3727a316f7bb0500d0d0ba637 --- lib/quantum | 11 ++++++++--- stack.sh | 2 +- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/lib/quantum b/lib/quantum index 1a826f8545..122a2cdbe3 100644 --- a/lib/quantum +++ b/lib/quantum @@ -362,8 +362,14 @@ function install_quantumclient() { # install_quantum_agent_packages() - Collect source and prepare function install_quantum_agent_packages() { - # install packages that is specific to plugin agent - quantum_plugin_install_agent_packages + # install packages that are specific to plugin agent(s) + if is_service_enabled q-agt q-dhcp q-l3; then + quantum_plugin_install_agent_packages + fi + + if is_service_enabled q-lbaas; then + quantum_agent_lbaas_install_agent_packages + fi } # Start running processes, including screen @@ -548,7 +554,6 @@ function _configure_quantum_metadata_agent() { } function _configure_quantum_lbaas() { - quantum_agent_lbaas_install_agent_packages quantum_agent_lbaas_configure_common quantum_agent_lbaas_configure_agent } diff --git a/stack.sh b/stack.sh index 24a3d5f32c..4089531710 100755 --- a/stack.sh +++ b/stack.sh @@ -584,7 +584,7 @@ if is_service_enabled $DATABASE_BACKENDS; then install_database fi -if is_service_enabled q-agt; then +if is_service_enabled quantum; then install_quantum_agent_packages fi From 22f4d5e7365f83f7e32ce76c57a740be980820a8 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Sat, 8 Jun 2013 21:18:45 +0200 Subject: [PATCH 0157/4704] F19 rpm dependencies Fedora 19 rpm dependencies are the same as Fedora 18. * Adding f19 to the 'dept:' list. 
Change-Id: I927c31d45ac03b8bddac117e7f501d1255a25200 --- files/rpms/glance | 2 +- files/rpms/horizon | 4 ++-- files/rpms/keystone | 10 +++++----- files/rpms/nova | 6 +++--- files/rpms/quantum | 4 ++-- files/rpms/ryu | 2 +- files/rpms/swift | 4 ++-- 7 files changed, 16 insertions(+), 16 deletions(-) diff --git a/files/rpms/glance b/files/rpms/glance index 097cf3f7e0..0f113eaa01 100644 --- a/files/rpms/glance +++ b/files/rpms/glance @@ -4,7 +4,7 @@ python-argparse python-devel python-eventlet python-greenlet -python-paste-deploy #dist:f16,f17,f18 +python-paste-deploy #dist:f16,f17,f18,f19 python-routes python-sqlalchemy python-wsgiref diff --git a/files/rpms/horizon b/files/rpms/horizon index e27888a4af..b844d98665 100644 --- a/files/rpms/horizon +++ b/files/rpms/horizon @@ -18,8 +18,8 @@ python-migrate python-mox python-netaddr python-nose -python-paste #dist:f16,f17,f18 -python-paste-deploy #dist:f16,f17,f18 +python-paste #dist:f16,f17,f18,f19 +python-paste-deploy #dist:f16,f17,f18,f19 python-routes python-sphinx python-sqlalchemy diff --git a/files/rpms/keystone b/files/rpms/keystone index 078adf7718..33a4f47ccf 100644 --- a/files/rpms/keystone +++ b/files/rpms/keystone @@ -1,10 +1,10 @@ python-greenlet -python-lxml #dist:f16,f17,f18 -python-paste #dist:f16,f17,f18 -python-paste-deploy #dist:f16,f17,f18 -python-paste-script #dist:f16,f17,f18 +python-lxml #dist:f16,f17,f18,f19 +python-paste #dist:f16,f17,f18,f19 +python-paste-deploy #dist:f16,f17,f18,f19 +python-paste-script #dist:f16,f17,f18,f19 python-routes -python-setuptools #dist:f16,f17,f18 +python-setuptools #dist:f16,f17,f18,f19 python-sqlalchemy python-sqlite2 python-webob diff --git a/files/rpms/nova b/files/rpms/nova index f50d93f883..8d8a0b875a 100644 --- a/files/rpms/nova +++ b/files/rpms/nova @@ -29,11 +29,11 @@ python-lockfile python-migrate python-mox python-netaddr -python-paramiko # dist:f16,f17,f18 +python-paramiko # dist:f16,f17,f18,f19 # ^ on RHEL, brings in python-crypto which conflicts with 
version from # pip we need -python-paste # dist:f16,f17,f18 -python-paste-deploy # dist:f16,f17,f18 +python-paste # dist:f16,f17,f18,f19 +python-paste-deploy # dist:f16,f17,f18,f19 python-qpid python-routes python-sqlalchemy diff --git a/files/rpms/quantum b/files/rpms/quantum index 8827d5aa1f..6a8fd3639c 100644 --- a/files/rpms/quantum +++ b/files/rpms/quantum @@ -12,8 +12,8 @@ python-iso8601 python-kombu python-netaddr #rhel6 gets via pip -python-paste # dist:f16,f17,f18 -python-paste-deploy # dist:f16,f17,f18 +python-paste # dist:f16,f17,f18,f19 +python-paste-deploy # dist:f16,f17,f18,f19 python-qpid python-routes python-sqlalchemy diff --git a/files/rpms/ryu b/files/rpms/ryu index 7cf3bd7f0b..0f62f9fc1f 100644 --- a/files/rpms/ryu +++ b/files/rpms/ryu @@ -1,5 +1,5 @@ python-gevent python-gflags python-netifaces -python-setuptools #dist:f16,f17,f18 +python-setuptools #dist:f16,f17,f18,f19 python-sphinx diff --git a/files/rpms/swift b/files/rpms/swift index 1b36e34eab..ee1fad8c8c 100644 --- a/files/rpms/swift +++ b/files/rpms/swift @@ -8,8 +8,8 @@ python-eventlet python-greenlet python-netifaces python-nose -python-paste-deploy # dist:f16,f17,f18 -python-setuptools # dist:f16,f17,f18 +python-paste-deploy # dist:f16,f17,f18,f19 +python-setuptools # dist:f16,f17,f18,f19 python-simplejson python-webob pyxattr From a8a11cf1f6616dbd93b41a884c582f346731b997 Mon Sep 17 00:00:00 2001 From: Kui Shi Date: Sun, 16 Jun 2013 10:11:46 +0800 Subject: [PATCH 0158/4704] exercise euca.sh check the volume just created In devstack, the exercises/euca.sh test the "VOLUME". it should always check the volume which was just created. Because there may be volumes left by the last failed euca.sh, then the variable VOLUME VOLUME=`euca-describe-volumes | cut -f2` will be assigned a list of volumes, which is not expected. 
Fixes bug #1191424 Change-Id: Ie004c9cfeaef4a8a313866d21d45c7b41f1b2ba0 --- exercises/euca.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/exercises/euca.sh b/exercises/euca.sh index 16b5f8e402..7c590d09d4 100755 --- a/exercises/euca.sh +++ b/exercises/euca.sh @@ -85,7 +85,7 @@ if [[ "$ENABLED_SERVICES" =~ "c-vol" ]]; then die_if_not_set $LINENO VOLUME "Failure to create volume" # Test that volume has been created - VOLUME=`euca-describe-volumes | cut -f2` + VOLUME=`euca-describe-volumes $VOLUME | cut -f2` die_if_not_set $LINENO VOLUME "Failure to get volume" # Test volume has become available From c42ed258b4ae88c208650185298c0b673c3439c3 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Mon, 17 Jun 2013 13:23:44 +1000 Subject: [PATCH 0159/4704] Fix syntax error in lib/tempest Fix syntax error in lib/tempest: Created tempest configuration file: 21:06:30 ++ CINDER_MULTI_LVM_BACKEND=False 21:06:30 ++ '[' False == 'True ]' 21:06:30 /home/stack/devstack/lib/tempest: line 278: [: missing `]' Change-Id: Ib4e393dad12751bdfb0e6f8fac3accd380a1f760 --- lib/tempest | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index e3faa2eb9f..277c68abcd 100644 --- a/lib/tempest +++ b/lib/tempest @@ -275,7 +275,7 @@ function configure_tempest() { # Volume CINDER_MULTI_LVM_BACKEND=$(trueorfalse False $CINDER_MULTI_LVM_BACKEND) - if [ $CINDER_MULTI_LVM_BACKEND == "True "]; then + if [ $CINDER_MULTI_LVM_BACKEND == "True" ]; then iniset $TEMPEST_CONF volume multi_backend_enabled "True" iniset $TEMPEST_CONF volume backend1_name "LVM_iSCSI" iniset $TEMPEST_CONF volume backend2_name "LVM_iSCSI_2" From 901419d27ba7c0770fcdc65b32b098003dbc98e3 Mon Sep 17 00:00:00 2001 From: "Baodong (Robert) Li" Date: Mon, 10 Jun 2013 08:39:26 -0700 Subject: [PATCH 0160/4704] add support in devstack to run it with cisco plugin implements blueprint cisco-plugin-support Change-Id: Ib4716c9ef6daa059d5210631d927253bf2ba6a64 --- lib/quantum_plugins/cisco | 
327 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 327 insertions(+) create mode 100644 lib/quantum_plugins/cisco diff --git a/lib/quantum_plugins/cisco b/lib/quantum_plugins/cisco new file mode 100644 index 0000000000..92b91e4526 --- /dev/null +++ b/lib/quantum_plugins/cisco @@ -0,0 +1,327 @@ +# Quantum Cisco plugin +# --------------------------- + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + +# Specify the VSM parameters +Q_CISCO_PLUGIN_VSM_IP=${Q_CISCO_PLUGIN_VSM_IP:-} +# Specify the VSM username +Q_CISCO_PLUGIN_VSM_USERNAME=${Q_CISCO_PLUGIN_VSM_USERNAME:-admin} +# Specify the VSM password for above username +Q_CISCO_PLUGIN_VSM_PASSWORD=${Q_CISCO_PLUGIN_VSM_PASSWORD:-} +# Specify the uVEM integration bridge name +Q_CISCO_PLUGIN_INTEGRATION_BRIDGE=${Q_CISCO_PLUGIN_INTEGRATION_BRIDGE:-br-int} +# Specify if tunneling is enabled +Q_CISCO_PLUGIN_ENABLE_TUNNELING=${Q_CISCO_PLUGIN_ENABLE_TUNNELING:-True} +# Specify the VXLAN range +Q_CISCO_PLUGIN_VXLAN_ID_RANGES=${Q_CISCO_PLUGIN_VXLAN_ID_RANGES:-5000:10000} +# Specify the VLAN range +Q_CISCO_PLUGIN_VLAN_RANGES=${Q_CISCO_PLUGIN_VLAN_RANGES:-vlan:1:4094} + +# Specify ncclient package information +NCCLIENT_DIR=$DEST/ncclient +NCCLIENT_VERSION=${NCCLIENT_VERSION:-0.3.1} +NCCLIENT_REPO=${NCCLIENT_REPO:-${GIT_BASE}/CiscoSystems/ncclient.git} +NCCLIENT_BRANCH=${NCCLIENT_BRANCH:-master} + +# This routine puts a prefix on an existing function name +function _prefix_function() { + declare -F $1 > /dev/null || die "$1 doesn't exist" + eval "$(echo "${2}_${1}()"; declare -f ${1} | tail -n +2)" +} + +function _has_ovs_subplugin() { + local subplugin + for subplugin in ${Q_CISCO_PLUGIN_SUBPLUGINS[@]}; do + if [[ "$subplugin" == "openvswitch" ]]; then + return 0 + fi + done + return 1 +} + +function _has_nexus_subplugin() { + local subplugin + for subplugin in ${Q_CISCO_PLUGIN_SUBPLUGINS[@]}; do + if [[ "$subplugin" == "nexus" ]]; then + return 0 + fi + done + return 1 +} + +function
_has_n1kv_subplugin() { + local subplugin + for subplugin in ${Q_CISCO_PLUGIN_SUBPLUGINS[@]}; do + if [[ "$subplugin" == "n1kv" ]]; then + return 0 + fi + done + return 1 +} + +# This routine populates the cisco config file with the information for +# a particular nexus switch +function _config_switch() { + local cisco_cfg_file=$1 + local switch_ip=$2 + local username=$3 + local password=$4 + local ssh_port=$5 + shift 5 + + local section="NEXUS_SWITCH:$switch_ip" + iniset $cisco_cfg_file $section username $username + iniset $cisco_cfg_file $section password $password + iniset $cisco_cfg_file $section ssh_port $ssh_port + + while [[ ${#@} != 0 ]]; do + iniset $cisco_cfg_file $section $1 $2 + shift 2 + done +} + +# Prefix openvswitch plugin routines with "ovs" in order to differentiate from +# cisco plugin routines. This means, ovs plugin routines will coexist with cisco +# plugin routines in this script. +source $TOP_DIR/lib/quantum_plugins/openvswitch +_prefix_function quantum_plugin_create_nova_conf ovs +_prefix_function quantum_plugin_install_agent_packages ovs +_prefix_function quantum_plugin_configure_common ovs +_prefix_function quantum_plugin_configure_debug_command ovs +_prefix_function quantum_plugin_configure_dhcp_agent ovs +_prefix_function quantum_plugin_configure_l3_agent ovs +_prefix_function quantum_plugin_configure_plugin_agent ovs +_prefix_function quantum_plugin_configure_service ovs +_prefix_function quantum_plugin_setup_interface_driver ovs +_prefix_function has_quantum_plugin_security_group ovs + +# Check the version of the installed ncclient package +function check_ncclient_version() { +python << EOF +version = '$NCCLIENT_VERSION' +import sys +try: + import pkg_resources + import ncclient + module_version = pkg_resources.get_distribution('ncclient').version + if version != module_version: + sys.exit(1) +except: + sys.exit(1) +EOF +} + +# Install the ncclient package +function install_ncclient() { + git_clone $NCCLIENT_REPO $NCCLIENT_DIR 
$NCCLIENT_BRANCH + (cd $NCCLIENT_DIR; sudo python setup.py install) +} + +# Check if the required version of ncclient has been installed +function is_ncclient_installed() { + # Check if the Cisco ncclient repository exists + if [[ -d $NCCLIENT_DIR ]]; then + remotes=$(cd $NCCLIENT_DIR; git remote -v | grep fetch | awk '{ print $2}') + for remote in $remotes; do + if [[ $remote == $NCCLIENT_REPO ]]; then + break; + fi + done + if [[ $remote != $NCCLIENT_REPO ]]; then + return 1 + fi + else + return 1 + fi + + # Check if the ncclient is installed with the right version + if ! check_ncclient_version; then + return 1 + fi + return 0 +} + +function has_quantum_plugin_security_group() { + if _has_ovs_subplugin; then + ovs_has_quantum_plugin_security_group + else + return 1 + fi +} + +function is_quantum_ovs_base_plugin() { + # Cisco uses OVS if openvswitch subplugin is deployed + _has_ovs_subplugin + return +} + +# populate required nova configuration parameters +function quantum_plugin_create_nova_conf() { + if _has_ovs_subplugin; then + ovs_quantum_plugin_create_nova_conf + else + _quantum_ovs_base_configure_nova_vif_driver + fi +} + +function quantum_plugin_install_agent_packages() { + # Cisco plugin uses openvswitch to operate in one of its configurations + ovs_quantum_plugin_install_agent_packages +} + +# Configure common parameters +function quantum_plugin_configure_common() { + # setup default subplugins + if [ ! 
-v Q_CISCO_PLUGIN_SUBPLUGINS ]; then + declare -ga Q_CISCO_PLUGIN_SUBPLUGINS + Q_CISCO_PLUGIN_SUBPLUGINS=(openvswitch nexus) + fi + if _has_ovs_subplugin; then + ovs_quantum_plugin_configure_common + Q_PLUGIN_EXTRA_CONF_PATH=etc/quantum/plugins/cisco + Q_PLUGIN_EXTRA_CONF_FILES=(cisco_plugins.ini) + else + Q_PLUGIN_CONF_PATH=etc/quantum/plugins/cisco + Q_PLUGIN_CONF_FILENAME=cisco_plugins.ini + fi + Q_PLUGIN_CLASS="quantum.plugins.cisco.network_plugin.PluginV2" + Q_DB_NAME=cisco_quantum +} + +function quantum_plugin_configure_debug_command() { + if _has_ovs_subplugin; then + ovs_quantum_plugin_configure_debug_command + fi +} + +function quantum_plugin_configure_dhcp_agent() { + iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_agent_manager quantum.agent.dhcp_agent.DhcpAgentWithStateReport +} + +function quantum_plugin_configure_l3_agent() { + if _has_ovs_subplugin; then + ovs_quantum_plugin_configure_l3_agent + fi +} + +function _configure_nexus_subplugin() { + local cisco_cfg_file=$1 + + # Install a known compatible ncclient from the Cisco repository if necessary + if ! is_ncclient_installed; then + # Preserve the two global variables + local offline=$OFFLINE + local reclone=$RECLONE + # Change their values to allow installation + OFFLINE=False + RECLONE=yes + install_ncclient + # Restore their values + OFFLINE=$offline + RECLONE=$reclone + fi + + # Setup default nexus switch information + if [ ! 
-v Q_CISCO_PLUGIN_SWITCH_INFO ]; then + declare -A Q_CISCO_PLUGIN_SWITCH_INFO + HOST_NAME=$(hostname) + Q_CISCO_PLUGIN_SWITCH_INFO=([1.1.1.1]=stack:stack:22:${HOST_NAME}:1/10) + else + iniset $cisco_cfg_file CISCO nexus_driver quantum.plugins.cisco.nexus.cisco_nexus_network_driver_v2.CiscoNEXUSDriver + fi + + # Setup the switch configurations + local nswitch + local sw_info + local segment + local sw_info_array + declare -i count=0 + for nswitch in ${!Q_CISCO_PLUGIN_SWITCH_INFO[@]}; do + sw_info=${Q_CISCO_PLUGIN_SWITCH_INFO[$nswitch]} + sw_info_array=${sw_info//:/ } + sw_info_array=( $sw_info_array ) + count=${#sw_info_array[@]} + if [[ $count < 5 || $(( ($count-3) % 2 )) != 0 ]]; then + die $LINENO "Incorrect switch configuration: ${Q_CISCO_PLUGIN_SWITCH_INFO[$nswitch]}" + fi + _config_switch $cisco_cfg_file $nswitch ${sw_info_array[@]} + done +} + +# Configure n1kv plugin +function _configure_n1kv_subplugin() { + local cisco_cfg_file=$1 + + # populate the cisco plugin cfg file with the VSM information + echo "Configuring n1kv in $cisco_cfg_file-- $Q_CISCO_PLUGIN_VSM_IP $Q_CISCO_PLUGIN_VSM_USERNAME $Q_CISCO_PLUGIN_VSM_PASSWORD" + iniset $cisco_cfg_file N1KV:$Q_CISCO_PLUGIN_VSM_IP username $Q_CISCO_PLUGIN_VSM_USERNAME + iniset $cisco_cfg_file N1KV:$Q_CISCO_PLUGIN_VSM_IP password $Q_CISCO_PLUGIN_VSM_PASSWORD + + iniset $cisco_cfg_file CISCO_N1K integration_bridge $Q_CISCO_PLUGIN_INTEGRATION_BRIDGE + iniset $cisco_cfg_file CISCO_N1K enable_tunneling $Q_CISCO_PLUGIN_ENABLE_TUNNELING + iniset $cisco_cfg_file CISCO_N1K vxlan_id_ranges $Q_CISCO_PLUGIN_VXLAN_ID_RANGES + iniset $cisco_cfg_file CISCO_N1K network_vlan_ranges $Q_CISCO_PLUGIN_VLAN_RANGES + + # Setup the integration bridge by calling the ovs_base + OVS_BRIDGE=$Q_CISCO_PLUGIN_INTEGRATION_BRIDGE + _quantum_ovs_base_setup_bridge $OVS_BRIDGE +} + +function quantum_plugin_configure_plugin_agent() { + if _has_ovs_subplugin; then + ovs_quantum_plugin_configure_plugin_agent + fi +} + +function 
quantum_plugin_configure_service() { + local subplugin + local cisco_cfg_file + + if _has_ovs_subplugin; then + ovs_quantum_plugin_configure_service + cisco_cfg_file=/${Q_PLUGIN_EXTRA_CONF_FILES[0]} + else + cisco_cfg_file=/$Q_PLUGIN_CONF_FILE + fi + + # Setup the [CISCO_PLUGINS] section + if [[ ${#Q_CISCO_PLUGIN_SUBPLUGINS[@]} > 2 ]]; then + die $LINENO "At most two subplugins are supported." + fi + + if _has_ovs_subplugin && _has_n1kv_subplugin; then + die $LINENO "OVS subplugin and n1kv subplugin cannot coexist" + fi + + # Setup the subplugins + inicomment $cisco_cfg_file CISCO_PLUGINS nexus_plugin + inicomment $cisco_cfg_file CISCO_PLUGINS vswitch_plugin + inicomment $cisco_cfg_file CISCO_TEST host + for subplugin in ${Q_CISCO_PLUGIN_SUBPLUGINS[@]}; do + case $subplugin in + nexus) iniset $cisco_cfg_file CISCO_PLUGINS nexus_plugin quantum.plugins.cisco.nexus.cisco_nexus_plugin_v2.NexusPlugin;; + openvswitch) iniset $cisco_cfg_file CISCO_PLUGINS vswitch_plugin quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPluginV2;; + n1kv) iniset $cisco_cfg_file CISCO_PLUGINS vswitch_plugin quantum.plugins.cisco.n1kv.n1kv_quantum_plugin.N1kvQuantumPluginV2;; + *) die $LINENO "Unsupported cisco subplugin: $subplugin";; + esac + done + + if _has_nexus_subplugin; then + _configure_nexus_subplugin $cisco_cfg_file + fi + + if _has_n1kv_subplugin; then + _configure_n1kv_subplugin $cisco_cfg_file + fi +} + +function quantum_plugin_setup_interface_driver() { + local conf_file=$1 + iniset $conf_file DEFAULT interface_driver quantum.agent.linux.interface.OVSInterfaceDriver +} + +# Restore xtrace +$MY_XTRACE From e7a2ba45a72a838f47a1fafc67c855c159a269c0 Mon Sep 17 00:00:00 2001 From: Brad Topol Date: Mon, 17 Jun 2013 15:13:49 -0500 Subject: [PATCH 0161/4704] Update the ou name of Group to be the new default of UserGroups This update sets the ou value for groups to be the new keystone ldap default of UserGroups Fixes Bug1191807 Change-Id: 
I9b7383100045155ca35b04190f42be641a0c6ea8 --- files/ldap/openstack.ldif | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/files/ldap/openstack.ldif b/files/ldap/openstack.ldif index 2b76372609..f810fe8d2e 100644 --- a/files/ldap/openstack.ldif +++ b/files/ldap/openstack.ldif @@ -4,9 +4,9 @@ objectClass: dcObject objectClass: organizationalUnit ou: openstack -dn: ou=Groups,dc=openstack,dc=org +dn: ou=UserGroups,dc=openstack,dc=org objectClass: organizationalUnit -ou: Groups +ou: UserGroups dn: ou=Users,dc=openstack,dc=org objectClass: organizationalUnit From fa868cb59c5847d1836d94977774bd3fafa87f27 Mon Sep 17 00:00:00 2001 From: Daniel Jones Date: Tue, 18 Jun 2013 15:28:01 -0500 Subject: [PATCH 0162/4704] Disable vnc on Power Systems Power systems don't have graphics adapters, so we can't simulate a VNC console. This patch removes that from the default nova configuration if the system architecture is ppc64. Change-Id: I129d180b712115e5c275241740d34805fea23e8b --- functions | 7 +++++++ stack.sh | 4 ++++ 2 files changed, 11 insertions(+) diff --git a/functions b/functions index 8aba10da5a..c611e0046c 100644 --- a/functions +++ b/functions @@ -471,6 +471,13 @@ function exit_distro_not_supported { fi } +# Utility function for checking machine architecture +# is_arch arch-type +function is_arch { + ARCH_TYPE=$1 + + [ "($uname -m)" = "$ARCH_TYPE" ] +} # git clone only if directory doesn't exist already. 
Since ``DEST`` might not # be owned by the installation user, we create the directory and change the diff --git a/stack.sh b/stack.sh index 4089531710..f8d546fc1c 100755 --- a/stack.sh +++ b/stack.sh @@ -1108,6 +1108,10 @@ if is_service_enabled nova; then iniset $NOVA_CONF DEFAULT compute_driver "libvirt.LibvirtDriver" LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.libvirt.firewall.IptablesFirewallDriver"} iniset $NOVA_CONF DEFAULT firewall_driver "$LIBVIRT_FIREWALL_DRIVER" + # Power architecture currently does not support graphical consoles. + if is_arch "ppc64"; then + iniset $NOVA_CONF DEFAULT vnc_enabled "false" + fi fi init_nova_cells From e3111329f1bf82d07859ffdb8cf3e98fdd5cb2ad Mon Sep 17 00:00:00 2001 From: Steve Baker Date: Wed, 19 Jun 2013 13:57:31 +1200 Subject: [PATCH 0163/4704] Use heat-manage to sync heat db. heat-db-setup has been deprecated and doesn't work when devstack uses postgres and mysql isn't installed. This is causing postgres gating to fail for heat. Change-Id: I84f0a2d40f0033e52c87b6f0c9c7265471134ffe --- lib/heat | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/heat b/lib/heat index 4d2f84e1c4..13bf130b15 100644 --- a/lib/heat +++ b/lib/heat @@ -158,7 +158,7 @@ function init_heat() { # (re)create heat database recreate_database heat utf8 - $HEAT_DIR/bin/heat-db-setup $os_PACKAGE -r $DATABASE_PASSWORD + $HEAT_DIR/bin/heat-manage db_sync create_heat_cache_dir } From 68ac03c7f269adda3739ef3cdb7a466f38e3b2b2 Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Thu, 6 Jun 2013 16:22:34 +0100 Subject: [PATCH 0164/4704] xenapi: separate disk for cinder volumes Some kernels have lockup issues while serving cinder volumes from file-loopback-lvm kind of cinder setup. This patch provides a way to create an additional drive to the OpenStack VM, and use that to store cinder volumes, thus eliminating the lockup issue. It will help when testing XenServer. 
Now, you can specify devices for stack-volumes through: VOLUME_BACKING_DEVICE In case you are using CINDER_MULTI_LVM_BACKEND, you can use VOLUME_BACKING_DEVICE2 as well. Xenserver: Should you whish to use a 10 gig disk backend for your cinder volumes, specify: XEN_XVDB_SIZE_GB=10 VOLUME_BACKING_DEVICE=/dev/xvdb Citrix is using this approach on its internal CI system to run tests against OpenStack. This is a workaround for this bug: https://bugs.launchpad.net/cinder/+bug/1023755 Related to blueprint xenapi-devstack-cleanup Change-Id: Iee633d2704185bfbf9234882654c47b850fa168a --- lib/cinder | 39 ++++++++++++++++++++++-------------- tools/xen/install_os_domU.sh | 13 ++++++++++++ tools/xen/xenrc | 10 +++++++++ 3 files changed, 47 insertions(+), 15 deletions(-) diff --git a/lib/cinder b/lib/cinder index 0eabf400de..c572db49d2 100644 --- a/lib/cinder +++ b/lib/cinder @@ -69,10 +69,12 @@ CINDER_PERIODIC_INTERVAL=${CINDER_PERIODIC_INTERVAL:-60} # Name of the lvm volume groups to use/create for iscsi volumes VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes} VOLUME_BACKING_FILE=${VOLUME_BACKING_FILE:-$DATA_DIR/${VOLUME_GROUP}-backing-file} +VOLUME_BACKING_DEVICE=${VOLUME_BACKING_DEVICE:-} # VOLUME_GROUP2 is used only if CINDER_MULTI_LVM_BACKEND = True VOLUME_GROUP2=${VOLUME_GROUP2:-stack-volumes2} VOLUME_BACKING_FILE2=${VOLUME_BACKING_FILE2:-$DATA_DIR/${VOLUME_GROUP2}-backing-file} +VOLUME_BACKING_DEVICE2=${VOLUME_BACKING_DEVICE2:-} VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-} @@ -335,28 +337,35 @@ create_cinder_volume_group() { # ``/opt/stack/data``. if ! 
sudo vgs $VOLUME_GROUP; then - # Only create if the file doesn't already exists - [[ -f $VOLUME_BACKING_FILE ]] || truncate -s $VOLUME_BACKING_FILE_SIZE $VOLUME_BACKING_FILE - - DEV=`sudo losetup -f --show $VOLUME_BACKING_FILE` + if [ -z "$VOLUME_BACKING_DEVICE" ]; then + # Only create if the file doesn't already exists + [[ -f $VOLUME_BACKING_FILE ]] || truncate -s $VOLUME_BACKING_FILE_SIZE $VOLUME_BACKING_FILE + DEV=`sudo losetup -f --show $VOLUME_BACKING_FILE` - # Only create if the loopback device doesn't contain $VOLUME_GROUP - if ! sudo vgs $VOLUME_GROUP; then - sudo vgcreate $VOLUME_GROUP $DEV + # Only create if the loopback device doesn't contain $VOLUME_GROUP + if ! sudo vgs $VOLUME_GROUP; then + sudo vgcreate $VOLUME_GROUP $DEV + fi + else + sudo vgcreate $VOLUME_GROUP $VOLUME_BACKING_DEVICE fi fi if [ "$CINDER_MULTI_LVM_BACKEND" = "True" ]; then #set up the second volume if CINDER_MULTI_LVM_BACKEND is enabled if ! sudo vgs $VOLUME_GROUP2; then - # Only create if the file doesn't already exists - [[ -f $VOLUME_BACKING_FILE2 ]] || truncate -s $VOLUME_BACKING_FILE_SIZE $VOLUME_BACKING_FILE2 - - DEV=`sudo losetup -f --show $VOLUME_BACKING_FILE2` - - # Only create if the loopback device doesn't contain $VOLUME_GROUP - if ! sudo vgs $VOLUME_GROUP2; then - sudo vgcreate $VOLUME_GROUP2 $DEV + if [ -z "$VOLUME_BACKING_DEVICE2" ]; then + # Only create if the file doesn't already exists + [[ -f $VOLUME_BACKING_FILE2 ]] || truncate -s $VOLUME_BACKING_FILE_SIZE $VOLUME_BACKING_FILE2 + + DEV=`sudo losetup -f --show $VOLUME_BACKING_FILE2` + + # Only create if the loopback device doesn't contain $VOLUME_GROUP + if ! 
sudo vgs $VOLUME_GROUP2; then + sudo vgcreate $VOLUME_GROUP2 $DEV + fi + else + sudo vgcreate $VOLUME_GROUP2 $VOLUME_BACKING_DEVICE2 fi fi fi diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh index a744869288..be3b540d4b 100755 --- a/tools/xen/install_os_domU.sh +++ b/tools/xen/install_os_domU.sh @@ -259,6 +259,19 @@ fi FLAT_NETWORK_BRIDGE=$(bridge_for "$VM_BRIDGE_OR_NET_NAME") append_kernel_cmdline "$GUEST_NAME" "flat_network_bridge=${FLAT_NETWORK_BRIDGE}" +# Add a separate xvdb, if it was requested +if [[ "0" != "$XEN_XVDB_SIZE_GB" ]]; then + vm=$(xe vm-list name-label="$GUEST_NAME" --minimal) + + # Add a new disk + localsr=$(get_local_sr) + extra_vdi=$(xe vdi-create \ + name-label=xvdb-added-by-devstack \ + virtual-size="${XEN_XVDB_SIZE_GB}GiB" \ + sr-uuid=$localsr type=user) + xe vbd-create vm-uuid=$vm vdi-uuid=$extra_vdi device=1 +fi + # create a snapshot before the first boot # to allow a quick re-run with the same settings xe vm-snapshot vm="$GUEST_NAME" new-name-label="$SNAME_FIRST_BOOT" diff --git a/tools/xen/xenrc b/tools/xen/xenrc index 0ed3a6a7e5..3e2396438a 100644 --- a/tools/xen/xenrc +++ b/tools/xen/xenrc @@ -76,4 +76,14 @@ UBUNTU_INST_NAMESERVERS="" UBUNTU_INST_NETMASK="" UBUNTU_INST_GATEWAY="" +# Create a separate xvdb. Tis could be used as a backing device for cinder +# volumes. Specify +# XEN_XVDB_SIZE_GB=10 +# VOLUME_BACKING_DEVICE=/dev/xvdb +# in your localrc to avoid kernel lockups: +# https://bugs.launchpad.net/cinder/+bug/1023755 +# +# Set the size to 0 to avoid creation of additional disk. +XEN_XVDB_SIZE_GB=0 + source ../../stackrc From 5a56cd6283f918e6022dcc47906fb86a739496dc Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Mon, 17 Jun 2013 13:54:43 +0100 Subject: [PATCH 0165/4704] xenapi: Cleanup networking OpenStack VM had 4 interfaces, from which eth0 was connected to HIMN (Host Internal Management Network), and not used at all. 
The reason for not using this interface according to johngarbutt is: - VNC console would not work - migrations would not work - many other things This change removes the interface, thus devstack is providing the same architecture as described in the official documentation, YAGNI applied on the HIMN: http://goo.gl/dmrNF The change also modifies some defaults, so it's easier to get started: - The Ubuntu VM is network-installed through the management network - OS VM is getting its address on the management network through DHCP - Default Public IP addresses aligned to stack.sh defaults - OS Services are listening on Management Interface Related to blueprint xenapi-devstack-cleanup Change-Id: I73c9751ade6f4786b8b682a1994cb87f0a624379 --- lib/nova | 2 +- stack.sh | 3 ++ stackrc | 1 - tools/xen/README.md | 25 +++-------- tools/xen/build_xva.sh | 72 +++++++++++++++---------------- tools/xen/functions | 16 +++++++ tools/xen/install_os_domU.sh | 19 +++++--- tools/xen/templates/interfaces.in | 23 ---------- tools/xen/xenrc | 32 ++++++++------ 9 files changed, 96 insertions(+), 97 deletions(-) delete mode 100644 tools/xen/templates/interfaces.in diff --git a/lib/nova b/lib/nova index cac6330cc2..afc540e7c6 100644 --- a/lib/nova +++ b/lib/nova @@ -77,7 +77,7 @@ SPICE_DIR=$DEST/spice-html5 # Set defaults according to the virt driver if [ "$VIRT_DRIVER" = 'xenserver' ]; then - PUBLIC_INTERFACE_DEFAULT=eth3 + PUBLIC_INTERFACE_DEFAULT=eth2 GUEST_INTERFACE_DEFAULT=eth1 # Allow ``build_domU.sh`` to specify the flat network bridge via kernel args FLAT_NETWORK_BRIDGE_DEFAULT=$(sed -e 's/.* flat_network_bridge=\([[:alnum:]]*\).*$/\1/g' /proc/cmdline) diff --git a/stack.sh b/stack.sh index 4089531710..9321b423d5 100755 --- a/stack.sh +++ b/stack.sh @@ -1001,6 +1001,9 @@ if is_service_enabled nova; then if [ "$VIRT_DRIVER" = 'xenserver' ]; then echo_summary "Using XenServer virtualization driver" + if [ -z "$XENAPI_CONNECTION_URL" ]; then + die $LINENO "XENAPI_CONNECTION_URL is not 
specified" + fi read_password XENAPI_PASSWORD "ENTER A PASSWORD TO USE FOR XEN." iniset $NOVA_CONF DEFAULT compute_driver "xenapi.XenAPIDriver" iniset $NOVA_CONF DEFAULT xenapi_connection_url "$XENAPI_CONNECTION_URL" diff --git a/stackrc b/stackrc index ef39710035..4263a9d41d 100644 --- a/stackrc +++ b/stackrc @@ -178,7 +178,6 @@ case "$VIRT_DRIVER" in ;; xenserver) # Xen config common to nova and quantum - XENAPI_CONNECTION_URL=${XENAPI_CONNECTION_URL:-"http://169.254.0.1"} XENAPI_USER=${XENAPI_USER:-"root"} ;; *) diff --git a/tools/xen/README.md b/tools/xen/README.md index 8f0c10d0d7..9f3908028d 100644 --- a/tools/xen/README.md +++ b/tools/xen/README.md @@ -34,6 +34,13 @@ The `install_os_domU.sh` script will: `eth0`. - eth3 - Public interface, connected to `PUB_BRIDGE_OR_NET_NAME` defaults to `"OpenStack Public Network"`. + - After the Ubuntu install process finished, the network configuration is + modified to: + - eth0 - Management interface, connected to `MGT_BRIDGE_OR_NET_NAME` + - eth1 - VM interface, connected to `VM_BRIDGE_OR_NET_NAME` + - eth2 - Public interface, connected to `PUB_BRIDGE_OR_NET_NAME` + - (eth3) - Optional network interface if quantum is used, to enforce xapi to + create the underlying bridge. - Start devstack inside the created OpenStack VM ## Step 1: Install Xenserver @@ -92,30 +99,12 @@ Of course, use real passwords if this machine is exposed. MULTI_HOST=1 # Give extra time for boot ACTIVE_TIMEOUT=45 - # Host Interface, i.e. the interface on the OpenStack vm you want to expose - # the services on. The default is eth3, which means the public network, but - # as the public network is going to be virtual, we are setting the services - # to listen on the management network, which defaults to 'xenbr0', the - # XenServer's network. 
- HOST_IP_IFACE=eth2 - - # Use DHCP server to configure the Management IP of OpenStack VM - MGT_IP="dhcp" # Settings for netinstalling Ubuntu UBUNTU_INST_RELEASE=precise - # First time Ubuntu network install params, use the DHCP server on the - # management network - UBUNTU_INST_IFACE="eth2" - UBUNTU_INST_IP="dhcp" - # NOTE: the value of FLAT_NETWORK_BRIDGE will automatically be determined # by install_os_domU.sh script. - - # Public IP address is aligned with the devstack defaults (see FLOATING_RANGE) - PUB_IP=172.24.4.10 - PUB_NETMASK=255.255.255.0 EOF ## Step 4: Run `./install_os_domU.sh` from the `tools/xen` directory diff --git a/tools/xen/build_xva.sh b/tools/xen/build_xva.sh index b0fd003d52..d0cdf17391 100755 --- a/tools/xen/build_xva.sh +++ b/tools/xen/build_xva.sh @@ -32,6 +32,41 @@ source xenrc # GUEST_NAME="$1" +function _print_interface_config() { + local device_nr + local ip_address + local netmask + + device_nr="$1" + ip_address="$2" + netmask="$3" + + local device + + device="eth${device_nr}" + + echo "auto $device" + if [ $ip_address == "dhcp" ]; then + echo "iface $device inet dhcp" + else + echo "iface $device inet static" + echo " address $ip_address" + echo " netmask $netmask" + fi + + # Turn off tx checksumming for better performance + echo " post-up ethtool -K $device tx off" +} + +function print_interfaces_config() { + echo "auto lo" + echo "iface lo inet loopback" + + _print_interface_config $PUB_DEV_NR $PUB_IP $PUB_NETMASK + _print_interface_config $VM_DEV_NR $VM_IP $VM_NETMASK + _print_interface_config $MGT_DEV_NR $MGT_IP $MGT_NETMASK +} + # # Mount the VDI # @@ -81,42 +116,7 @@ $HOSTS_FILE_IP $GUEST_NAME EOF # Configure the network -INTERFACES=$STAGING_DIR/etc/network/interfaces -TEMPLATES_DIR=$TOP_DIR/templates -cp $TEMPLATES_DIR/interfaces.in $INTERFACES -if [ $VM_IP == "dhcp" ]; then - echo 'eth1 on dhcp' - sed -e "s,iface eth1 inet static,iface eth1 inet dhcp,g" -i $INTERFACES - sed -e '/@ETH1_/d' -i $INTERFACES -else - sed -e 
"s,@ETH1_IP@,$VM_IP,g" -i $INTERFACES - sed -e "s,@ETH1_NETMASK@,$VM_NETMASK,g" -i $INTERFACES -fi - -if [ $MGT_IP == "dhcp" ]; then - echo 'eth2 on dhcp' - sed -e "s,iface eth2 inet static,iface eth2 inet dhcp,g" -i $INTERFACES - sed -e '/@ETH2_/d' -i $INTERFACES -else - sed -e "s,@ETH2_IP@,$MGT_IP,g" -i $INTERFACES - sed -e "s,@ETH2_NETMASK@,$MGT_NETMASK,g" -i $INTERFACES -fi - -if [ $PUB_IP == "dhcp" ]; then - echo 'eth3 on dhcp' - sed -e "s,iface eth3 inet static,iface eth3 inet dhcp,g" -i $INTERFACES - sed -e '/@ETH3_/d' -i $INTERFACES -else - sed -e "s,@ETH3_IP@,$PUB_IP,g" -i $INTERFACES - sed -e "s,@ETH3_NETMASK@,$PUB_NETMASK,g" -i $INTERFACES -fi - -if [ "$ENABLE_GI" == "true" ]; then - cat <>$INTERFACES -auto eth0 -iface eth0 inet dhcp -EOF -fi +print_interfaces_config > $STAGING_DIR/etc/network/interfaces # Gracefully cp only if source file/dir exists function cp_it { diff --git a/tools/xen/functions b/tools/xen/functions index ebfd4835a2..f22a561688 100644 --- a/tools/xen/functions +++ b/tools/xen/functions @@ -224,3 +224,19 @@ function append_kernel_cmdline() pv_args=$(xe vm-param-get param-name=PV-args uuid=$vm) xe vm-param-set PV-args="$pv_args $kernel_args" uuid=$vm } + +function destroy_all_vifs_of() +{ + local vm_name_label + + vm_name_label="$1" + + local vm + + vm=$(_vm_uuid "$vm_name_label") + IFS=, + for vif in $(xe vif-list vm-uuid=$vm --minimal); do + xe vif-destroy uuid="$vif" + done + unset IFS +} diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh index a744869288..be4aa16394 100755 --- a/tools/xen/install_os_domU.sh +++ b/tools/xen/install_os_domU.sh @@ -238,6 +238,15 @@ else vm_uuid=$(xe vm-install template="$TNAME" new-name-label="$GUEST_NAME") fi +## Setup network cards +# Wipe out all +destroy_all_vifs_of "$GUEST_NAME" +# Tenant network +add_interface "$GUEST_NAME" "$VM_BRIDGE_OR_NET_NAME" "$VM_DEV_NR" +# Management network +add_interface "$GUEST_NAME" "$MGT_BRIDGE_OR_NET_NAME" "$MGT_DEV_NR" +# Public network 
+add_interface "$GUEST_NAME" "$PUB_BRIDGE_OR_NET_NAME" "$PUB_DEV_NR" # # Inject DevStack inside VM disk @@ -248,7 +257,7 @@ $THIS_DIR/build_xva.sh "$GUEST_NAME" # is created by XenServer). This is required for Quantum. Also pass that as a # kernel parameter for DomU if is_service_enabled quantum; then - add_interface "$GUEST_NAME" "$XEN_INT_BRIDGE_OR_NET_NAME" "4" + add_interface "$GUEST_NAME" "$XEN_INT_BRIDGE_OR_NET_NAME" $XEN_INT_DEV_NR XEN_INTEGRATION_BRIDGE=$(bridge_for "$XEN_INT_BRIDGE_OR_NET_NAME") append_kernel_cmdline \ @@ -275,19 +284,19 @@ function ssh_no_check() { # Get hold of the Management IP of OpenStack VM OS_VM_MANAGEMENT_ADDRESS=$MGT_IP if [ $OS_VM_MANAGEMENT_ADDRESS == "dhcp" ]; then - OS_VM_MANAGEMENT_ADDRESS=$(find_ip_by_name $GUEST_NAME 2) + OS_VM_MANAGEMENT_ADDRESS=$(find_ip_by_name $GUEST_NAME $MGT_DEV_NR) fi # Get hold of the Service IP of OpenStack VM -if [ $HOST_IP_IFACE == "eth2" ]; then +if [ $HOST_IP_IFACE == "eth${MGT_DEV_NR}" ]; then OS_VM_SERVICES_ADDRESS=$MGT_IP if [ $MGT_IP == "dhcp" ]; then - OS_VM_SERVICES_ADDRESS=$(find_ip_by_name $GUEST_NAME 2) + OS_VM_SERVICES_ADDRESS=$(find_ip_by_name $GUEST_NAME $MGT_DEV_NR) fi else OS_VM_SERVICES_ADDRESS=$PUB_IP if [ $PUB_IP == "dhcp" ]; then - OS_VM_SERVICES_ADDRESS=$(find_ip_by_name $GUEST_NAME 3) + OS_VM_SERVICES_ADDRESS=$(find_ip_by_name $GUEST_NAME $PUB_DEV_NR) fi fi diff --git a/tools/xen/templates/interfaces.in b/tools/xen/templates/interfaces.in deleted file mode 100644 index 74b41ccfcb..0000000000 --- a/tools/xen/templates/interfaces.in +++ /dev/null @@ -1,23 +0,0 @@ -auto lo -iface lo inet loopback - -# If eth3 is static, the order should not matter -# and eth0 will have the default gateway. If not, -# we probably want the default gateway to be -# what is on the public interface. Hence changed -# the order here. 
-auto eth3 -iface eth3 inet static - address @ETH3_IP@ - netmask @ETH3_NETMASK@ - -auto eth1 -iface eth1 inet static - address @ETH1_IP@ - netmask @ETH1_NETMASK@ -post-up ethtool -K eth1 tx off - -auto eth2 -iface eth2 inet static - address @ETH2_IP@ - netmask @ETH2_NETMASK@ diff --git a/tools/xen/xenrc b/tools/xen/xenrc index 0ed3a6a7e5..2bd7be7e37 100644 --- a/tools/xen/xenrc +++ b/tools/xen/xenrc @@ -30,32 +30,35 @@ XEN_INT_BRIDGE_OR_NET_NAME="OpenStack VM Integration Network" # VM Password GUEST_PASSWORD=${GUEST_PASSWORD:-secrete} +# Extracted variables for OpenStack VM network device numbers. +# Make sure, they form a continous sequence starting from 0 +MGT_DEV_NR=0 +VM_DEV_NR=1 +PUB_DEV_NR=2 +XEN_INT_DEV_NR=3 + # Host Interface, i.e. the interface on the nova vm you want to expose the -# services on. Usually eth2 (management network) or eth3 (public network) and -# not eth0 (private network with XenServer host) or eth1 (VM traffic network) -HOST_IP_IFACE=${HOST_IP_IFACE:-eth3} +# services on. Usually the device connected to the management network or the +# one connected to the public network is used. 
+HOST_IP_IFACE=${HOST_IP_IFACE:-"eth${MGT_DEV_NR}"} # # Our nova host's network info # # Management network -MGT_IP=${MGT_IP:-172.16.100.55} -MGT_NETMASK=${MGT_NETMASK:-255.255.255.0} +MGT_IP=${MGT_IP:-dhcp} +MGT_NETMASK=${MGT_NETMASK:-ignored} # VM Network VM_IP=${VM_IP:-10.255.255.255} VM_NETMASK=${VM_NETMASK:-255.255.255.0} # Public network -PUB_IP=${PUB_IP:-192.168.1.55} +# Aligned with stack.sh - see FLOATING_RANGE +PUB_IP=${PUB_IP:-172.24.4.10} PUB_NETMASK=${PUB_NETMASK:-255.255.255.0} -# Decide if you should enable eth0, -# the guest installer network -# You need to disable this on xcp-xapi on Ubuntu 12.04 -ENABLE_GI=true - # Ubuntu install settings UBUNTU_INST_RELEASE="oneiric" UBUNTU_INST_TEMPLATE_NAME="Ubuntu 11.10 (64-bit) for DevStack" @@ -69,8 +72,11 @@ UBUNTU_INST_HTTP_DIRECTORY="/ubuntu" UBUNTU_INST_HTTP_PROXY="" UBUNTU_INST_LOCALE="en_US" UBUNTU_INST_KEYBOARD="us" -# network configuration for ubuntu netinstall -UBUNTU_INST_IFACE="eth3" +# network configuration for ubuntu netinstall. +# TODO(matelakat): get rid of legacy network interfaces +# specify "eth2" to use the management network +# specify "eth3" to use the public network +UBUNTU_INST_IFACE="eth2" UBUNTU_INST_IP="dhcp" UBUNTU_INST_NAMESERVERS="" UBUNTU_INST_NETMASK="" From 2f524bd9053ee106daa98414bbe3b94b0cd2e43f Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Wed, 19 Jun 2013 12:32:23 +0100 Subject: [PATCH 0166/4704] xenapi: cleanup VM Installation VM installation created a machine with multiple network interfaces. This is a needless complexity at that point, we only need one interface. This change removes the complex network interface setup from install-os-vpx.sh script, so that only one network interface is created, which is connected to the selected network (management by default). This change also removes a lot of non-used code from install-os-vpx.sh Specify the network to be used for ubuntu netinstall with: UBUNTU_INST_BRIDGE_OR_NET_NAME defaults to $MGT_BRIDGE_OR_NET_NAME. 
This change also includes: - use precise as default Related to blueprint xenapi-devstack-cleanup Change-Id: Ib0a0a316e849f2c2651305ea657c84820cd2bb3b --- tools/xen/README.md | 15 +- tools/xen/install_os_domU.sh | 8 +- tools/xen/scripts/install-os-vpx.sh | 398 ++----------------- tools/xen/scripts/install_ubuntu_template.sh | 2 +- tools/xen/xenrc | 11 +- 5 files changed, 43 insertions(+), 391 deletions(-) diff --git a/tools/xen/README.md b/tools/xen/README.md index 9f3908028d..f0e6859121 100644 --- a/tools/xen/README.md +++ b/tools/xen/README.md @@ -25,15 +25,9 @@ Steps to follow: The `install_os_domU.sh` script will: - Setup XenAPI plugins - Create the named networks, if they don't exist - - Install an Ubuntu Virtual Machine, with 4 network interfaces: - - eth0 - internal xapi interface - - eth1 - VM interface, connected to `VM_BRIDGE_OR_NET_NAME` defaults to - `"OpenStack VM Network"`. - - eth2 - Management interface, connected to `MGT_BRIDGE_OR_NET_NAME`, - defaults to `xenbr0`, XenServer's bridge associated with the Hypervisors - `eth0`. - - eth3 - Public interface, connected to `PUB_BRIDGE_OR_NET_NAME` defaults to - `"OpenStack Public Network"`. + - Preseed-Netinstall an Ubuntu Virtual Machine, with 1 network interface: + - eth0 - Connected to `UBUNTU_INST_BRIDGE_OR_NET_NAME`, defaults to + `MGT_BRIDGE_OR_NET_NAME` - After the Ubuntu install process finished, the network configuration is modified to: - eth0 - Management interface, connected to `MGT_BRIDGE_OR_NET_NAME` @@ -100,9 +94,6 @@ Of course, use real passwords if this machine is exposed. # Give extra time for boot ACTIVE_TIMEOUT=45 - # Settings for netinstalling Ubuntu - UBUNTU_INST_RELEASE=precise - # NOTE: the value of FLAT_NETWORK_BRIDGE will automatically be determined # by install_os_domU.sh script. 
EOF diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh index be4aa16394..63295d1b51 100755 --- a/tools/xen/install_os_domU.sh +++ b/tools/xen/install_os_domU.sh @@ -198,13 +198,11 @@ if [ -z "$templateuuid" ]; then # Update the template $THIS_DIR/scripts/install_ubuntu_template.sh $PRESEED_URL - # create a new VM with the given template - # creating the correct VIFs and metadata + # create a new VM from the given template with eth0 attached to the given + # network $THIS_DIR/scripts/install-os-vpx.sh \ -t "$UBUNTU_INST_TEMPLATE_NAME" \ - -v "$VM_BRIDGE_OR_NET_NAME" \ - -m "$MGT_BRIDGE_OR_NET_NAME" \ - -p "$PUB_BRIDGE_OR_NET_NAME" \ + -n "$UBUNTU_INST_BRIDGE_OR_NET_NAME" \ -l "$GUEST_NAME" \ -r "$OSDOMU_MEM_MB" diff --git a/tools/xen/scripts/install-os-vpx.sh b/tools/xen/scripts/install-os-vpx.sh index c82f8702ba..8ee8b675a9 100755 --- a/tools/xen/scripts/install-os-vpx.sh +++ b/tools/xen/scripts/install-os-vpx.sh @@ -19,106 +19,48 @@ set -eux -[[ -f "/etc/xensource-inventory" ]] && source "/etc/xensource-inventory" || source "/etc/xcp/inventory" - -NAME="XenServer OpenStack VPX" -DATA_VDI_SIZE="500MiB" -BRIDGE_M= -BRIDGE_P= -VPX_FILE=os-vpx.xva -AS_TEMPLATE= -FROM_TEMPLATE= +BRIDGE= RAM= -WAIT_FOR_NETWORK= BALLOONING= +NAME_LABEL= +TEMPLATE_NAME= usage() { cat << EOF - Usage: $0 [-f FILE_PATH] [-d DISK_SIZE] [-v BRIDGE_NAME] [-m BRIDGE_NAME] [-p BRIDGE_NAME] - [-r RAM] [-i|-c] [-w] [-b] [-l NAME_LABEL] [-t TEMPLATE_NW_INSTALL] + Usage: $0 -t TEMPLATE_NW_INSTALL -l NAME_LABEL [-n BRIDGE] [-r RAM] [-b] - Installs XenServer OpenStack VPX. + Install a VM from a template OPTIONS: -h Shows this message. - -i Install OpenStack VPX as template. - -c Clone from existing template. - -w Wait for the network settings to show up before exiting. + -t template VM template to use -b Enable memory ballooning. When set min_RAM=RAM/2 max_RAM=RAM. - -f path Specifies the path to the XVA. - Default to ./os-vpx.xva. 
- -d disk-size Specifies the size in MiB for the data disk. - Defaults to 500 MiB. - -m bridge Specifies the bridge for the isolated management network. - Defaults to xenbr0. - -v bridge Specifies the bridge for the vm network - -p bridge Specifies the bridge for the externally facing network. -r MiB Specifies RAM used by the VPX, in MiB. By default it will take the value from the XVA. -l name Specifies the name label for the VM. - -t template Network install an openstack domU from this template - - EXAMPLES: - - Create a VPX that connects to the isolated management network using the - default bridge with a data disk of 1GiB: - install-os-vpx.sh -f /root/os-vpx-devel.xva -d 1024 - - Create a VPX that connects to the isolated management network using xenbr1 - as bridge: - install-os-vpx.sh -m xenbr1 - - Create a VPX that connects to both the management and public networks - using xenbr1 and xapi4 as bridges: - install-os-vpx.sh -m xenbr1 -p xapi4 - - Create a VPX that connects to both the management and public networks - using the default for management traffic: - install-os-vpx.sh -m xapi4 - + -n bridge The bridge/network to use for eth0. 
Defaults to xenbr0 EOF } get_params() { - while getopts "hicwbf:d:v:m:p:r:l:t:" OPTION; + while getopts "hbn:r:l:t:" OPTION; do case $OPTION in h) usage exit 1 ;; - i) - AS_TEMPLATE=1 - ;; - c) - FROM_TEMPLATE=1 - ;; - w) - WAIT_FOR_NETWORK=1 - ;; b) BALLOONING=1 ;; - f) - VPX_FILE=$OPTARG - ;; - d) - DATA_VDI_SIZE="${OPTARG}MiB" - ;; - m) - BRIDGE_M=$OPTARG - ;; - p) - BRIDGE_P=$OPTARG - ;; r) RAM=$OPTARG ;; - v) - BRIDGE_V=$OPTARG + n) + BRIDGE=$OPTARG ;; l) NAME_LABEL=$OPTARG @@ -132,9 +74,19 @@ get_params() ;; esac done - if [[ -z $BRIDGE_M ]] + if [[ -z $BRIDGE ]] then - BRIDGE_M=xenbr0 + BRIDGE=xenbr0 + fi + + if [[ -z $TEMPLATE_NAME ]]; then + echo "Please specify a template name" >&2 + exit 1 + fi + + if [[ -z $NAME_LABEL ]]; then + echo "Please specify a name-label for the new VM" >&2 + exit 1 fi } @@ -147,34 +99,6 @@ xe_min() } -get_dest_sr() -{ - IFS=, - sr_uuids=$(xe sr-list --minimal other-config:i18n-key=local-storage) - dest_sr="" - for sr_uuid in $sr_uuids - do - pbd=$(xe pbd-list --minimal sr-uuid=$sr_uuid host-uuid=$INSTALLATION_UUID) - if [ "$pbd" ] - then - echo "$sr_uuid" - unset IFS - return - fi - done - unset IFS - - dest_sr=$(xe_min sr-list uuid=$(xe_min pool-list params=default-SR)) - if [ "$dest_sr" = "" ] - then - echo "No local storage and no default storage; cannot import VPX." 
>&2 - exit 1 - else - echo "$dest_sr" - fi -} - - find_network() { result=$(xe_min network-list bridge="$1") @@ -186,137 +110,12 @@ find_network() } -find_template() -{ - xe_min template-list other-config:os-vpx=true -} - - -renumber_system_disk() -{ - local v="$1" - local vdi_uuid=$(xe_min vbd-list vm-uuid="$v" type=Disk userdevice=xvda \ - params=vdi-uuid) - if [ "$vdi_uuid" ] - then - local vbd_uuid=$(xe_min vbd-list vm-uuid="$v" vdi-uuid="$vdi_uuid") - xe vbd-destroy uuid="$vbd_uuid" - local new_vbd_uuid=$(xe vbd-create vm-uuid="$v" vdi-uuid="$vdi_uuid" \ - device=0 bootable=true type=Disk) - xe vbd-param-set other-config:owner uuid="$new_vbd_uuid" - fi -} - - create_vif() -{ - xe vif-create vm-uuid="$1" network-uuid="$2" device="$3" -} - -create_gi_vif() -{ - local v="$1" - # Note that we've made the outbound device eth1, so that it comes up after - # the guest installer VIF, which means that the outbound one wins in terms - # of gateway. - local gi_network_uuid=$(xe_min network-list \ - other-config:is_guest_installer_network=true) - create_vif "$v" "$gi_network_uuid" "0" >/dev/null -} - -create_vm_vif() -{ - local v="$1" - echo "Installing VM interface on $BRIDGE_V." - local out_network_uuid=$(find_network "$BRIDGE_V") - create_vif "$v" "$out_network_uuid" "1" >/dev/null -} - -create_management_vif() { local v="$1" - echo "Installing management interface on $BRIDGE_M." - local out_network_uuid=$(find_network "$BRIDGE_M") - create_vif "$v" "$out_network_uuid" "2" >/dev/null -} - - -# This installs the interface for public traffic, only if a bridge is specified -# The interface is not configured at this stage, but it will be, once the admin -# tasks are complete for the services of this VPX -create_public_vif() -{ - local v="$1" - if [[ -z $BRIDGE_P ]] - then - echo "Skipping installation of interface for public traffic." - else - echo "Installing public interface on $BRIDGE_P." 
- pub_network_uuid=$(find_network "$BRIDGE_P") - create_vif "$v" "$pub_network_uuid" "3" >/dev/null - fi -} - - -label_system_disk() -{ - local v="$1" - local vdi_uuid=$(xe_min vbd-list vm-uuid="$v" type=Disk userdevice=0 \ - params=vdi-uuid) - xe vdi-param-set \ - name-label="$NAME system disk" \ - other-config:os-vpx=true \ - uuid=$vdi_uuid -} - - -create_data_disk() -{ - local v="$1" - - local sys_vdi_uuid=$(xe_min vbd-list vm-uuid="$v" type=Disk params=vdi-uuid) - local data_vdi_uuid=$(xe_min vdi-list other-config:os-vpx-data=true) - - if echo "$data_vdi_uuid" | grep -q , - then - echo "Multiple data disks found -- assuming that you want a new one." - data_vdi_uuid="" - else - data_in_use=$(xe_min vbd-list vdi-uuid="$data_vdi_uuid") - if [ "$data_in_use" != "" ] - then - echo "Data disk already in use -- will create another one." - data_vdi_uuid="" - fi - fi - - if [ "$data_vdi_uuid" = "" ] - then - echo -n "Creating new data disk ($DATA_VDI_SIZE)... " - sr_uuid=$(xe_min vdi-list params=sr-uuid uuid="$sys_vdi_uuid") - data_vdi_uuid=$(xe vdi-create name-label="$NAME data disk" \ - sr-uuid="$sr_uuid" \ - type=user \ - virtual-size="$DATA_VDI_SIZE") - xe vdi-param-set \ - other-config:os-vpx-data=true \ - uuid="$data_vdi_uuid" - dom0_uuid=$(xe_min vm-list is-control-domain=true) - vbd_uuid=$(xe vbd-create device=autodetect type=Disk \ - vdi-uuid="$data_vdi_uuid" vm-uuid="$dom0_uuid") - xe vbd-plug uuid=$vbd_uuid - dev=$(xe_min vbd-list params=device uuid=$vbd_uuid) - mke2fs -q -j -m0 /dev/$dev - e2label /dev/$dev vpxstate - xe vbd-unplug uuid=$vbd_uuid - xe vbd-destroy uuid=$vbd_uuid - else - echo -n "Attaching old data disk... " - fi - vbd_uuid=$(xe vbd-create device=2 type=Disk \ - vdi-uuid="$data_vdi_uuid" vm-uuid="$v") - xe vbd-param-set other-config:os-vpx-data=true uuid=$vbd_uuid - echo "done." 
+ echo "Installing VM interface on [$BRIDGE]" + local out_network_uuid=$(find_network "$BRIDGE") + xe vif-create vm-uuid="$v" network-uuid="$out_network_uuid" device="0" } @@ -342,34 +141,6 @@ set_auto_start() } -set_all() -{ - local v="$1" - set_memory "$v" - set_auto_start "$v" - label_system_disk "$v" - create_gi_vif "$v" - create_vm_vif "$v" - create_management_vif "$v" - create_public_vif "$v" -} - - -log_vifs() -{ - local v="$1" - - (IFS=, - for vif in $(xe_min vif-list vm-uuid="$v") - do - dev=$(xe_min vif-list uuid="$vif" params=device) - mac=$(xe_min vif-list uuid="$vif" params=MAC | sed -e 's/:/-/g') - echo "eth$dev has MAC $mac." - done - unset IFS) | sort -} - - destroy_vifs() { local v="$1" @@ -384,116 +155,11 @@ destroy_vifs() get_params "$@" -thisdir=$(dirname "$0") - -if [ "$FROM_TEMPLATE" ] -then - template_uuid=$(find_template) - name=$(xe_min template-list params=name-label uuid="$template_uuid") - echo -n "Cloning $name... " - vm_uuid=$(xe vm-clone vm="$template_uuid" new-name-label="$name") - xe vm-param-set is-a-template=false uuid="$vm_uuid" - echo $vm_uuid. - - destroy_vifs "$vm_uuid" - set_all "$vm_uuid" -elif [ "$TEMPLATE_NAME" ] -then - echo $TEMPLATE_NAME - vm_uuid=$(xe_min vm-install template="$TEMPLATE_NAME" new-name-label="$NAME_LABEL") - destroy_vifs "$vm_uuid" - set_auto_start "$vm_uuid" - create_gi_vif "$vm_uuid" - create_vm_vif "$vm_uuid" - create_management_vif "$vm_uuid" - create_public_vif "$vm_uuid" - xe vm-param-set other-config:os-vpx=true uuid="$vm_uuid" - xe vm-param-set actions-after-reboot=Destroy uuid="$vm_uuid" - set_memory "$vm_uuid" -else - if [ ! -f "$VPX_FILE" ] - then - # Search $thisdir/$VPX_FILE too. In particular, this is used when - # installing the VPX from the supp-pack, because we want to be able to - # invoke this script from the RPM and the firstboot script. - if [ -f "$thisdir/$VPX_FILE" ] - then - VPX_FILE="$thisdir/$VPX_FILE" - else - echo "$VPX_FILE does not exist." 
>&2 - exit 1 - fi - fi - - echo "Found OS-VPX File: $VPX_FILE. " - - dest_sr=$(get_dest_sr) - - echo -n "Installing $NAME... " - vm_uuid=$(xe vm-import filename=$VPX_FILE sr-uuid="$dest_sr") - echo $vm_uuid. - - renumber_system_disk "$vm_uuid" - - nl=${NAME_LABEL:-$(xe_min vm-list params=name-label uuid=$vm_uuid)} - xe vm-param-set \ - "name-label=${nl/ import/}" \ - other-config:os-vpx=true \ - uuid=$vm_uuid - - set_all "$vm_uuid" - create_data_disk "$vm_uuid" - - if [ "$AS_TEMPLATE" ] - then - xe vm-param-set uuid="$vm_uuid" is-a-template=true \ - other-config:instant=true - echo -n "Installing VPX from template... " - vm_uuid=$(xe vm-clone vm="$vm_uuid" new-name-label="${nl/ import/}") - xe vm-param-set is-a-template=false uuid="$vm_uuid" - echo "$vm_uuid." - fi -fi - - -log_vifs "$vm_uuid" - -echo -n "Starting VM... " +vm_uuid=$(xe_min vm-install template="$TEMPLATE_NAME" new-name-label="$NAME_LABEL") +destroy_vifs "$vm_uuid" +set_auto_start "$vm_uuid" +create_vif "$vm_uuid" +xe vm-param-set other-config:os-vpx=true uuid="$vm_uuid" +xe vm-param-set actions-after-reboot=Destroy uuid="$vm_uuid" +set_memory "$vm_uuid" xe vm-start uuid=$vm_uuid -echo "done." - - -show_ip() -{ - ip_addr=$(echo "$1" | sed -n "s,^.*"$2"/ip: \([^;]*\).*$,\1,p") - echo -n "IP address for $3: " - if [ "$ip_addr" = "" ] - then - echo "did not appear." - else - echo "$ip_addr." - fi -} - - -if [ "$WAIT_FOR_NETWORK" ] -then - echo "Waiting for network configuration... " - i=0 - while [ $i -lt 600 ] - do - ip=$(xe_min vm-list params=networks uuid=$vm_uuid) - if [ "$ip" != "" ] - then - show_ip "$ip" "1" "$BRIDGE_M" - if [[ $BRIDGE_P ]] - then - show_ip "$ip" "2" "$BRIDGE_P" - fi - echo "Installation complete." 
- exit 0 - fi - sleep 10 - let i=i+1 - done -fi diff --git a/tools/xen/scripts/install_ubuntu_template.sh b/tools/xen/scripts/install_ubuntu_template.sh index b7a8eff952..f24e7b26d6 100755 --- a/tools/xen/scripts/install_ubuntu_template.sh +++ b/tools/xen/scripts/install_ubuntu_template.sh @@ -53,7 +53,7 @@ disk_size=$(($OSDOMU_VDI_GB * 1024 * 1024 * 1024)) pvargs="-- quiet console=hvc0 partman/default_filesystem=ext3 \ console-setup/ask_detect=false locale=${UBUNTU_INST_LOCALE} \ keyboard-configuration/layoutcode=${UBUNTU_INST_KEYBOARD} \ -netcfg/choose_interface=${UBUNTU_INST_IFACE} \ +netcfg/choose_interface=eth0 \ netcfg/get_hostname=os netcfg/get_domain=os auto \ url=${preseed_url}" diff --git a/tools/xen/xenrc b/tools/xen/xenrc index 2bd7be7e37..afa3d32327 100644 --- a/tools/xen/xenrc +++ b/tools/xen/xenrc @@ -60,8 +60,8 @@ PUB_IP=${PUB_IP:-172.24.4.10} PUB_NETMASK=${PUB_NETMASK:-255.255.255.0} # Ubuntu install settings -UBUNTU_INST_RELEASE="oneiric" -UBUNTU_INST_TEMPLATE_NAME="Ubuntu 11.10 (64-bit) for DevStack" +UBUNTU_INST_RELEASE="precise" +UBUNTU_INST_TEMPLATE_NAME="Ubuntu 12.04 (64-bit) for DevStack" # For 12.04 use "precise" and update template name # However, for 12.04, you should be using # XenServer 6.1 and later or XCP 1.6 or later @@ -72,11 +72,8 @@ UBUNTU_INST_HTTP_DIRECTORY="/ubuntu" UBUNTU_INST_HTTP_PROXY="" UBUNTU_INST_LOCALE="en_US" UBUNTU_INST_KEYBOARD="us" -# network configuration for ubuntu netinstall. 
-# TODO(matelakat): get rid of legacy network interfaces -# specify "eth2" to use the management network -# specify "eth3" to use the public network -UBUNTU_INST_IFACE="eth2" +# network configuration for ubuntu netinstall +UBUNTU_INST_BRIDGE_OR_NET_NAME=${UBUNTU_INST_BRIDGE_OR_NET_NAME:-"$MGT_BRIDGE_OR_NET_NAME"} UBUNTU_INST_IP="dhcp" UBUNTU_INST_NAMESERVERS="" UBUNTU_INST_NETMASK="" From e3d3cb7598ac0dc5d2df30fcc71b8e8d95b31070 Mon Sep 17 00:00:00 2001 From: Eoghan Glynn Date: Thu, 20 Jun 2013 15:05:54 +0000 Subject: [PATCH 0167/4704] Restore admin role to ceilometer user. Fixes bug 1193004 The ceilometer user needs the admin role, so that the resources (instances etc.) associated with all tenants are visible to the ceilometer agents for metering purposes. Change-Id: I4d8a88d2d88a11a4b408e0c68ef227ec2af2d822 --- files/keystone_data.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/files/keystone_data.sh b/files/keystone_data.sh index a1875e183b..ccac88044c 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -7,7 +7,7 @@ # service glance admin # service swift service # if enabled # service heat service # if enabled -# service ceilometer service # if enabled +# service ceilometer admin # if enabled # Tempest Only: # alt_demo alt_demo Member # @@ -157,7 +157,7 @@ if [[ "$ENABLED_SERVICES" =~ "ceilometer" ]]; then --email=ceilometer@example.com) keystone user-role-add --tenant_id $SERVICE_TENANT \ --user_id $CEILOMETER_USER \ - --role_id $SERVICE_ROLE + --role_id $ADMIN_ROLE # Ceilometer needs ResellerAdmin role to access swift account stats. keystone user-role-add --tenant_id $SERVICE_TENANT \ --user_id $CEILOMETER_USER \ From 4c03034ef8c0908b9113f3f83148bd65ebbd43a6 Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Thu, 20 Jun 2013 17:02:44 -0400 Subject: [PATCH 0168/4704] Set default volume backing file size to 10G This commit changes the default volume backing file size from 5G to 10G. 
This is already done in devstack-gate because on tempest runs we would frequently go above 5G. This will mirror the change for all devstack runs not just in the gate. Change-Id: I7023237653a28a4bb9413540ad9ecf3dfa588b0a --- stackrc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stackrc b/stackrc index ef39710035..23b5bb839f 100644 --- a/stackrc +++ b/stackrc @@ -233,8 +233,8 @@ case "$VIRT_DRIVER" in esac -# 5Gb default volume backing file size -VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-5130M} +# 10Gb default volume backing file size +VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-10250M} # Name of the LVM volume group to use/create for iscsi volumes VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes} From 46fe276a53662013b5a377909ae6b96bae924ddc Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Fri, 21 Jun 2013 16:48:02 +0100 Subject: [PATCH 0169/4704] xenapi: Add qemu-utils as a cinder dependency If you are using xenapi, your system won't have qemu-img installed. This patch adds the package qemu-utils to the list of cinder apts/rpms/suse-rpms, thus devstack will pull the required binaries. 
Change-Id: Id701880ce03bb3e78223df0af6a00b052408407c --- files/apts/cinder | 1 + files/rpms-suse/cinder | 1 + files/rpms/cinder | 1 + 3 files changed, 3 insertions(+) diff --git a/files/apts/cinder b/files/apts/cinder index 5db06eac99..c45b97f5a2 100644 --- a/files/apts/cinder +++ b/files/apts/cinder @@ -1,2 +1,3 @@ tgt lvm2 +qemu-utils diff --git a/files/rpms-suse/cinder b/files/rpms-suse/cinder index e5b47274ef..61b9f253ec 100644 --- a/files/rpms-suse/cinder +++ b/files/rpms-suse/cinder @@ -1,2 +1,3 @@ lvm2 tgt +qemu-img diff --git a/files/rpms/cinder b/files/rpms/cinder index df861aade0..19dedffe91 100644 --- a/files/rpms/cinder +++ b/files/rpms/cinder @@ -1,2 +1,3 @@ lvm2 scsi-target-utils +qemu-img From 39aeda23b23c1f1a3db3c7f81017271c4780f7ad Mon Sep 17 00:00:00 2001 From: Bob Ball Date: Mon, 17 Jun 2013 12:51:33 +0100 Subject: [PATCH 0170/4704] Create the /images directory used by the resize functionality. XenServer's resize uses /images as a known-path for each hypervisor. This is a symlink to the storage repository so disks can be moved between the /images path and the storage repository efficiently. 
Change-Id: I13b39dbf5537ad45160c1af4cc10bd867b7f89c1 --- tools/xen/functions | 10 ++++++++++ tools/xen/install_os_domU.sh | 1 + tools/xen/test_functions.sh | 23 +++++++++++++++++++++++ 3 files changed, 34 insertions(+) diff --git a/tools/xen/functions b/tools/xen/functions index ebfd4835a2..35c17d746d 100644 --- a/tools/xen/functions +++ b/tools/xen/functions @@ -24,6 +24,16 @@ function create_directory_for_kernels { fi } +function create_directory_for_images { + if [ -d "/images" ]; then + echo "INFO: /images directory already exists, using that" >&2 + else + local LOCALPATH="$(get_local_sr_path)/os-images" + mkdir -p $LOCALPATH + ln -s $LOCALPATH /images + fi +} + function extract_remote_zipball { local ZIPBALL_URL=$1 diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh index a744869288..deaf7e5e67 100755 --- a/tools/xen/install_os_domU.sh +++ b/tools/xen/install_os_domU.sh @@ -63,6 +63,7 @@ if [[ "$ENABLED_SERVICES" =~ "q-agt" && "$Q_PLUGIN" = "openvswitch" ]]; then fi create_directory_for_kernels +create_directory_for_images # # Configure Networking diff --git a/tools/xen/test_functions.sh b/tools/xen/test_functions.sh index 62393ca2eb..410df5f8b7 100755 --- a/tools/xen/test_functions.sh +++ b/tools/xen/test_functions.sh @@ -138,6 +138,29 @@ function test_create_directory_for_kernels_existing_dir { EOF } +function test_create_directory_for_images { + ( + . mocks + mock_out get_local_sr uuid1 + create_directory_for_images + ) + + assert_directory_exists "/var/run/sr-mount/uuid1/os-images" + assert_symlink "/images" "/var/run/sr-mount/uuid1/os-images" +} + +function test_create_directory_for_images_existing_dir { + ( + . mocks + given_directory_exists "/images" + create_directory_for_images + ) + + diff -u $LIST_OF_ACTIONS - << EOF +[ -d /images ] +EOF +} + function test_extract_remote_zipball { local RESULT=$(. 
mocks && extract_remote_zipball "someurl") From 7fa1902f9bcad80b3bbf1831805ec10d937b3d9a Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Tue, 25 Jun 2013 14:11:48 +1000 Subject: [PATCH 0171/4704] Add cli_dir to tempest config Add the cli_dir option to the generated tempest config. This is required on platforms such as RHEL where the binaries are not in the standard location. Note that to be fully operational this requires [1] in tempest so that the cli path is created correctly. [1] https://review.openstack.org/#/c/34302/ Change-Id: Ic7369193f1434974366cfabc7550e8545de244cd --- lib/tempest | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/tempest b/lib/tempest index 277c68abcd..87f6a574a0 100644 --- a/lib/tempest +++ b/lib/tempest @@ -281,6 +281,9 @@ function configure_tempest() { iniset $TEMPEST_CONF volume backend2_name "LVM_iSCSI_2" fi + # cli + iniset $TEMPEST_CONF cli cli_dir $NOVA_BIN_DIR + echo "Created tempest configuration file:" cat $TEMPEST_CONF From d42634ff631e5faa5e3a91f581c861e59279e6f7 Mon Sep 17 00:00:00 2001 From: Gary Kotton Date: Mon, 24 Jun 2013 09:26:55 +0000 Subject: [PATCH 0172/4704] Use lowercase section names in Neutron (aka Quantum) Fixes bug 1194064 The DEFAULT section remains the same. In addition to this the sql_connection has been changed to connection - this is for OSLO DB support. 
Change-Id: I56175146941a9cae966033b557e44097e29a5d43 --- lib/quantum | 8 +++---- lib/quantum_plugins/bigswitch_floodlight | 4 ++-- lib/quantum_plugins/linuxbridge | 10 ++++----- lib/quantum_plugins/linuxbridge_agent | 10 ++++----- lib/quantum_plugins/nec | 10 ++++----- lib/quantum_plugins/nicira | 10 ++++----- lib/quantum_plugins/openvswitch | 12 +++++----- lib/quantum_plugins/openvswitch_agent | 28 ++++++++++++------------ lib/quantum_plugins/ovs_base | 4 ++-- lib/quantum_plugins/plumgrid | 4 ++-- lib/quantum_plugins/ryu | 4 ++-- 11 files changed, 52 insertions(+), 52 deletions(-) diff --git a/lib/quantum b/lib/quantum index 51dd76195d..f16937c684 100644 --- a/lib/quantum +++ b/lib/quantum @@ -459,7 +459,7 @@ function _configure_quantum_common() { Q_PLUGIN_CONF_FILE=$Q_PLUGIN_CONF_PATH/$Q_PLUGIN_CONF_FILENAME cp $QUANTUM_DIR/$Q_PLUGIN_CONF_FILE /$Q_PLUGIN_CONF_FILE - iniset /$Q_PLUGIN_CONF_FILE DATABASE sql_connection `database_connection_url $Q_DB_NAME` + iniset /$Q_PLUGIN_CONF_FILE database connection `database_connection_url $Q_DB_NAME` iniset $QUANTUM_CONF DEFAULT state_path $DATA_DIR/quantum # If addition config files are set, make sure their path name is set as well @@ -494,7 +494,7 @@ function _configure_quantum_debug_command() { iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT root_helper "$Q_RR_COMMAND" # Intermediate fix until Quantum patch lands and then line above will # be cleaned. 
- iniset $QUANTUM_TEST_CONFIG_FILE AGENT root_helper "$Q_RR_COMMAND" + iniset $QUANTUM_TEST_CONFIG_FILE agent root_helper "$Q_RR_COMMAND" _quantum_setup_keystone $QUANTUM_TEST_CONFIG_FILE DEFAULT set_auth_url _quantum_setup_interface_driver $QUANTUM_TEST_CONFIG_FILE @@ -563,7 +563,7 @@ function _configure_quantum_lbaas() { function _configure_quantum_plugin_agent() { # Specify the default root helper prior to agent configuration to # ensure that an agent's configuration can override the default - iniset /$Q_PLUGIN_CONF_FILE AGENT root_helper "$Q_RR_COMMAND" + iniset /$Q_PLUGIN_CONF_FILE agent root_helper "$Q_RR_COMMAND" iniset $QUANTUM_CONF DEFAULT verbose True iniset $QUANTUM_CONF DEFAULT debug True @@ -645,7 +645,7 @@ function _quantum_setup_rootwrap() { sudo mv $TEMPFILE /etc/sudoers.d/quantum-rootwrap # Update the root_helper - iniset $QUANTUM_CONF AGENT root_helper "$Q_RR_COMMAND" + iniset $QUANTUM_CONF agent root_helper "$Q_RR_COMMAND" } # Configures keystone integration for quantum service and agents diff --git a/lib/quantum_plugins/bigswitch_floodlight b/lib/quantum_plugins/bigswitch_floodlight index edee0eb748..11997457bc 100644 --- a/lib/quantum_plugins/bigswitch_floodlight +++ b/lib/quantum_plugins/bigswitch_floodlight @@ -42,8 +42,8 @@ function quantum_plugin_configure_plugin_agent() { } function quantum_plugin_configure_service() { - iniset /$Q_PLUGIN_CONF_FILE RESTPROXY servers $BS_FL_CONTROLLERS_PORT - iniset /$Q_PLUGIN_CONF_FILE RESTPROXY servertimeout $BS_FL_CONTROLLER_TIMEOUT + iniset /$Q_PLUGIN_CONF_FILE restproxy servers $BS_FL_CONTROLLERS_PORT + iniset /$Q_PLUGIN_CONF_FILE restproxy servertimeout $BS_FL_CONTROLLER_TIMEOUT } function quantum_plugin_setup_interface_driver() { diff --git a/lib/quantum_plugins/linuxbridge b/lib/quantum_plugins/linuxbridge index dffa32b7bf..989b930005 100644 --- a/lib/quantum_plugins/linuxbridge +++ b/lib/quantum_plugins/linuxbridge @@ -14,7 +14,7 @@ function quantum_plugin_configure_common() { function 
quantum_plugin_configure_service() { if [[ "$ENABLE_TENANT_VLANS" = "True" ]]; then - iniset /$Q_PLUGIN_CONF_FILE VLANS tenant_network_type vlan + iniset /$Q_PLUGIN_CONF_FILE vlans tenant_network_type vlan else echo "WARNING - The linuxbridge plugin is using local tenant networks, with no connectivity between hosts." fi @@ -28,12 +28,12 @@ function quantum_plugin_configure_service() { fi fi if [[ "$LB_VLAN_RANGES" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE VLANS network_vlan_ranges $LB_VLAN_RANGES + iniset /$Q_PLUGIN_CONF_FILE vlans network_vlan_ranges $LB_VLAN_RANGES fi if [[ "$Q_USE_SECGROUP" == "True" ]]; then - iniset /$Q_PLUGIN_CONF_FILE SECURITYGROUP firewall_driver quantum.agent.linux.iptables_firewall.IptablesFirewallDriver + iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver quantum.agent.linux.iptables_firewall.IptablesFirewallDriver else - iniset /$Q_PLUGIN_CONF_FILE SECURITYGROUP firewall_driver quantum.agent.firewall.NoopFirewallDriver + iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver quantum.agent.firewall.NoopFirewallDriver fi # Define extra "LINUX_BRIDGE" configuration options when q-svc is configured by defining @@ -41,7 +41,7 @@ function quantum_plugin_configure_service() { # For Example: ``Q_SRV_EXTRA_OPTS=(foo=true bar=2)`` for I in "${Q_SRV_EXTRA_OPTS[@]}"; do # Replace the first '=' with ' ' for iniset syntax - iniset /$Q_PLUGIN_CONF_FILE LINUX_BRIDGE ${I/=/ } + iniset /$Q_PLUGIN_CONF_FILE linux_bridge ${I/=/ } done } diff --git a/lib/quantum_plugins/linuxbridge_agent b/lib/quantum_plugins/linuxbridge_agent index 7855cd0eb1..b3ca8b12ab 100644 --- a/lib/quantum_plugins/linuxbridge_agent +++ b/lib/quantum_plugins/linuxbridge_agent @@ -39,12 +39,12 @@ function quantum_plugin_configure_plugin_agent() { LB_INTERFACE_MAPPINGS=$PHYSICAL_NETWORK:$LB_PHYSICAL_INTERFACE fi if [[ "$LB_INTERFACE_MAPPINGS" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE LINUX_BRIDGE physical_interface_mappings $LB_INTERFACE_MAPPINGS + iniset 
/$Q_PLUGIN_CONF_FILE linux_bridge physical_interface_mappings $LB_INTERFACE_MAPPINGS fi if [[ "$Q_USE_SECGROUP" == "True" ]]; then - iniset /$Q_PLUGIN_CONF_FILE SECURITYGROUP firewall_driver quantum.agent.linux.iptables_firewall.IptablesFirewallDriver + iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver quantum.agent.linux.iptables_firewall.IptablesFirewallDriver else - iniset /$Q_PLUGIN_CONF_FILE SECURITYGROUP firewall_driver quantum.agent.firewall.NoopFirewallDriver + iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver quantum.agent.firewall.NoopFirewallDriver fi AGENT_BINARY="$QUANTUM_DIR/bin/quantum-linuxbridge-agent" # Define extra "AGENT" configuration options when q-agt is configured by defining @@ -52,14 +52,14 @@ function quantum_plugin_configure_plugin_agent() { # For Example: ``Q_AGENT_EXTRA_AGENT_OPTS=(foo=true bar=2)`` for I in "${Q_AGENT_EXTRA_AGENT_OPTS[@]}"; do # Replace the first '=' with ' ' for iniset syntax - iniset /$Q_PLUGIN_CONF_FILE AGENT ${I/=/ } + iniset /$Q_PLUGIN_CONF_FILE agent ${I/=/ } done # Define extra "LINUX_BRIDGE" configuration options when q-agt is configured by defining # the array ``Q_AGENT_EXTRA_SRV_OPTS``. 
# For Example: ``Q_AGENT_EXTRA_SRV_OPTS=(foo=true bar=2)`` for I in "${Q_AGENT_EXTRA_SRV_OPTS[@]}"; do # Replace the first '=' with ' ' for iniset syntax - iniset /$Q_PLUGIN_CONF_FILE LINUX_BRIDGE ${I/=/ } + iniset /$Q_PLUGIN_CONF_FILE linux_bridge ${I/=/ } done } diff --git a/lib/quantum_plugins/nec b/lib/quantum_plugins/nec index 608e267e80..69bbe0e618 100644 --- a/lib/quantum_plugins/nec +++ b/lib/quantum_plugins/nec @@ -77,11 +77,11 @@ function quantum_plugin_configure_plugin_agent() { function quantum_plugin_configure_service() { iniset $QUANTUM_CONF DEFAULT api_extensions_path quantum/plugins/nec/extensions/ - iniset /$Q_PLUGIN_CONF_FILE OFC host $OFC_API_HOST - iniset /$Q_PLUGIN_CONF_FILE OFC port $OFC_API_PORT - iniset /$Q_PLUGIN_CONF_FILE OFC driver $OFC_DRIVER - iniset /$Q_PLUGIN_CONF_FILE OFC api_retry_max OFC_RETRY_MAX - iniset /$Q_PLUGIN_CONF_FILE OFC api_retry_interval OFC_RETRY_INTERVAL + iniset /$Q_PLUGIN_CONF_FILE ofc host $OFC_API_HOST + iniset /$Q_PLUGIN_CONF_FILE ofc port $OFC_API_PORT + iniset /$Q_PLUGIN_CONF_FILE ofc driver $OFC_DRIVER + iniset /$Q_PLUGIN_CONF_FILE ofc api_retry_max OFC_RETRY_MAX + iniset /$Q_PLUGIN_CONF_FILE ofc api_retry_interval OFC_RETRY_INTERVAL _quantum_ovs_base_configure_firewall_driver } diff --git a/lib/quantum_plugins/nicira b/lib/quantum_plugins/nicira index c9c6d0e74e..d4b3e5128e 100644 --- a/lib/quantum_plugins/nicira +++ b/lib/quantum_plugins/nicira @@ -68,16 +68,16 @@ function quantum_plugin_configure_plugin_agent() { function quantum_plugin_configure_service() { if [[ "$MAX_LP_PER_BRIDGED_LS" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE NVP max_lp_per_bridged_ls $MAX_LP_PER_BRIDGED_LS + iniset /$Q_PLUGIN_CONF_FILE nvp max_lp_per_bridged_ls $MAX_LP_PER_BRIDGED_LS fi if [[ "$MAX_LP_PER_OVERLAY_LS" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE NVP max_lp_per_overlay_ls $MAX_LP_PER_OVERLAY_LS + iniset /$Q_PLUGIN_CONF_FILE nvp max_lp_per_overlay_ls $MAX_LP_PER_OVERLAY_LS fi if [[ "$FAILOVER_TIME" != "" ]]; then - 
iniset /$Q_PLUGIN_CONF_FILE NVP failover_time $FAILOVER_TIME + iniset /$Q_PLUGIN_CONF_FILE nvp failover_time $FAILOVER_TIME fi if [[ "$CONCURRENT_CONNECTIONS" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE NVP concurrent_connections $CONCURRENT_CONNECTIONS + iniset /$Q_PLUGIN_CONF_FILE nvp concurrent_connections $CONCURRENT_CONNECTIONS fi if [[ "$DEFAULT_TZ_UUID" != "" ]]; then @@ -89,7 +89,7 @@ function quantum_plugin_configure_service() { iniset /$Q_PLUGIN_CONF_FILE DEFAULT default_l3_gw_service_uuid $DEFAULT_L3_GW_SVC_UUID Q_L3_ENABLED=True Q_L3_ROUTER_PER_TENANT=True - iniset /$Q_PLUGIN_CONF_FILE NVP enable_metadata_access_network True + iniset /$Q_PLUGIN_CONF_FILE nvp enable_metadata_access_network True fi if [[ "$DEFAULT_L2_GW_SVC_UUID" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE DEFAULT default_l2_gw_service_uuid $DEFAULT_L2_GW_SVC_UUID diff --git a/lib/quantum_plugins/openvswitch b/lib/quantum_plugins/openvswitch index e53db8aaa3..4aac9f8e69 100644 --- a/lib/quantum_plugins/openvswitch +++ b/lib/quantum_plugins/openvswitch @@ -16,10 +16,10 @@ function quantum_plugin_configure_common() { function quantum_plugin_configure_service() { if [[ "$ENABLE_TENANT_TUNNELS" = "True" ]]; then - iniset /$Q_PLUGIN_CONF_FILE OVS tenant_network_type gre - iniset /$Q_PLUGIN_CONF_FILE OVS tunnel_id_ranges $TENANT_TUNNEL_RANGES + iniset /$Q_PLUGIN_CONF_FILE ovs tenant_network_type gre + iniset /$Q_PLUGIN_CONF_FILE ovs tunnel_id_ranges $TENANT_TUNNEL_RANGES elif [[ "$ENABLE_TENANT_VLANS" = "True" ]]; then - iniset /$Q_PLUGIN_CONF_FILE OVS tenant_network_type vlan + iniset /$Q_PLUGIN_CONF_FILE ovs tenant_network_type vlan else echo "WARNING - The openvswitch plugin is using local tenant networks, with no connectivity between hosts." 
fi @@ -33,12 +33,12 @@ function quantum_plugin_configure_service() { fi fi if [[ "$OVS_VLAN_RANGES" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE OVS network_vlan_ranges $OVS_VLAN_RANGES + iniset /$Q_PLUGIN_CONF_FILE ovs network_vlan_ranges $OVS_VLAN_RANGES fi # Enable tunnel networks if selected if [[ $OVS_ENABLE_TUNNELING = "True" ]]; then - iniset /$Q_PLUGIN_CONF_FILE OVS enable_tunneling True + iniset /$Q_PLUGIN_CONF_FILE ovs enable_tunneling True fi _quantum_ovs_base_configure_firewall_driver @@ -48,7 +48,7 @@ function quantum_plugin_configure_service() { # For Example: ``Q_SRV_EXTRA_OPTS=(foo=true bar=2)`` for I in "${Q_SRV_EXTRA_OPTS[@]}"; do # Replace the first '=' with ' ' for iniset syntax - iniset /$Q_PLUGIN_CONF_FILE OVS ${I/=/ } + iniset /$Q_PLUGIN_CONF_FILE ovs ${I/=/ } done } diff --git a/lib/quantum_plugins/openvswitch_agent b/lib/quantum_plugins/openvswitch_agent index 7e83428a20..608c3eae98 100644 --- a/lib/quantum_plugins/openvswitch_agent +++ b/lib/quantum_plugins/openvswitch_agent @@ -47,8 +47,8 @@ function quantum_plugin_configure_plugin_agent() { if [ `vercmp_numbers "$OVS_VERSION" "1.4"` -lt "0" ] && ! is_service_enabled q-svc ; then die $LINENO "You are running OVS version $OVS_VERSION. OVS 1.4+ is required for tunneling between multiple hosts." fi - iniset /$Q_PLUGIN_CONF_FILE OVS enable_tunneling True - iniset /$Q_PLUGIN_CONF_FILE OVS local_ip $HOST_IP + iniset /$Q_PLUGIN_CONF_FILE ovs enable_tunneling True + iniset /$Q_PLUGIN_CONF_FILE ovs local_ip $HOST_IP fi # Setup physical network bridge mappings. 
Override @@ -61,7 +61,7 @@ function quantum_plugin_configure_plugin_agent() { sudo ovs-vsctl --no-wait -- --may-exist add-br $OVS_PHYSICAL_BRIDGE fi if [[ "$OVS_BRIDGE_MAPPINGS" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE OVS bridge_mappings $OVS_BRIDGE_MAPPINGS + iniset /$Q_PLUGIN_CONF_FILE ovs bridge_mappings $OVS_BRIDGE_MAPPINGS fi AGENT_BINARY="$QUANTUM_DIR/bin/quantum-openvswitch-agent" @@ -73,20 +73,20 @@ function quantum_plugin_configure_plugin_agent() { Q_RR_DOM0_COMMAND="$QUANTUM_DIR/bin/quantum-rootwrap-xen-dom0 $Q_RR_CONF_FILE" # For now, duplicate the xen configuration already found in nova.conf - iniset $Q_RR_CONF_FILE XENAPI xenapi_connection_url "$XENAPI_CONNECTION_URL" - iniset $Q_RR_CONF_FILE XENAPI xenapi_connection_username "$XENAPI_USER" - iniset $Q_RR_CONF_FILE XENAPI xenapi_connection_password "$XENAPI_PASSWORD" + iniset $Q_RR_CONF_FILE xenapi xenapi_connection_url "$XENAPI_CONNECTION_URL" + iniset $Q_RR_CONF_FILE xenapi xenapi_connection_username "$XENAPI_USER" + iniset $Q_RR_CONF_FILE xenapi xenapi_connection_password "$XENAPI_PASSWORD" # Under XS/XCP, the ovs agent needs to target the dom0 # integration bridge. This is enabled by using a root wrapper # that executes commands on dom0 via a XenAPI plugin. 
- iniset /$Q_PLUGIN_CONF_FILE AGENT root_helper "$Q_RR_DOM0_COMMAND" + iniset /$Q_PLUGIN_CONF_FILE agent root_helper "$Q_RR_DOM0_COMMAND" # Set "physical" mapping - iniset /$Q_PLUGIN_CONF_FILE OVS bridge_mappings "physnet1:$FLAT_NETWORK_BRIDGE" + iniset /$Q_PLUGIN_CONF_FILE ovs bridge_mappings "physnet1:$FLAT_NETWORK_BRIDGE" # XEN_INTEGRATION_BRIDGE is the integration bridge in dom0 - iniset /$Q_PLUGIN_CONF_FILE OVS integration_bridge $XEN_INTEGRATION_BRIDGE + iniset /$Q_PLUGIN_CONF_FILE ovs integration_bridge $XEN_INTEGRATION_BRIDGE # Set up domU's L2 agent: @@ -96,25 +96,25 @@ function quantum_plugin_configure_plugin_agent() { sudo ovs-vsctl add-port "br-$GUEST_INTERFACE_DEFAULT" $GUEST_INTERFACE_DEFAULT # Set bridge mappings to "physnet1:br-$GUEST_INTERFACE_DEFAULT" - iniset "/$Q_PLUGIN_CONF_FILE.domU" OVS bridge_mappings "physnet1:br-$GUEST_INTERFACE_DEFAULT" + iniset "/$Q_PLUGIN_CONF_FILE.domU" ovs bridge_mappings "physnet1:br-$GUEST_INTERFACE_DEFAULT" # Set integration bridge to domU's - iniset "/$Q_PLUGIN_CONF_FILE.domU" OVS integration_bridge $OVS_BRIDGE + iniset "/$Q_PLUGIN_CONF_FILE.domU" ovs integration_bridge $OVS_BRIDGE # Set root wrap - iniset "/$Q_PLUGIN_CONF_FILE.domU" AGENT root_helper "$Q_RR_COMMAND" + iniset "/$Q_PLUGIN_CONF_FILE.domU" agent root_helper "$Q_RR_COMMAND" fi # Define extra "AGENT" configuration options when q-agt is configured by defining # defining the array ``Q_AGENT_EXTRA_AGENT_OPTS``. # For Example: ``Q_AGENT_EXTRA_AGENT_OPTS=(foo=true bar=2)`` for I in "${Q_AGENT_EXTRA_AGENT_OPTS[@]}"; do # Replace the first '=' with ' ' for iniset syntax - iniset /$Q_PLUGIN_CONF_FILE AGENT ${I/=/ } + iniset /$Q_PLUGIN_CONF_FILE agent ${I/=/ } done # Define extra "OVS" configuration options when q-agt is configured by defining # defining the array ``Q_AGENT_EXTRA_SRV_OPTS``. 
# For Example: ``Q_AGENT_EXTRA_SRV_OPTS=(foo=true bar=2)`` for I in "${Q_AGENT_EXTRA_SRV_OPTS[@]}"; do # Replace the first '=' with ' ' for iniset syntax - iniset /$Q_PLUGIN_CONF_FILE OVS ${I/=/ } + iniset /$Q_PLUGIN_CONF_FILE ovs ${I/=/ } done } diff --git a/lib/quantum_plugins/ovs_base b/lib/quantum_plugins/ovs_base index a5e03acd51..646ff4a782 100644 --- a/lib/quantum_plugins/ovs_base +++ b/lib/quantum_plugins/ovs_base @@ -56,9 +56,9 @@ function _quantum_ovs_base_configure_debug_command() { function _quantum_ovs_base_configure_firewall_driver() { if [[ "$Q_USE_SECGROUP" == "True" ]]; then - iniset /$Q_PLUGIN_CONF_FILE SECURITYGROUP firewall_driver quantum.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver + iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver quantum.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver else - iniset /$Q_PLUGIN_CONF_FILE SECURITYGROUP firewall_driver quantum.agent.firewall.NoopFirewallDriver + iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver quantum.agent.firewall.NoopFirewallDriver fi } diff --git a/lib/quantum_plugins/plumgrid b/lib/quantum_plugins/plumgrid index 14567104ed..dde18c8e85 100644 --- a/lib/quantum_plugins/plumgrid +++ b/lib/quantum_plugins/plumgrid @@ -27,8 +27,8 @@ function quantum_plugin_configure_common() { function quantum_plugin_configure_service() { PLUMGRID_NOS_IP=${PLUMGRID_NOS_IP:-localhost} PLUMGRID_NOS_PORT=${PLUMGRID_NOS_PORT:-7766} - iniset /$Q_PLUGIN_CONF_FILE PLUMgridNOS nos_server $PLUMGRID_NOS_IP - iniset /$Q_PLUGIN_CONF_FILE PLUMgridNOS nos_server_port $PLUMGRID_NOS_PORT + iniset /$Q_PLUGIN_CONF_FILE plumgridnos nos_server $PLUMGRID_NOS_IP + iniset /$Q_PLUGIN_CONF_FILE plumgridnos nos_server_port $PLUMGRID_NOS_PORT } function quantum_plugin_configure_debug_command() { diff --git a/lib/quantum_plugins/ryu b/lib/quantum_plugins/ryu index dcdccb7c58..53c4f41aa6 100644 --- a/lib/quantum_plugins/ryu +++ b/lib/quantum_plugins/ryu @@ -49,14 +49,14 @@ function 
quantum_plugin_configure_plugin_agent() { if [ -n "$RYU_INTERNAL_INTERFACE" ]; then sudo ovs-vsctl --no-wait -- --may-exist add-port $OVS_BRIDGE $RYU_INTERNAL_INTERFACE fi - iniset /$Q_PLUGIN_CONF_FILE OVS integration_bridge $OVS_BRIDGE + iniset /$Q_PLUGIN_CONF_FILE ovs integration_bridge $OVS_BRIDGE AGENT_BINARY="$QUANTUM_DIR/quantum/plugins/ryu/agent/ryu_quantum_agent.py" _quantum_ovs_base_configure_firewall_driver } function quantum_plugin_configure_service() { - iniset /$Q_PLUGIN_CONF_FILE OVS openflow_rest_api $RYU_API_HOST:$RYU_API_PORT + iniset /$Q_PLUGIN_CONF_FILE ovs openflow_rest_api $RYU_API_HOST:$RYU_API_PORT _quantum_ovs_base_configure_firewall_driver } From 6e88f8e14f5e7f15e37ceebf0f588483ad4bb82b Mon Sep 17 00:00:00 2001 From: Brad Topol Date: Wed, 26 Jun 2013 10:26:33 -0500 Subject: [PATCH 0173/4704] Remove creation of Domain entries in LDAP Domain entries are no longer stored in keystone ldap. Removing the creation from devstack ldap install Fixes Bug 1194204 Change-Id: I9c93d3021cc2bb058d1ef57bebcf3a13dc5bdd34 --- files/ldap/openstack.ldif | 4 ---- 1 file changed, 4 deletions(-) diff --git a/files/ldap/openstack.ldif b/files/ldap/openstack.ldif index f810fe8d2e..02caf3f368 100644 --- a/files/ldap/openstack.ldif +++ b/files/ldap/openstack.ldif @@ -20,10 +20,6 @@ dn: ou=Projects,dc=openstack,dc=org objectClass: organizationalUnit ou: Projects -dn: ou=Domains,dc=openstack,dc=org -objectClass: organizationalUnit -ou: Domains - dn: cn=9fe2ff9ee4384b1894a90878d3e92bab,ou=Roles,dc=openstack,dc=org objectClass: organizationalRole ou: _member_ From 0db171378dc97fea47f7d536ab92ab363e544127 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Wed, 26 Jun 2013 22:31:48 +1000 Subject: [PATCH 0174/4704] Update LVM autoextend config for RHEL6 Cinder clones are slightly larger due to some extra metadata. RHEL6 will not allow auto-extending of LV's without this, leading to clones giving hard-to-track disk I/O errors. 
See https://bugzilla.redhat.com/show_bug.cgi?id=975052 Change-Id: I09a5e061a9665c5310383f9f9eb281bfdc8e416d --- lib/cinder | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/lib/cinder b/lib/cinder index c572db49d2..f691b384bf 100644 --- a/lib/cinder +++ b/lib/cinder @@ -276,6 +276,19 @@ function configure_cinder() { echo "$CINDER_GLUSTERFS_SHARES" > $CINDER_CONF_DIR/glusterfs_shares fi fi + + if [[ is_fedora && $DISTRO =~ (rhel6) ]]; then + # Cinder clones are slightly larger due to some extra + # metadata. RHEL6 will not allow auto-extending of LV's + # without this, leading to clones giving hard-to-track disk + # I/O errors. + # see https://bugzilla.redhat.com/show_bug.cgi?id=975052 + sudo sed -i~ \ + -e 's/snapshot_autoextend_threshold =.*/snapshot_autoextend_threshold = 80/' \ + -e 's/snapshot_autoextend_percent =.*/snapshot_autoextend_percent = 20/' \ + /etc/lvm/lvm.conf + fi + } # create_cinder_accounts() - Set up common required cinder accounts From 4f9b33d077cacaed524175902f6209f20cc4c5a7 Mon Sep 17 00:00:00 2001 From: Martin Vidner Date: Thu, 27 Jun 2013 13:11:22 +0000 Subject: [PATCH 0175/4704] Fix python exec prefix on SUSE. https://bugs.launchpad.net/devstack/+bug/1068386 "sudo python setup.py develop" installs to /usr/bin on SUSE Otherwise it fails with "stack.sh:191 g-api did not start". Change-Id: I1258240ce2a5a765188353fbc8a2e085d0b02fec --- functions | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/functions b/functions index c611e0046c..70ab999676 100644 --- a/functions +++ b/functions @@ -1410,7 +1410,7 @@ function add_user_to_group() { # Get the path to the direcotry where python executables are installed. 
# get_python_exec_prefix function get_python_exec_prefix() { - if is_fedora; then + if is_fedora || is_suse; then echo "/usr/bin" else echo "/usr/local/bin" From 4ca55309f28f9470701487881847a7efdc3c5628 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Fri, 28 Jun 2013 14:21:17 +1000 Subject: [PATCH 0176/4704] Add python-libguestfs to RPM list libguestfs appears to be the best way to access images for injection, so add the python bindings to the RPM list. Indeed, on RHEL it is the only way, because the fallback of nbd access isn't available. There, this change prevents a lot of errors in nova.virt.disk.vfs.localfs when it can't import the nbd module. Change-Id: I9c2a81bb7c26bc17f8b5f1b5a682c05a419b33ba --- files/rpms/n-cpu | 1 + 1 file changed, 1 insertion(+) diff --git a/files/rpms/n-cpu b/files/rpms/n-cpu index 149672ac20..e4fdaf4eda 100644 --- a/files/rpms/n-cpu +++ b/files/rpms/n-cpu @@ -4,3 +4,4 @@ lvm2 genisoimage sysfsutils sg3_utils +python-libguestfs From 9297319fdb18440b7f37aa71b12a7599cd691dbb Mon Sep 17 00:00:00 2001 From: Paul Michali Date: Mon, 24 Jun 2013 12:44:58 -0700 Subject: [PATCH 0177/4704] Enhance DevStack to allow user to specify the following customizations for the predefined networks created on startup (default in parenthesis): PRIVATE_SUBNET_NAME - Name of the local subnet (private-subnet) PUBLIC_SUBNET_NAME - Name of the public subnet (public-subnet) PUBLIC_NETWORK_GATEWAY - IP for the public subnet (172.24.4.225) Moved NETWORK_GATEWAY into lib/quantum, along with these new customizations. 
bug 1194218 Change-Id: I87ebc22e338df278db7523f11be07de1ca88234b --- lib/quantum | 12 +++++++++--- stack.sh | 1 - 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/lib/quantum b/lib/quantum index f16937c684..afe99c48cb 100644 --- a/lib/quantum +++ b/lib/quantum @@ -65,6 +65,12 @@ set +o xtrace # Quantum Network Configuration # ----------------------------- +# Gateway and subnet defaults, in case they are not customized in localrc +NETWORK_GATEWAY=${NETWORK_GATEWAY:-10.0.0.1} +PUBLIC_NETWORK_GATEWAY=${PUBLIC_NETWORK_GATEWAY:-172.24.4.225} +PRIVATE_SUBNET_NAME=${PRIVATE_SUBNET_NAME:-"private-subnet"} +PUBLIC_SUBNET_NAME=${PUBLIC_SUBNET_NAME:-"public-subnet"} + # Set up default directories QUANTUM_DIR=$DEST/quantum QUANTUMCLIENT_DIR=$DEST/python-quantumclient @@ -304,11 +310,11 @@ function create_quantum_initial_network() { sudo ip addr add $IP dev $OVS_PHYSICAL_BRIDGE done NET_ID=$(quantum net-create $PHYSICAL_NETWORK --tenant_id $TENANT_ID --provider:network_type flat --provider:physical_network "$PHYSICAL_NETWORK" | grep ' id ' | get_field 2) - SUBNET_ID=$(quantum subnet-create --tenant_id $TENANT_ID --ip_version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} --gateway $NETWORK_GATEWAY $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2) + SUBNET_ID=$(quantum subnet-create --tenant_id $TENANT_ID --ip_version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} --gateway $NETWORK_GATEWAY --name $PRIVATE_SUBNET_NAME $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2) sudo ifconfig $OVS_PHYSICAL_BRIDGE up else NET_ID=$(quantum net-create --tenant_id $TENANT_ID "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2) - SUBNET_ID=$(quantum subnet-create --tenant_id $TENANT_ID --ip_version 4 --gateway $NETWORK_GATEWAY $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2) + SUBNET_ID=$(quantum subnet-create --tenant_id $TENANT_ID --ip_version 4 --gateway $NETWORK_GATEWAY --name $PRIVATE_SUBNET_NAME $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2) fi if [[ 
"$Q_L3_ENABLED" == "True" ]]; then @@ -323,7 +329,7 @@ function create_quantum_initial_network() { quantum router-interface-add $ROUTER_ID $SUBNET_ID # Create an external network, and a subnet. Configure the external network as router gw EXT_NET_ID=$(quantum net-create "$PUBLIC_NETWORK_NAME" -- --router:external=True | grep ' id ' | get_field 2) - EXT_GW_IP=$(quantum subnet-create --ip_version 4 ${Q_FLOATING_ALLOCATION_POOL:+--allocation-pool $Q_FLOATING_ALLOCATION_POOL} $EXT_NET_ID $FLOATING_RANGE -- --enable_dhcp=False | grep 'gateway_ip' | get_field 2) + EXT_GW_IP=$(quantum subnet-create --ip_version 4 ${Q_FLOATING_ALLOCATION_POOL:+--allocation-pool $Q_FLOATING_ALLOCATION_POOL} --gateway $PUBLIC_NETWORK_GATEWAY --name $PUBLIC_SUBNET_NAME $EXT_NET_ID $FLOATING_RANGE -- --enable_dhcp=False | grep 'gateway_ip' | get_field 2) quantum router-gateway-set $ROUTER_ID $EXT_NET_ID if is_service_enabled q-l3; then diff --git a/stack.sh b/stack.sh index 5372942855..57b8529e06 100755 --- a/stack.sh +++ b/stack.sh @@ -266,7 +266,6 @@ sudo chown -R $STACK_USER $DATA_DIR FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.224/28} FIXED_RANGE=${FIXED_RANGE:-10.0.0.0/24} FIXED_NETWORK_SIZE=${FIXED_NETWORK_SIZE:-256} -NETWORK_GATEWAY=${NETWORK_GATEWAY:-10.0.0.1} HOST_IP=$(get_default_host_ip $FIXED_RANGE $FLOATING_RANGE "$HOST_IP_IFACE" "$HOST_IP") if [ "$HOST_IP" == "" ]; then From 3005e17853416db571936e527c1288c4e27c3499 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Thu, 24 Jan 2013 14:14:28 -0600 Subject: [PATCH 0178/4704] Enable configuration of Identity API v3 * Default IDENTITY_API_VERSION to '2.0' in stackrc Note: the value of these *_API_VERSION variables will NOT include the leading 'v' as the CLI tools do not allow it. 
Change-Id: Ic6473833be35625282e7442f3c88fc1c4d0cc134 --- lib/keystone | 11 ++++++----- openrc | 3 +++ stackrc | 3 +++ 3 files changed, 12 insertions(+), 5 deletions(-) diff --git a/lib/keystone b/lib/keystone index 2edd137dbb..26b7ed638a 100644 --- a/lib/keystone +++ b/lib/keystone @@ -3,11 +3,12 @@ # Dependencies: # ``functions`` file +# ``DEST``, ``STACK_USER`` +# ``IDENTITY_API_VERSION`` # ``BASE_SQL_CONN`` # ``SERVICE_HOST``, ``SERVICE_PROTOCOL`` # ``SERVICE_TOKEN`` # ``S3_SERVICE_PORT`` (template backend only) -# ``STACK_USER`` # ``stack.sh`` calls the entry points in this order: # @@ -249,9 +250,9 @@ create_keystone_accounts() { keystone endpoint-create \ --region RegionOne \ --service_id $KEYSTONE_SERVICE \ - --publicurl "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0" \ - --adminurl "$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v2.0" \ - --internalurl "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0" + --publicurl "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v$IDENTITY_API_VERSION" \ + --adminurl "$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v$IDENTITY_API_VERSION" \ + --internalurl "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v$IDENTITY_API_VERSION" fi } @@ -302,7 +303,7 @@ function start_keystone() { # Start Keystone in a screen window screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone-all --config-file $KEYSTONE_CONF $KEYSTONE_LOG_CONFIG -d --debug" echo "Waiting for keystone to start..." - if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= curl -s http://$SERVICE_HOST:$service_port/v2.0/ >/dev/null; do sleep 1; done"; then + if ! timeout $SERVICE_TIMEOUT sh -c "while ! 
http_proxy= curl -s http://$SERVICE_HOST:$service_port/v$IDENTITY_API_VERSION/ >/dev/null; do sleep 1; done"; then die $LINENO "keystone did not start" fi diff --git a/openrc b/openrc index f1026a50a4..1321ce7d9f 100644 --- a/openrc +++ b/openrc @@ -75,6 +75,9 @@ export OS_AUTH_URL=$SERVICE_PROTOCOL://$SERVICE_HOST:5000/v2.0 # Set the pointer to our CA certificate chain. Harmless if TLS is not used. export OS_CACERT=$INT_CA_DIR/ca-chain.pem +# Identity API version +export OS_IDENTITY_API_VERSION=${IDENTITY_API_VERSION:-2.0} + # Currently novaclient needs you to specify the *compute api* version. This # needs to match the config of your catalog returned by Keystone. export NOVA_VERSION=${NOVA_VERSION:-1.1} diff --git a/stackrc b/stackrc index 49cf0266ad..b998def67b 100644 --- a/stackrc +++ b/stackrc @@ -37,6 +37,9 @@ ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,n-cond,cinder,c-s # Set the default Nova APIs to enable NOVA_ENABLED_APIS=ec2,osapi_compute,metadata +# Configure Identity API version: 2.0, 3 +IDENTITY_API_VERSION=2.0 + # Whether to use 'dev mode' for screen windows. Dev mode works by # stuffing text into the screen windows so that a developer can use # ctrl-c, up-arrow, enter to restart the service. Starting services From 245a431374183639962f1245002b4d19ccc54ec0 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Fri, 28 Jun 2013 09:16:08 -0500 Subject: [PATCH 0179/4704] Set the correct URLs in the version return data The URLs advertised in the data returned by keystone's '/' route default to localhost and is not usable from off-host. Not that anything in DevStack uses it (yet). 
Change-Id: I049789f568eff48c1abb0678c3ac0ae8a8960c64 --- lib/keystone | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/keystone b/lib/keystone index 2edd137dbb..d07b34f797 100644 --- a/lib/keystone +++ b/lib/keystone @@ -119,6 +119,10 @@ function configure_keystone() { iniset $KEYSTONE_CONF identity driver "keystone.identity.backends.ldap.Identity" fi + # Set the URL advertised in the ``versions`` structure returned by the '/' route + iniset $KEYSTONE_CONF DEFAULT public_endpoint "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:%(public_port)s/" + iniset $KEYSTONE_CONF DEFAULT admin_endpoint "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:%(admin_port)s/" + if is_service_enabled tls-proxy; then # Set the service ports for a proxy to take the originals iniset $KEYSTONE_CONF DEFAULT public_port $KEYSTONE_SERVICE_PORT_INT From 4640026cc1077232f609caf24c42a7dd477e3f68 Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Sun, 30 Jun 2013 04:32:27 -0700 Subject: [PATCH 0180/4704] Fix spelling mistakes git ls-files | misspellings -f - Source: https://github.com/lyda/misspell-check Change-Id: I7d63578734ca5cc1ec865a7d024c1a013d1ada87 --- HACKING.rst | 2 +- exerciserc | 4 ++-- exercises/boot_from_volume.sh | 2 +- exercises/bundle.sh | 2 +- exercises/client-args.sh | 4 ++-- exercises/client-env.sh | 4 ++-- exercises/euca.sh | 4 ++-- exercises/floating_ips.sh | 2 +- exercises/horizon.sh | 2 +- exercises/quantum-adv-test.sh | 2 +- exercises/sec_groups.sh | 2 +- exercises/swift.sh | 2 +- lib/tempest | 4 ++-- openrc | 2 +- samples/local.sh | 2 +- tools/xen/scripts/install_ubuntu_template.sh | 2 +- 16 files changed, 21 insertions(+), 21 deletions(-) diff --git a/HACKING.rst b/HACKING.rst index 77194a3d41..3fef9509e4 100644 --- a/HACKING.rst +++ b/HACKING.rst @@ -174,7 +174,7 @@ These scripts are executed serially by ``exercise.sh`` in testing situations. 
the script on non-zero exit codes:: # This script exits on an error so that errors don't compound and you see - # only the first error that occured. + # only the first error that occurred. set -o errexit # Print the commands being run so that we can see the command that triggers diff --git a/exerciserc b/exerciserc index c26ec2ce95..9105fe3331 100644 --- a/exerciserc +++ b/exerciserc @@ -21,10 +21,10 @@ export RUNNING_TIMEOUT=${RUNNING_TIMEOUT:-$(($BOOT_TIMEOUT + $ACTIVE_TIMEOUT))} # Max time to wait for a vm to terminate export TERMINATE_TIMEOUT=${TERMINATE_TIMEOUT:-30} -# Max time to wait for a euca-volume command to propogate +# Max time to wait for a euca-volume command to propagate export VOLUME_TIMEOUT=${VOLUME_TIMEOUT:-30} -# Max time to wait for a euca-delete command to propogate +# Max time to wait for a euca-delete command to propagate export VOLUME_DELETE_TIMEOUT=${SNAPSHOT_DELETE_TIMEOUT:-60} # The size of the volume we want to boot from; some storage back-ends diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh index 14d00492f6..358b3d2579 100755 --- a/exercises/boot_from_volume.sh +++ b/exercises/boot_from_volume.sh @@ -11,7 +11,7 @@ echo "Begin DevStack Exercise: $0" echo "*********************************************************************" # This script exits on an error so that errors don't compound and you see -# only the first error that occured. +# only the first error that occurred. set -o errexit # Print the commands being run so that we can see the command that triggers diff --git a/exercises/bundle.sh b/exercises/bundle.sh index dce36aa31f..b83678ab1f 100755 --- a/exercises/bundle.sh +++ b/exercises/bundle.sh @@ -10,7 +10,7 @@ echo "Begin DevStack Exercise: $0" echo "*********************************************************************" # This script exits on an error so that errors don't compound and you see -# only the first error that occured. +# only the first error that occurred. 
set -o errexit # Print the commands being run so that we can see the command that triggers diff --git a/exercises/client-args.sh b/exercises/client-args.sh index 28f4123863..1e68042cec 100755 --- a/exercises/client-args.sh +++ b/exercises/client-args.sh @@ -2,14 +2,14 @@ # **client-args.sh** -# Test OpenStack client authentication aguemnts handling +# Test OpenStack client authentication arguments handling echo "*********************************************************************" echo "Begin DevStack Exercise: $0" echo "*********************************************************************" # This script exits on an error so that errors don't compound and you see -# only the first error that occured. +# only the first error that occurred. set -o errexit # Print the commands being run so that we can see the command that triggers diff --git a/exercises/client-env.sh b/exercises/client-env.sh index 147fdfcfea..6c6fe12282 100755 --- a/exercises/client-env.sh +++ b/exercises/client-env.sh @@ -2,14 +2,14 @@ # **client-env.sh** -# Test OpenStack client enviroment variable handling +# Test OpenStack client environment variable handling echo "*********************************************************************" echo "Begin DevStack Exercise: $0" echo "*********************************************************************" # This script exits on an error so that errors don't compound and you see -# only the first error that occured. +# only the first error that occurred. set -o errexit # Print the commands being run so that we can see the command that triggers diff --git a/exercises/euca.sh b/exercises/euca.sh index 7c590d09d4..ac21b6bf79 100755 --- a/exercises/euca.sh +++ b/exercises/euca.sh @@ -10,7 +10,7 @@ echo "Begin DevStack Exercise: $0" echo "*********************************************************************" # This script exits on an error so that errors don't compound and you see -# only the first error that occured. +# only the first error that occurred. 
set -o errexit # Print the commands being run so that we can see the command that triggers @@ -90,7 +90,7 @@ if [[ "$ENABLED_SERVICES" =~ "c-vol" ]]; then # Test volume has become available if ! timeout $RUNNING_TIMEOUT sh -c "while ! euca-describe-volumes $VOLUME | grep -q available; do sleep 1; done"; then - die $LINENO "volume didnt become available within $RUNNING_TIMEOUT seconds" + die $LINENO "volume didn't become available within $RUNNING_TIMEOUT seconds" fi # Attach volume to an instance diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh index ad11a6b793..b741efbeba 100755 --- a/exercises/floating_ips.sh +++ b/exercises/floating_ips.sh @@ -9,7 +9,7 @@ echo "Begin DevStack Exercise: $0" echo "*********************************************************************" # This script exits on an error so that errors don't compound and you see -# only the first error that occured. +# only the first error that occurred. set -o errexit # Print the commands being run so that we can see the command that triggers diff --git a/exercises/horizon.sh b/exercises/horizon.sh index 5d778c9899..d62ad52123 100755 --- a/exercises/horizon.sh +++ b/exercises/horizon.sh @@ -9,7 +9,7 @@ echo "Begin DevStack Exercise: $0" echo "*********************************************************************" # This script exits on an error so that errors don't compound and you see -# only the first error that occured. +# only the first error that occurred. set -o errexit # Print the commands being run so that we can see the command that triggers diff --git a/exercises/quantum-adv-test.sh b/exercises/quantum-adv-test.sh index 34f4f62312..abec5e4a1b 100755 --- a/exercises/quantum-adv-test.sh +++ b/exercises/quantum-adv-test.sh @@ -10,7 +10,7 @@ echo "Begin DevStack Exercise: $0" echo "*********************************************************************" # This script exits on an error so that errors don't compound and you see -# only the first error that occured. 
+# only the first error that occurred. set -o errtrace diff --git a/exercises/sec_groups.sh b/exercises/sec_groups.sh index b73afdfd09..6b67291cde 100755 --- a/exercises/sec_groups.sh +++ b/exercises/sec_groups.sh @@ -9,7 +9,7 @@ echo "Begin DevStack Exercise: $0" echo "*********************************************************************" # This script exits on an error so that errors don't compound and you see -# only the first error that occured. +# only the first error that occurred. set -o errexit # Print the commands being run so that we can see the command that triggers diff --git a/exercises/swift.sh b/exercises/swift.sh index c4ec3e9095..b9f1b566bb 100755 --- a/exercises/swift.sh +++ b/exercises/swift.sh @@ -9,7 +9,7 @@ echo "Begin DevStack Exercise: $0" echo "*********************************************************************" # This script exits on an error so that errors don't compound and you see -# only the first error that occured. +# only the first error that occurred. set -o errexit # Print the commands being run so that we can see the command that triggers diff --git a/lib/tempest b/lib/tempest index 277c68abcd..f3dcfbb569 100644 --- a/lib/tempest +++ b/lib/tempest @@ -3,7 +3,7 @@ # Dependencies: # ``functions`` file -# ``lib/nova`` service is runing +# ``lib/nova`` service is running # # - ``DEST``, ``FILES`` # - ``ADMIN_PASSWORD`` @@ -78,7 +78,7 @@ function configure_tempest() { # sudo python setup.py deploy # This function exits on an error so that errors don't compound and you see - # only the first error that occured. + # only the first error that occurred. errexit=$(set +o | grep errexit) set -o errexit diff --git a/openrc b/openrc index f1026a50a4..adf92b973c 100644 --- a/openrc +++ b/openrc @@ -37,7 +37,7 @@ source $RC_DIR/lib/tls # The introduction of Keystone to the OpenStack ecosystem has standardized the # term **tenant** as the entity that owns resources. 
In some places references # still exist to the original Nova term **project** for this use. Also, -# **tenant_name** is prefered to **tenant_id**. +# **tenant_name** is preferred to **tenant_id**. export OS_TENANT_NAME=${OS_TENANT_NAME:-demo} # In addition to the owning entity (tenant), nova stores the entity performing diff --git a/samples/local.sh b/samples/local.sh index 590152593d..970cbb97e0 100755 --- a/samples/local.sh +++ b/samples/local.sh @@ -1,7 +1,7 @@ #!/usr/bin/env bash # Sample ``local.sh`` for user-configurable tasks to run automatically -# at the sucessful conclusion of ``stack.sh``. +# at the successful conclusion of ``stack.sh``. # NOTE: Copy this file to the root ``devstack`` directory for it to # work properly. diff --git a/tools/xen/scripts/install_ubuntu_template.sh b/tools/xen/scripts/install_ubuntu_template.sh index b7a8eff952..5cbe2ac6db 100755 --- a/tools/xen/scripts/install_ubuntu_template.sh +++ b/tools/xen/scripts/install_ubuntu_template.sh @@ -37,7 +37,7 @@ fi builtin_name="Debian Squeeze 6.0 (32-bit)" builtin_uuid=$(xe template-list name-label="$builtin_name" --minimal) if [[ -z $builtin_uuid ]]; then - echo "Cant find the Debian Squeeze 32bit template on your XenServer." + echo "Can't find the Debian Squeeze 32bit template on your XenServer." exit 1 fi From 303965768887ef75895c9890be1d8c22d1faf935 Mon Sep 17 00:00:00 2001 From: Steven Dake Date: Sun, 30 Jun 2013 16:11:54 -0700 Subject: [PATCH 0181/4704] Enable Fedora 19 on DevStack Fedora uses a special PROMPT_COMMAND for screen terminal types, which are the default with DevStack. The PROMPT_COMMAND interacts in a negative way with the -t and -X and -p flags, causing DevStack not to work. To solve this problem, this patch forces PROMPT_COMMAND to default to /bin/true, triggering no changes to the window title used within screen. Also this patch enables F19 as a non-FORCE distribution. 
Change-Id: I2414d9e28dd95b69272e132163b29ed83f73b2f6 Fixes: bug #1196340 --- functions | 2 ++ stack.sh | 3 ++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/functions b/functions index 70ab999676..06d7e7b38b 100644 --- a/functions +++ b/functions @@ -1020,6 +1020,8 @@ function screen_rc { echo "sessionname $SCREEN_NAME" > $SCREENRC # Set a reasonable statusbar echo "hardstatus alwayslastline '$SCREEN_HARDSTATUS'" >> $SCREENRC + # Some distributions override PROMPT_COMMAND for the screen terminal type - turn that off + echo "setenv PROMPT_COMMAND /bin/true" >> $SCREENRC echo "screen -t shell bash" >> $SCREENRC fi # If this service doesn't already exist in the screenrc file diff --git a/stack.sh b/stack.sh index 57b8529e06..05b53afee1 100755 --- a/stack.sh +++ b/stack.sh @@ -109,7 +109,7 @@ disable_negated_services # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -if [[ ! ${DISTRO} =~ (oneiric|precise|quantal|raring|saucy|7.0|wheezy|sid|testing|jessie|f16|f17|f18|opensuse-12.2|rhel6) ]]; then +if [[ ! 
${DISTRO} =~ (oneiric|precise|quantal|raring|saucy|7.0|wheezy|sid|testing|jessie|f16|f17|f18|f19|opensuse-12.2|rhel6) ]]; then echo "WARNING: this script has not been tested on $DISTRO" if [[ "$FORCE" != "yes" ]]; then die $LINENO "If you wish to run this script anyway run with FORCE=yes" @@ -830,6 +830,7 @@ if [[ "$USE_SCREEN" == "True" ]]; then SCREEN_HARDSTATUS='%{= .} %-Lw%{= .}%> %n%f %t*%{= .}%+Lw%< %-=%{g}(%{d}%H/%l%{g})' fi screen -r $SCREEN_NAME -X hardstatus alwayslastline "$SCREEN_HARDSTATUS" + screen -r $SCREEN_NAME -X setenv PROMPT_COMMAND /bin/true fi # Clear screen rc file From 056df827ff3f8d40eeeedea3d09daae86982e5b5 Mon Sep 17 00:00:00 2001 From: Jason Dillaman Date: Mon, 1 Jul 2013 08:52:13 -0400 Subject: [PATCH 0182/4704] Set QPID_HOST parameter to better support multi-node mode When deploying OpenStack in a multi-node configuration, the Qpid clients need the hostname of the potentially remote Qpid server in order to permit the necessary RPC communication between components. Fixes bug #1196521 Change-Id: Iee3c3747cedea9488ec345e78f8eddbc6e850573 --- lib/rpc_backend | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/rpc_backend b/lib/rpc_backend index 4b04053bac..462e6cc913 100644 --- a/lib/rpc_backend +++ b/lib/rpc_backend @@ -170,8 +170,9 @@ function iniset_rpc_backend() { # Set MATCHMAKER_REDIS_HOST if running multi-node. 
MATCHMAKER_REDIS_HOST=${MATCHMAKER_REDIS_HOST:-127.0.0.1} iniset $file matchmaker_redis host $MATCHMAKER_REDIS_HOST - elif is_service_enabled qpid; then + elif is_service_enabled qpid || [ -n "$QPID_HOST" ]; then iniset $file $section rpc_backend ${package}.openstack.common.rpc.impl_qpid + iniset $file $section qpid_hostname $QPID_HOST if is_ubuntu; then QPID_PASSWORD=`sudo strings /etc/qpid/qpidd.sasldb | grep -B1 admin | head -1` iniset $file $section qpid_password $QPID_PASSWORD From 14625c288742a6d7532986e7dde1adb8b021c884 Mon Sep 17 00:00:00 2001 From: Kevin Benton Date: Tue, 18 Jun 2013 15:58:53 -0700 Subject: [PATCH 0183/4704] Adds support for the Indigo Virtual Switch Allows users to specify which virtual switch Quantum should use and inform Nova to use. It configures the quantum dhcp agent interface driver to bind to the correct switch and sets the vif_type that the BigSwitch/Floodlight plugin will send to Nova. Change-Id: I077a9ce8ab205e2949e0a438307f7da46a8a247d Implements: blueprint ivs-support --- lib/quantum_plugins/bigswitch_floodlight | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/lib/quantum_plugins/bigswitch_floodlight b/lib/quantum_plugins/bigswitch_floodlight index 11997457bc..cae8882ed7 100644 --- a/lib/quantum_plugins/bigswitch_floodlight +++ b/lib/quantum_plugins/bigswitch_floodlight @@ -44,13 +44,23 @@ function quantum_plugin_configure_plugin_agent() { function quantum_plugin_configure_service() { iniset /$Q_PLUGIN_CONF_FILE restproxy servers $BS_FL_CONTROLLERS_PORT iniset /$Q_PLUGIN_CONF_FILE restproxy servertimeout $BS_FL_CONTROLLER_TIMEOUT + if [ "$BS_FL_VIF_DRIVER" = "ivs" ] + then + iniset /$Q_PLUGIN_CONF_FILE nova vif_type ivs + fi } function quantum_plugin_setup_interface_driver() { local conf_file=$1 - iniset $conf_file DEFAULT interface_driver quantum.agent.linux.interface.OVSInterfaceDriver + if [ "$BS_FL_VIF_DRIVER" = "ivs" ] + then + iniset $conf_file DEFAULT interface_driver 
quantum.agent.linux.interface.IVSInterfaceDriver + else + iniset $conf_file DEFAULT interface_driver quantum.agent.linux.interface.OVSInterfaceDriver + fi } + function has_quantum_plugin_security_group() { # 1 means False here return 1 From a9787d077f2c06ff2804e6b84c6f769d507a971f Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 2 Jul 2013 11:30:40 +0200 Subject: [PATCH 0184/4704] Enable all notifications update from Nova for Ceilometer This will allow Ceilometer to grab even more events about instances states changes. Change-Id: Ie28258607695caf96dcfa292cb74355aced85ccf --- lib/nova | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/nova b/lib/nova index afc540e7c6..46bc70f166 100644 --- a/lib/nova +++ b/lib/nova @@ -490,6 +490,8 @@ function create_nova_conf() { if is_service_enabled ceilometer; then iniset $NOVA_CONF DEFAULT instance_usage_audit "True" iniset $NOVA_CONF DEFAULT instance_usage_audit_period "hour" + iniset $NOVA_CONF DEFAULT notify_on_state_change "vm_and_task_state" + iniset $NOVA_CONF DEFAULT notify_on_any_change "True" iniset_multiline $NOVA_CONF DEFAULT notification_driver "nova.openstack.common.notifier.rpc_notifier" "ceilometer.compute.nova_notifier" fi From 24d866ef45698454b31801b29719f612e7ac70a2 Mon Sep 17 00:00:00 2001 From: William Marshall Date: Tue, 2 Jul 2013 12:26:31 -0500 Subject: [PATCH 0185/4704] Set the scenario/img_dir option for tempest.conf The default value does not work unless devstack is installed in /opt/stack/new/devstack. This patch changes the img_dir option to point to the correct install location. 
Change-Id: Iea41d209dad10b2f9a7c97efd55c39a8d29347cc --- lib/tempest | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/tempest b/lib/tempest index 8b4ae0e725..0c91ac7f8a 100644 --- a/lib/tempest +++ b/lib/tempest @@ -273,6 +273,9 @@ function configure_tempest() { iniset $TEMPEST_CONF orchestration heat_available "True" fi + # Scenario + iniset $TEMPEST_CONF scenario img_dir "$FILES/images/cirros-0.3.1-x86_64-uec" + # Volume CINDER_MULTI_LVM_BACKEND=$(trueorfalse False $CINDER_MULTI_LVM_BACKEND) if [ $CINDER_MULTI_LVM_BACKEND == "True" ]; then From a6ed3dcf3cabbb841304499277dc141181ea6196 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Mon, 1 Jul 2013 22:49:31 +0200 Subject: [PATCH 0186/4704] lib/cinder comment The default volume backing size is ~10G now. Changing the comment to be more future-proof by including the shell variable name. Change-Id: I27879b240f3f8348fa069853de18e47871ce00d2 --- lib/cinder | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/cinder b/lib/cinder index f691b384bf..40a25baedc 100644 --- a/lib/cinder +++ b/lib/cinder @@ -340,14 +340,14 @@ function create_cinder_cache_dir() { } create_cinder_volume_group() { - # According to the CINDER_MULTI_LVM_BACKEND value, configure one or two default volumes + # According to the ``CINDER_MULTI_LVM_BACKEND`` value, configure one or two default volumes # group called ``stack-volumes`` (and ``stack-volumes2``) for the volume # service if it (they) does (do) not yet exist. If you don't wish to use a # file backed volume group, create your own volume group called ``stack-volumes`` # and ``stack-volumes2`` before invoking ``stack.sh``. # - # By default, the two backing files are 5G in size, and are stored in - # ``/opt/stack/data``. + # The two backing files are ``VOLUME_BACKING_FILE_SIZE`` in size, and they are stored in + # the ``DATA_DIR``. if ! 
sudo vgs $VOLUME_GROUP; then if [ -z "$VOLUME_BACKING_DEVICE" ]; then From 19570302712d7ed252cf4303e39490d4e3e46f92 Mon Sep 17 00:00:00 2001 From: Vincent Untz Date: Thu, 4 Jul 2013 09:45:32 +0200 Subject: [PATCH 0187/4704] On openSUSE, qemu-tools provides qemu-img Change-Id: I6ab0b7dd871acd6103b15b5fe10350667b72d1a8 --- files/rpms-suse/cinder | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/rpms-suse/cinder b/files/rpms-suse/cinder index 61b9f253ec..8f4a5a7998 100644 --- a/files/rpms-suse/cinder +++ b/files/rpms-suse/cinder @@ -1,3 +1,3 @@ lvm2 tgt -qemu-img +qemu-tools From e7e51ac5e6a2c61afab11d30d5dd034e66877734 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Thu, 4 Jul 2013 14:10:46 -0400 Subject: [PATCH 0188/4704] Directly install requirements with pip Rather than parsing then feeding the pip requirements file to pip, just have pip consume it directly. Change-Id: I17bbe4324e6957c7165bc0f340ddae1e51039471 --- functions | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/functions b/functions index 06d7e7b38b..fa0e7248da 100644 --- a/functions +++ b/functions @@ -1087,12 +1087,13 @@ function setup_develop() { else SUDO_CMD="sudo" fi + for reqs_file in $1/requirements.txt $1/tools/pip-requires ; do + if [ -f $reqs_file ] ; then + pip_install -r $reqs_file + fi + done (cd $1; \ python setup.py egg_info; \ - raw_links=$(awk '/^.+/ {print "-f " $1}' *.egg-info/dependency_links.txt); \ - depend_links=$(echo $raw_links | xargs); \ - require_file=$([ ! -r *-info/requires.txt ] || echo "-r *-info/requires.txt"); \ - pip_install $require_file $depend_links; \ $SUDO_CMD \ HTTP_PROXY=$http_proxy \ HTTPS_PROXY=$https_proxy \ From 0a03806e281d1f197e54d48318e4a7bba3eab77c Mon Sep 17 00:00:00 2001 From: "James E. Blair" Date: Fri, 5 Jul 2013 11:42:07 -0700 Subject: [PATCH 0189/4704] Rename quantum repos to neutron Only the repos are renamed. This will coincide with the repo rename operation on July 6. 
See https://wiki.openstack.org/wiki/Network/neutron-renaming Change-Id: I99fe1f80ffc7a54b958b709495b90f8d94d41376 --- lib/quantum | 4 ++-- stackrc | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/lib/quantum b/lib/quantum index afe99c48cb..a8708653cb 100644 --- a/lib/quantum +++ b/lib/quantum @@ -72,8 +72,8 @@ PRIVATE_SUBNET_NAME=${PRIVATE_SUBNET_NAME:-"private-subnet"} PUBLIC_SUBNET_NAME=${PUBLIC_SUBNET_NAME:-"public-subnet"} # Set up default directories -QUANTUM_DIR=$DEST/quantum -QUANTUMCLIENT_DIR=$DEST/python-quantumclient +QUANTUM_DIR=$DEST/neutron +QUANTUMCLIENT_DIR=$DEST/python-neutronclient QUANTUM_AUTH_CACHE_DIR=${QUANTUM_AUTH_CACHE_DIR:-/var/cache/quantum} QUANTUM_CONF_DIR=/etc/quantum diff --git a/stackrc b/stackrc index 49cf0266ad..fc1e01d62a 100644 --- a/stackrc +++ b/stackrc @@ -118,11 +118,11 @@ PBR_REPO=${PBR_REPO:-${GIT_BASE}/openstack-dev/pbr.git} PBR_BRANCH=${PBR_BRANCH:-master} # quantum service -QUANTUM_REPO=${QUANTUM_REPO:-${GIT_BASE}/openstack/quantum.git} +QUANTUM_REPO=${QUANTUM_REPO:-${GIT_BASE}/openstack/neutron.git} QUANTUM_BRANCH=${QUANTUM_BRANCH:-master} # quantum client -QUANTUMCLIENT_REPO=${QUANTUMCLIENT_REPO:-${GIT_BASE}/openstack/python-quantumclient.git} +QUANTUMCLIENT_REPO=${QUANTUMCLIENT_REPO:-${GIT_BASE}/openstack/python-neutronclient.git} QUANTUMCLIENT_BRANCH=${QUANTUMCLIENT_BRANCH:-master} # storage service From b05c876994183b6a1d53dfbdcea2ca4a7743035f Mon Sep 17 00:00:00 2001 From: Mark McClain Date: Sat, 6 Jul 2013 23:29:39 -0400 Subject: [PATCH 0190/4704] update for name change to Neutron Note: Nova and Horizon are not updated until those projects have migrated. 
Change-Id: I256ef20e7caadd9c96e6dd908c5d8b69ca5c4aeb --- HACKING.rst | 2 +- README.md | 14 +- clean.sh | 4 +- exercises/boot_from_volume.sh | 6 +- exercises/euca.sh | 6 +- exercises/floating_ips.sh | 10 +- exercises/quantum-adv-test.sh | 28 +- exercises/volumes.sh | 6 +- functions | 20 +- lib/keystone | 6 +- lib/{quantum => neutron} | 515 +++++++++--------- lib/neutron_plugins/README.md | 38 ++ lib/neutron_plugins/bigswitch_floodlight | 74 +++ lib/neutron_plugins/brocade | 59 ++ .../cisco | 86 +-- .../linuxbridge | 18 +- .../linuxbridge_agent | 34 +- lib/{quantum_plugins => neutron_plugins}/ml2 | 22 +- lib/{quantum_plugins => neutron_plugins}/nec | 56 +- .../nicira | 40 +- .../openvswitch | 20 +- .../openvswitch_agent | 44 +- .../ovs_base | 34 +- .../plumgrid | 22 +- lib/neutron_plugins/ryu | 80 +++ .../services/loadbalancer | 18 +- .../README.md | 8 +- .../bigswitch_floodlight | 2 +- .../nicira | 0 .../ryu | 22 +- .../trema | 0 lib/nova | 4 +- lib/quantum_plugins/README.md | 38 -- lib/quantum_plugins/bigswitch_floodlight | 74 --- lib/quantum_plugins/brocade | 59 -- lib/quantum_plugins/ryu | 80 --- lib/tempest | 10 +- samples/localrc | 2 +- stack.sh | 56 +- stackrc | 18 +- tools/xen/README.md | 2 +- tools/xen/install_os_domU.sh | 12 +- unstack.sh | 12 +- 43 files changed, 834 insertions(+), 827 deletions(-) rename lib/{quantum => neutron} (58%) create mode 100644 lib/neutron_plugins/README.md create mode 100644 lib/neutron_plugins/bigswitch_floodlight create mode 100644 lib/neutron_plugins/brocade rename lib/{quantum_plugins => neutron_plugins}/cisco (79%) rename lib/{quantum_plugins => neutron_plugins}/linuxbridge (78%) rename lib/{quantum_plugins => neutron_plugins}/linuxbridge_agent (69%) rename lib/{quantum_plugins => neutron_plugins}/ml2 (78%) rename lib/{quantum_plugins => neutron_plugins}/nec (65%) rename lib/{quantum_plugins => neutron_plugins}/nicira (79%) rename lib/{quantum_plugins => neutron_plugins}/openvswitch (76%) rename lib/{quantum_plugins => 
neutron_plugins}/openvswitch_agent (81%) rename lib/{quantum_plugins => neutron_plugins}/ovs_base (71%) rename lib/{quantum_plugins => neutron_plugins}/plumgrid (53%) create mode 100644 lib/neutron_plugins/ryu rename lib/{quantum_plugins => neutron_plugins}/services/loadbalancer (68%) rename lib/{quantum_thirdparty => neutron_thirdparty}/README.md (78%) rename lib/{quantum_thirdparty => neutron_thirdparty}/bigswitch_floodlight (97%) rename lib/{quantum_thirdparty => neutron_thirdparty}/nicira (100%) rename lib/{quantum_thirdparty => neutron_thirdparty}/ryu (75%) rename lib/{quantum_thirdparty => neutron_thirdparty}/trema (100%) delete mode 100644 lib/quantum_plugins/README.md delete mode 100644 lib/quantum_plugins/bigswitch_floodlight delete mode 100644 lib/quantum_plugins/brocade delete mode 100644 lib/quantum_plugins/ryu diff --git a/HACKING.rst b/HACKING.rst index 3fef9509e4..dd665a2304 100644 --- a/HACKING.rst +++ b/HACKING.rst @@ -110,7 +110,7 @@ follows: * Global service configuration like ``ENABLED_SERVICES`` * Variables used by multiple services that do not have a clear owner, i.e. ``VOLUME_BACKING_FILE_SIZE`` (nova-volumes and cinder) or ``PUBLIC_NETWORK_NAME`` - (nova-network and quantum) + (nova-network and neutron) * Variables that can not be cleanly declared in a project file due to dependency ordering, i.e. the order of sourcing the project files can not be changed for other reasons but the earlier file needs to dereference a diff --git a/README.md b/README.md index 905a54d5fc..6fcd01d7ce 100644 --- a/README.md +++ b/README.md @@ -104,11 +104,11 @@ If you are enabling `swift3` in `ENABLED_SERVICES` devstack will install the swi Only Swift proxy server is launched in the screen session all other services are started in background and managed by `swift-init` tool. 
-# Quantum +# Neutron Basic Setup -In order to enable Quantum a single node setup, you'll need the following settings in your `localrc` : +In order to enable Neutron a single node setup, you'll need the following settings in your `localrc` : disable_service n-net enable_service q-svc @@ -116,13 +116,13 @@ In order to enable Quantum a single node setup, you'll need the following settin enable_service q-dhcp enable_service q-l3 enable_service q-meta - enable_service quantum + enable_service neutron # Optional, to enable tempest configuration as part of devstack enable_service tempest Then run `stack.sh` as normal. -devstack supports adding specific Quantum configuration flags to both the Open vSwitch and LinuxBridge plugin configuration files. To make use of this feature, the following variables are defined and can be configured in your `localrc` file: +devstack supports adding specific Neutron configuration flags to both the Open vSwitch and LinuxBridge plugin configuration files. To make use of this feature, the following variables are defined and can be configured in your `localrc` file: Variable Name Plugin Config File Section Modified ------------------------------------------------------------------------------------- @@ -144,7 +144,7 @@ If tempest has been successfully configured, a basic set of smoke tests can be r # Multi-Node Setup -A more interesting setup involves running multiple compute nodes, with Quantum networks connecting VMs on different compute nodes. +A more interesting setup involves running multiple compute nodes, with Neutron networks connecting VMs on different compute nodes. 
You should run at least one "controller node", which should have a `stackrc` that includes at least: disable_service n-net @@ -153,7 +153,7 @@ You should run at least one "controller node", which should have a `stackrc` tha enable_service q-dhcp enable_service q-l3 enable_service q-meta - enable_service quantum + enable_service neutron You likely want to change your `localrc` to run a scheduler that will balance VMs across hosts: @@ -161,7 +161,7 @@ You likely want to change your `localrc` to run a scheduler that will balance VM You can then run many compute nodes, each of which should have a `stackrc` which includes the following, with the IP address of the above controller node: - ENABLED_SERVICES=n-cpu,rabbit,g-api,quantum,q-agt + ENABLED_SERVICES=n-cpu,rabbit,g-api,neutron,q-agt SERVICE_HOST=[IP of controller node] MYSQL_HOST=$SERVICE_HOST RABBIT_HOST=$SERVICE_HOST diff --git a/clean.sh b/clean.sh index 758947a02e..493c449fca 100755 --- a/clean.sh +++ b/clean.sh @@ -42,7 +42,7 @@ source $TOP_DIR/lib/cinder source $TOP_DIR/lib/swift source $TOP_DIR/lib/ceilometer source $TOP_DIR/lib/heat -source $TOP_DIR/lib/quantum +source $TOP_DIR/lib/neutron source $TOP_DIR/lib/baremetal source $TOP_DIR/lib/ldap @@ -60,7 +60,7 @@ cleanup_cinder cleanup_glance cleanup_keystone cleanup_nova -cleanup_quantum +cleanup_neutron cleanup_swift # cinder doesn't always clean up the volume group as it might be used elsewhere... 
diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh index 358b3d2579..18147325bb 100755 --- a/exercises/boot_from_volume.sh +++ b/exercises/boot_from_volume.sh @@ -32,9 +32,9 @@ source $TOP_DIR/functions # Import configuration source $TOP_DIR/openrc -# Import quantum functions if needed -if is_service_enabled quantum; then - source $TOP_DIR/lib/quantum +# Import neutron functions if needed +if is_service_enabled neutron; then + source $TOP_DIR/lib/neutron fi # Import exercise configuration diff --git a/exercises/euca.sh b/exercises/euca.sh index ac21b6bf79..eec8636fa3 100755 --- a/exercises/euca.sh +++ b/exercises/euca.sh @@ -33,9 +33,9 @@ source $TOP_DIR/functions # Import EC2 configuration source $TOP_DIR/eucarc -# Import quantum functions if needed -if is_service_enabled quantum; then - source $TOP_DIR/lib/quantum +# Import neutron functions if needed +if is_service_enabled neutron; then + source $TOP_DIR/lib/neutron fi # Import exercise configuration diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh index b741efbeba..b22ef110d2 100755 --- a/exercises/floating_ips.sh +++ b/exercises/floating_ips.sh @@ -30,9 +30,9 @@ source $TOP_DIR/functions # Import configuration source $TOP_DIR/openrc -# Import quantum functions if needed -if is_service_enabled quantum; then - source $TOP_DIR/lib/quantum +# Import neutron functions if needed +if is_service_enabled neutron; then + source $TOP_DIR/lib/neutron fi # Import exercise configuration @@ -155,7 +155,7 @@ nova add-floating-ip $VM_UUID $FLOATING_IP || \ # Test we can ping our floating IP within ASSOCIATE_TIMEOUT seconds ping_check "$PUBLIC_NETWORK_NAME" $FLOATING_IP $ASSOCIATE_TIMEOUT -if ! is_service_enabled quantum; then +if ! 
is_service_enabled neutron; then # Allocate an IP from second floating pool TEST_FLOATING_IP=$(nova floating-ip-create $TEST_FLOATING_POOL | grep $TEST_FLOATING_POOL | get_field 1) die_if_not_set $LINENO TEST_FLOATING_IP "Failure creating floating IP in $TEST_FLOATING_POOL" @@ -179,7 +179,7 @@ fi # Clean up # -------- -if ! is_service_enabled quantum; then +if ! is_service_enabled neutron; then # Delete second floating IP nova floating-ip-delete $TEST_FLOATING_IP || \ die $LINENO "Failure deleting floating IP $TEST_FLOATING_IP" diff --git a/exercises/quantum-adv-test.sh b/exercises/quantum-adv-test.sh index abec5e4a1b..4367e2e3c1 100755 --- a/exercises/quantum-adv-test.sh +++ b/exercises/quantum-adv-test.sh @@ -1,9 +1,9 @@ #!/usr/bin/env bash # -# **quantum-adv-test.sh** +# **neutron-adv-test.sh** -# Perform integration testing of Nova and other components with Quantum. +# Perform integration testing of Nova and other components with Neutron. echo "*********************************************************************" echo "Begin DevStack Exercise: $0" @@ -43,16 +43,16 @@ source $TOP_DIR/functions # Import configuration source $TOP_DIR/openrc -# Import quantum functions -source $TOP_DIR/lib/quantum +# Import neutron functions +source $TOP_DIR/lib/neutron -# If quantum is not enabled we exit with exitcode 55, which means exercise is skipped. -quantum_plugin_check_adv_test_requirements || exit 55 +# If neutron is not enabled we exit with exitcode 55, which means exercise is skipped. 
+neutron_plugin_check_adv_test_requirements || exit 55 # Import exercise configuration source $TOP_DIR/exerciserc -# Quantum Settings +# Neutron Settings # ---------------- TENANTS="DEMO1" @@ -161,7 +161,7 @@ function get_role_id { function get_network_id { local NETWORK_NAME="$1" - local NETWORK_ID=`quantum net-list -F id -- --name=$NETWORK_NAME | awk "NR==4" | awk '{print $2}'` + local NETWORK_ID=`neutron net-list -F id -- --name=$NETWORK_NAME | awk "NR==4" | awk '{print $2}'` echo $NETWORK_ID } @@ -232,9 +232,9 @@ function create_network { source $TOP_DIR/openrc admin admin local TENANT_ID=$(get_tenant_id $TENANT) source $TOP_DIR/openrc $TENANT $TENANT - local NET_ID=$(quantum net-create --tenant_id $TENANT_ID $NET_NAME $EXTRA| grep ' id ' | awk '{print $4}' ) - quantum subnet-create --ip_version 4 --tenant_id $TENANT_ID --gateway $GATEWAY $NET_ID $CIDR - quantum-debug probe-create --device-owner compute $NET_ID + local NET_ID=$(neutron net-create --tenant_id $TENANT_ID $NET_NAME $EXTRA| grep ' id ' | awk '{print $4}' ) + neutron subnet-create --ip_version 4 --tenant_id $TENANT_ID --gateway $GATEWAY $NET_ID $CIDR + neutron-debug probe-create --device-owner compute $NET_ID source $TOP_DIR/openrc demo demo } @@ -320,10 +320,10 @@ function delete_network { local TENANT_ID=$(get_tenant_id $TENANT) #TODO(nati) comment out until l3-agent merged #for res in port subnet net router;do - for net_id in `quantum net-list -c id -c name | grep $NET_NAME | awk '{print $2}'`;do + for net_id in `neutron net-list -c id -c name | grep $NET_NAME | awk '{print $2}'`;do delete_probe $net_id - quantum subnet-list | grep $net_id | awk '{print $2}' | xargs -I% quantum subnet-delete % - quantum net-delete $net_id + neutron subnet-list | grep $net_id | awk '{print $2}' | xargs -I% neutron subnet-delete % + neutron net-delete $net_id done source $TOP_DIR/openrc demo demo } diff --git a/exercises/volumes.sh b/exercises/volumes.sh index 79136411ac..f574bb3463 100755 --- 
a/exercises/volumes.sh +++ b/exercises/volumes.sh @@ -30,9 +30,9 @@ source $TOP_DIR/functions # Import configuration source $TOP_DIR/openrc -# Import quantum functions if needed -if is_service_enabled quantum; then - source $TOP_DIR/lib/quantum +# Import neutron functions if needed +if is_service_enabled neutron; then + source $TOP_DIR/lib/neutron fi # Import exercise configuration diff --git a/functions b/functions index 06d7e7b38b..c461ca29dc 100644 --- a/functions +++ b/functions @@ -262,8 +262,8 @@ function get_packages() { file_to_parse="${file_to_parse} keystone" fi elif [[ $service == q-* ]]; then - if [[ ! $file_to_parse =~ quantum ]]; then - file_to_parse="${file_to_parse} quantum" + if [[ ! $file_to_parse =~ neutron ]]; then + file_to_parse="${file_to_parse} neutron" fi fi done @@ -717,7 +717,7 @@ function is_running() { # **cinder** returns true if any service enabled start with **c-** # **ceilometer** returns true if any service enabled start with **ceilometer** # **glance** returns true if any service enabled start with **g-** -# **quantum** returns true if any service enabled start with **q-** +# **neutron** returns true if any service enabled start with **q-** # **swift** returns true if any service enabled start with **s-** # For backward compatibility if we have **swift** in ENABLED_SERVICES all the # **s-** services will be enabled. This will be deprecated in the future. 
@@ -732,7 +732,7 @@ function is_service_enabled() { [[ ${service} == "cinder" && ${ENABLED_SERVICES} =~ "c-" ]] && return 0 [[ ${service} == "ceilometer" && ${ENABLED_SERVICES} =~ "ceilometer-" ]] && return 0 [[ ${service} == "glance" && ${ENABLED_SERVICES} =~ "g-" ]] && return 0 - [[ ${service} == "quantum" && ${ENABLED_SERVICES} =~ "q-" ]] && return 0 + [[ ${service} == "neutron" && ${ENABLED_SERVICES} =~ "q-" ]] && return 0 [[ ${service} == "swift" && ${ENABLED_SERVICES} =~ "s-" ]] && return 0 [[ ${service} == s-* && ${ENABLED_SERVICES} =~ "swift" ]] && return 0 done @@ -758,7 +758,7 @@ function _cleanup_service_list () { # enable_service qpid # # This function does not know about the special cases -# for nova, glance, and quantum built into is_service_enabled(). +# for nova, glance, and neutron built into is_service_enabled(). # Uses global ``ENABLED_SERVICES`` # enable_service service [service ...] function enable_service() { @@ -780,7 +780,7 @@ function enable_service() { # disable_service rabbit # # This function does not know about the special cases -# for nova, glance, and quantum built into is_service_enabled(). +# for nova, glance, and neutron built into is_service_enabled(). # Uses global ``ENABLED_SERVICES`` # disable_service service [service ...] 
function disable_service() { @@ -1330,8 +1330,8 @@ function zypper_install() { # Uses globals ``ENABLED_SERVICES`` # ping_check from-net ip boot-timeout expected function ping_check() { - if is_service_enabled quantum; then - _ping_check_quantum "$1" $2 $3 $4 + if is_service_enabled neutron; then + _ping_check_neutron "$1" $2 $3 $4 return fi _ping_check_novanet "$1" $2 $3 $4 @@ -1370,8 +1370,8 @@ function _ping_check_novanet() { # ssh_check net-name key-file floating-ip default-user active-timeout function ssh_check() { - if is_service_enabled quantum; then - _ssh_check_quantum "$1" $2 $3 $4 $5 + if is_service_enabled neutron; then + _ssh_check_neutron "$1" $2 $3 $4 $5 return fi _ssh_check_novanet "$1" $2 $3 $4 $5 diff --git a/lib/keystone b/lib/keystone index 2edd137dbb..4b93992cb9 100644 --- a/lib/keystone +++ b/lib/keystone @@ -151,12 +151,12 @@ function configure_keystone() { echo "catalog.RegionOne.object_store.name = Swift Service" >> $KEYSTONE_CATALOG fi - # Add quantum endpoints to service catalog if quantum is enabled - if is_service_enabled quantum; then + # Add neutron endpoints to service catalog if neutron is enabled + if is_service_enabled neutron; then echo "catalog.RegionOne.network.publicURL = http://%SERVICE_HOST%:$Q_PORT/" >> $KEYSTONE_CATALOG echo "catalog.RegionOne.network.adminURL = http://%SERVICE_HOST%:$Q_PORT/" >> $KEYSTONE_CATALOG echo "catalog.RegionOne.network.internalURL = http://%SERVICE_HOST%:$Q_PORT/" >> $KEYSTONE_CATALOG - echo "catalog.RegionOne.network.name = Quantum Service" >> $KEYSTONE_CATALOG + echo "catalog.RegionOne.network.name = Neutron Service" >> $KEYSTONE_CATALOG fi sed -e " diff --git a/lib/quantum b/lib/neutron similarity index 58% rename from lib/quantum rename to lib/neutron index a8708653cb..c28bd28fb0 100644 --- a/lib/quantum +++ b/lib/neutron @@ -1,5 +1,5 @@ -# lib/quantum -# functions - funstions specific to quantum +# lib/neutron +# functions - funstions specific to neutron # Dependencies: # ``functions`` file 
@@ -7,62 +7,62 @@ # ``stack.sh`` calls the entry points in this order: # -# install_quantum -# install_quantumclient -# install_quantum_agent_packages -# install_quantum_third_party -# configure_quantum -# init_quantum -# configure_quantum_third_party -# init_quantum_third_party -# start_quantum_third_party -# create_nova_conf_quantum -# start_quantum_service_and_check -# create_quantum_initial_network -# setup_quantum_debug -# start_quantum_agents +# install_neutron +# install_neutronclient +# install_neutron_agent_packages +# install_neutron_third_party +# configure_neutron +# init_neutron +# configure_neutron_third_party +# init_neutron_third_party +# start_neutron_third_party +# create_nova_conf_neutron +# start_neutron_service_and_check +# create_neutron_initial_network +# setup_neutron_debug +# start_neutron_agents # # ``unstack.sh`` calls the entry points in this order: # -# stop_quantum +# stop_neutron -# Functions in lib/quantum are classified into the following categories: +# Functions in lib/neutron are classified into the following categories: # # - entry points (called from stack.sh or unstack.sh) # - internal functions -# - quantum exercises +# - neutron exercises # - 3rd party programs -# Quantum Networking +# Neutron Networking # ------------------ -# Make sure that quantum is enabled in ``ENABLED_SERVICES``. If you want -# to run Quantum on this host, make sure that q-svc is also in +# Make sure that neutron is enabled in ``ENABLED_SERVICES``. If you want +# to run Neutron on this host, make sure that q-svc is also in # ``ENABLED_SERVICES``. # -# If you're planning to use the Quantum openvswitch plugin, set +# If you're planning to use the Neutron openvswitch plugin, set # ``Q_PLUGIN`` to "openvswitch" and make sure the q-agt service is enabled -# in ``ENABLED_SERVICES``. If you're planning to use the Quantum +# in ``ENABLED_SERVICES``. 
If you're planning to use the Neutron # linuxbridge plugin, set ``Q_PLUGIN`` to "linuxbridge" and make sure the # q-agt service is enabled in ``ENABLED_SERVICES``. # -# See "Quantum Network Configuration" below for additional variables +# See "Neutron Network Configuration" below for additional variables # that must be set in localrc for connectivity across hosts with -# Quantum. +# Neutron. # -# With Quantum networking the NETWORK_MANAGER variable is ignored. +# With Neutron networking the NETWORK_MANAGER variable is ignored. # # To enable specific configuration options for either the Open vSwitch or # LinuxBridge plugin, please see the top level README file under the -# Quantum section. +# Neutron section. # Save trace setting XTRACE=$(set +o | grep xtrace) set +o xtrace -# Quantum Network Configuration +# Neutron Network Configuration # ----------------------------- # Gateway and subnet defaults, in case they are not customized in localrc @@ -72,22 +72,29 @@ PRIVATE_SUBNET_NAME=${PRIVATE_SUBNET_NAME:-"private-subnet"} PUBLIC_SUBNET_NAME=${PUBLIC_SUBNET_NAME:-"public-subnet"} # Set up default directories -QUANTUM_DIR=$DEST/neutron -QUANTUMCLIENT_DIR=$DEST/python-neutronclient -QUANTUM_AUTH_CACHE_DIR=${QUANTUM_AUTH_CACHE_DIR:-/var/cache/quantum} +NEUTRON_DIR=$DEST/neutron +NEUTRONCLIENT_DIR=$DEST/python-neutronclient +NEUTRON_AUTH_CACHE_DIR=${NEUTRON_AUTH_CACHE_DIR:-/var/cache/neutron} + +# Support entry points installation of console scripts +if [[ -d $NEUTRON_DIR/bin/neutron-server ]]; then + NEUTRON_BIN_DIR=$NEUTRON_DIR/bin + else +NEUTRON_BIN_DIR=$(get_python_exec_prefix) +fi -QUANTUM_CONF_DIR=/etc/quantum -QUANTUM_CONF=$QUANTUM_CONF_DIR/quantum.conf -export QUANTUM_TEST_CONFIG_FILE=${QUANTUM_TEST_CONFIG_FILE:-"$QUANTUM_CONF_DIR/debug.ini"} +NEUTRON_CONF_DIR=/etc/neutron +NEUTRON_CONF=$NEUTRON_CONF_DIR/neutron.conf +export NEUTRON_TEST_CONFIG_FILE=${NEUTRON_TEST_CONFIG_FILE:-"$NEUTRON_CONF_DIR/debug.ini"} -# Default Quantum Plugin +# Default Neutron Plugin 
Q_PLUGIN=${Q_PLUGIN:-openvswitch} -# Default Quantum Port +# Default Neutron Port Q_PORT=${Q_PORT:-9696} -# Default Quantum Host +# Default Neutron Host Q_HOST=${Q_HOST:-$SERVICE_HOST} # Default admin username -Q_ADMIN_USERNAME=${Q_ADMIN_USERNAME:-quantum} +Q_ADMIN_USERNAME=${Q_ADMIN_USERNAME:-neutron} # Default auth strategy Q_AUTH_STRATEGY=${Q_AUTH_STRATEGY:-keystone} # Use namespace or not @@ -99,27 +106,27 @@ Q_USE_ROOTWRAP=${Q_USE_ROOTWRAP:-True} Q_META_DATA_IP=${Q_META_DATA_IP:-$SERVICE_HOST} # Allow Overlapping IP among subnets Q_ALLOW_OVERLAPPING_IP=${Q_ALLOW_OVERLAPPING_IP:-True} -# Use quantum-debug command +# Use neutron-debug command Q_USE_DEBUG_COMMAND=${Q_USE_DEBUG_COMMAND:-False} # The name of the default q-l3 router Q_ROUTER_NAME=${Q_ROUTER_NAME:-router1} # List of config file names in addition to the main plugin config file -# See _configure_quantum_common() for details about setting it up +# See _configure_neutron_common() for details about setting it up declare -a Q_PLUGIN_EXTRA_CONF_FILES -if is_service_enabled quantum; then - Q_RR_CONF_FILE=$QUANTUM_CONF_DIR/rootwrap.conf +if is_service_enabled neutron; then + Q_RR_CONF_FILE=$NEUTRON_CONF_DIR/rootwrap.conf if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then Q_RR_COMMAND="sudo" else - QUANTUM_ROOTWRAP=$(get_rootwrap_location quantum) - Q_RR_COMMAND="sudo $QUANTUM_ROOTWRAP $Q_RR_CONF_FILE" + NEUTRON_ROOTWRAP=$(get_rootwrap_location neutron) + Q_RR_COMMAND="sudo $NEUTRON_ROOTWRAP $Q_RR_CONF_FILE" fi # Provider Network Configurations # -------------------------------- - # The following variables control the Quantum openvswitch and + # The following variables control the Neutron openvswitch and # linuxbridge plugins' allocation of tenant networks and # availability of provider networks. 
If these are not configured # in ``localrc``, tenant networks will be local to the host (with no @@ -183,20 +190,20 @@ if is_service_enabled quantum; then OVS_ENABLE_TUNNELING=${OVS_ENABLE_TUNNELING:-$ENABLE_TENANT_TUNNELS} fi -# Quantum plugin specific functions +# Neutron plugin specific functions # --------------------------------- -# Please refer to ``lib/quantum_plugins/README.md`` for details. -source $TOP_DIR/lib/quantum_plugins/$Q_PLUGIN +# Please refer to ``lib/neutron_plugins/README.md`` for details. +source $TOP_DIR/lib/neutron_plugins/$Q_PLUGIN # Agent loadbalancer service plugin functions # ------------------------------------------- # Hardcoding for 1 service plugin for now -source $TOP_DIR/lib/quantum_plugins/services/loadbalancer +source $TOP_DIR/lib/neutron_plugins/services/loadbalancer # Use security group or not -if has_quantum_plugin_security_group; then +if has_neutron_plugin_security_group; then Q_USE_SECGROUP=${Q_USE_SECGROUP:-True} else Q_USE_SECGROUP=False @@ -205,36 +212,36 @@ fi # Functions # --------- -# configure_quantum() -# Set common config for all quantum server and agents. -function configure_quantum() { - _configure_quantum_common - iniset_rpc_backend quantum $QUANTUM_CONF DEFAULT +# configure_neutron() +# Set common config for all neutron server and agents. 
+function configure_neutron() { + _configure_neutron_common + iniset_rpc_backend neutron $NEUTRON_CONF DEFAULT # goes before q-svc to init Q_SERVICE_PLUGIN_CLASSES if is_service_enabled q-lbaas; then - _configure_quantum_lbaas + _configure_neutron_lbaas fi if is_service_enabled q-svc; then - _configure_quantum_service + _configure_neutron_service fi if is_service_enabled q-agt; then - _configure_quantum_plugin_agent + _configure_neutron_plugin_agent fi if is_service_enabled q-dhcp; then - _configure_quantum_dhcp_agent + _configure_neutron_dhcp_agent fi if is_service_enabled q-l3; then - _configure_quantum_l3_agent + _configure_neutron_l3_agent fi if is_service_enabled q-meta; then - _configure_quantum_metadata_agent + _configure_neutron_metadata_agent fi - _configure_quantum_debug_command + _configure_neutron_debug_command } -function create_nova_conf_quantum() { +function create_nova_conf_neutron() { iniset $NOVA_CONF DEFAULT network_api_class "nova.network.quantumv2.api.API" iniset $NOVA_CONF DEFAULT quantum_admin_username "$Q_ADMIN_USERNAME" iniset $NOVA_CONF DEFAULT quantum_admin_password "$SERVICE_PASSWORD" @@ -249,7 +256,7 @@ function create_nova_conf_quantum() { fi # set NOVA_VIF_DRIVER and optionally set options in nova_conf - quantum_plugin_create_nova_conf + neutron_plugin_create_nova_conf iniset $NOVA_CONF DEFAULT libvirt_vif_driver "$NOVA_VIF_DRIVER" iniset $NOVA_CONF DEFAULT linuxnet_interface_driver "$LINUXNET_VIF_DRIVER" @@ -258,38 +265,38 @@ function create_nova_conf_quantum() { fi } -# create_quantum_accounts() - Set up common required quantum accounts +# create_neutron_accounts() - Set up common required neutron accounts # Tenant User Roles # ------------------------------------------------------------------ -# service quantum admin # if enabled +# service neutron admin # if enabled # Migrated from keystone_data.sh -function create_quantum_accounts() { +function create_neutron_accounts() { SERVICE_TENANT=$(keystone tenant-list | awk "/ 
$SERVICE_TENANT_NAME / { print \$2 }") ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }") if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then - QUANTUM_USER=$(keystone user-create \ - --name=quantum \ + NEUTRON_USER=$(keystone user-create \ + --name=neutron \ --pass="$SERVICE_PASSWORD" \ --tenant_id $SERVICE_TENANT \ - --email=quantum@example.com \ + --email=neutron@example.com \ | grep " id " | get_field 2) keystone user-role-add \ --tenant_id $SERVICE_TENANT \ - --user_id $QUANTUM_USER \ + --user_id $NEUTRON_USER \ --role_id $ADMIN_ROLE if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - QUANTUM_SERVICE=$(keystone service-create \ - --name=quantum \ + NEUTRON_SERVICE=$(keystone service-create \ + --name=neutron \ --type=network \ - --description="Quantum Service" \ + --description="Neutron Service" \ | grep " id " | get_field 2) keystone endpoint-create \ --region RegionOne \ - --service_id $QUANTUM_SERVICE \ + --service_id $NEUTRON_SERVICE \ --publicurl "http://$SERVICE_HOST:9696/" \ --adminurl "http://$SERVICE_HOST:9696/" \ --internalurl "http://$SERVICE_HOST:9696/" @@ -297,11 +304,11 @@ function create_quantum_accounts() { fi } -function create_quantum_initial_network() { +function create_neutron_initial_network() { TENANT_ID=$(keystone tenant-list | grep " demo " | get_field 1) # Create a small network - # Since quantum command is executed in admin context at this point, + # Since neutron command is executed in admin context at this point, # ``--tenant_id`` needs to be specified. 
if is_baremetal; then sudo ovs-vsctl add-port $OVS_PHYSICAL_BRIDGE $PUBLIC_INTERFACE @@ -309,36 +316,36 @@ function create_quantum_initial_network() { sudo ip addr del $IP dev $PUBLIC_INTERFACE sudo ip addr add $IP dev $OVS_PHYSICAL_BRIDGE done - NET_ID=$(quantum net-create $PHYSICAL_NETWORK --tenant_id $TENANT_ID --provider:network_type flat --provider:physical_network "$PHYSICAL_NETWORK" | grep ' id ' | get_field 2) - SUBNET_ID=$(quantum subnet-create --tenant_id $TENANT_ID --ip_version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} --gateway $NETWORK_GATEWAY --name $PRIVATE_SUBNET_NAME $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2) + NET_ID=$(neutron net-create $PHYSICAL_NETWORK --tenant_id $TENANT_ID --provider:network_type flat --provider:physical_network "$PHYSICAL_NETWORK" | grep ' id ' | get_field 2) + SUBNET_ID=$(neutron subnet-create --tenant_id $TENANT_ID --ip_version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} --gateway $NETWORK_GATEWAY --name $PRIVATE_SUBNET_NAME $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2) sudo ifconfig $OVS_PHYSICAL_BRIDGE up else - NET_ID=$(quantum net-create --tenant_id $TENANT_ID "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2) - SUBNET_ID=$(quantum subnet-create --tenant_id $TENANT_ID --ip_version 4 --gateway $NETWORK_GATEWAY --name $PRIVATE_SUBNET_NAME $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2) + NET_ID=$(neutron net-create --tenant_id $TENANT_ID "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2) + SUBNET_ID=$(neutron subnet-create --tenant_id $TENANT_ID --ip_version 4 --gateway $NETWORK_GATEWAY --name $PRIVATE_SUBNET_NAME $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2) fi if [[ "$Q_L3_ENABLED" == "True" ]]; then # Create a router, and add the private subnet as one of its interfaces if [[ "$Q_L3_ROUTER_PER_TENANT" == "True" ]]; then # create a tenant-owned router. 
- ROUTER_ID=$(quantum router-create --tenant_id $TENANT_ID $Q_ROUTER_NAME | grep ' id ' | get_field 2) + ROUTER_ID=$(neutron router-create --tenant_id $TENANT_ID $Q_ROUTER_NAME | grep ' id ' | get_field 2) else # Plugin only supports creating a single router, which should be admin owned. - ROUTER_ID=$(quantum router-create $Q_ROUTER_NAME | grep ' id ' | get_field 2) + ROUTER_ID=$(neutron router-create $Q_ROUTER_NAME | grep ' id ' | get_field 2) fi - quantum router-interface-add $ROUTER_ID $SUBNET_ID + neutron router-interface-add $ROUTER_ID $SUBNET_ID # Create an external network, and a subnet. Configure the external network as router gw - EXT_NET_ID=$(quantum net-create "$PUBLIC_NETWORK_NAME" -- --router:external=True | grep ' id ' | get_field 2) - EXT_GW_IP=$(quantum subnet-create --ip_version 4 ${Q_FLOATING_ALLOCATION_POOL:+--allocation-pool $Q_FLOATING_ALLOCATION_POOL} --gateway $PUBLIC_NETWORK_GATEWAY --name $PUBLIC_SUBNET_NAME $EXT_NET_ID $FLOATING_RANGE -- --enable_dhcp=False | grep 'gateway_ip' | get_field 2) - quantum router-gateway-set $ROUTER_ID $EXT_NET_ID + EXT_NET_ID=$(neutron net-create "$PUBLIC_NETWORK_NAME" -- --router:external=True | grep ' id ' | get_field 2) + EXT_GW_IP=$(neutron subnet-create --ip_version 4 ${Q_FLOATING_ALLOCATION_POOL:+--allocation-pool $Q_FLOATING_ALLOCATION_POOL} --gateway $PUBLIC_NETWORK_GATEWAY --name $PUBLIC_SUBNET_NAME $EXT_NET_ID $FLOATING_RANGE -- --enable_dhcp=False | grep 'gateway_ip' | get_field 2) + neutron router-gateway-set $ROUTER_ID $EXT_NET_ID if is_service_enabled q-l3; then # logic is specific to using the l3-agent for l3 - if is_quantum_ovs_base_plugin && [[ "$Q_USE_NAMESPACE" = "True" ]]; then + if is_neutron_ovs_base_plugin && [[ "$Q_USE_NAMESPACE" = "True" ]]; then CIDR_LEN=${FLOATING_RANGE#*/} sudo ip addr add $EXT_GW_IP/$CIDR_LEN dev $PUBLIC_BRIDGE sudo ip link set $PUBLIC_BRIDGE up - ROUTER_GW_IP=`quantum port-list -c fixed_ips -c device_owner | grep router_gateway | awk -F '"' '{ print $8; }'` + 
ROUTER_GW_IP=`neutron port-list -c fixed_ips -c device_owner | grep router_gateway | awk -F '"' '{ print $8; }'` sudo route add -net $FIXED_RANGE gw $ROUTER_GW_IP fi if [[ "$Q_USE_NAMESPACE" == "False" ]]; then @@ -349,313 +356,313 @@ function create_quantum_initial_network() { fi } -# init_quantum() - Initialize databases, etc. -function init_quantum() { +# init_neutron() - Initialize databases, etc. +function init_neutron() { : } -# install_quantum() - Collect source and prepare -function install_quantum() { - git_clone $QUANTUM_REPO $QUANTUM_DIR $QUANTUM_BRANCH - setup_develop $QUANTUM_DIR +# install_neutron() - Collect source and prepare +function install_neutron() { + git_clone $NEUTRON_REPO $NEUTRON_DIR $NEUTRON_BRANCH + setup_develop $NEUTRON_DIR } -# install_quantumclient() - Collect source and prepare -function install_quantumclient() { - git_clone $QUANTUMCLIENT_REPO $QUANTUMCLIENT_DIR $QUANTUMCLIENT_BRANCH - setup_develop $QUANTUMCLIENT_DIR +# install_neutronclient() - Collect source and prepare +function install_neutronclient() { + git_clone $NEUTRONCLIENT_REPO $NEUTRONCLIENT_DIR $NEUTRONCLIENT_BRANCH + setup_develop $NEUTRONCLIENT_DIR } -# install_quantum_agent_packages() - Collect source and prepare -function install_quantum_agent_packages() { +# install_neutron_agent_packages() - Collect source and prepare +function install_neutron_agent_packages() { # install packages that are specific to plugin agent(s) if is_service_enabled q-agt q-dhcp q-l3; then - quantum_plugin_install_agent_packages + neutron_plugin_install_agent_packages fi if is_service_enabled q-lbaas; then - quantum_agent_lbaas_install_agent_packages + neutron_agent_lbaas_install_agent_packages fi } # Start running processes, including screen -function start_quantum_service_and_check() { +function start_neutron_service_and_check() { # build config-file options local cfg_file - local CFG_FILE_OPTIONS="--config-file $QUANTUM_CONF --config-file /$Q_PLUGIN_CONF_FILE" + local 
CFG_FILE_OPTIONS="--config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE" for cfg_file in ${Q_PLUGIN_EXTRA_CONF_FILES[@]}; do CFG_FILE_OPTIONS+=" --config-file /$cfg_file" done - # Start the Quantum service - screen_it q-svc "cd $QUANTUM_DIR && python $QUANTUM_DIR/bin/quantum-server $CFG_FILE_OPTIONS" - echo "Waiting for Quantum to start..." + # Start the Neutron service + screen_it q-svc "cd $NEUTRON_DIR && python $NEUTRON_BIN_DIR/neutron-server $CFG_FILE_OPTIONS" + echo "Waiting for Neutron to start..." if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- http://$Q_HOST:$Q_PORT; do sleep 1; done"; then - die $LINENO "Quantum did not start" + die $LINENO "Neutron did not start" fi } # Start running processes, including screen -function start_quantum_agents() { - # Start up the quantum agents if enabled - screen_it q-agt "cd $QUANTUM_DIR && python $AGENT_BINARY --config-file $QUANTUM_CONF --config-file /$Q_PLUGIN_CONF_FILE" - screen_it q-dhcp "cd $QUANTUM_DIR && python $AGENT_DHCP_BINARY --config-file $QUANTUM_CONF --config-file=$Q_DHCP_CONF_FILE" - screen_it q-l3 "cd $QUANTUM_DIR && python $AGENT_L3_BINARY --config-file $QUANTUM_CONF --config-file=$Q_L3_CONF_FILE" - screen_it q-meta "cd $QUANTUM_DIR && python $AGENT_META_BINARY --config-file $QUANTUM_CONF --config-file=$Q_META_CONF_FILE" +function start_neutron_agents() { + # Start up the neutron agents if enabled + screen_it q-agt "cd $NEUTRON_DIR && python $AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE" + screen_it q-dhcp "cd $NEUTRON_DIR && python $AGENT_DHCP_BINARY --config-file $NEUTRON_CONF --config-file=$Q_DHCP_CONF_FILE" + screen_it q-l3 "cd $NEUTRON_DIR && python $AGENT_L3_BINARY --config-file $NEUTRON_CONF --config-file=$Q_L3_CONF_FILE" + screen_it q-meta "cd $NEUTRON_DIR && python $AGENT_META_BINARY --config-file $NEUTRON_CONF --config-file=$Q_META_CONF_FILE" if [ "$VIRT_DRIVER" = 'xenserver' ]; then # For XenServer, start an agent for the domU 
openvswitch - screen_it q-domua "cd $QUANTUM_DIR && python $AGENT_BINARY --config-file $QUANTUM_CONF --config-file /$Q_PLUGIN_CONF_FILE.domU" + screen_it q-domua "cd $NEUTRON_DIR && python $AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE.domU" fi if is_service_enabled q-lbaas; then - screen_it q-lbaas "cd $QUANTUM_DIR && python $AGENT_LBAAS_BINARY --config-file $QUANTUM_CONF --config-file=$LBAAS_AGENT_CONF_FILENAME" + screen_it q-lbaas "cd $NEUTRON_DIR && python $AGENT_LBAAS_BINARY --config-file $NEUTRON_CONF --config-file=$LBAAS_AGENT_CONF_FILENAME" fi } -# stop_quantum() - Stop running processes (non-screen) -function stop_quantum() { +# stop_neutron() - Stop running processes (non-screen) +function stop_neutron() { if is_service_enabled q-dhcp; then pid=$(ps aux | awk '/[d]nsmasq.+interface=(tap|ns-)/ { print $2 }') [ ! -z "$pid" ] && sudo kill -9 $pid fi if is_service_enabled q-meta; then - pid=$(ps aux | awk '/quantum-ns-metadata-proxy/ { print $2 }') + pid=$(ps aux | awk '/neutron-ns-metadata-proxy/ { print $2 }') [ ! -z "$pid" ] && sudo kill -9 $pid fi } -# cleanup_quantum() - Remove residual data files, anything left over from previous +# cleanup_neutron() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up -function cleanup_quantum() { - if is_quantum_ovs_base_plugin; then - quantum_ovs_base_cleanup +function cleanup_neutron() { + if is_neutron_ovs_base_plugin; then + neutron_ovs_base_cleanup fi - # delete all namespaces created by quantum + # delete all namespaces created by neutron for ns in $(sudo ip netns list | grep -o -e qdhcp-[0-9a-f\-]* -e qrouter-[0-9a-f\-]*); do sudo ip netns delete ${ns} done } -# _configure_quantum_common() -# Set common config for all quantum server and agents. -# This MUST be called before other ``_configure_quantum_*`` functions. 
-function _configure_quantum_common() { - # Put config files in ``QUANTUM_CONF_DIR`` for everyone to find - if [[ ! -d $QUANTUM_CONF_DIR ]]; then - sudo mkdir -p $QUANTUM_CONF_DIR +# _configure_neutron_common() +# Set common config for all neutron server and agents. +# This MUST be called before other ``_configure_neutron_*`` functions. +function _configure_neutron_common() { + # Put config files in ``NEUTRON_CONF_DIR`` for everyone to find + if [[ ! -d $NEUTRON_CONF_DIR ]]; then + sudo mkdir -p $NEUTRON_CONF_DIR fi - sudo chown $STACK_USER $QUANTUM_CONF_DIR + sudo chown $STACK_USER $NEUTRON_CONF_DIR - cp $QUANTUM_DIR/etc/quantum.conf $QUANTUM_CONF + cp $NEUTRON_DIR/etc/neutron.conf $NEUTRON_CONF # Set plugin-specific variables ``Q_DB_NAME``, ``Q_PLUGIN_CLASS``. # For main plugin config file, set ``Q_PLUGIN_CONF_PATH``, ``Q_PLUGIN_CONF_FILENAME``. # For addition plugin config files, set ``Q_PLUGIN_EXTRA_CONF_PATH``, # ``Q_PLUGIN_EXTRA_CONF_FILES``. For example: # ``Q_PLUGIN_EXTRA_CONF_FILES=(file1, file2)`` - quantum_plugin_configure_common + neutron_plugin_configure_common if [[ $Q_PLUGIN_CONF_PATH == '' || $Q_PLUGIN_CONF_FILENAME == '' || $Q_PLUGIN_CLASS == '' ]]; then - die $LINENO "Quantum plugin not set.. exiting" + die $LINENO "Neutron plugin not set.. 
exiting" fi - # If needed, move config file from ``$QUANTUM_DIR/etc/quantum`` to ``QUANTUM_CONF_DIR`` + # If needed, move config file from ``$NEUTRON_DIR/etc/neutron`` to ``NEUTRON_CONF_DIR`` mkdir -p /$Q_PLUGIN_CONF_PATH Q_PLUGIN_CONF_FILE=$Q_PLUGIN_CONF_PATH/$Q_PLUGIN_CONF_FILENAME - cp $QUANTUM_DIR/$Q_PLUGIN_CONF_FILE /$Q_PLUGIN_CONF_FILE + cp $NEUTRON_DIR/$Q_PLUGIN_CONF_FILE /$Q_PLUGIN_CONF_FILE iniset /$Q_PLUGIN_CONF_FILE database connection `database_connection_url $Q_DB_NAME` - iniset $QUANTUM_CONF DEFAULT state_path $DATA_DIR/quantum + iniset $NEUTRON_CONF DEFAULT state_path $DATA_DIR/neutron # If addition config files are set, make sure their path name is set as well if [[ ${#Q_PLUGIN_EXTRA_CONF_FILES[@]} > 0 && $Q_PLUGIN_EXTRA_CONF_PATH == '' ]]; then - die $LINENO "Quantum additional plugin config not set.. exiting" + die $LINENO "Neutron additional plugin config not set.. exiting" fi - # If additional config files exist, copy them over to quantum configuration + # If additional config files exist, copy them over to neutron configuration # directory if [[ $Q_PLUGIN_EXTRA_CONF_PATH != '' ]]; then mkdir -p /$Q_PLUGIN_EXTRA_CONF_PATH local f for (( f=0; $f < ${#Q_PLUGIN_EXTRA_CONF_FILES[@]}; f+=1 )); do Q_PLUGIN_EXTRA_CONF_FILES[$f]=$Q_PLUGIN_EXTRA_CONF_PATH/${Q_PLUGIN_EXTRA_CONF_FILES[$f]} - cp $QUANTUM_DIR/${Q_PLUGIN_EXTRA_CONF_FILES[$f]} /${Q_PLUGIN_EXTRA_CONF_FILES[$f]} + cp $NEUTRON_DIR/${Q_PLUGIN_EXTRA_CONF_FILES[$f]} /${Q_PLUGIN_EXTRA_CONF_FILES[$f]} done fi - _quantum_setup_rootwrap + _neutron_setup_rootwrap } -function _configure_quantum_debug_command() { +function _configure_neutron_debug_command() { if [[ "$Q_USE_DEBUG_COMMAND" != "True" ]]; then return fi - cp $QUANTUM_DIR/etc/l3_agent.ini $QUANTUM_TEST_CONFIG_FILE + cp $NEUTRON_DIR/etc/l3_agent.ini $NEUTRON_TEST_CONFIG_FILE - iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT verbose False - iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT debug False - iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT use_namespaces 
$Q_USE_NAMESPACE - iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT root_helper "$Q_RR_COMMAND" - # Intermediate fix until Quantum patch lands and then line above will + iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT verbose False + iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT debug False + iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE + iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT root_helper "$Q_RR_COMMAND" + # Intermediate fix until Neutron patch lands and then line above will # be cleaned. - iniset $QUANTUM_TEST_CONFIG_FILE agent root_helper "$Q_RR_COMMAND" + iniset $NEUTRON_TEST_CONFIG_FILE agent root_helper "$Q_RR_COMMAND" - _quantum_setup_keystone $QUANTUM_TEST_CONFIG_FILE DEFAULT set_auth_url - _quantum_setup_interface_driver $QUANTUM_TEST_CONFIG_FILE + _neutron_setup_keystone $NEUTRON_TEST_CONFIG_FILE DEFAULT set_auth_url + _neutron_setup_interface_driver $NEUTRON_TEST_CONFIG_FILE - quantum_plugin_configure_debug_command + neutron_plugin_configure_debug_command } -function _configure_quantum_dhcp_agent() { - AGENT_DHCP_BINARY="$QUANTUM_DIR/bin/quantum-dhcp-agent" - Q_DHCP_CONF_FILE=$QUANTUM_CONF_DIR/dhcp_agent.ini +function _configure_neutron_dhcp_agent() { + AGENT_DHCP_BINARY="$NEUTRON_BIN_DIR/neutron-dhcp-agent" + Q_DHCP_CONF_FILE=$NEUTRON_CONF_DIR/dhcp_agent.ini - cp $QUANTUM_DIR/etc/dhcp_agent.ini $Q_DHCP_CONF_FILE + cp $NEUTRON_DIR/etc/dhcp_agent.ini $Q_DHCP_CONF_FILE iniset $Q_DHCP_CONF_FILE DEFAULT verbose True iniset $Q_DHCP_CONF_FILE DEFAULT debug True iniset $Q_DHCP_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE iniset $Q_DHCP_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND" - _quantum_setup_keystone $Q_DHCP_CONF_FILE DEFAULT set_auth_url - _quantum_setup_interface_driver $Q_DHCP_CONF_FILE + _neutron_setup_keystone $Q_DHCP_CONF_FILE DEFAULT set_auth_url + _neutron_setup_interface_driver $Q_DHCP_CONF_FILE - quantum_plugin_configure_dhcp_agent + neutron_plugin_configure_dhcp_agent } -function _configure_quantum_l3_agent() { +function 
_configure_neutron_l3_agent() { Q_L3_ENABLED=True # for l3-agent, only use per tenant router if we have namespaces Q_L3_ROUTER_PER_TENANT=$Q_USE_NAMESPACE - AGENT_L3_BINARY="$QUANTUM_DIR/bin/quantum-l3-agent" - Q_L3_CONF_FILE=$QUANTUM_CONF_DIR/l3_agent.ini + AGENT_L3_BINARY="$NEUTRON_BIN_DIR/neutron-l3-agent" + Q_L3_CONF_FILE=$NEUTRON_CONF_DIR/l3_agent.ini - cp $QUANTUM_DIR/etc/l3_agent.ini $Q_L3_CONF_FILE + cp $NEUTRON_DIR/etc/l3_agent.ini $Q_L3_CONF_FILE iniset $Q_L3_CONF_FILE DEFAULT verbose True iniset $Q_L3_CONF_FILE DEFAULT debug True iniset $Q_L3_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE iniset $Q_L3_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND" - _quantum_setup_keystone $Q_L3_CONF_FILE DEFAULT set_auth_url - _quantum_setup_interface_driver $Q_L3_CONF_FILE + _neutron_setup_keystone $Q_L3_CONF_FILE DEFAULT set_auth_url + _neutron_setup_interface_driver $Q_L3_CONF_FILE - quantum_plugin_configure_l3_agent + neutron_plugin_configure_l3_agent } -function _configure_quantum_metadata_agent() { - AGENT_META_BINARY="$QUANTUM_DIR/bin/quantum-metadata-agent" - Q_META_CONF_FILE=$QUANTUM_CONF_DIR/metadata_agent.ini +function _configure_neutron_metadata_agent() { + AGENT_META_BINARY="$NEUTRON_BIN_DIR/neutron-metadata-agent" + Q_META_CONF_FILE=$NEUTRON_CONF_DIR/metadata_agent.ini - cp $QUANTUM_DIR/etc/metadata_agent.ini $Q_META_CONF_FILE + cp $NEUTRON_DIR/etc/metadata_agent.ini $Q_META_CONF_FILE iniset $Q_META_CONF_FILE DEFAULT verbose True iniset $Q_META_CONF_FILE DEFAULT debug True iniset $Q_META_CONF_FILE DEFAULT nova_metadata_ip $Q_META_DATA_IP iniset $Q_META_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND" - _quantum_setup_keystone $Q_META_CONF_FILE DEFAULT set_auth_url + _neutron_setup_keystone $Q_META_CONF_FILE DEFAULT set_auth_url } -function _configure_quantum_lbaas() { - quantum_agent_lbaas_configure_common - quantum_agent_lbaas_configure_agent +function _configure_neutron_lbaas() { + neutron_agent_lbaas_configure_common + neutron_agent_lbaas_configure_agent 
} -# _configure_quantum_plugin_agent() - Set config files for quantum plugin agent +# _configure_neutron_plugin_agent() - Set config files for neutron plugin agent # It is called when q-agt is enabled. -function _configure_quantum_plugin_agent() { +function _configure_neutron_plugin_agent() { # Specify the default root helper prior to agent configuration to # ensure that an agent's configuration can override the default iniset /$Q_PLUGIN_CONF_FILE agent root_helper "$Q_RR_COMMAND" - iniset $QUANTUM_CONF DEFAULT verbose True - iniset $QUANTUM_CONF DEFAULT debug True + iniset $NEUTRON_CONF DEFAULT verbose True + iniset $NEUTRON_CONF DEFAULT debug True # Configure agent for plugin - quantum_plugin_configure_plugin_agent + neutron_plugin_configure_plugin_agent } -# _configure_quantum_service() - Set config files for quantum service +# _configure_neutron_service() - Set config files for neutron service # It is called when q-svc is enabled. -function _configure_quantum_service() { - Q_API_PASTE_FILE=$QUANTUM_CONF_DIR/api-paste.ini - Q_POLICY_FILE=$QUANTUM_CONF_DIR/policy.json +function _configure_neutron_service() { + Q_API_PASTE_FILE=$NEUTRON_CONF_DIR/api-paste.ini + Q_POLICY_FILE=$NEUTRON_CONF_DIR/policy.json - cp $QUANTUM_DIR/etc/api-paste.ini $Q_API_PASTE_FILE - cp $QUANTUM_DIR/etc/policy.json $Q_POLICY_FILE + cp $NEUTRON_DIR/etc/api-paste.ini $Q_API_PASTE_FILE + cp $NEUTRON_DIR/etc/policy.json $Q_POLICY_FILE if is_service_enabled $DATABASE_BACKENDS; then recreate_database $Q_DB_NAME utf8 else - die $LINENO "A database must be enabled in order to use the $Q_PLUGIN Quantum plugin." + die $LINENO "A database must be enabled in order to use the $Q_PLUGIN Neutron plugin." 
fi # Update either configuration file with plugin - iniset $QUANTUM_CONF DEFAULT core_plugin $Q_PLUGIN_CLASS + iniset $NEUTRON_CONF DEFAULT core_plugin $Q_PLUGIN_CLASS if [[ $Q_SERVICE_PLUGIN_CLASSES != '' ]]; then - iniset $QUANTUM_CONF DEFAULT service_plugins $Q_SERVICE_PLUGIN_CLASSES + iniset $NEUTRON_CONF DEFAULT service_plugins $Q_SERVICE_PLUGIN_CLASSES fi - iniset $QUANTUM_CONF DEFAULT verbose True - iniset $QUANTUM_CONF DEFAULT debug True - iniset $QUANTUM_CONF DEFAULT policy_file $Q_POLICY_FILE - iniset $QUANTUM_CONF DEFAULT allow_overlapping_ips $Q_ALLOW_OVERLAPPING_IP + iniset $NEUTRON_CONF DEFAULT verbose True + iniset $NEUTRON_CONF DEFAULT debug True + iniset $NEUTRON_CONF DEFAULT policy_file $Q_POLICY_FILE + iniset $NEUTRON_CONF DEFAULT allow_overlapping_ips $Q_ALLOW_OVERLAPPING_IP - iniset $QUANTUM_CONF DEFAULT auth_strategy $Q_AUTH_STRATEGY - _quantum_setup_keystone $QUANTUM_CONF keystone_authtoken + iniset $NEUTRON_CONF DEFAULT auth_strategy $Q_AUTH_STRATEGY + _neutron_setup_keystone $NEUTRON_CONF keystone_authtoken # Configure plugin - quantum_plugin_configure_service + neutron_plugin_configure_service } # Utility Functions #------------------ -# _quantum_setup_rootwrap() - configure Quantum's rootwrap -function _quantum_setup_rootwrap() { +# _neutron_setup_rootwrap() - configure Neutron's rootwrap +function _neutron_setup_rootwrap() { if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then return fi # Deploy new rootwrap filters files (owned by root). 
# Wipe any existing ``rootwrap.d`` files first - Q_CONF_ROOTWRAP_D=$QUANTUM_CONF_DIR/rootwrap.d + Q_CONF_ROOTWRAP_D=$NEUTRON_CONF_DIR/rootwrap.d if [[ -d $Q_CONF_ROOTWRAP_D ]]; then sudo rm -rf $Q_CONF_ROOTWRAP_D fi - # Deploy filters to ``$QUANTUM_CONF_DIR/rootwrap.d`` + # Deploy filters to ``$NEUTRON_CONF_DIR/rootwrap.d`` mkdir -p -m 755 $Q_CONF_ROOTWRAP_D - cp -pr $QUANTUM_DIR/etc/quantum/rootwrap.d/* $Q_CONF_ROOTWRAP_D/ + cp -pr $NEUTRON_DIR/etc/neutron/rootwrap.d/* $Q_CONF_ROOTWRAP_D/ sudo chown -R root:root $Q_CONF_ROOTWRAP_D sudo chmod 644 $Q_CONF_ROOTWRAP_D/* - # Set up ``rootwrap.conf``, pointing to ``$QUANTUM_CONF_DIR/rootwrap.d`` + # Set up ``rootwrap.conf``, pointing to ``$NEUTRON_CONF_DIR/rootwrap.d`` # location moved in newer versions, prefer new location - if test -r $QUANTUM_DIR/etc/quantum/rootwrap.conf; then - sudo cp -p $QUANTUM_DIR/etc/quantum/rootwrap.conf $Q_RR_CONF_FILE + if test -r $NEUTRON_DIR/etc/neutron/rootwrap.conf; then + sudo cp -p $NEUTRON_DIR/etc/neutron/rootwrap.conf $Q_RR_CONF_FILE else - sudo cp -p $QUANTUM_DIR/etc/rootwrap.conf $Q_RR_CONF_FILE + sudo cp -p $NEUTRON_DIR/etc/rootwrap.conf $Q_RR_CONF_FILE fi sudo sed -e "s:^filters_path=.*$:filters_path=$Q_CONF_ROOTWRAP_D:" -i $Q_RR_CONF_FILE sudo chown root:root $Q_RR_CONF_FILE sudo chmod 0644 $Q_RR_CONF_FILE - # Specify ``rootwrap.conf`` as first parameter to quantum-rootwrap - ROOTWRAP_SUDOER_CMD="$QUANTUM_ROOTWRAP $Q_RR_CONF_FILE *" + # Specify ``rootwrap.conf`` as first parameter to neutron-rootwrap + ROOTWRAP_SUDOER_CMD="$NEUTRON_ROOTWRAP $Q_RR_CONF_FILE *" - # Set up the rootwrap sudoers for quantum + # Set up the rootwrap sudoers for neutron TEMPFILE=`mktemp` echo "$USER ALL=(root) NOPASSWD: $ROOTWRAP_SUDOER_CMD" >$TEMPFILE chmod 0440 $TEMPFILE sudo chown root:root $TEMPFILE - sudo mv $TEMPFILE /etc/sudoers.d/quantum-rootwrap + sudo mv $TEMPFILE /etc/sudoers.d/neutron-rootwrap # Update the root_helper - iniset $QUANTUM_CONF agent root_helper "$Q_RR_COMMAND" + iniset 
$NEUTRON_CONF agent root_helper "$Q_RR_COMMAND" } -# Configures keystone integration for quantum service and agents -function _quantum_setup_keystone() { +# Configures keystone integration for neutron service and agents +function _neutron_setup_keystone() { local conf_file=$1 local section=$2 local use_auth_url=$3 @@ -669,58 +676,58 @@ function _quantum_setup_keystone() { iniset $conf_file $section admin_tenant_name $SERVICE_TENANT_NAME iniset $conf_file $section admin_user $Q_ADMIN_USERNAME iniset $conf_file $section admin_password $SERVICE_PASSWORD - iniset $conf_file $section signing_dir $QUANTUM_AUTH_CACHE_DIR + iniset $conf_file $section signing_dir $NEUTRON_AUTH_CACHE_DIR # Create cache dir - sudo mkdir -p $QUANTUM_AUTH_CACHE_DIR - sudo chown $STACK_USER $QUANTUM_AUTH_CACHE_DIR - rm -f $QUANTUM_AUTH_CACHE_DIR/* + sudo mkdir -p $NEUTRON_AUTH_CACHE_DIR + sudo chown $STACK_USER $NEUTRON_AUTH_CACHE_DIR + rm -f $NEUTRON_AUTH_CACHE_DIR/* } -function _quantum_setup_interface_driver() { +function _neutron_setup_interface_driver() { # ovs_use_veth needs to be set before the plugin configuration # occurs to allow plugins to override the setting. 
iniset $1 DEFAULT ovs_use_veth $Q_OVS_USE_VETH - quantum_plugin_setup_interface_driver $1 + neutron_plugin_setup_interface_driver $1 } -# Functions for Quantum Exercises +# Functions for Neutron Exercises #-------------------------------- function delete_probe() { local from_net="$1" net_id=`_get_net_id $from_net` - probe_id=`quantum-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-list -c id -c network_id | grep $net_id | awk '{print $2}'` - quantum-debug --os-tenant-name admin --os-username admin probe-delete $probe_id + probe_id=`neutron-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-list -c id -c network_id | grep $net_id | awk '{print $2}'` + neutron-debug --os-tenant-name admin --os-username admin probe-delete $probe_id } -function setup_quantum_debug() { +function setup_neutron_debug() { if [[ "$Q_USE_DEBUG_COMMAND" == "True" ]]; then public_net_id=`_get_net_id $PUBLIC_NETWORK_NAME` - quantum-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-create --device-owner compute $public_net_id + neutron-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-create --device-owner compute $public_net_id private_net_id=`_get_net_id $PRIVATE_NETWORK_NAME` - quantum-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-create --device-owner compute $private_net_id + neutron-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-create --device-owner compute $private_net_id fi } -function teardown_quantum_debug() { +function teardown_neutron_debug() { delete_probe $PUBLIC_NETWORK_NAME delete_probe $PRIVATE_NETWORK_NAME } function _get_net_id() { - quantum --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD net-list | grep $1 | awk '{print $2}' + neutron --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD net-list | grep $1 | awk 
'{print $2}' } function _get_probe_cmd_prefix() { local from_net="$1" net_id=`_get_net_id $from_net` - probe_id=`quantum-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-list -c id -c network_id | grep $net_id | awk '{print $2}' | head -n 1` + probe_id=`neutron-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-list -c id -c network_id | grep $net_id | awk '{print $2}' | head -n 1` echo "$Q_RR_COMMAND ip netns exec qprobe-$probe_id" } -function _ping_check_quantum() { +function _ping_check_neutron() { local from_net=$1 local ip=$2 local timeout_sec=$3 @@ -742,7 +749,7 @@ function _ping_check_quantum() { } # ssh check -function _ssh_check_quantum() { +function _ssh_check_neutron() { local from_net=$1 local key_file=$2 local ip=$3 @@ -755,48 +762,48 @@ function _ssh_check_quantum() { fi } -# Quantum 3rd party programs +# Neutron 3rd party programs #--------------------------- -# please refer to ``lib/quantum_thirdparty/README.md`` for details -QUANTUM_THIRD_PARTIES="" -for f in $TOP_DIR/lib/quantum_thirdparty/*; do +# please refer to ``lib/neutron_thirdparty/README.md`` for details +NEUTRON_THIRD_PARTIES="" +for f in $TOP_DIR/lib/neutron_thirdparty/*; do third_party=$(basename $f) if is_service_enabled $third_party; then - source $TOP_DIR/lib/quantum_thirdparty/$third_party - QUANTUM_THIRD_PARTIES="$QUANTUM_THIRD_PARTIES,$third_party" + source $TOP_DIR/lib/neutron_thirdparty/$third_party + NEUTRON_THIRD_PARTIES="$NEUTRON_THIRD_PARTIES,$third_party" fi done -function _quantum_third_party_do() { - for third_party in ${QUANTUM_THIRD_PARTIES//,/ }; do +function _neutron_third_party_do() { + for third_party in ${NEUTRON_THIRD_PARTIES//,/ }; do ${1}_${third_party} done } -# configure_quantum_third_party() - Set config files, create data dirs, etc -function configure_quantum_third_party() { - _quantum_third_party_do configure +# configure_neutron_third_party() - Set config files, create data dirs, etc 
+function configure_neutron_third_party() { + _neutron_third_party_do configure } -# init_quantum_third_party() - Initialize databases, etc. -function init_quantum_third_party() { - _quantum_third_party_do init +# init_neutron_third_party() - Initialize databases, etc. +function init_neutron_third_party() { + _neutron_third_party_do init } -# install_quantum_third_party() - Collect source and prepare -function install_quantum_third_party() { - _quantum_third_party_do install +# install_neutron_third_party() - Collect source and prepare +function install_neutron_third_party() { + _neutron_third_party_do install } -# start_quantum_third_party() - Start running processes, including screen -function start_quantum_third_party() { - _quantum_third_party_do start +# start_neutron_third_party() - Start running processes, including screen +function start_neutron_third_party() { + _neutron_third_party_do start } -# stop_quantum_third_party - Stop running processes (non-screen) -function stop_quantum_third_party() { - _quantum_third_party_do stop +# stop_neutron_third_party - Stop running processes (non-screen) +function stop_neutron_third_party() { + _neutron_third_party_do stop } diff --git a/lib/neutron_plugins/README.md b/lib/neutron_plugins/README.md new file mode 100644 index 0000000000..be8fd96677 --- /dev/null +++ b/lib/neutron_plugins/README.md @@ -0,0 +1,38 @@ +Neutron plugin specific files +============================= +Neutron plugins require plugin specific behavior. +The files under the directory, ``lib/neutron_plugins/``, will be used +when their service is enabled. +Each plugin has ``lib/neutron_plugins/$Q_PLUGIN`` and define the following +functions. +Plugin specific configuration variables should be in this file. + +* filename: ``$Q_PLUGIN`` + * The corresponding file name MUST be the same to plugin name ``$Q_PLUGIN``. + Plugin specific configuration variables should be in this file. 
+ +functions +--------- +``lib/neutron`` calls the following functions when the ``$Q_PLUGIN`` is enabled + +* ``neutron_plugin_create_nova_conf`` : + set ``NOVA_VIF_DRIVER`` and optionally set options in nova_conf + e.g. + NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"} +* ``neutron_plugin_install_agent_packages`` : + install packages that is specific to plugin agent + e.g. + install_package bridge-utils +* ``neutron_plugin_configure_common`` : + set plugin-specific variables, ``Q_PLUGIN_CONF_PATH``, ``Q_PLUGIN_CONF_FILENAME``, + ``Q_DB_NAME``, ``Q_PLUGIN_CLASS`` +* ``neutron_plugin_configure_debug_command`` +* ``neutron_plugin_configure_dhcp_agent`` +* ``neutron_plugin_configure_l3_agent`` +* ``neutron_plugin_configure_plugin_agent`` +* ``neutron_plugin_configure_service`` +* ``neutron_plugin_setup_interface_driver`` +* ``has_neutron_plugin_security_group``: + return 0 if the plugin support neutron security group otherwise return 1 +* ``neutron_plugin_check_adv_test_requirements``: + return 0 if requirements are satisfied otherwise return 1 diff --git a/lib/neutron_plugins/bigswitch_floodlight b/lib/neutron_plugins/bigswitch_floodlight new file mode 100644 index 0000000000..24507312c7 --- /dev/null +++ b/lib/neutron_plugins/bigswitch_floodlight @@ -0,0 +1,74 @@ +# Neuton Big Switch/FloodLight plugin +# ------------------------------------ + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + +source $TOP_DIR/lib/neutron_plugins/ovs_base +source $TOP_DIR/lib/neutron_thirdparty/bigswitch_floodlight # for third party service specific configuration values + +function neutron_plugin_create_nova_conf() { + NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"} +} + +function neutron_plugin_install_agent_packages() { + _neutron_ovs_base_install_agent_packages +} + +function neutron_plugin_configure_common() { + Q_PLUGIN_CONF_PATH=etc/neutron/plugins/bigswitch + 
Q_PLUGIN_CONF_FILENAME=restproxy.ini + Q_DB_NAME="restproxy_neutron" + Q_PLUGIN_CLASS="neutron.plugins.bigswitch.plugin.NeutronRestProxyV2" + BS_FL_CONTROLLERS_PORT=${BS_FL_CONTROLLERS_PORT:-localhost:80} + BS_FL_CONTROLLER_TIMEOUT=${BS_FL_CONTROLLER_TIMEOUT:-10} +} + +function neutron_plugin_configure_debug_command() { + _neutron_ovs_base_configure_debug_command +} + +function neutron_plugin_configure_dhcp_agent() { + : +} + +function neutron_plugin_configure_l3_agent() { + _neutron_ovs_base_configure_l3_agent +} + +function neutron_plugin_configure_plugin_agent() { + : +} + +function neutron_plugin_configure_service() { + iniset /$Q_PLUGIN_CONF_FILE restproxy servers $BS_FL_CONTROLLERS_PORT + iniset /$Q_PLUGIN_CONF_FILE restproxy servertimeout $BS_FL_CONTROLLER_TIMEOUT + if [ "$BS_FL_VIF_DRIVER" = "ivs" ] + then + iniset /$Q_PLUGIN_CONF_FILE nova vif_type ivs + fi +} + +function neutron_plugin_setup_interface_driver() { + local conf_file=$1 + if [ "$BS_FL_VIF_DRIVER" = "ivs" ] + then + iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.IVSInterfaceDriver + else + iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver + fi +} + + +function has_neutron_plugin_security_group() { + # 1 means False here + return 1 +} + +function neutron_plugin_check_adv_test_requirements() { + is_service_enabled q-agt && is_service_enabled q-dhcp && return 0 +} + +# Restore xtrace +$MY_XTRACE diff --git a/lib/neutron_plugins/brocade b/lib/neutron_plugins/brocade new file mode 100644 index 0000000000..f9275cacc2 --- /dev/null +++ b/lib/neutron_plugins/brocade @@ -0,0 +1,59 @@ +# Brocade Neutron Plugin +# ---------------------- + +# Save trace setting +BRCD_XTRACE=$(set +o | grep xtrace) +set +o xtrace + +function is_neutron_ovs_base_plugin() { + return 1 +} + +function neutron_plugin_create_nova_conf() { + NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"} +} + +function 
neutron_plugin_install_agent_packages() { + install_package bridge-utils +} + +function neutron_plugin_configure_common() { + Q_PLUGIN_CONF_PATH=etc/neutron/plugins/brocade + Q_PLUGIN_CONF_FILENAME=brocade.ini + Q_DB_NAME="brcd_neutron" + Q_PLUGIN_CLASS="neutron.plugins.brocade.NeutronPlugin.BrocadePluginV2" +} + +function neutron_plugin_configure_debug_command() { + iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT external_network_bridge +} + +function neutron_plugin_configure_dhcp_agent() { + iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_agent_manager neutron.agent.dhcp_agent.DhcpAgentWithStateReport +} + +function neutron_plugin_configure_l3_agent() { + iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge + iniset $Q_L3_CONF_FILE DEFAULT l3_agent_manager neutron.agent.l3_agent.L3NATAgentWithStateReport +} + +function neutron_plugin_configure_plugin_agent() { + AGENT_BINARY="$NEUTON_BIN_DIR/neutron-linuxbridge-agent" +} + +function neutron_plugin_setup_interface_driver() { + local conf_file=$1 + iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.BridgeInterfaceDriver +} + +function has_neutron_plugin_security_group() { + # 0 means True here + return 0 +} + +function neutron_plugin_check_adv_test_requirements() { + is_service_enabled q-agt && is_service_enabled q-dhcp && return 0 +} + +# Restore xtrace +$BRCD_XTRACE diff --git a/lib/quantum_plugins/cisco b/lib/neutron_plugins/cisco similarity index 79% rename from lib/quantum_plugins/cisco rename to lib/neutron_plugins/cisco index 92b91e4526..8948be6de4 100644 --- a/lib/quantum_plugins/cisco +++ b/lib/neutron_plugins/cisco @@ -1,4 +1,4 @@ -# Quantum Cisco plugin +# Neutron Cisco plugin # --------------------------- # Save trace setting @@ -86,17 +86,17 @@ function _config_switch() { # Prefix openvswitch plugin routines with "ovs" in order to differentiate from # cisco plugin routines. This means, ovs plugin routines will coexist with cisco # plugin routines in this script. 
-source $TOP_DIR/lib/quantum_plugins/openvswitch -_prefix_function quantum_plugin_create_nova_conf ovs -_prefix_function quantum_plugin_install_agent_packages ovs -_prefix_function quantum_plugin_configure_common ovs -_prefix_function quantum_plugin_configure_debug_command ovs -_prefix_function quantum_plugin_configure_dhcp_agent ovs -_prefix_function quantum_plugin_configure_l3_agent ovs -_prefix_function quantum_plugin_configure_plugin_agent ovs -_prefix_function quantum_plugin_configure_service ovs -_prefix_function quantum_plugin_setup_interface_driver ovs -_prefix_function has_quantum_plugin_security_group ovs +source $TOP_DIR/lib/neutron_plugins/openvswitch +_prefix_function neutron_plugin_create_nova_conf ovs +_prefix_function neutron_plugin_install_agent_packages ovs +_prefix_function neutron_plugin_configure_common ovs +_prefix_function neutron_plugin_configure_debug_command ovs +_prefix_function neutron_plugin_configure_dhcp_agent ovs +_prefix_function neutron_plugin_configure_l3_agent ovs +_prefix_function neutron_plugin_configure_plugin_agent ovs +_prefix_function neutron_plugin_configure_service ovs +_prefix_function neutron_plugin_setup_interface_driver ovs +_prefix_function has_neutron_plugin_security_group ovs # Check the version of the installed ncclient package function check_ncclient_version() { @@ -144,66 +144,66 @@ function is_ncclient_installed() { return 0 } -function has_quantum_plugin_security_group() { +function has_neutron_plugin_security_group() { if _has_ovs_subplugin; then - ovs_has_quantum_plugin_security_group + ovs_has_neutron_plugin_security_group else return 1 fi } -function is_quantum_ovs_base_plugin() { +function is_neutron_ovs_base_plugin() { # Cisco uses OVS if openvswitch subplugin is deployed _has_ovs_subplugin return } # populate required nova configuration parameters -function quantum_plugin_create_nova_conf() { +function neutron_plugin_create_nova_conf() { if _has_ovs_subplugin; then - ovs_quantum_plugin_create_nova_conf 
+ ovs_neutron_plugin_create_nova_conf else - _quantum_ovs_base_configure_nova_vif_driver + _neutron_ovs_base_configure_nova_vif_driver fi } -function quantum_plugin_install_agent_packages() { +function neutron_plugin_install_agent_packages() { # Cisco plugin uses openvswitch to operate in one of its configurations - ovs_quantum_plugin_install_agent_packages + ovs_neutron_plugin_install_agent_packages } # Configure common parameters -function quantum_plugin_configure_common() { +function neutron_plugin_configure_common() { # setup default subplugins if [ ! -v Q_CISCO_PLUGIN_SUBPLUGINS ]; then declare -ga Q_CISCO_PLUGIN_SUBPLUGINS Q_CISCO_PLUGIN_SUBPLUGINS=(openvswitch nexus) fi if _has_ovs_subplugin; then - ovs_quantum_plugin_configure_common - Q_PLUGIN_EXTRA_CONF_PATH=etc/quantum/plugins/cisco + ovs_neutron_plugin_configure_common + Q_PLUGIN_EXTRA_CONF_PATH=etc/neutron/plugins/cisco Q_PLUGIN_EXTRA_CONF_FILES=(cisco_plugins.ini) else - Q_PLUGIN_CONF_PATH=etc/quantum/plugins/cisco + Q_PLUGIN_CONF_PATH=etc/neutron/plugins/cisco Q_PLUGIN_CONF_FILENAME=cisco_plugins.ini fi - Q_PLUGIN_CLASS="quantum.plugins.cisco.network_plugin.PluginV2" - Q_DB_NAME=cisco_quantum + Q_PLUGIN_CLASS="neutron.plugins.cisco.network_plugin.PluginV2" + Q_DB_NAME=cisco_neutron } -function quantum_plugin_configure_debug_command() { +function neutron_plugin_configure_debug_command() { if _has_ovs_subplugin; then - ovs_quantum_plugin_configure_debug_command + ovs_neutron_plugin_configure_debug_command fi } -function quantum_plugin_configure_dhcp_agent() { - iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_agent_manager quantum.agent.dhcp_agent.DhcpAgentWithStateReport +function neutron_plugin_configure_dhcp_agent() { + iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_agent_manager neutron.agent.dhcp_agent.DhcpAgentWithStateReport } -function quantum_plugin_configure_l3_agent() { +function neutron_plugin_configure_l3_agent() { if _has_ovs_subplugin; then - ovs_quantum_plugin_configure_l3_agent + 
ovs_neutron_plugin_configure_l3_agent fi } @@ -230,7 +230,7 @@ function _configure_nexus_subplugin() { HOST_NAME=$(hostname) Q_CISCO_PLUGIN_SWITCH_INFO=([1.1.1.1]=stack:stack:22:${HOST_NAME}:1/10) else - iniset $cisco_cfg_file CISCO nexus_driver quantum.plugins.cisco.nexus.cisco_nexus_network_driver_v2.CiscoNEXUSDriver + iniset $cisco_cfg_file CISCO nexus_driver neutron.plugins.cisco.nexus.cisco_nexus_network_driver_v2.CiscoNEXUSDriver fi # Setup the switch configurations @@ -267,21 +267,21 @@ function _configure_n1kv_subplugin() { # Setup the integration bridge by calling the ovs_base OVS_BRIDGE=$Q_CISCO_PLUGIN_INTEGRATION_BRIDGE - _quantum_ovs_base_setup_bridge $OVS_BRIDGE + _neutron_ovs_base_setup_bridge $OVS_BRIDGE } -function quantum_plugin_configure_plugin_agent() { +function neutron_plugin_configure_plugin_agent() { if _has_ovs_subplugin; then - ovs_quantum_plugin_configure_plugin_agent + ovs_neutron_plugin_configure_plugin_agent fi } -function quantum_plugin_configure_service() { +function neutron_plugin_configure_service() { local subplugin local cisco_cfg_file if _has_ovs_subplugin; then - ovs_quantum_plugin_configure_service + ovs_neutron_plugin_configure_service cisco_cfg_file=/${Q_PLUGIN_EXTRA_CONF_FILES[0]} else cisco_cfg_file=/$Q_PLUGIN_CONF_FILE @@ -302,9 +302,9 @@ function quantum_plugin_configure_service() { inicomment $cisco_cfg_file CISCO_TEST host for subplugin in ${Q_CISCO_PLUGIN_SUBPLUGINS[@]}; do case $subplugin in - nexus) iniset $cisco_cfg_file CISCO_PLUGINS nexus_plugin quantum.plugins.cisco.nexus.cisco_nexus_plugin_v2.NexusPlugin;; - openvswitch) iniset $cisco_cfg_file CISCO_PLUGINS vswitch_plugin quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPluginV2;; - n1kv) iniset $cisco_cfg_file CISCO_PLUGINS vswitch_plugin quantum.plugins.cisco.n1kv.n1kv_quantum_plugin.N1kvQuantumPluginV2;; + nexus) iniset $cisco_cfg_file CISCO_PLUGINS nexus_plugin neutron.plugins.cisco.nexus.cisco_nexus_plugin_v2.NexusPlugin;; + openvswitch) iniset 
$cisco_cfg_file CISCO_PLUGINS vswitch_plugin neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2;; + n1kv) iniset $cisco_cfg_file CISCO_PLUGINS vswitch_plugin neutron.plugins.cisco.n1kv.n1kv_neutron_plugin.N1kvNeutronPluginV2;; *) die $LINENO "Unsupported cisco subplugin: $subplugin";; esac done @@ -318,9 +318,9 @@ function quantum_plugin_configure_service() { fi } -function quantum_plugin_setup_interface_driver() { +function neutron_plugin_setup_interface_driver() { local conf_file=$1 - iniset $conf_file DEFAULT interface_driver quantum.agent.linux.interface.OVSInterfaceDriver + iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver } # Restore xtrace diff --git a/lib/quantum_plugins/linuxbridge b/lib/neutron_plugins/linuxbridge similarity index 78% rename from lib/quantum_plugins/linuxbridge rename to lib/neutron_plugins/linuxbridge index 989b930005..9aad8f39ae 100644 --- a/lib/quantum_plugins/linuxbridge +++ b/lib/neutron_plugins/linuxbridge @@ -1,18 +1,18 @@ -# Quantum Linux Bridge plugin +# Neutron Linux Bridge plugin # --------------------------- # Save trace setting MY_XTRACE=$(set +o | grep xtrace) set +o xtrace -function quantum_plugin_configure_common() { - Q_PLUGIN_CONF_PATH=etc/quantum/plugins/linuxbridge +function neutron_plugin_configure_common() { + Q_PLUGIN_CONF_PATH=etc/neutron/plugins/linuxbridge Q_PLUGIN_CONF_FILENAME=linuxbridge_conf.ini - Q_DB_NAME="quantum_linux_bridge" - Q_PLUGIN_CLASS="quantum.plugins.linuxbridge.lb_quantum_plugin.LinuxBridgePluginV2" + Q_DB_NAME="neutron_linux_bridge" + Q_PLUGIN_CLASS="neutron.plugins.linuxbridge.lb_neutron_plugin.LinuxBridgePluginV2" } -function quantum_plugin_configure_service() { +function neutron_plugin_configure_service() { if [[ "$ENABLE_TENANT_VLANS" = "True" ]]; then iniset /$Q_PLUGIN_CONF_FILE vlans tenant_network_type vlan else @@ -31,9 +31,9 @@ function quantum_plugin_configure_service() { iniset /$Q_PLUGIN_CONF_FILE vlans network_vlan_ranges 
$LB_VLAN_RANGES fi if [[ "$Q_USE_SECGROUP" == "True" ]]; then - iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver quantum.agent.linux.iptables_firewall.IptablesFirewallDriver + iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.linux.iptables_firewall.IptablesFirewallDriver else - iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver quantum.agent.firewall.NoopFirewallDriver + iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.firewall.NoopFirewallDriver fi # Define extra "LINUX_BRIDGE" configuration options when q-svc is configured by defining @@ -45,7 +45,7 @@ function quantum_plugin_configure_service() { done } -function has_quantum_plugin_security_group() { +function has_neutron_plugin_security_group() { # 0 means True here return 0 } diff --git a/lib/quantum_plugins/linuxbridge_agent b/lib/neutron_plugins/linuxbridge_agent similarity index 69% rename from lib/quantum_plugins/linuxbridge_agent rename to lib/neutron_plugins/linuxbridge_agent index b3ca8b12ab..88c49c5b5e 100644 --- a/lib/quantum_plugins/linuxbridge_agent +++ b/lib/neutron_plugins/linuxbridge_agent @@ -1,37 +1,37 @@ -# Quantum Linux Bridge L2 agent +# Neutron Linux Bridge L2 agent # ----------------------------- # Save trace setting PLUGIN_XTRACE=$(set +o | grep xtrace) set +o xtrace -function is_quantum_ovs_base_plugin() { +function is_neutron_ovs_base_plugin() { # linuxbridge doesn't use OVS return 1 } -function quantum_plugin_create_nova_conf() { +function neutron_plugin_create_nova_conf() { NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"} } -function quantum_plugin_install_agent_packages() { +function neutron_plugin_install_agent_packages() { install_package bridge-utils } -function quantum_plugin_configure_debug_command() { - iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT external_network_bridge +function neutron_plugin_configure_debug_command() { + iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT 
external_network_bridge } -function quantum_plugin_configure_dhcp_agent() { - iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_agent_manager quantum.agent.dhcp_agent.DhcpAgentWithStateReport +function neutron_plugin_configure_dhcp_agent() { + iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_agent_manager neutron.agent.dhcp_agent.DhcpAgentWithStateReport } -function quantum_plugin_configure_l3_agent() { +function neutron_plugin_configure_l3_agent() { iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge - iniset $Q_L3_CONF_FILE DEFAULT l3_agent_manager quantum.agent.l3_agent.L3NATAgentWithStateReport + iniset $Q_L3_CONF_FILE DEFAULT l3_agent_manager neutron.agent.l3_agent.L3NATAgentWithStateReport } -function quantum_plugin_configure_plugin_agent() { +function neutron_plugin_configure_plugin_agent() { # Setup physical network interface mappings. Override # ``LB_VLAN_RANGES`` and ``LB_INTERFACE_MAPPINGS`` in ``localrc`` for more # complex physical network configurations. @@ -42,11 +42,11 @@ function quantum_plugin_configure_plugin_agent() { iniset /$Q_PLUGIN_CONF_FILE linux_bridge physical_interface_mappings $LB_INTERFACE_MAPPINGS fi if [[ "$Q_USE_SECGROUP" == "True" ]]; then - iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver quantum.agent.linux.iptables_firewall.IptablesFirewallDriver + iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.linux.iptables_firewall.IptablesFirewallDriver else - iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver quantum.agent.firewall.NoopFirewallDriver + iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.firewall.NoopFirewallDriver fi - AGENT_BINARY="$QUANTUM_DIR/bin/quantum-linuxbridge-agent" + AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-linuxbridge-agent" # Define extra "AGENT" configuration options when q-agt is configured by defining # the array ``Q_AGENT_EXTRA_AGENT_OPTS``. 
# For Example: ``Q_AGENT_EXTRA_AGENT_OPTS=(foo=true bar=2)`` @@ -63,12 +63,12 @@ function quantum_plugin_configure_plugin_agent() { done } -function quantum_plugin_setup_interface_driver() { +function neutron_plugin_setup_interface_driver() { local conf_file=$1 - iniset $conf_file DEFAULT interface_driver quantum.agent.linux.interface.BridgeInterfaceDriver + iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.BridgeInterfaceDriver } -function quantum_plugin_check_adv_test_requirements() { +function neutron_plugin_check_adv_test_requirements() { is_service_enabled q-agt && is_service_enabled q-dhcp && return 0 } diff --git a/lib/quantum_plugins/ml2 b/lib/neutron_plugins/ml2 similarity index 78% rename from lib/quantum_plugins/ml2 rename to lib/neutron_plugins/ml2 index ae8fe6c997..fcff8703e5 100644 --- a/lib/quantum_plugins/ml2 +++ b/lib/neutron_plugins/ml2 @@ -1,4 +1,4 @@ -# Quantum Modular Layer 2 plugin +# Neutron Modular Layer 2 plugin # ------------------------------ # Save trace setting @@ -7,16 +7,16 @@ set +o xtrace # Default openvswitch L2 agent Q_AGENT=${Q_AGENT:-openvswitch} -source $TOP_DIR/lib/quantum_plugins/${Q_AGENT}_agent +source $TOP_DIR/lib/neutron_plugins/${Q_AGENT}_agent -function quantum_plugin_configure_common() { - Q_PLUGIN_CONF_PATH=etc/quantum/plugins/ml2 +function neutron_plugin_configure_common() { + Q_PLUGIN_CONF_PATH=etc/neutron/plugins/ml2 Q_PLUGIN_CONF_FILENAME=ml2_conf.ini - Q_DB_NAME="quantum_ml2" - Q_PLUGIN_CLASS="quantum.plugins.ml2.plugin.Ml2Plugin" + Q_DB_NAME="neutron_ml2" + Q_PLUGIN_CLASS="neutron.plugins.ml2.plugin.Ml2Plugin" } -function quantum_plugin_configure_service() { +function neutron_plugin_configure_service() { if [[ "$ENABLE_TENANT_TUNNELS" = "True" ]]; then iniset /$Q_PLUGIN_CONF_FILE ml2 tenant_network_types gre iniset /$Q_PLUGIN_CONF_FILE ml2_type_gre tunnel_id_ranges $TENANT_TUNNEL_RANGES @@ -40,21 +40,21 @@ function quantum_plugin_configure_service() { fi # REVISIT(rkukura): Setting 
firewall_driver here for - # quantum.agent.securitygroups_rpc.is_firewall_enabled() which is + # neutron.agent.securitygroups_rpc.is_firewall_enabled() which is # used in the server, in case no L2 agent is configured on the # server's node. If an L2 agent is configured, this will get # overridden with the correct driver. The ml2 plugin should # instead use its own config variable to indicate whether security # groups is enabled, and that will need to be set here instead. if [[ "$Q_USE_SECGROUP" == "True" ]]; then - iniset /$Q_PLUGIN_CONF_FILE SECURITYGROUP firewall_driver quantum.agent.not.a.real.FirewallDriver + iniset /$Q_PLUGIN_CONF_FILE SECURITYGROUP firewall_driver neutron.agent.not.a.real.FirewallDriver else - iniset /$Q_PLUGIN_CONF_FILE SECURITYGROUP firewall_driver quantum.agent.firewall.NoopFirewallDriver + iniset /$Q_PLUGIN_CONF_FILE SECURITYGROUP firewall_driver neutron.agent.firewall.NoopFirewallDriver fi } -function has_quantum_plugin_security_group() { +function has_neutron_plugin_security_group() { return 0 } diff --git a/lib/quantum_plugins/nec b/lib/neutron_plugins/nec similarity index 65% rename from lib/quantum_plugins/nec rename to lib/neutron_plugins/nec index 69bbe0e618..79d41dbf77 100644 --- a/lib/quantum_plugins/nec +++ b/lib/neutron_plugins/nec @@ -1,4 +1,4 @@ -# Quantum NEC OpenFlow plugin +# Neutron NEC OpenFlow plugin # --------------------------- # Save trace setting @@ -20,47 +20,47 @@ OFC_RETRY_INTERVAL=${OFC_RETRY_INTERVAL:-1} # Main logic # --------------------------- -source $TOP_DIR/lib/quantum_plugins/ovs_base +source $TOP_DIR/lib/neutron_plugins/ovs_base -function quantum_plugin_create_nova_conf() { - _quantum_ovs_base_configure_nova_vif_driver +function neutron_plugin_create_nova_conf() { + _neutron_ovs_base_configure_nova_vif_driver } -function quantum_plugin_install_agent_packages() { +function neutron_plugin_install_agent_packages() { # SKIP_OVS_INSTALL is useful when we want to use Open vSwitch whose # version is different 
from the version provided by the distribution. if [[ "$SKIP_OVS_INSTALL" = "True" ]]; then echo "You need to install Open vSwitch manually." return fi - _quantum_ovs_base_install_agent_packages + _neutron_ovs_base_install_agent_packages } -function quantum_plugin_configure_common() { - Q_PLUGIN_CONF_PATH=etc/quantum/plugins/nec +function neutron_plugin_configure_common() { + Q_PLUGIN_CONF_PATH=etc/neutron/plugins/nec Q_PLUGIN_CONF_FILENAME=nec.ini - Q_DB_NAME="quantum_nec" - Q_PLUGIN_CLASS="quantum.plugins.nec.nec_plugin.NECPluginV2" + Q_DB_NAME="neutron_nec" + Q_PLUGIN_CLASS="neutron.plugins.nec.nec_plugin.NECPluginV2" } -function quantum_plugin_configure_debug_command() { - _quantum_ovs_base_configure_debug_command +function neutron_plugin_configure_debug_command() { + _neutron_ovs_base_configure_debug_command } -function quantum_plugin_configure_dhcp_agent() { +function neutron_plugin_configure_dhcp_agent() { : } -function quantum_plugin_configure_l3_agent() { - _quantum_ovs_base_configure_l3_agent +function neutron_plugin_configure_l3_agent() { + _neutron_ovs_base_configure_l3_agent } -function quantum_plugin_configure_plugin_agent() { +function neutron_plugin_configure_plugin_agent() { if [[ "$SKIP_OVS_BRIDGE_SETUP" = "True" ]]; then return fi # Set up integration bridge - _quantum_ovs_base_setup_bridge $OVS_BRIDGE + _neutron_ovs_base_setup_bridge $OVS_BRIDGE sudo ovs-vsctl --no-wait set-controller $OVS_BRIDGE tcp:$OFC_OFP_HOST:$OFC_OFP_PORT # Generate datapath ID from HOST_IP local dpid=$(printf "0x%07d%03d%03d%03d\n" ${HOST_IP//./ }) @@ -69,26 +69,26 @@ function quantum_plugin_configure_plugin_agent() { if [ -n "$OVS_INTERFACE" ]; then sudo ovs-vsctl --no-wait -- --may-exist add-port $OVS_BRIDGE $OVS_INTERFACE fi - _quantum_setup_ovs_tunnels $OVS_BRIDGE - AGENT_BINARY="$QUANTUM_DIR/bin/quantum-nec-agent" + _neutron_setup_ovs_tunnels $OVS_BRIDGE + AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-nec-agent" - _quantum_ovs_base_configure_firewall_driver + 
_neutron_ovs_base_configure_firewall_driver } -function quantum_plugin_configure_service() { - iniset $QUANTUM_CONF DEFAULT api_extensions_path quantum/plugins/nec/extensions/ +function neutron_plugin_configure_service() { + iniset $NEUTRON_CONF DEFAULT api_extensions_path neutron/plugins/nec/extensions/ iniset /$Q_PLUGIN_CONF_FILE ofc host $OFC_API_HOST iniset /$Q_PLUGIN_CONF_FILE ofc port $OFC_API_PORT iniset /$Q_PLUGIN_CONF_FILE ofc driver $OFC_DRIVER iniset /$Q_PLUGIN_CONF_FILE ofc api_retry_max OFC_RETRY_MAX iniset /$Q_PLUGIN_CONF_FILE ofc api_retry_interval OFC_RETRY_INTERVAL - _quantum_ovs_base_configure_firewall_driver + _neutron_ovs_base_configure_firewall_driver } -function quantum_plugin_setup_interface_driver() { +function neutron_plugin_setup_interface_driver() { local conf_file=$1 - iniset $conf_file DEFAULT interface_driver quantum.agent.linux.interface.OVSInterfaceDriver + iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver iniset $conf_file DEFAULT ovs_use_veth True } @@ -96,7 +96,7 @@ function quantum_plugin_setup_interface_driver() { # --------------------------- # Setup OVS tunnel manually -function _quantum_setup_ovs_tunnels() { +function _neutron_setup_ovs_tunnels() { local bridge=$1 local id=0 GRE_LOCAL_IP=${GRE_LOCAL_IP:-$HOST_IP} @@ -113,12 +113,12 @@ function _quantum_setup_ovs_tunnels() { fi } -function has_quantum_plugin_security_group() { +function has_neutron_plugin_security_group() { # 0 means True here return 0 } -function quantum_plugin_check_adv_test_requirements() { +function neutron_plugin_check_adv_test_requirements() { is_service_enabled q-agt && is_service_enabled q-dhcp && return 0 } diff --git a/lib/quantum_plugins/nicira b/lib/neutron_plugins/nicira similarity index 79% rename from lib/quantum_plugins/nicira rename to lib/neutron_plugins/nicira index d4b3e5128e..7642be6578 100644 --- a/lib/quantum_plugins/nicira +++ b/lib/neutron_plugins/nicira @@ -1,14 +1,14 @@ -# Quantum Nicira NVP 
plugin +# Neutron Nicira NVP plugin # --------------------------- # Save trace setting MY_XTRACE=$(set +o | grep xtrace) set +o xtrace -source $TOP_DIR/lib/quantum_plugins/ovs_base +source $TOP_DIR/lib/neutron_plugins/ovs_base function setup_integration_bridge() { - _quantum_ovs_base_setup_bridge $OVS_BRIDGE + _neutron_ovs_base_setup_bridge $OVS_BRIDGE # Set manager to NVP controller (1st of list) if [[ "$NVP_CONTROLLERS" != "" ]]; then # Get the first controller @@ -20,12 +20,12 @@ function setup_integration_bridge() { sudo ovs-vsctl set-manager ssl:$OVS_MGR_IP } -function is_quantum_ovs_base_plugin() { +function is_neutron_ovs_base_plugin() { # NVP uses OVS, but not the l3-agent return 0 } -function quantum_plugin_create_nova_conf() { +function neutron_plugin_create_nova_conf() { NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtOpenVswitchDriver"} # if n-cpu is enabled, then setup integration bridge if is_service_enabled n-cpu; then @@ -33,40 +33,40 @@ function quantum_plugin_create_nova_conf() { fi } -function quantum_plugin_install_agent_packages() { +function neutron_plugin_install_agent_packages() { # Nicira Plugin does not run q-agt, but it currently needs dhcp and metadata agents - _quantum_ovs_base_install_agent_packages + _neutron_ovs_base_install_agent_packages } -function quantum_plugin_configure_common() { - Q_PLUGIN_CONF_PATH=etc/quantum/plugins/nicira +function neutron_plugin_configure_common() { + Q_PLUGIN_CONF_PATH=etc/neutron/plugins/nicira Q_PLUGIN_CONF_FILENAME=nvp.ini - Q_DB_NAME="quantum_nvp" - Q_PLUGIN_CLASS="quantum.plugins.nicira.nicira_nvp_plugin.QuantumPlugin.NvpPluginV2" + Q_DB_NAME="neutron_nvp" + Q_PLUGIN_CLASS="neutron.plugins.nicira.nicira_nvp_plugin.NeutronPlugin.NvpPluginV2" } -function quantum_plugin_configure_debug_command() { +function neutron_plugin_configure_debug_command() { sudo ovs-vsctl --no-wait -- --may-exist add-br $PUBLIC_BRIDGE } -function quantum_plugin_configure_dhcp_agent() { +function 
neutron_plugin_configure_dhcp_agent() { setup_integration_bridge iniset $Q_DHCP_CONF_FILE DEFAULT enable_isolated_metadata True iniset $Q_DHCP_CONF_FILE DEFAULT enable_metadata_network True iniset $Q_DHCP_CONF_FILE DEFAULT ovs_use_veth True } -function quantum_plugin_configure_l3_agent() { +function neutron_plugin_configure_l3_agent() { # Nicira plugin does not run L3 agent die $LINENO "q-l3 should must not be executed with Nicira plugin!" } -function quantum_plugin_configure_plugin_agent() { +function neutron_plugin_configure_plugin_agent() { # Nicira plugin does not run L2 agent die $LINENO "q-agt must not be executed with Nicira plugin!" } -function quantum_plugin_configure_service() { +function neutron_plugin_configure_service() { if [[ "$MAX_LP_PER_BRIDGED_LS" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE nvp max_lp_per_bridged_ls $MAX_LP_PER_BRIDGED_LS fi @@ -120,17 +120,17 @@ function quantum_plugin_configure_service() { fi } -function quantum_plugin_setup_interface_driver() { +function neutron_plugin_setup_interface_driver() { local conf_file=$1 - iniset $conf_file DEFAULT interface_driver quantum.agent.linux.interface.OVSInterfaceDriver + iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver } -function has_quantum_plugin_security_group() { +function has_neutron_plugin_security_group() { # 0 means True here return 0 } -function quantum_plugin_check_adv_test_requirements() { +function neutron_plugin_check_adv_test_requirements() { is_service_enabled q-dhcp && return 0 } diff --git a/lib/quantum_plugins/openvswitch b/lib/neutron_plugins/openvswitch similarity index 76% rename from lib/quantum_plugins/openvswitch rename to lib/neutron_plugins/openvswitch index 4aac9f8e69..f99eb383d8 100644 --- a/lib/quantum_plugins/openvswitch +++ b/lib/neutron_plugins/openvswitch @@ -1,20 +1,20 @@ -# Quantum Open vSwitch plugin +# Neutron Open vSwitch plugin # --------------------------- # Save trace setting MY_XTRACE=$(set +o | grep 
xtrace) set +o xtrace -source $TOP_DIR/lib/quantum_plugins/openvswitch_agent +source $TOP_DIR/lib/neutron_plugins/openvswitch_agent -function quantum_plugin_configure_common() { - Q_PLUGIN_CONF_PATH=etc/quantum/plugins/openvswitch - Q_PLUGIN_CONF_FILENAME=ovs_quantum_plugin.ini - Q_DB_NAME="ovs_quantum" - Q_PLUGIN_CLASS="quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPluginV2" +function neutron_plugin_configure_common() { + Q_PLUGIN_CONF_PATH=etc/neutron/plugins/openvswitch + Q_PLUGIN_CONF_FILENAME=ovs_neutron_plugin.ini + Q_DB_NAME="ovs_neutron" + Q_PLUGIN_CLASS="neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2" } -function quantum_plugin_configure_service() { +function neutron_plugin_configure_service() { if [[ "$ENABLE_TENANT_TUNNELS" = "True" ]]; then iniset /$Q_PLUGIN_CONF_FILE ovs tenant_network_type gre iniset /$Q_PLUGIN_CONF_FILE ovs tunnel_id_ranges $TENANT_TUNNEL_RANGES @@ -41,7 +41,7 @@ function quantum_plugin_configure_service() { iniset /$Q_PLUGIN_CONF_FILE ovs enable_tunneling True fi - _quantum_ovs_base_configure_firewall_driver + _neutron_ovs_base_configure_firewall_driver # Define extra "OVS" configuration options when q-svc is configured by defining # the array ``Q_SRV_EXTRA_OPTS``. 
@@ -52,7 +52,7 @@ function quantum_plugin_configure_service() { done } -function has_quantum_plugin_security_group() { +function has_neutron_plugin_security_group() { return 0 } diff --git a/lib/quantum_plugins/openvswitch_agent b/lib/neutron_plugins/openvswitch_agent similarity index 81% rename from lib/quantum_plugins/openvswitch_agent rename to lib/neutron_plugins/openvswitch_agent index 608c3eae98..46c2a5c6e2 100644 --- a/lib/quantum_plugins/openvswitch_agent +++ b/lib/neutron_plugins/openvswitch_agent @@ -1,43 +1,43 @@ -# Quantum Open vSwitch L2 agent +# Neutron Open vSwitch L2 agent # ----------------------------- # Save trace setting PLUGIN_XTRACE=$(set +o | grep xtrace) set +o xtrace -source $TOP_DIR/lib/quantum_plugins/ovs_base +source $TOP_DIR/lib/neutron_plugins/ovs_base -function quantum_plugin_create_nova_conf() { - _quantum_ovs_base_configure_nova_vif_driver +function neutron_plugin_create_nova_conf() { + _neutron_ovs_base_configure_nova_vif_driver if [ "$VIRT_DRIVER" = 'xenserver' ]; then iniset $NOVA_CONF DEFAULT xenapi_vif_driver nova.virt.xenapi.vif.XenAPIOpenVswitchDriver iniset $NOVA_CONF DEFAULT xenapi_ovs_integration_bridge $XEN_INTEGRATION_BRIDGE - # Disable nova's firewall so that it does not conflict with quantum + # Disable nova's firewall so that it does not conflict with neutron iniset $NOVA_CONF DEFAULT firewall_driver nova.virt.firewall.NoopFirewallDriver fi } -function quantum_plugin_install_agent_packages() { - _quantum_ovs_base_install_agent_packages +function neutron_plugin_install_agent_packages() { + _neutron_ovs_base_install_agent_packages } -function quantum_plugin_configure_debug_command() { - _quantum_ovs_base_configure_debug_command +function neutron_plugin_configure_debug_command() { + _neutron_ovs_base_configure_debug_command } -function quantum_plugin_configure_dhcp_agent() { - iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_agent_manager quantum.agent.dhcp_agent.DhcpAgentWithStateReport +function 
neutron_plugin_configure_dhcp_agent() { + iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_agent_manager neutron.agent.dhcp_agent.DhcpAgentWithStateReport } -function quantum_plugin_configure_l3_agent() { - _quantum_ovs_base_configure_l3_agent - iniset $Q_L3_CONF_FILE DEFAULT l3_agent_manager quantum.agent.l3_agent.L3NATAgentWithStateReport +function neutron_plugin_configure_l3_agent() { + _neutron_ovs_base_configure_l3_agent + iniset $Q_L3_CONF_FILE DEFAULT l3_agent_manager neutron.agent.l3_agent.L3NATAgentWithStateReport } -function quantum_plugin_configure_plugin_agent() { +function neutron_plugin_configure_plugin_agent() { # Setup integration bridge - _quantum_ovs_base_setup_bridge $OVS_BRIDGE - _quantum_ovs_base_configure_firewall_driver + _neutron_ovs_base_setup_bridge $OVS_BRIDGE + _neutron_ovs_base_configure_firewall_driver # Setup agent for tunneling if [[ "$OVS_ENABLE_TUNNELING" = "True" ]]; then @@ -63,14 +63,14 @@ function quantum_plugin_configure_plugin_agent() { if [[ "$OVS_BRIDGE_MAPPINGS" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE ovs bridge_mappings $OVS_BRIDGE_MAPPINGS fi - AGENT_BINARY="$QUANTUM_DIR/bin/quantum-openvswitch-agent" + AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-openvswitch-agent" if [ "$VIRT_DRIVER" = 'xenserver' ]; then # Make a copy of our config for domU sudo cp /$Q_PLUGIN_CONF_FILE "/$Q_PLUGIN_CONF_FILE.domu" # Deal with Dom0's L2 Agent: - Q_RR_DOM0_COMMAND="$QUANTUM_DIR/bin/quantum-rootwrap-xen-dom0 $Q_RR_CONF_FILE" + Q_RR_DOM0_COMMAND="$NEUTRON_BIN_DIR/neutron-rootwrap-xen-dom0 $Q_RR_CONF_FILE" # For now, duplicate the xen configuration already found in nova.conf iniset $Q_RR_CONF_FILE xenapi xenapi_connection_url "$XENAPI_CONNECTION_URL" @@ -118,12 +118,12 @@ function quantum_plugin_configure_plugin_agent() { done } -function quantum_plugin_setup_interface_driver() { +function neutron_plugin_setup_interface_driver() { local conf_file=$1 - iniset $conf_file DEFAULT interface_driver quantum.agent.linux.interface.OVSInterfaceDriver + iniset 
$conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver } -function quantum_plugin_check_adv_test_requirements() { +function neutron_plugin_check_adv_test_requirements() { is_service_enabled q-agt && is_service_enabled q-dhcp && return 0 } diff --git a/lib/quantum_plugins/ovs_base b/lib/neutron_plugins/ovs_base similarity index 71% rename from lib/quantum_plugins/ovs_base rename to lib/neutron_plugins/ovs_base index 646ff4a782..0a2765b480 100644 --- a/lib/quantum_plugins/ovs_base +++ b/lib/neutron_plugins/ovs_base @@ -8,34 +8,34 @@ set +o xtrace OVS_BRIDGE=${OVS_BRIDGE:-br-int} PUBLIC_BRIDGE=${PUBLIC_BRIDGE:-br-ex} -function is_quantum_ovs_base_plugin() { +function is_neutron_ovs_base_plugin() { # Yes, we use OVS. return 0 } -function _quantum_ovs_base_setup_bridge() { +function _neutron_ovs_base_setup_bridge() { local bridge=$1 - quantum-ovs-cleanup + neutron-ovs-cleanup sudo ovs-vsctl --no-wait -- --may-exist add-br $bridge sudo ovs-vsctl --no-wait br-set-external-id $bridge bridge-id $bridge } -function quantum_ovs_base_cleanup() { - # remove all OVS ports that look like Quantum created ports +function neutron_ovs_base_cleanup() { + # remove all OVS ports that look like Neutron created ports for port in $(sudo ovs-vsctl list port | grep -o -e tap[0-9a-f\-]* -e q[rg]-[0-9a-f\-]*); do sudo ovs-vsctl del-port ${port} done - # remove all OVS bridges created by Quantum + # remove all OVS bridges created by Neutron for bridge in $(sudo ovs-vsctl list-br | grep -o -e ${OVS_BRIDGE} -e ${PUBLIC_BRIDGE}); do sudo ovs-vsctl del-br ${bridge} done } -function _quantum_ovs_base_install_agent_packages() { +function _neutron_ovs_base_install_agent_packages() { local kernel_version # Install deps - # FIXME add to ``files/apts/quantum``, but don't install if not needed! + # FIXME add to ``files/apts/neutron``, but don't install if not needed! 
if is_ubuntu; then kernel_version=`cat /proc/version | cut -d " " -f3` install_package make fakeroot dkms openvswitch-switch openvswitch-datapath-dkms linux-headers-$kernel_version @@ -50,29 +50,29 @@ function _quantum_ovs_base_install_agent_packages() { fi } -function _quantum_ovs_base_configure_debug_command() { - iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE +function _neutron_ovs_base_configure_debug_command() { + iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE } -function _quantum_ovs_base_configure_firewall_driver() { +function _neutron_ovs_base_configure_firewall_driver() { if [[ "$Q_USE_SECGROUP" == "True" ]]; then - iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver quantum.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver + iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver else - iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver quantum.agent.firewall.NoopFirewallDriver + iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.firewall.NoopFirewallDriver fi } -function _quantum_ovs_base_configure_l3_agent() { +function _neutron_ovs_base_configure_l3_agent() { iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE - quantum-ovs-cleanup + neutron-ovs-cleanup sudo ovs-vsctl --no-wait -- --may-exist add-br $PUBLIC_BRIDGE # ensure no IP is configured on the public bridge sudo ip addr flush dev $PUBLIC_BRIDGE } -function _quantum_ovs_base_configure_nova_vif_driver() { - # The hybrid VIF driver needs to be specified when Quantum Security Group +function _neutron_ovs_base_configure_nova_vif_driver() { + # The hybrid VIF driver needs to be specified when Neutron Security Group # is enabled (until vif_security attributes are supported in VIF extension) if [[ "$Q_USE_SECGROUP" == "True" ]]; then 
NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver"} diff --git a/lib/quantum_plugins/plumgrid b/lib/neutron_plugins/plumgrid similarity index 53% rename from lib/quantum_plugins/plumgrid rename to lib/neutron_plugins/plumgrid index dde18c8e85..d4cc39596e 100644 --- a/lib/quantum_plugins/plumgrid +++ b/lib/neutron_plugins/plumgrid @@ -1,4 +1,4 @@ -# PLUMgrid Quantum Plugin +# PLUMgrid Neutron Plugin # Edgar Magana emagana@plumgrid.com # ------------------------------------ @@ -6,36 +6,36 @@ MY_XTRACE=$(set +o | grep xtrace) set +o xtrace -#source $TOP_DIR/lib/quantum_plugins/ovs_base +#source $TOP_DIR/lib/neutron_plugins/ovs_base -function quantum_plugin_create_nova_conf() { +function neutron_plugin_create_nova_conf() { NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"} } -function quantum_plugin_setup_interface_driver() { +function neutron_plugin_setup_interface_driver() { : } -function quantum_plugin_configure_common() { - Q_PLUGIN_CONF_PATH=etc/quantum/plugins/plumgrid +function neutron_plugin_configure_common() { + Q_PLUGIN_CONF_PATH=etc/neutron/plugins/plumgrid Q_PLUGIN_CONF_FILENAME=plumgrid.ini - Q_DB_NAME="plumgrid_quantum" - Q_PLUGIN_CLASS="quantum.plugins.plumgrid.plumgrid_nos_plugin.plumgrid_plugin.QuantumPluginPLUMgridV2" + Q_DB_NAME="plumgrid_neutron" + Q_PLUGIN_CLASS="neutron.plugins.plumgrid.plumgrid_nos_plugin.plumgrid_plugin.NeutronPluginPLUMgridV2" } -function quantum_plugin_configure_service() { +function neutron_plugin_configure_service() { PLUMGRID_NOS_IP=${PLUMGRID_NOS_IP:-localhost} PLUMGRID_NOS_PORT=${PLUMGRID_NOS_PORT:-7766} iniset /$Q_PLUGIN_CONF_FILE plumgridnos nos_server $PLUMGRID_NOS_IP iniset /$Q_PLUGIN_CONF_FILE plumgridnos nos_server_port $PLUMGRID_NOS_PORT } -function quantum_plugin_configure_debug_command() { +function neutron_plugin_configure_debug_command() { : } -function quantum_plugin_check_adv_test_requirements() { +function 
neutron_plugin_check_adv_test_requirements() { is_service_enabled q-agt && is_service_enabled q-dhcp && return 0 } # Restore xtrace diff --git a/lib/neutron_plugins/ryu b/lib/neutron_plugins/ryu new file mode 100644 index 0000000000..334c227cdb --- /dev/null +++ b/lib/neutron_plugins/ryu @@ -0,0 +1,80 @@ +# Neutron Ryu plugin +# ------------------ + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + +source $TOP_DIR/lib/neutron_plugins/ovs_base +source $TOP_DIR/lib/neutron_thirdparty/ryu # for configuration value + +function neutron_plugin_create_nova_conf() { + _neutron_ovs_base_configure_nova_vif_driver + iniset $NOVA_CONF DEFAULT libvirt_ovs_integration_bridge "$OVS_BRIDGE" +} + +function neutron_plugin_install_agent_packages() { + _neutron_ovs_base_install_agent_packages + + # neutron_ryu_agent requires ryu module + install_package $(get_packages "ryu") + install_ryu + configure_ryu +} + +function neutron_plugin_configure_common() { + Q_PLUGIN_CONF_PATH=etc/neutron/plugins/ryu + Q_PLUGIN_CONF_FILENAME=ryu.ini + Q_DB_NAME="ovs_neutron" + Q_PLUGIN_CLASS="neutron.plugins.ryu.ryu_neutron_plugin.RyuNeutronPluginV2" +} + +function neutron_plugin_configure_debug_command() { + _neutron_ovs_base_configure_debug_command + iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT ryu_api_host $RYU_API_HOST:$RYU_API_PORT +} + +function neutron_plugin_configure_dhcp_agent() { + iniset $Q_DHCP_CONF_FILE DEFAULT ryu_api_host $RYU_API_HOST:$RYU_API_PORT +} + +function neutron_plugin_configure_l3_agent() { + iniset $Q_L3_CONF_FILE DEFAULT ryu_api_host $RYU_API_HOST:$RYU_API_PORT + _neutron_ovs_base_configure_l3_agent +} + +function neutron_plugin_configure_plugin_agent() { + # Set up integration bridge + _neutron_ovs_base_setup_bridge $OVS_BRIDGE + if [ -n "$RYU_INTERNAL_INTERFACE" ]; then + sudo ovs-vsctl --no-wait -- --may-exist add-port $OVS_BRIDGE $RYU_INTERNAL_INTERFACE + fi + iniset /$Q_PLUGIN_CONF_FILE ovs integration_bridge $OVS_BRIDGE + 
AGENT_BINARY="$NEUTRON_DIR/neutron/plugins/ryu/agent/ryu_neutron_agent.py" + + _neutron_ovs_base_configure_firewall_driver +} + +function neutron_plugin_configure_service() { + iniset /$Q_PLUGIN_CONF_FILE ovs openflow_rest_api $RYU_API_HOST:$RYU_API_PORT + + _neutron_ovs_base_configure_firewall_driver +} + +function neutron_plugin_setup_interface_driver() { + local conf_file=$1 + iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver + iniset $conf_file DEFAULT ovs_use_veth True +} + +function has_neutron_plugin_security_group() { + # 0 means True here + return 0 +} + +function neutron_plugin_check_adv_test_requirements() { + is_service_enabled q-agt && is_service_enabled q-dhcp && return 0 +} + +# Restore xtrace +$MY_XTRACE diff --git a/lib/quantum_plugins/services/loadbalancer b/lib/neutron_plugins/services/loadbalancer similarity index 68% rename from lib/quantum_plugins/services/loadbalancer rename to lib/neutron_plugins/services/loadbalancer index ac8501fa0d..49e286a8cb 100644 --- a/lib/quantum_plugins/services/loadbalancer +++ b/lib/neutron_plugins/services/loadbalancer @@ -1,4 +1,4 @@ -# Quantum loadbalancer plugin +# Neutron loadbalancer plugin # --------------------------- # Save trace setting @@ -6,10 +6,10 @@ MY_XTRACE=$(set +o | grep xtrace) set +o xtrace -AGENT_LBAAS_BINARY="$QUANTUM_DIR/bin/quantum-lbaas-agent" -LBAAS_PLUGIN=quantum.services.loadbalancer.plugin.LoadBalancerPlugin +AGENT_LBAAS_BINARY="$NEUTRON_BIN_DIR/neutron-lbaas-agent" +LBAAS_PLUGIN=neutron.services.loadbalancer.plugin.LoadBalancerPlugin -function quantum_agent_lbaas_install_agent_packages() { +function neutron_agent_lbaas_install_agent_packages() { if is_ubuntu || is_fedora; then install_package haproxy elif is_suse; then @@ -18,7 +18,7 @@ function quantum_agent_lbaas_install_agent_packages() { fi } -function quantum_agent_lbaas_configure_common() { +function neutron_agent_lbaas_configure_common() { if [[ $Q_SERVICE_PLUGIN_CLASSES == '' ]]; then 
Q_SERVICE_PLUGIN_CLASSES=$LBAAS_PLUGIN else @@ -26,20 +26,20 @@ function quantum_agent_lbaas_configure_common() { fi } -function quantum_agent_lbaas_configure_agent() { - LBAAS_AGENT_CONF_PATH=/etc/quantum/services/loadbalancer/haproxy +function neutron_agent_lbaas_configure_agent() { + LBAAS_AGENT_CONF_PATH=/etc/neutron/services/loadbalancer/haproxy mkdir -p $LBAAS_AGENT_CONF_PATH LBAAS_AGENT_CONF_FILENAME="$LBAAS_AGENT_CONF_PATH/lbaas_agent.ini" - cp $QUANTUM_DIR/etc/lbaas_agent.ini $LBAAS_AGENT_CONF_FILENAME + cp $NEUTRON_DIR/etc/lbaas_agent.ini $LBAAS_AGENT_CONF_FILENAME iniset $LBAAS_AGENT_CONF_FILENAME DEFAULT use_namespaces $Q_USE_NAMESPACE # ovs_use_veth needs to be set before the plugin configuration # occurs to allow plugins to override the setting. iniset $LBAAS_AGENT_CONF_FILENAME DEFAULT ovs_use_veth $Q_OVS_USE_VETH - quantum_plugin_setup_interface_driver $LBAAS_AGENT_CONF_FILENAME + neutron_plugin_setup_interface_driver $LBAAS_AGENT_CONF_FILENAME if is_fedora; then iniset $LBAAS_AGENT_CONF_FILENAME DEFAULT user_group "nobody" diff --git a/lib/quantum_thirdparty/README.md b/lib/neutron_thirdparty/README.md similarity index 78% rename from lib/quantum_thirdparty/README.md rename to lib/neutron_thirdparty/README.md index 3b5837d71d..b289f58c5d 100644 --- a/lib/quantum_thirdparty/README.md +++ b/lib/neutron_thirdparty/README.md @@ -1,7 +1,7 @@ -Quantum third party specific files +Neutron third party specific files ================================== -Some Quantum plugins require third party programs to function. -The files under the directory, ``lib/quantum_thirdparty/``, will be used +Some Neutron plugins require third party programs to function. +The files under the directory, ``lib/neutron_thirdparty/``, will be used when their service are enabled. Third party program specific configuration variables should be in this file. @@ -10,7 +10,7 @@ Third party program specific configuration variables should be in this file. 
functions --------- -``lib/quantum`` calls the following functions when the ```` is enabled +``lib/neutron`` calls the following functions when the ```` is enabled functions to be implemented * ``configure_``: diff --git a/lib/quantum_thirdparty/bigswitch_floodlight b/lib/neutron_thirdparty/bigswitch_floodlight similarity index 97% rename from lib/quantum_thirdparty/bigswitch_floodlight rename to lib/neutron_thirdparty/bigswitch_floodlight index 385bd0d9b0..ebde0673b8 100644 --- a/lib/quantum_thirdparty/bigswitch_floodlight +++ b/lib/neutron_thirdparty/bigswitch_floodlight @@ -13,7 +13,7 @@ function configure_bigswitch_floodlight() { } function init_bigswitch_floodlight() { - install_quantum_agent_packages + install_neutron_agent_packages echo -n "Installing OVS managed by the openflow controllers:" echo ${BS_FL_CONTROLLERS_PORT} diff --git a/lib/quantum_thirdparty/nicira b/lib/neutron_thirdparty/nicira similarity index 100% rename from lib/quantum_thirdparty/nicira rename to lib/neutron_thirdparty/nicira diff --git a/lib/quantum_thirdparty/ryu b/lib/neutron_thirdparty/ryu similarity index 75% rename from lib/quantum_thirdparty/ryu rename to lib/neutron_thirdparty/ryu index f1e9e7c495..3b825a10c1 100644 --- a/lib/quantum_thirdparty/ryu +++ b/lib/neutron_thirdparty/ryu @@ -18,8 +18,8 @@ RYU_OFP_PORT=${RYU_OFP_PORT:-6633} # Ryu Applications RYU_APPS=${RYU_APPS:-ryu.app.simple_isolation,ryu.app.rest} -# configure_ryu can be called multiple times as quantum_pluing/ryu may call -# this function for quantum-ryu-agent +# configure_ryu can be called multiple times as neutron_pluing/ryu may call +# this function for neutron-ryu-agent _RYU_CONFIGURED=${_RYU_CONFIGURED:-False} function configure_ryu() { if [[ "$_RYU_CONFIGURED" == "False" ]]; then @@ -44,19 +44,19 @@ wsapi_host=$RYU_API_HOST wsapi_port=$RYU_API_PORT ofp_listen_host=$RYU_OFP_HOST ofp_tcp_listen_port=$RYU_OFP_PORT -quantum_url=http://$Q_HOST:$Q_PORT -quantum_admin_username=$Q_ADMIN_USERNAME 
-quantum_admin_password=$SERVICE_PASSWORD -quantum_admin_tenant_name=$SERVICE_TENANT_NAME -quantum_admin_auth_url=$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v2.0 -quantum_auth_strategy=$Q_AUTH_STRATEGY -quantum_controller_addr=tcp:$RYU_OFP_HOST:$RYU_OFP_PORT +neutron_url=http://$Q_HOST:$Q_PORT +neutron_admin_username=$Q_ADMIN_USERNAME +neutron_admin_password=$SERVICE_PASSWORD +neutron_admin_tenant_name=$SERVICE_TENANT_NAME +neutron_admin_auth_url=$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v2.0 +neutron_auth_strategy=$Q_AUTH_STRATEGY +neutron_controller_addr=tcp:$RYU_OFP_HOST:$RYU_OFP_PORT "} echo "${RYU_CONF_CONTENTS}" > $RYU_CONF } -# install_ryu can be called multiple times as quantum_pluing/ryu may call -# this function for quantum-ryu-agent +# install_ryu can be called multiple times as neutron_pluing/ryu may call +# this function for neutron-ryu-agent # Make this function idempotent and avoid cloning same repo many times # with RECLONE=yes _RYU_INSTALLED=${_RYU_INSTALLED:-False} diff --git a/lib/quantum_thirdparty/trema b/lib/neutron_thirdparty/trema similarity index 100% rename from lib/quantum_thirdparty/trema rename to lib/neutron_thirdparty/trema diff --git a/lib/nova b/lib/nova index afc540e7c6..24d5cf9b0a 100644 --- a/lib/nova +++ b/lib/nova @@ -81,7 +81,7 @@ if [ "$VIRT_DRIVER" = 'xenserver' ]; then GUEST_INTERFACE_DEFAULT=eth1 # Allow ``build_domU.sh`` to specify the flat network bridge via kernel args FLAT_NETWORK_BRIDGE_DEFAULT=$(sed -e 's/.* flat_network_bridge=\([[:alnum:]]*\).*$/\1/g' /proc/cmdline) - if is_service_enabled quantum; then + if is_service_enabled neutron; then XEN_INTEGRATION_BRIDGE=$(sed -e 's/.* xen_integration_bridge=\([[:alnum:]]*\).*$/\1/g' /proc/cmdline) fi elif [ "$VIRT_DRIVER" = 'baremetal' ]; then @@ -281,7 +281,7 @@ function configure_nova() { fi if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then - if is_service_enabled quantum && is_quantum_ovs_base_plugin && ! 
sudo grep -q '^cgroup_device_acl' $QEMU_CONF; then + if is_service_enabled neutron && is_neutron_ovs_base_plugin && ! sudo grep -q '^cgroup_device_acl' $QEMU_CONF; then # Add /dev/net/tun to cgroup_device_acls, needed for type=ethernet interfaces cat < Date: Mon, 8 Jul 2013 11:52:17 -0700 Subject: [PATCH 0191/4704] Update NVP plugin name for Neutron. Module nicira_nvp_plugin no longer exists and it can be removed from the plugin name. Supports blueprint nicira_nvp_plugin Change-Id: I19bdbb3c9e7aa05f672789766f3c0fd528682f03 --- lib/neutron_plugins/nicira | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron_plugins/nicira b/lib/neutron_plugins/nicira index 7642be6578..9b9dbdcc1b 100644 --- a/lib/neutron_plugins/nicira +++ b/lib/neutron_plugins/nicira @@ -42,7 +42,7 @@ function neutron_plugin_configure_common() { Q_PLUGIN_CONF_PATH=etc/neutron/plugins/nicira Q_PLUGIN_CONF_FILENAME=nvp.ini Q_DB_NAME="neutron_nvp" - Q_PLUGIN_CLASS="neutron.plugins.nicira.nicira_nvp_plugin.NeutronPlugin.NvpPluginV2" + Q_PLUGIN_CLASS="neutron.plugins.nicira.NeutronPlugin.NvpPluginV2" } function neutron_plugin_configure_debug_command() { From 53fe11a8227bc96e55d31227a08f5af7cb337797 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 8 Jul 2013 15:06:33 -0500 Subject: [PATCH 0192/4704] More neutron renames Rename quantum-adv-test.sh and the package prereq files in files/*/quantum Change-Id: I4dab635c2ae79f1f42a9cfdadbf7a4d06cf2b925 --- exercises/{quantum-adv-test.sh => neutron-adv-test.sh} | 0 files/apts/{quantum => neutron} | 0 files/rpms-suse/{quantum => neutron} | 0 files/rpms/{quantum => neutron} | 0 4 files changed, 0 insertions(+), 0 deletions(-) rename exercises/{quantum-adv-test.sh => neutron-adv-test.sh} (100%) rename files/apts/{quantum => neutron} (100%) rename files/rpms-suse/{quantum => neutron} (100%) rename files/rpms/{quantum => neutron} (100%) diff --git a/exercises/quantum-adv-test.sh b/exercises/neutron-adv-test.sh similarity index 100% rename 
from exercises/quantum-adv-test.sh rename to exercises/neutron-adv-test.sh diff --git a/files/apts/quantum b/files/apts/neutron similarity index 100% rename from files/apts/quantum rename to files/apts/neutron diff --git a/files/rpms-suse/quantum b/files/rpms-suse/neutron similarity index 100% rename from files/rpms-suse/quantum rename to files/rpms-suse/neutron diff --git a/files/rpms/quantum b/files/rpms/neutron similarity index 100% rename from files/rpms/quantum rename to files/rpms/neutron From aeb85c1ab0e287abce3e956c02c3a390a6a20044 Mon Sep 17 00:00:00 2001 From: Edgar Magana Date: Fri, 5 Jul 2013 11:03:57 -0700 Subject: [PATCH 0193/4704] Renaming NOS to Director Moving PLUMgrid Director parameters to the common configuration function Fix bug #1198281 Change-Id: I5231950121cfaac9d2d8cc58c4e7b904ccaa9dce --- lib/neutron_plugins/plumgrid | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/lib/neutron_plugins/plumgrid b/lib/neutron_plugins/plumgrid index d4cc39596e..a4f0b0dd5d 100644 --- a/lib/neutron_plugins/plumgrid +++ b/lib/neutron_plugins/plumgrid @@ -22,13 +22,13 @@ function neutron_plugin_configure_common() { Q_PLUGIN_CONF_FILENAME=plumgrid.ini Q_DB_NAME="plumgrid_neutron" Q_PLUGIN_CLASS="neutron.plugins.plumgrid.plumgrid_nos_plugin.plumgrid_plugin.NeutronPluginPLUMgridV2" + PLUMGRID_DIRECTOR_IP=${PLUMGRID_DIRECTOR_IP:-localhost} + PLUMGRID_DIRECTOR_PORT=${PLUMGRID_DIRECTOR_PORT:-7766} } function neutron_plugin_configure_service() { - PLUMGRID_NOS_IP=${PLUMGRID_NOS_IP:-localhost} - PLUMGRID_NOS_PORT=${PLUMGRID_NOS_PORT:-7766} - iniset /$Q_PLUGIN_CONF_FILE plumgridnos nos_server $PLUMGRID_NOS_IP - iniset /$Q_PLUGIN_CONF_FILE plumgridnos nos_server_port $PLUMGRID_NOS_PORT + iniset /$Q_PLUGIN_CONF_FILE PLUMgridDirector director_server $PLUMGRID_DIRECTOR_IP + iniset /$Q_PLUGIN_CONF_FILE PLUMgridDirector director_server_port $PLUMGRID_DIRECTOR_PORT } function neutron_plugin_configure_debug_command() { From 
cf6d809d8d13b8177826b9c95b0ccab6f0dcb9c4 Mon Sep 17 00:00:00 2001 From: Vincent Untz Date: Thu, 4 Jul 2013 09:59:34 +0200 Subject: [PATCH 0194/4704] Add openSUSE support for openvswitch Change-Id: I4665a2719c0050cb5413a8ffcefb977106533d1a --- lib/neutron_plugins/ovs_base | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/neutron_plugins/ovs_base b/lib/neutron_plugins/ovs_base index 0a2765b480..0a53bffc74 100644 --- a/lib/neutron_plugins/ovs_base +++ b/lib/neutron_plugins/ovs_base @@ -44,9 +44,9 @@ function _neutron_ovs_base_install_agent_packages() { # Ensure that the service is started restart_service openvswitch elif is_suse; then - ### FIXME: Find out if package can be pushed to Factory - echo "OpenVSwitch packages can be installed from Cloud:OpenStack:Master in OBS" - restart_service openvswitch + install_package openvswitch + restart_service openvswitch-switch + restart_service openvswitch-controller fi } From 28147819aaf03a65fa5689efbf54fd72773bc2fa Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Sat, 29 Jun 2013 14:17:16 +0200 Subject: [PATCH 0195/4704] Source linuxbridge_agent in linuxbridge plugin devstack neutron lib needs to find some functions named 'neutron_plugin_configure_XXXXXX' in the neutron plugin lib Part of them are usually in the plugin agent file. For linuxbridge plugin some of theses functions are missing. This change load the linuxbridge_agent file in linuxbridge file to make missing functions available to neutron lib when linux bridge plugin is used. 
Fix bug #1195237 Change-Id: I567bedc84e77c041e9418773fba1f74b33bcf604 --- lib/neutron_plugins/linuxbridge | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/neutron_plugins/linuxbridge b/lib/neutron_plugins/linuxbridge index 9aad8f39ae..37bc748c37 100644 --- a/lib/neutron_plugins/linuxbridge +++ b/lib/neutron_plugins/linuxbridge @@ -5,6 +5,8 @@ MY_XTRACE=$(set +o | grep xtrace) set +o xtrace +source $TOP_DIR/lib/neutron_plugins/linuxbridge_agent + function neutron_plugin_configure_common() { Q_PLUGIN_CONF_PATH=etc/neutron/plugins/linuxbridge Q_PLUGIN_CONF_FILENAME=linuxbridge_conf.ini From ca96b0a169f3a6c13b20775267f8be8077ffad53 Mon Sep 17 00:00:00 2001 From: Simon Pasquier Date: Tue, 9 Jul 2013 16:59:12 +0200 Subject: [PATCH 0196/4704] Add variable to specify additional parameters for the Neutron service. This patch adds a new variable, Q_SRV_EXTRA_DEFAULT_OPTS, which can override any parameter of the [DEFAULT] section of the neutron.conf file. It can be used for example to define the number of DHCP agents that should host a network: Q_SRV_EXTRA_DEFAULT_OPTS=(dhcp_agents_per_network=2) Change-Id: Ic34164457e64dec6ca8ec76be1772f998d100991 --- README.md | 11 ++++++----- lib/neutron | 8 ++++++++ 2 files changed, 14 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 6fcd01d7ce..5fd4291df5 100644 --- a/README.md +++ b/README.md @@ -122,13 +122,14 @@ In order to enable Neutron a single node setup, you'll need the following settin Then run `stack.sh` as normal. -devstack supports adding specific Neutron configuration flags to both the Open vSwitch and LinuxBridge plugin configuration files. To make use of this feature, the following variables are defined and can be configured in your `localrc` file: +devstack supports adding specific Neutron configuration flags to the service, Open vSwitch plugin and LinuxBridge plugin configuration files. 
To make use of this feature, the following variables are defined and can be configured in your `localrc` file: - Variable Name Plugin Config File Section Modified + Variable Name Config File Section Modified ------------------------------------------------------------------------------------- - Q_SRV_EXTRA_OPTS `OVS` (for Open Vswitch) or `LINUX_BRIDGE` (for LinuxBridge) - Q_AGENT_EXTRA_AGENT_OPTS AGENT - Q_AGENT_EXTRA_SRV_OPTS `OVS` (for Open Vswitch) or `LINUX_BRIDGE` (for LinuxBridge) + Q_SRV_EXTRA_OPTS Plugin `OVS` (for Open Vswitch) or `LINUX_BRIDGE` (for LinuxBridge) + Q_AGENT_EXTRA_AGENT_OPTS Plugin AGENT + Q_AGENT_EXTRA_SRV_OPTS Plugin `OVS` (for Open Vswitch) or `LINUX_BRIDGE` (for LinuxBridge) + Q_SRV_EXTRA_DEFAULT_OPTS Service DEFAULT An example of using the variables in your `localrc` is below: diff --git a/lib/neutron b/lib/neutron index c28bd28fb0..5da7cfc34e 100644 --- a/lib/neutron +++ b/lib/neutron @@ -614,6 +614,14 @@ function _configure_neutron_service() { iniset $NEUTRON_CONF DEFAULT auth_strategy $Q_AUTH_STRATEGY _neutron_setup_keystone $NEUTRON_CONF keystone_authtoken + # Define extra "DEFAULT" configuration options when q-svc is configured by + # defining the array ``Q_SRV_EXTRA_DEFAULT_OPTS``. + # For Example: ``Q_SRV_EXTRA_DEFAULT_OPTS=(foo=true bar=2)`` + for I in "${Q_SRV_EXTRA_DEFAULT_OPTS[@]}"; do + # Replace the first '=' with ' ' for iniset syntax + iniset $NEUTRON_CONF DEFAULT ${I/=/ } + done + # Configure plugin neutron_plugin_configure_service } From 0ce91a5cba10e3a68dad29f9a86f774ee6cfe7b7 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Fri, 5 Jul 2013 11:59:24 +0000 Subject: [PATCH 0197/4704] Configure swift functional test suite. - Implements blueprint configure-swift-functional-tests. 
Change-Id: I8eeedb83e59d8a305d3072ba7506f74afc21c0d1 --- lib/swift | 51 +++++++++++++++++++++++++++++++++++++++++++++++++++ stack.sh | 1 + 2 files changed, 52 insertions(+) diff --git a/lib/swift b/lib/swift index 36bca4c9e2..f537989f98 100644 --- a/lib/swift +++ b/lib/swift @@ -271,6 +271,34 @@ EOF sed -i -e "s,#[ ]*recon_cache_path .*,recon_cache_path = ${SWIFT_DATA_DIR}/cache," ${swift_node_config} done + # Set new accounts in tempauth to match keystone tenant/user (to make testing easier) + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:tempauth user_swifttenanttest1_swiftusertest1 "testing .admin" + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:tempauth user_swifttenanttest2_swiftusertest2 "testing2 .admin" + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:tempauth user_swifttenanttest1_swiftusertest3 "testing3 .admin" + + testfile=${SWIFT_CONF_DIR}/test.conf + cp ${SWIFT_DIR}/test/sample.conf ${testfile} + + # Set accounts for functional tests + iniset ${testfile} func_test account swifttenanttest1 + iniset ${testfile} func_test username swiftusertest1 + iniset ${testfile} func_test username3 swiftusertest3 + iniset ${testfile} func_test account2 swifttenanttest2 + iniset ${testfile} func_test username2 swiftusertest2 + + # Set maximum file size to 10000 bytes or our vm will fill up quickly with + # the default 5gb size. 
+ iniuncomment ${testfile} func_test max_file_size + iniset ${testfile} func_test max_file_size 10000 + + + if is_service_enabled key;then + iniuncomment ${testfile} func_test auth_version + iniset ${testfile} func_test auth_host ${KEYSTONE_SERVICE_HOST} + iniset ${testfile} func_test auth_port ${KEYSTONE_AUTH_PORT} + iniset ${testfile} func_test auth_prefix /v2.0/ + fi + swift_log_dir=${SWIFT_DATA_DIR}/logs rm -rf ${swift_log_dir} mkdir -p ${swift_log_dir}/hourly @@ -329,6 +357,29 @@ function create_swift_disk() { sudo chown -R $USER: ${node} done } +# create_swift_accounts() - Set up common required swift accounts for tests we +# are all attach names since we want to make it compatible with tempauth which +# use underscores for separators. + +# Tenant User Roles +# ------------------------------------------------------------------ +# swifttenanttest1 swiftusertest1 admin +# swifttenanttest1 swiftusertest3 anotherrole +# swifttenanttest2 swiftusertest2 admin + +# Migrated from keystone_data.sh +function create_swift_accounts() { + SWIFT_TENANT_TEST1=$(keystone tenant-create --name=swifttenanttest1 | grep " id " | get_field 2) + SWIFT_USER_TEST1=$(keystone user-create --name=swiftusertest1 --pass=testing --email=test@example.com | grep " id " | get_field 2) + keystone user-role-add --user_id $SWIFT_USER_TEST1 --role_id $ADMIN_ROLE --tenant_id $SWIFT_TENANT_TEST1 + + SWIFT_USER_TEST3=$(keystone user-create --name=swiftusertest3 --pass=testing3 --email=test3@example.com | grep " id " | get_field 2) + keystone user-role-add --user_id $SWIFT_USER_TEST3 --role_id $ANOTHER_ROLE --tenant_id $SWIFT_TENANT_TEST1 + + SWIFT_TENANT_TEST2=$(keystone tenant-create --name=swifttenanttest2 | grep " id " | get_field 2) + SWIFT_USER_TEST2=$(keystone user-create --name=swiftusertest2 --pass=testing2 --email=test2@example.com | grep " id " | get_field 2) + keystone user-role-add --user_id $SWIFT_USER_TEST2 --role_id $ADMIN_ROLE --tenant_id $SWIFT_TENANT_TEST2 +} # init_swift() - 
Initialize rings function init_swift() { diff --git a/stack.sh b/stack.sh index 05b53afee1..86640e9558 100755 --- a/stack.sh +++ b/stack.sh @@ -878,6 +878,7 @@ if is_service_enabled key; then export OS_SERVICE_ENDPOINT=$SERVICE_ENDPOINT create_keystone_accounts create_nova_accounts + create_swift_accounts create_cinder_accounts create_quantum_accounts From 12229a77a525ea92f037fb2bdfd54681de6bde6a Mon Sep 17 00:00:00 2001 From: Euan Harris Date: Wed, 3 Jul 2013 17:51:01 +0100 Subject: [PATCH 0198/4704] devstack: Add paths specific to XenServer Core * XenServer Core keeps its Xapi plugins in a different path to XenServer, in order to comply with the Filesystem Hierarchy Standard. * XenServer Core mounts devices under /dev/xen/blktap-2 in dom0, whereas XenServer mounts them under /dev/sm. Change-Id: I0a66cd03f000f19c8ff63a8ae4231c00fda88380 --- tools/xen/functions | 2 +- tools/xen/scripts/manage-vdi | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/xen/functions b/tools/xen/functions index 4e37554f4d..3a69a5d72c 100644 --- a/tools/xen/functions +++ b/tools/xen/functions @@ -1,7 +1,7 @@ #!/bin/bash function xapi_plugin_location { - for PLUGIN_DIR in "/etc/xapi.d/plugins/" "/usr/lib/xcp/plugins/"; do + for PLUGIN_DIR in "/etc/xapi.d/plugins/" "/usr/lib/xcp/plugins/" "/usr/lib/xapi/plugins"; do if [ -d $PLUGIN_DIR ]; then echo $PLUGIN_DIR return 0 diff --git a/tools/xen/scripts/manage-vdi b/tools/xen/scripts/manage-vdi index 05c4b0745c..381e671acf 100755 --- a/tools/xen/scripts/manage-vdi +++ b/tools/xen/scripts/manage-vdi @@ -32,7 +32,7 @@ function get_mount_device() { vbd_uuid=$1 dev=$(xe_min vbd-list params=device uuid="$vbd_uuid") - if [[ "$dev" =~ "sm/" ]]; then + if [[ "$dev" =~ "sm/" || "$dev" =~ "blktap-2/" ]]; then DEBIAN_FRONTEND=noninteractive \ apt-get --option "Dpkg::Options::=--force-confold" --assume-yes \ install kpartx &> /dev/null || true @@ -49,7 +49,7 @@ function get_mount_device() { function clean_dev_mappings() { 
dev=$(xe_min vbd-list params=device uuid="$vbd_uuid") - if [[ "$dev" =~ "sm/" ]]; then + if [[ "$dev" =~ "sm/" || "$dev" =~ "blktap-2/" ]]; then kpartx -dv "/dev/$dev" fi } From d8511034e69368785bf85440840889664fb90cac Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Wed, 3 Jul 2013 10:44:44 +0100 Subject: [PATCH 0199/4704] xenapi: xe network-attach for OVS bridge creation For Neutron we need an additional OVS bridge to be present. The xe network-create command did not create the bridge immediately. A workaround was applied to attach a network interface to that network, forcing xapi to create the underlying OVS bridge. It turned out, that the xe network-attach command could be used instead, so the workaround is removed and replaced with the proper solution by this patch. Fixes bug 1197305 Change-Id: I10cc763531b71238f0bc12a12de985f8f35d7e27 --- tools/xen/functions | 18 ++++++++++++++++++ tools/xen/install_os_domU.sh | 11 ++++++++++- tools/xen/xenrc | 1 - 3 files changed, 28 insertions(+), 2 deletions(-) diff --git a/tools/xen/functions b/tools/xen/functions index 4e37554f4d..fd14075d40 100644 --- a/tools/xen/functions +++ b/tools/xen/functions @@ -250,3 +250,21 @@ function destroy_all_vifs_of() done unset IFS } + +function have_multiple_hosts() { + xe host-list --minimal | grep -q "," +} + +function attach_network() { + local bridge_or_net_name + + bridge_or_net_name="$1" + + local net + local host + + net=$(_network_uuid "$bridge_or_net_name") + host=$(xe host-list --minimal) + + xe network-attach uuid=$net host-uuid=$host +} diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh index 21aa729c0c..6eb30130cd 100755 --- a/tools/xen/install_os_domU.sh +++ b/tools/xen/install_os_domU.sh @@ -50,6 +50,15 @@ xe_min() cd $THIS_DIR +# Die if multiple hosts listed +if have_multiple_hosts; then + cat >&2 << EOF +ERROR: multiple hosts found. This might mean that the XenServer is a member +of a pool - Exiting. 
+EOF + exit 1 +fi + # Install plugins ## Nova plugins @@ -256,7 +265,7 @@ $THIS_DIR/build_xva.sh "$GUEST_NAME" # is created by XenServer). This is required for Neutron. Also pass that as a # kernel parameter for DomU if is_service_enabled neutron; then - add_interface "$GUEST_NAME" "$XEN_INT_BRIDGE_OR_NET_NAME" $XEN_INT_DEV_NR + attach_network "$XEN_INT_BRIDGE_OR_NET_NAME" XEN_INTEGRATION_BRIDGE=$(bridge_for "$XEN_INT_BRIDGE_OR_NET_NAME") append_kernel_cmdline \ diff --git a/tools/xen/xenrc b/tools/xen/xenrc index 97c0930e4f..03b30ac55e 100644 --- a/tools/xen/xenrc +++ b/tools/xen/xenrc @@ -35,7 +35,6 @@ GUEST_PASSWORD=${GUEST_PASSWORD:-secrete} MGT_DEV_NR=0 VM_DEV_NR=1 PUB_DEV_NR=2 -XEN_INT_DEV_NR=3 # Host Interface, i.e. the interface on the nova vm you want to expose the # services on. Usually the device connected to the management network or the From 022e991a54cce612a5b4a6d9a62227307800a25e Mon Sep 17 00:00:00 2001 From: JordanP Date: Tue, 9 Jul 2013 14:14:48 +0200 Subject: [PATCH 0200/4704] clean logical volumes in init_cinder This part of code was forgotten when _clean_volume_group() has been renamed. 
Change-Id: I96fe749013b5cf1fd3411b2ba661302873e92994 --- lib/cinder | 24 ++++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/lib/cinder b/lib/cinder index 40a25baedc..6e7d785697 100644 --- a/lib/cinder +++ b/lib/cinder @@ -81,10 +81,9 @@ VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-} # Functions # --------- -# _cleanup_lvm removes all cinder volumes and the backing file of the -# volume group used by cinder -# _cleanup_lvm $VOLUME_GROUP $VOLUME_NAME_PREFIX -function _cleanup_lvm() { +# _clean_lvm_lv removes all cinder LVM volumes +# _clean_lvm_lv $VOLUME_GROUP $VOLUME_NAME_PREFIX +function _clean_lvm_lv() { local vg=$1 local lv_prefix=$2 @@ -95,6 +94,13 @@ function _cleanup_lvm() { sudo lvremove -f $vg/$lv fi done +} + +# _clean_lvm_backing_file() removes the backing file of the +# volume group used by cinder +# _clean_lvm_backing_file() $VOLUME_GROUP +function _clean_lvm_backing_file() { + local vg=$1 # if there is no logical volume left, it's safe to attempt a cleanup # of the backing file @@ -145,10 +151,12 @@ function cleanup_cinder() { fi # Campsite rule: leave behind a volume group at least as clean as we found it - _cleanup_lvm $VOLUME_GROUP $VOLUME_NAME_PREFIX + _clean_lvm_lv $VOLUME_GROUP $VOLUME_NAME_PREFIX + _clean_lvm_backing_file $VOLUME_GROUP if [ "$CINDER_MULTI_LVM_BACKEND" = "True" ]; then - _cleanup_lvm $VOLUME_GROUP2 $VOLUME_NAME_PREFIX + _clean_lvm_lv $VOLUME_GROUP2 $VOLUME_NAME_PREFIX + _clean_lvm_backing_file $VOLUME_GROUP2 fi } @@ -412,9 +420,9 @@ function init_cinder() { # Remove iscsi targets sudo tgtadm --op show --mode target | grep $VOLUME_NAME_PREFIX | grep Target | cut -f3 -d ' ' | sudo xargs -n1 tgt-admin --delete || true # Start with a clean volume group - _clean_volume_group $VOLUME_GROUP $VOLUME_NAME_PREFIX + _clean_lvm_lv $VOLUME_GROUP $VOLUME_NAME_PREFIX if [ "$CINDER_MULTI_LVM_BACKEND" = "True" ]; then - _clean_volume_group $VOLUME_GROUP2 $VOLUME_NAME_PREFIX + _clean_lvm_lv $VOLUME_GROUP2 
$VOLUME_NAME_PREFIX fi fi fi From ba31305497fe66ee01230659bc6778915c873616 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Wed, 10 Jul 2013 21:03:43 +0200 Subject: [PATCH 0201/4704] Move swift stuff out of keystone-data.sh - Fixes bug 1199918. Change-Id: I808bdd74a94c8e0ca126b9bee1bfd53eafa189a9 --- files/keystone_data.sh | 28 +--------------------------- lib/swift | 29 +++++++++++++++++++++++++---- 2 files changed, 26 insertions(+), 31 deletions(-) diff --git a/files/keystone_data.sh b/files/keystone_data.sh index ccac88044c..45f9c8165c 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -5,7 +5,6 @@ # Tenant User Roles # ------------------------------------------------------------------ # service glance admin -# service swift service # if enabled # service heat service # if enabled # service ceilometer admin # if enabled # Tempest Only: @@ -124,32 +123,7 @@ if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then fi fi -# Swift - -if [[ "$ENABLED_SERVICES" =~ "swift" || "$ENABLED_SERVICES" =~ "s-proxy" ]]; then - SWIFT_USER=$(get_id keystone user-create \ - --name=swift \ - --pass="$SERVICE_PASSWORD" \ - --tenant_id $SERVICE_TENANT \ - --email=swift@example.com) - keystone user-role-add \ - --tenant_id $SERVICE_TENANT \ - --user_id $SWIFT_USER \ - --role_id $SERVICE_ROLE - if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - SWIFT_SERVICE=$(get_id keystone service-create \ - --name=swift \ - --type="object-store" \ - --description="Swift Service") - keystone endpoint-create \ - --region RegionOne \ - --service_id $SWIFT_SERVICE \ - --publicurl "http://$SERVICE_HOST:8080/v1/AUTH_\$(tenant_id)s" \ - --adminurl "http://$SERVICE_HOST:8080" \ - --internalurl "http://$SERVICE_HOST:8080/v1/AUTH_\$(tenant_id)s" - fi -fi - +# Ceilometer if [[ "$ENABLED_SERVICES" =~ "ceilometer" ]]; then CEILOMETER_USER=$(get_id keystone user-create --name=ceilometer \ --pass="$SERVICE_PASSWORD" \ diff --git a/lib/swift b/lib/swift index f537989f98..2feae78dda 100644 --- 
a/lib/swift +++ b/lib/swift @@ -357,18 +357,39 @@ function create_swift_disk() { sudo chown -R $USER: ${node} done } -# create_swift_accounts() - Set up common required swift accounts for tests we -# are all attach names since we want to make it compatible with tempauth which -# use underscores for separators. +# create_swift_accounts() - Set up standard swift accounts and extra +# one for tests we do this by attaching all words in the account name +# since we want to make it compatible with tempauth which use +# underscores for separators. # Tenant User Roles # ------------------------------------------------------------------ +# service swift service # swifttenanttest1 swiftusertest1 admin # swifttenanttest1 swiftusertest3 anotherrole # swifttenanttest2 swiftusertest2 admin -# Migrated from keystone_data.sh function create_swift_accounts() { + KEYSTONE_CATALOG_BACKEND=${KEYSTONE_CATALOG_BACKEND:-sql} + + SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") + ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }") + + SWIFT_USER=$(keystone user-create --name=swift --pass="$SERVICE_PASSWORD" \ + --tenant_id $SERVICE_TENANT --email=swift@example.com | grep " id " | get_field 2) + keystone user-role-add --tenant_id $SERVICE_TENANT --user_id $SWIFT_USER --role_id $ADMIN_ROLE + + if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then + SWIFT_SERVICE=$(keystone service-create --name=swift --type="object-store" \ + --description="Swift Service" | grep " id " | get_field 2) + keystone endpoint-create \ + --region RegionOne \ + --service_id $SWIFT_SERVICE \ + --publicurl "http://$SERVICE_HOST:8080/v1/AUTH_\$(tenant_id)s" \ + --adminurl "http://$SERVICE_HOST:8080" \ + --internalurl "http://$SERVICE_HOST:8080/v1/AUTH_\$(tenant_id)s" + fi + SWIFT_TENANT_TEST1=$(keystone tenant-create --name=swifttenanttest1 | grep " id " | get_field 2) SWIFT_USER_TEST1=$(keystone user-create --name=swiftusertest1 --pass=testing --email=test@example.com | 
grep " id " | get_field 2) keystone user-role-add --user_id $SWIFT_USER_TEST1 --role_id $ADMIN_ROLE --tenant_id $SWIFT_TENANT_TEST1 From 69b3ff63e4eac221b20607af754f4bcea1478bea Mon Sep 17 00:00:00 2001 From: Nachi Ueno Date: Fri, 7 Jun 2013 10:28:33 -0700 Subject: [PATCH 0202/4704] Add Support for OpenStack Networking VPNaaS (IPSec) Change-Id: I3e5c618237531452c8649d10ef1eb6284919fa31 --- lib/neutron | 19 ++++++++++++++++++- lib/neutron_plugins/services/vpn | 29 +++++++++++++++++++++++++++++ 2 files changed, 47 insertions(+), 1 deletion(-) create mode 100644 lib/neutron_plugins/services/vpn diff --git a/lib/neutron b/lib/neutron index c28bd28fb0..425fb3c003 100644 --- a/lib/neutron +++ b/lib/neutron @@ -202,6 +202,11 @@ source $TOP_DIR/lib/neutron_plugins/$Q_PLUGIN # Hardcoding for 1 service plugin for now source $TOP_DIR/lib/neutron_plugins/services/loadbalancer +# VPN service plugin functions +# ------------------------------------------- +# Hardcoding for 1 service plugin for now +source $TOP_DIR/lib/neutron_plugins/services/vpn + # Use security group or not if has_neutron_plugin_security_group; then Q_USE_SECGROUP=${Q_USE_SECGROUP:-True} @@ -222,6 +227,9 @@ function configure_neutron() { if is_service_enabled q-lbaas; then _configure_neutron_lbaas fi + if is_service_enabled q-vpn; then + _configure_neutron_vpn + fi if is_service_enabled q-svc; then _configure_neutron_service fi @@ -248,6 +256,7 @@ function create_nova_conf_neutron() { iniset $NOVA_CONF DEFAULT quantum_admin_auth_url "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v2.0" iniset $NOVA_CONF DEFAULT quantum_auth_strategy "$Q_AUTH_STRATEGY" iniset $NOVA_CONF DEFAULT quantum_admin_tenant_name "$SERVICE_TENANT_NAME" + iniset $NOVA_CONF DEFAULT quantum_region_name "RegionOne" iniset $NOVA_CONF DEFAULT quantum_url "http://$Q_HOST:$Q_PORT" if [[ "$Q_USE_SECGROUP" == "True" ]]; then @@ -536,7 +545,8 @@ function _configure_neutron_l3_agent() { Q_L3_ENABLED=True # for l3-agent, only 
use per tenant router if we have namespaces Q_L3_ROUTER_PER_TENANT=$Q_USE_NAMESPACE - AGENT_L3_BINARY="$NEUTRON_BIN_DIR/neutron-l3-agent" + + AGENT_L3_BINARY=${AGENT_L3_BINARY:-"$NEUTRON_BIN_DIR/neutron-l3-agent"} Q_L3_CONF_FILE=$NEUTRON_CONF_DIR/l3_agent.ini cp $NEUTRON_DIR/etc/l3_agent.ini $Q_L3_CONF_FILE @@ -571,6 +581,13 @@ function _configure_neutron_lbaas() { neutron_agent_lbaas_configure_agent } +function _configure_neutron_vpn() +{ + neutron_vpn_install_agent_packages + neutron_vpn_configure_common + neutron_vpn_configure_agent +} + # _configure_neutron_plugin_agent() - Set config files for neutron plugin agent # It is called when q-agt is enabled. function _configure_neutron_plugin_agent() { diff --git a/lib/neutron_plugins/services/vpn b/lib/neutron_plugins/services/vpn new file mode 100644 index 0000000000..3c030c5f42 --- /dev/null +++ b/lib/neutron_plugins/services/vpn @@ -0,0 +1,29 @@ +# Neutron VPN plugin +# --------------------------- + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +VPN_BINARY="$NEUTRON_DIR/bin/neutron-vpn-agent" +VPN_PLUGIN="neutron.services.vpn.plugin.VPNDriverPlugin" + +function neutron_vpn_install_agent_packages() { + install_package strongswan +} + +function neutron_vpn_configure_common() { + if [[ $Q_SERVICE_PLUGIN_CLASSES == '' ]]; then + Q_SERVICE_PLUGIN_CLASSES=$VPN_PLUGIN + else + Q_SERVICE_PLUGIN_CLASSES="$Q_SERVICE_PLUGIN_CLASSES,$VPN_PLUGIN" + fi +} + +function neutron_vpn_configure_agent() { + AGENT_L3_BINARY="$NEUTRON_DIR/bin/neutron-vpn-agent" +} + +# Restore xtrace +$MY_XTRACE From 73a39bf2c61c9cf4c7cf398dea4170c86d46a269 Mon Sep 17 00:00:00 2001 From: Nobuto MURATA Date: Thu, 11 Jul 2013 16:26:02 +0900 Subject: [PATCH 0203/4704] replace left quantum string for enable_lb writing the correct config for horizon to show LBaaS entry properly like: OPENSTACK_NEUTRON_NETWORK = { 'enable_lb': True } Change-Id: I7f522a150a3732ee734f2b818910ff92c8c73028 Fixes: bug #1200098 --- lib/horizon | 2 +- 
1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/horizon b/lib/horizon index 0cc250ed6f..b537484608 100644 --- a/lib/horizon +++ b/lib/horizon @@ -106,7 +106,7 @@ function init_horizon() { # enable loadbalancer dashboard in case service is enabled if is_service_enabled q-lbaas; then - _horizon_config_set $local_settings OPENSTACK_QUANTUM_NETWORK enable_lb True + _horizon_config_set $local_settings OPENSTACK_NEUTRON_NETWORK enable_lb True fi # Initialize the horizon database (it stores sessions and notices shown to From 6f0017161f76ffecd1ee489bcb33462729ad96e6 Mon Sep 17 00:00:00 2001 From: Euan Harris Date: Wed, 10 Jul 2013 16:30:31 +0100 Subject: [PATCH 0204/4704] xenapi: Exit immediately if zipball download fails If install_os_domU.sh fails to download the Xapi plugins zipball correctly it ignores the error and continues the installation. This could damage the hypervisor's filesystem, as it may delete files or overwrite them with garbage. Change-Id: I9f6dc31b977592e2818e37b2d310c2a5dc477364 Fixes: bug #1195640 --- tools/xen/functions | 4 ++-- tools/xen/mocks | 3 +++ tools/xen/test_functions.sh | 9 +++++++++ 3 files changed, 14 insertions(+), 2 deletions(-) diff --git a/tools/xen/functions b/tools/xen/functions index 4e37554f4d..e5a7531a41 100644 --- a/tools/xen/functions +++ b/tools/xen/functions @@ -40,11 +40,11 @@ function extract_remote_zipball { local LOCAL_ZIPBALL=$(mktemp) local EXTRACTED_FILES=$(mktemp -d) - ( + { wget -nv $ZIPBALL_URL -O $LOCAL_ZIPBALL --no-check-certificate unzip -q -o $LOCAL_ZIPBALL -d $EXTRACTED_FILES rm -f $LOCAL_ZIPBALL - ) >&2 + } >&2 echo "$EXTRACTED_FILES" } diff --git a/tools/xen/mocks b/tools/xen/mocks index 6da6acbba7..94b0ca4d02 100644 --- a/tools/xen/mocks +++ b/tools/xen/mocks @@ -34,6 +34,9 @@ function mktemp { } function wget { + if [[ $@ =~ "failurl" ]]; then + exit 1 + fi echo "wget $@" >> $LIST_OF_ACTIONS } diff --git a/tools/xen/test_functions.sh b/tools/xen/test_functions.sh index 410df5f8b7..534723833d 
100755 --- a/tools/xen/test_functions.sh +++ b/tools/xen/test_functions.sh @@ -173,6 +173,15 @@ EOF [ "$RESULT" = "tempdir" ] } +function test_extract_remote_zipball_wget_fail { + set +e + + local IGNORE + IGNORE=$(. mocks && extract_remote_zipball "failurl") + + assert_previous_command_failed +} + function test_find_nova_plugins { local tmpdir=$(mktemp -d) From a3ec804ad90fd13944c1b6bd5e55e85e6ecdb62c Mon Sep 17 00:00:00 2001 From: Euan Harris Date: Thu, 4 Jul 2013 16:25:33 +0100 Subject: [PATCH 0205/4704] manage-vdi: Wait until devices have been created before mounting kpartx creates partition devices asynchronously - it may exit before the devices have been created. This may cause a subsequent mount call to fail, because the device it is trying to mount does not yet exist. kpartx now has a -s option which makes it wait for the devices to be created before returning, but the version in XenServer's dom0 doesn't have this option. Work around this by retrying the mount. When dom0's kpartx is updated, we will be able to use the -s option. 
Change-Id: I823a8eac4f3a2ef313d06e21da0f38ed46d7386a --- tools/xen/scripts/manage-vdi | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/tools/xen/scripts/manage-vdi b/tools/xen/scripts/manage-vdi index 05c4b0745c..6dcb2117a8 100755 --- a/tools/xen/scripts/manage-vdi +++ b/tools/xen/scripts/manage-vdi @@ -41,7 +41,17 @@ function get_mount_device() { echo "Failed to find mapping" exit -1 fi - echo "/dev/mapper/${mapping}" + + local device="/dev/mapper/${mapping}" + for (( i = 0; i < 5; i++ )) ; do + if [ -b $device ] ; then + echo $device + return + fi + sleep 1 + done + echo "ERROR: timed out waiting for dev-mapper" + exit 1 else echo "/dev/$dev$part" fi From be677f4403f69f03b3719661ceed71c34bf431a6 Mon Sep 17 00:00:00 2001 From: armando-migliaccio Date: Fri, 12 Jul 2013 08:22:31 -0700 Subject: [PATCH 0206/4704] Add integration bridge to the nova.conf This is needed when running the compute service with vSphere as virt backend, and Neutron as networking solution. Fixes bug #1200677 Change-Id: Iff01559be437d1409c13224574458d23a1edd1a4 --- stack.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index c4f7944d65..530b7b7e73 100755 --- a/stack.sh +++ b/stack.sh @@ -1080,7 +1080,9 @@ if is_service_enabled nova; then iniset $NOVA_CONF DEFAULT vmwareapi_host_username "$VMWAREAPI_USER" iniset $NOVA_CONF DEFAULT vmwareapi_host_password "$VMWAREAPI_PASSWORD" iniset $NOVA_CONF DEFAULT vmwareapi_cluster_name "$VMWAREAPI_CLUSTER" - + if is_service_enabled neutron; then + iniset $NOVA_CONF vmware integration_bridge $OVS_BRIDGE + fi # fake # ---- From f34cb8513505c47723f2192c77a0068bdcc20218 Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Fri, 12 Jul 2013 17:11:07 +0100 Subject: [PATCH 0207/4704] xenapi: preinstall python netaddr On our CI system, the devstack instance was reporting errors, that are related to devstack's address_in_net function - that requires the python netaddr library to be installed. 
This patch preinstalls this python package. Fixes bug 1097667 Change-Id: I8af199427f06cfdf0a68d96d87fe3e541199dca7 --- tools/xen/prepare_guest.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/xen/prepare_guest.sh b/tools/xen/prepare_guest.sh index 0e112263e2..f109d723d9 100755 --- a/tools/xen/prepare_guest.sh +++ b/tools/xen/prepare_guest.sh @@ -22,7 +22,7 @@ STACK_USER="$3" # Install basics apt-get update apt-get install -y cracklib-runtime curl wget ssh openssh-server tcpdump ethtool -apt-get install -y curl wget ssh openssh-server python-pip git vim-nox sudo +apt-get install -y curl wget ssh openssh-server python-pip git vim-nox sudo python-netaddr pip install xenapi # Install XenServer guest utilities From a3dc399965409206aa325769551cf4121d2db0ee Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Thu, 11 Jul 2013 11:26:35 +0200 Subject: [PATCH 0208/4704] QPID default host Using SERVICE_HOST as a default QPID_HOST. Change-Id: I65df0ea4be7df37d4d6f22591b175870808a5c84 --- lib/rpc_backend | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/rpc_backend b/lib/rpc_backend index 462e6cc913..ff87aae2af 100644 --- a/lib/rpc_backend +++ b/lib/rpc_backend @@ -172,7 +172,7 @@ function iniset_rpc_backend() { iniset $file matchmaker_redis host $MATCHMAKER_REDIS_HOST elif is_service_enabled qpid || [ -n "$QPID_HOST" ]; then iniset $file $section rpc_backend ${package}.openstack.common.rpc.impl_qpid - iniset $file $section qpid_hostname $QPID_HOST + iniset $file $section qpid_hostname ${QPID_HOST:-$SERVICE_HOST} if is_ubuntu; then QPID_PASSWORD=`sudo strings /etc/qpid/qpidd.sasldb | grep -B1 admin | head -1` iniset $file $section qpid_password $QPID_PASSWORD From 503e9ac4cf9ddb634279bf98c856adb135f43ac9 Mon Sep 17 00:00:00 2001 From: Gary Kotton Date: Mon, 15 Jul 2013 09:41:25 -0700 Subject: [PATCH 0209/4704] Set external ID on br-ex This will enable Neutron to identify that the external bridge is a Neutron bridge (this is required 
for bug 1192883) Change-Id: I8ad1b0b3d93d5068beec2021abf9afbacf8c48ff --- lib/neutron_plugins/ovs_base | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/neutron_plugins/ovs_base b/lib/neutron_plugins/ovs_base index 0a53bffc74..2666d8e8ba 100644 --- a/lib/neutron_plugins/ovs_base +++ b/lib/neutron_plugins/ovs_base @@ -67,6 +67,7 @@ function _neutron_ovs_base_configure_l3_agent() { neutron-ovs-cleanup sudo ovs-vsctl --no-wait -- --may-exist add-br $PUBLIC_BRIDGE + sudo ovs-vsctl --no-wait br-set-external-id $PUBLIC_BRIDGE bridge-id $PUBLIC_BRIDGE # ensure no IP is configured on the public bridge sudo ip addr flush dev $PUBLIC_BRIDGE } From e6d4fe5f0fbf1c48307a27f7073569f4f09857ff Mon Sep 17 00:00:00 2001 From: John Griffith Date: Mon, 15 Jul 2013 17:35:54 -0600 Subject: [PATCH 0210/4704] Modify startup order of Cinder services. There are cases where the timing between the start up for cinder-volume and cinder-scheduler service can result in a race where the scheduler doesn't know about the volume-service until the next periodic update. This change attempts to do an easy fix by swapping the start order of the cinder services to ensure that the scheduler will be able to receive the volume service capabilities update. 
Fixes bug: 1189595 Change-Id: I8f477ddc04c15c04493f7ce6863e08e1de8f0128 --- lib/cinder | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/lib/cinder b/lib/cinder index 6e7d785697..ef7e3dc9cc 100644 --- a/lib/cinder +++ b/lib/cinder @@ -474,9 +474,13 @@ function start_cinder() { fi screen_it c-api "cd $CINDER_DIR && $CINDER_BIN_DIR/cinder-api --config-file $CINDER_CONF" - screen_it c-vol "cd $CINDER_DIR && $CINDER_BIN_DIR/cinder-volume --config-file $CINDER_CONF" screen_it c-sch "cd $CINDER_DIR && $CINDER_BIN_DIR/cinder-scheduler --config-file $CINDER_CONF" screen_it c-bak "cd $CINDER_DIR && $CINDER_BIN_DIR/cinder-backup --config-file $CINDER_CONF" + screen_it c-vol "cd $CINDER_DIR && $CINDER_BIN_DIR/cinder-volume --config-file $CINDER_CONF" + + # NOTE(jdg): For cinder, startup order matters. To ensure that report_capabilities is received + # by the scheduler start the cinder-volume service last (or restart it) after the scheduler + # has started. This is a quick fix for lp bug/1189595 # Start proxies if enabled if is_service_enabled c-api && is_service_enabled tls-proxy; then From 3763141cf7763390bf35b86b2d143b156c25a915 Mon Sep 17 00:00:00 2001 From: Sudarshan Acharya Date: Tue, 16 Jul 2013 00:47:54 +0000 Subject: [PATCH 0211/4704] UUID Token provider in keystone.conf Token provider needs to be set to uuid.Provider when the token format is UUID. PKI is the default. 
Change-Id: I967289524a50f650cdf2476d5067d263dbf55b03 Fixes: bug #1201639 --- lib/keystone | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/lib/keystone b/lib/keystone index 1b6970dff4..e7e0544bb4 100644 --- a/lib/keystone +++ b/lib/keystone @@ -132,6 +132,11 @@ function configure_keystone() { iniset $KEYSTONE_CONF DEFAULT admin_token "$SERVICE_TOKEN" iniset $KEYSTONE_CONF signing token_format "$KEYSTONE_TOKEN_FORMAT" + + if [[ "$KEYSTONE_TOKEN_FORMAT" = "UUID" ]]; then + iniset $KEYSTONE_CONF token provider keystone.token.providers.uuid.Provider + fi + iniset $KEYSTONE_CONF sql connection `database_connection_url keystone` iniset $KEYSTONE_CONF ec2 driver "keystone.contrib.ec2.backends.sql.Ec2" From 82c0996d48156465980efb6898764c2bb270faaf Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Tue, 16 Jul 2013 07:16:07 +0000 Subject: [PATCH 0212/4704] Disable fallocate and set max_file_size to default - We used to set max_file_size to 10000 to get the functional tests passing on devstack but this was the wrong way. We are now disabling fallocate like done in saio to get the large objects test passing. - Fixes bug 1201077. Change-Id: I33058352f5abfb06f2a992890cbc7339cedc0ad3 --- lib/swift | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/lib/swift b/lib/swift index f537989f98..e85cdad36d 100644 --- a/lib/swift +++ b/lib/swift @@ -242,6 +242,9 @@ EOF iniuncomment ${swift_node_config} DEFAULT log_facility iniset ${swift_node_config} DEFAULT log_facility LOG_LOCAL${log_facility} + iniuncomment ${swift_node_config} DEFAULT disable_fallocate + iniset ${swift_node_config} DEFAULT disable_fallocate true + iniuncomment ${swift_node_config} DEFAULT mount_check iniset ${swift_node_config} DEFAULT mount_check false @@ -286,12 +289,6 @@ EOF iniset ${testfile} func_test account2 swifttenanttest2 iniset ${testfile} func_test username2 swiftusertest2 - # Set maximum file size to 10000 bytes or our vm will fill up quickly with - # the default 5gb size. 
- iniuncomment ${testfile} func_test max_file_size - iniset ${testfile} func_test max_file_size 10000 - - if is_service_enabled key;then iniuncomment ${testfile} func_test auth_version iniset ${testfile} func_test auth_host ${KEYSTONE_SERVICE_HOST} From 35633f097ada2f16f7d4a052a26e729b6c14eaa2 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Tue, 16 Jul 2013 07:35:13 +0000 Subject: [PATCH 0213/4704] Fix vm_test_mode - It was previously incorrectly generated. - Fixes bug 1201694. Change-Id: I802bbd0ced8f12064189db7d707fbb6ca09113bb --- lib/swift | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/lib/swift b/lib/swift index f537989f98..aeb56c7efd 100644 --- a/lib/swift +++ b/lib/swift @@ -223,6 +223,7 @@ EOF local swift_node_config=$1 local node_id=$2 local bind_port=$3 + local server_type=$4 log_facility=$[ node_id - 1 ] node_path=${SWIFT_DATA_DIR}/${node_number} @@ -252,7 +253,7 @@ EOF for node_number in ${SWIFT_REPLICAS_SEQ}; do swift_node_config=${SWIFT_CONF_DIR}/object-server/${node_number}.conf cp ${SWIFT_DIR}/etc/object-server.conf-sample ${swift_node_config} - generate_swift_config ${swift_node_config} ${node_number} $[OBJECT_PORT_BASE + 10 * (node_number - 1)] + generate_swift_config ${swift_node_config} ${node_number} $[OBJECT_PORT_BASE + 10 * (node_number - 1)] object iniset ${swift_node_config} filter:recon recon_cache_path ${SWIFT_DATA_DIR}/cache # Using a sed and not iniset/iniuncomment because we want to a global # modification and make sure it works for new sections. 
@@ -260,14 +261,14 @@ EOF swift_node_config=${SWIFT_CONF_DIR}/container-server/${node_number}.conf cp ${SWIFT_DIR}/etc/container-server.conf-sample ${swift_node_config} - generate_swift_config ${swift_node_config} ${node_number} $[CONTAINER_PORT_BASE + 10 * (node_number - 1)] + generate_swift_config ${swift_node_config} ${node_number} $[CONTAINER_PORT_BASE + 10 * (node_number - 1)] container iniuncomment ${swift_node_config} app:container-server allow_versions iniset ${swift_node_config} app:container-server allow_versions "true" sed -i -e "s,#[ ]*recon_cache_path .*,recon_cache_path = ${SWIFT_DATA_DIR}/cache," ${swift_node_config} swift_node_config=${SWIFT_CONF_DIR}/account-server/${node_number}.conf cp ${SWIFT_DIR}/etc/account-server.conf-sample ${swift_node_config} - generate_swift_config ${swift_node_config} ${node_number} $[ACCOUNT_PORT_BASE + 10 * (node_number - 1)] + generate_swift_config ${swift_node_config} ${node_number} $[ACCOUNT_PORT_BASE + 10 * (node_number - 1)] account sed -i -e "s,#[ ]*recon_cache_path .*,recon_cache_path = ${SWIFT_DATA_DIR}/cache," ${swift_node_config} done From 8e58c0736fa7dba4a8d1da905c887cf603b7a653 Mon Sep 17 00:00:00 2001 From: Jiajun Liu Date: Wed, 17 Jul 2013 06:41:50 +0000 Subject: [PATCH 0214/4704] make rejoin-stack.sh keep the same service tags Currently rejoin-stack.sh can not keep the same screen service tags as the first you deploy openstack due to that the stack-screenrc lack proper command to configure screen's hardstatus. just delete the old stack-screenrc so that function screen_rc can write proper initialize command into stack-screenrc. 
fix bug 1182597 Change-Id: I4cb4c6ded93a5c7b0bd39d65a754ddf86553463d --- rejoin-stack.sh | 2 +- stack.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/rejoin-stack.sh b/rejoin-stack.sh index 65ba7214fa..30b7bab1cc 100755 --- a/rejoin-stack.sh +++ b/rejoin-stack.sh @@ -17,7 +17,7 @@ if [[ -e $TOP_DIR/stack-screenrc ]]; then echo "Attaching to already started screen session.." exec screen -r stack fi - exec screen -c $TOP_DIR/stack-screenrc -S $SCREEN_NAME + exec screen -c $TOP_DIR/stack-screenrc fi echo "Couldn't find $TOP_DIR/stack-screenrc file; have you run stack.sh yet?" diff --git a/stack.sh b/stack.sh index f2054d93eb..e4a7acb4e0 100755 --- a/stack.sh +++ b/stack.sh @@ -836,7 +836,7 @@ fi # Clear screen rc file SCREENRC=$TOP_DIR/$SCREEN_NAME-screenrc if [[ -e $SCREENRC ]]; then - echo -n > $SCREENRC + rm -f $SCREENRC fi # Initialize the directory for service status check From 95c93e2b54ff0dcb5d7a4dd0f7f78e21c789f511 Mon Sep 17 00:00:00 2001 From: Ravi Chunduru Date: Tue, 16 Jul 2013 04:18:47 -0700 Subject: [PATCH 0215/4704] Adds support for Openstack Networking FWaaS (Firewall) blueprint quantum-fwaas-devstack Change-Id: I3c546433415ab18a5933a25774a06df7c4cb42e9 --- lib/horizon | 7 ++++++- lib/neutron | 26 ++++++++++++++++++++++++-- lib/neutron_plugins/services/firewall | 27 +++++++++++++++++++++++++++ 3 files changed, 57 insertions(+), 3 deletions(-) create mode 100644 lib/neutron_plugins/services/firewall diff --git a/lib/horizon b/lib/horizon index 89bd65901c..1e758bfc43 100644 --- a/lib/horizon +++ b/lib/horizon @@ -50,7 +50,7 @@ function _horizon_config_set() { if [ -n "$line" ]; then sed -i -e "/^$section/,/^}/ s/^\( *'$option'\) *:.*$/\1: $value,/" $file else - sed -i -e "/^$section/ a\n '$option': $value,\n" $file + sed -i -e "/^$section/a\ '$option': $value," $file fi else echo -e "\n\n$section = {\n '$option': $value,\n}" >> $file @@ -96,6 +96,11 @@ function init_horizon() { _horizon_config_set $local_settings 
OPENSTACK_NEUTRON_NETWORK enable_lb True fi + # enable firewall dashboard in case service is enabled + if is_service_enabled q-fwaas; then + _horizon_config_set $local_settings OPENSTACK_NEUTRON_NETWORK enable_firewall True + fi + # Initialize the horizon database (it stores sessions and notices shown to # users). The user system is external (keystone). cd $HORIZON_DIR diff --git a/lib/neutron b/lib/neutron index 31876dee88..be831185ca 100644 --- a/lib/neutron +++ b/lib/neutron @@ -207,6 +207,10 @@ source $TOP_DIR/lib/neutron_plugins/services/loadbalancer # Hardcoding for 1 service plugin for now source $TOP_DIR/lib/neutron_plugins/services/vpn +# Firewall Service Plugin functions +# -------------------------------- +source $TOP_DIR/lib/neutron_plugins/services/firewall + # Use security group or not if has_neutron_plugin_security_group; then Q_USE_SECGROUP=${Q_USE_SECGROUP:-True} @@ -230,6 +234,9 @@ function configure_neutron() { if is_service_enabled q-vpn; then _configure_neutron_vpn fi + if is_service_enabled q-fwaas; then + _configure_neutron_fwaas + fi if is_service_enabled q-svc; then _configure_neutron_service fi @@ -418,11 +425,17 @@ function start_neutron_agents() { screen_it q-agt "cd $NEUTRON_DIR && python $AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE" screen_it q-dhcp "cd $NEUTRON_DIR && python $AGENT_DHCP_BINARY --config-file $NEUTRON_CONF --config-file=$Q_DHCP_CONF_FILE" + L3_CONF_FILES="--config-file $NEUTRON_CONF --config-file=$Q_L3_CONF_FILE" + + if is_service_enabled q-fwaas; then + L3_CONF_FILES="$L3_CONF_FILES --config-file $Q_FWAAS_CONF_FILE" + fi if is_service_enabled q-vpn; then - screen_it q-vpn "cd $NEUTRON_DIR && $AGENT_VPN_BINARY --config-file $NEUTRON_CONF --config-file=$Q_L3_CONF_FILE" + screen_it q-vpn "cd $NEUTRON_DIR && $AGENT_VPN_BINARY $L3_CONF_FILES" else - screen_it q-l3 "cd $NEUTRON_DIR && python $AGENT_L3_BINARY --config-file $NEUTRON_CONF --config-file=$Q_L3_CONF_FILE" + screen_it q-l3 "cd 
$NEUTRON_DIR && python $AGENT_L3_BINARY $L3_CONF_FILES" fi + screen_it q-meta "cd $NEUTRON_DIR && python $AGENT_META_BINARY --config-file $NEUTRON_CONF --config-file=$Q_META_CONF_FILE" if [ "$VIRT_DRIVER" = 'xenserver' ]; then @@ -554,6 +567,10 @@ function _configure_neutron_l3_agent() { AGENT_L3_BINARY=${AGENT_L3_BINARY:-"$NEUTRON_BIN_DIR/neutron-l3-agent"} Q_L3_CONF_FILE=$NEUTRON_CONF_DIR/l3_agent.ini + if is_service_enabled q-fwaas; then + Q_FWAAS_CONF_FILE=$NEUTRON_CONF_DIR/fwaas_driver.ini + fi + cp $NEUTRON_DIR/etc/l3_agent.ini $Q_L3_CONF_FILE iniset $Q_L3_CONF_FILE DEFAULT verbose True @@ -586,6 +603,11 @@ function _configure_neutron_lbaas() { neutron_agent_lbaas_configure_agent } +function _configure_neutron_fwaas() { + neutron_fwaas_configure_common + neutron_fwaas_configure_driver +} + function _configure_neutron_vpn() { neutron_vpn_install_agent_packages diff --git a/lib/neutron_plugins/services/firewall b/lib/neutron_plugins/services/firewall new file mode 100644 index 0000000000..1597e8577d --- /dev/null +++ b/lib/neutron_plugins/services/firewall @@ -0,0 +1,27 @@ +# Neutron firewall plugin +# --------------------------- + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + +FWAAS_PLUGIN=neutron.services.firewall.fwaas_plugin.FirewallPlugin + +function neutron_fwaas_configure_common() { + if [[ $Q_SERVICE_PLUGIN_CLASSES == '' ]]; then + Q_SERVICE_PLUGIN_CLASSES=$FWAAS_PLUGIN + else + Q_SERVICE_PLUGIN_CLASSES="$Q_SERVICE_PLUGIN_CLASSES,$FWAAS_PLUGIN" + fi +} + +function neutron_fwaas_configure_driver() { + FWAAS_DRIVER_CONF_FILENAME=/etc/neutron/fwaas_driver.ini + cp $NEUTRON_DIR/etc/fwaas_driver.ini $FWAAS_DRIVER_CONF_FILENAME + + iniset_multiline $FWAAS_DRIVER_CONF_FILENAME fwaas enabled True + iniset_multiline $FWAAS_DRIVER_CONF_FILENAME fwaas driver "neutron.services.firewall.drivers.linux.iptables_fwaas.IptablesFwaasDriver" +} + +# Restore xtrace +$MY_XTRACE From 0ff314c01dc1184fc443a85f4110615f32ec8d90 Mon Sep 17 00:00:00 
2001 From: Ian Wienand Date: Wed, 17 Jul 2013 16:30:19 +1000 Subject: [PATCH 0216/4704] Only create swift account if swift enabled Only call the swift account creation function if swift is enabled, otherwise the endpoints are created in keystone even though swift isn't running. This causes failures when tempest queries keystone and thinks swift is there; it starts running tests against it that fail with unhelpful "connection refused" errors. Change-Id: Icf08409c9443ec703e5f1da4531aa34c326f3642 --- stack.sh | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index f2054d93eb..0a5e1eea09 100755 --- a/stack.sh +++ b/stack.sh @@ -878,10 +878,13 @@ if is_service_enabled key; then export OS_SERVICE_ENDPOINT=$SERVICE_ENDPOINT create_keystone_accounts create_nova_accounts - create_swift_accounts create_cinder_accounts create_neutron_accounts + if is_service_enabled swift || is_service_enabled s-proxy; then + create_swift_accounts + fi + # ``keystone_data.sh`` creates services, admin and demo users, and roles. ADMIN_PASSWORD=$ADMIN_PASSWORD SERVICE_TENANT_NAME=$SERVICE_TENANT_NAME SERVICE_PASSWORD=$SERVICE_PASSWORD \ SERVICE_TOKEN=$SERVICE_TOKEN SERVICE_ENDPOINT=$SERVICE_ENDPOINT SERVICE_HOST=$SERVICE_HOST \ From 4594eb9271e9e3b71a304f61af8c35e82a7059be Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 17 Jul 2013 16:26:22 +0200 Subject: [PATCH 0217/4704] Remove notify_on_any_change This option has been removed from Nova. 
Change-Id: Ic1369cc05861686daae36ec8e5f96b687cac728c --- lib/nova | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/nova b/lib/nova index db82aa2362..096f34c576 100644 --- a/lib/nova +++ b/lib/nova @@ -491,7 +491,6 @@ function create_nova_conf() { iniset $NOVA_CONF DEFAULT instance_usage_audit "True" iniset $NOVA_CONF DEFAULT instance_usage_audit_period "hour" iniset $NOVA_CONF DEFAULT notify_on_state_change "vm_and_task_state" - iniset $NOVA_CONF DEFAULT notify_on_any_change "True" iniset_multiline $NOVA_CONF DEFAULT notification_driver "nova.openstack.common.notifier.rpc_notifier" "ceilometer.compute.nova_notifier" fi From 584750f996bf0336d5c743634cbb0d2e02e78783 Mon Sep 17 00:00:00 2001 From: Nachi Ueno Date: Mon, 15 Jul 2013 18:22:21 -0700 Subject: [PATCH 0218/4704] Update neutron-vpn-agent path Fix path of vpn-agent as same as lbaas - If q-vpn service is enabled, this patch switches the l3-agent to vpn-agent Change-Id: Ifbe3d51b5c89f759a71e904960c5f6cc99c44a5f --- lib/neutron | 8 ++++++-- lib/neutron_plugins/services/vpn | 6 +----- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/lib/neutron b/lib/neutron index e6f5911cbd..835f900425 100644 --- a/lib/neutron +++ b/lib/neutron @@ -415,7 +415,12 @@ function start_neutron_agents() { # Start up the neutron agents if enabled screen_it q-agt "cd $NEUTRON_DIR && python $AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE" screen_it q-dhcp "cd $NEUTRON_DIR && python $AGENT_DHCP_BINARY --config-file $NEUTRON_CONF --config-file=$Q_DHCP_CONF_FILE" - screen_it q-l3 "cd $NEUTRON_DIR && python $AGENT_L3_BINARY --config-file $NEUTRON_CONF --config-file=$Q_L3_CONF_FILE" + + if is_service_enabled q-vpn; then + screen_it q-vpn "cd $NEUTRON_DIR && $AGENT_VPN_BINARY --config-file $NEUTRON_CONF --config-file=$Q_L3_CONF_FILE" + else + screen_it q-l3 "cd $NEUTRON_DIR && python $AGENT_L3_BINARY --config-file $NEUTRON_CONF --config-file=$Q_L3_CONF_FILE" + fi screen_it q-meta "cd $NEUTRON_DIR && 
python $AGENT_META_BINARY --config-file $NEUTRON_CONF --config-file=$Q_META_CONF_FILE" if [ "$VIRT_DRIVER" = 'xenserver' ]; then @@ -585,7 +590,6 @@ function _configure_neutron_vpn() { neutron_vpn_install_agent_packages neutron_vpn_configure_common - neutron_vpn_configure_agent } # _configure_neutron_plugin_agent() - Set config files for neutron plugin agent diff --git a/lib/neutron_plugins/services/vpn b/lib/neutron_plugins/services/vpn index 3c030c5f42..0a79a697ad 100644 --- a/lib/neutron_plugins/services/vpn +++ b/lib/neutron_plugins/services/vpn @@ -6,7 +6,7 @@ MY_XTRACE=$(set +o | grep xtrace) set +o xtrace -VPN_BINARY="$NEUTRON_DIR/bin/neutron-vpn-agent" +AGENT_VPN_BINARY="$NEUTRON_BIN_DIR/neutron-vpn-agent" VPN_PLUGIN="neutron.services.vpn.plugin.VPNDriverPlugin" function neutron_vpn_install_agent_packages() { @@ -21,9 +21,5 @@ function neutron_vpn_configure_common() { fi } -function neutron_vpn_configure_agent() { - AGENT_L3_BINARY="$NEUTRON_DIR/bin/neutron-vpn-agent" -} - # Restore xtrace $MY_XTRACE From 5cac378cde0074ad9f7fe50507800e28c6997418 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Wed, 17 Jul 2013 15:13:44 +0000 Subject: [PATCH 0219/4704] Configure tempauth along keystoneauth. - This would help testing the two auth server for functional testing. - Fixes bug 1202233. Change-Id: Ie0bc642873585ab02083aed543720b4a9b17cb02 --- lib/swift | 37 +++++++++++-------------------------- 1 file changed, 11 insertions(+), 26 deletions(-) diff --git a/lib/swift b/lib/swift index d09a95398d..e53d674666 100644 --- a/lib/swift +++ b/lib/swift @@ -133,19 +133,6 @@ function configure_swift() { sudo sed -i '/disable *= *yes/ { s/yes/no/ }' /etc/xinetd.d/rsync fi - if is_service_enabled swift3;then - swift_auth_server="s3token " - fi - - # By default Swift will be installed with the tempauth middleware - # which has some default username and password if you have - # configured keystone it will checkout the directory. 
- if is_service_enabled key; then - swift_auth_server+="authtoken keystoneauth" - else - swift_auth_server=tempauth - fi - SWIFT_CONFIG_PROXY_SERVER=${SWIFT_CONF_DIR}/proxy-server.conf cp ${SWIFT_DIR}/etc/proxy-server.conf-sample ${SWIFT_CONFIG_PROXY_SERVER} @@ -164,24 +151,22 @@ function configure_swift() { iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port ${SWIFT_DEFAULT_BIND_PORT:-8080} - # By default Swift will be installed with the tempauth middleware - # which has some default username and password if you have - # configured keystone it will configure swift with it. - if is_service_enabled key;then - if is_service_enabled swift3;then - swift_pipeline=" swift3 s3token " - fi - swift_pipeline+=" authtoken keystoneauth " - else - if is_service_enabled swift3;then - swift_pipeline=" swift3 " - fi - swift_pipeline+=" tempauth " + # By default Swift will be installed with keystone and tempauth middleware + # and add the swift3 middleware if its configured for it. 
The token for + # tempauth would be prefixed with the reseller_prefix setting TEMPAUTH_ the + # token for keystoneauth would have the standard reseller_prefix AUTH_ + if is_service_enabled swift3;then + swift_pipeline=" swift3 s3token " fi + swift_pipeline+=" authtoken keystoneauth tempauth " sed -i "/^pipeline/ { s/tempauth/${swift_pipeline} ${SWIFT_EXTRAS_MIDDLEWARE}/ ;}" ${SWIFT_CONFIG_PROXY_SERVER} + iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} filter:tempauth account_autocreate iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server account_autocreate true + iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} filter:tempauth reseller_prefix + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:tempauth reseller_prefix "TEMPAUTH" + # Configure Keystone sed -i '/^# \[filter:authtoken\]/,/^# \[filter:keystoneauth\]$/ s/^#[ \t]*//' ${SWIFT_CONFIG_PROXY_SERVER} iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_host $KEYSTONE_AUTH_HOST From 50686e56200e7064f5ba65834a03977b6d9ce413 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 17 Jul 2013 11:32:35 -0400 Subject: [PATCH 0220/4704] fix name of scheduler_driver in produced nova.conf Fixes bug #1202174. The nova.conf config variable that configures which scheduler to use is scheduler_driver, not compute_scheduler_driver. 
Change-Id: I775cae40edc9f8f55177f9d95cdbaa9416c4bfcd --- lib/nova | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/nova b/lib/nova index 849ec5730d..cb4b8b7b17 100644 --- a/lib/nova +++ b/lib/nova @@ -368,7 +368,7 @@ function create_nova_conf() { iniset $NOVA_CONF DEFAULT allow_resize_to_same_host "True" iniset $NOVA_CONF DEFAULT api_paste_config "$NOVA_API_PASTE_INI" iniset $NOVA_CONF DEFAULT rootwrap_config "$NOVA_CONF_DIR/rootwrap.conf" - iniset $NOVA_CONF DEFAULT compute_scheduler_driver "$SCHEDULER" + iniset $NOVA_CONF DEFAULT scheduler_driver "$SCHEDULER" iniset $NOVA_CONF DEFAULT dhcpbridge_flagfile "$NOVA_CONF" iniset $NOVA_CONF DEFAULT force_dhcp_release "True" iniset $NOVA_CONF DEFAULT fixed_range "$FIXED_RANGE" From 9778b3cb68fc872df089d3272a548d75aad0d8a2 Mon Sep 17 00:00:00 2001 From: Guangyu Suo Date: Wed, 17 Jul 2013 15:22:21 +0800 Subject: [PATCH 0221/4704] Add mysql support for ceilometer storage backend in devstack Currently, devstack only support mongodb as ceilometer storage backend, this patch is to add mysql storage support for ceilometer. If you want to use mysql as backend during developing, you can specify CEILOMETER_BACKEND=mysql in localrc file. If you use mongodb, just ignore the parameter. 
Change-Id: Ic2f475a9baa6d71a43cd29a6ca777ac972e47b0a Implements: blueprint ceilometer-mysql-support --- lib/ceilometer | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/lib/ceilometer b/lib/ceilometer index bd4ab0f2dd..548496e707 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -37,12 +37,10 @@ CEILOMETER_API_LOG_DIR=/var/log/ceilometer-api CEILOMETER_AUTH_CACHE_DIR=${CEILOMETER_AUTH_CACHE_DIR:-/var/cache/ceilometer} # Support potential entry-points console scripts -if [[ -d $CEILOMETER_DIR/bin ]]; then - CEILOMETER_BIN_DIR=$CEILOMETER_DIR/bin -else - CEILOMETER_BIN_DIR=$(get_python_exec_prefix) -fi +CEILOMETER_BIN_DIR=$(get_python_exec_prefix) +# Set up database backend +CEILOMETER_BACKEND=${CEILOMETER_BACKEND:-mongodb} # Functions # --------- @@ -91,11 +89,13 @@ function configure_ceilometer() { iniset $CEILOMETER_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME iniset $CEILOMETER_CONF keystone_authtoken signing_dir $CEILOMETER_AUTH_CACHE_DIR - iniset $CEILOMETER_CONF database connection mongodb://localhost:27017/ceilometer - - configure_mongodb - - cleanup_ceilometer + if [[ "$CEILOMETER_BACKEND" = 'mysql' ]]; then + iniset $CEILOMETER_CONF database connection `database_connection_url ceilometer` + else + iniset $CEILOMETER_CONF database connection mongodb://localhost:27017/ceilometer + configure_mongodb + cleanup_ceilometer + fi } function configure_mongodb() { @@ -113,6 +113,11 @@ function init_ceilometer() { sudo mkdir -p $CEILOMETER_AUTH_CACHE_DIR sudo chown $STACK_USER $CEILOMETER_AUTH_CACHE_DIR rm -f $CEILOMETER_AUTH_CACHE_DIR/* + + if [[ "$CEILOMETER_BACKEND" = 'mysql' ]]; then + recreate_database ceilometer utf8 + $CEILOMETER_BIN_DIR/ceilometer-dbsync + fi } # install_ceilometer() - Collect source and prepare From 806233e0ed30e59d9deb9934f831f8ffad879733 Mon Sep 17 00:00:00 2001 From: Andrea Frittoli Date: Mon, 17 Jun 2013 15:34:54 +0100 Subject: [PATCH 0222/4704] Create an endpoint for nova 
api v3. Supports both SQL and templated keystone backend. Create an additional endpoint for nova api v3. The service type is computev3. The endpoint is similar to the v2 one but the version part is "v3" rather than "v2", and it does not include the tenantid anymore. Fixes: bug #1191798 Change-Id: I86e4734c3a9e57f1dc68f1104449d7c041d6927d --- files/default_catalog.templates | 6 ++++++ lib/nova | 11 +++++++++++ 2 files changed, 17 insertions(+) diff --git a/files/default_catalog.templates b/files/default_catalog.templates index 990cc0e911..1ecf890241 100644 --- a/files/default_catalog.templates +++ b/files/default_catalog.templates @@ -12,6 +12,12 @@ catalog.RegionOne.compute.internalURL = http://%SERVICE_HOST%:8774/v2/$(tenant_i catalog.RegionOne.compute.name = Compute Service +catalog.RegionOne.computev3.publicURL = http://%SERVICE_HOST%:8774/v3 +catalog.RegionOne.computev3.adminURL = http://%SERVICE_HOST%:8774/v3 +catalog.RegionOne.computev3.internalURL = http://%SERVICE_HOST%:8774/v3 +catalog.RegionOne.computev3.name = Compute Service V3 + + catalog.RegionOne.volume.publicURL = http://%SERVICE_HOST%:8776/v1/$(tenant_id)s catalog.RegionOne.volume.adminURL = http://%SERVICE_HOST%:8776/v1/$(tenant_id)s catalog.RegionOne.volume.internalURL = http://%SERVICE_HOST%:8776/v1/$(tenant_id)s diff --git a/lib/nova b/lib/nova index db82aa2362..c87656f22e 100644 --- a/lib/nova +++ b/lib/nova @@ -407,6 +407,17 @@ create_nova_accounts() { --publicurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s" \ --adminurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s" \ --internalurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s" + NOVA_V3_SERVICE=$(keystone service-create \ + --name=nova \ + --type=computev3 \ + --description="Nova Compute Service V3" \ + | grep " id " | get_field 2) + keystone endpoint-create \ + --region RegionOne \ + --service_id $NOVA_V3_SERVICE \ + --publicurl 
"$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v3" \ + --adminurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v3" \ + --internalurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v3" fi fi } From 31dcd3e7ab60855d9664bd0aeb87b79eba94913f Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Tue, 16 Jul 2013 13:36:34 +1000 Subject: [PATCH 0223/4704] Use unique build dir for pip installs There is a bug in pip [1] where it will choose to install a package from an existing build-dir if it exists over the version actually requested. Thus if a prior component has installed a later version of the package, the unpacked code is already in /tmp/$USER-pip-build; it gets re-installed and manifests in a confusing error along the lines of --- Downloading/unpacking requests>=1.1,<1.2.3 (from -r /home/stack//python-cinderclient/requirements.txt (line 5)) Running setup.py egg_info for package requests Requested requests>=1.1,<1.2.3 (from -r /home/stack/python-cinderclient/requirements.txt (line 5)), but installing version 1.2.3 ... error: Installed distribution requests 1.2.3 conflicts with requirement requests>=1.1,<1.2.3 --- I believe pip 1.4 fixes this problem, but it should always be safe to specify a unique build-directory for pip installs to avoid picking up old versions. We also add a cleanup_tmp function for clearing out anything that stack.sh might leave around when un-stacking, and add a catch-all for the pip-build dir. [1] https://github.com/pypa/pip/issues/709 Change-Id: I7ce919cddfd6d6175ae67bd864f82e256ebc7090 --- functions | 23 ++++++++++++++++++++++- unstack.sh | 2 ++ 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/functions b/functions index 3a3e28bcdb..f4a3da113f 100644 --- a/functions +++ b/functions @@ -913,14 +913,35 @@ function pip_install { PIP_MIRROR_OPT="--use-mirrors" fi + # pip < 1.4 has a bug where it will use an already existing build + # directory unconditionally. 
Say an earlier component installs + # foo v1.1; pip will have built foo's source in + # /tmp/$USER-pip-build. Even if a later component specifies foo < + # 1.1, the existing extracted build will be used and cause + # confusing errors. By creating unique build directories we avoid + # this problem. See + # https://github.com/pypa/pip/issues/709 + local pip_build_tmp=$(mktemp --tmpdir -d pip-build.XXXXX) + $SUDO_PIP PIP_DOWNLOAD_CACHE=${PIP_DOWNLOAD_CACHE:-/var/cache/pip} \ HTTP_PROXY=$http_proxy \ HTTPS_PROXY=$https_proxy \ NO_PROXY=$no_proxy \ - $CMD_PIP install $PIP_MIRROR_OPT $@ + $CMD_PIP install --build=${pip_build_tmp} \ + $PIP_MIRROR_OPT $@ \ + && $SUDO_PIP rm -rf ${pip_build_tmp} } +# Cleanup anything from /tmp on unstack +# clean_tmp +function cleanup_tmp { + local tmp_dir=${TMPDIR:-/tmp} + + # see comments in pip_install + sudo rm -rf ${tmp_dir}/pip-build.* +} + # Service wrapper to restart services # restart_service service-name function restart_service() { diff --git a/unstack.sh b/unstack.sh index ece06eb4ac..1e80bf35c7 100755 --- a/unstack.sh +++ b/unstack.sh @@ -111,3 +111,5 @@ if is_service_enabled neutron; then stop_neutron_third_party cleanup_neutron fi + +cleanup_tmp From dfe3f6bae7b74683472ffc510996a1fd0c41a7c7 Mon Sep 17 00:00:00 2001 From: armando-migliaccio Date: Fri, 19 Jul 2013 16:51:26 -0700 Subject: [PATCH 0224/4704] Set external_network_bridge for the test configuration file Fixes bug #1203210 Supports blueprint nvp-third-party-support Change-Id: I21d769b552d31fe099f2773c919e0c7b471399fc --- lib/neutron_plugins/nicira | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/neutron_plugins/nicira b/lib/neutron_plugins/nicira index 9b9dbdcc1b..eabc41730d 100644 --- a/lib/neutron_plugins/nicira +++ b/lib/neutron_plugins/nicira @@ -47,6 +47,7 @@ function neutron_plugin_configure_common() { function neutron_plugin_configure_debug_command() { sudo ovs-vsctl --no-wait -- --may-exist add-br $PUBLIC_BRIDGE + iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT 
external_network_bridge "$PUBLIC_BRIDGE" } function neutron_plugin_configure_dhcp_agent() { From c373cf8b89a6ad5d97027964db5f42e98b568b7d Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Mon, 22 Jul 2013 20:57:57 -0400 Subject: [PATCH 0225/4704] Update neutron_available config option. Tempest change I5ee9ec816845de483fe88d76d1bb047e7bb1af7e changed the behavior of the neutron_available config option. This commit updates devstack to use it's new name and group. Change-Id: I7d5074209fe81f6100f380512d7702fbc8e252ac --- lib/tempest | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index 0e066a8eb5..b4a579bda8 100644 --- a/lib/tempest +++ b/lib/tempest @@ -253,7 +253,7 @@ function configure_tempest() { # Network if is_service_enabled neutron; then - iniset $TEMPEST_CONF network neutron_available "True" + iniset $TEMPEST_CONF service_available neutron "True" fi iniset $TEMPEST_CONF network api_version 2.0 iniset $TEMPEST_CONF network tenant_networks_reachable "$tenant_networks_reachable" From 0352f584abe0e88f372b9b8898b10e2ac25842f0 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Wed, 24 Jul 2013 13:01:32 +1000 Subject: [PATCH 0226/4704] Update README for swift off by default Change 11277b1f3cfa850c074d3effbb43987b6e6e6391 disabled swift due to port conflicts but the documentation still states that it is enabled by default, which can be quite confusing. While we're there, wrap affected lines to 80 chars. Change-Id: I9d543f3368bdadadae482c163d814065009ab395 --- README.md | 28 ++++++++++++++++++++-------- 1 file changed, 20 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index 5fd4291df5..fbf7b4a008 100644 --- a/README.md +++ b/README.md @@ -85,30 +85,42 @@ Example (Qpid): # Swift -Swift is enabled by default configured with only one replica to avoid being IO/memory intensive on a small vm. When running with only one replica the account, container and object services will run directly in screen. 
The others services like replicator, updaters or auditor runs in background. +Swift is disabled by default. When enabled, it is configured with +only one replica to avoid being IO/memory intensive on a small +vm. When running with only one replica the account, container and +object services will run directly in screen. The others services like +replicator, updaters or auditor runs in background. -If you would like to disable Swift you can add this to your `localrc` : +If you would like to enable Swift you can add this to your `localrc` : - disable_service s-proxy s-object s-container s-account + enable_service s-proxy s-object s-container s-account -If you want a minimal Swift install with only Swift and Keystone you can have this instead in your `localrc`: +If you want a minimal Swift install with only Swift and Keystone you +can have this instead in your `localrc`: disable_all_services enable_service key mysql s-proxy s-object s-container s-account -If you only want to do some testing of a real normal swift cluster with multiple replicas you can do so by customizing the variable `SWIFT_REPLICAS` in your `localrc` (usually to 3). +If you only want to do some testing of a real normal swift cluster +with multiple replicas you can do so by customizing the variable +`SWIFT_REPLICAS` in your `localrc` (usually to 3). # Swift S3 -If you are enabling `swift3` in `ENABLED_SERVICES` devstack will install the swift3 middleware emulation. Swift will be configured to act as a S3 endpoint for Keystone so effectively replacing the `nova-objectstore`. +If you are enabling `swift3` in `ENABLED_SERVICES` devstack will +install the swift3 middleware emulation. Swift will be configured to +act as a S3 endpoint for Keystone so effectively replacing the +`nova-objectstore`. -Only Swift proxy server is launched in the screen session all other services are started in background and managed by `swift-init` tool. 
+Only Swift proxy server is launched in the screen session all other +services are started in background and managed by `swift-init` tool. # Neutron Basic Setup -In order to enable Neutron a single node setup, you'll need the following settings in your `localrc` : +In order to enable Neutron a single node setup, you'll need the +following settings in your `localrc` : disable_service n-net enable_service q-svc From 4125fe2fb599715c0f12ea5af1c281c5d94aa61a Mon Sep 17 00:00:00 2001 From: Gary Kotton Date: Wed, 24 Jul 2013 05:37:52 -0700 Subject: [PATCH 0227/4704] Use vmware section The vmware configurations are now in a specific vmware section. Change-Id: I283b36bd023a43800852f792dd7fd8adf4d6ac84 --- stack.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/stack.sh b/stack.sh index 3fa025f939..1a2257c278 100755 --- a/stack.sh +++ b/stack.sh @@ -1080,10 +1080,10 @@ if is_service_enabled nova; then echo_summary "Using VMware vCenter driver" iniset $NOVA_CONF DEFAULT compute_driver "vmwareapi.VMwareVCDriver" VMWAREAPI_USER=${VMWAREAPI_USER:-"root"} - iniset $NOVA_CONF DEFAULT vmwareapi_host_ip "$VMWAREAPI_IP" - iniset $NOVA_CONF DEFAULT vmwareapi_host_username "$VMWAREAPI_USER" - iniset $NOVA_CONF DEFAULT vmwareapi_host_password "$VMWAREAPI_PASSWORD" - iniset $NOVA_CONF DEFAULT vmwareapi_cluster_name "$VMWAREAPI_CLUSTER" + iniset $NOVA_CONF vmware host_ip "$VMWAREAPI_IP" + iniset $NOVA_CONF vmware host_username "$VMWAREAPI_USER" + iniset $NOVA_CONF vmware host_password "$VMWAREAPI_PASSWORD" + iniset $NOVA_CONF vmware cluster_name "$VMWAREAPI_CLUSTER" if is_service_enabled neutron; then iniset $NOVA_CONF vmware integration_bridge $OVS_BRIDGE fi From b56d81d5954a55aab92de31ae69a45e1dffa64ef Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Tue, 23 Jul 2013 17:25:39 -0400 Subject: [PATCH 0228/4704] Set service_available config options in tempest This commit loops over the list of services that tempest has config options for in the 
service_available group and checks if the service is enabled. Devstack will then set whether or not the service is configured in tempest.conf. Change-Id: Ib845d3e098fd3f45c8c26f5696af14cca1534e01 --- lib/tempest | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/lib/tempest b/lib/tempest index b4a579bda8..3831c28a30 100644 --- a/lib/tempest +++ b/lib/tempest @@ -251,10 +251,6 @@ function configure_tempest() { # Compute admin iniset $TEMPEST_CONF "compute-admin" password "$password" # DEPRECATED - # Network - if is_service_enabled neutron; then - iniset $TEMPEST_CONF service_available neutron "True" - fi iniset $TEMPEST_CONF network api_version 2.0 iniset $TEMPEST_CONF network tenant_networks_reachable "$tenant_networks_reachable" iniset $TEMPEST_CONF network public_network_id "$public_network_id" @@ -268,11 +264,6 @@ function configure_tempest() { iniset $TEMPEST_CONF boto http_socket_timeout 30 iniset $TEMPEST_CONF boto ssh_user ${DEFAULT_INSTANCE_USER:-cirros} - # Orchestration - if is_service_enabled heat; then - iniset $TEMPEST_CONF orchestration heat_available "True" - fi - # Scenario iniset $TEMPEST_CONF scenario img_dir "$FILES/images/cirros-0.3.1-x86_64-uec" @@ -287,6 +278,15 @@ function configure_tempest() { # cli iniset $TEMPEST_CONF cli cli_dir $NOVA_BIN_DIR + # service_available + for service in nova cinder glance neutron swift heat ; do + if is_service_enabled $service ; then + iniset $TEMPEST_CONF service_available $service "True" + else + iniset $TEMPEST_CONF service_available $service "False" + fi + done + echo "Created tempest configuration file:" cat $TEMPEST_CONF From abe56ee90f39e486bd8f7c55d05aecb3de7223d9 Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Wed, 24 Jul 2013 11:06:27 +0100 Subject: [PATCH 0229/4704] xenapi: Setup and Rotate text console logs This patch installs the cronjob that rotates/sets up the text console logs of the guests. 
Related to blueprint xenapi-server-log Change-Id: Ie4c778b54f69519fbb80aa0d9822383f55a1e2f9 --- tools/xen/functions | 6 +++--- tools/xen/install_os_domU.sh | 17 +++++++++++++++-- 2 files changed, 18 insertions(+), 5 deletions(-) diff --git a/tools/xen/functions b/tools/xen/functions index 7146858f7d..7616a5fd4d 100644 --- a/tools/xen/functions +++ b/tools/xen/functions @@ -53,18 +53,18 @@ function find_xapi_plugins_dir { find $1 -path '*/xapi.d/plugins' -type d -print } -function install_xapi_plugins_from_zipball { +function install_xapi_plugins_from { local XAPI_PLUGIN_DIR local EXTRACTED_FILES local EXTRACTED_PLUGINS_DIR + EXTRACTED_FILES="$1" + XAPI_PLUGIN_DIR=$(xapi_plugin_location) - EXTRACTED_FILES=$(extract_remote_zipball $1) EXTRACTED_PLUGINS_DIR=$(find_xapi_plugins_dir $EXTRACTED_FILES) cp -pr $EXTRACTED_PLUGINS_DIR/* $XAPI_PLUGIN_DIR - rm -rf $EXTRACTED_FILES chmod a+x ${XAPI_PLUGIN_DIR}* } diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh index 6eb30130cd..2cba33c83e 100755 --- a/tools/xen/install_os_domU.sh +++ b/tools/xen/install_os_domU.sh @@ -63,12 +63,25 @@ fi ## Nova plugins NOVA_ZIPBALL_URL=${NOVA_ZIPBALL_URL:-$(zip_snapshot_location $NOVA_REPO $NOVA_BRANCH)} -install_xapi_plugins_from_zipball $NOVA_ZIPBALL_URL +EXTRACTED_NOVA=$(extract_remote_zipball "$NOVA_ZIPBALL_URL") +install_xapi_plugins_from "$EXTRACTED_NOVA" + +LOGROT_SCRIPT=$(find "$EXTRACTED_NOVA" -name "rotate_xen_guest_logs.sh" -print) +if [ -n "$LOGROT_SCRIPT" ]; then + mkdir -p "/var/log/xen/guest" + cp "$LOGROT_SCRIPT" /root/consolelogrotate + chmod +x /root/consolelogrotate + echo "* * * * * /root/consolelogrotate" | crontab +fi + +rm -rf "$EXTRACTED_NOVA" ## Install the netwrap xapi plugin to support agent control of dom0 networking if [[ "$ENABLED_SERVICES" =~ "q-agt" && "$Q_PLUGIN" = "openvswitch" ]]; then NEUTRON_ZIPBALL_URL=${NEUTRON_ZIPBALL_URL:-$(zip_snapshot_location $NEUTRON_REPO $NEUTRON_BRANCH)} - install_xapi_plugins_from_zipball 
$NEUTRON_ZIPBALL_URL + EXTRACTED_NEUTRON=$(extract_remote_zipball "$NEUTRON_ZIPBALL_URL") + install_xapi_plugins_from "$EXTRACTED_NEUTRON" + rm -rf "$EXTRACTED_NEUTRON" fi create_directory_for_kernels From ea66acb131645650b12de05ea96a5db6ce6df7dd Mon Sep 17 00:00:00 2001 From: Eugene Nikanorov Date: Wed, 24 Jul 2013 20:59:49 +0400 Subject: [PATCH 0230/4704] Remove unused parameter from lbaas_agent.ini That makes devstack installation script consistent with latest change in neutron lbaas_agent Change-Id: I7946301a7fb4adddb5911575cbdfaf6b024132e3 --- lib/neutron_plugins/services/loadbalancer | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/neutron_plugins/services/loadbalancer b/lib/neutron_plugins/services/loadbalancer index 49e286a8cb..c38f904b69 100644 --- a/lib/neutron_plugins/services/loadbalancer +++ b/lib/neutron_plugins/services/loadbalancer @@ -34,7 +34,6 @@ function neutron_agent_lbaas_configure_agent() { cp $NEUTRON_DIR/etc/lbaas_agent.ini $LBAAS_AGENT_CONF_FILENAME - iniset $LBAAS_AGENT_CONF_FILENAME DEFAULT use_namespaces $Q_USE_NAMESPACE # ovs_use_veth needs to be set before the plugin configuration # occurs to allow plugins to override the setting. iniset $LBAAS_AGENT_CONF_FILENAME DEFAULT ovs_use_veth $Q_OVS_USE_VETH From d4f69b21f530059be8d3a7001ebbd1ce53aa7926 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 24 Jul 2013 12:24:43 -0500 Subject: [PATCH 0231/4704] Remove python-netaddr requirement Remove python-netaddr as a DevStack (stack.sh) requirement, this does not change any individual project requirements. Specifically it replaces address_in_net() and adds cidr2netmask() and maskip() functions. 
Change-Id: Ic604437fde2e057faced40a310ab282f3eb27726 --- functions | 37 ++++++++++++--- tests/test_ip.sh | 118 +++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 148 insertions(+), 7 deletions(-) create mode 100755 tests/test_ip.sh diff --git a/functions b/functions index 3a3e28bcdb..ba3ce652ed 100644 --- a/functions +++ b/functions @@ -18,15 +18,38 @@ XTRACE=$(set +o | grep xtrace) set +o xtrace -# Exit 0 if address is in network or 1 if address is not in -# network or netaddr library is not installed. +# Convert CIDR notation to a IPv4 netmask +# cidr2netmask cidr-bits +function cidr2netmask() { + local maskpat="255 255 255 255" + local maskdgt="254 252 248 240 224 192 128" + set -- ${maskpat:0:$(( ($1 / 8) * 4 ))}${maskdgt:$(( (7 - ($1 % 8)) * 4 )):3} + echo ${1-0}.${2-0}.${3-0}.${4-0} +} + + +# Return the network portion of the given IP address using netmask +# netmask is in the traditional dotted-quad format +# maskip ip-address netmask +function maskip() { + local ip=$1 + local mask=$2 + local l="${ip%.*}"; local r="${ip#*.}"; local n="${mask%.*}"; local m="${mask#*.}" + local subnet=$((${ip%%.*}&${mask%%.*})).$((${r%%.*}&${m%%.*})).$((${l##*.}&${n##*.})).$((${ip##*.}&${mask##*.})) + echo $subnet +} + + +# Exit 0 if address is in network or 1 if address is not in network +# ip-range is in CIDR notation: 1.2.3.4/20 # address_in_net ip-address ip-range function address_in_net() { - python -c " -import netaddr -import sys -sys.exit(netaddr.IPAddress('$1') not in netaddr.IPNetwork('$2')) -" + local ip=$1 + local range=$2 + local masklen=${range#*/} + local network=$(maskip ${range%/*} $(cidr2netmask $masklen)) + local subnet=$(maskip $ip $(cidr2netmask $masklen)) + [[ $network == $subnet ]] } diff --git a/tests/test_ip.sh b/tests/test_ip.sh new file mode 100755 index 0000000000..e9cbcca4a4 --- /dev/null +++ b/tests/test_ip.sh @@ -0,0 +1,118 @@ +#!/usr/bin/env bash + +# Tests for DevStack functions +# address_in_net() + +TOP=$(cd $(dirname "$0")/.. 
&& pwd) + +# Import common functions +source $TOP/functions + +# Import configuration +source $TOP/openrc + + +echo "Testing IP addr functions" + +if [[ $(cidr2netmask 4) == 240.0.0.0 ]]; then + echo "cidr2netmask(): /4...OK" +else + echo "cidr2netmask(): /4...failed" +fi +if [[ $(cidr2netmask 8) == 255.0.0.0 ]]; then + echo "cidr2netmask(): /8...OK" +else + echo "cidr2netmask(): /8...failed" +fi +if [[ $(cidr2netmask 12) == 255.240.0.0 ]]; then + echo "cidr2netmask(): /12...OK" +else + echo "cidr2netmask(): /12...failed" +fi +if [[ $(cidr2netmask 16) == 255.255.0.0 ]]; then + echo "cidr2netmask(): /16...OK" +else + echo "cidr2netmask(): /16...failed" +fi +if [[ $(cidr2netmask 20) == 255.255.240.0 ]]; then + echo "cidr2netmask(): /20...OK" +else + echo "cidr2netmask(): /20...failed" +fi +if [[ $(cidr2netmask 24) == 255.255.255.0 ]]; then + echo "cidr2netmask(): /24...OK" +else + echo "cidr2netmask(): /24...failed" +fi +if [[ $(cidr2netmask 28) == 255.255.255.240 ]]; then + echo "cidr2netmask(): /28...OK" +else + echo "cidr2netmask(): /28...failed" +fi +if [[ $(cidr2netmask 30) == 255.255.255.252 ]]; then + echo "cidr2netmask(): /30...OK" +else + echo "cidr2netmask(): /30...failed" +fi +if [[ $(cidr2netmask 32) == 255.255.255.255 ]]; then + echo "cidr2netmask(): /32...OK" +else + echo "cidr2netmask(): /32...failed" +fi + +if [[ $(maskip 169.254.169.254 240.0.0.0) == 160.0.0.0 ]]; then + echo "maskip(): /4...OK" +else + echo "maskip(): /4...failed" +fi +if [[ $(maskip 169.254.169.254 255.0.0.0) == 169.0.0.0 ]]; then + echo "maskip(): /8...OK" +else + echo "maskip(): /8...failed" +fi +if [[ $(maskip 169.254.169.254 255.240.0.0) == 169.240.0.0 ]]; then + echo "maskip(): /12...OK" +else + echo "maskip(): /12...failed" +fi +if [[ $(maskip 169.254.169.254 255.255.0.0) == 169.254.0.0 ]]; then + echo "maskip(): /16...OK" +else + echo "maskip(): /16...failed" +fi +if [[ $(maskip 169.254.169.254 255.255.240.0) == 169.254.160.0 ]]; then + echo "maskip(): /20...OK" +else + echo 
"maskip(): /20...failed" +fi +if [[ $(maskip 169.254.169.254 255.255.255.0) == 169.254.169.0 ]]; then + echo "maskip(): /24...OK" +else + echo "maskip(): /24...failed" +fi +if [[ $(maskip 169.254.169.254 255.255.255.240) == 169.254.169.240 ]]; then + echo "maskip(): /28...OK" +else + echo "maskip(): /28...failed" +fi +if [[ $(maskip 169.254.169.254 255.255.255.255) == 169.254.169.254 ]]; then + echo "maskip(): /32...OK" +else + echo "maskip(): /32...failed" +fi + +for mask in 8 12 16 20 24 26 28; do + echo -n "address_in_net(): in /$mask..." + if address_in_net 10.10.10.1 10.10.10.0/$mask; then + echo "OK" + else + echo "address_in_net() failed on /$mask" + fi + + echo -n "address_in_net(): not in /$mask..." + if ! address_in_net 10.10.10.1 11.11.11.0/$mask; then + echo "OK" + else + echo "address_in_net() failed on /$mask" + fi +done From d644e23c9596ab128d6360798df863cf5f9cbf23 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Thu, 25 Jul 2013 15:34:48 -0400 Subject: [PATCH 0232/4704] make install_heatclient like other client installs all the other install_*client functions do the setup develop inline. Do the same thing for heat. 
Change-Id: Ib2043580af6b7f14f24f7304e5f47f4523517d06 --- lib/heat | 6 +----- stack.sh | 3 +-- 2 files changed, 2 insertions(+), 7 deletions(-) diff --git a/lib/heat b/lib/heat index 13bf130b15..85177738dc 100644 --- a/lib/heat +++ b/lib/heat @@ -41,11 +41,6 @@ function cleanup_heat() { sudo rm -rf $HEAT_AUTH_CACHE_DIR } -# configure_heatclient() - Set config files, create data dirs, etc -function configure_heatclient() { - setup_develop $HEATCLIENT_DIR -} - # configure_heat() - Set config files, create data dirs, etc function configure_heat() { setup_develop $HEAT_DIR @@ -176,6 +171,7 @@ function create_heat_cache_dir() { # install_heatclient() - Collect source and prepare function install_heatclient() { git_clone $HEATCLIENT_REPO $HEATCLIENT_DIR $HEATCLIENT_BRANCH + setup_develop $HEATCLIENT_DIR } # install_heat() - Collect source and prepare diff --git a/stack.sh b/stack.sh index 3fa025f939..cdc45fddfb 100755 --- a/stack.sh +++ b/stack.sh @@ -739,11 +739,10 @@ if is_service_enabled ceilometer; then fi if is_service_enabled heat; then - install_heat install_heatclient + install_heat cleanup_heat configure_heat - configure_heatclient fi if is_service_enabled tls-proxy; then From 75195b58cc27bf415594235e1e36ec7cd8223321 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Thu, 25 Jul 2013 15:38:09 -0400 Subject: [PATCH 0233/4704] ensure all horizon needed clients are installed from git horizon actually uses a lot of clients nowadays, ensure these are all coming from git.
Change-Id: Ib02b96de40aed800ff8adbb59f0268fceea7777d --- stack.sh | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/stack.sh b/stack.sh index cdc45fddfb..455ccd1127 100755 --- a/stack.sh +++ b/stack.sh @@ -666,12 +666,15 @@ install_keystoneclient install_glanceclient install_cinderclient install_novaclient -if is_service_enabled swift glance; then +if is_service_enabled swift glance horizon; then install_swiftclient fi -if is_service_enabled neutron nova; then +if is_service_enabled neutron nova horizon; then install_neutronclient fi +if is_service_enabled heat horizon; then + install_heatclient +fi git_clone $OPENSTACKCLIENT_REPO $OPENSTACKCLIENT_DIR $OPENSTACKCLIENT_BRANCH setup_develop $OPENSTACKCLIENT_DIR @@ -739,7 +742,6 @@ if is_service_enabled ceilometer; then fi if is_service_enabled heat; then - install_heatclient install_heat cleanup_heat configure_heat From 12bb53b6a0b003434f0e39610f46d2425263ea20 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Thu, 25 Jul 2013 23:02:48 +0200 Subject: [PATCH 0234/4704] Add ceilometer to the sys.path before the nova starts Running "setup.py develop" adds the package to /usr/lib/python2.7/site-packages/easy-install.pth. Nova uses ceilometer.compute.nova_notifier, so it must be on the path before nova is started.
Change-Id: I80ed5e5611278e75afa7ab7f8fb91417897d423f --- stack.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/stack.sh b/stack.sh index 3fa025f939..4da13fe410 100755 --- a/stack.sh +++ b/stack.sh @@ -736,6 +736,9 @@ fi if is_service_enabled ceilometer; then install_ceilometerclient install_ceilometer + echo_summary "Configuring Ceilometer" + configure_ceilometer + configure_ceilometerclient fi if is_service_enabled heat; then @@ -1211,9 +1214,6 @@ if is_service_enabled cinder; then start_cinder fi if is_service_enabled ceilometer; then - echo_summary "Configuring Ceilometer" - configure_ceilometer - configure_ceilometerclient echo_summary "Starting Ceilometer" init_ceilometer start_ceilometer From c62c2b9b6617c731c7979d490a1d2e7a048c1cf7 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Wed, 24 Jul 2013 03:56:13 -0700 Subject: [PATCH 0235/4704] Fix various things when n-cell is enabled 1) Use the newer cell_type configuration option instead of overriding the compute_api_class. 2) A nova-cells service is started for both the API cell (region) and the compute cell (child). The screen names were both 'n-cell' which caused both services to log to the same log file. The screen names have been changed to n-cell-region and n-cell-child. 3) Security groups are not supported with cells. Multiple exercises are patched to use the 'default' security group instead of creating a new one. Additionally, if one uses the 'default' security group, do not try to delete it, as one can never delete the default. 4) Disable the floating_ips and aggregates exercises when n-cell is enabled, as they are not supported by cells. 5) Related to #4, disable the floating_ips tests within euca.sh 6) Update the README.md. No services need to be disabled, and one only needs to enable the n-cell service. 
Change-Id: I9782d1e3cda3c9dd3daefa15c043f5b06473cb87 --- README.md | 14 +----- exercises/aggregates.sh | 2 + exercises/boot_from_volume.sh | 26 +++++++---- exercises/euca.sh | 84 ++++++++++++++++++++--------------- exercises/floating_ips.sh | 2 + exercises/volumes.sh | 26 +++++++---- functions | 5 +++ lib/nova | 8 ++-- 8 files changed, 100 insertions(+), 67 deletions(-) diff --git a/README.md b/README.md index fbf7b4a008..4bcd62c947 100644 --- a/README.md +++ b/README.md @@ -188,15 +188,5 @@ Cells is a new scaling option with a full spec at http://wiki.openstack.org/blue To setup a cells environment add the following to your `localrc`: enable_service n-cell - enable_service n-api-meta - MULTI_HOST=True - - # The following have not been tested with cells, they may or may not work. - disable_service n-obj - disable_service cinder - disable_service c-sch - disable_service c-api - disable_service c-vol - disable_service n-xvnc - -Be aware that there are some features currently missing in cells, one notable one being security groups. + +Be aware that there are some features currently missing in cells, one notable one being security groups. The exercises have been patched to disable functionality not supported by cells. diff --git a/exercises/aggregates.sh b/exercises/aggregates.sh index 3c83725491..e2baecdb11 100755 --- a/exercises/aggregates.sh +++ b/exercises/aggregates.sh @@ -42,6 +42,8 @@ source $TOP_DIR/exerciserc # Test as the admin user . $TOP_DIR/openrc admin admin +# Cells does not support aggregates. +is_service_enabled n-cell && exit 55 # Create an aggregate # =================== diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh index 18147325bb..a3a14eb5e4 100755 --- a/exercises/boot_from_volume.sh +++ b/exercises/boot_from_volume.sh @@ -80,12 +80,18 @@ die_if_not_set $LINENO IMAGE "Failure getting image $DEFAULT_IMAGE_NAME" # List security groups nova secgroup-list -# Create a secgroup -if ! 
nova secgroup-list | grep -q $SECGROUP; then - nova secgroup-create $SECGROUP "$SECGROUP description" - if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova secgroup-list | grep -q $SECGROUP; do sleep 1; done"; then - echo "Security group not created" - exit 1 +if is_service_enabled n-cell; then + # Cells does not support security groups, so force the use of "default" + SECGROUP="default" + echo "Using the default security group because of Cells." +else + # Create a secgroup + if ! nova secgroup-list | grep -q $SECGROUP; then + nova secgroup-create $SECGROUP "$SECGROUP description" + if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova secgroup-list | grep -q $SECGROUP; do sleep 1; done"; then + echo "Security group not created" + exit 1 + fi fi fi @@ -200,8 +206,12 @@ fi end_time=$(date +%s) echo "Completed cinder delete in $((end_time - start_time)) seconds" -# Delete secgroup -nova secgroup-delete $SECGROUP || die $LINENO "Failure deleting security group $SECGROUP" +if [[ $SECGROUP = "default" ]] ; then + echo "Skipping deleting default security group" +else + # Delete secgroup + nova secgroup-delete $SECGROUP || die $LINENO "Failure deleting security group $SECGROUP" +fi set +o xtrace echo "*********************************************************************" diff --git a/exercises/euca.sh b/exercises/euca.sh index eec8636fa3..5b0d1ba493 100755 --- a/exercises/euca.sh +++ b/exercises/euca.sh @@ -58,11 +58,17 @@ SECGROUP=${SECGROUP:-euca_secgroup} IMAGE=`euca-describe-images | grep machine | grep ${DEFAULT_IMAGE_NAME} | cut -f2 | head -n1` die_if_not_set $LINENO IMAGE "Failure getting image $DEFAULT_IMAGE_NAME" -# Add a secgroup -if ! euca-describe-groups | grep -q $SECGROUP; then - euca-add-group -d "$SECGROUP description" $SECGROUP - if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! 
euca-describe-groups | grep -q $SECGROUP; do sleep 1; done"; then - die $LINENO "Security group not created" +if is_service_enabled n-cell; then + # Cells does not support security groups, so force the use of "default" + SECGROUP="default" + echo "Using the default security group because of Cells." +else + # Add a secgroup + if ! euca-describe-groups | grep -q $SECGROUP; then + euca-add-group -d "$SECGROUP description" $SECGROUP + if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! euca-describe-groups | grep -q $SECGROUP; do sleep 1; done"; then + die $LINENO "Security group not created" + fi fi fi @@ -77,7 +83,7 @@ fi # Volumes # ------- -if [[ "$ENABLED_SERVICES" =~ "c-vol" ]]; then +if is_service_enabled c-vol && ! is_service_enabled n-cell; then VOLUME_ZONE=`euca-describe-availability-zones | head -n1 | cut -f2` die_if_not_set $LINENO VOLUME_ZONE "Failure to find zone for volume" @@ -117,41 +123,45 @@ else echo "Volume Tests Skipped" fi -# Allocate floating address -FLOATING_IP=`euca-allocate-address | cut -f2` -die_if_not_set $LINENO FLOATING_IP "Failure allocating floating IP" +if is_service_enabled n-cell; then + echo "Floating IP Tests Skipped because of Cells." 
+else + # Allocate floating address + FLOATING_IP=`euca-allocate-address | cut -f2` + die_if_not_set $LINENO FLOATING_IP "Failure allocating floating IP" -# Associate floating address -euca-associate-address -i $INSTANCE $FLOATING_IP || \ - die $LINENO "Failure associating address $FLOATING_IP to $INSTANCE" + # Associate floating address + euca-associate-address -i $INSTANCE $FLOATING_IP || \ + die $LINENO "Failure associating address $FLOATING_IP to $INSTANCE" -# Authorize pinging -euca-authorize -P icmp -s 0.0.0.0/0 -t -1:-1 $SECGROUP || \ - die $LINENO "Failure authorizing rule in $SECGROUP" + # Authorize pinging + euca-authorize -P icmp -s 0.0.0.0/0 -t -1:-1 $SECGROUP || \ + die $LINENO "Failure authorizing rule in $SECGROUP" -# Test we can ping our floating ip within ASSOCIATE_TIMEOUT seconds -ping_check "$PUBLIC_NETWORK_NAME" $FLOATING_IP $ASSOCIATE_TIMEOUT + # Test we can ping our floating ip within ASSOCIATE_TIMEOUT seconds + ping_check "$PUBLIC_NETWORK_NAME" $FLOATING_IP $ASSOCIATE_TIMEOUT -# Revoke pinging -euca-revoke -P icmp -s 0.0.0.0/0 -t -1:-1 $SECGROUP || \ - die $LINENO "Failure revoking rule in $SECGROUP" + # Revoke pinging + euca-revoke -P icmp -s 0.0.0.0/0 -t -1:-1 $SECGROUP || \ + die $LINENO "Failure revoking rule in $SECGROUP" -# Release floating address -euca-disassociate-address $FLOATING_IP || \ - die $LINENO "Failure disassociating address $FLOATING_IP" + # Release floating address + euca-disassociate-address $FLOATING_IP || \ + die $LINENO "Failure disassociating address $FLOATING_IP" -# Wait just a tick for everything above to complete so release doesn't fail -if ! timeout $ASSOCIATE_TIMEOUT sh -c "while euca-describe-addresses | grep $INSTANCE | grep -q $FLOATING_IP; do sleep 1; done"; then - die $LINENO "Floating ip $FLOATING_IP not disassociated within $ASSOCIATE_TIMEOUT seconds" -fi + # Wait just a tick for everything above to complete so release doesn't fail + if ! 
timeout $ASSOCIATE_TIMEOUT sh -c "while euca-describe-addresses | grep $INSTANCE | grep -q $FLOATING_IP; do sleep 1; done"; then + die $LINENO "Floating ip $FLOATING_IP not disassociated within $ASSOCIATE_TIMEOUT seconds" + fi -# Release floating address -euca-release-address $FLOATING_IP || \ - die $LINENO "Failure releasing address $FLOATING_IP" + # Release floating address + euca-release-address $FLOATING_IP || \ + die $LINENO "Failure releasing address $FLOATING_IP" -# Wait just a tick for everything above to complete so terminate doesn't fail -if ! timeout $ASSOCIATE_TIMEOUT sh -c "while euca-describe-addresses | grep -q $FLOATING_IP; do sleep 1; done"; then - die $LINENO "Floating ip $FLOATING_IP not released within $ASSOCIATE_TIMEOUT seconds" + # Wait just a tick for everything above to complete so terminate doesn't fail + if ! timeout $ASSOCIATE_TIMEOUT sh -c "while euca-describe-addresses | grep -q $FLOATING_IP; do sleep 1; done"; then + die $LINENO "Floating ip $FLOATING_IP not released within $ASSOCIATE_TIMEOUT seconds" + fi fi # Terminate instance @@ -166,8 +176,12 @@ if ! 
timeout $TERMINATE_TIMEOUT sh -c "while euca-describe-instances $INSTANCE | die $LINENO "server didn't terminate within $TERMINATE_TIMEOUT seconds" fi -# Delete secgroup -euca-delete-group $SECGROUP || die $LINENO "Failure deleting security group $SECGROUP" +if [[ "$SECGROUP" = "default" ]] ; then + echo "Skipping deleting default security group" +else + # Delete secgroup + euca-delete-group $SECGROUP || die $LINENO "Failure deleting security group $SECGROUP" +fi set +o xtrace echo "*********************************************************************" diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh index b22ef110d2..ac65cf7772 100755 --- a/exercises/floating_ips.sh +++ b/exercises/floating_ips.sh @@ -56,6 +56,8 @@ TEST_FLOATING_POOL=${TEST_FLOATING_POOL:-test} # Instance name VM_NAME="ex-float" +# Cells does not support floating ips API calls +is_service_enabled n-cell && exit 55 # Launching a server # ================== diff --git a/exercises/volumes.sh b/exercises/volumes.sh index f574bb3463..b2b391c5d7 100755 --- a/exercises/volumes.sh +++ b/exercises/volumes.sh @@ -78,12 +78,18 @@ die_if_not_set $LINENO IMAGE "Failure getting image $DEFAULT_IMAGE_NAME" # List security groups nova secgroup-list -# Create a secgroup -if ! nova secgroup-list | grep -q $SECGROUP; then - nova secgroup-create $SECGROUP "$SECGROUP description" - if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova secgroup-list | grep -q $SECGROUP; do sleep 1; done"; then - echo "Security group not created" - exit 1 +if is_service_enabled n-cell; then + # Cells does not support security groups, so force the use of "default" + SECGROUP="default" + echo "Using the default security group because of Cells." +else + # Create a secgroup + if ! nova secgroup-list | grep -q $SECGROUP; then + nova secgroup-create $SECGROUP "$SECGROUP description" + if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! 
nova secgroup-list | grep -q $SECGROUP; do sleep 1; done"; then + echo "Security group not created" + exit 1 + fi fi fi @@ -201,8 +207,12 @@ if ! timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q $VM_UUID; do sl die $LINENO "Server $VM_NAME not deleted" fi -# Delete secgroup -nova secgroup-delete $SECGROUP || die $LINENO "Failure deleting security group $SECGROUP" +if [[ $SECGROUP = "default" ]] ; then + echo "Skipping deleting default security group" +else + # Delete secgroup + nova secgroup-delete $SECGROUP || die $LINENO "Failure deleting security group $SECGROUP" +fi set +o xtrace echo "*********************************************************************" diff --git a/functions b/functions index eb83dfb2d6..08c525348a 100644 --- a/functions +++ b/functions @@ -745,12 +745,17 @@ function is_running() { # For backward compatibility if we have **swift** in ENABLED_SERVICES all the # **s-** services will be enabled. This will be deprecated in the future. # +# Cells within nova is enabled if **n-cell** is in ``ENABLED_SERVICES``. +# We also need to make sure to treat **n-cell-region** and **n-cell-child** +# as enabled in this case. +# # Uses global ``ENABLED_SERVICES`` # is_service_enabled service [service ...] 
function is_service_enabled() { services=$@ for service in ${services}; do [[ ,${ENABLED_SERVICES}, =~ ,${service}, ]] && return 0 + [[ ${service} == n-cell-* && ${ENABLED_SERVICES} =~ "n-cell" ]] && return 0 [[ ${service} == "nova" && ${ENABLED_SERVICES} =~ "n-" ]] && return 0 [[ ${service} == "cinder" && ${ENABLED_SERVICES} =~ "c-" ]] && return 0 [[ ${service} == "ceilometer" && ${ENABLED_SERVICES} =~ "ceilometer-" ]] && return 0 diff --git a/lib/nova b/lib/nova index 617fb08fb4..7a5ff1f98a 100644 --- a/lib/nova +++ b/lib/nova @@ -568,11 +568,11 @@ function init_nova_cells() { iniset $NOVA_CELLS_CONF DEFAULT rabbit_virtual_host child_cell iniset $NOVA_CELLS_CONF DEFAULT dhcpbridge_flagfile $NOVA_CELLS_CONF iniset $NOVA_CELLS_CONF cells enable True + iniset $NOVA_CELLS_CONF cells cell_type compute iniset $NOVA_CELLS_CONF cells name child - iniset $NOVA_CONF DEFAULT scheduler_topic cells - iniset $NOVA_CONF DEFAULT compute_api_class nova.compute.cells_api.ComputeCellsAPI iniset $NOVA_CONF cells enable True + iniset $NOVA_CONF cells cell_type api iniset $NOVA_CONF cells name region if is_service_enabled n-api-meta; then @@ -714,8 +714,8 @@ function start_nova() { if is_service_enabled n-cell; then NOVA_CONF_BOTTOM=$NOVA_CELLS_CONF screen_it n-cond "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-conductor --config-file $NOVA_CELLS_CONF" - screen_it n-cell "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cells --config-file $NOVA_CONF" - screen_it n-cell "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cells --config-file $NOVA_CELLS_CONF" + screen_it n-cell-region "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cells --config-file $NOVA_CONF" + screen_it n-cell-child "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cells --config-file $NOVA_CELLS_CONF" fi if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then From 859cc6800b2375a7c7c49c904ad32a4831c20f88 Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Fri, 26 Jul 2013 15:22:44 -0400 Subject: [PATCH 0236/4704] Set lock_path for Tempest to TMPDIR Tempest change 
I5376f977a110f502f0e4958cbccbd379539e006b syncs lockutils from oslo. Tempest with testr needs to use external locks which require a lock_path config option. This commit sets that option for tempest to use devstack's TMPDIR. Change-Id: I464edf9e11710e1931ed4f7b0262328a57e5b3de --- lib/tempest | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/tempest b/lib/tempest index b4a579bda8..62da1fcfca 100644 --- a/lib/tempest +++ b/lib/tempest @@ -195,6 +195,9 @@ function configure_tempest() { fi fi + # Oslo + iniset $TEMPEST_CONF DEFAULT lock_path $TMPDIR + # Timeouts iniset $TEMPEST_CONF compute build_timeout $BUILD_TIMEOUT iniset $TEMPEST_CONF volume build_timeout $BUILD_TIMEOUT From a49ee32bc957e1082b4c21e62e7d5504b09533d1 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Sat, 27 Jul 2013 15:36:53 -0400 Subject: [PATCH 0237/4704] Update OpenVZ driver image 11.10 simply does not exist, so this simply does not work. However, devstack-gate tries to pre-cache all images referenced, so even though we're not doing anything with this, it's breaking devstack-gate. Change-Id: I64b03ed387f5205a885427498e8cf8c2a5b32b91 --- stackrc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stackrc b/stackrc index 50774e40bc..ac72132f2e 100644 --- a/stackrc +++ b/stackrc @@ -215,8 +215,8 @@ esac # ``IMAGE_URLS`` to be set directly in ``localrc``. 
case "$VIRT_DRIVER" in openvz) - DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ubuntu-11.10-x86_64} - IMAGE_URLS=${IMAGE_URLS:-"http://download.openvz.org/template/precreated/ubuntu-11.10-x86_64.tar.gz"};; + DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ubuntu-12.04-x86_64} + IMAGE_URLS=${IMAGE_URLS:-"http://download.openvz.org/template/precreated/ubuntu-12.04.x86_64.tar.gz"};; libvirt) case "$LIBVIRT_TYPE" in lxc) # the cirros root disk in the uec tarball is empty, so it will not work for lxc From 73bd1233a72ea074cced809af31c113cc3c63af0 Mon Sep 17 00:00:00 2001 From: Jay Lau Date: Sun, 28 Jul 2013 18:01:52 +0800 Subject: [PATCH 0238/4704] Do not set os_auth_url for ceilometer When installing ceilometer with devstack, if the OS_AUTH_URL environment variable is not set, then devstack will set os_auth_url as empty in /etc/ceilometer/ceilometer.conf, and this will cause both ceilometer-agent-central and ceilometer-agent-compute to fail to start. Fix bug 1205776 Change-Id: I32c77c92dc2e9d03c86e703f170e0216dd829a61 --- lib/ceilometer | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/ceilometer b/lib/ceilometer index 548496e707..15eb6a6d21 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -81,7 +81,6 @@ function configure_ceilometer() { iniset $CEILOMETER_CONF DEFAULT os_username ceilometer iniset $CEILOMETER_CONF DEFAULT os_password $SERVICE_PASSWORD iniset $CEILOMETER_CONF DEFAULT os_tenant_name $SERVICE_TENANT_NAME - iniset $CEILOMETER_CONF DEFAULT os_auth_url $OS_AUTH_URL iniset $CEILOMETER_CONF keystone_authtoken auth_protocol http iniset $CEILOMETER_CONF keystone_authtoken admin_user ceilometer From d98a5d0a58c1fae2ce8adcd8098cefaa9f558381 Mon Sep 17 00:00:00 2001 From: zhang-hare Date: Fri, 21 Jun 2013 18:18:02 +0800 Subject: [PATCH 0239/4704] enable apache2 server as front end for swift 1.install apache and wsgi module 2.config apache2 vhost and wsgi files for proxy, account, container and object server.
3.refactor apache functions from horizon and swift into lib/apache Change-Id: I3a5d1e511c5dca1e6d01a1adca8fda0a43d4f632 Implements: blueprint enable-apache-frontend-for-swift --- README.md | 7 +++ lib/apache | 118 +++++++++++++++++++++++++++++++++++++++++++++++ lib/horizon | 43 +++-------------- lib/swift | 130 +++++++++++++++++++++++++++++++++++++++++++++++++++- stack.sh | 1 + unstack.sh | 3 ++ 6 files changed, 265 insertions(+), 37 deletions(-) create mode 100644 lib/apache diff --git a/README.md b/README.md index fbf7b4a008..23200e2692 100644 --- a/README.md +++ b/README.md @@ -83,6 +83,13 @@ Example (Qpid): ENABLED_SERVICES="$ENABLED_SERVICES,-rabbit,-zeromq,qpid" +# Apache Frontend + +Apache web server is enabled for wsgi services by setting `APACHE_ENABLED_SERVICES` in your localrc. But remember to enable these services at first as above. + +Example: + APACHE_ENABLED_SERVICES+=keystone,swift + # Swift Swift is disabled by default. When enabled, it is configured with diff --git a/lib/apache b/lib/apache new file mode 100644 index 0000000000..a2b0534d16 --- /dev/null +++ b/lib/apache @@ -0,0 +1,118 @@ +# lib/apache +# Functions to control configuration and operation of apache web server + +# Dependencies: +# ``functions`` file +# is_apache_enabled_service +# change_apache_user_group +# install_apache_wsgi +# config_apache_wsgi +# start_apache_server +# stop_apache_server +# restart_apache_server + +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace + +# Allow overriding the default Apache user and group, default to +# current user and his default group. 
+APACHE_USER=${APACHE_USER:-$USER} +APACHE_GROUP=${APACHE_GROUP:-$(id -gn $APACHE_USER)} + + +# Set up apache name and configuration directory +if is_ubuntu; then + APACHE_NAME=apache2 + APACHE_CONF_DIR=sites-available +elif is_fedora; then + APACHE_NAME=httpd + APACHE_CONF_DIR=conf.d +elif is_suse; then + APACHE_NAME=apache2 + APACHE_CONF_DIR=vhosts.d +fi + +# Functions +# --------- + +# is_apache_enabled_service() checks if the service(s) specified as arguments are +# apache enabled by the user in ``APACHE_ENABLED_SERVICES`` as web front end. +# +# Multiple services specified as arguments are ``OR``'ed together; the test +# is a short-circuit boolean, i.e it returns on the first match. +# +# Uses global ``APACHE_ENABLED_SERVICES`` +# APACHE_ENABLED_SERVICES service [service ...] +function is_apache_enabled_service() { + services=$@ + for service in ${services}; do + [[ ,${APACHE_ENABLED_SERVICES}, =~ ,${service}, ]] && return 0 + done + return 1 +} + +# change_apache_user_group() - Change the User/Group to run Apache server +function change_apache_user_group(){ + local stack_user=$@ + if is_ubuntu; then + sudo sed -e " + s/^export APACHE_RUN_USER=.*/export APACHE_RUN_USER=${stack_user}/g; + s/^export APACHE_RUN_GROUP=.*/export APACHE_RUN_GROUP=${stack_user}/g + " -i /etc/${APACHE_NAME}/envvars + elif is_fedora; then + sudo sed -e " + s/^User .*/User ${stack_user}/g; + s/^Group .*/Group ${stack_user}/g + " -i /etc/${APACHE_NAME}/httpd.conf + elif is_suse; then + sudo sed -e " + s/^User .*/User ${stack_user}/g; + s/^Group .*/Group ${stack_user}/g + " -i /etc/${APACHE_NAME}/uid.conf + else + exit_distro_not_supported "apache user and group" + fi +} + +# install_apache_wsgi() - Install Apache server and wsgi module +function install_apache_wsgi() { + # Apache installation, because we mark it NOPRIME + if is_ubuntu; then + # Install apache2, which is NOPRIME'd + install_package apache2 libapache2-mod-wsgi + elif is_fedora; then + sudo rm -f /etc/httpd/conf.d/000-* + 
install_package httpd mod_wsgi + elif is_suse; then + install_package apache2 apache2-mod_wsgi + else + exit_distro_not_supported "apache installation" + fi +} + +# start_apache_server() - Start running apache server +function start_apache_server() { + start_service $APACHE_NAME +} + +# stop_apache_server() - Stop running apache server +function stop_apache_server() { + if [ -n "$APACHE_NAME" ]; then + stop_service $APACHE_NAME + else + exit_distro_not_supported "apache configuration" + fi +} + +# restart_apache_server +function restart_apache_server() { + restart_service $APACHE_NAME +} + +# Restore xtrace +$XTRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/horizon b/lib/horizon index b537484608..b06ea1cdb3 100644 --- a/lib/horizon +++ b/lib/horizon @@ -4,6 +4,7 @@ # Dependencies: # ``functions`` file +# ``apache`` file # ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined # @@ -33,23 +34,6 @@ HORIZON_DIR=$DEST/horizon # The example file in Horizon repo is used by default. HORIZON_SETTINGS=${HORIZON_SETTINGS:-$HORIZON_DIR/openstack_dashboard/local/local_settings.py.example} -# Allow overriding the default Apache user and group, default to -# current user and his default group. 
-APACHE_USER=${APACHE_USER:-$USER} -APACHE_GROUP=${APACHE_GROUP:-$(id -gn $APACHE_USER)} - -# Set up service name and configuration path -if is_ubuntu; then - APACHE_NAME=apache2 - APACHE_CONF=sites-available/horizon -elif is_fedora; then - APACHE_NAME=httpd - APACHE_CONF=conf.d/horizon.conf -elif is_suse; then - APACHE_NAME=apache2 - APACHE_CONF=vhosts.d/horizon.conf -fi - # Functions # --------- @@ -119,11 +103,12 @@ function init_horizon() { sudo mkdir -p $HORIZON_DIR/.blackhole HORIZON_REQUIRE='' + local horizon_conf=/etc/$APACHE_NAME/$APACHE_CONF_DIR/horizon if is_ubuntu; then # Clean up the old config name sudo rm -f /etc/apache2/sites-enabled/000-default # Be a good citizen and use the distro tools here - sudo touch /etc/$APACHE_NAME/$APACHE_CONF + sudo touch $horizon_conf sudo a2ensite horizon # WSGI isn't enabled by default, enable it sudo a2enmod wsgi @@ -153,23 +138,13 @@ function init_horizon() { s,%APACHE_NAME%,$APACHE_NAME,g; s,%DEST%,$DEST,g; s,%HORIZON_REQUIRE%,$HORIZON_REQUIRE,g; - \" $FILES/apache-horizon.template >/etc/$APACHE_NAME/$APACHE_CONF" + \" $FILES/apache-horizon.template >$horizon_conf" } # install_horizon() - Collect source and prepare function install_horizon() { # Apache installation, because we mark it NOPRIME - if is_ubuntu; then - # Install apache2, which is NOPRIME'd - install_package apache2 libapache2-mod-wsgi - elif is_fedora; then - sudo rm -f /etc/httpd/conf.d/000-* - install_package httpd mod_wsgi - elif is_suse; then - install_package apache2 apache2-mod_wsgi - else - exit_distro_not_supported "apache installation" - fi + install_apache_wsgi # NOTE(sdague) quantal changed the name of the node binary if is_ubuntu; then @@ -185,17 +160,13 @@ function install_horizon() { # start_horizon() - Start running processes, including screen function start_horizon() { - restart_service $APACHE_NAME + restart_apache_server screen_it horizon "cd $HORIZON_DIR && sudo tail -f /var/log/$APACHE_NAME/horizon_error.log" } # stop_horizon() - 
Stop running processes (non-screen) function stop_horizon() { - if [ -n "$APACHE_NAME" ]; then - stop_service $APACHE_NAME - else - exit_distro_not_supported "apache configuration" - fi + stop_apache_server } diff --git a/lib/swift b/lib/swift index e53d674666..c93b8b3923 100644 --- a/lib/swift +++ b/lib/swift @@ -3,6 +3,7 @@ # Dependencies: # ``functions`` file +# ``apache`` file # ``DEST``, ``SCREEN_NAME``, `SWIFT_HASH` must be defined # ``STACK_USER`` must be defined # ``SWIFT_DATA_DIR`` or ``DATA_DIR`` must be defined @@ -10,11 +11,13 @@ # ``stack.sh`` calls the entry points in this order: # # install_swift +# _config_swift_apache_wsgi # configure_swift # init_swift # start_swift # stop_swift # cleanup_swift +# _cleanup_swift_apache_wsgi # Save trace setting XTRACE=$(set +o | grep xtrace) @@ -28,6 +31,7 @@ set +o xtrace SWIFT_DIR=$DEST/swift SWIFTCLIENT_DIR=$DEST/python-swiftclient SWIFT_AUTH_CACHE_DIR=${SWIFT_AUTH_CACHE_DIR:-/var/cache/swift} +SWIFT_APACHE_WSGI_DIR=${SWIFT_APACHE_WSGI_DIR:-/var/www/swift} SWIFT3_DIR=$DEST/swift3 # TODO: add logging to different location. @@ -97,6 +101,103 @@ function cleanup_swift() { rm ${SWIFT_DATA_DIR}/drives/images/swift.img fi rm -rf ${SWIFT_DATA_DIR}/run/ + if is_apache_enabled_service swift; then + _cleanup_swift_apache_wsgi + fi +} + +# _cleanup_swift_apache_wsgi() - Remove wsgi files, disable and remove apache vhost file +function _cleanup_swift_apache_wsgi() { + sudo rm -f $SWIFT_APACHE_WSGI_DIR/*.wsgi + ! is_fedora && sudo a2dissite proxy-server + for node_number in ${SWIFT_REPLICAS_SEQ}; do + for type in object container account; do + site_name=${type}-server-${node_number} + ! 
is_fedora && sudo a2dissite ${site_name} + sudo rm -f /etc/$APACHE_NAME/$APACHE_CONF_DIR/${site_name} + done + done +} + +# _config_swift_apache_wsgi() - Set WSGI config files of Swift +function _config_swift_apache_wsgi() { + sudo mkdir -p ${SWIFT_APACHE_WSGI_DIR} + local apache_vhost_dir=/etc/${APACHE_NAME}/$APACHE_CONF_DIR + local proxy_port=${SWIFT_DEFAULT_BIND_PORT:-8080} + + # copy proxy vhost and wsgi file + sudo cp ${SWIFT_DIR}/examples/apache2/proxy-server.template ${apache_vhost_dir}/proxy-server + sudo sed -e " + /^#/d;/^$/d; + s/%PORT%/$proxy_port/g; + s/%SERVICENAME%/proxy-server/g; + s/%APACHE_NAME%/${APACHE_NAME}/g; + " -i ${apache_vhost_dir}/proxy-server + + sudo cp ${SWIFT_DIR}/examples/wsgi/proxy-server.wsgi.template ${SWIFT_APACHE_WSGI_DIR}/proxy-server.wsgi + sudo sed -e " + /^#/d;/^$/d; + s/%SERVICECONF%/proxy-server.conf/g; + " -i ${SWIFT_APACHE_WSGI_DIR}/proxy-server.wsgi + ! is_fedora && sudo a2ensite proxy-server + + # copy apache vhost file and set name and port + for node_number in ${SWIFT_REPLICAS_SEQ}; do + object_port=$[OBJECT_PORT_BASE + 10 * ($node_number - 1)] + container_port=$[CONTAINER_PORT_BASE + 10 * ($node_number - 1)] + account_port=$[ACCOUNT_PORT_BASE + 10 * ($node_number - 1)] + + sudo cp ${SWIFT_DIR}/examples/apache2/object-server.template ${apache_vhost_dir}/object-server-${node_number} + sudo sed -e " + s/%PORT%/$object_port/g; + s/%SERVICENAME%/object-server-${node_number}/g; + s/%APACHE_NAME%/${APACHE_NAME}/g; + " -i ${apache_vhost_dir}/object-server-${node_number} + ! 
is_fedora && sudo a2ensite object-server-${node_number} + + sudo cp ${SWIFT_DIR}/examples/wsgi/object-server.wsgi.template ${SWIFT_APACHE_WSGI_DIR}/object-server-${node_number}.wsgi + sudo sed -e " + /^#/d;/^$/d; + s/%SERVICECONF%/object-server\/${node_number}.conf/g; + " -i ${SWIFT_APACHE_WSGI_DIR}/object-server-${node_number}.wsgi + + sudo cp ${SWIFT_DIR}/examples/apache2/container-server.template ${apache_vhost_dir}/container-server-${node_number} + sudo sed -e " + /^#/d;/^$/d; + s/%PORT%/$container_port/g; + s/%SERVICENAME%/container-server-${node_number}/g; + s/%APACHE_NAME%/${APACHE_NAME}/g; + " -i ${apache_vhost_dir}/container-server-${node_number} + ! is_fedora && sudo a2ensite container-server-${node_number} + + sudo cp ${SWIFT_DIR}/examples/wsgi/container-server.wsgi.template ${SWIFT_APACHE_WSGI_DIR}/container-server-${node_number}.wsgi + sudo sed -e " + /^#/d;/^$/d; + s/%SERVICECONF%/container-server\/${node_number}.conf/g; + " -i ${SWIFT_APACHE_WSGI_DIR}/container-server-${node_number}.wsgi + + sudo cp ${SWIFT_DIR}/examples/apache2/account-server.template ${apache_vhost_dir}/account-server-${node_number} + sudo sed -e " + /^#/d;/^$/d; + s/%PORT%/$account_port/g; + s/%SERVICENAME%/account-server-${node_number}/g; + s/%APACHE_NAME%/${APACHE_NAME}/g; + " -i ${apache_vhost_dir}/account-server-${node_number} + ! is_fedora && sudo a2ensite account-server-${node_number} + + sudo cp ${SWIFT_DIR}/examples/wsgi/account-server.wsgi.template ${SWIFT_APACHE_WSGI_DIR}/account-server-${node_number}.wsgi + sudo sed -e " + /^#/d;/^$/d; + s/%SERVICECONF%/account-server\/${node_number}.conf/g; + " -i ${SWIFT_APACHE_WSGI_DIR}/account-server-${node_number}.wsgi + + done + + # run apache server as stack user + change_apache_user_group ${STACK_USER} + + # WSGI isn't enabled by default, enable it + ! 
is_fedora && sudo a2enmod wsgi } # configure_swift() - Set config files, create data dirs and loop image @@ -288,6 +389,9 @@ EOF sudo chown -R $USER:adm ${swift_log_dir} sed "s,%SWIFT_LOGDIR%,${swift_log_dir}," $FILES/swift/rsyslog.conf | sudo \ tee /etc/rsyslog.d/10-swift.conf + if is_apache_enabled_service swift; then + _config_swift_apache_wsgi + fi } # create_swift_disk - Create Swift backing disk @@ -423,6 +527,9 @@ function init_swift() { function install_swift() { git_clone $SWIFT_REPO $SWIFT_DIR $SWIFT_BRANCH setup_develop $SWIFT_DIR + if is_apache_enabled_service swift; then + install_apache_wsgi + fi } function install_swiftclient() { @@ -444,6 +551,22 @@ function start_swift() { sudo systemctl start xinetd.service fi + if is_apache_enabled_service swift; then + # Make sure the apache lock dir is owned by $STACK_USER + # for running apache server to avoid failure of restarting + # apache server due to permission problem. + sudo chown -R $STACK_USER /var/run/lock/$APACHE_NAME + restart_apache_server + swift-init --run-dir=${SWIFT_DATA_DIR}/run rest start + screen_it s-proxy "cd $SWIFT_DIR && sudo tail -f /var/log/$APACHE_NAME/proxy-server" + if [[ ${SWIFT_REPLICAS} == 1 ]]; then + for type in object container account; do + screen_it s-${type} "cd $SWIFT_DIR && sudo tail -f /var/log/$APACHE_NAME/${type}-server-1" + done + fi + return 0 + fi + # By default with only one replica we are launching the proxy, # container, account and object server in screen in foreground and # other services in background. 
If we have SWIFT_REPLICAS set to something @@ -460,7 +583,7 @@ function start_swift() { done screen_it s-proxy "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONF_DIR}/proxy-server.conf -v" if [[ ${SWIFT_REPLICAS} == 1 ]]; then - for type in object container account;do + for type in object container account; do screen_it s-${type} "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-${type}-server ${SWIFT_CONF_DIR}/${type}-server/1.conf -v" done fi @@ -468,6 +591,11 @@ function start_swift() { # stop_swift() - Stop running processes (non-screen) function stop_swift() { + + if is_apache_enabled_service swift; then + swift-init --run-dir=${SWIFT_DATA_DIR}/run rest stop && return 0 + fi + # screen normally killed by unstack.sh if type -p swift-init >/dev/null; then swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true diff --git a/stack.sh b/stack.sh index 4e2350581f..c4d414e60d 100755 --- a/stack.sh +++ b/stack.sh @@ -298,6 +298,7 @@ SERVICE_TIMEOUT=${SERVICE_TIMEOUT:-60} # ================== # Source project function libraries +source $TOP_DIR/lib/apache source $TOP_DIR/lib/tls source $TOP_DIR/lib/horizon source $TOP_DIR/lib/keystone diff --git a/unstack.sh b/unstack.sh index 1e80bf35c7..2268b90458 100755 --- a/unstack.sh +++ b/unstack.sh @@ -24,6 +24,9 @@ source $TOP_DIR/stackrc # Destination path for service data DATA_DIR=${DATA_DIR:-${DEST}/data} +# Import apache functions +source $TOP_DIR/lib/apache + # Get project function libraries source $TOP_DIR/lib/baremetal source $TOP_DIR/lib/cinder From 0d97cbe2b5066ee0f860b1fb94df3d7df6e2fcb2 Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Mon, 29 Jul 2013 09:41:50 +0100 Subject: [PATCH 0240/4704] xenapi: Uninstall instances The devstack script sometimes failed to clean up the left over instances. This patch will utilize the xe vm-uninstall force=true command to get rid of the instances. 
Change-Id: I3450e2d5c9a4b1fe8ede39c4cc5c337cd541ea35 --- tools/xen/install_os_domU.sh | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh index 6eb30130cd..43247fc75d 100755 --- a/tools/xen/install_os_domU.sh +++ b/tools/xen/install_os_domU.sh @@ -138,9 +138,7 @@ if [ "$DO_SHUTDOWN" = "1" ]; then # Destroy any instances that were launched for uuid in `xe vm-list | grep -1 instance | grep uuid | sed "s/.*\: //g"`; do echo "Shutting down nova instance $uuid" - xe vm-unpause uuid=$uuid || true - xe vm-shutdown uuid=$uuid || true - xe vm-destroy uuid=$uuid + xe vm-uninstall uuid=$uuid force=true done # Destroy orphaned vdis From fea70f80c0f653910b68dc5557a9f5b318091f72 Mon Sep 17 00:00:00 2001 From: Julie Pichon Date: Mon, 29 Jul 2013 11:22:08 +0100 Subject: [PATCH 0241/4704] Configure horizon and horizon URLs in Tempest Tempest now supports testing the dashboard. Adjust the configuration accordingly. Change-Id: Ifb5619caab8bd6080c5df4ed43e16c921e7f9b1f --- lib/tempest | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index 3831c28a30..9c9c921d10 100644 --- a/lib/tempest +++ b/lib/tempest @@ -275,11 +275,15 @@ function configure_tempest() { iniset $TEMPEST_CONF volume backend2_name "LVM_iSCSI_2" fi + # Dashboard + iniset $TEMPEST_CONF dashboard dashboard_url "http://$SERVICE_HOST/" + iniset $TEMPEST_CONF dashboard login_url "http://$SERVICE_HOST/auth/login/" + # cli iniset $TEMPEST_CONF cli cli_dir $NOVA_BIN_DIR # service_available - for service in nova cinder glance neutron swift heat ; do + for service in nova cinder glance neutron swift heat horizon ; do if is_service_enabled $service ; then iniset $TEMPEST_CONF service_available $service "True" else From b6f7ea4fb0bfd4bcc1d1bb35f97d29eb70075b73 Mon Sep 17 00:00:00 2001 From: Alex Gaynor Date: Mon, 29 Jul 2013 08:12:39 -0700 Subject: [PATCH 0242/4704] Added libffi-dev to the needed packages 
Change-Id: Ida7001886f17c9413b33312f12f2478a54bd6c90 --- files/apts/swift | 1 + files/rpms/swift | 1 + 2 files changed, 2 insertions(+) diff --git a/files/apts/swift b/files/apts/swift index c52c68b765..1c283cf6f0 100644 --- a/files/apts/swift +++ b/files/apts/swift @@ -1,5 +1,6 @@ curl gcc +libffi-dev memcached python-configobj python-coverage diff --git a/files/rpms/swift b/files/rpms/swift index ee1fad8c8c..2cc4a0bf39 100644 --- a/files/rpms/swift +++ b/files/rpms/swift @@ -1,5 +1,6 @@ curl gcc +libffi-devel memcached python-configobj python-coverage From 14ccba0e400f7f0b5916ab75eca3b44d8c1486d2 Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Mon, 29 Jul 2013 16:15:53 -0400 Subject: [PATCH 0243/4704] Set tempest lock_path to default in $DATA_DIR Change I464edf9e11710e1931ed4f7b0262328a57e5b3de set the tempest lock_path to be $TMPDIR, however when run in the gate this is an empty string which results in all tests that use locking failing. This commit corrects that by setting the lock_path config variable to use $TEMPEST_STATE_PATH which defaults to $DATA_DIR/tempest. 
Change-Id: I02fbd50ca68d6daafb5b4c23579473eb703ae72a --- lib/tempest | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index 09333342e4..9c4e6046ee 100644 --- a/lib/tempest +++ b/lib/tempest @@ -42,6 +42,7 @@ set +o xtrace TEMPEST_DIR=$DEST/tempest TEMPEST_CONF_DIR=$TEMPEST_DIR/etc TEMPEST_CONF=$TEMPEST_CONF_DIR/tempest.conf +TEMPEST_STATE_PATH=${TEMPEST_STATE_PATH:=$DATA_DIR/tempest} NOVA_SOURCE_DIR=$DEST/nova @@ -196,7 +197,8 @@ function configure_tempest() { fi # Oslo - iniset $TEMPEST_CONF DEFAULT lock_path $TMPDIR + iniset $TEMPEST_CONF DEFAULT lock_path $TEMPEST_STATE_PATH + mkdir -p $TEMPEST_STATE_PATH # Timeouts iniset $TEMPEST_CONF compute build_timeout $BUILD_TIMEOUT From f34424ea7d6aafeed0f371ec2131924cf3c2ec06 Mon Sep 17 00:00:00 2001 From: Edgar Magana Date: Mon, 29 Jul 2013 16:47:13 -0700 Subject: [PATCH 0244/4704] Update to PLUMgrid plugin configuration Fix bug #1206308 Add function for security groups Add function for ovs check Update plugin class configuration Change-Id: I603e902f985277a61f162abdfffd6430cc00efaa --- lib/neutron_plugins/plumgrid | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/lib/neutron_plugins/plumgrid b/lib/neutron_plugins/plumgrid index a4f0b0dd5d..9d3c92ff51 100644 --- a/lib/neutron_plugins/plumgrid +++ b/lib/neutron_plugins/plumgrid @@ -21,7 +21,7 @@ function neutron_plugin_configure_common() { Q_PLUGIN_CONF_PATH=etc/neutron/plugins/plumgrid Q_PLUGIN_CONF_FILENAME=plumgrid.ini Q_DB_NAME="plumgrid_neutron" - Q_PLUGIN_CLASS="neutron.plugins.plumgrid.plumgrid_nos_plugin.plumgrid_plugin.NeutronPluginPLUMgridV2" + Q_PLUGIN_CLASS="neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin.NeutronPluginPLUMgridV2" PLUMGRID_DIRECTOR_IP=${PLUMGRID_DIRECTOR_IP:-localhost} PLUMGRID_DIRECTOR_PORT=${PLUMGRID_DIRECTOR_PORT:-7766} } @@ -35,6 +35,16 @@ function neutron_plugin_configure_debug_command() { : } +function is_neutron_ovs_base_plugin() { + # False + return 1 
+} + +function has_neutron_plugin_security_group() { + # False + return 1 +} + function neutron_plugin_check_adv_test_requirements() { is_service_enabled q-agt && is_service_enabled q-dhcp && return 0 } From b772659f9df8153caa3b0fa155614d2d6504541e Mon Sep 17 00:00:00 2001 From: Kyle Mestery Date: Fri, 19 Jul 2013 14:26:53 +0000 Subject: [PATCH 0245/4704] Update the ML2 configuration in devstack This commit adds support to devstack to set ML2 options including MechanismDrivers, TypeDrivers, and TypeDriver specific configuration for ML2 when running with devstack. This also allows for simple configuration of VLAN, GRE, and VXLAN ML2 devstack setups with the OVS agent as follows: # VLAN configuration Q_PLUGIN=ml2 ENABLE_TENANT_VLANS=True # GRE tunnel configuration Q_PLUGIN=ml2 ENABLE_TENANT_TUNNELS=True # VXLAN tunnel configuration Q_PLUGIN=ml2 Q_ML2_TENANT_NETWORK_TYPE=vxlan Fixes bug 1200767 Change-Id: Ib16efac13440b5d50658a0e6be35bc735510a262 --- README.md | 26 ++++++++++++++ lib/neutron_plugins/ml2 | 79 +++++++++++++++++++++++++++++++++-------- 2 files changed, 91 insertions(+), 14 deletions(-) diff --git a/README.md b/README.md index fbf7b4a008..63b042102b 100644 --- a/README.md +++ b/README.md @@ -148,6 +148,32 @@ An example of using the variables in your `localrc` is below: Q_AGENT_EXTRA_AGENT_OPTS=(tunnel_type=vxlan vxlan_udp_port=8472) Q_SRV_EXTRA_OPTS=(tenant_network_type=vxlan) +devstack also supports configuring the Neutron ML2 plugin. The ML2 plugin can run with the OVS, LinuxBridge, or Hyper-V agents on compute hosts. A simple way to configure the ml2 plugin is shown below: + + # VLAN configuration + Q_PLUGIN=ml2 + ENABLE_TENANT_VLANS=True + + # GRE tunnel configuration + Q_PLUGIN=ml2 + ENABLE_TENANT_TUNNELS=True + + # VXLAN tunnel configuration + Q_PLUGIN=ml2 + Q_ML2_TENANT_NETWORK_TYPE=vxlan + +The above will default in devstack to using the OVS on each compute host. To change this, set the `Q_AGENT` variable to the agent you want to run (e.g. 
linuxbridge). + + Variable Name Notes + ------------------------------------------------------------------------------------- + Q_AGENT This specifies which agent to run with the ML2 Plugin (either `openvswitch` or `linuxbridge`). + Q_ML2_PLUGIN_MECHANISM_DRIVERS The ML2 MechanismDrivers to load. The default is none. Note, ML2 will work with the OVS and LinuxBridge agents by default. + Q_ML2_PLUGIN_TYPE_DRIVERS The ML2 TypeDrivers to load. Defaults to all available TypeDrivers. + Q_ML2_PLUGIN_GRE_TYPE_OPTIONS GRE TypeDriver options. Defaults to none. + Q_ML2_PLUGIN_VXLAN_TYPE_OPTIONS VXLAN TypeDriver options. Defaults to none. + Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS VLAN TypeDriver options. Defaults to none. + Q_AGENT_EXTRA_AGENT_OPTS Extra configuration options to pass to the OVS or LinuxBridge Agent. + # Tempest If tempest has been successfully configured, a basic set of smoke tests can be run as follows: diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2 index fcff8703e5..ff49d8e6b8 100644 --- a/lib/neutron_plugins/ml2 +++ b/lib/neutron_plugins/ml2 @@ -5,10 +5,42 @@ MY_XTRACE=$(set +o | grep xtrace) set +o xtrace +# Enable this to simply and quickly enable tunneling with ML2. 
+# Select either 'gre', 'vxlan', or '(gre vxlan)' +Q_ML2_TENANT_NETWORK_TYPE=${Q_ML2_TENANT_NETWORK_TYPE:-} +# This has to be set here since the agent will set this in the config file +if [[ "$Q_ML2_TENANT_NETWORK_TYPE" != "" ]]; then + Q_AGENT_EXTRA_AGENT_OPTS=(tunnel_types=$Q_ML2_TENANT_NETWORK_TYPE) +elif [[ "$ENABLE_TENANT_TUNNELS" = "True" ]]; then + Q_AGENT_EXTRA_AGENT_OPTS=(tunnel_types=gre) +fi + # Default openvswitch L2 agent Q_AGENT=${Q_AGENT:-openvswitch} source $TOP_DIR/lib/neutron_plugins/${Q_AGENT}_agent +# List of MechanismDrivers to load +Q_ML2_PLUGIN_MECHANISM_DRIVERS=${Q_PLUGIN_MECHANISM_DRIVERS:-} +# List of Type Drivers to load +Q_ML2_PLUGIN_TYPE_DRIVERS=${Q_ML2_PLUGIN_TYPE_DRIVERS:-local,flat,vlan,gre,vxlan} +# Default GRE TypeDriver options +Q_ML2_PLUGIN_GRE_TYPE_OPTIONS=${Q_ML2_PLUGIN_GRE_TYPE_OPTIONS:-tunnel_id_ranges=$TENANT_TUNNEL_RANGES} +# Default VXLAN TypeDriver options +Q_ML2_PLUGIN_VXLAN_TYPE_OPTIONS=${Q_ML2_PLUGIN_VXLAN_TYPE_OPTIONS:-vni_ranges=1001:2000} +# Default VLAN TypeDriver options +Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS=${Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS:-} + +function populate_ml2_config() { + OPTS=$1 + CONF=$2 + SECTION=$3 + + for I in "${OPTS[@]}"; do + # Replace the first '=' with ' ' for iniset syntax + iniset $CONF $SECTION ${I/=/ } + done +} + function neutron_plugin_configure_common() { Q_PLUGIN_CONF_PATH=etc/neutron/plugins/ml2 Q_PLUGIN_CONF_FILENAME=ml2_conf.ini @@ -17,26 +49,31 @@ function neutron_plugin_configure_common() { } function neutron_plugin_configure_service() { - if [[ "$ENABLE_TENANT_TUNNELS" = "True" ]]; then - iniset /$Q_PLUGIN_CONF_FILE ml2 tenant_network_types gre - iniset /$Q_PLUGIN_CONF_FILE ml2_type_gre tunnel_id_ranges $TENANT_TUNNEL_RANGES + if [[ "$Q_ML2_TENANT_NETWORK_TYPE" != "" ]]; then + Q_SRV_EXTRA_OPTS=(tenant_network_types=$Q_ML2_TENANT_NETWORK_TYPE) + elif [[ "$ENABLE_TENANT_TUNNELS" = "True" ]]; then + # This assumes you want a simple configuration, and will overwrite + # Q_SRV_EXTRA_OPTS if 
set in addition to ENABLE_TENANT_TUNNELS. + Q_SRV_EXTRA_OPTS=(tenant_network_types=gre) + Q_ML2_PLUGIN_GRE_TYPE_OPTIONS=(tunnel_id_ranges=$TENANT_TUNNEL_RANGES) elif [[ "$ENABLE_TENANT_VLANS" = "True" ]]; then - iniset /$Q_PLUGIN_CONF_FILE ml2 tenant_network_types vlan + Q_SRV_EXTRA_OPTS=(tenant_network_types=vlan) else echo "WARNING - The ml2 plugin is using local tenant networks, with no connectivity between hosts." fi - # Override ``ML2_VLAN_RANGES`` and any needed agent configuration - # variables in ``localrc`` for more complex physical network - # configurations. - if [[ "$ML2_VLAN_RANGES" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]]; then - ML2_VLAN_RANGES=$PHYSICAL_NETWORK - if [[ "$TENANT_VLAN_RANGE" != "" ]]; then - ML2_VLAN_RANGES=$ML2_VLAN_RANGES:$TENANT_VLAN_RANGE + # Allow for overrding VLAN configuration (for example, to configure provider + # VLANs) by first checking if Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS is set. + if [ "$Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS" == "" ]; then + if [[ "$ML2_VLAN_RANGES" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]]; then + ML2_VLAN_RANGES=$PHYSICAL_NETWORK + if [[ "$TENANT_VLAN_RANGE" != "" ]]; then + ML2_VLAN_RANGES=$ML2_VLAN_RANGES:$TENANT_VLAN_RANGE + fi + fi + if [[ "$ML2_VLAN_RANGES" != "" ]]; then + Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS=(network_vlan_ranges=$ML2_VLAN_RANGES) fi - fi - if [[ "$ML2_VLAN_RANGES" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE ml2_type_vlan network_vlan_ranges $ML2_VLAN_RANGES fi # REVISIT(rkukura): Setting firewall_driver here for @@ -52,6 +89,20 @@ function neutron_plugin_configure_service() { iniset /$Q_PLUGIN_CONF_FILE SECURITYGROUP firewall_driver neutron.agent.firewall.NoopFirewallDriver fi + # Since we enable the tunnel TypeDrivers, also enable a local_ip + iniset /$Q_PLUGIN_CONF_FILE ovs local_ip $HOST_IP + + populate_ml2_config type_drivers=$Q_ML2_PLUGIN_TYPE_DRIVERS /$Q_PLUGIN_CONF_FILE ml2 + + populate_ml2_config $Q_SRV_EXTRA_OPTS /$Q_PLUGIN_CONF_FILE ml2 + + populate_ml2_config 
$Q_ML2_PLUGIN_GRE_TYPE_OPTIONS /$Q_PLUGIN_CONF_FILE ml2_type_gre + + populate_ml2_config $Q_ML2_PLUGIN_VXLAN_TYPE_OPTIONS /$Q_PLUGIN_CONF_FILE ml2_type_vxlan + + if [ "$Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS" != "" ]; then + populate_ml2_config $Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS /$Q_PLUGIN_CONF_FILE ml2_type_vlan + fi } function has_neutron_plugin_security_group() { From bbf0645981ce0b9aee73cde88d041f07f213473f Mon Sep 17 00:00:00 2001 From: Arata Notsu Date: Fri, 26 Jul 2013 20:26:07 +0900 Subject: [PATCH 0246/4704] Add option to serve PXE for baremetal from nova-network New variable BM_DNSMASQ_FROM_NOVA_NETWORK: if setting this true, DevStack configures nova-network's dnsmask to provide PXE and does not run baremetal's one. In this case PXE traffic occur in the fixed-ip network, so no dedicated network for PXE is needed. Change-Id: I67cade02c03ab45ab6b77d8da9066d7d5ec6c78b --- ...smasq-for-baremetal-from-nova-network.conf | 3 +++ lib/baremetal | 9 +++++++++ stack.sh | 20 ++++++++++++------- 3 files changed, 25 insertions(+), 7 deletions(-) create mode 100644 files/dnsmasq-for-baremetal-from-nova-network.conf diff --git a/files/dnsmasq-for-baremetal-from-nova-network.conf b/files/dnsmasq-for-baremetal-from-nova-network.conf new file mode 100644 index 0000000000..66a375190e --- /dev/null +++ b/files/dnsmasq-for-baremetal-from-nova-network.conf @@ -0,0 +1,3 @@ +enable-tftp +tftp-root=/tftpboot +dhcp-boot=pxelinux.0 diff --git a/lib/baremetal b/lib/baremetal index bed3c093c3..44263ee2c8 100644 --- a/lib/baremetal +++ b/lib/baremetal @@ -80,6 +80,15 @@ BM_USE_FAKE_ENV=`trueorfalse False $BM_USE_FAKE_ENV` # change the virtualization type: --engine qemu BM_POSEUR_EXTRA_OPTS=${BM_POSEUR_EXTRA_OPTS:-} +# To provide PXE, configure nova-network's dnsmasq rather than run the one +# dedicated to baremetal. 
When enable this, make sure these conditions are +# fulfilled: +# 1) nova-compute and nova-network runs on the same host +# 2) nova-network uses FlatDHCPManager +# NOTE: the other BM_DNSMASQ_* have no effect on the behavior if this option +# is enabled. +BM_DNSMASQ_FROM_NOVA_NETWORK=`trueorfalse False $BM_DNSMASQ_FROM_NOVA_NETWORK` + # BM_DNSMASQ_IFACE should match FLAT_NETWORK_BRIDGE if [ "$BM_USE_FAKE_ENV" ]; then BM_DNSMASQ_IFACE=${BM_DNSMASQ_IFACE:-br99} diff --git a/stack.sh b/stack.sh index 1a2257c278..dd3912f592 100755 --- a/stack.sh +++ b/stack.sh @@ -1045,6 +1045,11 @@ if is_service_enabled nova; then iniset $NOVA_CONF baremetal driver $BM_DRIVER iniset $NOVA_CONF baremetal power_manager $BM_POWER_MANAGER iniset $NOVA_CONF baremetal tftp_root /tftpboot + if [[ "$BM_DNSMASQ_FROM_NOVA_NETWORK" = "True" ]]; then + BM_DNSMASQ_CONF=$NOVA_CONF_DIR/dnsmasq-for-baremetal-from-nova-network.conf + sudo cp "$FILES/dnsmasq-for-baremetal-from-nova-network.conf" "$BM_DNSMASQ_CONF" + iniset $NOVA_CONF DEFAULT dnsmasq_config_file "$BM_DNSMASQ_CONF" + fi # Define extra baremetal nova conf flags by defining the array ``EXTRA_BAREMETAL_OPTS``. 
for I in "${EXTRA_BAREMETAL_OPTS[@]}"; do @@ -1292,16 +1297,17 @@ if is_service_enabled nova && is_baremetal; then [[ -n "$BM_DEPLOY_KERNEL_ID" ]] && [[ -n "$BM_DEPLOY_RAMDISK_ID" ]] && \ create_baremetal_flavor $BM_DEPLOY_KERNEL_ID $BM_DEPLOY_RAMDISK_ID - # otherwise user can manually add it later by calling nova-baremetal-manage # otherwise user can manually add it later by calling nova-baremetal-manage [[ -n "$BM_FIRST_MAC" ]] && add_baremetal_node - # NOTE: we do this here to ensure that our copy of dnsmasq is running - sudo pkill dnsmasq || true - sudo dnsmasq --conf-file= --port=0 --enable-tftp --tftp-root=/tftpboot \ - --dhcp-boot=pxelinux.0 --bind-interfaces --pid-file=/var/run/dnsmasq.pid \ - --interface=$BM_DNSMASQ_IFACE --dhcp-range=$BM_DNSMASQ_RANGE \ - ${BM_DNSMASQ_DNS:+--dhcp-option=option:dns-server,$BM_DNSMASQ_DNS} + if [[ "$BM_DNSMASQ_FROM_NOVA_NETWORK" = "False" ]]; then + # NOTE: we do this here to ensure that our copy of dnsmasq is running + sudo pkill dnsmasq || true + sudo dnsmasq --conf-file= --port=0 --enable-tftp --tftp-root=/tftpboot \ + --dhcp-boot=pxelinux.0 --bind-interfaces --pid-file=/var/run/dnsmasq.pid \ + --interface=$BM_DNSMASQ_IFACE --dhcp-range=$BM_DNSMASQ_RANGE \ + ${BM_DNSMASQ_DNS:+--dhcp-option=option:dns-server,$BM_DNSMASQ_DNS} + fi # ensure callback daemon is running sudo pkill nova-baremetal-deploy-helper || true screen_it baremetal "nova-baremetal-deploy-helper" From 2c3428b1ed5719c719ee8cbf688bfcaf5d87421f Mon Sep 17 00:00:00 2001 From: av-mido Date: Thu, 11 Jul 2013 14:59:00 +0900 Subject: [PATCH 0247/4704] Add Midonet plugin support to devstack. Add support for Midokura's Midonet plugin to devstack, in lib/neutron_plugins and lib/neutron_thirdparty. 
Change-Id: I9b4d90eab09bbb21b9ba251a311620e0a21e8219 --- lib/neutron_plugins/midonet | 82 ++++++++++++++++++++++++++++++++++ lib/neutron_thirdparty/midonet | 64 ++++++++++++++++++++++++++ 2 files changed, 146 insertions(+) create mode 100644 lib/neutron_plugins/midonet create mode 100644 lib/neutron_thirdparty/midonet diff --git a/lib/neutron_plugins/midonet b/lib/neutron_plugins/midonet new file mode 100644 index 0000000000..4d343f5c91 --- /dev/null +++ b/lib/neutron_plugins/midonet @@ -0,0 +1,82 @@ +# Neutron MidoNet plugin +# ---------------------- + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + +function is_neutron_ovs_base_plugin() { + # MidoNet does not use l3-agent + # 0 means True here + return 1 +} + +function neutron_plugin_create_nova_conf() { + NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"midonet.nova.virt.libvirt.vif.MidonetVifDriver"} +} + +function neutron_plugin_install_agent_packages() { + : +} + +function neutron_plugin_configure_common() { + Q_PLUGIN_CONF_PATH=etc/neutron/plugins/midonet + Q_PLUGIN_CONF_FILENAME=midonet.ini + Q_DB_NAME="neutron_midonet" + Q_PLUGIN_CLASS="neutron.plugins.midonet.plugin.MidonetPluginV2" +} + +function neutron_plugin_configure_debug_command() { + : +} + +function neutron_plugin_configure_dhcp_agent() { + die $LINENO "q-dhcp must not be executed with MidoNet plugin!" +} + +function neutron_plugin_configure_l3_agent() { + die $LINENO "q-l3 must not be executed with MidoNet plugin!" +} + +function neutron_plugin_configure_plugin_agent() { + die $LINENO "q-agt must not be executed with MidoNet plugin!" 
+} + +function neutron_plugin_configure_service() { + if [[ "$MIDONET_API_URI" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE MIDONET midonet_uri $MIDONET_API_URI + fi + if [[ "$MIDONET_USERNAME" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE MIDONET username $MIDONET_USERNAME + fi + if [[ "$MIDONET_PASSWORD" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE MIDONET password $MIDONET_PASSWORD + fi + if [[ "$MIDONET_PROJECT_ID" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE MIDONET project_id $MIDONET_PROJECT_ID + fi + if [[ "$MIDONET_PROVIDER_ROUTER_ID" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE MIDONET provider_router_id $MIDONET_PROVIDER_ROUTER_ID + fi + if [[ "$MIDONET_METADATA_ROUTER_ID" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE MIDONET metadata_router_id $MIDONET_METADATA_ROUTER_ID + fi +} + +function neutron_plugin_setup_interface_driver() { + # May change in the future + : +} + +function has_neutron_plugin_security_group() { + # 0 means True here + return 0 +} + +function neutron_plugin_check_adv_test_requirements() { + # 0 means True here + return 1 +} + +# Restore xtrace +$MY_XTRACE diff --git a/lib/neutron_thirdparty/midonet b/lib/neutron_thirdparty/midonet new file mode 100644 index 0000000000..b3c726fe93 --- /dev/null +++ b/lib/neutron_thirdparty/midonet @@ -0,0 +1,64 @@ +# MidoNet +# ------- + +# This file implements functions required to configure MidoNet as the third-party +# system used with devstack's Neutron. 
To include this file, specify the following +# variables in localrc: +# +# * enable_service midonet +# + +# MidoNet devstack destination dir +MIDONET_DIR=${MIDONET_DIR:-$DEST/midonet} + +# MidoNet client repo +MIDONET_CLIENT_REPO=${MIDONET_CLIENT_REPO:-https://github.com/midokura/python-midonetclient.git} +MIDONET_CLIENT_BRANCH=${MIDONET_CLIENT_BRANCH:-master} +MIDONET_CLIENT_DIR=$MIDONET_DIR/python-midonetclient + +# MidoNet OpenStack repo +MIDONET_OS_REPO=${MIDONET_OS_REPO:-https://github.com/midokura/midonet-openstack.git} +MIDONET_OS_BRANCH=${MIDONET_OS_BRANCH:-master} +MIDONET_OS_DIR=$MIDONET_DIR/midonet-openstack +MIDONET_SETUP_SCRIPT=${MIDONET_SETUP_SCRIPT:-$MIDONET_OS_DIR/bin/setup_midonet_topology.py} + + +MIDOLMAN_LOG=${MIDOLMAN_LOG:-/var/log/midolman/midolman.log} +MIDONET_API_LOG=${MIDONET_API_LOG:-/var/log/tomcat7/midonet-api.log} + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + +function configure_midonet() { + : +} + +function init_midonet() { + + # Initialize DB. Evaluate the output of setup_midonet_topology.py to set + # env variables for provider router ID and metadata router ID + eval `python $MIDONET_SETUP_SCRIPT admin $ADMIN_PASSWORD $ADMIN_TENANT provider_devices` + die_if_not_set $LINENO provider_router_id "Error running midonet setup script, provider_router_id was not set." + die_if_not_set $LINENO metadata_router_id "Error running midonet setup script, metadata_router_id was not set." 
+ + iniset /$Q_PLUGIN_CONF_FILE MIDONET provider_router_id $provider_router_id + iniset /$Q_PLUGIN_CONF_FILE MIDONET metadata_router_id $metadata_router_id +} + +function install_midonet() { + git_clone $MIDONET_CLIENT_REPO $MIDONET_CLIENT_DIR $MIDONET_CLIENT_BRANCH + git_clone $MIDONET_OS_REPO $MIDONET_OS_DIR $MIDONET_OS_BRANCH + export PYTHONPATH=$MIDONET_CLIENT_DIR/src:$MIDONET_OS_DIR/src:$PYTHONPATH +} + +function start_midonet() { + : +} + +function stop_midonet() { + : +} + +# Restore xtrace +$MY_XTRACE From c973f1249a08c5dcb42b335b6d3249e8c6c6da24 Mon Sep 17 00:00:00 2001 From: Edgar Magana Date: Mon, 29 Jul 2013 16:39:56 -0700 Subject: [PATCH 0248/4704] Set horizon configuration for security groups Fix bug #1206271 Change-Id: I33a530265be1e5ab9181e605eb4b7c3bf1fdf9c4 --- lib/horizon | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/horizon b/lib/horizon index b537484608..f88247cd00 100644 --- a/lib/horizon +++ b/lib/horizon @@ -104,6 +104,9 @@ function init_horizon() { local_settings=$HORIZON_DIR/openstack_dashboard/local/local_settings.py cp $HORIZON_SETTINGS $local_settings + if is_service_enabled neutron; then + _horizon_config_set $local_settings OPENSTACK_NEUTRON_NETWORK enable_security_group $Q_USE_SECGROUP + fi # enable loadbalancer dashboard in case service is enabled if is_service_enabled q-lbaas; then _horizon_config_set $local_settings OPENSTACK_NEUTRON_NETWORK enable_lb True From a7a219ab76d4a346f794daafd499ece5c32c5e3c Mon Sep 17 00:00:00 2001 From: Christian Berendt Date: Tue, 30 Jul 2013 18:22:32 +0200 Subject: [PATCH 0249/4704] renamed deprecated glanceclient parameter According to the sources the parameter --public in python-glanceclient should be removed after only using the new parameter --is-public in Devstack. 
Change-Id: I25fbb23f4823b3766db647dd50a5b538aad3e55a --- functions | 10 +++++----- lib/baremetal | 14 +++++++------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/functions b/functions index 08c525348a..84e88fbd16 100644 --- a/functions +++ b/functions @@ -1288,9 +1288,9 @@ function upload_image() { if [ "$CONTAINER_FORMAT" = "bare" ]; then if [ "$UNPACK" = "zcat" ]; then - glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --public --container-format=$CONTAINER_FORMAT --disk-format $DISK_FORMAT < <(zcat --force "${IMAGE}") + glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --is-public True --container-format=$CONTAINER_FORMAT --disk-format $DISK_FORMAT < <(zcat --force "${IMAGE}") else - glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --public --container-format=$CONTAINER_FORMAT --disk-format $DISK_FORMAT < "${IMAGE}" + glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --is-public True --container-format=$CONTAINER_FORMAT --disk-format $DISK_FORMAT < "${IMAGE}" fi else # Use glance client to add the kernel the root filesystem. @@ -1298,12 +1298,12 @@ function upload_image() { # kernel for use when uploading the root filesystem. 
KERNEL_ID=""; RAMDISK_ID=""; if [ -n "$KERNEL" ]; then - KERNEL_ID=$(glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME-kernel" --public --container-format aki --disk-format aki < "$KERNEL" | grep ' id ' | get_field 2) + KERNEL_ID=$(glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME-kernel" --is-public True --container-format aki --disk-format aki < "$KERNEL" | grep ' id ' | get_field 2) fi if [ -n "$RAMDISK" ]; then - RAMDISK_ID=$(glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME-ramdisk" --public --container-format ari --disk-format ari < "$RAMDISK" | grep ' id ' | get_field 2) + RAMDISK_ID=$(glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME-ramdisk" --is-public True --container-format ari --disk-format ari < "$RAMDISK" | grep ' id ' | get_field 2) fi - glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "${IMAGE_NAME%.img}" --public --container-format ami --disk-format ami ${KERNEL_ID:+--property kernel_id=$KERNEL_ID} ${RAMDISK_ID:+--property ramdisk_id=$RAMDISK_ID} < "${IMAGE}" + glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "${IMAGE_NAME%.img}" --is-public True --container-format ami --disk-format ami ${KERNEL_ID:+--property kernel_id=$KERNEL_ID} ${RAMDISK_ID:+--property ramdisk_id=$RAMDISK_ID} < "${IMAGE}" fi } diff --git a/lib/baremetal b/lib/baremetal index bed3c093c3..9848849dba 100644 --- a/lib/baremetal +++ b/lib/baremetal @@ -239,14 +239,14 @@ function upload_baremetal_deploy() { --os-image-url http://$GLANCE_HOSTPORT \ image-create \ --name $BM_DEPLOY_KERNEL \ - --public --disk-format=aki \ + --is-public True --disk-format=aki \ < $TOP_DIR/files/$BM_DEPLOY_KERNEL | grep ' id ' | get_field 2) BM_DEPLOY_RAMDISK_ID=$(glance \ --os-auth-token $token \ --os-image-url 
http://$GLANCE_HOSTPORT \ image-create \ --name $BM_DEPLOY_RAMDISK \ - --public --disk-format=ari \ + --is-public True --disk-format=ari \ < $TOP_DIR/files/$BM_DEPLOY_RAMDISK | grep ' id ' | get_field 2) } @@ -294,14 +294,14 @@ function extract_and_upload_k_and_r_from_image() { --os-image-url http://$GLANCE_HOSTPORT \ image-create \ --name $image_name-kernel \ - --public --disk-format=aki \ + --is-public True --disk-format=aki \ < $TOP_DIR/files/$OUT_KERNEL | grep ' id ' | get_field 2) RAMDISK_ID=$(glance \ --os-auth-token $token \ --os-image-url http://$GLANCE_HOSTPORT \ image-create \ --name $image_name-initrd \ - --public --disk-format=ari \ + --is-public True --disk-format=ari \ < $TOP_DIR/files/$OUT_RAMDISK | grep ' id ' | get_field 2) } @@ -371,14 +371,14 @@ function upload_baremetal_image() { --os-auth-token $token \ --os-image-url http://$GLANCE_HOSTPORT \ image-create \ - --name "$IMAGE_NAME-kernel" --public \ + --name "$IMAGE_NAME-kernel" --is-public True \ --container-format aki \ --disk-format aki < "$KERNEL" | grep ' id ' | get_field 2) RAMDISK_ID=$(glance \ --os-auth-token $token \ --os-image-url http://$GLANCE_HOSTPORT \ image-create \ - --name "$IMAGE_NAME-ramdisk" --public \ + --name "$IMAGE_NAME-ramdisk" --is-public True \ --container-format ari \ --disk-format ari < "$RAMDISK" | grep ' id ' | get_field 2) else @@ -390,7 +390,7 @@ function upload_baremetal_image() { --os-auth-token $token \ --os-image-url http://$GLANCE_HOSTPORT \ image-create \ - --name "${IMAGE_NAME%.img}" --public \ + --name "${IMAGE_NAME%.img}" --is-public True \ --container-format $CONTAINER_FORMAT \ --disk-format $DISK_FORMAT \ ${KERNEL_ID:+--property kernel_id=$KERNEL_ID} \ From 8d1370343753b669569f3010a83c82ca8a2904ef Mon Sep 17 00:00:00 2001 From: Eoghan Glynn Date: Tue, 30 Jul 2013 14:14:55 +0000 Subject: [PATCH 0250/4704] Launch ceilometer-alarm-* services. 
Add ceilometer-alarm-notify and ceilometer-alarm-eval to the set of ceilometer services launchable by devstack when explicitly enabled in the localrc. Change-Id: I1575437c588cf9079f1e3a6c855123eae09c5dac --- lib/ceilometer | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/lib/ceilometer b/lib/ceilometer index 548496e707..dd370a08fb 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -1,8 +1,11 @@ # lib/ceilometer # Install and start **Ceilometer** service -# To enable Ceilometer services, add the following to localrc -# enable_service ceilometer-acompute ceilometer-acentral ceilometer-collector ceilometer-api +# To enable a minimal set of Ceilometer services, add the following to localrc: +# enable_service ceilometer-acompute ceilometer-acentral ceilometer-collector ceilometer-api +# +# To ensure Ceilometer alarming services are enabled also, further add to the localrc: +# enable_service ceilometer-alarm-notify ceilometer-alarm-eval # Dependencies: # - functions @@ -136,12 +139,14 @@ function start_ceilometer() { screen_it ceilometer-acentral "ceilometer-agent-central --config-file $CEILOMETER_CONF" screen_it ceilometer-collector "ceilometer-collector --config-file $CEILOMETER_CONF" screen_it ceilometer-api "ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF" + screen_it ceilometer-alarm-notify "ceilometer-alarm-notifier --config-file $CEILOMETER_CONF" + screen_it ceilometer-alarm-eval "ceilometer-alarm-singleton --config-file $CEILOMETER_CONF" } # stop_ceilometer() - Stop running processes function stop_ceilometer() { # Kill the ceilometer screen windows - for serv in ceilometer-acompute ceilometer-acentral ceilometer-collector ceilometer-api; do + for serv in ceilometer-acompute ceilometer-acentral ceilometer-collector ceilometer-api ceilometer-alarm-notify ceilometer-alarm-eval; do screen -S $SCREEN_NAME -p $serv -X kill done } From c02b2f87cc9f8b75f5d1eb42b31d1117266a1aec Mon Sep 17 00:00:00 2001 
From: Mate Lakat Date: Tue, 30 Jul 2013 19:43:10 +0100 Subject: [PATCH 0251/4704] xenapi: Use a jeos vm as a template DevStack was using templating to speed up the setup process with XenServer. The template already included some devstack customisations, not just a clean OS. This change modifies devstack behaviour, so that the template is a simple clean operating system. This makes it easier to use custom OS as a template, potentially speeding up the tests. related to blueprint xenapi-devstack-cleanup Change-Id: I6cb0a7ed7a90e749b78329a8e2b65fb8b7fcfa5f --- tools/xen/install_os_domU.sh | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh index e762f6d875..92b131795b 100755 --- a/tools/xen/install_os_domU.sh +++ b/tools/xen/install_os_domU.sh @@ -167,8 +167,8 @@ fi # GUEST_NAME=${GUEST_NAME:-"DevStackOSDomU"} -TNAME="devstack_template" -SNAME_PREPARED="template_prepared" +TNAME="jeos_template_for_devstack" +SNAME_TEMPLATE="jeos_snapshot_for_devstack" SNAME_FIRST_BOOT="before_first_boot" function wait_for_VM_to_halt() { @@ -234,21 +234,8 @@ if [ -z "$templateuuid" ]; then vm_uuid=$(xe_min vm-list name-label="$GUEST_NAME") xe vm-param-set actions-after-reboot=Restart uuid="$vm_uuid" - # - # Prepare VM for DevStack - # - - # Install XenServer tools, and other such things - $THIS_DIR/prepare_guest_template.sh "$GUEST_NAME" - - # start the VM to run the prepare steps - xe vm-start vm="$GUEST_NAME" - - # Wait for prep script to finish and shutdown system - wait_for_VM_to_halt - # Make template from VM - snuuid=$(xe vm-snapshot vm="$GUEST_NAME" new-name-label="$SNAME_PREPARED") + snuuid=$(xe vm-snapshot vm="$GUEST_NAME" new-name-label="$SNAME_TEMPLATE") xe snapshot-clone uuid=$snuuid new-name-label="$TNAME" else # @@ -257,6 +244,19 @@ else vm_uuid=$(xe vm-install template="$TNAME" new-name-label="$GUEST_NAME") fi +# +# Prepare VM for DevStack +# + +# Install XenServer 
tools, and other such things +$THIS_DIR/prepare_guest_template.sh "$GUEST_NAME" + +# start the VM to run the prepare steps +xe vm-start vm="$GUEST_NAME" + +# Wait for prep script to finish and shutdown system +wait_for_VM_to_halt + ## Setup network cards # Wipe out all destroy_all_vifs_of "$GUEST_NAME" From 1b6b5318a05adbc049fd35ca62ed30852ea1026a Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 31 Jul 2013 06:46:34 -0400 Subject: [PATCH 0252/4704] install oslo from upstream in devstack the libraries that have graduated from oslo incubation need to be made available in devstack so that projects can develop against upstream versions of these libraries, and that we can test their compatibility in the gate. This should also allow us to force global requirements on all the projects during installation. Change-Id: Idf527b16b50eb58564ec74428290cd31424f5de2 --- lib/oslo | 42 ++++++++++++++++++++++++++++++++++++++++++ stack.sh | 4 ++++ stackrc | 8 ++++++++ 3 files changed, 54 insertions(+) create mode 100644 lib/oslo diff --git a/lib/oslo b/lib/oslo new file mode 100644 index 0000000000..1eb13dbf3d --- /dev/null +++ b/lib/oslo @@ -0,0 +1,42 @@ +# lib/oslo +# +# Functions to install oslo libraries from git +# +# We need this to handle the fact that projects would like to use +# pre-released versions of oslo libraries. 
+ +# Dependencies: +# ``functions`` file + +# ``stack.sh`` calls the entry points in this order: +# +# install_oslo + +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- +OSLOCFG_DIR=$DEST/oslo.config +OSLOMSG_DIR=$DEST/oslo.messaging + +# Entry Points +# ------------ + +# install_oslo() - Collect source and prepare +function install_oslo() { + git_clone $OSLOCFG_REPO $OSLOCFG_DIR $OSLOCFG_BRANCH + setup_develop $OSLOCFG_DIR + + git_clone $OSLOMSG_REPO $OSLOMSG_DIR $OSLOMSG_BRANCH + setup_develop $OSLOMSG_DIR +} + +# Restore xtrace +$XTRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/stack.sh b/stack.sh index 4e2350581f..5013b0ae62 100755 --- a/stack.sh +++ b/stack.sh @@ -299,6 +299,7 @@ SERVICE_TIMEOUT=${SERVICE_TIMEOUT:-60} # Source project function libraries source $TOP_DIR/lib/tls +source $TOP_DIR/lib/oslo source $TOP_DIR/lib/horizon source $TOP_DIR/lib/keystone source $TOP_DIR/lib/glance @@ -661,6 +662,9 @@ echo_summary "Installing OpenStack project source" git_clone $PBR_REPO $PBR_DIR $PBR_BRANCH setup_develop $PBR_DIR +# Install oslo libraries that have graduated +install_oslo + # Install clients libraries install_keystoneclient install_glanceclient diff --git a/stackrc b/stackrc index ac72132f2e..3e93d231c9 100644 --- a/stackrc +++ b/stackrc @@ -116,6 +116,14 @@ NOVACLIENT_BRANCH=${NOVACLIENT_BRANCH:-master} OPENSTACKCLIENT_REPO=${OPENSTACKCLIENT_REPO:-${GIT_BASE}/openstack/python-openstackclient.git} OPENSTACKCLIENT_BRANCH=${OPENSTACKCLIENT_BRANCH:-master} +# oslo.config +OSLOCFG_REPO=${OSLOCFG_REPO:-${GIT_BASE}/openstack/oslo.config.git} +OSLOCFG_BRANCH=${OSLOCFG_BRANCH:-master} + +# oslo.messaging +OSLOMSG_REPO=${OSLOMSG_REPO:-${GIT_BASE}/openstack/oslo.messaging.git} +OSLOMSG_BRANCH=${OSLOMSG_BRANCH:-master} + # pbr drives the setuptools configs PBR_REPO=${PBR_REPO:-${GIT_BASE}/openstack-dev/pbr.git} PBR_BRANCH=${PBR_BRANCH:-master} From a8ca815b85e7ae5a3ac71795eddfe063ccceb4bf Mon Sep 17 
00:00:00 2001 From: Monty Taylor Date: Wed, 31 Jul 2013 13:12:30 -0400 Subject: [PATCH 0253/4704] Fix a typo error in the openvz image link It's a -, not a .. Change-Id: I05a39e20234d89c08cff66b57f6b350ad0fe4a98 --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index 3e93d231c9..36171cc579 100644 --- a/stackrc +++ b/stackrc @@ -224,7 +224,7 @@ esac case "$VIRT_DRIVER" in openvz) DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ubuntu-12.04-x86_64} - IMAGE_URLS=${IMAGE_URLS:-"http://download.openvz.org/template/precreated/ubuntu-12.04.x86_64.tar.gz"};; + IMAGE_URLS=${IMAGE_URLS:-"http://download.openvz.org/template/precreated/ubuntu-12.04-x86_64.tar.gz"};; libvirt) case "$LIBVIRT_TYPE" in lxc) # the cirros root disk in the uec tarball is empty, so it will not work for lxc From 0392a10a635a8befb13ff242e2ed5d5be4b23560 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 31 Jul 2013 13:07:45 -0400 Subject: [PATCH 0254/4704] add lib/infra move the infrastructure projects to a dedicated lib/infra, which gives us access to this during grenade upgrade tests. Change-Id: I1e832792b61d41ad290b4b2ab26fe664e710cebd --- lib/infra | 56 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ stack.sh | 10 +++++----- stackrc | 4 ++++ 3 files changed, 65 insertions(+), 5 deletions(-) create mode 100644 lib/infra diff --git a/lib/infra b/lib/infra new file mode 100644 index 0000000000..0b732598ff --- /dev/null +++ b/lib/infra @@ -0,0 +1,56 @@ +# lib/infra +# +# Functions to install infrastructure projects needed by other projects +# early in the cycle. 
We need this so we can do things like gate on +# requirements as a global list + +# Dependencies: +# ``functions`` file + +# ``stack.sh`` calls the entry points in this order: +# +# unfubar_setuptools +# install_infra + +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- +PBR_DIR=$DEST/pbr +REQUIREMENTS_DIR=$DEST/requirements + +# Entry Points +# ------------ + +# unfubar_setuptools() - Unbreak the giant mess that is the current state of setuptools +function unfubar_setuptools() { + # this is a giant game of who's on first, but it does consistently work + # there is hope that upstream python packaging fixes this in the future + echo_summary "Unbreaking setuptools" + pip_install -U setuptools + pip_install -U pip + uninstall_package python-setuptools + pip_install -U setuptools + pip_install -U pip +} + + +# install_infra() - Collect source and prepare +function install_infra() { + # bring down global requirements + git_clone $REQUIREMENTS_REPO $REQUIREMENTS_DIR $REQUIREMENTS_BRANCH + + # Install pbr + git_clone $PBR_REPO $PBR_DIR $PBR_BRANCH + setup_develop $PBR_DIR +} + +# Restore xtrace +$XTRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/stack.sh b/stack.sh index ed6afab937..c5e306ecd9 100755 --- a/stack.sh +++ b/stack.sh @@ -299,6 +299,7 @@ SERVICE_TIMEOUT=${SERVICE_TIMEOUT:-60} # Source project function libraries source $TOP_DIR/lib/tls +source $TOP_DIR/lib/infra source $TOP_DIR/lib/oslo source $TOP_DIR/lib/horizon source $TOP_DIR/lib/keystone @@ -314,8 +315,6 @@ source $TOP_DIR/lib/ldap # Set the destination directories for other OpenStack projects OPENSTACKCLIENT_DIR=$DEST/python-openstackclient -PBR_DIR=$DEST/pbr - # Interactive Configuration # ------------------------- @@ -588,6 +587,8 @@ if is_service_enabled neutron; then install_neutron_agent_packages fi +# Unbreak the giant mess that is the current state of setuptools +unfubar_setuptools # System-specific preconfigure # 
============================ @@ -658,9 +659,8 @@ fi echo_summary "Installing OpenStack project source" -# Install pbr -git_clone $PBR_REPO $PBR_DIR $PBR_BRANCH -setup_develop $PBR_DIR +# Install required infra support libraries +install_infra # Install oslo libraries that have graduated install_oslo diff --git a/stackrc b/stackrc index 3e93d231c9..ec9380cc80 100644 --- a/stackrc +++ b/stackrc @@ -136,6 +136,10 @@ NEUTRON_BRANCH=${NEUTRON_BRANCH:-master} NEUTRONCLIENT_REPO=${NEUTRONCLIENT_REPO:-${GIT_BASE}/openstack/python-neutronclient.git} NEUTRONCLIENT_BRANCH=${NEUTRONCLIENT_BRANCH:-master} +# consolidated openstack requirements +REQUIREMENTS_REPO=${REQUIREMENTS_REPO:-${GIT_BASE}/openstack/requirements.git} +REQUIREMENTS_BRANCH=${REQUIREMENTS_BRANCH:-master} + # storage service SWIFT_REPO=${SWIFT_REPO:-${GIT_BASE}/openstack/swift.git} SWIFT_BRANCH=${SWIFT_BRANCH:-master} From 4ee4a0182e9d229a94393ca0246c924b5ff4c195 Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Wed, 31 Jul 2013 16:05:42 -0400 Subject: [PATCH 0255/4704] Fix tempest logging configuration This commit will correctly set the tempest output logging to dump all of tempest logs into a tempest.log file in $DEST/tempest/tempest.log. This will also fix the logging for tempest in the gate so it will no longer print every log message on the console. 
Fixes bug 1207066 Change-Id: I91f2ee781c4a7a18c561ea3e3b26832b97431464 --- lib/tempest | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/tempest b/lib/tempest index 6c6833721e..aaa7281a98 100644 --- a/lib/tempest +++ b/lib/tempest @@ -199,6 +199,8 @@ function configure_tempest() { # Oslo iniset $TEMPEST_CONF DEFAULT lock_path $TEMPEST_STATE_PATH mkdir -p $TEMPEST_STATE_PATH + iniset $TEMPEST_CONF DEFAULT use_stderr False + iniset $TEMPEST_CONF DEFAULT log_file tempest.log # Timeouts iniset $TEMPEST_CONF compute build_timeout $BUILD_TIMEOUT From cbaff86b02a6a474de2503b129a249442b3d6360 Mon Sep 17 00:00:00 2001 From: Sreeram Yerrapragada Date: Wed, 24 Jul 2013 19:49:23 -0700 Subject: [PATCH 0256/4704] Add support for vmdk files as glance images * Adds support for vmdk files as glance images in upload_image function * Set default image url to use for vsphere driver in stackrc * Now using a more stable url Change-Id: If6d45bc8dfd3c812ded5e1bcf69ad7ebd9b64f34 --- functions | 8 ++++++++ stackrc | 3 ++- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/functions b/functions index 84e88fbd16..bb51a55fdf 100644 --- a/functions +++ b/functions @@ -1219,6 +1219,14 @@ function upload_image() { return fi + # vmdk format images + if [[ "$image_url" =~ '.vmdk' ]]; then + IMAGE="$FILES/${IMAGE_FNAME}" + IMAGE_NAME="${IMAGE_FNAME%.vmdk}" + glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --is-public=True --container-format bare --disk-format vmdk --property vmware-disktype="preallocated" < "${IMAGE}" + return + fi + # XenServer-ovf-format images are provided as .vhd.tgz as well # and should not be decompressed prior to loading if [[ "$image_url" =~ '.vhd.tgz' ]]; then diff --git a/stackrc b/stackrc index 88f7d8ac0b..74a399c823 100644 --- a/stackrc +++ b/stackrc @@ -240,7 +240,8 @@ case "$VIRT_DRIVER" in esac ;; vsphere) - IMAGE_URLS="";; + DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-debian-2.6.32-i686} + 
IMAGE_URLS=${IMAGE_URLS:-"http://partnerweb.vmware.com/programs/vmdkimage/debian-2.6.32-i686.vmdk"};; *) # Default to Cirros with kernel, ramdisk and disk image DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.1-x86_64-uec} IMAGE_URLS=${IMAGE_URLS:-"http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-uec.tar.gz"};; From 05901f46f75e1c80a2448bda463e1dd6b937e7e7 Mon Sep 17 00:00:00 2001 From: armando-migliaccio Date: Thu, 1 Aug 2013 10:44:22 -0700 Subject: [PATCH 0257/4704] Remove unused keystone params from neutron agents' config files DHCP, L3 and Metadata agents' config files no longer need to duplicate this info; it's available in neutron.conf Change-Id: I7bea25d1c2b9249ddacce3f4638f7a8ed4f43197 --- lib/neutron | 4 ---- 1 file changed, 4 deletions(-) diff --git a/lib/neutron b/lib/neutron index 835f900425..bfae486633 100644 --- a/lib/neutron +++ b/lib/neutron @@ -523,7 +523,6 @@ function _configure_neutron_debug_command() { # be cleaned. iniset $NEUTRON_TEST_CONFIG_FILE agent root_helper "$Q_RR_COMMAND" - _neutron_setup_keystone $NEUTRON_TEST_CONFIG_FILE DEFAULT set_auth_url _neutron_setup_interface_driver $NEUTRON_TEST_CONFIG_FILE neutron_plugin_configure_debug_command @@ -540,7 +539,6 @@ function _configure_neutron_dhcp_agent() { iniset $Q_DHCP_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE iniset $Q_DHCP_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND" - _neutron_setup_keystone $Q_DHCP_CONF_FILE DEFAULT set_auth_url _neutron_setup_interface_driver $Q_DHCP_CONF_FILE neutron_plugin_configure_dhcp_agent @@ -561,7 +559,6 @@ function _configure_neutron_l3_agent() { iniset $Q_L3_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE iniset $Q_L3_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND" - _neutron_setup_keystone $Q_L3_CONF_FILE DEFAULT set_auth_url _neutron_setup_interface_driver $Q_L3_CONF_FILE neutron_plugin_configure_l3_agent @@ -578,7 +575,6 @@ function _configure_neutron_metadata_agent() { iniset $Q_META_CONF_FILE DEFAULT nova_metadata_ip 
$Q_META_DATA_IP iniset $Q_META_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND" - _neutron_setup_keystone $Q_META_CONF_FILE DEFAULT set_auth_url } function _configure_neutron_lbaas() { From d2cfcaa5767b12cd1bb7d80f0d0823dd66bbb5c6 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Thu, 1 Aug 2013 14:17:27 -0500 Subject: [PATCH 0258/4704] Fix get_pip_command() Be more flexible in finding pip, especially after we start replacing it Change-Id: I14b0ac1584aab99c2fe58a78e3a43196ae8130ca --- functions | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/functions b/functions index 84e88fbd16..b82b71f96c 100644 --- a/functions +++ b/functions @@ -1483,11 +1483,7 @@ function get_rootwrap_location() { # Get the path to the pip command. # get_pip_command function get_pip_command() { - if is_fedora; then - which pip-python - else - which pip - fi + which pip || which pip-python if [ $? -ne 0 ]; then die $LINENO "Unable to find pip; cannot continue" From f9b4738d59c9416893918228041de8d1f810dd89 Mon Sep 17 00:00:00 2001 From: Kui Shi Date: Fri, 2 Aug 2013 05:53:58 +0800 Subject: [PATCH 0259/4704] Update tempest test sample in devstack/README.md As part of bp:tempest-repo-restructure, the scenario directory is added and scenario tests are moved there. The tempest test sample should be updated in README.md. Fixes bug #1207109 Change-Id: I25067e4b42697985e8f4b808fad5eb801c805f9f --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 3a47fc3b71..8bca977b2c 100644 --- a/README.md +++ b/README.md @@ -179,7 +179,7 @@ The above will default in devstack to using the OVS on each compute host. 
To cha If tempest has been successfully configured, a basic set of smoke tests can be run as follows: $ cd /opt/stack/tempest - $ nosetests tempest/tests/network/test_network_basic_ops.py + $ nosetests tempest/scenario/test_network_basic_ops.py # Multi-Node Setup From 1fba1aa6bc458829dffcba2c2ce4f2e3d6a90e5b Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Fri, 2 Aug 2013 00:40:05 +0200 Subject: [PATCH 0260/4704] Don't add bulk middleware. - The bulk middleware is already in the sample. - Fixes bug 1207108 Change-Id: I569ecb734ac2e59716e5f24fa96e97d216cdc1a3 --- lib/swift | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/swift b/lib/swift index e53d674666..e6674c5654 100644 --- a/lib/swift +++ b/lib/swift @@ -54,8 +54,8 @@ fi SWIFT_LOOPBACK_DISK_SIZE=${SWIFT_LOOPBACK_DISK_SIZE:-1000000} # Set ``SWIFT_EXTRAS_MIDDLEWARE`` to extras middlewares. -# Default is ``staticweb, tempurl, bulk, formpost`` -SWIFT_EXTRAS_MIDDLEWARE=${SWIFT_EXTRAS_MIDDLEWARE:-tempurl formpost staticweb bulk} +# Default is ``staticweb, tempurl, formpost`` +SWIFT_EXTRAS_MIDDLEWARE=${SWIFT_EXTRAS_MIDDLEWARE:-tempurl formpost staticweb} # The ring uses a configurable number of bits from a path’s MD5 hash as # a partition index that designates a device. The number of bits kept From 2c94ee5237a453a0024bdcc6c6d3f4805534f56e Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Fri, 2 Aug 2013 02:02:01 +0000 Subject: [PATCH 0261/4704] Add support for running multiple n-cpus for performance testing When using the fake VIRT_DRIVER, we can launch an arbitrary number of instances, the next step for performance testing is running an arbitrary number of n-cpus. This can be done with the following settings in your localrc VIRT_DRIVER=fake NUMBER_FAKE_NOVA_COMPUTE=50 This can be used for things like measuring load from n-cpu's periodic tasks. 
Change-Id: I40e0f05374e2c10c48ea6b8816ef6e7609b3dc80 --- lib/nova | 5 +++++ stackrc | 3 +++ 2 files changed, 8 insertions(+) diff --git a/lib/nova b/lib/nova index 7a5ff1f98a..9c38498d8c 100644 --- a/lib/nova +++ b/lib/nova @@ -722,6 +722,11 @@ function start_nova() { # The group **$LIBVIRT_GROUP** is added to the current user in this script. # Use 'sg' to execute nova-compute as a member of the **$LIBVIRT_GROUP** group. screen_it n-cpu "cd $NOVA_DIR && sg $LIBVIRT_GROUP '$NOVA_BIN_DIR/nova-compute --config-file $NOVA_CONF_BOTTOM'" + elif [[ "$VIRT_DRIVER" = 'fake' ]]; then + for i in `seq 1 $NUMBER_FAKE_NOVA_COMPUTE` + do + screen_it n-cpu "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-compute --config-file $NOVA_CONF_BOTTOM" + done else screen_it n-cpu "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-compute --config-file $NOVA_CONF_BOTTOM" fi diff --git a/stackrc b/stackrc index 36171cc579..8d9c1f2c61 100644 --- a/stackrc +++ b/stackrc @@ -187,6 +187,9 @@ case "$VIRT_DRIVER" in LIBVIRT_GROUP=libvirtd fi ;; + fake) + NUMBER_FAKE_NOVA_COMPUTE=${NUMBER_FAKE_NOVA_COMPUTE:-1} + ;; xenserver) # Xen config common to nova and neutron XENAPI_USER=${XENAPI_USER:-"root"} From 5e28a3e2d2e7f02d6f0c81ddfe4ae3b0387112b6 Mon Sep 17 00:00:00 2001 From: Kui Shi Date: Fri, 2 Aug 2013 17:26:28 +0800 Subject: [PATCH 0262/4704] Add call trace in error message Call trace can help user to locate problem quickly. stack.sh uses bash as interpreter, which defines a series of "Shell Variables": BASH_SOURCE: An array variable whose members are the source filenames BASH_LINENO: An array variable whose members are the line numbers in source files where each corresponding member of FUNCNAME was invoked. FUNCNAME: An array variable containing the names of all shell functions currently in the execution call stack. run "man bash" and search the variable name to get detailed info. In function backtrace, it gets the call deepth from ${#BASH_SOURCE[@]}, then print the call stack from top to down. 
In function die, backtrace is called with parameter "2" to ignore the call trace of function "die" and "backtrace". I add a broken function in lib/database, and call it in stack.sh, the output looks like this: [Call Trace] ./stack.sh:104:broken /home/kui/osd/devstack/lib/database:24:die [ERROR] ./stack.sh:24 It is broken Fixes bug # 1207660 Change-Id: I04d0b3ccf783c769e41582c20f48694c19917334 --- functions | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/functions b/functions index 262f70f29f..d28efefe55 100644 --- a/functions +++ b/functions @@ -76,6 +76,19 @@ function cp_it { } +# Prints backtrace info +# filename:lineno:function +function backtrace { + local level=$1 + local deep=$((${#BASH_SOURCE[@]} - 1)) + echo "[Call Trace]" + while [ $level -le $deep ]; do + echo "${BASH_SOURCE[$deep]}:${BASH_LINENO[$deep-1]}:${FUNCNAME[$deep-1]}" + deep=$((deep - 1)) + done +} + + # Prints line number and "message" then exits # die $LINENO "message" function die() { @@ -85,6 +98,7 @@ function die() { if [ $exitcode == 0 ]; then exitcode=1 fi + backtrace 2 err $line "$*" exit $exitcode } From 17df0775edaf1d45c59a41147779fd65fd986911 Mon Sep 17 00:00:00 2001 From: Kui Shi Date: Fri, 2 Aug 2013 17:55:41 +0800 Subject: [PATCH 0263/4704] misleading source filename in error message when ./stack.sh encounters error, the output may look like this: [ERROR] ./stack.sh:698 nova-api did not start The source filename is wrong. Actually, it should be like this: [ERROR] //lib/nova:698 nova-api did not start stack.sh uses bash as interpreter, which define "Shell Variables" BASH_SOURCE: An array variable whose members are the source filenames where the corresponding shell function names in the FUNCNAME array variable are defined. The shell function ${FUNCNAME[$i]} is defined in the file ${BASH_SOURCE[$i]} and called from ${BASH_SOURCE[$i+1]}. The function "err" is called by function "die" ( and "err_if_not_set", and "err_if_not_set" is not used at all). 
${BASH_SOURCE[2]} will ignore the deepest two call entries, which corresponding to the shell functions: "err" and "die". In one sentence, this change will print the source filename where the function is defined and exits via function "die". Fixes bug #1207658 Change-Id: I2aa6642c5cf4cfe781afe278b3dec3e7cba277fa --- functions | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/functions b/functions index 262f70f29f..43ff1a7731 100644 --- a/functions +++ b/functions @@ -113,7 +113,7 @@ function err() { local exitcode=$? errXTRACE=$(set +o | grep xtrace) set +o xtrace - local msg="[ERROR] $0:$1 $2" + local msg="[ERROR] ${BASH_SOURCE[2]}:$1 $2" echo $msg 1>&2; if [[ -n ${SCREEN_LOGDIR} ]]; then echo $msg >> "${SCREEN_LOGDIR}/error.log" From d3a18ae1ecc757008ee7686f709209a930d90ab8 Mon Sep 17 00:00:00 2001 From: Arata Notsu Date: Fri, 2 Aug 2013 20:58:56 +0900 Subject: [PATCH 0264/4704] Update baremetal to work with the latest DIB Adjust DevStack to the current DIB's naming to kernel/ramdisk. BM_HOST_CURRENT_KERNEL is removed since the kernel is extracted from a diskimage with the ramdisk and the host's kernel is not used. BM_BUILD_DEPLOY_RAMDISK is added to control whether use DIB or not. If you set BM_BUILD_DEPLOY_RAMDISK=False, you must BM_DEPLOY_KERNEL and BM_DEPLOY_RAMDISK to point existing deploy kernel/ramdisk. 
Fixes bug 1207719 Change-Id: I62af0b1942b07ac12665c0ed3619d64c1cccbe1f --- lib/baremetal | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/lib/baremetal b/lib/baremetal index 145544d40c..8f6c3f1660 100644 --- a/lib/baremetal +++ b/lib/baremetal @@ -138,9 +138,12 @@ BM_FLAVOR_ARCH=${BM_FLAVOR_ARCH:-$BM_CPU_ARCH} BM_IMAGE_BUILD_DIR=${BM_IMAGE_BUILD_DIR:-$DEST/diskimage-builder} BM_POSEUR_DIR=${BM_POSEUR_DIR:-$DEST/bm_poseur} -BM_HOST_CURRENT_KERNEL=$(uname -r) -BM_DEPLOY_RAMDISK=${BM_DEPLOY_RAMDISK:-bm-deploy-$BM_HOST_CURRENT_KERNEL-initrd} -BM_DEPLOY_KERNEL=${BM_DEPLOY_KERNEL:-bm-deploy-$BM_HOST_CURRENT_KERNEL-vmlinuz} +# Use DIB to create deploy ramdisk and kernel. +BM_BUILD_DEPLOY_RAMDISK=`trueorfalse True $BM_BUILD_DEPLOY_RAMDISK` +# If not use DIB, these files are used as deploy ramdisk/kernel. +# (The value must be a relative path from $TOP_DIR/files/) +BM_DEPLOY_RAMDISK=${BM_DEPLOY_RAMDISK:-} +BM_DEPLOY_KERNEL=${BM_DEPLOY_KERNEL:-} # If you need to add any extra flavors to the deploy ramdisk image # eg, specific network drivers, specify them here @@ -233,13 +236,13 @@ function configure_baremetal_nova_dirs() { function upload_baremetal_deploy() { token=$1 - if [ ! -e $TOP_DIR/files/$BM_DEPLOY_KERNEL -a -e /boot/vmlinuz-$BM_HOST_CURRENT_KERNEL ]; then - sudo cp /boot/vmlinuz-$BM_HOST_CURRENT_KERNEL $TOP_DIR/files/$BM_DEPLOY_KERNEL - sudo chmod a+r $TOP_DIR/files/$BM_DEPLOY_KERNEL - fi - if [ ! -e $TOP_DIR/files/$BM_DEPLOY_RAMDISK ]; then - $BM_IMAGE_BUILD_DIR/bin/ramdisk-image-create $BM_DEPLOY_FLAVOR deploy \ - -o $TOP_DIR/files/$BM_DEPLOY_RAMDISK -k $BM_HOST_CURRENT_KERNEL + if [ "$BM_BUILD_DEPLOY_RAMDISK" = "True" ]; then + BM_DEPLOY_KERNEL=bm-deploy.kernel + BM_DEPLOY_RAMDISK=bm-deploy.initramfs + if [ ! -e "$TOP_DIR/files/$BM_DEPLOY_KERNEL" -o ! 
-e "$TOP_DIR/files/$BM_DEPLOY_RAMDISK" ]; then + $BM_IMAGE_BUILD_DIR/bin/ramdisk-image-create $BM_DEPLOY_FLAVOR deploy \ + -o $TOP_DIR/files/bm-deploy + fi fi # load them into glance From 408a4a7d1c24322b35f9a8617c7c62adeeee0dbe Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Fri, 2 Aug 2013 15:43:47 -0400 Subject: [PATCH 0265/4704] Install things in setup_develop with pip -e We have some complex logic in here to try to do the right things with the requirements before doing the install of the package which still winds up being wrong in some cases. Since having written this code, we've learned that the logic we're trying to achieve is actually what pip install -e does. So just use that. We have to follow up with a chown of the resulting egg-info directory, because the sudo command will cause it to be written by root, which prevents subsequent commands from operating without privilege in the directory. Change-Id: Iffd068c94ef84475ebb30758bcf612075d225bea --- functions | 26 ++++++++++---------------- 1 file changed, 10 insertions(+), 16 deletions(-) diff --git a/functions b/functions index 262f70f29f..4b8a06e738 100644 --- a/functions +++ b/functions @@ -1126,9 +1126,9 @@ function service_check() { } -# ``pip install`` the dependencies of the package before ``setup.py develop`` -# so pip and not distutils processes the dependency chain -# Uses globals ``TRACK_DEPENDES``, ``*_proxy` +# ``pip install -e`` the package, which processes the dependencies +# using pip before running `setup.py develop` +# Uses globals ``STACK_USER``, ``TRACK_DEPENDES``, ``*_proxy` # setup_develop directory function setup_develop() { if [[ $TRACK_DEPENDS = True ]]; then @@ -1136,19 +1136,13 @@ function setup_develop() { else SUDO_CMD="sudo" fi - for reqs_file in $1/requirements.txt $1/tools/pip-requires ; do - if [ -f $reqs_file ] ; then - pip_install -r $reqs_file - fi - done - (cd $1; \ - python setup.py egg_info; \ - $SUDO_CMD \ - HTTP_PROXY=$http_proxy \ - HTTPS_PROXY=$https_proxy \ - 
NO_PROXY=$no_proxy \ - python setup.py develop \ - ) + $SUDO_CMD \ + HTTP_PROXY=$http_proxy \ + HTTPS_PROXY=$https_proxy \ + NO_PROXY=$no_proxy \ + pip install -e $1 + # ensure that further actions can do things like setup.py sdist + $SUDO_CMD chown -R $STACK_USER $1/*.egg-info } From 60aba9cb6a251b72a73a0c52f6c77c5356d6dcfc Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Fri, 2 Aug 2013 17:12:45 -0400 Subject: [PATCH 0266/4704] Remove installation of python-netaddr System and pip installation of this conflit and cause breakages. Change-Id: I87a03f88a134683310d6ec0bd62d86c5007fcd72 --- files/apts/general | 1 - files/apts/horizon | 1 - files/apts/neutron | 1 - files/apts/nova | 1 - files/rpms-suse/general | 1 - files/rpms-suse/horizon | 1 - files/rpms-suse/neutron | 1 - files/rpms-suse/nova | 1 - files/rpms/general | 1 - files/rpms/horizon | 1 - files/rpms/neutron | 1 - files/rpms/nova | 1 - 12 files changed, 12 deletions(-) diff --git a/files/apts/general b/files/apts/general index ec6dd0db4b..fdf8e20ad5 100644 --- a/files/apts/general +++ b/files/apts/general @@ -20,4 +20,3 @@ tcpdump euca2ools # only for testing client tar python-cmd2 # dist:precise -python-netaddr diff --git a/files/apts/horizon b/files/apts/horizon index e1ce85f7d5..0865931d44 100644 --- a/files/apts/horizon +++ b/files/apts/horizon @@ -21,4 +21,3 @@ python-cherrypy3 # why? 
python-migrate nodejs nodejs-legacy # dist:quantal -python-netaddr diff --git a/files/apts/neutron b/files/apts/neutron index 64fc1bfb2d..0f4b69f8ef 100644 --- a/files/apts/neutron +++ b/files/apts/neutron @@ -9,7 +9,6 @@ python-iso8601 python-paste python-routes python-suds -python-netaddr python-pastedeploy python-greenlet python-kombu diff --git a/files/apts/nova b/files/apts/nova index 6a7ef74c59..ae925c3293 100644 --- a/files/apts/nova +++ b/files/apts/nova @@ -30,7 +30,6 @@ python-greenlet python-libvirt # NOPRIME python-libxml2 python-routes -python-netaddr python-numpy # used by websockify for spice console python-pastedeploy python-eventlet diff --git a/files/rpms-suse/general b/files/rpms-suse/general index 93711ff784..f28267c044 100644 --- a/files/rpms-suse/general +++ b/files/rpms-suse/general @@ -7,7 +7,6 @@ openssh openssl psmisc python-cmd2 # dist:opensuse-12.3 -python-netaddr python-pip python-pylint python-unittest2 diff --git a/files/rpms-suse/horizon b/files/rpms-suse/horizon index 405fb7ac56..73932acc1d 100644 --- a/files/rpms-suse/horizon +++ b/files/rpms-suse/horizon @@ -15,7 +15,6 @@ python-dateutil python-eventlet python-kombu python-mox -python-netaddr python-nose python-pylint python-sqlalchemy-migrate diff --git a/files/rpms-suse/neutron b/files/rpms-suse/neutron index aadb156732..e9ccf5921b 100644 --- a/files/rpms-suse/neutron +++ b/files/rpms-suse/neutron @@ -10,7 +10,6 @@ python-greenlet python-iso8601 python-kombu python-mysql -python-netaddr python-Paste python-PasteDeploy python-pyudev diff --git a/files/rpms-suse/nova b/files/rpms-suse/nova index edb1a8a8e6..ee4917d702 100644 --- a/files/rpms-suse/nova +++ b/files/rpms-suse/nova @@ -35,7 +35,6 @@ python-lockfile python-lxml # needed for glance which is needed for nova --- this shouldn't be here python-mox python-mysql -python-netaddr python-numpy # needed by websockify for spice console python-paramiko python-python-gflags diff --git a/files/rpms/general b/files/rpms/general index 
5cb3e28c6b..9fa305c992 100644 --- a/files/rpms/general +++ b/files/rpms/general @@ -11,7 +11,6 @@ libxml2-devel # dist:rhel6 [2] libxslt-devel # dist:rhel6 [2] psmisc pylint -python-netaddr python-pip python-prettytable # dist:rhel6 [1] python-unittest2 diff --git a/files/rpms/horizon b/files/rpms/horizon index b844d98665..0ca18cadb7 100644 --- a/files/rpms/horizon +++ b/files/rpms/horizon @@ -16,7 +16,6 @@ python-httplib2 python-kombu python-migrate python-mox -python-netaddr python-nose python-paste #dist:f16,f17,f18,f19 python-paste-deploy #dist:f16,f17,f18,f19 diff --git a/files/rpms/neutron b/files/rpms/neutron index 6a8fd3639c..a7700f77d4 100644 --- a/files/rpms/neutron +++ b/files/rpms/neutron @@ -10,7 +10,6 @@ python-eventlet python-greenlet python-iso8601 python-kombu -python-netaddr #rhel6 gets via pip python-paste # dist:f16,f17,f18,f19 python-paste-deploy # dist:f16,f17,f18,f19 diff --git a/files/rpms/nova b/files/rpms/nova index 8d8a0b875a..c99f3defc8 100644 --- a/files/rpms/nova +++ b/files/rpms/nova @@ -28,7 +28,6 @@ python-kombu python-lockfile python-migrate python-mox -python-netaddr python-paramiko # dist:f16,f17,f18,f19 # ^ on RHEL, brings in python-crypto which conflicts with version from # pip we need From 3ea28ece4a71b0137050314af0e4f3e55046db11 Mon Sep 17 00:00:00 2001 From: Kyle Mestery Date: Mon, 5 Aug 2013 12:24:32 +0000 Subject: [PATCH 0267/4704] Correctly setup ML2 mechanism_drivers The ML2 code in devstack was not correctly configuring the mechanism_drivers when asked to do so. This corrects the typo in the variable assignment, and also actually sets these in the plugin configuration file. 
Fixes bug 1208557 Change-Id: I3746ca099f45d44dcf1cc2ca1c3726745b8e8a1d --- lib/neutron_plugins/ml2 | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2 index ff49d8e6b8..00bd716309 100644 --- a/lib/neutron_plugins/ml2 +++ b/lib/neutron_plugins/ml2 @@ -20,7 +20,7 @@ Q_AGENT=${Q_AGENT:-openvswitch} source $TOP_DIR/lib/neutron_plugins/${Q_AGENT}_agent # List of MechanismDrivers to load -Q_ML2_PLUGIN_MECHANISM_DRIVERS=${Q_PLUGIN_MECHANISM_DRIVERS:-} +Q_ML2_PLUGIN_MECHANISM_DRIVERS=${Q_ML2_PLUGIN_MECHANISM_DRIVERS:-} # List of Type Drivers to load Q_ML2_PLUGIN_TYPE_DRIVERS=${Q_ML2_PLUGIN_TYPE_DRIVERS:-local,flat,vlan,gre,vxlan} # Default GRE TypeDriver options @@ -92,6 +92,8 @@ function neutron_plugin_configure_service() { # Since we enable the tunnel TypeDrivers, also enable a local_ip iniset /$Q_PLUGIN_CONF_FILE ovs local_ip $HOST_IP + populate_ml2_config mechanism_drivers=$Q_ML2_PLUGIN_MECHANISM_DRIVERS /$Q_PLUGIN_CONF_FILE ml2 + populate_ml2_config type_drivers=$Q_ML2_PLUGIN_TYPE_DRIVERS /$Q_PLUGIN_CONF_FILE ml2 populate_ml2_config $Q_SRV_EXTRA_OPTS /$Q_PLUGIN_CONF_FILE ml2 From 3f1d0e2688ea471a467730d5677dd560671071d1 Mon Sep 17 00:00:00 2001 From: James Kyle Date: Fri, 2 Aug 2013 10:40:32 -0700 Subject: [PATCH 0268/4704] Ensures cross compatibility for zsh and bash. 
Fixes bug #1207853 Change-Id: I8ab3959ac8dbb5bb199e325bfdfdc513b4327410 --- functions | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/functions b/functions index 262f70f29f..e498b3d5a0 100644 --- a/functions +++ b/functions @@ -930,7 +930,7 @@ function pip_install { CMD_PIP=$(get_pip_command) fi - if [[ is_fedora && $DISTRO =~ (rhel6) ]]; then + if is_fedora && [[ $DISTRO =~ (rhel6) ]]; then # RHEL6 pip by default doesn't have this (was introduced # around 0.8.1 or so) PIP_USE_MIRRORS=${PIP_USE_MIRRORS:-False} From 6c84463071e1ff23e20e4ef4fb863aba0732bebc Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 31 Jul 2013 06:50:14 -0400 Subject: [PATCH 0269/4704] Update projects to global requirements before setup We've agreed that we should be using global requirements, so force projects to be updated to global requirements before we test them. Co-Authored-With: Monty Taylor Change-Id: I0652f639673e600fd7508a9869ec85f8d5ce4518 --- functions | 28 +++++++++++++++++++++------- stack.sh | 2 +- 2 files changed, 22 insertions(+), 8 deletions(-) diff --git a/functions b/functions index 4b8a06e738..b8fd6880ee 100644 --- a/functions +++ b/functions @@ -1131,18 +1131,32 @@ function service_check() { # Uses globals ``STACK_USER``, ``TRACK_DEPENDES``, ``*_proxy` # setup_develop directory function setup_develop() { + local project_dir=$1 if [[ $TRACK_DEPENDS = True ]]; then SUDO_CMD="env" else SUDO_CMD="sudo" fi - $SUDO_CMD \ - HTTP_PROXY=$http_proxy \ - HTTPS_PROXY=$https_proxy \ - NO_PROXY=$no_proxy \ - pip install -e $1 - # ensure that further actions can do things like setup.py sdist - $SUDO_CMD chown -R $STACK_USER $1/*.egg-info + + echo "cd $REQUIREMENTS_DIR; $SUDO_CMD python update.py $project_dir" + + (cd $REQUIREMENTS_DIR; \ + $SUDO_CMD python update.py $project_dir) + + for reqs_file in $project_dir/requirements.txt $project_dir/tools/pip-requires ; do + if [ -f $reqs_file ] ; then + pip_install -r $reqs_file + fi + done + + (cd $project_dir; \ + 
python setup.py egg_info; \ + $SUDO_CMD \ + HTTP_PROXY=$http_proxy \ + HTTPS_PROXY=$https_proxy \ + NO_PROXY=$no_proxy \ + python setup.py develop \ + ) } diff --git a/stack.sh b/stack.sh index 5ba60d2430..c9d394c53d 100755 --- a/stack.sh +++ b/stack.sh @@ -647,7 +647,7 @@ TRACK_DEPENDS=${TRACK_DEPENDS:-False} # Install python packages into a virtualenv so that we can track them if [[ $TRACK_DEPENDS = True ]]; then echo_summary "Installing Python packages into a virtualenv $DEST/.venv" - install_package python-virtualenv + pip_install -U virtualenv rm -rf $DEST/.venv virtualenv --system-site-packages $DEST/.venv From cf2d0d3db9bda81a6795d5e57e893fea234b462c Mon Sep 17 00:00:00 2001 From: stack Date: Mon, 5 Aug 2013 04:51:56 -0400 Subject: [PATCH 0270/4704] Add keystoneclient support for cinder. Add an ability to ask keystone about users and groups through keystoneclient in cinder. blueprint volume-acl Change-Id: Ice261e9709833d057722b4f13c404df54e10b204 --- lib/cinder | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/cinder b/lib/cinder index ef7e3dc9cc..3472dcd519 100644 --- a/lib/cinder +++ b/lib/cinder @@ -296,6 +296,10 @@ function configure_cinder() { -e 's/snapshot_autoextend_percent =.*/snapshot_autoextend_percent = 20/' \ /etc/lvm/lvm.conf fi + iniset $CINDER_CONF keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT + iniset $CINDER_CONF keystone_authtoken admin_user cinder + iniset $CINDER_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME + iniset $CINDER_CONF keystone_authtoken admin_password $SERVICE_PASSWORD } From dd64988ff4c0a174691cc7d45e3eaef8acdc4fc8 Mon Sep 17 00:00:00 2001 From: Salvatore Orlando Date: Mon, 5 Aug 2013 08:56:17 -0700 Subject: [PATCH 0271/4704] Run Neutron migrations after creating schema Partial-Bug 1207402 This patch executes Neutron DB migrations (based on alembic) before starting the Neutron services, similar to what happens for most of other openstack 
projects managed by devstack. This will ensure devstack always creates correctly version-stamped Neutron databases. Change-Id: Ia941c426a1563bcc4cb5eae64ea30c0bf7677220 --- lib/neutron | 10 +++------- stack.sh | 5 ++++- 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/lib/neutron b/lib/neutron index 835f900425..c546f378b0 100644 --- a/lib/neutron +++ b/lib/neutron @@ -367,7 +367,9 @@ function create_neutron_initial_network() { # init_neutron() - Initialize databases, etc. function init_neutron() { - : + recreate_database $Q_DB_NAME utf8 + # Run Neutron db migrations + $NEUTRON_BIN_DIR/neutron-db-manage --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE upgrade head } # install_neutron() - Collect source and prepare @@ -614,12 +616,6 @@ function _configure_neutron_service() { cp $NEUTRON_DIR/etc/api-paste.ini $Q_API_PASTE_FILE cp $NEUTRON_DIR/etc/policy.json $Q_POLICY_FILE - if is_service_enabled $DATABASE_BACKENDS; then - recreate_database $Q_DB_NAME utf8 - else - die $LINENO "A database must be enabled in order to use the $Q_PLUGIN Neutron plugin." 
- fi - # Update either configuration file with plugin iniset $NEUTRON_CONF DEFAULT core_plugin $Q_PLUGIN_CLASS diff --git a/stack.sh b/stack.sh index 5ba60d2430..01f948e6b7 100755 --- a/stack.sh +++ b/stack.sh @@ -939,7 +939,10 @@ if is_service_enabled neutron; then echo_summary "Configuring Neutron" configure_neutron - init_neutron + # Run init_neutron only on the node hosting the neutron API server + if is_service_enabled $DATABASE_BACKENDS && is_service_enabled q-svc; then + init_neutron + fi fi # Some Neutron plugins require network controllers which are not From 46287d8d71f358b330a5b2c28dace72cbdd3492f Mon Sep 17 00:00:00 2001 From: Bob Ball Date: Tue, 30 Jul 2013 09:43:17 +0100 Subject: [PATCH 0272/4704] Some bash versions don't support negative lengths Fix to make the service string work on these versions of bash too Change-Id: Ibb9868ea7bf44480be76a8ea9d7d9fbc278a8ef3 --- functions | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/functions b/functions index b8fd6880ee..632cd46b2f 100644 --- a/functions +++ b/functions @@ -1116,7 +1116,7 @@ function service_check() { for service in $failures; do service=`basename $service` - service=${service::-8} + service=${service%.failure} echo "Error: Service $service is not running" done From 1a794a3d9e8ada8a4ac671cba392d6ed53d99e18 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Tue, 6 Aug 2013 15:25:01 +0200 Subject: [PATCH 0273/4704] Show ip address before associating address In order to see is the instance has a fixed ip at the moment. 
Change-Id: I506f2f099a03e8b63f1f2daeb564ed72f1322a68 --- exercises/euca.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/exercises/euca.sh b/exercises/euca.sh index 5b0d1ba493..b8b283a8fb 100755 --- a/exercises/euca.sh +++ b/exercises/euca.sh @@ -129,7 +129,8 @@ else # Allocate floating address FLOATING_IP=`euca-allocate-address | cut -f2` die_if_not_set $LINENO FLOATING_IP "Failure allocating floating IP" - + # describe all instances at this moment + euca-describe-instances # Associate floating address euca-associate-address -i $INSTANCE $FLOATING_IP || \ die $LINENO "Failure associating address $FLOATING_IP to $INSTANCE" From b5bbaac09a2424da68288f90f7de415b8f7b48e8 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Tue, 6 Aug 2013 10:35:02 -0300 Subject: [PATCH 0274/4704] Re-enble pip -e in setup_develop We had a bad merge somewhere along the way which reverted the pip -e part of things. Replace it. Change-Id: I620bea80eac7ad53b5bfb79dd2b21c29ad1b2267 --- functions | 19 ++++--------------- 1 file changed, 4 insertions(+), 15 deletions(-) diff --git a/functions b/functions index b8fd6880ee..dbca67070a 100644 --- a/functions +++ b/functions @@ -1128,7 +1128,7 @@ function service_check() { # ``pip install -e`` the package, which processes the dependencies # using pip before running `setup.py develop` -# Uses globals ``STACK_USER``, ``TRACK_DEPENDES``, ``*_proxy` +# Uses globals ``STACK_USER``, ``TRACK_DEPENDS``, ``REQUIREMENTS_DIR`` # setup_develop directory function setup_develop() { local project_dir=$1 @@ -1143,20 +1143,9 @@ function setup_develop() { (cd $REQUIREMENTS_DIR; \ $SUDO_CMD python update.py $project_dir) - for reqs_file in $project_dir/requirements.txt $project_dir/tools/pip-requires ; do - if [ -f $reqs_file ] ; then - pip_install -r $reqs_file - fi - done - - (cd $project_dir; \ - python setup.py egg_info; \ - $SUDO_CMD \ - HTTP_PROXY=$http_proxy \ - HTTPS_PROXY=$https_proxy \ - NO_PROXY=$no_proxy \ - python setup.py develop \ 
- ) + pip_install -e $project_dir + # ensure that further actions can do things like setup.py sdist + $SUDO_CMD chown -R $STACK_USER $1/*.egg-info } From 985debe5cee5ce8c1847b192b98224a66a85c3c2 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Tue, 6 Aug 2013 11:45:07 -0300 Subject: [PATCH 0275/4704] Don't uninstall packages when on xenserver It turns out our fix for package madness breaks when running on xenserver. Put in a simple exclusion for that case until we figure out a more total and systemic solution to everyone's problems. Change-Id: I491917015337f83206937c53949a62aa63f4679f --- stack.sh | 40 +++++++++++++++++++++++----------------- 1 file changed, 23 insertions(+), 17 deletions(-) diff --git a/stack.sh b/stack.sh index c9d394c53d..95172c3008 100755 --- a/stack.sh +++ b/stack.sh @@ -601,23 +601,29 @@ if [[ is_fedora && $DISTRO =~ (rhel6) ]]; then sudo setenforce 0 fi - # An old version of ``python-crypto`` (2.0.1) may be installed on a - # fresh system via Anaconda and the dependency chain - # ``cas`` -> ``python-paramiko`` -> ``python-crypto``. - # ``pip uninstall pycrypto`` will remove the packaged ``.egg-info`` file - # but leave most of the actual library files behind in ``/usr/lib64/python2.6/Crypto``. - # Later ``pip install pycrypto`` will install over the packaged files resulting - # in a useless mess of old, rpm-packaged files and pip-installed files. - # Remove the package so that ``pip install python-crypto`` installs cleanly. - # Note: other RPM packages may require ``python-crypto`` as well. For example, - # RHEL6 does not install ``python-paramiko packages``. - uninstall_package python-crypto - - # A similar situation occurs with ``python-lxml``, which is required by - # ``ipa-client``, an auditing package we don't care about. The - # build-dependencies needed for ``pip install lxml`` (``gcc``, - # ``libxml2-dev`` and ``libxslt-dev``) are present in ``files/rpms/general``. 
- uninstall_package python-lxml + # The following workarounds break xenserver + if [ "$VIRT_DRIVER" != 'xenserver' ]; then + # An old version of ``python-crypto`` (2.0.1) may be installed on a + # fresh system via Anaconda and the dependency chain + # ``cas`` -> ``python-paramiko`` -> ``python-crypto``. + # ``pip uninstall pycrypto`` will remove the packaged ``.egg-info`` + # file but leave most of the actual library files behind in + # ``/usr/lib64/python2.6/Crypto``. Later ``pip install pycrypto`` + # will install over the packaged files resulting + # in a useless mess of old, rpm-packaged files and pip-installed files. + # Remove the package so that ``pip install python-crypto`` installs + # cleanly. + # Note: other RPM packages may require ``python-crypto`` as well. + # For example, RHEL6 does not install ``python-paramiko packages``. + uninstall_package python-crypto + + # A similar situation occurs with ``python-lxml``, which is required by + # ``ipa-client``, an auditing package we don't care about. The + # build-dependencies needed for ``pip install lxml`` (``gcc``, + # ``libxml2-dev`` and ``libxslt-dev``) are present in + # ``files/rpms/general``. + uninstall_package python-lxml + fi # If the ``dbus`` package was installed by DevStack dependencies the # uuid may not be generated because the service was never started (PR#598200), From 32e1603e9581746d0a4020b2db9f5b399c1a26c8 Mon Sep 17 00:00:00 2001 From: Bob Ball Date: Mon, 29 Jul 2013 15:51:43 +0100 Subject: [PATCH 0276/4704] Default to xenserver driver if xenserver-core is installed Fixes bug 1209205 Change-Id: I63085cc87610a59fc48e519e4351c9233b3961be --- stackrc | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/stackrc b/stackrc index 1e08d1614d..c81906ac8c 100644 --- a/stackrc +++ b/stackrc @@ -180,8 +180,11 @@ SPICE_BRANCH=${SPICE_BRANCH:-master} # Nova hypervisor configuration. 
We default to libvirt with **kvm** but will # drop back to **qemu** if we are unable to load the kvm module. ``stack.sh`` can -# also install an **LXC**, **OpenVZ** or **XenAPI** based system. -VIRT_DRIVER=${VIRT_DRIVER:-libvirt} +# also install an **LXC**, **OpenVZ** or **XenAPI** based system. If xenserver-core +# is installed, the default will be XenAPI +DEFAULT_VIRT_DRIVER=libvirt +is_package_installed xenserver-core && DEFAULT_VIRT_DRIVER=xenserver +VIRT_DRIVER=${VIRT_DRIVER:-$DEFAULT_VIRT_DRIVER} case "$VIRT_DRIVER" in libvirt) LIBVIRT_TYPE=${LIBVIRT_TYPE:-kvm} From 389b3a02335887a3d6dbc73b0d0b8476a0f69c33 Mon Sep 17 00:00:00 2001 From: Steve Baker Date: Thu, 1 Aug 2013 10:44:09 +1200 Subject: [PATCH 0277/4704] Support heat in standalone mode. The following localrc will launch only heat in standalone mode and allow it to provision within the openstack specified by the configured keystone endpoint: HEAT_STANDALONE=True ENABLED_SERVICES=rabbit,mysql,heat,h-api,h-api-cfn,h-api-cw,h-eng KEYSTONE_SERVICE_HOST=... KEYSTONE_AUTH_HOST=... Change-Id: I0d8a541fc9d592577423b074c789829f8b8d6702 --- README.md | 17 +++++++++++++++++ lib/heat | 6 ++++-- 2 files changed, 21 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 46d3f96a9f..95c90bc924 100644 --- a/README.md +++ b/README.md @@ -181,6 +181,23 @@ The above will default in devstack to using the OVS on each compute host. To cha Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS VLAN TypeDriver options. Defaults to none. Q_AGENT_EXTRA_AGENT_OPTS Extra configuration options to pass to the OVS or LinuxBridge Agent. +# Heat + +Heat is disabled by default. To enable it you'll need the following settings +in your `localrc` : + + enable_service heat h-api h-api-cfn h-api-cw h-eng + +Heat can also run in standalone mode, and be configured to orchestrate +on an external OpenStack cloud. 
To launch only Heat in standalone mode +you'll need the following settings in your `localrc` : + + disable_all_services + enable_service rabbit mysql heat h-api h-api-cfn h-api-cw h-eng + HEAT_STANDALONE=True + KEYSTONE_SERVICE_HOST=... + KEYSTONE_AUTH_HOST=... + # Tempest If tempest has been successfully configured, a basic set of smoke tests can be run as follows: diff --git a/lib/heat b/lib/heat index 85177738dc..1b715f2b55 100644 --- a/lib/heat +++ b/lib/heat @@ -30,7 +30,7 @@ set +o xtrace HEAT_DIR=$DEST/heat HEATCLIENT_DIR=$DEST/python-heatclient HEAT_AUTH_CACHE_DIR=${HEAT_AUTH_CACHE_DIR:-/var/cache/heat} - +HEAT_STANDALONE=`trueorfalse False $HEAT_STANDALONE` # Functions # --------- @@ -83,6 +83,7 @@ function configure_heat() { iniset $HEAT_API_CFN_CONF keystone_authtoken signing_dir $HEAT_AUTH_CACHE_DIR/api-cfn iniset $HEAT_API_CFN_CONF ec2authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0 iniset $HEAT_API_CFN_CONF ec2authtoken keystone_ec2_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ec2tokens + [[ "$HEAT_STANDALONE" = "True" ]] && iniset $HEAT_API_CFN_CONF paste_deploy flavor standalone iniset_rpc_backend heat $HEAT_API_CFN_CONF DEFAULT @@ -104,7 +105,7 @@ function configure_heat() { iniset $HEAT_API_CONF keystone_authtoken signing_dir $HEAT_AUTH_CACHE_DIR/api iniset $HEAT_API_CONF ec2authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0 iniset $HEAT_API_CONF ec2authtoken keystone_ec2_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ec2tokens - + [[ "$HEAT_STANDALONE" = "True" ]] && iniset $HEAT_API_CONF paste_deploy flavor standalone iniset_rpc_backend heat $HEAT_API_CONF DEFAULT @@ -142,6 +143,7 @@ function configure_heat() { iniset $HEAT_API_CW_CONF keystone_authtoken signing_dir $HEAT_AUTH_CACHE_DIR/api-cloudwatch iniset $HEAT_API_CW_CONF ec2authtoken auth_uri 
$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0 iniset $HEAT_API_CW_CONF ec2authtoken keystone_ec2_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ec2tokens + [[ "$HEAT_STANDALONE" = "True" ]] && iniset $HEAT_API_CW_CONF paste_deploy flavor standalone iniset_rpc_backend heat $HEAT_API_CW_CONF DEFAULT From 62d1d698a0c1459e2519938259175cfed86f4a55 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Thu, 1 Aug 2013 17:40:40 -0500 Subject: [PATCH 0278/4704] Add tools/install_pip.sh Install a known working recent version of pip that handles installation dependencies more correctly than before. Extract to a separate script so it can be used apart from stack.sh. * Install distro setuptools if it not already present * Install pip from source tarball as get-pip.py proved to be unreliable * Remove python-distribute and python-pip from all prereq files, move python-setuptools to 'general' * Remove the earlier unfubar_setuptppls() call that attenpted to fix this * Only update requirements.txt when no changes in repo Tested on Precise, F18 and CentOS6. * Fedora and RHEL allow pip to install packages ON TOP OF RPM-installed packages. THIS IS BROKEN. And is one reason we have to be so picky about order and so forth. 
Change-Id: Ibb4b42119dc2e51577c77bbbbffb110863e5324d --- files/apts/general | 2 +- files/apts/keystone | 1 - files/apts/ryu | 1 - files/apts/swift | 1 - files/rpms-suse/general | 2 +- files/rpms-suse/keystone | 2 - files/rpms-suse/ryu | 2 - files/rpms-suse/swift | 2 - files/rpms/general | 2 +- files/rpms/keystone | 3 +- files/rpms/ryu | 1 - files/rpms/swift | 1 - functions | 7 ++- stack.sh | 24 ++++---- tools/install_pip.sh | 118 +++++++++++++++++++++++++++++++++++++++ 15 files changed, 139 insertions(+), 30 deletions(-) create mode 100755 tools/install_pip.sh diff --git a/files/apts/general b/files/apts/general index fdf8e20ad5..fcf0b5b06e 100644 --- a/files/apts/general +++ b/files/apts/general @@ -1,6 +1,6 @@ bridge-utils pylint -python-pip +python-setuptools screen unzip wget diff --git a/files/apts/keystone b/files/apts/keystone index c98409faaf..564921b78b 100644 --- a/files/apts/keystone +++ b/files/apts/keystone @@ -1,4 +1,3 @@ -python-setuptools python-dev python-lxml python-pastescript diff --git a/files/apts/ryu b/files/apts/ryu index 4a4fc523b5..e8ed926c1e 100644 --- a/files/apts/ryu +++ b/files/apts/ryu @@ -1,4 +1,3 @@ -python-setuptools python-gevent python-gflags python-netifaces diff --git a/files/apts/swift b/files/apts/swift index 1c283cf6f0..37d5bc049e 100644 --- a/files/apts/swift +++ b/files/apts/swift @@ -10,7 +10,6 @@ python-greenlet python-netifaces python-nose python-pastedeploy -python-setuptools python-simplejson python-webob python-xattr diff --git a/files/rpms-suse/general b/files/rpms-suse/general index f28267c044..355af885d3 100644 --- a/files/rpms-suse/general +++ b/files/rpms-suse/general @@ -6,8 +6,8 @@ iputils openssh openssl psmisc +python-setuptools # instead of python-distribute; dist:sle11sp2 python-cmd2 # dist:opensuse-12.3 -python-pip python-pylint python-unittest2 python-virtualenv diff --git a/files/rpms-suse/keystone b/files/rpms-suse/keystone index 7d9a7bfe9b..403d82f926 100644 --- a/files/rpms-suse/keystone +++ 
b/files/rpms-suse/keystone @@ -7,8 +7,6 @@ python-Routes python-SQLAlchemy python-WebOb python-devel -python-distribute -python-setuptools # instead of python-distribute; dist:sle11sp2 python-greenlet python-lxml python-mysql diff --git a/files/rpms-suse/ryu b/files/rpms-suse/ryu index 90b43a47d9..3797b6cb44 100644 --- a/files/rpms-suse/ryu +++ b/files/rpms-suse/ryu @@ -1,5 +1,3 @@ -python-distribute -python-setuptools # instead of python-distribute; dist:sle11sp2 python-Sphinx python-gevent python-netifaces diff --git a/files/rpms-suse/swift b/files/rpms-suse/swift index db379bbcdf..f3c95aad98 100644 --- a/files/rpms-suse/swift +++ b/files/rpms-suse/swift @@ -6,8 +6,6 @@ python-WebOb python-configobj python-coverage python-devel -python-distribute -python-setuptools # instead of python-distribute; dist:sle11sp2 python-eventlet python-greenlet python-netifaces diff --git a/files/rpms/general b/files/rpms/general index 9fa305c992..2db31d1db0 100644 --- a/files/rpms/general +++ b/files/rpms/general @@ -11,7 +11,7 @@ libxml2-devel # dist:rhel6 [2] libxslt-devel # dist:rhel6 [2] psmisc pylint -python-pip +python-setuptools python-prettytable # dist:rhel6 [1] python-unittest2 python-virtualenv diff --git a/files/rpms/keystone b/files/rpms/keystone index 33a4f47ccf..52dbf477d8 100644 --- a/files/rpms/keystone +++ b/files/rpms/keystone @@ -4,10 +4,9 @@ python-paste #dist:f16,f17,f18,f19 python-paste-deploy #dist:f16,f17,f18,f19 python-paste-script #dist:f16,f17,f18,f19 python-routes -python-setuptools #dist:f16,f17,f18,f19 python-sqlalchemy python-sqlite2 python-webob sqlite -# Deps installed via pip for RHEL \ No newline at end of file +# Deps installed via pip for RHEL diff --git a/files/rpms/ryu b/files/rpms/ryu index 0f62f9fc1f..e8ed926c1e 100644 --- a/files/rpms/ryu +++ b/files/rpms/ryu @@ -1,5 +1,4 @@ python-gevent python-gflags python-netifaces -python-setuptools #dist:f16,f17,f18,f19 python-sphinx diff --git a/files/rpms/swift b/files/rpms/swift index 
2cc4a0bf39..b137f30dce 100644 --- a/files/rpms/swift +++ b/files/rpms/swift @@ -10,7 +10,6 @@ python-greenlet python-netifaces python-nose python-paste-deploy # dist:f16,f17,f18,f19 -python-setuptools # dist:f16,f17,f18,f19 python-simplejson python-webob pyxattr diff --git a/functions b/functions index fe37e4c3de..14ed1801c1 100644 --- a/functions +++ b/functions @@ -1140,8 +1140,11 @@ function setup_develop() { echo "cd $REQUIREMENTS_DIR; $SUDO_CMD python update.py $project_dir" - (cd $REQUIREMENTS_DIR; \ - $SUDO_CMD python update.py $project_dir) + # Don't update repo if local changes exist + if (cd $project_dir && git diff --quiet); then + (cd $REQUIREMENTS_DIR; \ + $SUDO_CMD python update.py $project_dir) + fi pip_install -e $project_dir # ensure that further actions can do things like setup.py sdist diff --git a/stack.sh b/stack.sh index c2e6fe4626..36f427f849 100755 --- a/stack.sh +++ b/stack.sh @@ -578,18 +578,8 @@ set -o xtrace echo_summary "Installing package prerequisites" source $TOP_DIR/tools/install_prereqs.sh -install_rpc_backend - -if is_service_enabled $DATABASE_BACKENDS; then - install_database -fi - -if is_service_enabled neutron; then - install_neutron_agent_packages -fi - -# Unbreak the giant mess that is the current state of setuptools -unfubar_setuptools +# Configure an appropriate python environment +$TOP_DIR/tools/install_pip.sh # System-specific preconfigure # ============================ @@ -642,6 +632,16 @@ if [[ is_fedora && $DISTRO =~ (rhel6) ]]; then sudo ln -sf /usr/bin/nosetests1.1 /usr/local/bin/nosetests fi +install_rpc_backend + +if is_service_enabled $DATABASE_BACKENDS; then + install_database +fi + +if is_service_enabled neutron; then + install_neutron_agent_packages +fi + TRACK_DEPENDS=${TRACK_DEPENDS:-False} # Install python packages into a virtualenv so that we can track them diff --git a/tools/install_pip.sh b/tools/install_pip.sh new file mode 100755 index 0000000000..0ea8f536f6 --- /dev/null +++ b/tools/install_pip.sh @@ 
-0,0 +1,118 @@ +#!/usr/bin/env bash + +# **install_pip.sh** + +# install_pip.sh [--pip-version ] [--use-get-pip] [--setuptools] [--force] +# +# Update pip and friends to a known common version + +# Assumptions: +# - currently we try to leave the system setuptools alone, install +# the system package if it is not already present +# - update pip to $INSTALL_PIP_VERSION + +# Keep track of the current directory +TOOLS_DIR=$(cd $(dirname "$0") && pwd) +TOP_DIR=`cd $TOOLS_DIR/..; pwd` + +# Change dir to top of devstack +cd $TOP_DIR + +# Import common functions +source $TOP_DIR/functions + +FILES=$TOP_DIR/files + +# Handle arguments + +INSTALL_PIP_VERSION=${INSTALL_PIP_VERSION:-"1.4"} +while [[ -n "$1" ]]; do + case $1 in + --force) + FORCE=1 + ;; + --pip-version) + INSTALL_PIP_VERSION="$2" + shift + ;; + --setuptools) + SETUPTOOLS=1 + ;; + --use-get-pip) + USE_GET_PIP=1; + ;; + esac + shift +done + +SETUPTOOLS_EZ_SETUP_URL=https://bitbucket.org/pypa/setuptools/raw/bootstrap/ez_setup.py +PIP_GET_PIP_URL=https://raw.github.com/pypa/pip/master/contrib/get-pip.py +PIP_TAR_URL=https://pypi.python.org/packages/source/p/pip/pip-$INSTALL_PIP_VERSION.tar.gz + +GetDistro +echo "Distro: $DISTRO" + +function get_versions() { + PIP=$(which pip 2>/dev/null || which pip-python 2>/dev/null) + if [[ -n $PIP ]]; then + DISTRIBUTE_VERSION=$($PIP freeze | grep 'distribute==') + SETUPTOOLS_VERSION=$($PIP freeze | grep 'setuptools==') + PIP_VERSION=$($PIP --version | awk '{ print $2}') + echo "pip: $PIP_VERSION setuptools: $SETUPTOOLS_VERSION distribute: $DISTRIBUTE_VERSION" + fi +} + +function setuptools_ez_setup() { + if [[ ! -r $FILES/ez_setup.py ]]; then + (cd $FILES; \ + curl -OR $SETUPTOOLS_EZ_SETUP_URL; \ + ) + fi + sudo python $FILES/ez_setup.py +} + +function install_get_pip() { + if [[ ! 
-r $FILES/get-pip.py ]]; then + (cd $FILES; \ + curl $PIP_GET_PIP_URL; \ + ) + fi + sudo python $FILES/get-pip.py +} + +function install_pip_tarball() { + curl -O $PIP_TAR_URL + tar xvfz pip-$INSTALL_PIP_VERSION.tar.gz + cd pip-$INSTALL_PIP_VERSION + sudo python setup.py install +} + +# Show starting versions +get_versions + +# Do setuptools +if [[ -n "$SETUPTOOLS" ]]; then + # We want it from source + uninstall_package python-setuptools + setuptools_ez_setup +else + # See about installing the distro setuptools + if ! python -c "import setuptools"; then + install_package python-setuptools + fi +fi + +# Do pip +if [[ -z $PIP || "$PIP_VERSION" != "$INSTALL_PIP_VERSION" || -n $FORCE ]]; then + + # Eradicate any and all system packages + uninstall_package python-pip + + if [[ -n "$USE_GET_PIP" ]]; then + install_get_pip + else + install_pip_tarball + fi + + get_versions +fi From b3862f98718317042dd48871d50da1e5255c0329 Mon Sep 17 00:00:00 2001 From: Mike Perez Date: Tue, 12 Feb 2013 02:16:41 -0800 Subject: [PATCH 0279/4704] Add Cinder V2 API to keystone catalog Support both SQL and templated keystone backend. This will add an additional endpoint to go with v1. 
Change-Id: I19168d55c2ffad2b1cd668b6c1341dc8e49e9c1f --- files/default_catalog.templates | 6 ++++++ lib/cinder | 12 ++++++++++++ 2 files changed, 18 insertions(+) diff --git a/files/default_catalog.templates b/files/default_catalog.templates index 1ecf890241..277904a8e3 100644 --- a/files/default_catalog.templates +++ b/files/default_catalog.templates @@ -24,6 +24,12 @@ catalog.RegionOne.volume.internalURL = http://%SERVICE_HOST%:8776/v1/$(tenant_id catalog.RegionOne.volume.name = Volume Service +catalog.RegionOne.volumev2.publicURL = http://%SERVICE_HOST%:8776/v2/$(tenant_id)s +catalog.RegionOne.volumev2.adminURL = http://%SERVICE_HOST%:8776/v2/$(tenant_id)s +catalog.RegionOne.volumev2.internalURL = http://%SERVICE_HOST%:8776/v2/$(tenant_id)s +catalog.RegionOne.volumev2.name = Volume Service V2 + + catalog.RegionOne.ec2.publicURL = http://%SERVICE_HOST%:8773/services/Cloud catalog.RegionOne.ec2.adminURL = http://%SERVICE_HOST%:8773/services/Admin catalog.RegionOne.ec2.internalURL = http://%SERVICE_HOST%:8773/services/Cloud diff --git a/lib/cinder b/lib/cinder index 3472dcd519..2bdc29b0ae 100644 --- a/lib/cinder +++ b/lib/cinder @@ -339,6 +339,18 @@ create_cinder_accounts() { --publicurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s" \ --adminurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s" \ --internalurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s" + CINDER_V2_SERVICE=$(keystone service-create \ + --name=cinder \ + --type=volumev2 \ + --description="Cinder Volume Service V2" \ + | grep " id " | get_field 2) + keystone endpoint-create \ + --region RegionOne \ + --service_id $CINDER_V2_SERVICE \ + --publicurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v2/\$(tenant_id)s" \ + --adminurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v2/\$(tenant_id)s" \ + --internalurl 
"$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v2/\$(tenant_id)s" + fi fi } From 2bfbc779c7254604d666edca87a7a582b2c7ac40 Mon Sep 17 00:00:00 2001 From: Gordon Chung Date: Fri, 9 Aug 2013 10:55:12 -0400 Subject: [PATCH 0280/4704] Cinder configuration is not set up for Ceilometer enable cinder notifications when ceilometer is enabled Change-Id: I55809f1cef35aca90f8513a73df1417dcf08098d Fixes:Bug1210269 --- lib/cinder | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/cinder b/lib/cinder index 3472dcd519..14950c7f4d 100644 --- a/lib/cinder +++ b/lib/cinder @@ -234,6 +234,10 @@ function configure_cinder() { iniset $CINDER_CONF DEFAULT state_path $CINDER_STATE_PATH iniset $CINDER_CONF DEFAULT periodic_interval $CINDER_PERIODIC_INTERVAL + if is_service_enabled ceilometer; then + iniset $CINDER_CONF DEFAULT notification_driver "cinder.openstack.common.notifier.rpc_notifier" + fi + if is_service_enabled tls-proxy; then # Set the service port for a proxy to take the original iniset $CINDER_CONF DEFAULT osapi_volume_listen_port $CINDER_SERVICE_PORT_INT From 385152cd890affbf1d1526a3fb14abe71b3d3ac6 Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Fri, 9 Aug 2013 11:13:28 -0400 Subject: [PATCH 0281/4704] Enable debug logging on tempest With tempest moving to testr (serially currently) the log level is no longer defaulting to debug as it did with nose. To get the same level of verbosity in the logging as when running with nose this commit sets the debug flag on tempest. 
Change-Id: I6acd57be0f8188d31825d88471ba9883ebb30519 --- lib/tempest | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/tempest b/lib/tempest index aaa7281a98..b97f0d86a9 100644 --- a/lib/tempest +++ b/lib/tempest @@ -201,6 +201,7 @@ function configure_tempest() { mkdir -p $TEMPEST_STATE_PATH iniset $TEMPEST_CONF DEFAULT use_stderr False iniset $TEMPEST_CONF DEFAULT log_file tempest.log + iniset $TEMPEST_CONF DEFAULT debug True # Timeouts iniset $TEMPEST_CONF compute build_timeout $BUILD_TIMEOUT From db5fadb5cb768820df54fc3d1c7428a57b511582 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Fri, 9 Aug 2013 13:41:33 -0400 Subject: [PATCH 0282/4704] cleanup potentially installed older oslo.config If the user had oslo.config installed prior to us setting up the oslo.config out of git they can get themselves into this very funny situation where pip doesn't see oslo.config 1.1.x, however some packages might. This manifests itself as a user error trying to start nova-api which uses DeprecatedOption, not in oslo.config 1.1.x Because of the funny state pip is in, you can't uninstall oslo.config. So in these situations, if we see old oslo.config in the filesystem, pip install / uninstall it to ensure that everyone ends up using the git version instead. To reduce the amount of user confusion, do this on every install_oslo for a while, which we can purge after Havana ships. 
Change-Id: If92073be5a431840701c952a194e63a7c452c9ca --- clean.sh | 1 + lib/oslo | 15 +++++++++++++++ 2 files changed, 16 insertions(+) diff --git a/clean.sh b/clean.sh index 493c449fca..f7d15dfe4e 100755 --- a/clean.sh +++ b/clean.sh @@ -56,6 +56,7 @@ if [[ -n "$SESSION" ]]; then fi # Clean projects +cleanup_oslo cleanup_cinder cleanup_glance cleanup_keystone diff --git a/lib/oslo b/lib/oslo index 1eb13dbf3d..de5ec4e83e 100644 --- a/lib/oslo +++ b/lib/oslo @@ -27,6 +27,10 @@ OSLOMSG_DIR=$DEST/oslo.messaging # install_oslo() - Collect source and prepare function install_oslo() { + # TODO(sdague): remove this once we get to Icehouse, this just makes + # for a smoother transition of existing users. + cleanup_oslo + git_clone $OSLOCFG_REPO $OSLOCFG_DIR $OSLOCFG_BRANCH setup_develop $OSLOCFG_DIR @@ -34,6 +38,17 @@ function install_oslo() { setup_develop $OSLOMSG_DIR } +# cleanup_oslo() - purge possibly old versions of oslo +function cleanup_oslo() { + # this means we've got an old olso installed, lets get rid of it + if find /usr | grep oslo.config | grep -v oslo.config.egg-link > /dev/null; then + echo "Found old oslo.config... removing to ensure consistency" + local PIP_CMD=$(get_pip_command) + pip_install olso.config + sudo $PIP_CMD uninstall -y olso.config + fi +} + # Restore xtrace $XTRACE From 376b6316608fe72bc4a0bd997e1c94f76b086588 Mon Sep 17 00:00:00 2001 From: Bob Ball Date: Mon, 29 Jul 2013 13:10:25 +0100 Subject: [PATCH 0283/4704] Force $DEST to have wider permissions This is particularly useful in the case where we create the home directory above and the permissions are too strict. Other users, such as the apache user, need read/execute for this directory. 
Change-Id: I908d993dbcd863b482030afcc04e5e7b9f4cffa1 --- stack.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/stack.sh b/stack.sh index 880529d5af..d4c0eab608 100755 --- a/stack.sh +++ b/stack.sh @@ -234,8 +234,10 @@ else fi # Create the destination directory and ensure it is writable by the user +# and read/executable by everybody for daemons (e.g. apache run for horizon) sudo mkdir -p $DEST sudo chown -R $STACK_USER $DEST +chmod 0755 $DEST # a basic test for $DEST path permissions (fatal on error unless skipped) check_path_perm_sanity ${DEST} From 4b600898743a19f18e83c938eb15744bb2dc13c1 Mon Sep 17 00:00:00 2001 From: John Griffith Date: Sat, 10 Aug 2013 17:48:07 +0000 Subject: [PATCH 0284/4704] Add some missing pkgs to Cinder install Added python-dev even though it's picked up by other projects already, just to be explicit. Also added libpq-dev, this way users can just run "sudo pip install -r test-requirements.txt" and perform everything in run_tests.sh without the need for venv. 
Change-Id: I3953032ac40ef78fc6f67d77539e13539fbbb2ac --- files/apts/cinder | 2 ++ files/rpms-suse/cinder | 2 ++ files/rpms/cinder | 2 ++ 3 files changed, 6 insertions(+) diff --git a/files/apts/cinder b/files/apts/cinder index c45b97f5a2..32cb3a0039 100644 --- a/files/apts/cinder +++ b/files/apts/cinder @@ -1,3 +1,5 @@ tgt lvm2 qemu-utils +libpq-dev +python-dev diff --git a/files/rpms-suse/cinder b/files/rpms-suse/cinder index 8f4a5a7998..49e2cb8249 100644 --- a/files/rpms-suse/cinder +++ b/files/rpms-suse/cinder @@ -1,3 +1,5 @@ lvm2 tgt qemu-tools +python-devel +postgresql-devel diff --git a/files/rpms/cinder b/files/rpms/cinder index 19dedffe91..699f2fc22c 100644 --- a/files/rpms/cinder +++ b/files/rpms/cinder @@ -1,3 +1,5 @@ lvm2 scsi-target-utils qemu-img +python-devel +postgresql-devel From 9acc12a3921a261c7ae7a1902871183a6a5b64da Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Fri, 9 Aug 2013 15:09:31 -0500 Subject: [PATCH 0285/4704] More Python package fixes * Add tools/fixup_stuff.sh to fix prettytable and httplib2 install with pip 1.4+ * Cache downloads properly in tools/install_pip.sh Change-Id: I482590cb91f7a10c1436bc9015afd572ac1cc73e --- stack.sh | 4 ++++ tools/fixup_stuff.sh | 43 +++++++++++++++++++++++++++++++++++++++++++ tools/install_pip.sh | 10 ++++++---- 3 files changed, 53 insertions(+), 4 deletions(-) create mode 100755 tools/fixup_stuff.sh diff --git a/stack.sh b/stack.sh index 22a23c81d3..aca49d0ae9 100755 --- a/stack.sh +++ b/stack.sh @@ -581,6 +581,10 @@ source $TOP_DIR/tools/install_prereqs.sh # Configure an appropriate python environment $TOP_DIR/tools/install_pip.sh +# Do the ugly hacks for borken packages and distros +$TOP_DIR/tools/fixup_stuff.sh + + # System-specific preconfigure # ============================ diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh new file mode 100755 index 0000000000..60d0f468e0 --- /dev/null +++ b/tools/fixup_stuff.sh @@ -0,0 +1,43 @@ +#!/usr/bin/env bash + +# **fixup_stuff.sh** + +# 
fixup_stuff.sh +# +# All distro and package specific hacks go in here +# - prettytable 0.7.2 permissions are 600 in the package and +# pip 1.4 doesn't fix it (1.3 did) +# - httplib2 0.8 permissions are 600 in the package and +# pip 1.4 doesn't fix it (1.3 did) + +# Keep track of the current directory +TOOLS_DIR=$(cd $(dirname "$0") && pwd) +TOP_DIR=`cd $TOOLS_DIR/..; pwd` + +# Change dir to top of devstack +cd $TOP_DIR + +# Import common functions +source $TOP_DIR/functions + +FILES=$TOP_DIR/files + +# Pre-install affected packages so we can fix the permissions +sudo pip install prettytable +sudo pip install httplib2 + +SITE_DIRS=$(python -c "import site; import os; print os.linesep.join(site.getsitepackages())") +for dir in $SITE_DIRS; do + + # Fix prettytable 0.7.2 permissions + if [[ -r $dir/prettytable.py ]]; then + sudo chmod +r $dir/prettytable-0.7.2*/* + fi + + # Fix httplib2 0.8 permissions + httplib_dir=httplib2-0.8.egg-info + if [[ -d $dir/$httplib_dir ]]; then + sudo chmod +r $dir/$httplib_dir/* + fi + +done diff --git a/tools/install_pip.sh b/tools/install_pip.sh index 0ea8f536f6..6e3e9d2104 100755 --- a/tools/install_pip.sh +++ b/tools/install_pip.sh @@ -81,10 +81,12 @@ function install_get_pip() { } function install_pip_tarball() { - curl -O $PIP_TAR_URL - tar xvfz pip-$INSTALL_PIP_VERSION.tar.gz - cd pip-$INSTALL_PIP_VERSION - sudo python setup.py install + (cd $FILES; \ + curl -O $PIP_TAR_URL; \ + tar xvfz pip-$INSTALL_PIP_VERSION.tar.gz; \ + cd pip-$INSTALL_PIP_VERSION; \ + sudo python setup.py install; \ + ) } # Show starting versions From dace92f557a3c07a80bb9a5d9e480810d81611e9 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Sat, 10 Aug 2013 23:49:47 -0300 Subject: [PATCH 0286/4704] Stop doing special things with setuptools pip 1.4 can handle the distribute/setuptools upgrade sequencing appropriate. So it turns out all we need to upgrade is pip, and then the rest will fall in to place. This will still not fix the packages vs. 
pip interactions, but we don't to muck with the system setuptools packages at all. Change-Id: I99220ccc190798c3eb77bb2361abc6606bd546b4 --- tools/install_pip.sh | 52 ++++++++++---------------------------------- 1 file changed, 11 insertions(+), 41 deletions(-) diff --git a/tools/install_pip.sh b/tools/install_pip.sh index 0ea8f536f6..64cc20052e 100755 --- a/tools/install_pip.sh +++ b/tools/install_pip.sh @@ -2,13 +2,11 @@ # **install_pip.sh** -# install_pip.sh [--pip-version ] [--use-get-pip] [--setuptools] [--force] +# install_pip.sh [--pip-version ] [--use-get-pip] [--force] # # Update pip and friends to a known common version # Assumptions: -# - currently we try to leave the system setuptools alone, install -# the system package if it is not already present # - update pip to $INSTALL_PIP_VERSION # Keep track of the current directory @@ -25,7 +23,7 @@ FILES=$TOP_DIR/files # Handle arguments -INSTALL_PIP_VERSION=${INSTALL_PIP_VERSION:-"1.4"} +INSTALL_PIP_VERSION=${INSTALL_PIP_VERSION:-"1.4.1"} while [[ -n "$1" ]]; do case $1 in --force) @@ -35,9 +33,6 @@ while [[ -n "$1" ]]; do INSTALL_PIP_VERSION="$2" shift ;; - --setuptools) - SETUPTOOLS=1 - ;; --use-get-pip) USE_GET_PIP=1; ;; @@ -45,7 +40,6 @@ while [[ -n "$1" ]]; do shift done -SETUPTOOLS_EZ_SETUP_URL=https://bitbucket.org/pypa/setuptools/raw/bootstrap/ez_setup.py PIP_GET_PIP_URL=https://raw.github.com/pypa/pip/master/contrib/get-pip.py PIP_TAR_URL=https://pypi.python.org/packages/source/p/pip/pip-$INSTALL_PIP_VERSION.tar.gz @@ -55,21 +49,11 @@ echo "Distro: $DISTRO" function get_versions() { PIP=$(which pip 2>/dev/null || which pip-python 2>/dev/null) if [[ -n $PIP ]]; then - DISTRIBUTE_VERSION=$($PIP freeze | grep 'distribute==') - SETUPTOOLS_VERSION=$($PIP freeze | grep 'setuptools==') PIP_VERSION=$($PIP --version | awk '{ print $2}') - echo "pip: $PIP_VERSION setuptools: $SETUPTOOLS_VERSION distribute: $DISTRIBUTE_VERSION" + echo "pip: $PIP_VERSION" fi } -function setuptools_ez_setup() { - if [[ ! 
-r $FILES/ez_setup.py ]]; then - (cd $FILES; \ - curl -OR $SETUPTOOLS_EZ_SETUP_URL; \ - ) - fi - sudo python $FILES/ez_setup.py -} function install_get_pip() { if [[ ! -r $FILES/get-pip.py ]]; then @@ -90,29 +74,15 @@ function install_pip_tarball() { # Show starting versions get_versions -# Do setuptools -if [[ -n "$SETUPTOOLS" ]]; then - # We want it from source - uninstall_package python-setuptools - setuptools_ez_setup -else - # See about installing the distro setuptools - if ! python -c "import setuptools"; then - install_package python-setuptools - fi -fi - # Do pip -if [[ -z $PIP || "$PIP_VERSION" != "$INSTALL_PIP_VERSION" || -n $FORCE ]]; then - # Eradicate any and all system packages - uninstall_package python-pip - - if [[ -n "$USE_GET_PIP" ]]; then - install_get_pip - else - install_pip_tarball - fi +# Eradicate any and all system packages +uninstall_package python-pip - get_versions +if [[ -n "$USE_GET_PIP" ]]; then + install_get_pip +else + install_pip_tarball fi + +get_versions From d5cccad2f0655b59e1db9219458f8bc35edb9ad1 Mon Sep 17 00:00:00 2001 From: Steve Baker Date: Fri, 19 Jul 2013 10:34:24 +1200 Subject: [PATCH 0287/4704] Generate heat images for tempest tests This requires HEAT_CREATE_TEST_IMAGE to be set for any images to be created. If the file (files/fedora-vm-heat-cfntools-tempest.qcow2) already exists then disk-image-create will not be called, and the existing image file will be registered with glance. 
This is most likely to happen in the following scenarios: - a second run of stack.sh - the image has been pre-built elsewhere (such as during devstack-gate image building) Change-Id: I276573a20927e72f2cb68784f655c1ba1913ae8a --- lib/heat | 15 +++++++++++++++ lib/tempest | 7 +++++++ 2 files changed, 22 insertions(+) diff --git a/lib/heat b/lib/heat index 1b715f2b55..92b4e50ee2 100644 --- a/lib/heat +++ b/lib/heat @@ -197,6 +197,21 @@ function stop_heat() { done } +function disk_image_create { + local elements_path=$1 + local elements=$2 + local arch=$3 + local output=$TOP_DIR/files/$4 + if [[ -f "$output.qcow2" ]]; + then + echo "Image file already exists: $output_file" + else + ELEMENTS_PATH=$elements_path disk-image-create \ + $elements -a $arch -o $output + fi + # upload with fake URL so that image in $TOP_DIR/files is used + upload_image "http://localhost/$output.qcow2" $TOKEN +} # Restore xtrace $XTRACE diff --git a/lib/tempest b/lib/tempest index aaa7281a98..5142f24a2e 100644 --- a/lib/tempest +++ b/lib/tempest @@ -24,6 +24,7 @@ # ``DEFAULT_INSTANCE_TYPE`` # ``DEFAULT_INSTANCE_USER`` # ``CINDER_MULTI_LVM_BACKEND`` +# ``HEAT_CREATE_TEST_IMAGE`` # ``stack.sh`` calls the entry points in this order: # # install_tempest @@ -271,6 +272,12 @@ function configure_tempest() { iniset $TEMPEST_CONF boto http_socket_timeout 30 iniset $TEMPEST_CONF boto ssh_user ${DEFAULT_INSTANCE_USER:-cirros} + # Orchestration test image + if [ $HEAT_CREATE_TEST_IMAGE == "True" ]; then + disk_image_create /usr/share/tripleo-image-elements "vm fedora heat-cfntools" "i386" "fedora-vm-heat-cfntools-tempest" + iniset $TEMPEST_CONF orchestration image_ref "fedora-vm-heat-cfntools-tempest" + fi + # Scenario iniset $TEMPEST_CONF scenario img_dir "$FILES/images/cirros-0.3.1-x86_64-uec" From 556ffe402252b8e993f7849a2d7e959adc8c6291 Mon Sep 17 00:00:00 2001 From: Steve Baker Date: Tue, 6 Aug 2013 16:42:38 +1200 Subject: [PATCH 0288/4704] Colorize heat engine log Change-Id: 
If6ffb234e360e8a579eb8e1e7baedb90354b10ae --- lib/heat | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/lib/heat b/lib/heat index 92b4e50ee2..568f4d00ca 100644 --- a/lib/heat +++ b/lib/heat @@ -125,6 +125,14 @@ function configure_heat() { iniset_rpc_backend heat $HEAT_ENGINE_CONF DEFAULT + if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then + # Add color to logging output + iniset $HEAT_ENGINE_CONF DEFAULT logging_context_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s%(color)s] %(instance)s%(color)s%(message)s" + iniset $HEAT_ENGINE_CONF DEFAULT logging_default_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s" + iniset $HEAT_ENGINE_CONF DEFAULT logging_debug_format_suffix "from (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d" + iniset $HEAT_ENGINE_CONF DEFAULT logging_exception_prefix "%(color)s%(asctime)s.%(msecs)03d TRACE %(name)s %(instance)s" + fi + # Cloudwatch API HEAT_API_CW_CONF=$HEAT_CONF_DIR/heat-api-cloudwatch.conf cp $HEAT_DIR/etc/heat/heat-api-cloudwatch.conf $HEAT_API_CW_CONF From 99405a45aa3930a9b695d9a1c1dabf0d967e48ad Mon Sep 17 00:00:00 2001 From: Roman Gorodeckij Date: Wed, 7 Aug 2013 09:20:36 -0400 Subject: [PATCH 0289/4704] Pip install fails because of --use-mirrors parameter Having --use-mirrors parameter in pip commands causes pip to hang on some distros. Pypi uses CDN for long time already, so there's no point to keep this parameter no more. Wipe PIP_USE_MIRRORS out of the "function" file. 
Change-Id: I70adaf6591834af2482e09eb7f8f9f60df8e7692 Closes-Bug: #1069309 --- functions | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/functions b/functions index e9c60615e6..087a0ea844 100644 --- a/functions +++ b/functions @@ -944,13 +944,9 @@ function pip_install { CMD_PIP=$(get_pip_command) fi - if is_fedora && [[ $DISTRO =~ (rhel6) ]]; then - # RHEL6 pip by default doesn't have this (was introduced - # around 0.8.1 or so) - PIP_USE_MIRRORS=${PIP_USE_MIRRORS:-False} - else - PIP_USE_MIRRORS=${PIP_USE_MIRRORS:-True} - fi + # Mirror option not needed anymore because pypi has CDN available, + # but it's useful in certain circumstances + PIP_USE_MIRRORS=${PIP_USE_MIRRORS:-False} if [[ "$PIP_USE_MIRRORS" != "False" ]]; then PIP_MIRROR_OPT="--use-mirrors" fi From bf10ac55a99d226a81bdbc7e6bd1e85b4f48652d Mon Sep 17 00:00:00 2001 From: Maru Newby Date: Sat, 10 Aug 2013 21:27:54 +0000 Subject: [PATCH 0290/4704] Add auth config for neutron metadata proxy * Without auth config, the proxy will return 500 errors on cloud-init requests, which will cause VM connectivity checks to fail. * A cleaner fix would be for the metadata proxy to reuse the configuration from the keystone_authtoken section of neutron.conf, but I chose the easier route because of a pending switch from REST to RPC communication (RPC won't need the auth config). 
* Fixes bug 1210664 Change-Id: Iaa3c74f5ada2404119c44c8cbdad380eda158f66 --- lib/neutron | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/lib/neutron b/lib/neutron index 306140a4b5..3b8dcf59a6 100644 --- a/lib/neutron +++ b/lib/neutron @@ -577,6 +577,8 @@ function _configure_neutron_metadata_agent() { iniset $Q_META_CONF_FILE DEFAULT nova_metadata_ip $Q_META_DATA_IP iniset $Q_META_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND" + _neutron_setup_keystone $Q_META_CONF_FILE DEFAULT True True + } function _configure_neutron_lbaas() { @@ -687,6 +689,7 @@ function _neutron_setup_keystone() { local conf_file=$1 local section=$2 local use_auth_url=$3 + local skip_auth_cache=$4 if [[ -n $use_auth_url ]]; then iniset $conf_file $section auth_url "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v2.0" else @@ -697,11 +700,13 @@ function _neutron_setup_keystone() { iniset $conf_file $section admin_tenant_name $SERVICE_TENANT_NAME iniset $conf_file $section admin_user $Q_ADMIN_USERNAME iniset $conf_file $section admin_password $SERVICE_PASSWORD - iniset $conf_file $section signing_dir $NEUTRON_AUTH_CACHE_DIR - # Create cache dir - sudo mkdir -p $NEUTRON_AUTH_CACHE_DIR - sudo chown $STACK_USER $NEUTRON_AUTH_CACHE_DIR - rm -f $NEUTRON_AUTH_CACHE_DIR/* + if [[ -z $skip_auth_cache ]]; then + iniset $conf_file $section signing_dir $NEUTRON_AUTH_CACHE_DIR + # Create cache dir + sudo mkdir -p $NEUTRON_AUTH_CACHE_DIR + sudo chown $STACK_USER $NEUTRON_AUTH_CACHE_DIR + rm -f $NEUTRON_AUTH_CACHE_DIR/* + fi } function _neutron_setup_interface_driver() { From 039979424bebc71b94f53f51030eda5e9d2b7734 Mon Sep 17 00:00:00 2001 From: Ben Nemec Date: Sat, 10 Aug 2013 09:56:16 -0500 Subject: [PATCH 0291/4704] Allow disabling of debug logging I find that enabling the debug log level often causes me to miss important error messages due to the sheer volume of information logged. 
This change allows configuration of the debug option in a number of the projects so it can be disabled globally without having to make one-off changes after each re-stack. Note that this does not apply to Keystone or Swift right now. They use a different method to configure their logging level and I'm not as familiar with them so I didn't want to mess with their settings. Change-Id: I185d496543d245a644854c8a37f3359377cb978c --- lib/cinder | 2 +- lib/glance | 6 +++--- lib/heat | 8 ++++---- lib/neutron | 10 +++++----- lib/nova | 2 +- stack.sh | 3 +++ 6 files changed, 17 insertions(+), 14 deletions(-) diff --git a/lib/cinder b/lib/cinder index 14950c7f4d..f49eda15be 100644 --- a/lib/cinder +++ b/lib/cinder @@ -212,7 +212,7 @@ function configure_cinder() { cp $CINDER_DIR/etc/cinder/cinder.conf.sample $CINDER_CONF iniset $CINDER_CONF DEFAULT auth_strategy keystone - iniset $CINDER_CONF DEFAULT debug True + iniset $CINDER_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL iniset $CINDER_CONF DEFAULT verbose True if [ "$CINDER_MULTI_LVM_BACKEND" = "True" ]; then iniset $CINDER_CONF DEFAULT enabled_backends lvmdriver-1,lvmdriver-2 diff --git a/lib/glance b/lib/glance index 583f879555..a18189f474 100644 --- a/lib/glance +++ b/lib/glance @@ -71,7 +71,7 @@ function configure_glance() { # Copy over our glance configurations and update them cp $GLANCE_DIR/etc/glance-registry.conf $GLANCE_REGISTRY_CONF - iniset $GLANCE_REGISTRY_CONF DEFAULT debug True + iniset $GLANCE_REGISTRY_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL inicomment $GLANCE_REGISTRY_CONF DEFAULT log_file local dburl=`database_connection_url glance` iniset $GLANCE_REGISTRY_CONF DEFAULT sql_connection $dburl @@ -87,7 +87,7 @@ function configure_glance() { iniset $GLANCE_REGISTRY_CONF keystone_authtoken signing_dir $GLANCE_AUTH_CACHE_DIR/registry cp $GLANCE_DIR/etc/glance-api.conf $GLANCE_API_CONF - iniset $GLANCE_API_CONF DEFAULT debug True + iniset $GLANCE_API_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL inicomment 
$GLANCE_API_CONF DEFAULT log_file iniset $GLANCE_API_CONF DEFAULT sql_connection $dburl iniset $GLANCE_API_CONF DEFAULT use_syslog $SYSLOG @@ -123,7 +123,7 @@ function configure_glance() { cp -p $GLANCE_DIR/etc/glance-api-paste.ini $GLANCE_API_PASTE_INI cp $GLANCE_DIR/etc/glance-cache.conf $GLANCE_CACHE_CONF - iniset $GLANCE_CACHE_CONF DEFAULT debug True + iniset $GLANCE_CACHE_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL inicomment $GLANCE_CACHE_CONF DEFAULT log_file iniset $GLANCE_CACHE_CONF DEFAULT use_syslog $SYSLOG iniset $GLANCE_CACHE_CONF DEFAULT filesystem_store_datadir $GLANCE_IMAGE_DIR/ diff --git a/lib/heat b/lib/heat index 1b715f2b55..3c3b2c4a2d 100644 --- a/lib/heat +++ b/lib/heat @@ -68,7 +68,7 @@ function configure_heat() { # Cloudformation API HEAT_API_CFN_CONF=$HEAT_CONF_DIR/heat-api-cfn.conf cp $HEAT_DIR/etc/heat/heat-api-cfn.conf $HEAT_API_CFN_CONF - iniset $HEAT_API_CFN_CONF DEFAULT debug True + iniset $HEAT_API_CFN_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL inicomment $HEAT_API_CFN_CONF DEFAULT log_file iniset $HEAT_API_CFN_CONF DEFAULT use_syslog $SYSLOG iniset $HEAT_API_CFN_CONF DEFAULT bind_host $HEAT_API_CFN_HOST @@ -90,7 +90,7 @@ function configure_heat() { # OpenStack API HEAT_API_CONF=$HEAT_CONF_DIR/heat-api.conf cp $HEAT_DIR/etc/heat/heat-api.conf $HEAT_API_CONF - iniset $HEAT_API_CONF DEFAULT debug True + iniset $HEAT_API_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL inicomment $HEAT_API_CONF DEFAULT log_file iniset $HEAT_API_CONF DEFAULT use_syslog $SYSLOG iniset $HEAT_API_CONF DEFAULT bind_host $HEAT_API_HOST @@ -112,7 +112,7 @@ function configure_heat() { # engine HEAT_ENGINE_CONF=$HEAT_CONF_DIR/heat-engine.conf cp $HEAT_DIR/etc/heat/heat-engine.conf $HEAT_ENGINE_CONF - iniset $HEAT_ENGINE_CONF DEFAULT debug True + iniset $HEAT_ENGINE_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL inicomment $HEAT_ENGINE_CONF DEFAULT log_file iniset $HEAT_ENGINE_CONF DEFAULT use_syslog $SYSLOG iniset $HEAT_ENGINE_CONF DEFAULT bind_host $HEAT_ENGINE_HOST @@ 
-128,7 +128,7 @@ function configure_heat() { # Cloudwatch API HEAT_API_CW_CONF=$HEAT_CONF_DIR/heat-api-cloudwatch.conf cp $HEAT_DIR/etc/heat/heat-api-cloudwatch.conf $HEAT_API_CW_CONF - iniset $HEAT_API_CW_CONF DEFAULT debug True + iniset $HEAT_API_CW_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL inicomment $HEAT_API_CW_CONF DEFAULT log_file iniset $HEAT_API_CW_CONF DEFAULT use_syslog $SYSLOG iniset $HEAT_API_CW_CONF DEFAULT bind_host $HEAT_API_CW_HOST diff --git a/lib/neutron b/lib/neutron index 306140a4b5..564315b107 100644 --- a/lib/neutron +++ b/lib/neutron @@ -537,7 +537,7 @@ function _configure_neutron_dhcp_agent() { cp $NEUTRON_DIR/etc/dhcp_agent.ini $Q_DHCP_CONF_FILE iniset $Q_DHCP_CONF_FILE DEFAULT verbose True - iniset $Q_DHCP_CONF_FILE DEFAULT debug True + iniset $Q_DHCP_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL iniset $Q_DHCP_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE iniset $Q_DHCP_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND" @@ -557,7 +557,7 @@ function _configure_neutron_l3_agent() { cp $NEUTRON_DIR/etc/l3_agent.ini $Q_L3_CONF_FILE iniset $Q_L3_CONF_FILE DEFAULT verbose True - iniset $Q_L3_CONF_FILE DEFAULT debug True + iniset $Q_L3_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL iniset $Q_L3_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE iniset $Q_L3_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND" @@ -573,7 +573,7 @@ function _configure_neutron_metadata_agent() { cp $NEUTRON_DIR/etc/metadata_agent.ini $Q_META_CONF_FILE iniset $Q_META_CONF_FILE DEFAULT verbose True - iniset $Q_META_CONF_FILE DEFAULT debug True + iniset $Q_META_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL iniset $Q_META_CONF_FILE DEFAULT nova_metadata_ip $Q_META_DATA_IP iniset $Q_META_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND" @@ -597,7 +597,7 @@ function _configure_neutron_plugin_agent() { # ensure that an agent's configuration can override the default iniset /$Q_PLUGIN_CONF_FILE agent root_helper "$Q_RR_COMMAND" iniset $NEUTRON_CONF DEFAULT verbose True - iniset 
$NEUTRON_CONF DEFAULT debug True + iniset $NEUTRON_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL # Configure agent for plugin neutron_plugin_configure_plugin_agent @@ -620,7 +620,7 @@ function _configure_neutron_service() { fi iniset $NEUTRON_CONF DEFAULT verbose True - iniset $NEUTRON_CONF DEFAULT debug True + iniset $NEUTRON_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL iniset $NEUTRON_CONF DEFAULT policy_file $Q_POLICY_FILE iniset $NEUTRON_CONF DEFAULT allow_overlapping_ips $Q_ALLOW_OVERLAPPING_IP diff --git a/lib/nova b/lib/nova index 9c38498d8c..842c098624 100644 --- a/lib/nova +++ b/lib/nova @@ -430,7 +430,7 @@ function create_nova_conf() { # (Re)create ``nova.conf`` rm -f $NOVA_CONF iniset $NOVA_CONF DEFAULT verbose "True" - iniset $NOVA_CONF DEFAULT debug "True" + iniset $NOVA_CONF DEFAULT debug "$ENABLE_DEBUG_LOG_LEVEL" iniset $NOVA_CONF DEFAULT auth_strategy "keystone" iniset $NOVA_CONF DEFAULT allow_resize_to_same_host "True" iniset $NOVA_CONF DEFAULT api_paste_config "$NOVA_API_PASTE_INI" diff --git a/stack.sh b/stack.sh index aca49d0ae9..e2703224a4 100755 --- a/stack.sh +++ b/stack.sh @@ -250,6 +250,9 @@ OFFLINE=`trueorfalse False $OFFLINE` # operation. ERROR_ON_CLONE=`trueorfalse False $ERROR_ON_CLONE` +# Whether to enable the debug log level in OpenStack services +ENABLE_DEBUG_LOG_LEVEL=`trueorfalse True $ENABLE_DEBUG_LOG_LEVEL` + # Destination path for service data DATA_DIR=${DATA_DIR:-${DEST}/data} sudo mkdir -p $DATA_DIR From c325227465e5b31936bbab888d2a282be097d01e Mon Sep 17 00:00:00 2001 From: Gary Kotton Date: Tue, 13 Aug 2013 00:32:20 -0700 Subject: [PATCH 0292/4704] VMware: Add cinder support to devstack The patch set adds cinder support to devstack. 
VMware cinder support can be found at: - Nova - https://review.openstack.org/#/c/40245/ - Cinder - https://review.openstack.org/#/c/41600/ Change-Id: I0a05643010ea6cfb6635505accc3dcf411fdd419 --- lib/cinder | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/lib/cinder b/lib/cinder index 14950c7f4d..b7f765b391 100644 --- a/lib/cinder +++ b/lib/cinder @@ -287,6 +287,14 @@ function configure_cinder() { CINDER_GLUSTERFS_SHARES=$(echo $CINDER_GLUSTERFS_SHARES | tr ";" "\n") echo "$CINDER_GLUSTERFS_SHARES" > $CINDER_CONF_DIR/glusterfs_shares fi + elif [ "$CINDER_DRIVER" == "vsphere" ]; then + echo_summary "Using VMware vCenter driver" + iniset $CINDER_CONF DEFAULT enabled_backends vmware + iniset $CINDER_CONF vmware host_ip "$VMWAREAPI_IP" + iniset $CINDER_CONF vmware host_username "$VMWAREAPI_USER" + iniset $CINDER_CONF vmware host_password "$VMWAREAPI_PASSWORD" + iniset $CINDER_CONF vmware cluster_name "$VMWAREAPI_CLUSTER" + iniset $CINDER_CONF vmware volume_driver "cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver" fi if [[ is_fedora && $DISTRO =~ (rhel6) ]]; then From f5dbf8c8ef30c66cd40b07605b4aefa06b3e3c1d Mon Sep 17 00:00:00 2001 From: Salvatore Orlando Date: Tue, 13 Aug 2013 09:02:46 -0700 Subject: [PATCH 0293/4704] Fix option for metadata access in nicira neutron plugin Bug 1211850 Set metadata_mode option rather than enable_metadata_access_network. 
Change-Id: Ia85aba4d0dfb3e7b21937cf15aebc629e3705aed --- lib/neutron_plugins/nicira | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron_plugins/nicira b/lib/neutron_plugins/nicira index eabc41730d..e9deb64e11 100644 --- a/lib/neutron_plugins/nicira +++ b/lib/neutron_plugins/nicira @@ -90,7 +90,7 @@ function neutron_plugin_configure_service() { iniset /$Q_PLUGIN_CONF_FILE DEFAULT default_l3_gw_service_uuid $DEFAULT_L3_GW_SVC_UUID Q_L3_ENABLED=True Q_L3_ROUTER_PER_TENANT=True - iniset /$Q_PLUGIN_CONF_FILE nvp enable_metadata_access_network True + iniset /$Q_PLUGIN_CONF_FILE nvp metadata_mode access_network fi if [[ "$DEFAULT_L2_GW_SVC_UUID" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE DEFAULT default_l2_gw_service_uuid $DEFAULT_L2_GW_SVC_UUID From 025fc5e5f5d7f92f0d0bda7032cf1782b029f28a Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Tue, 13 Aug 2013 18:55:33 +0200 Subject: [PATCH 0294/4704] Faster old oslo.config detection Just search in the path where python searches for modules. Let's use python for searching, it knows the exact rules. Change-Id: I659f734c418ab5e56f4956f418af48dfbe054c8a --- lib/oslo | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/oslo b/lib/oslo index de5ec4e83e..449c4de17f 100644 --- a/lib/oslo +++ b/lib/oslo @@ -41,7 +41,7 @@ function install_oslo() { # cleanup_oslo() - purge possibly old versions of oslo function cleanup_oslo() { # this means we've got an old olso installed, lets get rid of it - if find /usr | grep oslo.config | grep -v oslo.config.egg-link > /dev/null; then + if ! python -c 'import oslo.config' 2>/dev/null; then echo "Found old oslo.config... 
removing to ensure consistency" local PIP_CMD=$(get_pip_command) pip_install olso.config From cfb708d9c53e1680f21ef63c1715ca2693b9758d Mon Sep 17 00:00:00 2001 From: Lorin Hochstein Date: Tue, 13 Aug 2013 14:34:18 -0400 Subject: [PATCH 0295/4704] Redirect dpkg -l stderr to /dev/null Fixes bug 1211413 Change-Id: I33a7e1e8fb3755c69ca0570e333e4908cb6f3da4 --- functions | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/functions b/functions index e9c60615e6..6ae650eb8e 100644 --- a/functions +++ b/functions @@ -909,7 +909,7 @@ function is_package_installed() { fi if [[ "$os_PACKAGE" = "deb" ]]; then - dpkg -l "$@" > /dev/null + dpkg -l "$@" > /dev/null 2> /dev/null elif [[ "$os_PACKAGE" = "rpm" ]]; then rpm --quiet -q "$@" else From 4669122dc8e50a3c0cead54e227a5a46508fed50 Mon Sep 17 00:00:00 2001 From: Bob Ball Date: Mon, 12 Aug 2013 17:28:50 +0100 Subject: [PATCH 0296/4704] Add XenServer to GetDistro's logic This is primarily to satisfy the expectations of various scripts that os_* variables are defined, and will result in a distro similar to "xs6.1.0-59235p" Fixes bug 1211001 Change-Id: I951e1eb3a5e25f4d8773a11b15cf38157b6492fe --- functions | 5 ++++- tools/xen/install_os_domU.sh | 4 ++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/functions b/functions index e9c60615e6..1d50651891 100644 --- a/functions +++ b/functions @@ -387,8 +387,9 @@ GetOSVersion() { # CentOS release 5.5 (Final) # CentOS Linux release 6.0 (Final) # Fedora release 16 (Verne) + # XenServer release 6.2.0-70446c (xenenterprise) os_CODENAME="" - for r in "Red Hat" CentOS Fedora; do + for r in "Red Hat" CentOS Fedora XenServer; do os_VENDOR=$r if [[ -n "`grep \"$r\" /etc/redhat-release`" ]]; then ver=`sed -e 's/^.* \(.*\) (\(.*\)).*$/\1\|\2/' /etc/redhat-release` @@ -451,6 +452,8 @@ function GetDistro() { elif [[ "$os_VENDOR" =~ (Red Hat) || "$os_VENDOR" =~ (CentOS) ]]; then # Drop the . 
release as we assume it's compatible DISTRO="rhel${os_RELEASE::1}" + elif [[ "$os_VENDOR" =~ (XenServer) ]]; then + DISTRO="xs$os_RELEASE" else # Catch-all for now is Vendor + Release + Update DISTRO="$os_VENDOR-$os_RELEASE.$os_UPDATE" diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh index 92b131795b..997644d018 100755 --- a/tools/xen/install_os_domU.sh +++ b/tools/xen/install_os_domU.sh @@ -29,6 +29,10 @@ THIS_DIR=$(cd $(dirname "$0") && pwd) # xapi functions . $THIS_DIR/functions +# Determine what system we are running on. +# Might not be XenServer if we're using xenserver-core +GetDistro + # # Get Settings # From 6769b166b10272947db77c3f9bfb0d115e8d0a2d Mon Sep 17 00:00:00 2001 From: Nachi Ueno Date: Mon, 12 Aug 2013 18:18:56 -0700 Subject: [PATCH 0297/4704] Added functions to get an IP on an instance Some gating failures appear to be caused by failing to get the IP address of an instance. However the current exercise didn't log the return value. In this commit, we add a get_instance_ip function with error handling support, and apply it on the exercise. Change-Id: I8e17ba68093faafe58a98eb780a032368eea38aa --- exercises/boot_from_volume.sh | 3 ++- exercises/floating_ips.sh | 2 +- exercises/neutron-adv-test.sh | 12 ++++++------ exercises/volumes.sh | 3 ++- functions | 13 +++++++++++++ 5 files changed, 24 insertions(+), 9 deletions(-) diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh index a3a14eb5e4..36524ede4b 100755 --- a/exercises/boot_from_volume.sh +++ b/exercises/boot_from_volume.sh @@ -174,7 +174,8 @@ if ! timeout $ACTIVE_TIMEOUT sh -c "while ! 
nova show $VM_UUID | grep status | g fi # Get the instance IP -IP=$(nova show $VM_UUID | grep "$PRIVATE_NETWORK_NAME" | get_field 2) +IP=$(get_instance_ip $VM_UUID $PRIVATE_NETWORK_NAME) + die_if_not_set $LINENO IP "Failure retrieving IP address" # Private IPs can be pinged in single node deployments diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh index ac65cf7772..f93a727df6 100755 --- a/exercises/floating_ips.sh +++ b/exercises/floating_ips.sh @@ -132,7 +132,7 @@ if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | g fi # Get the instance IP -IP=$(nova show $VM_UUID | grep "$PRIVATE_NETWORK_NAME" | get_field 2) +IP=$(get_instance_ip $VM_UUID $PRIVATE_NETWORK_NAME) die_if_not_set $LINENO IP "Failure retrieving IP address" # Private IPs can be pinged in single node deployments diff --git a/exercises/neutron-adv-test.sh b/exercises/neutron-adv-test.sh index 4367e2e3c1..abb29cf333 100755 --- a/exercises/neutron-adv-test.sh +++ b/exercises/neutron-adv-test.sh @@ -272,12 +272,12 @@ function create_vms { } function ping_ip { - # Test agent connection. Assumes namespaces are disabled, and - # that DHCP is in use, but not L3 - local VM_NAME=$1 - local NET_NAME=$2 - IP=`nova show $VM_NAME | grep 'network' | awk '{print $5}'` - ping_check $NET_NAME $IP $BOOT_TIMEOUT + # Test agent connection. Assumes namespaces are disabled, and + # that DHCP is in use, but not L3 + local VM_NAME=$1 + local NET_NAME=$2 + IP=$(get_instance_ip $VM_NAME $NET_NAME) + ping_check $NET_NAME $IP $BOOT_TIMEOUT } function check_vm { diff --git a/exercises/volumes.sh b/exercises/volumes.sh index b2b391c5d7..028d19b36a 100755 --- a/exercises/volumes.sh +++ b/exercises/volumes.sh @@ -135,7 +135,8 @@ if ! timeout $ACTIVE_TIMEOUT sh -c "while ! 
nova show $VM_UUID | grep status | g fi # Get the instance IP -IP=$(nova show $VM_UUID | grep "$PRIVATE_NETWORK_NAME" | get_field 2) +IP=$(get_instance_ip $VM_UUID $PRIVATE_NETWORK_NAME) + die_if_not_set $LINENO IP "Failure retrieving IP address" # Private IPs can be pinged in single node deployments diff --git a/functions b/functions index 087a0ea844..b56df08673 100644 --- a/functions +++ b/functions @@ -1433,6 +1433,19 @@ function _ping_check_novanet() { fi } +# Get ip of instance +function get_instance_ip(){ + local vm_id=$1 + local network_name=$2 + local nova_result="$(nova show $vm_id)" + local ip=$(echo "$nova_result" | grep "$network_name" | get_field 2) + if [[ $ip = "" ]];then + echo "$nova_result" + die $LINENO "[Fail] Coudn't get ipaddress of VM" + exit 1 + fi + echo $ip +} # ssh check From 248a8cce71754b4a16c60bd161ec566098b81305 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Tue, 6 Aug 2013 08:00:06 +0200 Subject: [PATCH 0298/4704] .conf suffix for the horizon config on Fedora On Fedora by default the *.conf imported only from the /etc/httpd/conf.d/. Changing the default config name to horizon.conf with all distribution in order to have a simple logic. 
Change-Id: I08c3e825f697640fd73ac1f9c569f313abc3c04f --- lib/horizon | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/horizon b/lib/horizon index 89bd65901c..a879d1e089 100644 --- a/lib/horizon +++ b/lib/horizon @@ -106,13 +106,13 @@ function init_horizon() { sudo mkdir -p $HORIZON_DIR/.blackhole HORIZON_REQUIRE='' - local horizon_conf=/etc/$APACHE_NAME/$APACHE_CONF_DIR/horizon + local horizon_conf=/etc/$APACHE_NAME/$APACHE_CONF_DIR/horizon.conf if is_ubuntu; then # Clean up the old config name sudo rm -f /etc/apache2/sites-enabled/000-default # Be a good citizen and use the distro tools here sudo touch $horizon_conf - sudo a2ensite horizon + sudo a2ensite horizon.conf # WSGI isn't enabled by default, enable it sudo a2enmod wsgi elif is_fedora; then From c2a4c9238d4004f0271d51a5fc9b66bb94ba3a8f Mon Sep 17 00:00:00 2001 From: Alessio Ababilov Date: Fri, 16 Aug 2013 21:53:22 +0300 Subject: [PATCH 0299/4704] Fix 'olso' typo in lib/oslo This enables commit If92073be5a431840701c952a194e63a7c452c9ca for cleaning up potentially installed older oslo.config. Here are its original details. If the user had oslo.config installed prior to us setting up the oslo.config out of git they can get themselves into this very funny situation where pip doesn't see oslo.config 1.1.x, however some packages might. This manifests itself as a user error trying to start nova-api which uses DeprecatedOption, not in oslo.config 1.1.x Because of the funny state pip is in, you can't uninstall oslo.config. So in these situations, if we see old oslo.config in the filesystem, pip install / uninstall it to ensure that everyone ends up using the git version instead. To reduce the amount of user confusion, do this on every install_oslo for a while, which we can purge after Havana ships. 
Change-Id: I7fa0b70497bf5622f4638da284afe5363a004d3c Fixes: bug #1213089 --- lib/oslo | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/oslo b/lib/oslo index 449c4de17f..f77a4fa941 100644 --- a/lib/oslo +++ b/lib/oslo @@ -40,12 +40,12 @@ function install_oslo() { # cleanup_oslo() - purge possibly old versions of oslo function cleanup_oslo() { - # this means we've got an old olso installed, lets get rid of it + # this means we've got an old oslo installed, lets get rid of it if ! python -c 'import oslo.config' 2>/dev/null; then echo "Found old oslo.config... removing to ensure consistency" local PIP_CMD=$(get_pip_command) - pip_install olso.config - sudo $PIP_CMD uninstall -y olso.config + pip_install oslo.config + sudo $PIP_CMD uninstall -y oslo.config fi } From 41815cdc7bcbd91500f9efad0f4e8d57fa4b284c Mon Sep 17 00:00:00 2001 From: Clark Boylan Date: Fri, 16 Aug 2013 14:57:38 -0700 Subject: [PATCH 0300/4704] Echo service start failures. * functions: Previously screen_it would log service start failures by touching a file, this isn't very useful when working with Jenkins. Switch to echo'ing that a service failed to start and pipe that through tee so that we can keep the old behavior of touching a file (note this behavior is slightly modified and the touched file will now have contents). 
Change-Id: I2d3f272b9a65a9d64dbbc01373a02fccf52f56a8 --- functions | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/functions b/functions index 37876e0cc2..19cf4ff813 100644 --- a/functions +++ b/functions @@ -1063,7 +1063,7 @@ function screen_it { sleep 1.5 NL=`echo -ne '\015'` - screen -S $SCREEN_NAME -p $1 -X stuff "$2 || touch \"$SERVICE_DIR/$SCREEN_NAME/$1.failure\"$NL" + screen -S $SCREEN_NAME -p $1 -X stuff "$2 || echo \"$1 failed to start\" | tee \"$SERVICE_DIR/$SCREEN_NAME/$1.failure\"$NL" else # Spawn directly without screen run_process "$1" "$2" >$SERVICE_DIR/$SCREEN_NAME/$service.pid From 4f7dccc34729cf15195f80c753e0f8a27c24171c Mon Sep 17 00:00:00 2001 From: joequant Date: Mon, 19 Aug 2013 11:58:25 +0800 Subject: [PATCH 0301/4704] turn off usb_tablet for libvirt This patch turns off usb_tablet for the libvirt nova.conf file. When usb_tablet is turned on, qemu will poll for usb events and this causes CPU usage even when the qemu is idle. Change-Id: I03e260dfd0873b7d15e01c10c206203833d04e73 --- lib/nova | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/nova b/lib/nova index 842c098624..32a51d3d4d 100644 --- a/lib/nova +++ b/lib/nova @@ -451,6 +451,7 @@ function create_nova_conf() { if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then iniset $NOVA_CONF DEFAULT libvirt_type "$LIBVIRT_TYPE" iniset $NOVA_CONF DEFAULT libvirt_cpu_mode "none" + iniset $NOVA_CONF DEFAULT use_usb_tablet "False" fi iniset $NOVA_CONF DEFAULT instance_name_template "${INSTANCE_NAME_PREFIX}%08x" iniset $NOVA_CONF osapi_v3 enabled "True" From 7fb9808e61e9e38eb62a446dee9933d88a6dd086 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 19 Aug 2013 16:16:54 +0200 Subject: [PATCH 0302/4704] Remove useless sources.list Change-Id: I136b568f04f1de35556aa7c3d546c44402254eef --- files/sources.list | 9 --------- 1 file changed, 9 deletions(-) delete mode 100644 files/sources.list diff --git a/files/sources.list b/files/sources.list deleted file mode 100644 index 
77a1bfb52e..0000000000 --- a/files/sources.list +++ /dev/null @@ -1,9 +0,0 @@ -deb http://mirror.rackspace.com/ubuntu/ %DIST% main restricted -deb http://mirror.rackspace.com/ubuntu/ %DIST%-updates main restricted -deb http://mirror.rackspace.com/ubuntu/ %DIST% universe -deb http://mirror.rackspace.com/ubuntu/ %DIST%-updates universe -deb http://mirror.rackspace.com/ubuntu/ %DIST% multiverse -deb http://mirror.rackspace.com/ubuntu/ %DIST%-updates multiverse -deb http://security.ubuntu.com/ubuntu %DIST%-security main restricted -deb http://security.ubuntu.com/ubuntu %DIST%-security universe -deb http://security.ubuntu.com/ubuntu %DIST%-security multiverse From 71d5630c4ad2dd74964119a4ad8f16833f61fc21 Mon Sep 17 00:00:00 2001 From: Christian Berendt Date: Mon, 22 Jul 2013 11:37:42 +0200 Subject: [PATCH 0303/4704] Adds new tag for testonly packages in package lists Also introduces a new parameter INSTALL_TESTONLY_PACKAGES in the stackrc with False as default value. Setting it to True stack.sh will install all packages tagged with the tag testonly in the package lists. 
Includes needed packages for Ubuntu and Fedora fixes bug #1203680 Change-Id: I911a6601819a34262853bba0658f6751148bfbec --- files/apts/glance | 6 ++++++ files/rpms/glance | 6 ++++++ functions | 26 +++++++++++++++++++++++--- stackrc | 3 +++ 4 files changed, 38 insertions(+), 3 deletions(-) diff --git a/files/apts/glance b/files/apts/glance index a05e9f2ea7..26826a53c7 100644 --- a/files/apts/glance +++ b/files/apts/glance @@ -1,5 +1,10 @@ gcc +libffi-dev # testonly +libmysqlclient-dev # testonly +libpq-dev # testonly +libssl-dev # testonly libxml2-dev +libxslt1-dev # testonly python-dev python-eventlet python-routes @@ -10,3 +15,4 @@ python-wsgiref python-pastedeploy python-xattr python-iso8601 +zlib1g-dev # testonly diff --git a/files/rpms/glance b/files/rpms/glance index 0f113eaa01..dd66171f7a 100644 --- a/files/rpms/glance +++ b/files/rpms/glance @@ -1,5 +1,10 @@ gcc +libffi-devel # testonly libxml2-devel +libxslt-devel # testonly +mysql-devel # testonly +openssl-devel # testonly +postgresql-devel # testonly python-argparse python-devel python-eventlet @@ -9,3 +14,4 @@ python-routes python-sqlalchemy python-wsgiref pyxattr +zlib-devel # testonly diff --git a/functions b/functions index 087a0ea844..5546defa22 100644 --- a/functions +++ b/functions @@ -317,16 +317,36 @@ function get_packages() { continue fi + # Assume we want this package + package=${line%#*} + inst_pkg=1 + + # Look for # dist:xxx in comment if [[ $line =~ (.*)#.*dist:([^ ]*) ]]; then # We are using BASH regexp matching feature. package=${BASH_REMATCH[1]} distros=${BASH_REMATCH[2]} # In bash ${VAR,,} will lowecase VAR - [[ ${distros,,} =~ ${DISTRO,,} ]] && echo $package - continue + # Look for a match in the distro list + if [[ ! ${distros,,} =~ ${DISTRO,,} ]]; then + # If no match then skip this package + inst_pkg=0 + fi + fi + + # Look for # testonly in comment + if [[ $line =~ (.*)#.*testonly.* ]]; then + package=${BASH_REMATCH[1]} + # Are we installing test packages? 
(test for the default value) + if [[ $INSTALL_TESTONLY_PACKAGES = "False" ]]; then + # If not installing test packages the skip this package + inst_pkg=0 + fi fi - echo ${line%#*} + if [[ $inst_pkg = 1 ]]; then + echo $package + fi done IFS=$OIFS done diff --git a/stackrc b/stackrc index c81906ac8c..8b97536b50 100644 --- a/stackrc +++ b/stackrc @@ -275,6 +275,9 @@ USE_SCREEN=${SCREEN_DEV:-$USE_SCREEN} # Set default screen name SCREEN_NAME=${SCREEN_NAME:-stack} +# Do not install packages tagged with 'testonly' by default +INSTALL_TESTONLY_PACKAGES=${INSTALL_TESTONLY_PACKAGES:-False} + # Local variables: # mode: shell-script # End: From fac533e38db871631cee33d0e3c94884035851b8 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Wed, 14 Aug 2013 16:04:01 +0200 Subject: [PATCH 0304/4704] Configure bash completion Add bash completion rules to the /etc/bash_completion.d from the cinder, neutron, keystone, nova and nova-manage. This is a very fast operation and makes the cli usage easier. Change-Id: Icdcdaf55d58efaaa1afe25fd55f088bf7dc8b3f1 --- lib/cinder | 1 + lib/keystone | 1 + lib/neutron | 1 + lib/nova | 2 ++ 4 files changed, 5 insertions(+) diff --git a/lib/cinder b/lib/cinder index 54cf844831..6fc877d279 100644 --- a/lib/cinder +++ b/lib/cinder @@ -468,6 +468,7 @@ function install_cinder() { function install_cinderclient() { git_clone $CINDERCLIENT_REPO $CINDERCLIENT_DIR $CINDERCLIENT_BRANCH setup_develop $CINDERCLIENT_DIR + sudo install -D -m 0644 -o $STACK_USER {$CINDERCLIENT_DIR/tools/,/etc/bash_completion.d/}cinder.bash_completion } # apply config.d approach for cinder volumes directory diff --git a/lib/keystone b/lib/keystone index e7e0544bb4..0a35dd5d80 100644 --- a/lib/keystone +++ b/lib/keystone @@ -289,6 +289,7 @@ function init_keystone() { function install_keystoneclient() { git_clone $KEYSTONECLIENT_REPO $KEYSTONECLIENT_DIR $KEYSTONECLIENT_BRANCH setup_develop $KEYSTONECLIENT_DIR + sudo install -D -m 0644 -o $STACK_USER 
{$KEYSTONECLIENT_DIR/tools/,/etc/bash_completion.d/}keystone.bash_completion } # install_keystone() - Collect source and prepare diff --git a/lib/neutron b/lib/neutron index 31876dee88..c46003b08b 100644 --- a/lib/neutron +++ b/lib/neutron @@ -382,6 +382,7 @@ function install_neutron() { function install_neutronclient() { git_clone $NEUTRONCLIENT_REPO $NEUTRONCLIENT_DIR $NEUTRONCLIENT_BRANCH setup_develop $NEUTRONCLIENT_DIR + sudo install -D -m 0644 -o $STACK_USER {$NEUTRONCLIENT_DIR/tools/,/etc/bash_completion.d/}neutron.bash_completion } # install_neutron_agent_packages() - Collect source and prepare diff --git a/lib/nova b/lib/nova index 842c098624..bb9bca2533 100644 --- a/lib/nova +++ b/lib/nova @@ -645,6 +645,7 @@ function init_nova() { function install_novaclient() { git_clone $NOVACLIENT_REPO $NOVACLIENT_DIR $NOVACLIENT_BRANCH setup_develop $NOVACLIENT_DIR + sudo install -D -m 0644 -o $STACK_USER {$NOVACLIENT_DIR/tools/,/etc/bash_completion.d/}nova.bash_completion } # install_nova() - Collect source and prepare @@ -682,6 +683,7 @@ function install_nova() { git_clone $NOVA_REPO $NOVA_DIR $NOVA_BRANCH setup_develop $NOVA_DIR + sudo install -D -m 0644 -o $STACK_USER {$NOVA_DIR/tools/,/etc/bash_completion.d/}nova-manage.bash_completion } # start_nova_api() - Start the API process ahead of other things From ce696b60d77752f74924fa133c45910e9d0ef706 Mon Sep 17 00:00:00 2001 From: Roman Prykhodchenko Date: Fri, 9 Aug 2013 10:40:45 +0300 Subject: [PATCH 0305/4704] Basic support of Ironic Ironic is an OpenStack project that brings a separate service for baremetal provisioning. Currently Ironic is in incubation but it needs to have basic support in devstack to provide automatic deployment testing. 
Change-Id: Ide65a1379fa207a6c8b2c7d9a4f9c874b10fd9ba --- lib/ironic | 222 +++++++++++++++++++++++++++++++++++++++++++++++++++++ stack.sh | 21 +++++ stackrc | 4 + unstack.sh | 7 ++ 4 files changed, 254 insertions(+) create mode 100644 lib/ironic diff --git a/lib/ironic b/lib/ironic new file mode 100644 index 0000000000..2ce5038ea4 --- /dev/null +++ b/lib/ironic @@ -0,0 +1,222 @@ +# lib/ironic +# Functions to control the configuration and operation of the **Ironic** service + +# Dependencies: +# ``functions`` file +# ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined +# ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined +# ``SERVICE_HOST`` +# ``KEYSTONE_TOKEN_FORMAT`` must be defined + +# ``stack.sh`` calls the entry points in this order: +# +# install_ironic +# configure_ironic +# init_ironic +# start_ironic +# stop_ironic +# cleanup_ironic + +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + +# Set up default directories +IRONIC_DIR=$DEST/ironic +IRONIC_AUTH_CACHE_DIR=${IRONIC_AUTH_CACHE_DIR:-/var/cache/ironic} +IRONIC_CONF_DIR=${IRONIC_CONF_DIR:-/etc/ironic} +IRONIC_CONF_FILE=$IRONIC_CONF_DIR/ironic.conf +IRONIC_ROOTWRAP_CONF=$IRONIC_CONF_DIR/rootwrap.conf +IRONIC_ROOTWRAP_FILTERS=$IRONIC_CONF_DIR/rootwrap.d +IRONIC_POLICY_JSON=$IRONIC_CONF_DIR/policy.json + +# Support entry points installation of console scripts +IRONIC_BIN_DIR=$(get_python_exec_prefix) + +# Ironic connection info. Note the port must be specified. +IRONIC_SERVICE_PROTOCOL=http +IRONIC_HOSTPORT=${IRONIC_HOSTPORT:-$SERVICE_HOST:6385} + + +# Functions +# --------- + +# cleanup_ironic() - Remove residual data files, anything left over from previous +# runs that would need to clean up. +function cleanup_ironic() { + sudo rm -rf $IRONIC_AUTH_CACHE_DIR +} + +# configure_ironic() - Set config files, create data dirs, etc +function configure_ironic() { + if [[ ! 
-d $IRONIC_CONF_DIR ]]; then + sudo mkdir -p $IRONIC_CONF_DIR + fi + sudo chown $STACK_USER $IRONIC_CONF_DIR + + # Copy over ironic configuration file and configure common parameters. + cp $IRONIC_DIR/etc/ironic/ironic.conf.sample $IRONIC_CONF_FILE + iniset $IRONIC_CONF_FILE DEFAULT debug True + inicomment $IRONIC_CONF_FILE DEFAULT log_file + iniset $IRONIC_CONF_FILE DEFAULT sql_connection `database_connection_url ironic` + iniset $IRONIC_CONF_FILE DEFAULT use_syslog $SYSLOG + + # Configure Ironic conductor, if it was enabled. + if is_service_enabled ir-cond; then + configure_ironic_conductor + fi + + # Configure Ironic API, if it was enabled. + if is_service_enabled ir-api; then + configure_ironic_api + fi +} + +# configure_ironic_api() - Is used by configure_ironic(). Performs +# API specific configuration. +function configure_ironic_api() { + iniset $IRONIC_CONF_FILE keystone_authtoken auth_host $KEYSTONE_AUTH_HOST + iniset $IRONIC_CONF_FILE keystone_authtoken auth_port $KEYSTONE_AUTH_PORT + iniset $IRONIC_CONF_FILE keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL + iniset $IRONIC_CONF_FILE keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/ + iniset $IRONIC_CONF_FILE keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME + iniset $IRONIC_CONF_FILE keystone_authtoken admin_user ironic + iniset $IRONIC_CONF_FILE keystone_authtoken admin_password $SERVICE_PASSWORD + if is_service_enabled qpid; then + iniset $IRONIC_CONF_FILE DEFAULT notifier_strategy qpid + elif [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then + iniset $IRONIC_CONF_FILE DEFAULT notifier_strategy rabbit + fi + iniset_rpc_backend ironic $IRONIC_CONF_FILE DEFAULT + iniset $IRONIC_CONF_FILE keystone_authtoken signing_dir $IRONIC_AUTH_CACHE_DIR/api + + cp -p $IRONIC_DIR/etc/ironic/policy.json $IRONIC_POLICY_JSON +} + +# configure_ironic_conductor() - Is used by configure_ironic(). +# Sets conductor specific settings. 
+function configure_ironic_conductor() { + cp $IRONIC_DIR/etc/ironic/rootwrap.conf $IRONIC_ROOTWRAP_CONF + cp -r $IRONIC_DIR/etc/ironic/rootwrap.d $IRONIC_ROOTWRAP_FILTERS + + iniset $IRONIC_CONF DEFAULT rootwrap_config $IRONIC_ROOTWRAP_CONF +} + +# create_ironic_cache_dir() - Part of the init_ironic() process +function create_ironic_cache_dir() { + # Create cache dir + sudo mkdir -p $IRONIC_AUTH_CACHE_DIR/api + sudo chown $STACK_USER $IRONIC_AUTH_CACHE_DIR/api + rm -f $IRONIC_AUTH_CACHE_DIR/api/* + sudo mkdir -p $IRONIC_AUTH_CACHE_DIR/registry + sudo chown $STACK_USER $IRONIC_AUTH_CACHE_DIR/registry + rm -f $IRONIC_AUTH_CACHE_DIR/registry/* +} + +# create_ironic_accounts() - Set up common required ironic accounts + +# Tenant User Roles +# ------------------------------------------------------------------ +# service ironic admin # if enabled +create_ironic_accounts() { + + SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") + ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }") + + # Ironic + if [[ "$ENABLED_SERVICES" =~ "ir-api" ]]; then + IRONIC_USER=$(keystone user-create \ + --name=ironic \ + --pass="$SERVICE_PASSWORD" \ + --tenant_id $SERVICE_TENANT \ + --email=ironic@example.com \ + | grep " id " | get_field 2) + keystone user-role-add \ + --tenant_id $SERVICE_TENANT \ + --user_id $IRONIC_USER \ + --role_id $ADMIN_ROLE + if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then + IRONIC_SERVICE=$(keystone service-create \ + --name=ironic \ + --type=baremetal \ + --description="Ironic baremetal provisioning service" \ + | grep " id " | get_field 2) + keystone endpoint-create \ + --region RegionOne \ + --service_id $IRONIC_SERVICE \ + --publicurl "$IRONIC_SERVICE_PROTOCOL://$IRONIC_HOSTPORT/v1/" \ + --adminurl "$IRONIC_SERVICE_PROTOCOL://$IRONIC_HOSTPORT/v1/" \ + --internalurl "$IRONIC_SERVICE_PROTOCOL://$IRONIC_HOSTPORT/v1/" + fi + fi +} + + +# init_ironic() - Initialize databases, etc. 
+function init_ironic() { + # (Re)create ironic database + recreate_database ironic utf8 + + # Migrate ironic database + $IRONIC_BIN_DIR/ironic-dbsync + + create_ironic_cache_dir + + # Create keystone artifacts for Ironic. + create_ironic_accounts +} + +# install_ironic() - Collect source and prepare +function install_ironic() { + git_clone $IRONIC_REPO $IRONIC_DIR $IRONIC_BRANCH + setup_develop $IRONIC_DIR +} + +# start_ironic() - Start running processes, including screen +function start_ironic() { + # Start Ironic API server, if enabled. + if is_service_enabled ir-api; then + start_ironic_api + fi + + # Start Ironic conductor, if enabled. + if is_service_enabled ir-cond; then + start_ironic_conductor + fi +} + +# start_ironic_api() - Used by start_ironic(). +# Starts Ironic API server. +function start_ironic_api() { + screen_it ir-api "cd $IRONIC_DIR; $IRONIC_BIN_DIR/ironic-api --config-file=$IRONIC_CONF_FILE" + echo "Waiting for ir-api ($IRONIC_HOSTPORT) to start..." + if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- http://$IRONIC_HOSTPORT; do sleep 1; done"; then + die $LINENO "ir-api did not start" + fi +} + +# start_ironic_conductor() - Used by start_ironic(). +# Starts Ironic conductor. +function start_ironic_conductor() { + screen_it ir-cond "cd $IRONIC_DIR; $IRONIC_BIN_DIR/ironic-conductor --config-file=$IRONIC_CONF_FILE" + # TODO(romcheg): Find a way to check whether the conductor has started. 
+} + +# stop_ironic() - Stop running processes +function stop_ironic() { + # Kill the Ironic screen windows + screen -S $SCREEN_NAME -p ir-api -X kill + screen -S $SCREEN_NAME -p ir-cond -X kill +} + + +# Restore xtrace +$XTRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/stack.sh b/stack.sh index 8f59328792..5094e25b67 100755 --- a/stack.sh +++ b/stack.sh @@ -318,6 +318,7 @@ source $TOP_DIR/lib/heat source $TOP_DIR/lib/neutron source $TOP_DIR/lib/baremetal source $TOP_DIR/lib/ldap +source $TOP_DIR/lib/ironic # Set the destination directories for other OpenStack projects OPENSTACKCLIENT_DIR=$DEST/python-openstackclient @@ -778,6 +779,11 @@ if is_service_enabled tls-proxy; then # don't be naive and add to existing line! fi +if is_service_enabled ir-api ir-cond; then + install_ironic + configure_ironic +fi + if [[ $TRACK_DEPENDS = True ]]; then $DEST/.venv/bin/pip freeze > $DEST/requires-post-pip if ! diff -Nru $DEST/requires-pre-pip $DEST/requires-post-pip > $DEST/requires.diff; then @@ -946,6 +952,15 @@ if is_service_enabled g-reg; then init_glance fi +# Ironic +# ------ + +if is_service_enabled ir-api ir-cond; then + echo_summary "Configuring Ironic" + init_ironic +fi + + # Neutron # ------- @@ -1186,6 +1201,12 @@ if is_service_enabled g-api g-reg; then start_glance fi +# Launch the Ironic services +if is_service_enabled ir-api ir-cond; then + echo_summary "Starting Ironic" + start_ironic +fi + # Create an access key and secret key for nova ec2 register image if is_service_enabled key && is_service_enabled swift3 && is_service_enabled nova; then NOVA_USER_ID=$(keystone user-list | grep ' nova ' | get_field 1) diff --git a/stackrc b/stackrc index c81906ac8c..b3e2e148ce 100644 --- a/stackrc +++ b/stackrc @@ -96,6 +96,10 @@ HEATCLIENT_BRANCH=${HEATCLIENT_BRANCH:-master} HORIZON_REPO=${HORIZON_REPO:-${GIT_BASE}/openstack/horizon.git} HORIZON_BRANCH=${HORIZON_BRANCH:-master} +# baremetal provisionint service 
+IRONIC_REPO=${IRONIC_REPO:-${GIT_BASE}/openstack/ironic.git} +IRONIC_BRANCH=${IRONIC_BRANCH:-master} + # unified auth system (manages accounts/tokens) KEYSTONE_REPO=${KEYSTONE_REPO:-${GIT_BASE}/openstack/keystone.git} KEYSTONE_BRANCH=${KEYSTONE_BRANCH:-master} diff --git a/unstack.sh b/unstack.sh index 2268b90458..84eee4f3c1 100755 --- a/unstack.sh +++ b/unstack.sh @@ -33,6 +33,7 @@ source $TOP_DIR/lib/cinder source $TOP_DIR/lib/horizon source $TOP_DIR/lib/swift source $TOP_DIR/lib/neutron +source $TOP_DIR/lib/ironic # Determine what system we are running on. This provides ``os_VENDOR``, # ``os_RELEASE``, ``os_UPDATE``, ``os_PACKAGE``, ``os_CODENAME`` @@ -71,6 +72,12 @@ if is_service_enabled s-proxy; then cleanup_swift fi +# Ironic runs daemons +if is_service_enabled ir-api ir-cond; then + stop_ironic + cleanup_ironic +fi + # Apache has the WSGI processes if is_service_enabled horizon; then stop_horizon From 73b21910123704ac64ca3d2ba7f50e90e248d7ea Mon Sep 17 00:00:00 2001 From: Kyle Mestery Date: Thu, 22 Aug 2013 11:25:21 +0000 Subject: [PATCH 0306/4704] Add support for setting Neutron DHCP agent options. This patch adds support for setting arbitrary Neutron DHCP agent options. An example of using it would be to add this to your localrc: Q_DHCP_EXTRA_DEFAULT_OPTS=(enable_multihost=True) Change-Id: I56d267eafa06c52c3867e3396483f5fde3ee5570 --- lib/neutron | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/lib/neutron b/lib/neutron index 31876dee88..3ab6a4cde9 100644 --- a/lib/neutron +++ b/lib/neutron @@ -541,6 +541,14 @@ function _configure_neutron_dhcp_agent() { iniset $Q_DHCP_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE iniset $Q_DHCP_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND" + # Define extra "DEFAULT" configuration options when q-dhcp is configured by + # defining the array ``Q_DHCP_EXTRA_DEFAULT_OPTS``. 
+ # For Example: ``Q_DHCP_EXTRA_DEFAULT_OPTS=(foo=true bar=2)`` + for I in "${Q_DHCP_EXTRA_DEFAULT_OPTS[@]}"; do + # Replace the first '=' with ' ' for iniset syntax + iniset $Q_DHCP_CONF_FILE DEFAULT ${I/=/ } + done + _neutron_setup_interface_driver $Q_DHCP_CONF_FILE neutron_plugin_configure_dhcp_agent From 032e45468ecf9f8e1ee6745f03a43e8ec3dd2b59 Mon Sep 17 00:00:00 2001 From: Yong Sheng Gong Date: Sun, 25 Aug 2013 10:21:10 +0800 Subject: [PATCH 0307/4704] change quantum into neutron for neutron configuration values in nova.conf Change quantum into neutron Since nova already supports the new neutron items and values Change-Id: I747eae613c0ec28596ea67da4e98fb3d7f6d93bc Fixes: Bug #1216455 --- lib/neutron | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/lib/neutron b/lib/neutron index 31876dee88..c5874602d2 100644 --- a/lib/neutron +++ b/lib/neutron @@ -250,18 +250,18 @@ function configure_neutron() { } function create_nova_conf_neutron() { - iniset $NOVA_CONF DEFAULT network_api_class "nova.network.quantumv2.api.API" - iniset $NOVA_CONF DEFAULT quantum_admin_username "$Q_ADMIN_USERNAME" - iniset $NOVA_CONF DEFAULT quantum_admin_password "$SERVICE_PASSWORD" - iniset $NOVA_CONF DEFAULT quantum_admin_auth_url "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v2.0" - iniset $NOVA_CONF DEFAULT quantum_auth_strategy "$Q_AUTH_STRATEGY" - iniset $NOVA_CONF DEFAULT quantum_admin_tenant_name "$SERVICE_TENANT_NAME" - iniset $NOVA_CONF DEFAULT quantum_region_name "RegionOne" - iniset $NOVA_CONF DEFAULT quantum_url "http://$Q_HOST:$Q_PORT" + iniset $NOVA_CONF DEFAULT network_api_class "nova.network.neutronv2.api.API" + iniset $NOVA_CONF DEFAULT neutron_admin_username "$Q_ADMIN_USERNAME" + iniset $NOVA_CONF DEFAULT neutron_admin_password "$SERVICE_PASSWORD" + iniset $NOVA_CONF DEFAULT neutron_admin_auth_url "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v2.0" + iniset $NOVA_CONF DEFAULT 
neutron_auth_strategy "$Q_AUTH_STRATEGY" + iniset $NOVA_CONF DEFAULT neutron_admin_tenant_name "$SERVICE_TENANT_NAME" + iniset $NOVA_CONF DEFAULT neutron_region_name "RegionOne" + iniset $NOVA_CONF DEFAULT neutron_url "http://$Q_HOST:$Q_PORT" if [[ "$Q_USE_SECGROUP" == "True" ]]; then LIBVIRT_FIREWALL_DRIVER=nova.virt.firewall.NoopFirewallDriver - iniset $NOVA_CONF DEFAULT security_group_api quantum + iniset $NOVA_CONF DEFAULT security_group_api neutron fi # set NOVA_VIF_DRIVER and optionally set options in nova_conf @@ -270,7 +270,7 @@ function create_nova_conf_neutron() { iniset $NOVA_CONF DEFAULT libvirt_vif_driver "$NOVA_VIF_DRIVER" iniset $NOVA_CONF DEFAULT linuxnet_interface_driver "$LINUXNET_VIF_DRIVER" if is_service_enabled q-meta; then - iniset $NOVA_CONF DEFAULT service_quantum_metadata_proxy "True" + iniset $NOVA_CONF DEFAULT service_neutron_metadata_proxy "True" fi } From 8535d8b3fc283ac4ebb7a851b19bf2bff36d78d0 Mon Sep 17 00:00:00 2001 From: Yong Sheng Gong Date: Sun, 25 Aug 2013 11:21:13 +0800 Subject: [PATCH 0308/4704] use keystone service port instead of admin port Change-Id: Iaf1848ecabf100171f741fde0efee5d8f65b7795 Fixes: Bug #1214921 --- lib/neutron | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/lib/neutron b/lib/neutron index 31876dee88..1084e65785 100644 --- a/lib/neutron +++ b/lib/neutron @@ -577,7 +577,7 @@ function _configure_neutron_metadata_agent() { iniset $Q_META_CONF_FILE DEFAULT nova_metadata_ip $Q_META_DATA_IP iniset $Q_META_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND" - _neutron_setup_keystone $Q_META_CONF_FILE DEFAULT True True + _neutron_setup_keystone $Q_META_CONF_FILE DEFAULT True True True } @@ -690,11 +690,16 @@ function _neutron_setup_keystone() { local section=$2 local use_auth_url=$3 local skip_auth_cache=$4 + local use_service_port=$5 + local keystone_port=$KEYSTONE_AUTH_PORT + if [[ -n $use_service_port ]]; then + keystone_port=$KEYSTONE_SERVICE_PORT + fi if [[ -n $use_auth_url ]]; then - 
iniset $conf_file $section auth_url "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v2.0" + iniset $conf_file $section auth_url "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_AUTH_HOST:$keystone_port/v2.0" else iniset $conf_file $section auth_host $KEYSTONE_SERVICE_HOST - iniset $conf_file $section auth_port $KEYSTONE_AUTH_PORT + iniset $conf_file $section auth_port $keystone_port iniset $conf_file $section auth_protocol $KEYSTONE_SERVICE_PROTOCOL fi iniset $conf_file $section admin_tenant_name $SERVICE_TENANT_NAME From f645a8504a2b0b824cfa6693a49e5032d0b9d1ee Mon Sep 17 00:00:00 2001 From: Angus Salkeld Date: Mon, 26 Aug 2013 10:13:36 +1000 Subject: [PATCH 0309/4704] Add support for heat enviroments heat now has global environments that make it easy to rename and customise resource behaviour. These are yaml files that need to be in /etc/heat/environment.d/ Change-Id: I5a08c6ce8f5d7222f79aab2be0903ba783c10aa1 --- lib/heat | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/lib/heat b/lib/heat index fb4002b7e6..5d6c6aa29b 100644 --- a/lib/heat +++ b/lib/heat @@ -31,6 +31,8 @@ HEAT_DIR=$DEST/heat HEATCLIENT_DIR=$DEST/python-heatclient HEAT_AUTH_CACHE_DIR=${HEAT_AUTH_CACHE_DIR:-/var/cache/heat} HEAT_STANDALONE=`trueorfalse False $HEAT_STANDALONE` +HEAT_CONF_DIR=/etc/heat +HEAT_ENV_DIR=$HEAT_CONF_DIR/environment.d # Functions # --------- @@ -39,13 +41,13 @@ HEAT_STANDALONE=`trueorfalse False $HEAT_STANDALONE` # runs that a clean run would need to clean up function cleanup_heat() { sudo rm -rf $HEAT_AUTH_CACHE_DIR + sudo rm -rf $HEAT_ENV_DIR } # configure_heat() - Set config files, create data dirs, etc function configure_heat() { setup_develop $HEAT_DIR - HEAT_CONF_DIR=/etc/heat if [[ ! 
-d $HEAT_CONF_DIR ]]; then sudo mkdir -p $HEAT_CONF_DIR fi @@ -155,6 +157,12 @@ function configure_heat() { iniset_rpc_backend heat $HEAT_API_CW_CONF DEFAULT + # heat environment + sudo mkdir -p $HEAT_ENV_DIR + sudo chown $STACK_USER $HEAT_ENV_DIR + # copy the default environment + cp $HEAT_DIR/etc/heat/environment.d/* $HEAT_ENV_DIR/ + } # init_heat() - Initialize database From fb71a272db2bc447f2ee7c842f8b245d497b4217 Mon Sep 17 00:00:00 2001 From: Angus Salkeld Date: Mon, 26 Aug 2013 10:15:38 +1000 Subject: [PATCH 0310/4704] Add support for heat resource templates These are resources that are defined as a template and can be customised by a deployer. Change-Id: Ia739a36d627b7cfec26641b89513355e65cd1d1f --- lib/heat | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/lib/heat b/lib/heat index 5d6c6aa29b..8b6fd7fc4b 100644 --- a/lib/heat +++ b/lib/heat @@ -33,6 +33,7 @@ HEAT_AUTH_CACHE_DIR=${HEAT_AUTH_CACHE_DIR:-/var/cache/heat} HEAT_STANDALONE=`trueorfalse False $HEAT_STANDALONE` HEAT_CONF_DIR=/etc/heat HEAT_ENV_DIR=$HEAT_CONF_DIR/environment.d +HEAT_TEMPLATES_DIR=$HEAT_CONF_DIR/templates # Functions # --------- @@ -42,6 +43,7 @@ HEAT_ENV_DIR=$HEAT_CONF_DIR/environment.d function cleanup_heat() { sudo rm -rf $HEAT_AUTH_CACHE_DIR sudo rm -rf $HEAT_ENV_DIR + sudo rm -rf $HEAT_TEMPLATES_DIR } # configure_heat() - Set config files, create data dirs, etc @@ -163,6 +165,12 @@ function configure_heat() { # copy the default environment cp $HEAT_DIR/etc/heat/environment.d/* $HEAT_ENV_DIR/ + # heat template resources. 
+ sudo mkdir -p $HEAT_TEMPLATES_DIR + sudo chown $STACK_USER $HEAT_TEMPLATES_DIR + # copy the default templates + cp $HEAT_DIR/etc/heat/templates/* $HEAT_TEMPLATES_DIR/ + } # init_heat() - Initialize database From dc4f2342dba3bb37c42f1c0782eb2cb82d3a63a3 Mon Sep 17 00:00:00 2001 From: Gary Kotton Date: Mon, 19 Aug 2013 23:46:17 -0700 Subject: [PATCH 0311/4704] VMware: update cinder support for VMware configuration settings The review https://review.openstack.org/#/c/41600 was update to have a 'vmware' prefix for all of the VMware cinder settings. These were previously in a 'vmware' section and now they are in the 'DEFAULT' section. Change-Id: I8eadfb0f64914d3b0667760aff651415df48f627 --- lib/cinder | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/lib/cinder b/lib/cinder index 54cf844831..ec5a3563b6 100644 --- a/lib/cinder +++ b/lib/cinder @@ -289,12 +289,11 @@ function configure_cinder() { fi elif [ "$CINDER_DRIVER" == "vsphere" ]; then echo_summary "Using VMware vCenter driver" - iniset $CINDER_CONF DEFAULT enabled_backends vmware - iniset $CINDER_CONF vmware host_ip "$VMWAREAPI_IP" - iniset $CINDER_CONF vmware host_username "$VMWAREAPI_USER" - iniset $CINDER_CONF vmware host_password "$VMWAREAPI_PASSWORD" - iniset $CINDER_CONF vmware cluster_name "$VMWAREAPI_CLUSTER" - iniset $CINDER_CONF vmware volume_driver "cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver" + iniset $CINDER_CONF DEFAULT vmware_host_ip "$VMWAREAPI_IP" + iniset $CINDER_CONF DEFAULT vmware_host_username "$VMWAREAPI_USER" + iniset $CINDER_CONF DEFAULT vmware_host_password "$VMWAREAPI_PASSWORD" + iniset $CINDER_CONF DEFAULT vmware_cluster_name "$VMWAREAPI_CLUSTER" + iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver" fi if [[ is_fedora && $DISTRO =~ (rhel6) ]]; then From bb8c6d42a4628f2a696babcc960e293786f67af6 Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Mon, 26 Aug 2013 17:00:05 -0400 Subject: [PATCH 0312/4704] add 
TEMPEST_LARGE_OPS_NUMBER to option The tempest scenario.large_ops test tries to catch any performance issues when running large numbers of operations at once, in this case launching instances. Set to 0 by default, to maintain the current default, but add this as an option so this can be set via devstacks localrc. Part of regression test for bug 1199433 Change-Id: I459717b849f9b6d180a7956c0ff470cefe7c1ff6 --- lib/tempest | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/tempest b/lib/tempest index 0d4f370c87..50289b60d4 100644 --- a/lib/tempest +++ b/lib/tempest @@ -282,6 +282,9 @@ function configure_tempest() { # Scenario iniset $TEMPEST_CONF scenario img_dir "$FILES/images/cirros-0.3.1-x86_64-uec" + # Large Ops Number + iniset $TEMPEST_CONF scenario large_ops_number ${TEMPEST_LARGE_OPS_NUMBER:-0} + # Volume CINDER_MULTI_LVM_BACKEND=$(trueorfalse False $CINDER_MULTI_LVM_BACKEND) if [ $CINDER_MULTI_LVM_BACKEND == "True" ]; then From bc2ef929ed4529197b0418fc8234aaca56f84109 Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Thu, 15 Aug 2013 18:06:59 +0100 Subject: [PATCH 0313/4704] xenapi: devstack support for raw tgz image upload Devstack will recognise the .xen-raw.tgz extensions, and upload them to glance as raw tgz images with xen pv_mode. This change also adds "tgz" to the recognised container formats of glance. 
The changes for raw tgz support are: https://review.openstack.org/#/c/40908/ https://review.openstack.org/#/c/40909/ https://review.openstack.org/#/c/41651/ related to blueprint xenapi-supported-image-import-export Change-Id: I077564587d4303291bb4f10d62bb16380b574106 --- functions | 18 +++++++++++++++++- lib/glance | 4 ++++ 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/functions b/functions index 41f008ed84..5ae2ee5cd3 100644 --- a/functions +++ b/functions @@ -1240,7 +1240,7 @@ function upload_image() { return fi - # XenServer-ovf-format images are provided as .vhd.tgz as well + # XenServer-vhd-ovf-format images are provided as .vhd.tgz # and should not be decompressed prior to loading if [[ "$image_url" =~ '.vhd.tgz' ]]; then IMAGE="$FILES/${IMAGE_FNAME}" @@ -1249,6 +1249,22 @@ function upload_image() { return fi + # .xen-raw.tgz suggests a Xen capable raw image inside a tgz. + # and should not be decompressed prior to loading. + # Setting metadata, so PV mode is used. + if [[ "$image_url" =~ '.xen-raw.tgz' ]]; then + IMAGE="$FILES/${IMAGE_FNAME}" + IMAGE_NAME="${IMAGE_FNAME%.xen-raw.tgz}" + glance \ + --os-auth-token $token \ + --os-image-url http://$GLANCE_HOSTPORT \ + image-create \ + --name "$IMAGE_NAME" --is-public=True \ + --container-format=tgz --disk-format=raw \ + --property vm_mode=xen < "${IMAGE}" + return + fi + KERNEL="" RAMDISK="" DISK_FORMAT="" diff --git a/lib/glance b/lib/glance index a18189f474..64d8b0695a 100644 --- a/lib/glance +++ b/lib/glance @@ -108,6 +108,10 @@ function configure_glance() { fi iniset_rpc_backend glance $GLANCE_API_CONF DEFAULT iniset $GLANCE_API_CONF keystone_authtoken signing_dir $GLANCE_AUTH_CACHE_DIR/api + if [ "$VIRT_DRIVER" = 'xenserver' ]; then + iniset $GLANCE_API_CONF DEFAULT container_formats "ami,ari,aki,bare,ovf,tgz" + iniset $GLANCE_API_CONF DEFAULT disk_formats "ami,ari,aki,vhd,raw,iso" + fi # Store the images in swift if enabled. 
if is_service_enabled s-proxy; then From 04762cd823302ca9992b67419e55ad5fc4dbf8fe Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Tue, 27 Aug 2013 17:06:14 -0500 Subject: [PATCH 0314/4704] Fix is_package_installed() check with dpkg is_package_installed() incorrectly returned '0' for packages that had 'un' status in the dpkg database. Change-Id: I81b77486c2ed7717ed81cb2c2572fe6c4b394ffc --- functions | 28 ++++++++++++++++++++++++---- tests/functions.sh | 22 ++++++++++++++++++++++ 2 files changed, 46 insertions(+), 4 deletions(-) diff --git a/functions b/functions index 41f008ed84..b8df520854 100644 --- a/functions +++ b/functions @@ -317,16 +317,36 @@ function get_packages() { continue fi + # Assume we want this package + package=${line%#*} + inst_pkg=1 + + # Look for # dist:xxx in comment if [[ $line =~ (.*)#.*dist:([^ ]*) ]]; then # We are using BASH regexp matching feature. package=${BASH_REMATCH[1]} distros=${BASH_REMATCH[2]} # In bash ${VAR,,} will lowecase VAR - [[ ${distros,,} =~ ${DISTRO,,} ]] && echo $package - continue + # Look for a match in the distro list + if [[ ! ${distros,,} =~ ${DISTRO,,} ]]; then + # If no match then skip this package + inst_pkg=0 + fi + fi + + # Look for # testonly in comment + if [[ $line =~ (.*)#.*testonly.* ]]; then + package=${BASH_REMATCH[1]} + # Are we installing test packages? 
(test for the default value) + if [[ $INSTALL_TESTONLY_PACKAGES = "False" ]]; then + # If not installing test packages the skip this package + inst_pkg=0 + fi fi - echo ${line%#*} + if [[ $inst_pkg = 1 ]]; then + echo $package + fi done IFS=$OIFS done @@ -912,7 +932,7 @@ function is_package_installed() { fi if [[ "$os_PACKAGE" = "deb" ]]; then - dpkg -l "$@" > /dev/null 2> /dev/null + dpkg -s "$@" > /dev/null 2> /dev/null elif [[ "$os_PACKAGE" = "rpm" ]]; then rpm --quiet -q "$@" else diff --git a/tests/functions.sh b/tests/functions.sh index 27a6cfeec4..7d486d4cc5 100755 --- a/tests/functions.sh +++ b/tests/functions.sh @@ -367,3 +367,25 @@ if [[ "$VAL" -ne 0 ]]; then else echo "is_package_installed() on non-existing package failed" fi + +# test against removed package...was a bug on Ubuntu +if is_ubuntu; then + PKG=cowsay + if ! (dpkg -s $PKG >/dev/null 2>&1); then + # it was never installed...set up the condition + sudo apt-get install -y cowsay >/dev/null 2>&1 + fi + if (dpkg -s $PKG >/dev/null 2>&1); then + # remove it to create the 'un' status + sudo dpkg -P $PKG >/dev/null 2>&1 + fi + + # now test the installed check on a deleted package + is_package_installed $PKG + VAL=$? + if [[ "$VAL" -ne 0 ]]; then + echo "OK" + else + echo "is_package_installed() on deleted package failed" + fi +fi From 300e1bf276b16c7aeab1631f709048346db63bd8 Mon Sep 17 00:00:00 2001 From: Yong Sheng Gong Date: Wed, 28 Aug 2013 17:02:56 +0800 Subject: [PATCH 0315/4704] Use the varialbe to export right keystone api version Change-Id: I1e8ea2b7173c549065ed1f08814eb4b4bb2f05cd Fixes: Bug #1217783 --- openrc | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/openrc b/openrc index a23c6e95bd..3de7e3958f 100644 --- a/openrc +++ b/openrc @@ -63,21 +63,19 @@ SERVICE_PROTOCOL=${SERVICE_PROTOCOL:-http} # should be listening on HOST_IP. 
If its running elsewhere, it can be set here GLANCE_HOST=${GLANCE_HOST:-$HOST_IP} +# Identity API version +export OS_IDENTITY_API_VERSION=${IDENTITY_API_VERSION:-2.0} + # Authenticating against an Openstack cloud using Keystone returns a **Token** # and **Service Catalog**. The catalog contains the endpoints for all services # the user/tenant has access to - including nova, glance, keystone, swift, ... # We currently recommend using the 2.0 *identity api*. # -# *NOTE*: Using the 2.0 *identity api* does not mean that compute api is 2.0. We -# will use the 1.1 *compute api* -export OS_AUTH_URL=$SERVICE_PROTOCOL://$SERVICE_HOST:5000/v2.0 +export OS_AUTH_URL=$SERVICE_PROTOCOL://$SERVICE_HOST:5000/v${OS_IDENTITY_API_VERSION} # Set the pointer to our CA certificate chain. Harmless if TLS is not used. export OS_CACERT=$INT_CA_DIR/ca-chain.pem -# Identity API version -export OS_IDENTITY_API_VERSION=${IDENTITY_API_VERSION:-2.0} - # Currently novaclient needs you to specify the *compute api* version. This # needs to match the config of your catalog returned by Keystone. export NOVA_VERSION=${NOVA_VERSION:-1.1} From 2aa2a89cdb9071cea919116e283c16ac9dd841d6 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Sun, 4 Aug 2013 19:53:19 -0500 Subject: [PATCH 0316/4704] Add support for Docker as Nova hypervisor * Add basic support for hypervisor plugins in lib/nova_plugins * Add lib/nova_plugins/hypervisor-docker to use Docker as a Nova hypervisor. 
* Add tools/install_docker.sh to install the Docker daemon and registry container, download base image and import * Configure Nova to use docker plugin * Add docker exercise and skip unsupported ones Nova blueprint: new-hypervisor-docker Change-Id: I9e7065b562dce2ce853def583ab1165886612227 --- README.md | 6 +- clean.sh | 5 ++ exercises/boot_from_volume.sh | 3 + exercises/docker.sh | 105 +++++++++++++++++++++++ exercises/euca.sh | 3 + exercises/floating_ips.sh | 3 + exercises/sec_groups.sh | 3 + exercises/volumes.sh | 3 + lib/nova | 17 +++- lib/nova_plugins/hypervisor-docker | 132 +++++++++++++++++++++++++++++ stack.sh | 18 +++- tools/docker/README.md | 13 +++ tools/docker/install_docker.sh | 75 ++++++++++++++++ unstack.sh | 8 ++ 14 files changed, 391 insertions(+), 3 deletions(-) create mode 100755 exercises/docker.sh create mode 100644 lib/nova_plugins/hypervisor-docker create mode 100644 tools/docker/README.md create mode 100755 tools/docker/install_docker.sh diff --git a/README.md b/README.md index 6426e9a4ed..99e983887e 100644 --- a/README.md +++ b/README.md @@ -12,10 +12,14 @@ Read more at http://devstack.org (built from the gh-pages branch) IMPORTANT: Be sure to carefully read `stack.sh` and any other scripts you execute before you run them, as they install software and may alter your networking configuration. We strongly recommend that you run `stack.sh` in a clean and disposable vm when you are first getting started. -# Devstack on Xenserver +# DevStack on Xenserver If you would like to use Xenserver as the hypervisor, please refer to the instructions in `./tools/xen/README.md`. +# DevStack on Docker + +If you would like to use Docker as the hypervisor, please refer to the instructions in `./tools/docker/README.md`. + # Versions The devstack master branch generally points to trunk versions of OpenStack components. For older, stable versions, look for branches named stable/[release] in the DevStack repo. 
For example, you can do the following to create a diablo OpenStack cloud: diff --git a/clean.sh b/clean.sh index f7d15dfe4e..a443ac82d0 100755 --- a/clean.sh +++ b/clean.sh @@ -64,6 +64,11 @@ cleanup_nova cleanup_neutron cleanup_swift +# Do the hypervisor cleanup until this can be moved back into lib/nova +if [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then + cleanup_nova_hypervisor +fi + # cinder doesn't always clean up the volume group as it might be used elsewhere... # clean it up if it is a loop device VG_DEV=$(sudo losetup -j $DATA_DIR/${VOLUME_GROUP}-backing-file | awk -F':' '/backing-file/ { print $1}') diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh index 36524ede4b..fe27bd0956 100755 --- a/exercises/boot_from_volume.sh +++ b/exercises/boot_from_volume.sh @@ -44,6 +44,9 @@ source $TOP_DIR/exerciserc # the exercise is skipped is_service_enabled cinder || exit 55 +# Also skip if the hypervisor is Docker +[[ "$VIRT_DRIVER" == "docker" ]] && exit 55 + # Instance type to create DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} diff --git a/exercises/docker.sh b/exercises/docker.sh new file mode 100755 index 0000000000..0672bc0087 --- /dev/null +++ b/exercises/docker.sh @@ -0,0 +1,105 @@ +#!/usr/bin/env bash + +# **docker** + +# Test Docker hypervisor + +echo "*********************************************************************" +echo "Begin DevStack Exercise: $0" +echo "*********************************************************************" + +# This script exits on an error so that errors don't compound and you see +# only the first error that occurred. +set -o errexit + +# Print the commands being run so that we can see the command that triggers +# an error. It is also useful for following allowing as the install occurs. 
+set -o xtrace + + +# Settings +# ======== + +# Keep track of the current directory +EXERCISE_DIR=$(cd $(dirname "$0") && pwd) +TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) + +# Import common functions +source $TOP_DIR/functions + +# Import configuration +source $TOP_DIR/openrc + +# Import exercise configuration +source $TOP_DIR/exerciserc + +# Skip if the hypervisor is not Docker +[[ "$VIRT_DRIVER" == "docker" ]] || exit 55 + +# Import docker functions and declarations +source $TOP_DIR/lib/nova_plugins/hypervisor-docker + +# Image and flavor are ignored but the CLI requires them... + +# Instance type to create +DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} + +# Boot this image, use first AMI image if unset +DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami} + +# Instance name +VM_NAME=ex-docker + + +# Launching a server +# ================== + +# Grab the id of the image to launch +IMAGE=$(glance image-list | egrep " $DOCKER_IMAGE_NAME:latest " | get_field 1) +die_if_not_set $LINENO IMAGE "Failure getting image $DOCKER_IMAGE_NAME" + +# Select a flavor +INSTANCE_TYPE=$(nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | get_field 1) +if [[ -z "$INSTANCE_TYPE" ]]; then + # grab the first flavor in the list to launch if default doesn't exist + INSTANCE_TYPE=$(nova flavor-list | head -n 4 | tail -n 1 | get_field 1) +fi + +# Clean-up from previous runs +nova delete $VM_NAME || true +if ! timeout $ACTIVE_TIMEOUT sh -c "while nova show $VM_NAME; do sleep 1; done"; then + die $LINENO "server didn't terminate!" +fi + +# Boot instance +# ------------- + +VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE $VM_NAME | grep ' id ' | get_field 2) +die_if_not_set $LINENO VM_UUID "Failure launching $VM_NAME" + +# Check that the status is active within ACTIVE_TIMEOUT seconds +if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then + die $LINENO "server didn't become active!" 
+fi + +# Get the instance IP +IP=$(nova show $VM_UUID | grep "$PRIVATE_NETWORK_NAME" | get_field 2) +die_if_not_set $LINENO IP "Failure retrieving IP address" + +# Private IPs can be pinged in single node deployments +ping_check "$PRIVATE_NETWORK_NAME" $IP $BOOT_TIMEOUT + +# Clean up +# -------- + +# Delete instance +nova delete $VM_UUID || die $LINENO "Failure deleting instance $VM_NAME" +if ! timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q $VM_UUID; do sleep 1; done"; then + die $LINENO "Server $VM_NAME not deleted" +fi + +set +o xtrace +echo "*********************************************************************" +echo "SUCCESS: End DevStack Exercise: $0" +echo "*********************************************************************" + diff --git a/exercises/euca.sh b/exercises/euca.sh index b8b283a8fb..64c0014236 100755 --- a/exercises/euca.sh +++ b/exercises/euca.sh @@ -41,6 +41,9 @@ fi # Import exercise configuration source $TOP_DIR/exerciserc +# Skip if the hypervisor is Docker +[[ "$VIRT_DRIVER" == "docker" ]] && exit 55 + # Instance type to create DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh index f93a727df6..2833b650ba 100755 --- a/exercises/floating_ips.sh +++ b/exercises/floating_ips.sh @@ -38,6 +38,9 @@ fi # Import exercise configuration source $TOP_DIR/exerciserc +# Skip if the hypervisor is Docker +[[ "$VIRT_DRIVER" == "docker" ]] && exit 55 + # Instance type to create DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} diff --git a/exercises/sec_groups.sh b/exercises/sec_groups.sh index 6b67291cde..7d80570326 100755 --- a/exercises/sec_groups.sh +++ b/exercises/sec_groups.sh @@ -33,6 +33,9 @@ source $TOP_DIR/openrc # Import exercise configuration source $TOP_DIR/exerciserc +# Skip if the hypervisor is Docker +[[ "$VIRT_DRIVER" == "docker" ]] && exit 55 + # Testing Security Groups # ======================= diff --git a/exercises/volumes.sh 
b/exercises/volumes.sh index 028d19b36a..e536d16249 100755 --- a/exercises/volumes.sh +++ b/exercises/volumes.sh @@ -42,6 +42,9 @@ source $TOP_DIR/exerciserc # exercise is skipped. is_service_enabled cinder || exit 55 +# Also skip if the hypervisor is Docker +[[ "$VIRT_DRIVER" == "docker" ]] && exit 55 + # Instance type to create DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} diff --git a/lib/nova b/lib/nova index 842c098624..3486aa8fb9 100644 --- a/lib/nova +++ b/lib/nova @@ -169,6 +169,13 @@ function cleanup_nova() { fi sudo rm -rf $NOVA_STATE_PATH $NOVA_AUTH_CACHE_DIR + + # NOTE(dtroyer): This really should be called from here but due to the way + # nova abuses the _cleanup() function we're moving it + # directly into cleanup.sh until this can be fixed. + #if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then + # cleanup_nova_hypervisor + #fi } # configure_nova_rootwrap() - configure Nova's rootwrap @@ -650,7 +657,9 @@ function install_novaclient() { # install_nova() - Collect source and prepare function install_nova() { if is_service_enabled n-cpu; then - if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then + if [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then + install_nova_hypervisor + elif [[ "$VIRT_DRIVER" = 'libvirt' ]]; then if is_ubuntu; then install_package kvm install_package libvirt-bin @@ -728,6 +737,9 @@ function start_nova() { screen_it n-cpu "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-compute --config-file $NOVA_CONF_BOTTOM" done else + if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then + start_nova_hypervisor + fi screen_it n-cpu "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-compute --config-file $NOVA_CONF_BOTTOM" fi screen_it n-crt "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cert" @@ -754,6 +766,9 @@ function stop_nova() { for serv in n-api n-cpu n-crt n-net n-sch n-novnc n-xvnc n-cauth n-spice n-cond n-cond n-cell n-cell n-api-meta; do screen -S $SCREEN_NAME -p $serv -X kill done + if is_service_enabled 
n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then + stop_nova_hypervisor + fi } diff --git a/lib/nova_plugins/hypervisor-docker b/lib/nova_plugins/hypervisor-docker new file mode 100644 index 0000000000..4c8fc279b0 --- /dev/null +++ b/lib/nova_plugins/hypervisor-docker @@ -0,0 +1,132 @@ +# lib/nova_plugins/docker +# Configure the Docker hypervisor + +# Enable with: +# VIRT_DRIVER=docker + +# Dependencies: +# ``functions`` file +# ``nova`` and ``glance`` configurations + +# install_nova_hypervisor - install any external requirements +# configure_nova_hypervisor - make configuration changes, including those to other services +# start_nova_hypervisor - start any external services +# stop_nova_hypervisor - stop any external services +# cleanup_nova_hypervisor - remove transient data and cache + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + +# Set up default directories +DOCKER_DIR=$DEST/docker +DOCKER_REPO=${DOCKER_REPO:-https://github.com/dotcloud/openstack-docker.git} +DOCKER_BRANCH=${DOCKER_BRANCH:-master} + +DOCKER_UNIX_SOCKET=/var/run/docker.sock +DOCKER_PID_FILE=/var/run/docker.pid +DOCKER_REGISTRY_PORT=${DOCKER_REGISTRY_PORT:-5042} + +DOCKER_IMAGE=${DOCKER_IMAGE:-http://get.docker.io/images/openstack/docker-ut.tar.gz} +DOCKER_IMAGE_NAME=docker-busybox +DOCKER_REGISTRY_IMAGE=${DOCKER_REGISTRY_IMAGE:-http://get.docker.io/images/openstack/docker-registry.tar.gz} +DOCKER_REGISTRY_IMAGE_NAME=docker-registry +DOCKER_REPOSITORY_NAME=${SERVICE_HOST}:${DOCKER_REGISTRY_PORT}/${DOCKER_IMAGE_NAME} + +DOCKER_PACKAGE_VERSION=${DOCKER_PACKAGE_VERSION:-0.6.1} +DOCKER_APT_REPO=${DOCKER_APT_REPO:-https://get.docker.io/ubuntu} + + +# Entry Points +# ------------ + +# clean_nova_hypervisor - Clean up an installation +function cleanup_nova_hypervisor() { + stop_service docker + + # Clean out work area + sudo rm -rf /var/lib/docker +} + +# configure_nova_hypervisor - Set config files, create data dirs, etc +function 
configure_nova_hypervisor() { + git_clone $DOCKER_REPO $DOCKER_DIR $DOCKER_BRANCH + + ln -snf ${DOCKER_DIR}/nova-driver $NOVA_DIR/nova/virt/docker + + iniset $NOVA_CONF DEFAULT compute_driver docker.DockerDriver + iniset $GLANCE_API_CONF DEFAULT container_formats ami,ari,aki,bare,ovf,docker + + sudo cp -p ${DOCKER_DIR}/nova-driver/docker.filters $NOVA_CONF_DIR/rootwrap.d +} + +# install_nova_hypervisor() - Install external components +function install_nova_hypervisor() { + # So far this is Ubuntu only + if ! is_ubuntu; then + die $LINENO "Docker is only supported on Ubuntu at this time" + fi + + # Make sure Docker is installed + if ! is_package_installed lxc-docker; then + die $LINENO "Docker is not installed. Please run tools/docker/install_docker.sh" + fi + + local docker_pid + read docker_pid <$DOCKER_PID_FILE + if [[ -z $docker_pid ]] || ! ps -p $docker_pid | grep [d]ocker; then + die $LINENO "Docker not running" + fi +} + +# start_nova_hypervisor - Start any required external services +function start_nova_hypervisor() { + local docker_pid + read docker_pid <$DOCKER_PID_FILE + if [[ -z $docker_pid ]] || ! ps -p $docker_pid | grep [d]ocker; then + die $LINENO "Docker not running, start the daemon" + fi + + # Start the Docker registry container + docker run -d -p ${DOCKER_REGISTRY_PORT}:5000 \ + -e SETTINGS_FLAVOR=openstack -e OS_USERNAME=${OS_USERNAME} \ + -e OS_PASSWORD=${OS_PASSWORD} -e OS_TENANT_NAME=${OS_TENANT_NAME} \ + -e OS_GLANCE_URL="${SERVICE_PROTOCOL}://${GLANCE_HOSTPORT}" \ + -e OS_AUTH_URL=${OS_AUTH_URL} \ + $DOCKER_REGISTRY_IMAGE_NAME ./docker-registry/run.sh + + echo "Waiting for docker registry to start..." + DOCKER_REGISTRY=${SERVICE_HOST}:${DOCKER_REGISTRY_PORT} + if ! timeout $SERVICE_TIMEOUT sh -c "while ! curl -s $DOCKER_REGISTRY; do sleep 1; done"; then + die $LINENO "docker-registry did not start" + fi + + # Tag image if not already tagged + if ! 
docker images | grep $DOCKER_REPOSITORY_NAME; then + docker tag $DOCKER_IMAGE_NAME $DOCKER_REPOSITORY_NAME + fi + + # Make sure we copied the image in Glance + DOCKER_IMAGE=$(glance image-list | egrep " $DOCKER_IMAGE_NAME ") + if ! is_set DOCKER_IMAGE ; then + docker push $DOCKER_REPOSITORY_NAME + fi +} + +# stop_nova_hypervisor - Stop any external services +function stop_nova_hypervisor() { + # Stop the docker registry container + docker kill $(docker ps | grep docker-registry | cut -d' ' -f1) +} + + +# Restore xtrace +$MY_XTRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/stack.sh b/stack.sh index 8f59328792..c3f69adcf1 100755 --- a/stack.sh +++ b/stack.sh @@ -319,6 +319,13 @@ source $TOP_DIR/lib/neutron source $TOP_DIR/lib/baremetal source $TOP_DIR/lib/ldap +# Look for Nova hypervisor plugin +NOVA_PLUGINS=$TOP_DIR/lib/nova_plugins +if is_service_enabled nova && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then + # Load plugin + source $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER +fi + # Set the destination directories for other OpenStack projects OPENSTACKCLIENT_DIR=$DEST/python-openstackclient @@ -1013,6 +1020,10 @@ if is_service_enabled cinder; then init_cinder fi + +# Compute Service +# --------------- + if is_service_enabled nova; then echo_summary "Configuring Nova" # Rebuild the config file from scratch @@ -1027,10 +1038,15 @@ if is_service_enabled nova; then fi + if [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then + # Configure hypervisor plugin + configure_nova_hypervisor + + # XenServer # --------- - if [ "$VIRT_DRIVER" = 'xenserver' ]; then + elif [ "$VIRT_DRIVER" = 'xenserver' ]; then echo_summary "Using XenServer virtualization driver" if [ -z "$XENAPI_CONNECTION_URL" ]; then die $LINENO "XENAPI_CONNECTION_URL is not specified" diff --git a/tools/docker/README.md b/tools/docker/README.md new file mode 100644 index 0000000000..976111f750 --- /dev/null +++ b/tools/docker/README.md @@ -0,0 +1,13 @@ +# DevStack on Docker + +Using 
Docker as Nova's hypervisor requries two steps: + +* Configure DevStack by adding the following to `localrc`:: + + VIRT_DRIVER=docker + +* Download and install the Docker service and images:: + + tools/docker/install_docker.sh + +After this, `stack.sh` should run as normal. diff --git a/tools/docker/install_docker.sh b/tools/docker/install_docker.sh new file mode 100755 index 0000000000..d659ad104b --- /dev/null +++ b/tools/docker/install_docker.sh @@ -0,0 +1,75 @@ +#!/usr/bin/env bash + +# **install_docker.sh** - Do the initial Docker installation and configuration + +# install_docker.sh +# +# Install docker package and images +# * downloads a base busybox image and a glance registry image if necessary +# * install the images in Docker's image cache + + +# Keep track of the current directory +SCRIPT_DIR=$(cd $(dirname "$0") && pwd) +TOP_DIR=$(cd $SCRIPT_DIR/../..; pwd) + +# Import common functions +source $TOP_DIR/functions + +# Load local configuration +source $TOP_DIR/stackrc + +FILES=$TOP_DIR/files + +# Get our defaults +source $TOP_DIR/lib/nova_plugins/hypervisor-docker + +SERVICE_TIMEOUT=${SERVICE_TIMEOUT:-60} + + +# Install Docker Service +# ====================== + +# Stop the auto-repo updates and do it when required here +NO_UPDATE_REPOS=True + +# Set up home repo +curl https://get.docker.io/gpg | sudo apt-key add - +install_package python-software-properties && \ + sudo sh -c "echo deb $DOCKER_APT_REPO docker main > /etc/apt/sources.list.d/docker.list" +apt_get update +install_package --force-yes lxc-docker=${DOCKER_PACKAGE_VERSION} + +# Start the daemon - restart just in case the package ever auto-starts... +restart_service docker + +echo "Waiting for docker daemon to start..." +DOCKER_GROUP=$(groups | cut -d' ' -f1) +CONFIGURE_CMD="while ! 
/bin/echo -e 'GET /v1.3/version HTTP/1.0\n\n' | socat - unix-connect:$DOCKER_UNIX_SOCKET | grep -q '200 OK'; do + # Set the right group on docker unix socket before retrying + sudo chgrp $DOCKER_GROUP $DOCKER_UNIX_SOCKET + sudo chmod g+rw $DOCKER_UNIX_SOCKET + sleep 1 +done" +if ! timeout $SERVICE_TIMEOUT sh -c "$CONFIGURE_CMD"; then + die $LINENO "docker did not start" +fi + + +# Get Docker image +if [[ ! -r $FILES/docker-ut.tar.gz ]]; then + (cd $FILES; curl -OR $DOCKER_IMAGE) +fi +if [[ ! -r $FILES/docker-ut.tar.gz ]]; then + die $LINENO "Docker image unavailable" +fi +docker import - $DOCKER_IMAGE_NAME <$FILES/docker-ut.tar.gz + +# Get Docker registry image +if [[ ! -r $FILES/docker-registry.tar.gz ]]; then + (cd $FILES; curl -OR $DOCKER_REGISTRY_IMAGE) +fi +if [[ ! -r $FILES/docker-registry.tar.gz ]]; then + die $LINENO "Docker registry image unavailable" +fi +docker import - $DOCKER_REGISTRY_IMAGE_NAME <$FILES/docker-registry.tar.gz diff --git a/unstack.sh b/unstack.sh index 2268b90458..f053bcddd8 100755 --- a/unstack.sh +++ b/unstack.sh @@ -65,6 +65,14 @@ if [[ -n "$SCREEN" ]]; then fi fi +# Shut down Nova hypervisor plugins after Nova +NOVA_PLUGINS=$TOP_DIR/lib/nova_plugins +if is_service_enabled nova && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then + # Load plugin + source $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER + stop_nova_hypervisor +fi + # Swift runs daemons if is_service_enabled s-proxy; then stop_swift From b1dc9bd5e43568e0fc96b4e2be4520be12a1d955 Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Thu, 29 Aug 2013 11:52:20 +0100 Subject: [PATCH 0317/4704] xenapi: enable block device access for stack user Although nova is setting the permissions on block devices, sometimes it fails, and that results in an instance failing to launch. It is only an issue for 3-part images, and images accessed through block devices. This patch adds an udev rule, so that devices will be accessible. 
fixes bug 1218251 Change-Id: I837ea515457fbfc50e9ce138ea9de9db12baa8be --- tools/xen/prepare_guest.sh | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tools/xen/prepare_guest.sh b/tools/xen/prepare_guest.sh index f109d723d9..6ec5ffa546 100755 --- a/tools/xen/prepare_guest.sh +++ b/tools/xen/prepare_guest.sh @@ -48,6 +48,11 @@ useradd $STACK_USER -s /bin/bash -d /opt/stack -G libvirtd echo $STACK_USER:$GUEST_PASSWORD | chpasswd echo "$STACK_USER ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers +# Add an udev rule, so that new block devices could be written by stack user +cat > /etc/udev/rules.d/50-openstack-blockdev.rules << EOF +KERNEL=="xvd[b-z]", GROUP="$STACK_USER", MODE="0660" +EOF + # Give ownership of /opt/stack to stack user chown -R $STACK_USER /opt/stack From a213e2c3cafe0739c60766b451d0d44755e87ced Mon Sep 17 00:00:00 2001 From: Angus Salkeld Date: Fri, 30 Aug 2013 10:48:46 +1000 Subject: [PATCH 0318/4704] Move Heat to a single heat.conf the old config files that are no longer needed (but still supported): heat-engine.conf, heat-api.conf, heat-api-cfn.conf, heat-api-cw.conf Change-Id: I7ba0566325539bf7215bcb606843a90b5e3e4a98 --- lib/heat | 138 ++++++++++++++++++++----------------------------------- 1 file changed, 50 insertions(+), 88 deletions(-) diff --git a/lib/heat b/lib/heat index 8b6fd7fc4b..67509bcfa0 100644 --- a/lib/heat +++ b/lib/heat @@ -32,6 +32,7 @@ HEATCLIENT_DIR=$DEST/python-heatclient HEAT_AUTH_CACHE_DIR=${HEAT_AUTH_CACHE_DIR:-/var/cache/heat} HEAT_STANDALONE=`trueorfalse False $HEAT_STANDALONE` HEAT_CONF_DIR=/etc/heat +HEAT_CONF=$HEAT_CONF_DIR/heat.conf HEAT_ENV_DIR=$HEAT_CONF_DIR/environment.d HEAT_TEMPLATES_DIR=$HEAT_CONF_DIR/templates @@ -54,6 +55,8 @@ function configure_heat() { sudo mkdir -p $HEAT_CONF_DIR fi sudo chown $STACK_USER $HEAT_CONF_DIR + # remove old config files + rm -f $HEAT_CONF_DIR/heat-*.conf HEAT_API_CFN_HOST=${HEAT_API_CFN_HOST:-$SERVICE_HOST} HEAT_API_CFN_PORT=${HEAT_API_CFN_PORT:-8000} @@ -68,96 +71,55 @@ function 
configure_heat() { cp $HEAT_DIR/etc/heat/api-paste.ini $HEAT_API_PASTE_FILE cp $HEAT_DIR/etc/heat/policy.json $HEAT_POLICY_FILE + cp $HEAT_DIR/etc/heat/heat.conf.sample $HEAT_CONF + + # common options + iniset_rpc_backend heat $HEAT_CONF DEFAULT + iniset $HEAT_CONF DEFAULT heat_metadata_server_url http://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT + iniset $HEAT_CONF DEFAULT heat_waitcondition_server_url http://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1/waitcondition + iniset $HEAT_CONF DEFAULT heat_watch_server_url http://$HEAT_API_CW_HOST:$HEAT_API_CW_PORT + iniset $HEAT_CONF DEFAULT sql_connection `database_connection_url heat` + iniset $HEAT_CONF DEFAULT auth_encryption_key `hexdump -n 16 -v -e '/1 "%02x"' /dev/random` + + # logging + iniset $HEAT_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL + iniset $HEAT_CONF DEFAULT use_syslog $SYSLOG + if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then + # Add color to logging output + iniset $HEAT_CONF DEFAULT logging_context_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s%(color)s] %(instance)s%(color)s%(message)s" + iniset $HEAT_CONF DEFAULT logging_default_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s" + iniset $HEAT_CONF DEFAULT logging_debug_format_suffix "from (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d" + iniset $HEAT_CONF DEFAULT logging_exception_prefix "%(color)s%(asctime)s.%(msecs)03d TRACE %(name)s %(instance)s" + fi - # Cloudformation API - HEAT_API_CFN_CONF=$HEAT_CONF_DIR/heat-api-cfn.conf - cp $HEAT_DIR/etc/heat/heat-api-cfn.conf $HEAT_API_CFN_CONF - iniset $HEAT_API_CFN_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - inicomment $HEAT_API_CFN_CONF DEFAULT log_file - iniset $HEAT_API_CFN_CONF DEFAULT use_syslog $SYSLOG - iniset $HEAT_API_CFN_CONF DEFAULT bind_host $HEAT_API_CFN_HOST - iniset $HEAT_API_CFN_CONF DEFAULT bind_port $HEAT_API_CFN_PORT - iniset 
$HEAT_API_CFN_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST - iniset $HEAT_API_CFN_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT - iniset $HEAT_API_CFN_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL - iniset $HEAT_API_CFN_CONF keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0 - iniset $HEAT_API_CFN_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME - iniset $HEAT_API_CFN_CONF keystone_authtoken admin_user heat - iniset $HEAT_API_CFN_CONF keystone_authtoken admin_password $SERVICE_PASSWORD - iniset $HEAT_API_CFN_CONF keystone_authtoken signing_dir $HEAT_AUTH_CACHE_DIR/api-cfn - iniset $HEAT_API_CFN_CONF ec2authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0 - iniset $HEAT_API_CFN_CONF ec2authtoken keystone_ec2_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ec2tokens - [[ "$HEAT_STANDALONE" = "True" ]] && iniset $HEAT_API_CFN_CONF paste_deploy flavor standalone - - iniset_rpc_backend heat $HEAT_API_CFN_CONF DEFAULT + # keystone authtoken + iniset $HEAT_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST + iniset $HEAT_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT + iniset $HEAT_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL + iniset $HEAT_CONF keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0 + iniset $HEAT_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME + iniset $HEAT_CONF keystone_authtoken admin_user heat + iniset $HEAT_CONF keystone_authtoken admin_password $SERVICE_PASSWORD + iniset $HEAT_CONF keystone_authtoken signing_dir $HEAT_AUTH_CACHE_DIR/api-cfn + + # ec2authtoken + iniset $HEAT_CONF ec2authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0 + iniset $HEAT_CONF ec2authtoken keystone_ec2_uri 
$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ec2tokens + + # paste_deploy + [[ "$HEAT_STANDALONE" = "True" ]] && iniset $HEAT_CONF paste_deploy flavor standalone # OpenStack API - HEAT_API_CONF=$HEAT_CONF_DIR/heat-api.conf - cp $HEAT_DIR/etc/heat/heat-api.conf $HEAT_API_CONF - iniset $HEAT_API_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - inicomment $HEAT_API_CONF DEFAULT log_file - iniset $HEAT_API_CONF DEFAULT use_syslog $SYSLOG - iniset $HEAT_API_CONF DEFAULT bind_host $HEAT_API_HOST - iniset $HEAT_API_CONF DEFAULT bind_port $HEAT_API_PORT - iniset $HEAT_API_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST - iniset $HEAT_API_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT - iniset $HEAT_API_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL - iniset $HEAT_API_CONF keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0 - iniset $HEAT_API_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME - iniset $HEAT_API_CONF keystone_authtoken admin_user heat - iniset $HEAT_API_CONF keystone_authtoken admin_password $SERVICE_PASSWORD - iniset $HEAT_API_CONF keystone_authtoken signing_dir $HEAT_AUTH_CACHE_DIR/api - iniset $HEAT_API_CONF ec2authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0 - iniset $HEAT_API_CONF ec2authtoken keystone_ec2_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ec2tokens - [[ "$HEAT_STANDALONE" = "True" ]] && iniset $HEAT_API_CONF paste_deploy flavor standalone - iniset_rpc_backend heat $HEAT_API_CONF DEFAULT - - - # engine - HEAT_ENGINE_CONF=$HEAT_CONF_DIR/heat-engine.conf - cp $HEAT_DIR/etc/heat/heat-engine.conf $HEAT_ENGINE_CONF - iniset $HEAT_ENGINE_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - inicomment $HEAT_ENGINE_CONF DEFAULT log_file - iniset $HEAT_ENGINE_CONF DEFAULT use_syslog $SYSLOG - iniset $HEAT_ENGINE_CONF DEFAULT bind_host 
$HEAT_ENGINE_HOST - iniset $HEAT_ENGINE_CONF DEFAULT bind_port $HEAT_ENGINE_PORT - iniset $HEAT_ENGINE_CONF DEFAULT heat_metadata_server_url http://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT - iniset $HEAT_ENGINE_CONF DEFAULT heat_waitcondition_server_url http://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1/waitcondition - iniset $HEAT_ENGINE_CONF DEFAULT heat_watch_server_url http://$HEAT_API_CW_HOST:$HEAT_API_CW_PORT - iniset $HEAT_ENGINE_CONF DEFAULT sql_connection `database_connection_url heat` - iniset $HEAT_ENGINE_CONF DEFAULT auth_encryption_key `hexdump -n 16 -v -e '/1 "%02x"' /dev/random` - - iniset_rpc_backend heat $HEAT_ENGINE_CONF DEFAULT + iniset $HEAT_CONF heat_api bind_host $HEAT_API_HOST + iniset $HEAT_CONF heat_api bind_port $HEAT_API_PORT - if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then - # Add color to logging output - iniset $HEAT_ENGINE_CONF DEFAULT logging_context_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s%(color)s] %(instance)s%(color)s%(message)s" - iniset $HEAT_ENGINE_CONF DEFAULT logging_default_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s" - iniset $HEAT_ENGINE_CONF DEFAULT logging_debug_format_suffix "from (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d" - iniset $HEAT_ENGINE_CONF DEFAULT logging_exception_prefix "%(color)s%(asctime)s.%(msecs)03d TRACE %(name)s %(instance)s" - fi + # Cloudformation API + iniset $HEAT_CONF heat_api_cfn bind_host $HEAT_API_CFN_HOST + iniset $HEAT_CONF heat_api_cfn bind_port $HEAT_API_CFN_PORT # Cloudwatch API - HEAT_API_CW_CONF=$HEAT_CONF_DIR/heat-api-cloudwatch.conf - cp $HEAT_DIR/etc/heat/heat-api-cloudwatch.conf $HEAT_API_CW_CONF - iniset $HEAT_API_CW_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - inicomment $HEAT_API_CW_CONF DEFAULT log_file - iniset $HEAT_API_CW_CONF DEFAULT use_syslog $SYSLOG - iniset $HEAT_API_CW_CONF DEFAULT bind_host 
$HEAT_API_CW_HOST - iniset $HEAT_API_CW_CONF DEFAULT bind_port $HEAT_API_CW_PORT - iniset $HEAT_API_CW_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST - iniset $HEAT_API_CW_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT - iniset $HEAT_API_CW_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL - iniset $HEAT_API_CW_CONF keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0 - iniset $HEAT_API_CW_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME - iniset $HEAT_API_CW_CONF keystone_authtoken admin_user heat - iniset $HEAT_API_CW_CONF keystone_authtoken admin_password $SERVICE_PASSWORD - iniset $HEAT_API_CW_CONF keystone_authtoken signing_dir $HEAT_AUTH_CACHE_DIR/api-cloudwatch - iniset $HEAT_API_CW_CONF ec2authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0 - iniset $HEAT_API_CW_CONF ec2authtoken keystone_ec2_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ec2tokens - [[ "$HEAT_STANDALONE" = "True" ]] && iniset $HEAT_API_CW_CONF paste_deploy flavor standalone - - iniset_rpc_backend heat $HEAT_API_CW_CONF DEFAULT + iniset $HEAT_CONF heat_api_cloudwatch bind_host $HEAT_API_CW_HOST + iniset $HEAT_CONF heat_api_cloudwatch bind_port $HEAT_API_CW_PORT # heat environment sudo mkdir -p $HEAT_ENV_DIR @@ -207,10 +169,10 @@ function install_heat() { # start_heat() - Start running processes, including screen function start_heat() { - screen_it h-eng "cd $HEAT_DIR; bin/heat-engine --config-file=$HEAT_CONF_DIR/heat-engine.conf" - screen_it h-api "cd $HEAT_DIR; bin/heat-api --config-dir=$HEAT_CONF_DIR/heat-api.conf" - screen_it h-api-cfn "cd $HEAT_DIR; bin/heat-api-cfn --config-dir=$HEAT_CONF_DIR/heat-api-cfn.conf" - screen_it h-api-cw "cd $HEAT_DIR; bin/heat-api-cloudwatch --config-dir=$HEAT_CONF_DIR/heat-api-cloudwatch.conf" + screen_it h-eng "cd $HEAT_DIR; bin/heat-engine --config-file=$HEAT_CONF" + screen_it 
h-api "cd $HEAT_DIR; bin/heat-api --config-file=$HEAT_CONF" + screen_it h-api-cfn "cd $HEAT_DIR; bin/heat-api-cfn --config-file=$HEAT_CONF" + screen_it h-api-cw "cd $HEAT_DIR; bin/heat-api-cloudwatch --config-file=$HEAT_CONF" } # stop_heat() - Stop running processes From 97621a1d1f39a944a24371fc9f2bf9b86faec248 Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Fri, 30 Aug 2013 13:12:17 +0100 Subject: [PATCH 0319/4704] xenapi: add username to vncviewer command Devstack prints out an instruction, how to look at the virtual machine's console. The command did not include the username, so if the user had a config file to use a different username for that network, the command failed. Change-Id: I5dd49169c45e26e8d2bb3d5920a1b7fa584be50f --- tools/xen/install_os_domU.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh index 997644d018..a012a08561 100755 --- a/tools/xen/install_os_domU.sh +++ b/tools/xen/install_os_domU.sh @@ -181,7 +181,7 @@ function wait_for_VM_to_halt() { mgmt_ip=$(echo $XENAPI_CONNECTION_URL | tr -d -c '1234567890.') domid=$(xe vm-list name-label="$GUEST_NAME" params=dom-id minimal=true) port=$(xenstore-read /local/domain/$domid/console/vnc-port) - echo "vncviewer -via $mgmt_ip localhost:${port:2}" + echo "vncviewer -via root@$mgmt_ip localhost:${port:2}" while true do state=$(xe_min vm-list name-label="$GUEST_NAME" power-state=halted) From 1533a349da34a002ab6a09cee86d47daf6d777fb Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Fri, 30 Aug 2013 14:10:52 +0100 Subject: [PATCH 0320/4704] remove multi-host timeout If you ran exercises with MULTI_HOST enabled, an additional sleep was performed. This change removes that sleep to speed up tests. 
Change-Id: I9dfd61cbb9415bd5e8fd1e40f4e41512be2ae0d2 --- functions | 1 - 1 file changed, 1 deletion(-) diff --git a/functions b/functions index f24cc89e82..e0d0e2a70f 100644 --- a/functions +++ b/functions @@ -1454,7 +1454,6 @@ function _ping_check_novanet() { local check_command="" MULTI_HOST=`trueorfalse False $MULTI_HOST` if [[ "$MULTI_HOST" = "True" && "$from_net" = "$PRIVATE_NETWORK_NAME" ]]; then - sleep $boot_timeout return fi if [[ "$expected" = "True" ]]; then From 16ed068db52516238b618408656fa0bc612b9218 Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Fri, 30 Aug 2013 13:28:31 +0100 Subject: [PATCH 0321/4704] xenapi: Set VM memory before starting it If someone was re-using an existing template, for a memory change, he needed to re-install the vm. This change sets the osdomu mem before starting it, so it doesn't matter how much memory did the VM had according to the template. It also removes the memory manipulation bits from install-os-vpx.sh. Change-Id: Iad85f573b90c23140012c20c552a17277d9c97a0 --- tools/xen/functions | 19 +++++++++++++++++++ tools/xen/install_os_domU.sh | 10 ++++++++-- tools/xen/scripts/install-os-vpx.sh | 28 +--------------------------- 3 files changed, 28 insertions(+), 29 deletions(-) diff --git a/tools/xen/functions b/tools/xen/functions index 7616a5fd4d..a5c4b70bc3 100644 --- a/tools/xen/functions +++ b/tools/xen/functions @@ -268,3 +268,22 @@ function attach_network() { xe network-attach uuid=$net host-uuid=$host } + +function set_vm_memory() { + local vm_name_label + local memory + + vm_name_label="$1" + memory="$2" + + local vm + + vm=$(_vm_uuid "$vm_name_label") + + xe vm-memory-limits-set \ + static-min=${memory}MiB \ + static-max=${memory}MiB \ + dynamic-min=${memory}MiB \ + dynamic-max=${memory}MiB \ + uuid=$vm +} diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh index 997644d018..dc7959ad79 100755 --- a/tools/xen/install_os_domU.sh +++ b/tools/xen/install_os_domU.sh @@ -228,8 +228,11 @@ if [ -z 
"$templateuuid" ]; then $THIS_DIR/scripts/install-os-vpx.sh \ -t "$UBUNTU_INST_TEMPLATE_NAME" \ -n "$UBUNTU_INST_BRIDGE_OR_NET_NAME" \ - -l "$GUEST_NAME" \ - -r "$OSDOMU_MEM_MB" + -l "$GUEST_NAME" + + set_vm_memory "$GUEST_NAME" "$OSDOMU_MEM_MB" + + xe vm-start vm="$GUEST_NAME" # wait for install to finish wait_for_VM_to_halt @@ -255,6 +258,9 @@ fi # Install XenServer tools, and other such things $THIS_DIR/prepare_guest_template.sh "$GUEST_NAME" +# Set virtual machine parameters +set_vm_memory "$GUEST_NAME" "$OSDOMU_MEM_MB" + # start the VM to run the prepare steps xe vm-start vm="$GUEST_NAME" diff --git a/tools/xen/scripts/install-os-vpx.sh b/tools/xen/scripts/install-os-vpx.sh index 8ee8b675a9..c94a593e3d 100755 --- a/tools/xen/scripts/install-os-vpx.sh +++ b/tools/xen/scripts/install-os-vpx.sh @@ -20,8 +20,6 @@ set -eux BRIDGE= -RAM= -BALLOONING= NAME_LABEL= TEMPLATE_NAME= @@ -29,7 +27,7 @@ usage() { cat << EOF - Usage: $0 -t TEMPLATE_NW_INSTALL -l NAME_LABEL [-n BRIDGE] [-r RAM] [-b] + Usage: $0 -t TEMPLATE_NW_INSTALL -l NAME_LABEL [-n BRIDGE] Install a VM from a template @@ -37,9 +35,6 @@ cat << EOF -h Shows this message. -t template VM template to use - -b Enable memory ballooning. When set min_RAM=RAM/2 max_RAM=RAM. - -r MiB Specifies RAM used by the VPX, in MiB. - By default it will take the value from the XVA. -l name Specifies the name label for the VM. -n bridge The bridge/network to use for eth0. Defaults to xenbr0 EOF @@ -53,12 +48,6 @@ get_params() h) usage exit 1 ;; - b) - BALLOONING=1 - ;; - r) - RAM=$OPTARG - ;; n) BRIDGE=$OPTARG ;; @@ -119,19 +108,6 @@ create_vif() } -set_memory() -{ - local v="$1" - if [ "$RAM" != "" ] - then - echo "Setting RAM to $RAM MiB." - [ "$BALLOONING" == 1 ] && RAM_MIN=$(($RAM / 2)) || RAM_MIN=$RAM - xe vm-memory-limits-set static-min=16MiB static-max=${RAM}MiB \ - dynamic-min=${RAM_MIN}MiB dynamic-max=${RAM}MiB \ - uuid="$v" - fi -} - # Make the VM auto-start on server boot. 
set_auto_start() @@ -161,5 +137,3 @@ set_auto_start "$vm_uuid" create_vif "$vm_uuid" xe vm-param-set other-config:os-vpx=true uuid="$vm_uuid" xe vm-param-set actions-after-reboot=Destroy uuid="$vm_uuid" -set_memory "$vm_uuid" -xe vm-start uuid=$vm_uuid From bee5c50766698baa87f5e049734708436766777b Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Fri, 30 Aug 2013 13:48:08 -0400 Subject: [PATCH 0322/4704] Disable neutron quotas when using fake virt driver Nova's fake virt driver, can be used to do scale testing, so when using it disable neutron's quota limits. Change-Id: I9ce995079af04202179820777217ef294df71226 --- lib/neutron | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/lib/neutron b/lib/neutron index 31876dee88..bf04840e47 100644 --- a/lib/neutron +++ b/lib/neutron @@ -507,6 +507,15 @@ function _configure_neutron_common() { done fi + if [ "$VIRT_DRIVER" = 'fake' ]; then + # Disable arbitrary limits + iniset $NEUTRON_CONF quotas quota_network -1 + iniset $NEUTRON_CONF quotas quota_subnet -1 + iniset $NEUTRON_CONF quotas quota_port -1 + iniset $NEUTRON_CONF quotas quota_security_group -1 + iniset $NEUTRON_CONF quotas quota_security_group_rule -1 + fi + _neutron_setup_rootwrap } From 49ba22460bfc7932f061e7c2a100d73c8781d48b Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Fri, 9 Aug 2013 19:51:20 -0500 Subject: [PATCH 0323/4704] Move RHEL6 hacks to tools/fixup_stuff.sh Change-Id: Ice983bc16379bc2bc25659c37cfc16b63fdfc34b --- stack.sh | 58 ---------------------------------- tools/fixup_stuff.sh | 75 +++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 74 insertions(+), 59 deletions(-) diff --git a/stack.sh b/stack.sh index 8f59328792..df3cc4ed36 100755 --- a/stack.sh +++ b/stack.sh @@ -589,64 +589,6 @@ $TOP_DIR/tools/install_pip.sh # Do the ugly hacks for borken packages and distros $TOP_DIR/tools/fixup_stuff.sh - -# System-specific preconfigure -# ============================ - -if [[ is_fedora && $DISTRO =~ (rhel6) ]]; then - # Disable selinux 
to avoid configuring to allow Apache access - # to Horizon files or run nodejs (LP#1175444) - if selinuxenabled; then - sudo setenforce 0 - fi - - # The following workarounds break xenserver - if [ "$VIRT_DRIVER" != 'xenserver' ]; then - # An old version of ``python-crypto`` (2.0.1) may be installed on a - # fresh system via Anaconda and the dependency chain - # ``cas`` -> ``python-paramiko`` -> ``python-crypto``. - # ``pip uninstall pycrypto`` will remove the packaged ``.egg-info`` - # file but leave most of the actual library files behind in - # ``/usr/lib64/python2.6/Crypto``. Later ``pip install pycrypto`` - # will install over the packaged files resulting - # in a useless mess of old, rpm-packaged files and pip-installed files. - # Remove the package so that ``pip install python-crypto`` installs - # cleanly. - # Note: other RPM packages may require ``python-crypto`` as well. - # For example, RHEL6 does not install ``python-paramiko packages``. - uninstall_package python-crypto - - # A similar situation occurs with ``python-lxml``, which is required by - # ``ipa-client``, an auditing package we don't care about. The - # build-dependencies needed for ``pip install lxml`` (``gcc``, - # ``libxml2-dev`` and ``libxslt-dev``) are present in - # ``files/rpms/general``. - uninstall_package python-lxml - fi - - # If the ``dbus`` package was installed by DevStack dependencies the - # uuid may not be generated because the service was never started (PR#598200), - # causing Nova to stop later on complaining that ``/var/lib/dbus/machine-id`` - # does not exist. - sudo service messagebus restart - - # ``setup.py`` contains a ``setup_requires`` package that is supposed - # to be transient. However, RHEL6 distribute has a bug where - # ``setup_requires`` registers entry points that are not cleaned - # out properly after the setup-phase resulting in installation failures - # (bz#924038). 
Pre-install the problem package so the ``setup_requires`` - # dependency is satisfied and it will not be installed transiently. - # Note we do this before the track-depends below. - pip_install hgtools - - # RHEL6's version of ``python-nose`` is incompatible with Tempest. - # Install nose 1.1 (Tempest-compatible) from EPEL - install_package python-nose1.1 - # Add a symlink for the new nosetests to allow tox for Tempest to - # work unmolested. - sudo ln -sf /usr/bin/nosetests1.1 /usr/local/bin/nosetests -fi - install_rpc_backend if is_service_enabled $DATABASE_BACKENDS; then diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index 60d0f468e0..371b25fc8f 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -9,10 +9,17 @@ # pip 1.4 doesn't fix it (1.3 did) # - httplib2 0.8 permissions are 600 in the package and # pip 1.4 doesn't fix it (1.3 did) +# - RHEL6: +# - set selinux not enforcing +# - (re)start messagebus daemon +# - remove distro packages python-crypto and python-lxml +# - pre-install hgtools to work around a bug in RHEL6 distribute +# - install nose 1.1 from EPEL + # Keep track of the current directory TOOLS_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=`cd $TOOLS_DIR/..; pwd` +TOP_DIR=$(cd $TOOLS_DIR/..; pwd) # Change dir to top of devstack cd $TOP_DIR @@ -22,6 +29,10 @@ source $TOP_DIR/functions FILES=$TOP_DIR/files + +# Python Packages +# --------------- + # Pre-install affected packages so we can fix the permissions sudo pip install prettytable sudo pip install httplib2 @@ -41,3 +52,65 @@ for dir in $SITE_DIRS; do fi done + + +# RHEL6 +# ----- + +if [[ $DISTRO =~ (rhel6) ]]; then + + # Disable selinux to avoid configuring to allow Apache access + # to Horizon files or run nodejs (LP#1175444) + # FIXME(dtroyer): see if this can be skipped without node or if Horizon is not enabled + if selinuxenabled; then + sudo setenforce 0 + fi + + # If the ``dbus`` package was installed by DevStack dependencies the + # uuid may not be generated because the 
service was never started (PR#598200), + # causing Nova to stop later on complaining that ``/var/lib/dbus/machine-id`` + # does not exist. + sudo service messagebus restart + + # The following workarounds break xenserver + if [ "$VIRT_DRIVER" != 'xenserver' ]; then + # An old version of ``python-crypto`` (2.0.1) may be installed on a + # fresh system via Anaconda and the dependency chain + # ``cas`` -> ``python-paramiko`` -> ``python-crypto``. + # ``pip uninstall pycrypto`` will remove the packaged ``.egg-info`` + # file but leave most of the actual library files behind in + # ``/usr/lib64/python2.6/Crypto``. Later ``pip install pycrypto`` + # will install over the packaged files resulting + # in a useless mess of old, rpm-packaged files and pip-installed files. + # Remove the package so that ``pip install python-crypto`` installs + # cleanly. + # Note: other RPM packages may require ``python-crypto`` as well. + # For example, RHEL6 does not install ``python-paramiko packages``. + uninstall_package python-crypto + + # A similar situation occurs with ``python-lxml``, which is required by + # ``ipa-client``, an auditing package we don't care about. The + # build-dependencies needed for ``pip install lxml`` (``gcc``, + # ``libxml2-dev`` and ``libxslt-dev``) are present in + # ``files/rpms/general``. + uninstall_package python-lxml + fi + + # ``setup.py`` contains a ``setup_requires`` package that is supposed + # to be transient. However, RHEL6 distribute has a bug where + # ``setup_requires`` registers entry points that are not cleaned + # out properly after the setup-phase resulting in installation failures + # (bz#924038). Pre-install the problem package so the ``setup_requires`` + # dependency is satisfied and it will not be installed transiently. + # Note we do this before the track-depends in ``stack.sh``. + pip_install hgtools + + + # RHEL6's version of ``python-nose`` is incompatible with Tempest. 
+ # Install nose 1.1 (Tempest-compatible) from EPEL + install_package python-nose1.1 + # Add a symlink for the new nosetests to allow tox for Tempest to + # work unmolested. + sudo ln -sf /usr/bin/nosetests1.1 /usr/local/bin/nosetests + +fi From 4728001d014a38409aabf639fc9a06024342321a Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Sat, 31 Aug 2013 12:12:46 +0100 Subject: [PATCH 0324/4704] xenapi: Increase default OS domU memory to 2G In XenServer scenarios, an additional domU is created to run OpenStack services. This change is increasing the memory for that VM to speed up test runs. Change-Id: I322f4e4703e506620fa7e7456c4264ee0d050edc --- tools/xen/xenrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/xen/xenrc b/tools/xen/xenrc index 03b30ac55e..f698be1085 100644 --- a/tools/xen/xenrc +++ b/tools/xen/xenrc @@ -13,7 +13,7 @@ CLEAN_TEMPLATES=${CLEAN_TEMPLATES:-false} # Size of image VDI_MB=${VDI_MB:-5000} -OSDOMU_MEM_MB=1024 +OSDOMU_MEM_MB=2048 OSDOMU_VDI_GB=8 # Network mapping. Specify bridge names or network names. Network names may From bbf56237747cace1f4c8f393893239488b9a344f Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Fri, 30 Aug 2013 12:40:18 +0100 Subject: [PATCH 0325/4704] xenapi: README.md and embedded localrc updates This change updates the readme, so that it is easier to get started, and reflects the actual behavior of DevStack. 
Changes in README.md: - Link to xenserver download page - Remove neutron interface - it is no longer installed by devstack - Add appendix with - How to use a different ubuntu mirror - How to use a proxy for ubuntu - How to re-use the created VM - Remove run from snapshot section and "do cloudy stuff" Changes in the Readme-embedded sample localrc: - Upload a vhd image and a uec image by default - easier to get started Change-Id: I13bb8e59ff5367ff7623fe9aa273886a957f81a7 --- tools/xen/README.md | 131 +++++++++++++++++++++++++++++++------------- 1 file changed, 94 insertions(+), 37 deletions(-) diff --git a/tools/xen/README.md b/tools/xen/README.md index af54d729b1..06192ed2b7 100644 --- a/tools/xen/README.md +++ b/tools/xen/README.md @@ -1,48 +1,54 @@ -# Getting Started With XenServer 5.6 and Devstack -The purpose of the code in this directory it to help developers bootstrap -a XenServer 5.6 (or greater) + Openstack development environment. This file gives -some pointers on how to get started. +# Getting Started With XenServer and Devstack -Xenserver is a Type 1 hypervisor, so it needs to be installed on bare metal. -The Openstack services are configured to run within a "privileged" virtual -machine on the Xenserver host (called OS domU). The VM uses the XAPI toolstack -to communicate with the host. +The purpose of the code in this directory it to help developers bootstrap a +XenServer 6.2 (older versions may also work) + Openstack development +environment. This file gives some pointers on how to get started. + +Xenserver is a Type 1 hypervisor, so it is best installed on bare metal. The +Openstack services are configured to run within a virtual machine (called OS +domU) on the XenServer host. The VM uses the XAPI toolstack to communicate with +the host over a network connection (see `MGT_BRIDGE_OR_NET_NAME`). The provided localrc helps to build a basic environment. 
-The requirements are: + +## Introduction + +### Requirements + - An internet-enabled network with a DHCP server on it - XenServer box plugged in to the same network This network will be used as the OpenStack management network. The VM Network and the Public Network will not be connected to any physical interfaces, only new virtual networks will be created by the `install_os_domU.sh` script. -Steps to follow: +### Steps to follow + - Install XenServer - Download Devstack to XenServer - Customise `localrc` - Start `install_os_domU.sh` script +### Brief explanation + The `install_os_domU.sh` script will: - Setup XenAPI plugins - Create the named networks, if they don't exist - - Preseed-Netinstall an Ubuntu Virtual Machine, with 1 network interface: - - eth0 - Connected to `UBUNTU_INST_BRIDGE_OR_NET_NAME`, defaults to - `MGT_BRIDGE_OR_NET_NAME` + - Preseed-Netinstall an Ubuntu Virtual Machine (NOTE: you can save and reuse + it, see [Reuse the Ubuntu VM](#reuse-the-ubuntu-vm)), with 1 network + interface: + - `eth0` - Connected to `UBUNTU_INST_BRIDGE_OR_NET_NAME`, defaults to + `MGT_BRIDGE_OR_NET_NAME` - After the Ubuntu install process finished, the network configuration is modified to: - - eth0 - Management interface, connected to `MGT_BRIDGE_OR_NET_NAME` - - eth1 - VM interface, connected to `VM_BRIDGE_OR_NET_NAME` - - eth2 - Public interface, connected to `PUB_BRIDGE_OR_NET_NAME` - - (eth3) - Optional network interface if neutron is used, to enforce xapi to - create the underlying bridge. + - `eth0` - Management interface, connected to `MGT_BRIDGE_OR_NET_NAME`. Xapi + must be accessible through this network. + - `eth1` - VM interface, connected to `VM_BRIDGE_OR_NET_NAME` + - `eth2` - Public interface, connected to `PUB_BRIDGE_OR_NET_NAME` - Start devstack inside the created OpenStack VM ## Step 1: Install Xenserver -Install XenServer 5.6+ on a clean box. 
You can get XenServer by signing -up for an account on citrix.com, and then visiting: -https://www.citrix.com/English/ss/downloads/details.asp?downloadId=2311504&productId=683148 - -For details on installation, see: http://wiki.openstack.org/XenServer/Install +Install XenServer on a clean box. You can download the latest XenServer for +free from: http://www.xenserver.org/ The XenServer IP configuration depends on your local network setup. If you are using dhcp, make a reservation for XenServer, so its IP address won't change @@ -85,17 +91,20 @@ Of course, use real passwords if this machine is exposed. XENAPI_CONNECTION_URL="http://address_of_your_xenserver" VNCSERVER_PROXYCLIENT_ADDRESS=address_of_your_xenserver - # Do not download the usual images - IMAGE_URLS="" - # Explicitly set virt driver here + # Download a vhd and a uec image + IMAGE_URLS="\ + https://github.com/downloads/citrix-openstack/warehouse/cirros-0.3.0-x86_64-disk.vhd.tgz,\ + http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-uec.tar.gz" + + # Explicitly set virt driver VIRT_DRIVER=xenserver - # Explicitly enable multi-host + + # Explicitly enable multi-host for nova-network HA MULTI_HOST=1 + # Give extra time for boot ACTIVE_TIMEOUT=45 - # NOTE: the value of FLAT_NETWORK_BRIDGE will automatically be determined - # by install_os_domU.sh script. EOF ## Step 4: Run `./install_os_domU.sh` from the `tools/xen` directory @@ -107,12 +116,60 @@ Once this script finishes executing, log into the VM (openstack domU) that it installed and tail the run.sh.log file. You will need to wait until it run.sh has finished executing. -## Step 5: Do cloudy stuff! -* Play with horizon -* Play with the CLI -* Log bugs to devstack and core projects, and submit fixes! +# Appendix + +This section contains useful information for running devstack in CI +environments / using ubuntu network mirrors. + +## Use a specific Ubuntu mirror for installation + +To speed up the Ubuntu installation, you can use a specific mirror. 
To specify +a mirror explicitly, include the following settings in your `localrc` file: + + UBUNTU_INST_HTTP_HOSTNAME="archive.ubuntu.com" + UBUNTU_INST_HTTP_DIRECTORY="/ubuntu" + +These variables set the `mirror/http/hostname` and `mirror/http/directory` +settings in the ubuntu preseed file. The minimal ubuntu VM will use the +specified parameters. + +## Use an http proxy to speed up Ubuntu installation + +To further speed up the Ubuntu VM and package installation, an internal http +proxy could be used. `squid-deb-proxy` has prooven to be stable. To use an http +proxy, specify: + + UBUNTU_INST_HTTP_PROXY="http://ubuntu-proxy.somedomain.com:8000" + +in your `localrc` file. + +## Reuse the Ubuntu VM + +Performing a minimal ubuntu installation could take a lot of time, depending on +your mirror/network speed. If you run `install_os_domU.sh` script on a clean +hypervisor, you can speed up the installation, by re-using the ubuntu vm from +a previous installation. + +### Export the Ubuntu VM to an XVA + +Given you have an nfs export `TEMPLATE_NFS_DIR`: + + TEMPLATE_FILENAME=devstack-jeos.xva + TEMPLATE_NAME=jeos_template_for_devstack + mountdir=$(mktemp -d) + mount -t nfs "$TEMPLATE_NFS_DIR" "$mountdir" + VM="$(xe template-list name-label="$TEMPLATE_NAME" --minimal)" + xe template-export template-uuid=$VM filename="$mountdir/$TEMPLATE_FILENAME" + umount "$mountdir" + rm -rf "$mountdir" + +### Import the Ubuntu VM + +Given you have an nfs export `TEMPLATE_NFS_DIR` where you exported the Ubuntu +VM as `TEMPLATE_FILENAME`: -## Step 6: Run from snapshot -If you want to quicky re-run devstack from a clean state, -using the same settings you used in your previous run, -you can revert the DomU to the snapshot called `before_first_boot` + mountdir=$(mktemp -d) + mount -t nfs "$TEMPLATE_NFS_DIR" "$mountdir" + xe vm-import filename="$mountdir/$TEMPLATE_FILENAME" + umount "$mountdir" + rm -rf "$mountdir" From f39ee96f1dc7fae9aaad194669467a7f9fcc3d31 Mon Sep 17 00:00:00 2001 From: 
Jorge Valderrama Romero Date: Mon, 2 Sep 2013 17:18:40 +0200 Subject: [PATCH 0326/4704] Fix deprecated params user_id, role_id, tenant_id Update keystone cli by changing parameter options (user_id, role_id and tenant_id) to use '-' rather than '_' in the method user_role_add and consequently the method user_role_remove without maintain backward compatibility because these are deprecated. python-keystoneclient - Bug #1150434 Change-Id: Ia5113718eb050cf7dba443b8d0caf3bdaa1730f0 --- files/keystone_data.sh | 36 ++++++++++++++++++------------------ lib/cinder | 6 +++--- lib/keystone | 14 +++++++------- lib/neutron | 6 +++--- lib/nova | 6 +++--- lib/swift | 8 ++++---- 6 files changed, 38 insertions(+), 38 deletions(-) diff --git a/files/keystone_data.sh b/files/keystone_data.sh index 45f9c8165c..3f3137cb14 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -58,9 +58,9 @@ if [[ "$ENABLED_SERVICES" =~ "n-api" ]] && [[ "$ENABLED_SERVICES" =~ "s-proxy" | # Nova needs ResellerAdmin role to download images when accessing # swift through the s3 api. 
keystone user-role-add \ - --tenant_id $SERVICE_TENANT \ - --user_id $NOVA_USER \ - --role_id $RESELLER_ROLE + --tenant-id $SERVICE_TENANT \ + --user-id $NOVA_USER \ + --role-id $RESELLER_ROLE fi # Heat @@ -69,9 +69,9 @@ if [[ "$ENABLED_SERVICES" =~ "heat" ]]; then --pass="$SERVICE_PASSWORD" \ --tenant_id $SERVICE_TENANT \ --email=heat@example.com) - keystone user-role-add --tenant_id $SERVICE_TENANT \ - --user_id $HEAT_USER \ - --role_id $SERVICE_ROLE + keystone user-role-add --tenant-id $SERVICE_TENANT \ + --user-id $HEAT_USER \ + --role-id $SERVICE_ROLE # heat_stack_user role is for users created by Heat keystone role-create --name heat_stack_user if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then @@ -106,9 +106,9 @@ if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then --tenant_id $SERVICE_TENANT \ --email=glance@example.com) keystone user-role-add \ - --tenant_id $SERVICE_TENANT \ - --user_id $GLANCE_USER \ - --role_id $ADMIN_ROLE + --tenant-id $SERVICE_TENANT \ + --user-id $GLANCE_USER \ + --role-id $ADMIN_ROLE if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then GLANCE_SERVICE=$(get_id keystone service-create \ --name=glance \ @@ -129,13 +129,13 @@ if [[ "$ENABLED_SERVICES" =~ "ceilometer" ]]; then --pass="$SERVICE_PASSWORD" \ --tenant_id $SERVICE_TENANT \ --email=ceilometer@example.com) - keystone user-role-add --tenant_id $SERVICE_TENANT \ - --user_id $CEILOMETER_USER \ - --role_id $ADMIN_ROLE + keystone user-role-add --tenant-id $SERVICE_TENANT \ + --user-id $CEILOMETER_USER \ + --role-id $ADMIN_ROLE # Ceilometer needs ResellerAdmin role to access swift account stats. 
- keystone user-role-add --tenant_id $SERVICE_TENANT \ - --user_id $CEILOMETER_USER \ - --role_id $RESELLER_ROLE + keystone user-role-add --tenant-id $SERVICE_TENANT \ + --user-id $CEILOMETER_USER \ + --role-id $RESELLER_ROLE if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then CEILOMETER_SERVICE=$(get_id keystone service-create \ --name=ceilometer \ @@ -192,7 +192,7 @@ if [[ "$ENABLED_SERVICES" =~ "tempest" ]]; then --pass="$ADMIN_PASSWORD" \ --email=alt_demo@example.com) keystone user-role-add \ - --tenant_id $ALT_DEMO_TENANT \ - --user_id $ALT_DEMO_USER \ - --role_id $MEMBER_ROLE + --tenant-id $ALT_DEMO_TENANT \ + --user-id $ALT_DEMO_USER \ + --role-id $MEMBER_ROLE fi diff --git a/lib/cinder b/lib/cinder index 826b9586da..b30829f6de 100644 --- a/lib/cinder +++ b/lib/cinder @@ -335,9 +335,9 @@ create_cinder_accounts() { --email=cinder@example.com \ | grep " id " | get_field 2) keystone user-role-add \ - --tenant_id $SERVICE_TENANT \ - --user_id $CINDER_USER \ - --role_id $ADMIN_ROLE + --tenant-id $SERVICE_TENANT \ + --user-id $CINDER_USER \ + --role-id $ADMIN_ROLE if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then CINDER_SERVICE=$(keystone service-create \ --name=cinder \ diff --git a/lib/keystone b/lib/keystone index 0a35dd5d80..535710f52b 100644 --- a/lib/keystone +++ b/lib/keystone @@ -217,9 +217,9 @@ create_keystone_accounts() { --name admin \ | grep " id " | get_field 2) keystone user-role-add \ - --user_id $ADMIN_USER \ - --role_id $ADMIN_ROLE \ - --tenant_id $ADMIN_TENANT + --user-id $ADMIN_USER \ + --role-id $ADMIN_ROLE \ + --tenant-id $ADMIN_TENANT # service SERVICE_TENANT=$(keystone tenant-create \ @@ -244,10 +244,10 @@ create_keystone_accounts() { --pass "$ADMIN_PASSWORD" \ --email demo@example.com \ | grep " id " | get_field 2) - keystone user-role-add --user_id $DEMO_USER --role_id $MEMBER_ROLE --tenant_id $DEMO_TENANT - keystone user-role-add --user_id $ADMIN_USER --role_id $ADMIN_ROLE --tenant_id $DEMO_TENANT - keystone user-role-add --user_id 
$DEMO_USER --role_id $ANOTHER_ROLE --tenant_id $DEMO_TENANT - keystone user-role-add --user_id $DEMO_USER --role_id $MEMBER_ROLE --tenant_id $INVIS_TENANT + keystone user-role-add --user-id $DEMO_USER --role-id $MEMBER_ROLE --tenant-id $DEMO_TENANT + keystone user-role-add --user-id $ADMIN_USER --role-id $ADMIN_ROLE --tenant-id $DEMO_TENANT + keystone user-role-add --user-id $DEMO_USER --role-id $ANOTHER_ROLE --tenant-id $DEMO_TENANT + keystone user-role-add --user-id $DEMO_USER --role-id $MEMBER_ROLE --tenant-id $INVIS_TENANT # Keystone if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then diff --git a/lib/neutron b/lib/neutron index 01fe3eafba..f6c2377dbb 100644 --- a/lib/neutron +++ b/lib/neutron @@ -301,9 +301,9 @@ function create_neutron_accounts() { --email=neutron@example.com \ | grep " id " | get_field 2) keystone user-role-add \ - --tenant_id $SERVICE_TENANT \ - --user_id $NEUTRON_USER \ - --role_id $ADMIN_ROLE + --tenant-id $SERVICE_TENANT \ + --user-id $NEUTRON_USER \ + --role-id $ADMIN_ROLE if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then NEUTRON_SERVICE=$(keystone service-create \ --name=neutron \ diff --git a/lib/nova b/lib/nova index 0b65f84366..19093adc3a 100644 --- a/lib/nova +++ b/lib/nova @@ -399,9 +399,9 @@ create_nova_accounts() { --email=nova@example.com \ | grep " id " | get_field 2) keystone user-role-add \ - --tenant_id $SERVICE_TENANT \ - --user_id $NOVA_USER \ - --role_id $ADMIN_ROLE + --tenant-id $SERVICE_TENANT \ + --user-id $NOVA_USER \ + --role-id $ADMIN_ROLE if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then NOVA_SERVICE=$(keystone service-create \ --name=nova \ diff --git a/lib/swift b/lib/swift index 8e641521a0..f72beafef7 100644 --- a/lib/swift +++ b/lib/swift @@ -464,7 +464,7 @@ function create_swift_accounts() { SWIFT_USER=$(keystone user-create --name=swift --pass="$SERVICE_PASSWORD" \ --tenant_id $SERVICE_TENANT --email=swift@example.com | grep " id " | get_field 2) - keystone user-role-add --tenant_id $SERVICE_TENANT --user_id 
$SWIFT_USER --role_id $ADMIN_ROLE + keystone user-role-add --tenant-id $SERVICE_TENANT --user-id $SWIFT_USER --role-id $ADMIN_ROLE if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then SWIFT_SERVICE=$(keystone service-create --name=swift --type="object-store" \ @@ -479,14 +479,14 @@ function create_swift_accounts() { SWIFT_TENANT_TEST1=$(keystone tenant-create --name=swifttenanttest1 | grep " id " | get_field 2) SWIFT_USER_TEST1=$(keystone user-create --name=swiftusertest1 --pass=testing --email=test@example.com | grep " id " | get_field 2) - keystone user-role-add --user_id $SWIFT_USER_TEST1 --role_id $ADMIN_ROLE --tenant_id $SWIFT_TENANT_TEST1 + keystone user-role-add --user-id $SWIFT_USER_TEST1 --role-id $ADMIN_ROLE --tenant-id $SWIFT_TENANT_TEST1 SWIFT_USER_TEST3=$(keystone user-create --name=swiftusertest3 --pass=testing3 --email=test3@example.com | grep " id " | get_field 2) - keystone user-role-add --user_id $SWIFT_USER_TEST3 --role_id $ANOTHER_ROLE --tenant_id $SWIFT_TENANT_TEST1 + keystone user-role-add --user-id $SWIFT_USER_TEST3 --role-id $ANOTHER_ROLE --tenant-id $SWIFT_TENANT_TEST1 SWIFT_TENANT_TEST2=$(keystone tenant-create --name=swifttenanttest2 | grep " id " | get_field 2) SWIFT_USER_TEST2=$(keystone user-create --name=swiftusertest2 --pass=testing2 --email=test2@example.com | grep " id " | get_field 2) - keystone user-role-add --user_id $SWIFT_USER_TEST2 --role_id $ADMIN_ROLE --tenant_id $SWIFT_TENANT_TEST2 + keystone user-role-add --user-id $SWIFT_USER_TEST2 --role-id $ADMIN_ROLE --tenant-id $SWIFT_TENANT_TEST2 } # init_swift() - Initialize rings From 533e14d6a5fc1ba3dbd24fb0075ef1eafd00a705 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Fri, 30 Aug 2013 15:11:22 -0500 Subject: [PATCH 0327/4704] Copy policy_add() from Grenade functions policy_all() was added to Grenade's functions file, which is notmally synced from DevStack so we need to bring it over here before the next sync. 
Change-Id: Ifd852e9d1ffe39fa23f6312d1ddf2874b5f2b9f0 --- functions | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/functions b/functions index f24cc89e82..14e817c999 100644 --- a/functions +++ b/functions @@ -1645,6 +1645,37 @@ vercmp_numbers() { } +# ``policy_add policy_file policy_name policy_permissions`` +# +# Add a policy to a policy.json file +# Do nothing if the policy already exists + +function policy_add() { + local policy_file=$1 + local policy_name=$2 + local policy_perm=$3 + + if grep -q ${policy_name} ${policy_file}; then + echo "Policy ${policy_name} already exists in ${policy_file}" + return + fi + + # Add a terminating comma to policy lines without one + # Remove the closing '}' and all lines following to the end-of-file + local tmpfile=$(mktemp) + uniq ${policy_file} | sed -e ' + s/]$/],/ + /^[}]/,$d + ' > ${tmpfile} + + # Append policy and closing brace + echo " \"${policy_name}\": ${policy_perm}" >>${tmpfile} + echo "}" >>${tmpfile} + + mv ${tmpfile} ${policy_file} +} + + # Restore xtrace $XTRACE From 4d0d5ce778d4fa79cdbe2e5532608060a95870e3 Mon Sep 17 00:00:00 2001 From: "Walter A. Boring IV" Date: Fri, 30 Aug 2013 12:39:42 -0700 Subject: [PATCH 0328/4704] Cinder needs iscsiadm available This patch adds the binary packages that contains the iscsiadm utility for cinder. Cinder uses the iscsiadm utility for various actions and it should be there for devstack users. 
Fixes bug #1219032 Change-Id: I8e1c6e2e5d4bfade50aba9259b6da3957d6d622d --- files/apts/cinder | 2 ++ files/rpms-suse/cinder | 1 + files/rpms/cinder | 1 + 3 files changed, 4 insertions(+) diff --git a/files/apts/cinder b/files/apts/cinder index 32cb3a0039..f8e3b6d06d 100644 --- a/files/apts/cinder +++ b/files/apts/cinder @@ -3,3 +3,5 @@ lvm2 qemu-utils libpq-dev python-dev +open-iscsi +open-iscsi-utils # Deprecated since quantal dist:lucid,oneiric,precise diff --git a/files/rpms-suse/cinder b/files/rpms-suse/cinder index 49e2cb8249..55078da27c 100644 --- a/files/rpms-suse/cinder +++ b/files/rpms-suse/cinder @@ -3,3 +3,4 @@ tgt qemu-tools python-devel postgresql-devel +open-iscsi diff --git a/files/rpms/cinder b/files/rpms/cinder index 699f2fc22c..c4edb68f14 100644 --- a/files/rpms/cinder +++ b/files/rpms/cinder @@ -3,3 +3,4 @@ scsi-target-utils qemu-img python-devel postgresql-devel +iscsi-initiator-utils From 08df29bff4e5c9e717358e7593e8c5a9c51a26bf Mon Sep 17 00:00:00 2001 From: Nikolay Sobolevskiy Date: Fri, 30 Aug 2013 21:59:15 +0400 Subject: [PATCH 0329/4704] Add my_ip option in cinder.conf Then cinder installed in multinode environment with more than one interface, it's better to use CINDER_SERVICE_HOST option for cinder ip address. 
Change-Id: I775b70407379d9c97696f536f5f89cecb33657be --- lib/cinder | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/cinder b/lib/cinder index 826b9586da..aed3004370 100644 --- a/lib/cinder +++ b/lib/cinder @@ -226,6 +226,7 @@ function configure_cinder() { iniset $CINDER_CONF DEFAULT volume_group $VOLUME_GROUP iniset $CINDER_CONF DEFAULT volume_name_template ${VOLUME_NAME_PREFIX}%s fi + iniset $CINDER_CONF DEFAULT my_ip "$CINDER_SERVICE_HOST" iniset $CINDER_CONF DEFAULT iscsi_helper tgtadm iniset $CINDER_CONF DEFAULT sql_connection `database_connection_url cinder` iniset $CINDER_CONF DEFAULT api_paste_config $CINDER_API_PASTE_INI From e2c4ee23642a00ebed0343ad2086b5c250f24516 Mon Sep 17 00:00:00 2001 From: sbauza Date: Thu, 29 Aug 2013 17:29:46 +0200 Subject: [PATCH 0330/4704] Fix Neutron issues related to Baremetal service When deploying devstack on a single host with a single NIC and baremetal and neutron services enabled, the host looses Internet access as default route is deleted. Also, if localrc is not correctly set with correct values, OVS ports and Neutron net and subnet aren't created (commands missing arguments), we need devstack to properly fail. Change-Id: I7f39bbdf7b8cb544b8b4a59effe16f04b85d1425 --- lib/neutron | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/lib/neutron b/lib/neutron index dc3c622a7e..5664ff2cc5 100644 --- a/lib/neutron +++ b/lib/neutron @@ -327,6 +327,9 @@ function create_neutron_initial_network() { # Since neutron command is executed in admin context at this point, # ``--tenant_id`` needs to be specified. if is_baremetal; then + if [[ "$PUBLIC_INTERFACE" == '' || "$OVS_PHYSICAL_BRIDGE" == '' ]]; then + die $LINENO "Neutron settings for baremetal not set.. 
exiting" + fi sudo ovs-vsctl add-port $OVS_PHYSICAL_BRIDGE $PUBLIC_INTERFACE for IP in $(ip addr show dev $PUBLIC_INTERFACE | grep ' inet ' | awk '{print $2}'); do sudo ip addr del $IP dev $PUBLIC_INTERFACE @@ -335,6 +338,7 @@ function create_neutron_initial_network() { NET_ID=$(neutron net-create $PHYSICAL_NETWORK --tenant_id $TENANT_ID --provider:network_type flat --provider:physical_network "$PHYSICAL_NETWORK" | grep ' id ' | get_field 2) SUBNET_ID=$(neutron subnet-create --tenant_id $TENANT_ID --ip_version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} --gateway $NETWORK_GATEWAY --name $PRIVATE_SUBNET_NAME $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2) sudo ifconfig $OVS_PHYSICAL_BRIDGE up + sudo route add default gw $NETWORK_GATEWAY dev $OVS_PHYSICAL_BRIDGE else NET_ID=$(neutron net-create --tenant_id $TENANT_ID "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2) SUBNET_ID=$(neutron subnet-create --tenant_id $TENANT_ID --ip_version 4 --gateway $NETWORK_GATEWAY --name $PRIVATE_SUBNET_NAME $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2) @@ -493,7 +497,7 @@ function _configure_neutron_common() { # ``Q_PLUGIN_EXTRA_CONF_FILES=(file1, file2)`` neutron_plugin_configure_common - if [[ $Q_PLUGIN_CONF_PATH == '' || $Q_PLUGIN_CONF_FILENAME == '' || $Q_PLUGIN_CLASS == '' ]]; then + if [[ "$Q_PLUGIN_CONF_PATH" == '' || "$Q_PLUGIN_CONF_FILENAME" == '' || "$Q_PLUGIN_CLASS" == '' ]]; then die $LINENO "Neutron plugin not set.. exiting" fi From 1e3d318c861565ddc26746bed4818daee77e2f47 Mon Sep 17 00:00:00 2001 From: Nachi Ueno Date: Thu, 15 Aug 2013 18:15:31 -0700 Subject: [PATCH 0331/4704] Support OpenSwan in Neturon VPNaaS Neutron VPNaaS chagned ipsec package for OpenSwan. This commit updates the package. 
Change-Id: I333501a405fbc552c575d26cfbac083646d05dfd --- lib/neutron_plugins/services/vpn | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/neutron_plugins/services/vpn b/lib/neutron_plugins/services/vpn index 0a79a697ad..b8f5c7d56b 100644 --- a/lib/neutron_plugins/services/vpn +++ b/lib/neutron_plugins/services/vpn @@ -8,9 +8,10 @@ set +o xtrace AGENT_VPN_BINARY="$NEUTRON_BIN_DIR/neutron-vpn-agent" VPN_PLUGIN="neutron.services.vpn.plugin.VPNDriverPlugin" +IPSEC_PACKAGE=${IPSEC_PACKAGE:-"openswan"} function neutron_vpn_install_agent_packages() { - install_package strongswan + install_package $IPSEC_PACKAGE } function neutron_vpn_configure_common() { From b53bce1c262e59e1a39b8dd1d2cfcc2ab2e187ef Mon Sep 17 00:00:00 2001 From: fujioka yuuichi Date: Thu, 5 Sep 2013 19:08:50 +0900 Subject: [PATCH 0332/4704] Rename ceilometer alarm service name Rename service name "ceilometer-alarm-eval" to "ceilometer-alarm-singleton" and "ceilometer-alarm-notify" to ceilometer-alarm-notifier" in this patch. 
Change-Id: I6619cc02874f6f59c43ba2952325e9d0533e395d --- lib/ceilometer | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/lib/ceilometer b/lib/ceilometer index 8768122bab..53e98b86d7 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -5,7 +5,7 @@ # enable_service ceilometer-acompute ceilometer-acentral ceilometer-collector ceilometer-api # # To ensure Ceilometer alarming services are enabled also, further add to the localrc: -# enable_service ceilometer-alarm-notify ceilometer-alarm-eval +# enable_service ceilometer-alarm-notifier ceilometer-alarm-singleton # Dependencies: # - functions @@ -138,14 +138,14 @@ function start_ceilometer() { screen_it ceilometer-acentral "ceilometer-agent-central --config-file $CEILOMETER_CONF" screen_it ceilometer-collector "ceilometer-collector --config-file $CEILOMETER_CONF" screen_it ceilometer-api "ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF" - screen_it ceilometer-alarm-notify "ceilometer-alarm-notifier --config-file $CEILOMETER_CONF" - screen_it ceilometer-alarm-eval "ceilometer-alarm-singleton --config-file $CEILOMETER_CONF" + screen_it ceilometer-alarm-notifier "ceilometer-alarm-notifier --config-file $CEILOMETER_CONF" + screen_it ceilometer-alarm-singleton "ceilometer-alarm-singleton --config-file $CEILOMETER_CONF" } # stop_ceilometer() - Stop running processes function stop_ceilometer() { # Kill the ceilometer screen windows - for serv in ceilometer-acompute ceilometer-acentral ceilometer-collector ceilometer-api ceilometer-alarm-notify ceilometer-alarm-eval; do + for serv in ceilometer-acompute ceilometer-acentral ceilometer-collector ceilometer-api ceilometer-alarm-notifier ceilometer-alarm-singleton; do screen -S $SCREEN_NAME -p $serv -X kill done } From 69f745748d4f2bcfd3e678037187bce1f8e53ccf Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 27 Aug 2013 11:43:53 +0200 Subject: [PATCH 0333/4704] Switch Ceilometer default backend to MySQL MongoDB 2.4 
has not been available in the Ubuntu cloud archive for a while now, and the catch-up done by this driver leads me to think it might be a good idea to switch to SQL by default in devstack for now.
Change-Id: I09f4c6f57e71982b8c7fc92645b3ebec12ff1348 --- lib/nova | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/nova b/lib/nova index 32a51d3d4d..e2e1ea2835 100644 --- a/lib/nova +++ b/lib/nova @@ -444,6 +444,9 @@ function create_nova_conf() { iniset $NOVA_CONF DEFAULT s3_port "$S3_SERVICE_PORT" iniset $NOVA_CONF DEFAULT osapi_compute_extension "nova.api.openstack.compute.contrib.standard_extensions" iniset $NOVA_CONF DEFAULT my_ip "$HOST_IP" + iniset $NOVA_CONF DEFAULT osapi_compute_workers "4" + iniset $NOVA_CONF DEFAULT ec2_workers "4" + iniset $NOVA_CONF DEFAULT metadata_workers "4" iniset $NOVA_CONF DEFAULT sql_connection `database_connection_url nova` if is_baremetal; then iniset $NOVA_CONF baremetal sql_connection `database_connection_url nova_bm` From a6273b9378622318057c2f5f685022389a066818 Mon Sep 17 00:00:00 2001 From: Ryan Hsu Date: Wed, 4 Sep 2013 23:51:29 -0700 Subject: [PATCH 0335/4704] Provide a means of setting vmdk custom properties via image filename Custom properties for vmdk disk type, storage adapter type, and networking adapter type can now be retrieved from a vmdk image's filename. The filename format is defined as: -:: An example filename following this format would be debian-2.6.32-i646-thin:ide:VirtualE1000. If the vmdk filename does not match the above format then underlying nova driver will supply default values. 
Change-Id: I83483d20f984250bd8154d8e270b2e801d2df303 Closes-bug: #1221044 --- functions | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/functions b/functions index df8166a0e2..d14c973715 100644 --- a/functions +++ b/functions @@ -1256,7 +1256,25 @@ function upload_image() { if [[ "$image_url" =~ '.vmdk' ]]; then IMAGE="$FILES/${IMAGE_FNAME}" IMAGE_NAME="${IMAGE_FNAME%.vmdk}" - glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --is-public=True --container-format bare --disk-format vmdk --property vmware-disktype="preallocated" < "${IMAGE}" + + # Before we can upload vmdk type images to glance, we need to know it's + # disk type, storage adapter, and networking adapter. These values are + # passed to glance as custom properties. We take these values from the + # vmdk filename, which is expected in the following format: + # + # -:: + # + # If the filename does not follow the above format then the vsphere + # driver will supply default values. + property_string=`echo "$IMAGE_NAME" | grep -oP '(?<=-)(?!.*-).+:.+:.+$'` + if [[ ! -z "$property_string" ]]; then + IFS=':' read -a props <<< "$property_string" + vmdk_disktype="${props[0]}" + vmdk_adapter_type="${props[1]}" + vmdk_net_adapter="${props[2]}" + fi + + glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --is-public=True --container-format bare --disk-format vmdk --property vmware-disktype="$vmdk_disktype" --property vmware_adaptertype="$vmdk_adapter_type" --property hw_vif_model="$vmdk_net_adapter" < "${IMAGE}" return fi From e118655028bfb093c5dd0cde4d615a23a0abbc7c Mon Sep 17 00:00:00 2001 From: Angus Salkeld Date: Fri, 6 Sep 2013 13:35:09 +1000 Subject: [PATCH 0336/4704] Fix Heat's signing_dir This is not critical but looks odd using api-cfn. 
Change-Id: Ie0f5c66f635b4a7c6ba51581ad01bab624158e61 --- lib/heat | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/lib/heat b/lib/heat index 67509bcfa0..58505ab792 100644 --- a/lib/heat +++ b/lib/heat @@ -100,7 +100,7 @@ function configure_heat() { iniset $HEAT_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME iniset $HEAT_CONF keystone_authtoken admin_user heat iniset $HEAT_CONF keystone_authtoken admin_password $SERVICE_PASSWORD - iniset $HEAT_CONF keystone_authtoken signing_dir $HEAT_AUTH_CACHE_DIR/api-cfn + iniset $HEAT_CONF keystone_authtoken signing_dir $HEAT_AUTH_CACHE_DIR # ec2authtoken iniset $HEAT_CONF ec2authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0 @@ -148,12 +148,8 @@ function init_heat() { # create_heat_cache_dir() - Part of the init_heat() process function create_heat_cache_dir() { # Create cache dirs - sudo mkdir -p $HEAT_AUTH_CACHE_DIR/api - sudo chown $STACK_USER $HEAT_AUTH_CACHE_DIR/api - sudo mkdir -p $HEAT_AUTH_CACHE_DIR/api-cfn - sudo chown $STACK_USER $HEAT_AUTH_CACHE_DIR/api-cfn - sudo mkdir -p $HEAT_AUTH_CACHE_DIR/api-cloudwatch - sudo chown $STACK_USER $HEAT_AUTH_CACHE_DIR/api-cloudwatch + sudo mkdir -p $HEAT_AUTH_CACHE_DIR + sudo chown $STACK_USER $HEAT_AUTH_CACHE_DIR } # install_heatclient() - Collect source and prepare From 5917868e75b0bd1a76bbf0e80eef50645e5b5c96 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Fri, 6 Sep 2013 16:14:17 +0200 Subject: [PATCH 0337/4704] Use 1.4.1 of pip. - This is where the option pip install --pre is. 
Change-Id: I3f836a701f17a4fea888ec51da62e7137cf0e6db --- tools/install_pip.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/install_pip.sh b/tools/install_pip.sh index 6e3e9d2104..cb414a7168 100755 --- a/tools/install_pip.sh +++ b/tools/install_pip.sh @@ -25,7 +25,7 @@ FILES=$TOP_DIR/files # Handle arguments -INSTALL_PIP_VERSION=${INSTALL_PIP_VERSION:-"1.4"} +INSTALL_PIP_VERSION=${INSTALL_PIP_VERSION:-"1.4.1"} while [[ -n "$1" ]]; do case $1 in --force) From bc6324771b538ff9aee3ad44c4ca5ecdad402273 Mon Sep 17 00:00:00 2001 From: Kyle Mestery Date: Fri, 6 Sep 2013 14:59:30 +0000 Subject: [PATCH 0338/4704] Default to linuxbridge and openvswitch drivers for ML2. Since the addition of ML2 port-binding, the linuxbridge and openvswitch drivers are required to be loaded when running with ML2. This small patch adds their loading into ML2 into devstack. Fixes bug 1220743 Change-Id: I97c5f4e0e4af59766e0084ed3b2dea2843cb33bf --- lib/neutron_plugins/ml2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2 index 00bd716309..6ac20fe72e 100644 --- a/lib/neutron_plugins/ml2 +++ b/lib/neutron_plugins/ml2 @@ -20,7 +20,7 @@ Q_AGENT=${Q_AGENT:-openvswitch} source $TOP_DIR/lib/neutron_plugins/${Q_AGENT}_agent # List of MechanismDrivers to load -Q_ML2_PLUGIN_MECHANISM_DRIVERS=${Q_ML2_PLUGIN_MECHANISM_DRIVERS:-} +Q_ML2_PLUGIN_MECHANISM_DRIVERS=${Q_ML2_PLUGIN_MECHANISM_DRIVERS:-openvswitch,linuxbridge} # List of Type Drivers to load Q_ML2_PLUGIN_TYPE_DRIVERS=${Q_ML2_PLUGIN_TYPE_DRIVERS:-local,flat,vlan,gre,vxlan} # Default GRE TypeDriver options From 74aad31c33b08f53681af07d79421970a106548f Mon Sep 17 00:00:00 2001 From: Lucas Alvares Gomes Date: Wed, 28 Aug 2013 11:32:14 +0100 Subject: [PATCH 0339/4704] Missing pxelinux.0 on RedHat systems On a RedHat system the syslinux binaries are located in a different directory. 
Change-Id: I3b7a111e82e8845b6222c57fb2cfb725d9bb1dd7 --- lib/baremetal | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/lib/baremetal b/lib/baremetal index 8f6c3f1660..b591410638 100644 --- a/lib/baremetal +++ b/lib/baremetal @@ -215,7 +215,16 @@ function configure_baremetal_nova_dirs() { # ensure /tftpboot is prepared sudo mkdir -p /tftpboot sudo mkdir -p /tftpboot/pxelinux.cfg - sudo cp /usr/lib/syslinux/pxelinux.0 /tftpboot/ + + PXEBIN=/usr/share/syslinux/pxelinux.0 + if [ ! -f $PXEBIN ]; then + PXEBIN=/usr/lib/syslinux/pxelinux.0 + if [ ! -f $PXEBIN ]; then + die $LINENO "pxelinux.0 (from SYSLINUX) not found." + fi + fi + + sudo cp $PXEBIN /tftpboot/ sudo chown -R $STACK_USER:$LIBVIRT_GROUP /tftpboot # ensure $NOVA_STATE_PATH/baremetal is prepared From 35f0966d351c6cf4fe11c7bf482e1d9c02c7dac5 Mon Sep 17 00:00:00 2001 From: Ryu Ishimoto Date: Tue, 27 Aug 2013 18:32:00 +0900 Subject: [PATCH 0340/4704] Modify midonet plugin to support the latest MidoNet MidoNet has been upgraded and devstack needs to be updated to be compatible. This change is required to run the current version of MidoNet plugin with DevStack. Closes-Bug: #1222314 Change-Id: If3379b4d5da4e4fcf989ee7398b5952d71b68d5a --- lib/neutron_plugins/midonet | 10 ++++++---- lib/neutron_thirdparty/midonet | 16 ++++++---------- 2 files changed, 12 insertions(+), 14 deletions(-) diff --git a/lib/neutron_plugins/midonet b/lib/neutron_plugins/midonet index 4d343f5c91..0ad760b289 100644 --- a/lib/neutron_plugins/midonet +++ b/lib/neutron_plugins/midonet @@ -31,7 +31,12 @@ function neutron_plugin_configure_debug_command() { } function neutron_plugin_configure_dhcp_agent() { - die $LINENO "q-dhcp must not be executed with MidoNet plugin!" 
+ DHCP_DRIVER=${DHCP_DRIVER:-"neutron.plugins.midonet.agent.midonet_driver.DhcpNoOpDriver"} + DHCP_INTERFACE_DRIVER=${DHCP_INTEFACE_DRIVER:-"neutron.plugins.midonet.agent.midonet_driver.MidonetInterfaceDriver"} + iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_driver $DHCP_DRIVER + iniset $Q_DHCP_CONF_FILE DEFAULT interface_driver $DHCP_INTERFACE_DRIVER + iniset $Q_DHCP_CONF_FILE DEFAULT use_namespaces True + iniset $Q_DHCP_CONF_FILE DEFAULT enable_isolated_metadata True } function neutron_plugin_configure_l3_agent() { @@ -58,9 +63,6 @@ function neutron_plugin_configure_service() { if [[ "$MIDONET_PROVIDER_ROUTER_ID" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE MIDONET provider_router_id $MIDONET_PROVIDER_ROUTER_ID fi - if [[ "$MIDONET_METADATA_ROUTER_ID" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE MIDONET metadata_router_id $MIDONET_METADATA_ROUTER_ID - fi } function neutron_plugin_setup_interface_driver() { diff --git a/lib/neutron_thirdparty/midonet b/lib/neutron_thirdparty/midonet index b3c726fe93..7928bca31f 100644 --- a/lib/neutron_thirdparty/midonet +++ b/lib/neutron_thirdparty/midonet @@ -10,22 +10,20 @@ # MidoNet devstack destination dir MIDONET_DIR=${MIDONET_DIR:-$DEST/midonet} +MIDONET_API_PORT=${MIDONET_API_PORT:-8080} +MIDONET_API_URL=${MIDONET_API_URL:-http://localhost:$MIDONET_API_PORT/midonet-api} # MidoNet client repo MIDONET_CLIENT_REPO=${MIDONET_CLIENT_REPO:-https://github.com/midokura/python-midonetclient.git} MIDONET_CLIENT_BRANCH=${MIDONET_CLIENT_BRANCH:-master} -MIDONET_CLIENT_DIR=$MIDONET_DIR/python-midonetclient +MIDONET_CLIENT_DIR=${MIDONET_CLIENT_DIR:-$MIDONET_DIR/python-midonetclient} # MidoNet OpenStack repo MIDONET_OS_REPO=${MIDONET_OS_REPO:-https://github.com/midokura/midonet-openstack.git} MIDONET_OS_BRANCH=${MIDONET_OS_BRANCH:-master} -MIDONET_OS_DIR=$MIDONET_DIR/midonet-openstack +MIDONET_OS_DIR=${MIDONET_OS_DIR:-$MIDONET_DIR/midonet-openstack} MIDONET_SETUP_SCRIPT=${MIDONET_SETUP_SCRIPT:-$MIDONET_OS_DIR/bin/setup_midonet_topology.py} - 
-MIDOLMAN_LOG=${MIDOLMAN_LOG:-/var/log/midolman/midolman.log} -MIDONET_API_LOG=${MIDONET_API_LOG:-/var/log/tomcat7/midonet-api.log} - # Save trace setting MY_XTRACE=$(set +o | grep xtrace) set +o xtrace @@ -37,13 +35,11 @@ function configure_midonet() { function init_midonet() { # Initialize DB. Evaluate the output of setup_midonet_topology.py to set - # env variables for provider router ID and metadata router ID - eval `python $MIDONET_SETUP_SCRIPT admin $ADMIN_PASSWORD $ADMIN_TENANT provider_devices` + # env variables for provider router ID. + eval `python $MIDONET_SETUP_SCRIPT $MIDONET_API_URL admin $ADMIN_PASSWORD admin provider_devices` die_if_not_set $LINENO provider_router_id "Error running midonet setup script, provider_router_id was not set." - die_if_not_set $LINENO metadata_router_id "Error running midonet setup script, metadata_router_id was not set." iniset /$Q_PLUGIN_CONF_FILE MIDONET provider_router_id $provider_router_id - iniset /$Q_PLUGIN_CONF_FILE MIDONET metadata_router_id $metadata_router_id } function install_midonet() { From 061d52507d4f4e597b825e7e7fb0d9d1858e08db Mon Sep 17 00:00:00 2001 From: Kyle Mestery Date: Mon, 9 Sep 2013 08:52:19 +0000 Subject: [PATCH 0341/4704] Augment instead of override extra ML2 options. The existing ML2 code overwrote extra options set in localrc with default values in some cases. This fixes it so it no longer does that and instead adds to rather than overrides those values.
Fixes bug 1222854 Change-Id: Iafdaad7d4253f1b61e8a214c50adaf7599a641f2 --- lib/neutron_plugins/ml2 | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2 index 00bd716309..4d4340b614 100644 --- a/lib/neutron_plugins/ml2 +++ b/lib/neutron_plugins/ml2 @@ -10,9 +10,9 @@ set +o xtrace Q_ML2_TENANT_NETWORK_TYPE=${Q_ML2_TENANT_NETWORK_TYPE:-} # This has to be set here since the agent will set this in the config file if [[ "$Q_ML2_TENANT_NETWORK_TYPE" != "" ]]; then - Q_AGENT_EXTRA_AGENT_OPTS=(tunnel_types=$Q_ML2_TENANT_NETWORK_TYPE) + Q_AGENT_EXTRA_AGENT_OPTS+=(tunnel_types=$Q_ML2_TENANT_NETWORK_TYPE) elif [[ "$ENABLE_TENANT_TUNNELS" = "True" ]]; then - Q_AGENT_EXTRA_AGENT_OPTS=(tunnel_types=gre) + Q_AGENT_EXTRA_AGENT_OPTS+=(tunnel_types=gre) fi # Default openvswitch L2 agent @@ -50,14 +50,14 @@ function neutron_plugin_configure_common() { function neutron_plugin_configure_service() { if [[ "$Q_ML2_TENANT_NETWORK_TYPE" != "" ]]; then - Q_SRV_EXTRA_OPTS=(tenant_network_types=$Q_ML2_TENANT_NETWORK_TYPE) + Q_SRV_EXTRA_OPTS+=(tenant_network_types=$Q_ML2_TENANT_NETWORK_TYPE) elif [[ "$ENABLE_TENANT_TUNNELS" = "True" ]]; then # This assumes you want a simple configuration, and will overwrite # Q_SRV_EXTRA_OPTS if set in addition to ENABLE_TENANT_TUNNELS. - Q_SRV_EXTRA_OPTS=(tenant_network_types=gre) + Q_SRV_EXTRA_OPTS+=(tenant_network_types=gre) Q_ML2_PLUGIN_GRE_TYPE_OPTIONS=(tunnel_id_ranges=$TENANT_TUNNEL_RANGES) elif [[ "$ENABLE_TENANT_VLANS" = "True" ]]; then - Q_SRV_EXTRA_OPTS=(tenant_network_types=vlan) + Q_SRV_EXTRA_OPTS+=(tenant_network_types=vlan) else echo "WARNING - The ml2 plugin is using local tenant networks, with no connectivity between hosts." 
fi From 46d1ba6ef02b52de47897b78ccf9a29d022a0c17 Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Mon, 9 Sep 2013 14:31:37 +0200 Subject: [PATCH 0342/4704] Install schema-image.json Otherwise a warning is logged during startup Change-Id: I958ab8bb7bce474d3e6854b43bb4709986fb61d4 Fixes: LP Bug#1222797 --- lib/glance | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/glance b/lib/glance index 64d8b0695a..7e6968200f 100644 --- a/lib/glance +++ b/lib/glance @@ -39,6 +39,7 @@ GLANCE_REGISTRY_PASTE_INI=$GLANCE_CONF_DIR/glance-registry-paste.ini GLANCE_API_PASTE_INI=$GLANCE_CONF_DIR/glance-api-paste.ini GLANCE_CACHE_CONF=$GLANCE_CONF_DIR/glance-cache.conf GLANCE_POLICY_JSON=$GLANCE_CONF_DIR/policy.json +GLANCE_SCHEMA_JSON=$GLANCE_CONF_DIR/schema-image.json # Support entry points installation of console scripts if [[ -d $GLANCE_DIR/bin ]]; then @@ -142,6 +143,7 @@ function configure_glance() { iniset $GLANCE_CACHE_CONF DEFAULT admin_password $SERVICE_PASSWORD cp -p $GLANCE_DIR/etc/policy.json $GLANCE_POLICY_JSON + cp -p $GLANCE_DIR/etc/schema-image.json $GLANCE_SCHEMA_JSON } # create_glance_cache_dir() - Part of the init_glance() process From c33d1f986ed4c0ed8a944ada3030c1de51892290 Mon Sep 17 00:00:00 2001 From: Lucas Alvares Gomes Date: Fri, 6 Sep 2013 18:14:51 +0100 Subject: [PATCH 0343/4704] Wrong arguments for die() call die() needs $LINENO as its first arg Change-Id: I7c8043dbeb55ec9ed566e7055a02c0a2993d0a8a --- lib/baremetal | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/baremetal b/lib/baremetal index 8f6c3f1660..0eb852887f 100644 --- a/lib/baremetal +++ b/lib/baremetal @@ -291,7 +291,7 @@ function extract_and_upload_k_and_r_from_image() { out=$($BM_IMAGE_BUILD_DIR/bin/disk-image-get-kernel \ -x -d $TOP_DIR/files -o bm-deploy -i $file) if [ $? 
-ne 0 ]; then - die "Failed to get kernel and ramdisk from $file" + die $LINENO "Failed to get kernel and ramdisk from $file" fi XTRACE=$(set +o | grep xtrace) set +o xtrace @@ -439,9 +439,9 @@ function add_baremetal_node() { "$BM_FLAVOR_ROOT_DISK" \ "$mac_1" \ | grep ' id ' | get_field 2 ) - [ $? -eq 0 ] || [ "$id" ] || die "Error adding baremetal node" + [ $? -eq 0 ] || [ "$id" ] || die $LINENO "Error adding baremetal node" id2=$(nova baremetal-interface-add "$id" "$mac_2" ) - [ $? -eq 0 ] || [ "$id2" ] || die "Error adding interface to barmetal node $id" + [ $? -eq 0 ] || [ "$id2" ] || die $LINENO "Error adding interface to barmetal node $id" } From 748fe3d5e33337555b0ae16ef1d3b3ed02ad80f2 Mon Sep 17 00:00:00 2001 From: Bob Melander Date: Thu, 31 Jan 2013 17:12:56 +0100 Subject: [PATCH 0344/4704] Changes to make Devstack work with Neutron L3 plugin patch. Implements bp/quantum-l3-plugin-support Change-Id: I0c56661685fb641efe34fee1390d7d4f37f84494 --- lib/neutron_plugins/ml2 | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2 index 00bd716309..035d6cc81d 100644 --- a/lib/neutron_plugins/ml2 +++ b/lib/neutron_plugins/ml2 @@ -46,6 +46,15 @@ function neutron_plugin_configure_common() { Q_PLUGIN_CONF_FILENAME=ml2_conf.ini Q_DB_NAME="neutron_ml2" Q_PLUGIN_CLASS="neutron.plugins.ml2.plugin.Ml2Plugin" + # The ML2 plugin delegates L3 routing/NAT functionality to + # the L3 service plugin which must therefore be specified. 
+ Q_L3_PLUGIN_CLASS=${Q_L3_PLUGIN_CLASS:-"neutron.services.l3_router.l3_router_plugin.L3RouterPlugin"} + if ini_has_option $NEUTRON_CONF DEFAULT service_plugins ; then + srv_plugins=$(iniget $NEUTRON_CONF DEFAULT service_plugins)","$Q_L3_PLUGIN_CLASS + else + srv_plugins=$Q_L3_PLUGIN_CLASS + fi + iniset $NEUTRON_CONF DEFAULT service_plugins $srv_plugins } function neutron_plugin_configure_service() { From 54d1faecc56e8008717b02e3c92b5abf628024db Mon Sep 17 00:00:00 2001 From: Clint Byrum Date: Mon, 9 Sep 2013 11:54:28 -0700 Subject: [PATCH 0345/4704] Lower Heat max_template_size for Tempest The default size leaves a very large padding for users to create larger templates, but for testing and development a 10kB template is plenty. This value is specifically meant to mirror upcoming changes to tempest so that they are unified and can be tested in lock-step. Change-Id: I0ea9798018a6d864ac04429c3ac89fb374583fb6 --- lib/heat | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/heat b/lib/heat index 58505ab792..ef134ec0f4 100644 --- a/lib/heat +++ b/lib/heat @@ -121,6 +121,9 @@ function configure_heat() { iniset $HEAT_CONF heat_api_cloudwatch bind_host $HEAT_API_CW_HOST iniset $HEAT_CONF heat_api_cloudwatch bind_port $HEAT_API_CW_PORT + # Set limits to match tempest defaults + iniset $HEAT_CONF max_template_size 10240 + # heat environment sudo mkdir -p $HEAT_ENV_DIR sudo chown $STACK_USER $HEAT_ENV_DIR From f208aafa35996c98de40c1388bbebf326ab2ed20 Mon Sep 17 00:00:00 2001 From: Cyril Roelandt Date: Thu, 5 Sep 2013 09:20:15 +0000 Subject: [PATCH 0346/4704] Swift: configure Ceilometer when it is enabled This allows the storage.objects.{incoming,outgoing}.bytes measurements to be easily used. 
Closes-Bug: #1221097 Change-Id: If988a85930d7df1e043997763c3b5ebd720d6d86 --- lib/swift | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/lib/swift b/lib/swift index f72beafef7..742be67a82 100644 --- a/lib/swift +++ b/lib/swift @@ -61,6 +61,10 @@ SWIFT_LOOPBACK_DISK_SIZE=${SWIFT_LOOPBACK_DISK_SIZE:-1000000} # Default is ``staticweb, tempurl, formpost`` SWIFT_EXTRAS_MIDDLEWARE=${SWIFT_EXTRAS_MIDDLEWARE:-tempurl formpost staticweb} +# Set ``SWIFT_EXTRAS_MIDDLEWARE_LAST`` to extras middlewares that need to be at +# the end of the pipeline. +SWIFT_EXTRAS_MIDDLEWARE_LAST=${SWIFT_EXTRAS_MIDDLEWARE_LAST} + # The ring uses a configurable number of bits from a path’s MD5 hash as # a partition index that designates a device. The number of bits kept # from the hash is known as the partition power, and 2 to the partition @@ -252,6 +256,12 @@ function configure_swift() { iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port ${SWIFT_DEFAULT_BIND_PORT:-8080} + # Configure Ceilometer + if is_service_enabled ceilometer; then + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer use "egg:ceilometer#swift" + SWIFT_EXTRAS_MIDDLEWARE_LAST="${SWIFT_EXTRAS_MIDDLEWARE_LAST} ceilometer" + fi + # By default Swift will be installed with keystone and tempauth middleware # and add the swift3 middleware if its configured for it. 
The token for # tempauth would be prefixed with the reseller_prefix setting TEMPAUTH_ the @@ -261,6 +271,7 @@ function configure_swift() { fi swift_pipeline+=" authtoken keystoneauth tempauth " sed -i "/^pipeline/ { s/tempauth/${swift_pipeline} ${SWIFT_EXTRAS_MIDDLEWARE}/ ;}" ${SWIFT_CONFIG_PROXY_SERVER} + sed -i "/^pipeline/ { s/proxy-server/${SWIFT_EXTRAS_MIDDLEWARE_LAST} proxy-server/ ; }" ${SWIFT_CONFIG_PROXY_SERVER} iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} filter:tempauth account_autocreate iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server account_autocreate true From eaa9e1e3e0af74ac66cd934bde6762a63d14d1a8 Mon Sep 17 00:00:00 2001 From: Akihiro MOTOKI Date: Tue, 10 Sep 2013 05:22:37 +0900 Subject: [PATCH 0347/4704] Configure VPNaaS Horizon panel if q-vpn is enabled Change-Id: I062fd31cb1de50f356c2c549a783d9c597b129fa Closes-Bug: #1223012 --- lib/horizon | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/lib/horizon b/lib/horizon index f6bb9f55e0..e55bc152f6 100644 --- a/lib/horizon +++ b/lib/horizon @@ -101,6 +101,11 @@ function init_horizon() { _horizon_config_set $local_settings OPENSTACK_NEUTRON_NETWORK enable_firewall True fi + # enable VPN dashboard in case service is enabled + if is_service_enabled q-vpn; then + _horizon_config_set $local_settings OPENSTACK_NEUTRON_NETWORK enable_vpn True + fi + # Initialize the horizon database (it stores sessions and notices shown to # users). The user system is external (keystone). 
cd $HORIZON_DIR From 3632ab1b66e4928ed0b9ef6ef65392c0e5531a66 Mon Sep 17 00:00:00 2001 From: Giulio Fidente Date: Tue, 10 Sep 2013 02:51:26 +0200 Subject: [PATCH 0348/4704] enable volume backup tests if c-bak is enabled this will set to True the tempest volume/volume_backup_enabled option if c-bak is in ENABLED_SERVICES Change-Id: I69931d668411fc8144d0fdb2f58ad9b6e987f793 --- lib/tempest | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/tempest b/lib/tempest index 50289b60d4..e48ccf2062 100644 --- a/lib/tempest +++ b/lib/tempest @@ -286,6 +286,9 @@ function configure_tempest() { iniset $TEMPEST_CONF scenario large_ops_number ${TEMPEST_LARGE_OPS_NUMBER:-0} # Volume + if is_service_enabled c-bak; then + iniset $TEMPEST_CONF volume volume_backup_enabled "True" + fi CINDER_MULTI_LVM_BACKEND=$(trueorfalse False $CINDER_MULTI_LVM_BACKEND) if [ $CINDER_MULTI_LVM_BACKEND == "True" ]; then iniset $TEMPEST_CONF volume multi_backend_enabled "True" From 5c3b861c1963ca6fee9048ed88873c4efea64b8c Mon Sep 17 00:00:00 2001 From: Gary Kotton Date: Tue, 10 Sep 2013 05:20:07 -0700 Subject: [PATCH 0349/4704] VMware: remove invalid configuration variable The configuration variable vmware_cluster_name is not used by the cinder plugin. 
Change-Id: I8c0ed58d1dcd66b6f8ea3325007bf5135216933c --- lib/cinder | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/cinder b/lib/cinder index 324db9de27..7f1544b444 100644 --- a/lib/cinder +++ b/lib/cinder @@ -293,7 +293,6 @@ function configure_cinder() { iniset $CINDER_CONF DEFAULT vmware_host_ip "$VMWAREAPI_IP" iniset $CINDER_CONF DEFAULT vmware_host_username "$VMWAREAPI_USER" iniset $CINDER_CONF DEFAULT vmware_host_password "$VMWAREAPI_PASSWORD" - iniset $CINDER_CONF DEFAULT vmware_cluster_name "$VMWAREAPI_CLUSTER" iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver" fi From f68c9d3fb77dab0a3ba4a63cd20e3f4bfac11c2b Mon Sep 17 00:00:00 2001 From: Clint Byrum Date: Tue, 10 Sep 2013 11:37:47 -0700 Subject: [PATCH 0350/4704] Fix section on iniset for max_template_size The section was not specified, leading to trying to create a section of max_template_size. Change-Id: Ie3b525030efa780e9cef2d3108be92169d400857 --- lib/heat | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/heat b/lib/heat index ef134ec0f4..afa0eeb765 100644 --- a/lib/heat +++ b/lib/heat @@ -122,7 +122,7 @@ function configure_heat() { iniset $HEAT_CONF heat_api_cloudwatch bind_port $HEAT_API_CW_PORT # Set limits to match tempest defaults - iniset $HEAT_CONF max_template_size 10240 + iniset $HEAT_CONF DEFAULT max_template_size 10240 # heat environment sudo mkdir -p $HEAT_ENV_DIR From e700267e33d748fe36c621cf16497597fcbe6aac Mon Sep 17 00:00:00 2001 From: Doug Hellmann Date: Thu, 5 Sep 2013 08:10:07 -0400 Subject: [PATCH 0351/4704] Only run chmod/chown for local files When the /opt/stack directory is NFS mounted, chown to the local user might not work. Create safe_chown and safe_chmod functions that do nothing on NFS filesystems to avoid spurious errors.
Change-Id: Iaa68879e867a4426b1990d4d46164769177dc7cc --- functions | 52 ++++++++++++++++++++++++++++++++++++++++++++++------ stack.sh | 10 +++++----- 2 files changed, 51 insertions(+), 11 deletions(-) diff --git a/functions b/functions index df8166a0e2..0634fac716 100644 --- a/functions +++ b/functions @@ -1158,6 +1158,51 @@ function service_check() { fi } +# Returns true if the directory is on a filesystem mounted via NFS. +function is_nfs_directory() { + local mount_type=`stat -f -L -c %T $1` + test "$mount_type" == "nfs" +} + +# Only run the command if the target file (the last arg) is not on an +# NFS filesystem. +function _safe_permission_operation() { + local args=( $@ ) + local last + local sudo_cmd + local dir_to_check + + let last="${#args[*]} - 1" + + dir_to_check=${args[$last]} + if [ ! -d "$dir_to_check" ]; then + dir_to_check=`dirname "$dir_to_check"` + fi + + if is_nfs_directory "$dir_to_check" ; then + return 0 + fi + + if [[ $TRACK_DEPENDS = True ]]; then + sudo_cmd="env" + else + sudo_cmd="sudo" + fi + + $sudo_cmd $@ +} + +# Only change ownership of a file or directory if it is not on an NFS +# filesystem. +function safe_chown() { + _safe_permission_operation chown $@ +} + +# Only change permissions of a file or directory if it is not on an +# NFS filesystem. 
+function safe_chmod() { + _safe_permission_operation chmod $@ +} # ``pip install -e`` the package, which processes the dependencies # using pip before running `setup.py develop` @@ -1165,11 +1210,6 @@ function service_check() { # setup_develop directory function setup_develop() { local project_dir=$1 - if [[ $TRACK_DEPENDS = True ]]; then - SUDO_CMD="env" - else - SUDO_CMD="sudo" - fi echo "cd $REQUIREMENTS_DIR; $SUDO_CMD python update.py $project_dir" @@ -1181,7 +1221,7 @@ function setup_develop() { pip_install -e $project_dir # ensure that further actions can do things like setup.py sdist - $SUDO_CMD chown -R $STACK_USER $1/*.egg-info + safe_chown -R $STACK_USER $1/*.egg-info } diff --git a/stack.sh b/stack.sh index 89e4c248c4..975194b846 100755 --- a/stack.sh +++ b/stack.sh @@ -203,7 +203,7 @@ if [[ $EUID -eq 0 ]]; then echo "Copying files to $STACK_USER user" STACK_DIR="$DEST/${TOP_DIR##*/}" cp -r -f -T "$TOP_DIR" "$STACK_DIR" - chown -R $STACK_USER "$STACK_DIR" + safe_chown -R $STACK_USER "$STACK_DIR" cd "$STACK_DIR" if [[ "$SHELL_AFTER_RUN" != "no" ]]; then exec sudo -u $STACK_USER bash -l -c "set -e; bash stack.sh; bash" @@ -236,8 +236,8 @@ fi # Create the destination directory and ensure it is writable by the user # and read/executable by everybody for daemons (e.g. 
apache run for horizon) sudo mkdir -p $DEST -sudo chown -R $STACK_USER $DEST -chmod 0755 $DEST +safe_chown -R $STACK_USER $DEST +safe_chmod 0755 $DEST # a basic test for $DEST path permissions (fatal on error unless skipped) check_path_perm_sanity ${DEST} @@ -258,7 +258,7 @@ ENABLE_DEBUG_LOG_LEVEL=`trueorfalse True $ENABLE_DEBUG_LOG_LEVEL` # Destination path for service data DATA_DIR=${DATA_DIR:-${DEST}/data} sudo mkdir -p $DATA_DIR -sudo chown -R $STACK_USER $DATA_DIR +safe_chown -R $STACK_USER $DATA_DIR # Common Configuration @@ -954,7 +954,7 @@ if is_service_enabled n-net q-dhcp; then clean_iptables rm -rf ${NOVA_STATE_PATH}/networks sudo mkdir -p ${NOVA_STATE_PATH}/networks - sudo chown -R ${USER} ${NOVA_STATE_PATH}/networks + safe_chown -R ${USER} ${NOVA_STATE_PATH}/networks # Force IP forwarding on, just in case sudo sysctl -w net.ipv4.ip_forward=1 fi From 6650fda680310e71b5dda7764bf4033f670d90f0 Mon Sep 17 00:00:00 2001 From: Clark Boylan Date: Tue, 10 Sep 2013 16:39:18 -0700 Subject: [PATCH 0352/4704] Revert "Swift: configure Ceilometer when it is enabled" This reverts commit f208aafa35996c98de40c1388bbebf326ab2ed20. This commit broke swift functional tests because the ceilometer middleware changes HTTP 404 responses into zero byte responses. This results in BadStatusLine exceptions. Back out the use of ceilometer middleware until it can be fixed. Change-Id: Ie25269b58334c40dc1ecae985326af1cf29c3af4 --- lib/swift | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/lib/swift b/lib/swift index 742be67a82..f72beafef7 100644 --- a/lib/swift +++ b/lib/swift @@ -61,10 +61,6 @@ SWIFT_LOOPBACK_DISK_SIZE=${SWIFT_LOOPBACK_DISK_SIZE:-1000000} # Default is ``staticweb, tempurl, formpost`` SWIFT_EXTRAS_MIDDLEWARE=${SWIFT_EXTRAS_MIDDLEWARE:-tempurl formpost staticweb} -# Set ``SWIFT_EXTRAS_MIDDLEWARE_LAST`` to extras middlewares that need to be at -# the end of the pipeline. 
-SWIFT_EXTRAS_MIDDLEWARE_LAST=${SWIFT_EXTRAS_MIDDLEWARE_LAST} - # The ring uses a configurable number of bits from a path’s MD5 hash as # a partition index that designates a device. The number of bits kept # from the hash is known as the partition power, and 2 to the partition @@ -256,12 +252,6 @@ function configure_swift() { iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port ${SWIFT_DEFAULT_BIND_PORT:-8080} - # Configure Ceilometer - if is_service_enabled ceilometer; then - iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer use "egg:ceilometer#swift" - SWIFT_EXTRAS_MIDDLEWARE_LAST="${SWIFT_EXTRAS_MIDDLEWARE_LAST} ceilometer" - fi - # By default Swift will be installed with keystone and tempauth middleware # and add the swift3 middleware if its configured for it. The token for # tempauth would be prefixed with the reseller_prefix setting TEMPAUTH_ the @@ -271,7 +261,6 @@ function configure_swift() { fi swift_pipeline+=" authtoken keystoneauth tempauth " sed -i "/^pipeline/ { s/tempauth/${swift_pipeline} ${SWIFT_EXTRAS_MIDDLEWARE}/ ;}" ${SWIFT_CONFIG_PROXY_SERVER} - sed -i "/^pipeline/ { s/proxy-server/${SWIFT_EXTRAS_MIDDLEWARE_LAST} proxy-server/ ; }" ${SWIFT_CONFIG_PROXY_SERVER} iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} filter:tempauth account_autocreate iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server account_autocreate true From cd77058ee0e974bd9bd7acaf8426ef24ea9a7a52 Mon Sep 17 00:00:00 2001 From: Alex Rudenko Date: Sun, 1 Sep 2013 16:26:03 +0200 Subject: [PATCH 0353/4704] blueprint devstack-support-for-keystone-mixbackend Added KEYSTONE_ASSIGNMENT_BACKEND to support mixed backend Modified code for KEYSTONE_IDENTITY_BACKEND according to comments. Implemented the check for variables instead of case statements. UPD: Removed arrays. 
UPD2: fixed spacing issues Change-Id: Ie92eed1fb5be5f875ef6633ede9c9e08daf6bf4f Implements: devstack-support-for-keystone-mixbackend --- lib/keystone | 22 +++++++++++++++++++--- 1 file changed, 19 insertions(+), 3 deletions(-) mode change 100644 => 100755 lib/keystone diff --git a/lib/keystone b/lib/keystone old mode 100644 new mode 100755 index 535710f52b..3642904e1c --- a/lib/keystone +++ b/lib/keystone @@ -44,6 +44,12 @@ KEYSTONE_CATALOG=$KEYSTONE_CONF_DIR/default_catalog.templates # Select the backend for Tokens KEYSTONE_TOKEN_BACKEND=${KEYSTONE_TOKEN_BACKEND:-sql} +# Select the backend for Identity +KEYSTONE_IDENTITY_BACKEND=${KEYSTONE_IDENTITY_BACKEND:-sql} + +# Select the backend for Assignment +KEYSTONE_ASSIGNMENT_BACKEND=${KEYSTONE_ASSIGNMENT_BACKEND:-sql} + # Select Keystone's token format # Choose from 'UUID' and 'PKI' KEYSTONE_TOKEN_FORMAT=${KEYSTONE_TOKEN_FORMAT:-PKI} @@ -63,10 +69,14 @@ KEYSTONE_SERVICE_PROTOCOL=${KEYSTONE_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} # Set the tenant for service accounts in Keystone SERVICE_TENANT_NAME=${SERVICE_TENANT_NAME:-service} +# valid identity backends as per dir keystone/identity/backends +KEYSTONE_VALID_IDENTITY_BACKENDS=kvs,ldap,pam,sql + +# valid assignment backends as per dir keystone/identity/backends +KEYSTONE_VALID_ASSIGNMENT_BACKENDS=kvs,ldap,sql # Functions # --------- - # cleanup_keystone() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_keystone() { @@ -116,8 +126,14 @@ function configure_keystone() { iniset $KEYSTONE_CONF DEFAULT member_role_name "_member_" fi - if [[ "$KEYSTONE_IDENTITY_BACKEND" == "ldap" ]]; then - iniset $KEYSTONE_CONF identity driver "keystone.identity.backends.ldap.Identity" + # check if identity backend is valid + if [[ "$KEYSTONE_VALID_IDENTITY_BACKENDS" =~ "$KEYSTONE_IDENTITY_BACKEND" ]]; then + iniset $KEYSTONE_CONF identity driver "keystone.identity.backends.$KEYSTONE_IDENTITY_BACKEND.Identity" + fi + 
+ # check if assignment backend is valid + if [[ "$KEYSTONE_VALID_ASSIGNMENT_BACKENDS" =~ "$KEYSTONE_ASSIGNMENT_BACKEND" ]]; then + iniset $KEYSTONE_CONF assignment driver "keystone.assignment.backends.$KEYSTONE_ASSIGNMENT_BACKEND.Assignment" fi # Set the URL advertised in the ``versions`` structure returned by the '/' route From d02287e5e6f2b356beff8f485e0dd2f7c3beab5f Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 11 Sep 2013 14:08:59 -0400 Subject: [PATCH 0354/4704] import lib/oslo so we can clean it up we were calling cleanup_oslo, however we weren't importing lib/oslo, so that was just throwing an error message and moving on. Let's stop doing that and actually clean up oslo. Change-Id: I48340a8b3d5b50477fb5a7e2ce0bed27deb3ec01 --- clean.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/clean.sh b/clean.sh index a443ac82d0..6ceb5a4933 100755 --- a/clean.sh +++ b/clean.sh @@ -33,6 +33,7 @@ GetDistro source $TOP_DIR/lib/database source $TOP_DIR/lib/rpc_backend +source $TOP_DIR/lib/oslo source $TOP_DIR/lib/tls source $TOP_DIR/lib/horizon source $TOP_DIR/lib/keystone From 0cccad43aad08487ad1712c63afed602889aaf72 Mon Sep 17 00:00:00 2001 From: Nikhil Manchanda Date: Mon, 3 Dec 2012 18:15:09 -0700 Subject: [PATCH 0355/4704] Added Trove (Database as a Service). - Added changes to stackrc for the Trove Repos. 
- Added support to devstack for "install", "configure", "init", and "run" implements blueprint:trove-devstack-integration Change-Id: Ib3f6daad33e629f764a174b80762c808ce8588e2 --- exercises/trove.sh | 45 +++++++++++ files/apts/trove | 1 + files/rpms-suse/trove | 1 + files/rpms/trove | 1 + functions | 2 + lib/trove | 170 ++++++++++++++++++++++++++++++++++++++++++ stack.sh | 27 ++++++- stackrc | 7 ++ unstack.sh | 5 ++ 9 files changed, 257 insertions(+), 2 deletions(-) create mode 100755 exercises/trove.sh create mode 100644 files/apts/trove create mode 100644 files/rpms-suse/trove create mode 100644 files/rpms/trove create mode 100644 lib/trove diff --git a/exercises/trove.sh b/exercises/trove.sh new file mode 100755 index 0000000000..d48d5fec99 --- /dev/null +++ b/exercises/trove.sh @@ -0,0 +1,45 @@ +#!/usr/bin/env bash + +# **trove.sh** + +# Sanity check that trove started if enabled + +echo "*********************************************************************" +echo "Begin DevStack Exercise: $0" +echo "*********************************************************************" + +# This script exits on an error so that errors don't compound and you see +# only the first error that occurred. +set -o errexit + +# Print the commands being run so that we can see the command that triggers +# an error. It is also useful for following allowing as the install occurs. +set -o xtrace + + +# Settings +# ======== + +# Keep track of the current directory +EXERCISE_DIR=$(cd $(dirname "$0") && pwd) +TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) + +# Import common functions +source $TOP_DIR/functions + +# Import configuration +source $TOP_DIR/openrc + +# Import exercise configuration +source $TOP_DIR/exerciserc + +is_service_enabled trove || exit 55 + +# can we get a list versions +curl http://$SERVICE_HOST:8779/ 2>/dev/null | grep -q 'versions' || die $LINENO "Trove API not functioning!" 
+ +set +o xtrace +echo "*********************************************************************" +echo "SUCCESS: End DevStack Exercise: $0" +echo "*********************************************************************" + diff --git a/files/apts/trove b/files/apts/trove new file mode 100644 index 0000000000..09dcee8104 --- /dev/null +++ b/files/apts/trove @@ -0,0 +1 @@ +libxslt1-dev # testonly diff --git a/files/rpms-suse/trove b/files/rpms-suse/trove new file mode 100644 index 0000000000..09dcee8104 --- /dev/null +++ b/files/rpms-suse/trove @@ -0,0 +1 @@ +libxslt1-dev # testonly diff --git a/files/rpms/trove b/files/rpms/trove new file mode 100644 index 0000000000..09dcee8104 --- /dev/null +++ b/files/rpms/trove @@ -0,0 +1 @@ +libxslt1-dev # testonly diff --git a/functions b/functions index f24cc89e82..54a72aefce 100644 --- a/functions +++ b/functions @@ -779,6 +779,7 @@ function is_running() { # **glance** returns true if any service enabled start with **g-** # **neutron** returns true if any service enabled start with **q-** # **swift** returns true if any service enabled start with **s-** +# **trove** returns true if any service enabled start with **tr-** # For backward compatibility if we have **swift** in ENABLED_SERVICES all the # **s-** services will be enabled. This will be deprecated in the future. 
# @@ -798,6 +799,7 @@ function is_service_enabled() { [[ ${service} == "ceilometer" && ${ENABLED_SERVICES} =~ "ceilometer-" ]] && return 0 [[ ${service} == "glance" && ${ENABLED_SERVICES} =~ "g-" ]] && return 0 [[ ${service} == "neutron" && ${ENABLED_SERVICES} =~ "q-" ]] && return 0 + [[ ${service} == "trove" && ${ENABLED_SERVICES} =~ "tr-" ]] && return 0 [[ ${service} == "swift" && ${ENABLED_SERVICES} =~ "s-" ]] && return 0 [[ ${service} == s-* && ${ENABLED_SERVICES} =~ "swift" ]] && return 0 done diff --git a/lib/trove b/lib/trove new file mode 100644 index 0000000000..e64ca5f6ac --- /dev/null +++ b/lib/trove @@ -0,0 +1,170 @@ +# lib/trove +# Functions to control the configuration and operation of the **Trove** service + +# Dependencies: +# ``functions`` file +# ``DEST``, ``STACK_USER`` must be defined +# ``SERVICE_{HOST|PROTOCOL|TOKEN}`` must be defined + +# ``stack.sh`` calls the entry points in this order: +# +# install_trove +# configure_trove +# init_trove +# start_trove +# stop_trove +# cleanup_trove + +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace + +# Defaults +# -------- + +NETWORK_GATEWAY=${NETWORK_GATEWAY:-10.0.0.1} + +# Set up default configuration +TROVE_DIR=$DEST/trove +TROVECLIENT_DIR=$DEST/python-troveclient +TROVE_CONF_DIR=/etc/trove +TROVE_LOCAL_CONF_DIR=$TROVE_DIR/etc/trove +TROVE_AUTH_ENDPOINT=$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT//v$IDENTITY_API_VERSION +TROVE_AUTH_CACHE_DIR=${TROVE_AUTH_CACHE_DIR:-/var/cache/trove} +TROVE_BIN_DIR=/usr/local/bin + +# create_trove_accounts() - Set up common required trove accounts + +# Tenant User Roles +# ------------------------------------------------------------------ +# service trove admin # if enabled + +create_trove_accounts() { + # Trove + SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") + SERVICE_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }") + + if [[ "$ENABLED_SERVICES" =~ "trove" ]]; then + 
TROVE_USER=$(keystone user-create --name=trove \ + --pass="$SERVICE_PASSWORD" \ + --tenant_id $SERVICE_TENANT \ + --email=trove@example.com \ + | grep " id " | get_field 2) + keystone user-role-add --tenant-id $SERVICE_TENANT \ + --user-id $TROVE_USER \ + --role-id $SERVICE_ROLE + if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then + TROVE_SERVICE=$(keystone service-create \ + --name=trove \ + --type=database \ + --description="Trove Service" \ + | grep " id " | get_field 2) + keystone endpoint-create \ + --region RegionOne \ + --service_id $TROVE_SERVICE \ + --publicurl "http://$SERVICE_HOST:8779/v1.0/\$(tenant_id)s" \ + --adminurl "http://$SERVICE_HOST:8779/v1.0/\$(tenant_id)s" \ + --internalurl "http://$SERVICE_HOST:8779/v1.0/\$(tenant_id)s" + fi + fi +} + +# stack.sh entry points +# --------------------- + +# cleanup_trove() - Remove residual data files, anything left over from previous +# runs that a clean run would need to clean up +function cleanup_trove() { + #Clean up dirs + rm -fr $TROVE_AUTH_CACHE_DIR/* + rm -fr $TROVE_CONF_DIR/* +} + +# configure_troveclient() - Set config files, create data dirs, etc +function configure_troveclient() { + setup_develop $TROVECLIENT_DIR +} + +# configure_trove() - Set config files, create data dirs, etc +function configure_trove() { + setup_develop $TROVE_DIR + + # Create the trove conf dir and cache dirs if they don't exist + sudo mkdir -p ${TROVE_CONF_DIR} + sudo mkdir -p ${TROVE_AUTH_CACHE_DIR} + sudo chown -R $STACK_USER: ${TROVE_CONF_DIR} + sudo chown -R $STACK_USER: ${TROVE_AUTH_CACHE_DIR} + + # Copy api-paste file over to the trove conf dir and configure it + cp $TROVE_LOCAL_CONF_DIR/api-paste.ini $TROVE_CONF_DIR/api-paste.ini + TROVE_API_PASTE_INI=$TROVE_CONF_DIR/api-paste.ini + iniset $TROVE_API_PASTE_INI filter:tokenauth auth_host $KEYSTONE_AUTH_HOST + iniset $TROVE_API_PASTE_INI filter:tokenauth auth_port $KEYSTONE_AUTH_PORT + iniset $TROVE_API_PASTE_INI filter:tokenauth auth_protocol $KEYSTONE_AUTH_PROTOCOL + 
iniset $TROVE_API_PASTE_INI filter:tokenauth admin_tenant_name $SERVICE_TENANT_NAME + iniset $TROVE_API_PASTE_INI filter:tokenauth admin_user trove + iniset $TROVE_API_PASTE_INI filter:tokenauth admin_password $SERVICE_PASSWORD + iniset $TROVE_API_PASTE_INI filter:tokenauth signing_dir $TROVE_AUTH_CACHE_DIR + + # (Re)create trove conf files + rm -f $TROVE_CONF_DIR/trove.conf + rm -f $TROVE_CONF_DIR/trove-taskmanager.conf + iniset $TROVE_CONF_DIR/trove.conf DEFAULT rabbit_password $RABBIT_PASSWORD + iniset $TROVE_CONF_DIR/trove.conf DEFAULT sql_connection `database_connection_url trove` + iniset $TROVE_CONF_DIR/trove.conf DEFAULT add_addresses True + + iniset $TROVE_LOCAL_CONF_DIR/trove-guestagent.conf.sample DEFAULT rabbit_password $RABBIT_PASSWORD + iniset $TROVE_LOCAL_CONF_DIR/trove-guestagent.conf.sample DEFAULT sql_connection `database_connection_url trove` + sed -i "s/localhost/$NETWORK_GATEWAY/g" $TROVE_LOCAL_CONF_DIR/trove-guestagent.conf.sample + + # (Re)create trove taskmanager conf file if needed + if is_service_enabled tr-tmgr; then + iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT rabbit_password $RABBIT_PASSWORD + iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT sql_connection `database_connection_url trove` + iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT taskmanager_manager trove.taskmanager.manager.Manager + iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT nova_proxy_admin_user radmin + iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT nova_proxy_admin_tenant_name trove + iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT nova_proxy_admin_pass $RADMIN_USER_PASS + iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT trove_auth_url $TROVE_AUTH_ENDPOINT + fi +} + +# install_troveclient() - Collect source and prepare +function install_troveclient() { + git_clone $TROVECLIENT_REPO $TROVECLIENT_DIR $TROVECLIENT_BRANCH +} + +# install_trove() - Collect source and prepare +function install_trove() { + git_clone $TROVE_REPO 
$TROVE_DIR $TROVE_BRANCH +} + +# init_trove() - Initializes Trove Database as a Service +function init_trove() { + #(Re)Create trove db + recreate_database trove utf8 + + #Initialize the trove database + $TROVE_DIR/bin/trove-manage db_sync +} + +# start_trove() - Start running processes, including screen +function start_trove() { + screen_it tr-api "cd $TROVE_DIR; bin/trove-api --config-file=$TROVE_CONF_DIR/trove.conf --debug 2>&1" + screen_it tr-tmgr "cd $TROVE_DIR; bin/trove-taskmanager --config-file=$TROVE_CONF_DIR/trove-taskmanager.conf --debug 2>&1" +} + +# stop_trove() - Stop running processes +function stop_trove() { + # Kill the trove screen windows + for serv in tr-api tr-tmgr; do + screen -S $SCREEN_NAME -p $serv -X kill + done +} + +# Restore xtrace +$XTRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/stack.sh b/stack.sh index 89e4c248c4..be04bedade 100755 --- a/stack.sh +++ b/stack.sh @@ -2,8 +2,8 @@ # ``stack.sh`` is an opinionated OpenStack developer installation. It # installs and configures various combinations of **Ceilometer**, **Cinder**, -# **Glance**, **Heat**, **Horizon**, **Keystone**, **Nova**, **Neutron** -# and **Swift**. 
+# **Glance**, **Heat**, **Horizon**, **Keystone**, **Nova**, **Neutron**, +# **Swift**, and **Trove** # This script allows you to specify configuration options of what git # repositories to use, enabled services, network configuration and various @@ -319,6 +319,7 @@ source $TOP_DIR/lib/neutron source $TOP_DIR/lib/baremetal source $TOP_DIR/lib/ldap source $TOP_DIR/lib/ironic +source $TOP_DIR/lib/trove # Look for Nova hypervisor plugin NOVA_PLUGINS=$TOP_DIR/lib/nova_plugins @@ -720,6 +721,12 @@ if is_service_enabled heat; then configure_heat fi +if is_service_enabled trove; then + install_trove + install_troveclient + cleanup_trove +fi + if is_service_enabled tls-proxy; then configure_CA init_CA @@ -860,6 +867,10 @@ if is_service_enabled key; then create_cinder_accounts create_neutron_accounts + if is_service_enabled trove; then + create_trove_accounts + fi + if is_service_enabled swift || is_service_enabled s-proxy; then create_swift_accounts fi @@ -1236,6 +1247,18 @@ if is_service_enabled heat; then start_heat fi +# Configure and launch the trove service api, and taskmanager +if is_service_enabled trove; then + # Initialize trove + echo_summary "Configuring Trove" + configure_troveclient + configure_trove + init_trove + + # Start the trove API and trove taskmgr components + echo_summary "Starting Trove" + start_trove +fi # Create account rc files # ======================= diff --git a/stackrc b/stackrc index f9a977c432..3a338d16f2 100644 --- a/stackrc +++ b/stackrc @@ -181,6 +181,13 @@ RYU_BRANCH=${RYU_BRANCH:-master} SPICE_REPO=${SPICE_REPO:-http://anongit.freedesktop.org/git/spice/spice-html5.git} SPICE_BRANCH=${SPICE_BRANCH:-master} +# trove service +TROVE_REPO=${TROVE_REPO:-${GIT_BASE}/openstack/trove.git} +TROVE_BRANCH=${TROVE_BRANCH:-master} + +# trove client library test +TROVECLIENT_REPO=${TROVECLIENT_REPO:-${GIT_BASE}/openstack/python-troveclient.git} +TROVECLIENT_BRANCH=${TROVECLIENT_BRANCH:-master} # Nova hypervisor configuration. 
We default to libvirt with **kvm** but will # drop back to **qemu** if we are unable to load the kvm module. ``stack.sh`` can diff --git a/unstack.sh b/unstack.sh index 38f795b09b..05d9fb7c83 100755 --- a/unstack.sh +++ b/unstack.sh @@ -34,6 +34,7 @@ source $TOP_DIR/lib/horizon source $TOP_DIR/lib/swift source $TOP_DIR/lib/neutron source $TOP_DIR/lib/ironic +source $TOP_DIR/lib/trove # Determine what system we are running on. This provides ``os_VENDOR``, # ``os_RELEASE``, ``os_UPDATE``, ``os_PACKAGE``, ``os_CODENAME`` @@ -130,4 +131,8 @@ if is_service_enabled neutron; then cleanup_neutron fi +if is_service_enabled trove; then + cleanup_trove +fi + cleanup_tmp From d187bd95368c926af317723b3bc563ea2cae61bb Mon Sep 17 00:00:00 2001 From: Zhi Kun Liu Date: Wed, 11 Sep 2013 14:51:18 +0800 Subject: [PATCH 0356/4704] remove whitebox configuration in tempest.conf sync up with removing whitebox tests in tempest This commit depends on https://review.openstack.org/#/c/46116/ Change-Id: I410583187284c2951d872f6e9465f741decc60bd --- lib/tempest | 8 -------- 1 file changed, 8 deletions(-) diff --git a/lib/tempest b/lib/tempest index e48ccf2062..bc0b18d9f4 100644 --- a/lib/tempest +++ b/lib/tempest @@ -249,14 +249,6 @@ function configure_tempest() { iniset $TEMPEST_CONF compute live_migration_available ${LIVE_MIGRATION_AVAILABLE:-False} iniset $TEMPEST_CONF compute use_block_migration_for_live_migration ${USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION:-False} - # Whitebox - iniset $TEMPEST_CONF whitebox source_dir $NOVA_SOURCE_DIR - iniset $TEMPEST_CONF whitebox bin_dir $NOVA_BIN_DIR - # TODO(jaypipes): Create the key file here... right now, no whitebox - # tests actually use a key. 
- iniset $TEMPEST_CONF whitebox path_to_private_key $TEMPEST_DIR/id_rsa - iniset $TEMPEST_CONF whitebox db_uri $BASE_SQL_CONN/nova - # Compute admin iniset $TEMPEST_CONF "compute-admin" password "$password" # DEPRECATED From 05ae833be298d1b8fa85cfbb9ef57c059baea05e Mon Sep 17 00:00:00 2001 From: Salvatore Orlando Date: Tue, 20 Aug 2013 14:51:08 -0700 Subject: [PATCH 0357/4704] Colorize Neutron log output and refactor log setup code Bug 1214616 This patch adds colors to on-screen Neutron log output in the same way nova, cinder, and heat engine do. To this aim, colorized logging configuration has been moved to ./functions. The reason for this refactoring is that these instruction are the same or very similar for each project, with the only exception of the target configuration file. Change-Id: Idf0d1b842bb9ab046c9ef826de1dfc55b3f1df9d --- functions | 19 +++++++++++++++++++ lib/cinder | 7 ++----- lib/heat | 7 ++----- lib/neutron | 5 +++++ lib/nova | 7 ++----- 5 files changed, 30 insertions(+), 15 deletions(-) diff --git a/functions b/functions index f996ba89ab..566c85c3b9 100644 --- a/functions +++ b/functions @@ -1695,6 +1695,25 @@ function policy_add() { } +# This function sets log formatting options for colorizing log +# output to stdout. It is meant to be called by lib modules. +# The last two parameters are optional and can be used to specify +# non-default value for project and user format variables. 
+# Defaults are respectively 'project_name' and 'user_name' +# +# setup_colorized_logging something.conf SOMESECTION +function setup_colorized_logging() { + local conf_file=$1 + local conf_section=$2 + local project_var=${3:-"project_name"} + local user_var=${4:-"user_name"} + # Add color to logging output + iniset $conf_file $conf_section logging_context_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [%(request_id)s %("$user_var")s %("$project_var")s%(color)s] %(instance)s%(color)s%(message)s" + iniset $conf_file $conf_section logging_default_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s" + iniset $conf_file $conf_section logging_debug_format_suffix "from (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d" + iniset $conf_file $conf_section logging_exception_prefix "%(color)s%(asctime)s.%(msecs)03d TRACE %(name)s %(instance)s" +} + # Restore xtrace $XTRACE diff --git a/lib/cinder b/lib/cinder index 7f1544b444..bec65ed234 100644 --- a/lib/cinder +++ b/lib/cinder @@ -255,12 +255,9 @@ function configure_cinder() { iniset $CINDER_CONF DEFAULT volume_clear none fi + # Format logging if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then - # Add color to logging output - iniset $CINDER_CONF DEFAULT logging_context_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [%(request_id)s %(user_id)s %(project_id)s%(color)s] %(instance)s%(color)s%(message)s" - iniset $CINDER_CONF DEFAULT logging_default_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s" - iniset $CINDER_CONF DEFAULT logging_debug_format_suffix "from (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d" - iniset $CINDER_CONF DEFAULT logging_exception_prefix "%(color)s%(asctime)s.%(msecs)03d TRACE %(name)s %(instance)s" + setup_colorized_logging $CINDER_CONF DEFAULT "project_id" "user_id" fi if [ "$CINDER_DRIVER" == 
"XenAPINFS" ]; then diff --git a/lib/heat b/lib/heat index afa0eeb765..ac769162db 100644 --- a/lib/heat +++ b/lib/heat @@ -1,4 +1,4 @@ -# lib/heat +etup lib/heat # Install and start **Heat** service # To enable, add the following to localrc @@ -86,10 +86,7 @@ function configure_heat() { iniset $HEAT_CONF DEFAULT use_syslog $SYSLOG if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then # Add color to logging output - iniset $HEAT_CONF DEFAULT logging_context_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s%(color)s] %(instance)s%(color)s%(message)s" - iniset $HEAT_CONF DEFAULT logging_default_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s" - iniset $HEAT_CONF DEFAULT logging_debug_format_suffix "from (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d" - iniset $HEAT_CONF DEFAULT logging_exception_prefix "%(color)s%(asctime)s.%(msecs)03d TRACE %(name)s %(instance)s" + setup_colorized_logging $HEAT_CONF DEFAULT fi # keystone authtoken diff --git a/lib/neutron b/lib/neutron index 5664ff2cc5..4a3d1b06a6 100644 --- a/lib/neutron +++ b/lib/neutron @@ -534,6 +534,11 @@ function _configure_neutron_common() { iniset $NEUTRON_CONF quotas quota_security_group_rule -1 fi + # Format logging + if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then + setup_colorized_logging $NEUTRON_CONF DEFAULT + fi + _neutron_setup_rootwrap } diff --git a/lib/nova b/lib/nova index 9b766a9114..568f67d445 100644 --- a/lib/nova +++ b/lib/nova @@ -499,12 +499,9 @@ function create_nova_conf() { if [ "$API_RATE_LIMIT" != "True" ]; then iniset $NOVA_CONF DEFAULT api_rate_limit "False" fi + # Format logging if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then - # Add color to logging output - iniset $NOVA_CONF DEFAULT logging_context_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [%(request_id)s %(user_name)s 
%(project_name)s%(color)s] %(instance)s%(color)s%(message)s" - iniset $NOVA_CONF DEFAULT logging_default_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s" - iniset $NOVA_CONF DEFAULT logging_debug_format_suffix "from (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d" - iniset $NOVA_CONF DEFAULT logging_exception_prefix "%(color)s%(asctime)s.%(msecs)03d TRACE %(name)s %(instance)s" + setup_colorized_logging $NOVA_CONF DEFAULT else # Show user_name and project_name instead of user_id and project_id iniset $NOVA_CONF DEFAULT logging_context_format_string "%(asctime)s.%(msecs)03d %(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s] %(instance)s%(message)s" From c76c058df21ae9fa0198dfcaad0c0ea4ead8e09f Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Thu, 12 Sep 2013 11:42:08 -0700 Subject: [PATCH 0358/4704] Disable ceilometer.compute.nova_notifier driver It appears that the ceilometer nova notification driver is causing nova-compute to hang. The last thing nova-compute logs before hanging is a line from this driver. At the very least the ceilometer nova notification keeps stacktracing. 
Change-Id: Ic375272b751159a64777ca73c1b64515195aacfb Related-Bug: #1221987 --- lib/nova | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/nova b/lib/nova index 9b766a9114..577c260d35 100644 --- a/lib/nova +++ b/lib/nova @@ -513,7 +513,6 @@ function create_nova_conf() { iniset $NOVA_CONF DEFAULT instance_usage_audit "True" iniset $NOVA_CONF DEFAULT instance_usage_audit_period "hour" iniset $NOVA_CONF DEFAULT notify_on_state_change "vm_and_task_state" - iniset_multiline $NOVA_CONF DEFAULT notification_driver "nova.openstack.common.notifier.rpc_notifier" "ceilometer.compute.nova_notifier" fi # Provide some transition from ``EXTRA_FLAGS`` to ``EXTRA_OPTS`` From 8f5bf93d069f2ec4b85710fb05378e5d3027be86 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Mon, 16 Sep 2013 01:40:13 +0200 Subject: [PATCH 0359/4704] lib/tempest remove whitebox section tempest whitebox tests are removed from the tempest repo, so it's configuration is unnecessary. Change-Id: I6659e2af894014518a486f411ca06179d43bbb8b --- lib/tempest | 8 -------- 1 file changed, 8 deletions(-) diff --git a/lib/tempest b/lib/tempest index e48ccf2062..bc0b18d9f4 100644 --- a/lib/tempest +++ b/lib/tempest @@ -249,14 +249,6 @@ function configure_tempest() { iniset $TEMPEST_CONF compute live_migration_available ${LIVE_MIGRATION_AVAILABLE:-False} iniset $TEMPEST_CONF compute use_block_migration_for_live_migration ${USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION:-False} - # Whitebox - iniset $TEMPEST_CONF whitebox source_dir $NOVA_SOURCE_DIR - iniset $TEMPEST_CONF whitebox bin_dir $NOVA_BIN_DIR - # TODO(jaypipes): Create the key file here... right now, no whitebox - # tests actually use a key. 
- iniset $TEMPEST_CONF whitebox path_to_private_key $TEMPEST_DIR/id_rsa - iniset $TEMPEST_CONF whitebox db_uri $BASE_SQL_CONN/nova - # Compute admin iniset $TEMPEST_CONF "compute-admin" password "$password" # DEPRECATED From 1e4551db44d6c6d89ab5a595935b310ea0584210 Mon Sep 17 00:00:00 2001 From: Aaron Rosen Date: Mon, 16 Sep 2013 13:58:08 -0700 Subject: [PATCH 0360/4704] use method pip_install over sudo pip install so proxy settings work Change-Id: I2f0c69a72ef73c317b707d99c65cab0fb590d158 --- tools/fixup_stuff.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index 371b25fc8f..87922c8ece 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -34,8 +34,8 @@ FILES=$TOP_DIR/files # --------------- # Pre-install affected packages so we can fix the permissions -sudo pip install prettytable -sudo pip install httplib2 +pip_install prettytable +pip_install httplib2 SITE_DIRS=$(python -c "import site; import os; print os.linesep.join(site.getsitepackages())") for dir in $SITE_DIRS; do From d582460147404587fbcd3a39f350109d1a04a74f Mon Sep 17 00:00:00 2001 From: Jamie Lennox Date: Tue, 17 Sep 2013 11:44:37 +1000 Subject: [PATCH 0361/4704] Allow replacing a user variable in a swift template Prepare for a change in swift templates that will have a %USER% variable. 
Change-Id: I611ae7f82de7f2e6a38ce3de38d0600fa8687bff Partial-Bug: 1226346 --- lib/swift | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/swift b/lib/swift index f72beafef7..ae8ef746f0 100644 --- a/lib/swift +++ b/lib/swift @@ -132,6 +132,7 @@ function _config_swift_apache_wsgi() { s/%PORT%/$proxy_port/g; s/%SERVICENAME%/proxy-server/g; s/%APACHE_NAME%/${APACHE_NAME}/g; + s/%USER%/${STACK_USER}/g; " -i ${apache_vhost_dir}/proxy-server sudo cp ${SWIFT_DIR}/examples/wsgi/proxy-server.wsgi.template ${SWIFT_APACHE_WSGI_DIR}/proxy-server.wsgi @@ -152,6 +153,7 @@ function _config_swift_apache_wsgi() { s/%PORT%/$object_port/g; s/%SERVICENAME%/object-server-${node_number}/g; s/%APACHE_NAME%/${APACHE_NAME}/g; + s/%USER%/${STACK_USER}/g; " -i ${apache_vhost_dir}/object-server-${node_number} ! is_fedora && sudo a2ensite object-server-${node_number} @@ -167,6 +169,7 @@ function _config_swift_apache_wsgi() { s/%PORT%/$container_port/g; s/%SERVICENAME%/container-server-${node_number}/g; s/%APACHE_NAME%/${APACHE_NAME}/g; + s/%USER%/${STACK_USER}/g; " -i ${apache_vhost_dir}/container-server-${node_number} ! is_fedora && sudo a2ensite container-server-${node_number} @@ -182,6 +185,7 @@ function _config_swift_apache_wsgi() { s/%PORT%/$account_port/g; s/%SERVICENAME%/account-server-${node_number}/g; s/%APACHE_NAME%/${APACHE_NAME}/g; + s/%USER%/${STACK_USER}/g; " -i ${apache_vhost_dir}/account-server-${node_number} ! is_fedora && sudo a2ensite account-server-${node_number} From 5a3d7707931186664f32b1232970e3f4f4b7526f Mon Sep 17 00:00:00 2001 From: Jamie Lennox Date: Tue, 17 Sep 2013 11:44:05 +1000 Subject: [PATCH 0362/4704] Remove change_apache_user_group function This function allowed you to change the process running user for all of apache. This is better handled on a per-process basis. 
Change-Id: I165adc0c49fc328f34835856b49983c4e189f143 Fixes: bug 1226346 Fixes: bug 1225853 --- lib/apache | 24 ------------------------ lib/swift | 7 ------- 2 files changed, 31 deletions(-) diff --git a/lib/apache b/lib/apache index a2b0534d16..d811f87510 100644 --- a/lib/apache +++ b/lib/apache @@ -4,7 +4,6 @@ # Dependencies: # ``functions`` file # is_apache_enabled_service -# change_apache_user_group # install_apache_wsgi # config_apache_wsgi # start_apache_server @@ -52,29 +51,6 @@ function is_apache_enabled_service() { return 1 } -# change_apache_user_group() - Change the User/Group to run Apache server -function change_apache_user_group(){ - local stack_user=$@ - if is_ubuntu; then - sudo sed -e " - s/^export APACHE_RUN_USER=.*/export APACHE_RUN_USER=${stack_user}/g; - s/^export APACHE_RUN_GROUP=.*/export APACHE_RUN_GROUP=${stack_user}/g - " -i /etc/${APACHE_NAME}/envvars - elif is_fedora; then - sudo sed -e " - s/^User .*/User ${stack_user}/g; - s/^Group .*/Group ${stack_user}/g - " -i /etc/${APACHE_NAME}/httpd.conf - elif is_suse; then - sudo sed -e " - s/^User .*/User ${stack_user}/g; - s/^Group .*/Group ${stack_user}/g - " -i /etc/${APACHE_NAME}/uid.conf - else - exit_distro_not_supported "apache user and group" - fi -} - # install_apache_wsgi() - Install Apache server and wsgi module function install_apache_wsgi() { # Apache installation, because we mark it NOPRIME diff --git a/lib/swift b/lib/swift index ae8ef746f0..83f5369cbb 100644 --- a/lib/swift +++ b/lib/swift @@ -197,9 +197,6 @@ function _config_swift_apache_wsgi() { done - # run apache server as stack user - change_apache_user_group ${STACK_USER} - # WSGI isn't enabled by default, enable it ! is_fedora && sudo a2enmod wsgi } @@ -556,10 +553,6 @@ function start_swift() { fi if is_apache_enabled_service swift; then - # Make sure the apache lock dir is owned by $STACK_USER - # for running apache server to avoid failure of restarting - # apache server due to permission problem. 
- sudo chown -R $STACK_USER /var/run/lock/$APACHE_NAME restart_apache_server swift-init --run-dir=${SWIFT_DATA_DIR}/run rest start screen_it s-proxy "cd $SWIFT_DIR && sudo tail -f /var/log/$APACHE_NAME/proxy-server" From 0f4f44315905db86fb0e3f43f9c6cf3b85ea34c1 Mon Sep 17 00:00:00 2001 From: Russell Bryant Date: Tue, 17 Sep 2013 17:15:25 -0400 Subject: [PATCH 0363/4704] docker: ensure socat is installed install_docker.sh failed for me because socat wasn't installed. Add it to this script since it expects it to be there. Change-Id: Ic55f5e38de1b38bdd37407b7bec533d4c3eff2a9 --- tools/docker/install_docker.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/docker/install_docker.sh b/tools/docker/install_docker.sh index d659ad104b..289002e8e7 100755 --- a/tools/docker/install_docker.sh +++ b/tools/docker/install_docker.sh @@ -38,7 +38,7 @@ curl https://get.docker.io/gpg | sudo apt-key add - install_package python-software-properties && \ sudo sh -c "echo deb $DOCKER_APT_REPO docker main > /etc/apt/sources.list.d/docker.list" apt_get update -install_package --force-yes lxc-docker=${DOCKER_PACKAGE_VERSION} +install_package --force-yes lxc-docker=${DOCKER_PACKAGE_VERSION} socat # Start the daemon - restart just in case the package ever auto-starts... restart_service docker From 3418c1caa5c52fd9989e5829fda0848b4a8dfea7 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Mon, 16 Sep 2013 18:35:49 +0200 Subject: [PATCH 0364/4704] Increase default swift storage Swift storage is used as glance image back-end. Tempest have cinder to uploads 1 GiB image from cinder in twice. In parallel execution in cause an issue, bacuse the current default size is 1_000_000 KiB. Increasing the default swit storage size from 1_000_000 KiB 4_000_000 KiB when tempest is enabled. 
Fixing bug 1225664 Change-Id: Iccd6368e4df71abb5ccfe7d361c64d86e1071d35 --- lib/swift | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/lib/swift b/lib/swift index f72beafef7..3dcd8b6eb0 100644 --- a/lib/swift +++ b/lib/swift @@ -55,7 +55,13 @@ fi # swift data. Set ``SWIFT_LOOPBACK_DISK_SIZE`` to the disk size in # kilobytes. # Default is 1 gigabyte. -SWIFT_LOOPBACK_DISK_SIZE=${SWIFT_LOOPBACK_DISK_SIZE:-1000000} +SWIFT_LOOPBACK_DISK_SIZE_DEFAULT=1048576 +# if tempest enabled the default size is 4 Gigabyte. +if is_service_enabled tempest; then + SWIFT_LOOPBACK_DISK_SIZE_DEFAULT=${SWIFT_LOOPBACK_DISK_SIZE:-4194304} +fi + +SWIFT_LOOPBACK_DISK_SIZE=${SWIFT_LOOPBACK_DISK_SIZE:-$SWIFT_LOOPBACK_DISK_SIZE_DEFAULT} # Set ``SWIFT_EXTRAS_MIDDLEWARE`` to extras middlewares. # Default is ``staticweb, tempurl, formpost`` From 1ca490c049d2d4b3882d764c1274a614b1588501 Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Thu, 19 Sep 2013 10:03:36 +0100 Subject: [PATCH 0365/4704] xenapi: Use C locale By exporting the LC_ALL=C we can get rid of localisation issues, as the actual scripts are already assuming an english installation. FIxes bug 1227527 Change-Id: Ieeebce4d53b09959146a970f3fb803201ac5ebdf --- tools/xen/install_os_domU.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh index b49504d9e9..110bbd998c 100755 --- a/tools/xen/install_os_domU.sh +++ b/tools/xen/install_os_domU.sh @@ -10,6 +10,8 @@ set -o errexit set -o nounset set -o xtrace +export LC_ALL=C + # Abort if localrc is not set if [ ! -e ../../localrc ]; then echo "You must have a localrc with ALL necessary passwords defined before proceeding." 
From 704106a1bd316d9a0df2f82233817ceeda92e744 Mon Sep 17 00:00:00 2001 From: Ralf Haferkamp Date: Thu, 12 Sep 2013 14:24:47 +0200 Subject: [PATCH 0366/4704] Fix LDAP support for openSUSE Closes-Bug: #1227651 Change-Id: I1c55fbb2f65f882a0ae2bcf4767c0a3e0f0f47e7 --- files/ldap/base-config.ldif | 19 +++++++++++++++++++ lib/ldap | 18 ++++++++++++++---- 2 files changed, 33 insertions(+), 4 deletions(-) create mode 100644 files/ldap/base-config.ldif diff --git a/files/ldap/base-config.ldif b/files/ldap/base-config.ldif new file mode 100644 index 0000000000..026d8bc0fc --- /dev/null +++ b/files/ldap/base-config.ldif @@ -0,0 +1,19 @@ +dn: cn=config +objectClass: olcGlobal +cn: config +olcArgsFile: /var/run/slapd/slapd.args +olcAuthzRegexp: {0}gidNumber=0\+uidNumber=0,cn=peercred,cn=external,cn=auth dn + :cn=config +olcPidFile: /var/run/slapd/slapd.pid +olcSizeLimit: 10000 + +dn: cn=schema,cn=config +objectClass: olcSchemaConfig +cn: schema + +include: file:///etc/openldap/schema/core.ldif + +dn: olcDatabase={1}hdb,cn=config +objectClass: olcHdbConfig +olcDbDirectory: /var/lib/ldap +olcSuffix: dc=openstack,dc=org diff --git a/lib/ldap b/lib/ldap index 89b31b2c25..2a24ccddf7 100644 --- a/lib/ldap +++ b/lib/ldap @@ -8,6 +8,7 @@ XTRACE=$(set +o | grep xtrace) set +o xtrace +LDAP_SERVICE_NAME=slapd # Functions # --------- @@ -24,10 +25,19 @@ function install_ldap() { LDAP_ROOTPW_COMMAND=replace sudo DEBIAN_FRONTEND=noninteractive apt-get install slapd ldap-utils #automatically starts LDAP on ubuntu so no need to call start_ldap - elif is_fedora || is_suse; then + elif is_fedora; then LDAP_OLCDB_NUMBER=2 LDAP_ROOTPW_COMMAND=add start_ldap + elif is_suse; then + LDAP_OLCDB_NUMBER=1 + LDAP_ROOTPW_COMMAND=add + LDAP_SERVICE_NAME=ldap + # SUSE has slappasswd in /usr/sbin/ + PATH=$PATH:/usr/sbin/ + sudo slapadd -F /etc/openldap/slapd.d/ -bcn=config -l $FILES/ldap/base-config.ldif + sudo sed -i '/^OPENLDAP_START_LDAPI=/s/"no"/"yes"/g' /etc/sysconfig/openldap + start_ldap fi printf 
"generate password file" @@ -42,7 +52,7 @@ function install_ldap() { sudo ldapmodify -Y EXTERNAL -H ldapi:/// -f $TMP_MGR_DIFF_FILE # On fedora we need to manually add cosine and inetorgperson schemas - if is_fedora; then + if is_fedora || is_suse; then sudo ldapadd -Y EXTERNAL -H ldapi:/// -f /etc/openldap/schema/cosine.ldif sudo ldapadd -Y EXTERNAL -H ldapi:/// -f /etc/openldap/schema/inetorgperson.ldif fi @@ -64,13 +74,13 @@ function install_ldap() { # start_ldap() - Start LDAP function start_ldap() { - sudo service slapd restart + sudo service $LDAP_SERVICE_NAME restart } # stop_ldap() - Stop LDAP function stop_ldap() { - sudo service slapd stop + sudo service $LDAP_SERVICE_NAME stop } # clear_ldap_state() - Clear LDAP State From 7d5621583737fd74119cc30e8216780e1a192291 Mon Sep 17 00:00:00 2001 From: ZhiQiang Fan Date: Fri, 20 Sep 2013 02:20:35 +0800 Subject: [PATCH 0367/4704] Replace OpenStack LLC with OpenStack Foundation Change-Id: I7642e7163b615798867881b012240164465c5e43 Fixes-Bug: #1214176 --- tools/xen/scripts/install-os-vpx.sh | 2 +- tools/xen/scripts/mkxva | 2 +- tools/xen/scripts/uninstall-os-vpx.sh | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/xen/scripts/install-os-vpx.sh b/tools/xen/scripts/install-os-vpx.sh index c94a593e3d..7469e0c10b 100755 --- a/tools/xen/scripts/install-os-vpx.sh +++ b/tools/xen/scripts/install-os-vpx.sh @@ -1,7 +1,7 @@ #!/bin/bash # # Copyright (c) 2011 Citrix Systems, Inc. -# Copyright 2011 OpenStack LLC. +# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/tools/xen/scripts/mkxva b/tools/xen/scripts/mkxva index a316da2ddb..392c05b407 100755 --- a/tools/xen/scripts/mkxva +++ b/tools/xen/scripts/mkxva @@ -1,7 +1,7 @@ #!/bin/bash # # Copyright (c) 2011 Citrix Systems, Inc. -# Copyright 2011 OpenStack LLC. +# Copyright 2011 OpenStack Foundation # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/tools/xen/scripts/uninstall-os-vpx.sh b/tools/xen/scripts/uninstall-os-vpx.sh index 0feaec79e5..ac260949c4 100755 --- a/tools/xen/scripts/uninstall-os-vpx.sh +++ b/tools/xen/scripts/uninstall-os-vpx.sh @@ -1,7 +1,7 @@ #!/bin/bash # # Copyright (c) 2011 Citrix Systems, Inc. -# Copyright 2011 OpenStack LLC. +# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may From 072d137766d2a9d933147a9cbb61445674387334 Mon Sep 17 00:00:00 2001 From: AmalaBasha Date: Fri, 20 Sep 2013 16:26:10 +0530 Subject: [PATCH 0368/4704] edit-glance-manage-command-for-recreate-db As per https://bugs.launchpad.net/glance/+bug/1213197, and subsequent review at https://review.openstack.org/#/c/47161/ Glance-manage commands are proposed to be subcommands of 'db'. This would require change to the script to recreate_db which calls the db_sync command. Implements blueprint edit-glance-manage-command-for-recreate-db Change-Id: I9470709ec34896dba7a37fdff4791206bb5ef5ed --- lib/glance | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/glance b/lib/glance index 7e6968200f..510692c261 100644 --- a/lib/glance +++ b/lib/glance @@ -171,7 +171,7 @@ function init_glance() { recreate_database glance utf8 # Migrate glance database - $GLANCE_BIN_DIR/glance-manage db_sync + $GLANCE_BIN_DIR/glance-manage db sync create_glance_cache_dir } From 14ea1a2b79aa7a9e7fff284b7d534c0038bbaa89 Mon Sep 17 00:00:00 2001 From: Kyle Mestery Date: Sun, 22 Sep 2013 03:04:56 +0000 Subject: [PATCH 0369/4704] Correctly set the L3 service plugin for ML2 ML2 uses a service plugin for L3. This patch to devstack correctly sets this by setting or updating the variable Q_SERVICE_PLUGIN_CLASSES, which makes ML2 compatible when running with other service plugins (e.g. LBaaS and VPN). 
Fixes bug 1231622 Change-Id: I0ce1f5a42bd052995135ffac1ee5ef382d69789e --- lib/neutron_plugins/ml2 | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2 index 71a0638670..8d2e303854 100644 --- a/lib/neutron_plugins/ml2 +++ b/lib/neutron_plugins/ml2 @@ -30,6 +30,9 @@ Q_ML2_PLUGIN_VXLAN_TYPE_OPTIONS=${Q_ML2_PLUGIN_VXLAN_TYPE_OPTIONS:-vni_ranges=10 # Default VLAN TypeDriver options Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS=${Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS:-} +# L3 Plugin to load for ML2 +ML2_L3_PLUGIN=${ML2_L3_PLUGIN:-neutron.services.l3_router.l3_router_plugin.L3RouterPlugin} + function populate_ml2_config() { OPTS=$1 CONF=$2 @@ -48,13 +51,11 @@ function neutron_plugin_configure_common() { Q_PLUGIN_CLASS="neutron.plugins.ml2.plugin.Ml2Plugin" # The ML2 plugin delegates L3 routing/NAT functionality to # the L3 service plugin which must therefore be specified. - Q_L3_PLUGIN_CLASS=${Q_L3_PLUGIN_CLASS:-"neutron.services.l3_router.l3_router_plugin.L3RouterPlugin"} - if ini_has_option $NEUTRON_CONF DEFAULT service_plugins ; then - srv_plugins=$(iniget $NEUTRON_CONF DEFAULT service_plugins)","$Q_L3_PLUGIN_CLASS + if [[ $Q_SERVICE_PLUGIN_CLASSES == '' ]]; then + Q_SERVICE_PLUGIN_CLASSES=$ML2_L3_PLUGIN else - srv_plugins=$Q_L3_PLUGIN_CLASS + Q_SERVICE_PLUGIN_CLASSES="$Q_SERVICE_PLUGIN_CLASSES,$ML2_L3_PLUGIN" fi - iniset $NEUTRON_CONF DEFAULT service_plugins $srv_plugins } function neutron_plugin_configure_service() { From 19eed744225acdb08a35b4c8b7b13df3c0f078b7 Mon Sep 17 00:00:00 2001 From: Eoghan Glynn Date: Fri, 20 Sep 2013 21:11:25 +0000 Subject: [PATCH 0370/4704] Modified ceilometer alarm evaluator console script Take account of the modification to the alarm evaluator console script naming in the following commit: https://github.com/openstack/ceilometer/commit/bad5f18e Change-Id: Ic7fc3b8ad7be9dd2a5b5ed3c07e169691229bb4d --- lib/ceilometer | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git 
a/lib/ceilometer b/lib/ceilometer index 2afbc88b36..1b0431906a 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -5,7 +5,7 @@ # enable_service ceilometer-acompute ceilometer-acentral ceilometer-collector ceilometer-api # # To ensure Ceilometer alarming services are enabled also, further add to the localrc: -# enable_service ceilometer-alarm-notifier ceilometer-alarm-singleton +# enable_service ceilometer-alarm-notifier ceilometer-alarm-evaluator # Dependencies: # - functions @@ -139,13 +139,13 @@ function start_ceilometer() { screen_it ceilometer-collector "ceilometer-collector --config-file $CEILOMETER_CONF" screen_it ceilometer-api "ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF" screen_it ceilometer-alarm-notifier "ceilometer-alarm-notifier --config-file $CEILOMETER_CONF" - screen_it ceilometer-alarm-singleton "ceilometer-alarm-singleton --config-file $CEILOMETER_CONF" + screen_it ceilometer-alarm-evaluator "ceilometer-alarm-evaluator --config-file $CEILOMETER_CONF" } # stop_ceilometer() - Stop running processes function stop_ceilometer() { # Kill the ceilometer screen windows - for serv in ceilometer-acompute ceilometer-acentral ceilometer-collector ceilometer-api ceilometer-alarm-notifier ceilometer-alarm-singleton; do + for serv in ceilometer-acompute ceilometer-acentral ceilometer-collector ceilometer-api ceilometer-alarm-notifier ceilometer-alarm-evaluator; do screen -S $SCREEN_NAME -p $serv -X kill done } From 835db2feadd1795201abaf4be00efc85ef9f8253 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Mon, 23 Sep 2013 14:17:06 -0400 Subject: [PATCH 0371/4704] print out git references for trees in order to be sure we understand the environment that's running in an upstream test, print out the git information for the tree. This will hopefully address questions of "which commit of tempest" is being used for particular tests. 
Change-Id: Ief4e8a17fd75945f02982d2adf8625fe927d823d --- functions | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/functions b/functions index e1a5f4be3a..209f13c7d7 100644 --- a/functions +++ b/functions @@ -548,12 +548,18 @@ function is_arch { # Uses global ``OFFLINE`` # git_clone remote dest-dir branch function git_clone { - [[ "$OFFLINE" = "True" ]] && return - GIT_REMOTE=$1 GIT_DEST=$2 GIT_REF=$3 + if [[ "$OFFLINE" = "True" ]]; then + echo "Running in offline mode, clones already exist" + # print out the results so we know what change was used in the logs + cd $GIT_DEST + git show --oneline + return + fi + if echo $GIT_REF | egrep -q "^refs"; then # If our branch name is a gerrit style refs/changes/... if [[ ! -d $GIT_DEST ]]; then @@ -595,6 +601,10 @@ function git_clone { fi fi + + # print out the results so we know what change was used in the logs + cd $GIT_DEST + git show --oneline } From 9a532b84474f5c6e9e11808bcda9566f20274011 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 23 Sep 2013 13:44:38 -0500 Subject: [PATCH 0372/4704] XenServer hypervisor plugin Convert XenServer hypervisor configuration in Nova to the new plugin setup. 
Change-Id: I8916560ca3f2dae8b8d8bcb60b7aa2eb5984cbcb --- lib/nova | 16 +---- lib/nova_plugins/hypervisor-xenserver | 85 +++++++++++++++++++++++++++ stack.sh | 19 ------ 3 files changed, 87 insertions(+), 33 deletions(-) create mode 100644 lib/nova_plugins/hypervisor-xenserver diff --git a/lib/nova b/lib/nova index 577c260d35..b058bd382e 100644 --- a/lib/nova +++ b/lib/nova @@ -76,15 +76,7 @@ SPICE_DIR=$DEST/spice-html5 # -------------------------- # Set defaults according to the virt driver -if [ "$VIRT_DRIVER" = 'xenserver' ]; then - PUBLIC_INTERFACE_DEFAULT=eth2 - GUEST_INTERFACE_DEFAULT=eth1 - # Allow ``build_domU.sh`` to specify the flat network bridge via kernel args - FLAT_NETWORK_BRIDGE_DEFAULT=$(sed -e 's/.* flat_network_bridge=\([[:alnum:]]*\).*$/\1/g' /proc/cmdline) - if is_service_enabled neutron; then - XEN_INTEGRATION_BRIDGE=$(sed -e 's/.* xen_integration_bridge=\([[:alnum:]]*\).*$/\1/g' /proc/cmdline) - fi -elif [ "$VIRT_DRIVER" = 'baremetal' ]; then +if [ "$VIRT_DRIVER" = 'baremetal' ]; then NETWORK_MANAGER=${NETWORK_MANAGER:-FlatManager} PUBLIC_INTERFACE_DEFAULT=eth0 FLAT_INTERFACE=${FLAT_INTERFACE:-eth0} @@ -537,16 +529,12 @@ function create_nova_conf() { SPICEHTML5PROXY_URL=${SPICEHTML5PROXY_URL:-"http://$SERVICE_HOST:6082/spice_auto.html"} iniset $NOVA_CONF spice html5proxy_base_url "$SPICEHTML5PROXY_URL" fi - if [ "$VIRT_DRIVER" = 'xenserver' ]; then - VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=169.254.0.1} - else - VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=127.0.0.1} - fi if is_service_enabled n-novnc || is_service_enabled n-xvnc; then # Address on which instance vncservers will listen on compute hosts. # For multi-host, this should be the management ip of the compute host. 
VNCSERVER_LISTEN=${VNCSERVER_LISTEN=127.0.0.1} + VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=127.0.0.1} iniset $NOVA_CONF DEFAULT vnc_enabled true iniset $NOVA_CONF DEFAULT vncserver_listen "$VNCSERVER_LISTEN" iniset $NOVA_CONF DEFAULT vncserver_proxyclient_address "$VNCSERVER_PROXYCLIENT_ADDRESS" diff --git a/lib/nova_plugins/hypervisor-xenserver b/lib/nova_plugins/hypervisor-xenserver new file mode 100644 index 0000000000..f47994f187 --- /dev/null +++ b/lib/nova_plugins/hypervisor-xenserver @@ -0,0 +1,85 @@ +# lib/nova_plugins/hypervisor-xenserver +# Configure the XenServer hypervisor + +# Enable with: +# VIRT_DRIVER=xenserver + +# Dependencies: +# ``functions`` file +# ``nova`` configuration + +# install_nova_hypervisor - install any external requirements +# configure_nova_hypervisor - make configuration changes, including those to other services +# start_nova_hypervisor - start any external services +# stop_nova_hypervisor - stop any external services +# cleanup_nova_hypervisor - remove transient data and cache + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + +PUBLIC_INTERFACE_DEFAULT=eth2 +GUEST_INTERFACE_DEFAULT=eth1 +# Allow ``build_domU.sh`` to specify the flat network bridge via kernel args +FLAT_NETWORK_BRIDGE_DEFAULT=$(sed -e 's/.* flat_network_bridge=\([[:alnum:]]*\).*$/\1/g' /proc/cmdline) +if is_service_enabled neutron; then + XEN_INTEGRATION_BRIDGE=$(sed -e 's/.* xen_integration_bridge=\([[:alnum:]]*\).*$/\1/g' /proc/cmdline) +fi + +VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=169.254.0.1} + + +# Entry Points +# ------------ + +# clean_nova_hypervisor - Clean up an installation +function cleanup_nova_hypervisor() { + # This function intentionally left blank + : +} + +# configure_nova_hypervisor - Set config files, create data dirs, etc +function configure_nova_hypervisor() { + if [ -z "$XENAPI_CONNECTION_URL" ]; then + die $LINENO "XENAPI_CONNECTION_URL is not 
specified" + fi + read_password XENAPI_PASSWORD "ENTER A PASSWORD TO USE FOR XEN." + iniset $NOVA_CONF DEFAULT compute_driver "xenapi.XenAPIDriver" + iniset $NOVA_CONF DEFAULT xenapi_connection_url "$XENAPI_CONNECTION_URL" + iniset $NOVA_CONF DEFAULT xenapi_connection_username "$XENAPI_USER" + iniset $NOVA_CONF DEFAULT xenapi_connection_password "$XENAPI_PASSWORD" + iniset $NOVA_CONF DEFAULT flat_injected "False" + # Need to avoid crash due to new firewall support + XEN_FIREWALL_DRIVER=${XEN_FIREWALL_DRIVER:-"nova.virt.firewall.IptablesFirewallDriver"} + iniset $NOVA_CONF DEFAULT firewall_driver "$XEN_FIREWALL_DRIVER" +} + +# install_nova_hypervisor() - Install external components +function install_nova_hypervisor() { + # This function intentionally left blank + : +} + +# start_nova_hypervisor - Start any required external services +function start_nova_hypervisor() { + # This function intentionally left blank + : +} + +# stop_nova_hypervisor - Stop any external services +function stop_nova_hypervisor() { + # This function intentionally left blank + : +} + + +# Restore xtrace +$MY_XTRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/stack.sh b/stack.sh index 71e7317ee3..7bb4b59cc1 100755 --- a/stack.sh +++ b/stack.sh @@ -1011,25 +1011,6 @@ if is_service_enabled nova; then configure_nova_hypervisor - # XenServer - # --------- - - elif [ "$VIRT_DRIVER" = 'xenserver' ]; then - echo_summary "Using XenServer virtualization driver" - if [ -z "$XENAPI_CONNECTION_URL" ]; then - die $LINENO "XENAPI_CONNECTION_URL is not specified" - fi - read_password XENAPI_PASSWORD "ENTER A PASSWORD TO USE FOR XEN." 
- iniset $NOVA_CONF DEFAULT compute_driver "xenapi.XenAPIDriver" - iniset $NOVA_CONF DEFAULT xenapi_connection_url "$XENAPI_CONNECTION_URL" - iniset $NOVA_CONF DEFAULT xenapi_connection_username "$XENAPI_USER" - iniset $NOVA_CONF DEFAULT xenapi_connection_password "$XENAPI_PASSWORD" - iniset $NOVA_CONF DEFAULT flat_injected "False" - # Need to avoid crash due to new firewall support - XEN_FIREWALL_DRIVER=${XEN_FIREWALL_DRIVER:-"nova.virt.firewall.IptablesFirewallDriver"} - iniset $NOVA_CONF DEFAULT firewall_driver "$XEN_FIREWALL_DRIVER" - - # OpenVZ # ------ From f4bd16ac84904eb3afc0eca283b63a1a6efd2c5a Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 23 Sep 2013 14:07:31 -0500 Subject: [PATCH 0373/4704] fake hypervisor plugin Convert fake hypervisor configuration in Nova to the new plugin setup. Change-Id: I8b1404ee97a2a65f0884efae642b98bb134cb2aa --- lib/nova_plugins/hypervisor-fake | 77 ++++++++++++++++++++++++++++++++ stack.sh | 20 --------- 2 files changed, 77 insertions(+), 20 deletions(-) create mode 100644 lib/nova_plugins/hypervisor-fake diff --git a/lib/nova_plugins/hypervisor-fake b/lib/nova_plugins/hypervisor-fake new file mode 100644 index 0000000000..fe0d1900ee --- /dev/null +++ b/lib/nova_plugins/hypervisor-fake @@ -0,0 +1,77 @@ +# lib/nova_plugins/hypervisor-fake +# Configure the fake hypervisor + +# Enable with: +# VIRT_DRIVER=fake + +# Dependencies: +# ``functions`` file +# ``nova`` configuration + +# install_nova_hypervisor - install any external requirements +# configure_nova_hypervisor - make configuration changes, including those to other services +# start_nova_hypervisor - start any external services +# stop_nova_hypervisor - stop any external services +# cleanup_nova_hypervisor - remove transient data and cache + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + + +# Entry Points +# ------------ + +# clean_nova_hypervisor - Clean up an installation +function cleanup_nova_hypervisor() { 
+ # This function intentionally left blank + : +} + +# configure_nova_hypervisor - Set config files, create data dirs, etc +function configure_nova_hypervisor() { + iniset $NOVA_CONF DEFAULT compute_driver "nova.virt.fake.FakeDriver" + # Disable arbitrary limits + iniset $NOVA_CONF DEFAULT quota_instances -1 + iniset $NOVA_CONF DEFAULT quota_cores -1 + iniset $NOVA_CONF DEFAULT quota_ram -1 + iniset $NOVA_CONF DEFAULT quota_floating_ips -1 + iniset $NOVA_CONF DEFAULT quota_fixed_ips -1 + iniset $NOVA_CONF DEFAULT quota_metadata_items -1 + iniset $NOVA_CONF DEFAULT quota_injected_files -1 + iniset $NOVA_CONF DEFAULT quota_injected_file_path_bytes -1 + iniset $NOVA_CONF DEFAULT quota_security_groups -1 + iniset $NOVA_CONF DEFAULT quota_security_group_rules -1 + iniset $NOVA_CONF DEFAULT quota_key_pairs -1 + iniset $NOVA_CONF DEFAULT scheduler_default_filters "RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter" +} + +# install_nova_hypervisor() - Install external components +function install_nova_hypervisor() { + # This function intentionally left blank + : +} + +# start_nova_hypervisor - Start any required external services +function start_nova_hypervisor() { + # This function intentionally left blank + : +} + +# stop_nova_hypervisor - Stop any external services +function stop_nova_hypervisor() { + # This function intentionally left blank + : +} + + +# Restore xtrace +$MY_XTRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/stack.sh b/stack.sh index 71e7317ee3..6bab2183d6 100755 --- a/stack.sh +++ b/stack.sh @@ -1104,26 +1104,6 @@ if is_service_enabled nova; then iniset $NOVA_CONF vmware integration_bridge $OVS_BRIDGE fi - # fake - # ---- - - elif [ "$VIRT_DRIVER" = 'fake' ]; then - echo_summary "Using fake Virt driver" - iniset $NOVA_CONF DEFAULT compute_driver "nova.virt.fake.FakeDriver" - # Disable arbitrary limits - iniset $NOVA_CONF DEFAULT quota_instances -1 - iniset $NOVA_CONF DEFAULT quota_cores 
-1 - iniset $NOVA_CONF DEFAULT quota_ram -1 - iniset $NOVA_CONF DEFAULT quota_floating_ips -1 - iniset $NOVA_CONF DEFAULT quota_fixed_ips -1 - iniset $NOVA_CONF DEFAULT quota_metadata_items -1 - iniset $NOVA_CONF DEFAULT quota_injected_files -1 - iniset $NOVA_CONF DEFAULT quota_injected_file_path_bytes -1 - iniset $NOVA_CONF DEFAULT quota_security_groups -1 - iniset $NOVA_CONF DEFAULT quota_security_group_rules -1 - iniset $NOVA_CONF DEFAULT quota_key_pairs -1 - iniset $NOVA_CONF DEFAULT scheduler_default_filters "RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter" - # Default libvirt # --------------- From c3431bfdd90b3d149b119038d19f6a22bc278dc0 Mon Sep 17 00:00:00 2001 From: Doug Hellmann Date: Fri, 6 Sep 2013 15:30:22 -0400 Subject: [PATCH 0374/4704] Clean up automated changes to requirements Some of us like to reuse sandboxes, and keep them up to date. This is very difficult to do if devstack leaves modifications to requirements.txt files after a run, since 'git pull' may refuse to overwrite those changes. This modification has devstack undo the changes to the requirements files, to leave the sandbox in a clean state again. Change-Id: Ia2d928ade8141b59b56a2c4548d760bf6911a3e5 --- functions | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/functions b/functions index e1a5f4be3a..1c45851d91 100644 --- a/functions +++ b/functions @@ -1216,7 +1216,10 @@ function setup_develop() { echo "cd $REQUIREMENTS_DIR; $SUDO_CMD python update.py $project_dir" # Don't update repo if local changes exist - if (cd $project_dir && git diff --quiet); then + (cd $project_dir && git diff --quiet) + local update_requirements=$? 
+ + if [ $update_requirements -eq 0 ]; then (cd $REQUIREMENTS_DIR; \ $SUDO_CMD python update.py $project_dir) fi @@ -1224,6 +1227,11 @@ function setup_develop() { pip_install -e $project_dir # ensure that further actions can do things like setup.py sdist safe_chown -R $STACK_USER $1/*.egg-info + + # Undo requirements changes, if we made them + if [ $update_requirements -eq 0 ]; then + (cd $project_dir && git checkout -- requirements.txt test-requirements.txt setup.py) + fi } From 5470701e10ee68c80860d4cf7e0fa5d8a913c288 Mon Sep 17 00:00:00 2001 From: Jamie Lennox Date: Tue, 17 Sep 2013 12:07:48 +1000 Subject: [PATCH 0375/4704] Fix swift httpd on fedora Implements a fedora equivalent of ubuntu's sites-enabled and moves enabling of mod_wsgi to the installation period so that it doesn't have to be handled in a platform dependant way later. Fixes: bug 1226363 Change-Id: I85325179f1792d985b0375572abfe8c8a82fecc3 --- lib/apache | 27 +++++++++++++++++++++++++++ lib/horizon | 5 ----- lib/swift | 16 ++++++---------- 3 files changed, 33 insertions(+), 15 deletions(-) diff --git a/lib/apache b/lib/apache index d811f87510..3a1f6f1263 100644 --- a/lib/apache +++ b/lib/apache @@ -6,6 +6,8 @@ # is_apache_enabled_service # install_apache_wsgi # config_apache_wsgi +# enable_apache_site +# disable_apache_site # start_apache_server # stop_apache_server # restart_apache_server @@ -57,16 +59,41 @@ function install_apache_wsgi() { if is_ubuntu; then # Install apache2, which is NOPRIME'd install_package apache2 libapache2-mod-wsgi + # WSGI isn't enabled by default, enable it + sudo a2enmod wsgi elif is_fedora; then sudo rm -f /etc/httpd/conf.d/000-* install_package httpd mod_wsgi elif is_suse; then install_package apache2 apache2-mod_wsgi + # WSGI isn't enabled by default, enable it + sudo a2enmod wsgi else exit_distro_not_supported "apache installation" fi } +# enable_apache_site() - Enable a particular apache site +function enable_apache_site() { + local site=$@ + if is_ubuntu; then + 
sudo a2ensite ${site} + elif is_fedora; then + # fedora conf.d is only imported if it ends with .conf so this is approx the same + sudo mv /etc/$APACHE_NAME/$APACHE_CONF_DIR/${site} /etc/$APACHE_NAME/$APACHE_CONF_DIR/${site}.conf + fi +} + +# disable_apache_site() - Disable a particular apache site +function disable_apache_site() { + local site=$@ + if is_ubuntu; then + sudo a2dissite ${site} + elif is_fedora; then + sudo mv /etc/$APACHE_NAME/$APACHE_CONF_DIR/${site}.conf /etc/$APACHE_NAME/$APACHE_CONF_DIR/${site} + fi +} + # start_apache_server() - Start running apache server function start_apache_server() { start_service $APACHE_NAME diff --git a/lib/horizon b/lib/horizon index e55bc152f6..5973eb2a9f 100644 --- a/lib/horizon +++ b/lib/horizon @@ -123,8 +123,6 @@ function init_horizon() { # Be a good citizen and use the distro tools here sudo touch $horizon_conf sudo a2ensite horizon.conf - # WSGI isn't enabled by default, enable it - sudo a2enmod wsgi elif is_fedora; then if [[ "$os_RELEASE" -ge "18" ]]; then # fedora 18 has Require all denied in its httpd.conf @@ -132,9 +130,6 @@ function init_horizon() { HORIZON_REQUIRE='Require all granted' fi sudo sed '/^Listen/s/^.*$/Listen 0.0.0.0:80/' -i /etc/httpd/conf/httpd.conf - elif is_suse; then - # WSGI isn't enabled by default, enable it - sudo a2enmod wsgi else exit_distro_not_supported "apache configuration" fi diff --git a/lib/swift b/lib/swift index 8741e551ad..9c80802ba9 100644 --- a/lib/swift +++ b/lib/swift @@ -115,11 +115,11 @@ function cleanup_swift() { # _cleanup_swift_apache_wsgi() - Remove wsgi files, disable and remove apache vhost file function _cleanup_swift_apache_wsgi() { sudo rm -f $SWIFT_APACHE_WSGI_DIR/*.wsgi - ! is_fedora && sudo a2dissite proxy-server + disable_apache_site proxy-server for node_number in ${SWIFT_REPLICAS_SEQ}; do for type in object container account; do site_name=${type}-server-${node_number} - ! 
is_fedora && sudo a2dissite ${site_name} + disable_apache_site ${site_name} sudo rm -f /etc/$APACHE_NAME/$APACHE_CONF_DIR/${site_name} done done @@ -140,13 +140,13 @@ function _config_swift_apache_wsgi() { s/%APACHE_NAME%/${APACHE_NAME}/g; s/%USER%/${STACK_USER}/g; " -i ${apache_vhost_dir}/proxy-server + enable_apache_site proxy-server sudo cp ${SWIFT_DIR}/examples/wsgi/proxy-server.wsgi.template ${SWIFT_APACHE_WSGI_DIR}/proxy-server.wsgi sudo sed -e " /^#/d;/^$/d; s/%SERVICECONF%/proxy-server.conf/g; " -i ${SWIFT_APACHE_WSGI_DIR}/proxy-server.wsgi - ! is_fedora && sudo a2ensite proxy-server # copy apache vhost file and set name and port for node_number in ${SWIFT_REPLICAS_SEQ}; do @@ -161,7 +161,7 @@ function _config_swift_apache_wsgi() { s/%APACHE_NAME%/${APACHE_NAME}/g; s/%USER%/${STACK_USER}/g; " -i ${apache_vhost_dir}/object-server-${node_number} - ! is_fedora && sudo a2ensite object-server-${node_number} + enable_apache_site object-server-${node_number} sudo cp ${SWIFT_DIR}/examples/wsgi/object-server.wsgi.template ${SWIFT_APACHE_WSGI_DIR}/object-server-${node_number}.wsgi sudo sed -e " @@ -177,7 +177,7 @@ function _config_swift_apache_wsgi() { s/%APACHE_NAME%/${APACHE_NAME}/g; s/%USER%/${STACK_USER}/g; " -i ${apache_vhost_dir}/container-server-${node_number} - ! is_fedora && sudo a2ensite container-server-${node_number} + enable_apache_site container-server-${node_number} sudo cp ${SWIFT_DIR}/examples/wsgi/container-server.wsgi.template ${SWIFT_APACHE_WSGI_DIR}/container-server-${node_number}.wsgi sudo sed -e " @@ -193,18 +193,14 @@ function _config_swift_apache_wsgi() { s/%APACHE_NAME%/${APACHE_NAME}/g; s/%USER%/${STACK_USER}/g; " -i ${apache_vhost_dir}/account-server-${node_number} - ! 
is_fedora && sudo a2ensite account-server-${node_number} + enable_apache_site account-server-${node_number} sudo cp ${SWIFT_DIR}/examples/wsgi/account-server.wsgi.template ${SWIFT_APACHE_WSGI_DIR}/account-server-${node_number}.wsgi sudo sed -e " /^#/d;/^$/d; s/%SERVICECONF%/account-server\/${node_number}.conf/g; " -i ${SWIFT_APACHE_WSGI_DIR}/account-server-${node_number}.wsgi - done - - # WSGI isn't enabled by default, enable it - ! is_fedora && sudo a2enmod wsgi } # configure_swift() - Set config files, create data dirs and loop image From a00e5f8810b6ca3b0b5d63cc228125e19bc91955 Mon Sep 17 00:00:00 2001 From: Jamie Lennox Date: Tue, 17 Sep 2013 12:47:03 +1000 Subject: [PATCH 0376/4704] Allow keystone to run from apache Provide a template for running keystone as a mod_wsgi process and enable it from configuration. Based on: https://review.openstack.org/#/c/36474/ Also-by: zhang-hare Implements: blueprint devstack-setup-apache-keystone Change-Id: Icc9d7ddfa4a488c08816ff4ae0b53c0134a1016b --- files/apache-keystone.template | 22 ++++++++++++++++ lib/keystone | 47 ++++++++++++++++++++++++++++++++-- 2 files changed, 67 insertions(+), 2 deletions(-) create mode 100644 files/apache-keystone.template diff --git a/files/apache-keystone.template b/files/apache-keystone.template new file mode 100644 index 0000000000..919452a040 --- /dev/null +++ b/files/apache-keystone.template @@ -0,0 +1,22 @@ +Listen %PUBLICPORT% +Listen %ADMINPORT% + + + WSGIDaemonProcess keystone-public processes=5 threads=1 user=%USER% + WSGIProcessGroup keystone-public + WSGIScriptAlias / %PUBLICWSGI% + WSGIApplicationGroup %{GLOBAL} + ErrorLog /var/log/%APACHE_NAME%/keystone + LogLevel debug + CustomLog /var/log/%APACHE_NAME%/access.log combined + + + + WSGIDaemonProcess keystone-admin processes=5 threads=1 user=%USER% + WSGIProcessGroup keystone-admin + WSGIScriptAlias / %ADMINWSGI% + WSGIApplicationGroup %{GLOBAL} + ErrorLog /var/log/%APACHE_NAME%/keystone + LogLevel debug + CustomLog 
/var/log/%APACHE_NAME%/access.log combined + diff --git a/lib/keystone b/lib/keystone index 3642904e1c..c4b2dff93b 100755 --- a/lib/keystone +++ b/lib/keystone @@ -14,11 +14,13 @@ # # install_keystone # configure_keystone +# _config_keystone_apache_wsgi # init_keystone # start_keystone # create_keystone_accounts # stop_keystone # cleanup_keystone +# _cleanup_keystone_apache_wsgi # Save trace setting XTRACE=$(set +o | grep xtrace) @@ -34,6 +36,7 @@ KEYSTONE_CONF_DIR=${KEYSTONE_CONF_DIR:-/etc/keystone} KEYSTONE_CONF=$KEYSTONE_CONF_DIR/keystone.conf KEYSTONE_PASTE_INI=${KEYSTONE_PASTE_INI:-$KEYSTONE_CONF_DIR/keystone-paste.ini} KEYSTONE_AUTH_CACHE_DIR=${KEYSTONE_AUTH_CACHE_DIR:-/var/cache/keystone} +KEYSTONE_WSGI_DIR=${KEYSTONE_WSGI_DIR:-/var/www/keystone} KEYSTONECLIENT_DIR=$DEST/python-keystoneclient @@ -86,6 +89,33 @@ function cleanup_keystone() { : } +# _cleanup_keystone_apache_wsgi() - Remove wsgi files, disable and remove apache vhost file +function _cleanup_keystone_apache_wsgi() { + sudo rm -f $KEYSTONE_WSGI_DIR/*.wsgi + disable_apache_site keystone + sudo rm -f /etc/$APACHE_NAME/$APACHE_CONF_DIR/keystone +} + +# _config_keystone_apache_wsgi() - Set WSGI config files of Keystone +function _config_keystone_apache_wsgi() { + sudo mkdir -p $KEYSTONE_WSGI_DIR + + # copy proxy vhost and wsgi file + sudo cp $KEYSTONE_DIR/httpd/keystone.py $KEYSTONE_WSGI_DIR/main + sudo cp $KEYSTONE_DIR/httpd/keystone.py $KEYSTONE_WSGI_DIR/admin + + sudo cp $FILES/apache-keystone.template /etc/$APACHE_NAME/$APACHE_CONF_DIR/keystone + sudo sed -e " + s|%PUBLICPORT%|$KEYSTONE_SERVICE_PORT|g; + s|%ADMINPORT%|$KEYSTONE_AUTH_PORT|g; + s|%APACHE_NAME%|$APACHE_NAME|g; + s|%PUBLICWSGI%|$KEYSTONE_WSGI_DIR/main|g; + s|%ADMINWSGI%|$KEYSTONE_WSGI_DIR/admin|g; + s|%USER%|$STACK_USER|g + " -i /etc/$APACHE_NAME/$APACHE_CONF_DIR/keystone + enable_apache_site keystone +} + # configure_keystone() - Set config files, create data dirs, etc function configure_keystone() { if [[ ! 
-d $KEYSTONE_CONF_DIR ]]; then @@ -204,6 +234,10 @@ function configure_keystone() { cp $KEYSTONE_DIR/etc/logging.conf.sample $KEYSTONE_CONF_DIR/logging.conf iniset $KEYSTONE_CONF_DIR/logging.conf logger_root level "DEBUG" iniset $KEYSTONE_CONF_DIR/logging.conf logger_root handlers "devel,production" + + if is_apache_enabled_service key; then + _config_keystone_apache_wsgi + fi } # create_keystone_accounts() - Sets up common required keystone accounts @@ -316,6 +350,9 @@ function install_keystone() { fi git_clone $KEYSTONE_REPO $KEYSTONE_DIR $KEYSTONE_BRANCH setup_develop $KEYSTONE_DIR + if is_apache_enabled_service key; then + install_apache_wsgi + fi } # start_keystone() - Start running processes, including screen @@ -326,8 +363,14 @@ function start_keystone() { service_port=$KEYSTONE_SERVICE_PORT_INT fi - # Start Keystone in a screen window - screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone-all --config-file $KEYSTONE_CONF $KEYSTONE_LOG_CONFIG -d --debug" + if is_apache_enabled_service key; then + restart_apache_server + screen_it key "cd $KEYSTONE_DIR && sudo tail -f /var/log/$APACHE_NAME/keystone" + else + # Start Keystone in a screen window + screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone-all --config-file $KEYSTONE_CONF $KEYSTONE_LOG_CONFIG -d --debug" + fi + echo "Waiting for keystone to start..." if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= curl -s http://$SERVICE_HOST:$service_port/v$IDENTITY_API_VERSION/ >/dev/null; do sleep 1; done"; then die $LINENO "keystone did not start" From 06d17eb54ad37e6d21eafcded52cc581a56d328b Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 24 Sep 2013 07:04:11 -0400 Subject: [PATCH 0377/4704] make git show not display a diff if the top commit includes actual content, this was being displayed in the devstack log, which was just confusing. --quiet suppresses this. 
Change-Id: Id52604d3b2b9a1372746120cb5d8d741c35576b7 --- functions | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/functions b/functions index 209f13c7d7..d590ac52f6 100644 --- a/functions +++ b/functions @@ -556,7 +556,7 @@ function git_clone { echo "Running in offline mode, clones already exist" # print out the results so we know what change was used in the logs cd $GIT_DEST - git show --oneline + git show --oneline --quiet return fi @@ -604,7 +604,7 @@ function git_clone { # print out the results so we know what change was used in the logs cd $GIT_DEST - git show --oneline + git show --oneline --quiet } From fb434b28d057d279a8351776f7909102def571dd Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Tue, 24 Sep 2013 15:58:37 +0100 Subject: [PATCH 0378/4704] xenapi: Get rid of prompt settings Remove the setting of prompt from prepare guest, keeping the scripts smaller. Change-Id: Ifb2b3aba07831e1552d6a1c6cd2081592c43ccf6 --- tools/xen/prepare_guest.sh | 5 ----- 1 file changed, 5 deletions(-) diff --git a/tools/xen/prepare_guest.sh b/tools/xen/prepare_guest.sh index 6ec5ffa546..05ac86cf99 100755 --- a/tools/xen/prepare_guest.sh +++ b/tools/xen/prepare_guest.sh @@ -56,11 +56,6 @@ EOF # Give ownership of /opt/stack to stack user chown -R $STACK_USER /opt/stack -# Make our ip address hostnames look nice at the command prompt -echo "export PS1='${debian_chroot:+($debian_chroot)}\\u@\\H:\\w\\$ '" >> /opt/stack/.bashrc -echo "export PS1='${debian_chroot:+($debian_chroot)}\\u@\\H:\\w\\$ '" >> /root/.bashrc -echo "export PS1='${debian_chroot:+($debian_chroot)}\\u@\\H:\\w\\$ '" >> /etc/profile - function setup_vimrc { if [ ! -e $1 ]; then # Simple but usable vimrc From f5002ef12a890fd3110782c873d99487a4d05b17 Mon Sep 17 00:00:00 2001 From: Roman Prykhodchenko Date: Tue, 24 Sep 2013 19:09:26 +0300 Subject: [PATCH 0379/4704] Expose all versions of Ironic API In the observable future new versions of the Ironic API will appear. 
That's why it's reasonable to expose the endpoint that will provide access to all versions of the API. Closes-Bug: #1229780 Change-Id: I4ec2b45688da3fa6c0d43e8be60885774cfbffd6 --- lib/ironic | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/ironic b/lib/ironic index 2ce5038ea4..072d2ded82 100644 --- a/lib/ironic +++ b/lib/ironic @@ -148,9 +148,9 @@ create_ironic_accounts() { keystone endpoint-create \ --region RegionOne \ --service_id $IRONIC_SERVICE \ - --publicurl "$IRONIC_SERVICE_PROTOCOL://$IRONIC_HOSTPORT/v1/" \ - --adminurl "$IRONIC_SERVICE_PROTOCOL://$IRONIC_HOSTPORT/v1/" \ - --internalurl "$IRONIC_SERVICE_PROTOCOL://$IRONIC_HOSTPORT/v1/" + --publicurl "$IRONIC_SERVICE_PROTOCOL://$IRONIC_HOSTPORT" \ + --adminurl "$IRONIC_SERVICE_PROTOCOL://$IRONIC_HOSTPORT" \ + --internalurl "$IRONIC_SERVICE_PROTOCOL://$IRONIC_HOSTPORT" fi fi } From 6d23500aa66e3d399cd263c2fb1d07dba0e0170c Mon Sep 17 00:00:00 2001 From: Kyle Mestery Date: Wed, 18 Sep 2013 20:27:08 +0000 Subject: [PATCH 0380/4704] Default to the ML2 plugin in Neutron instead of OVS In Icehouse, the OVS and LinuxBridge plugins are being deprecated in favor of the Modular Layer 2 (ML2) plugin. This change modifies devstack to default to ML2 when Neutron is used for networking. 
Fixes bug 1220745 Change-Id: I9a4c84b04727a710219fc11f862a655309ffb99b --- lib/neutron | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron b/lib/neutron index 4a3d1b06a6..b1f96fce79 100644 --- a/lib/neutron +++ b/lib/neutron @@ -88,7 +88,7 @@ NEUTRON_CONF=$NEUTRON_CONF_DIR/neutron.conf export NEUTRON_TEST_CONFIG_FILE=${NEUTRON_TEST_CONFIG_FILE:-"$NEUTRON_CONF_DIR/debug.ini"} # Default Neutron Plugin -Q_PLUGIN=${Q_PLUGIN:-openvswitch} +Q_PLUGIN=${Q_PLUGIN:-ml2} # Default Neutron Port Q_PORT=${Q_PORT:-9696} # Default Neutron Host From 2dc11fb5c843ccf057fac9e01cf6beca7d877421 Mon Sep 17 00:00:00 2001 From: Morgan Fainberg Date: Tue, 24 Sep 2013 23:43:08 -0700 Subject: [PATCH 0381/4704] Update user_attribute_ignore for LDAP Identity config With a recent patch to keystone, the use of tenantId, tenant_id, and default_project_id was normalized to reference default_project_id for all cases internally and translate to the expected results at the controller (v2.0 returns tenantId, v3 returns default_project_id). Devstack must now properly ignore the expected mapped LDAP attribute of 'default_project_id' instead of the old 'tenantId'. Without this fix devstack will fail when using the LDAP identity backend because the 'default_project_id' has been made a special case that requires the operator of a Openstack cloud to explicitly choose an attribute to map 'default_project_id' to if storing that value is desired. Without explicitly mapping that attribute and not having it in the 'user_attribute_ignore' config option, the user_creates can fail. 
related-bug: 1219739 Change-Id: I1dd3719de50f6d0948b3a9743e32a03d0ac56b3c --- lib/keystone | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/keystone b/lib/keystone index 3642904e1c..bc5d1511b8 100755 --- a/lib/keystone +++ b/lib/keystone @@ -115,7 +115,7 @@ function configure_keystone() { iniset $KEYSTONE_CONF ldap user "dc=Manager,dc=openstack,dc=org" iniset $KEYSTONE_CONF ldap suffix "dc=openstack,dc=org" iniset $KEYSTONE_CONF ldap use_dumb_member "True" - iniset $KEYSTONE_CONF ldap user_attribute_ignore "enabled,email,tenants,tenantId" + iniset $KEYSTONE_CONF ldap user_attribute_ignore "enabled,email,tenants,default_project_id" iniset $KEYSTONE_CONF ldap tenant_attribute_ignore "enabled" iniset $KEYSTONE_CONF ldap tenant_domain_id_attribute "businessCategory" iniset $KEYSTONE_CONF ldap tenant_desc_attribute "description" From a8d41e3af70309fb9c8df150ef162685bae41ee4 Mon Sep 17 00:00:00 2001 From: Sirushti Murugesan Date: Wed, 25 Sep 2013 11:30:31 +0530 Subject: [PATCH 0382/4704] Normalise RECLONE flag to True Or False. RECLONE flag now uses function trueorfalse for flag handling. Added more flag cases to normalisation function trueorfalse. 
Fixes bug #1200382 Change-Id: I0738537c87634281c6a92fa93b7f84a6b0dad497 --- functions | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/functions b/functions index 4c4487f9cb..6cdee78db6 100644 --- a/functions +++ b/functions @@ -551,6 +551,7 @@ function git_clone { GIT_REMOTE=$1 GIT_DEST=$2 GIT_REF=$3 + RECLONE=$(trueorfalse False $RECLONE) if [[ "$OFFLINE" = "True" ]]; then echo "Running in offline mode, clones already exist" @@ -576,7 +577,7 @@ function git_clone { cd $GIT_DEST # This checkout syntax works for both branches and tags git checkout $GIT_REF - elif [[ "$RECLONE" == "yes" ]]; then + elif [[ "$RECLONE" = "True" ]]; then # if it does exist then simulate what clone does if asked to RECLONE cd $GIT_DEST # set the url to pull from and fetch @@ -1260,16 +1261,16 @@ function stop_service() { # Normalize config values to True or False -# Accepts as False: 0 no false False FALSE -# Accepts as True: 1 yes true True TRUE +# Accepts as False: 0 no No NO false False FALSE +# Accepts as True: 1 yes Yes YES true True TRUE # VAR=$(trueorfalse default-value test-value) function trueorfalse() { local default=$1 local testval=$2 [[ -z "$testval" ]] && { echo "$default"; return; } - [[ "0 no false False FALSE" =~ "$testval" ]] && { echo "False"; return; } - [[ "1 yes true True TRUE" =~ "$testval" ]] && { echo "True"; return; } + [[ "0 no No NO false False FALSE" =~ "$testval" ]] && { echo "False"; return; } + [[ "1 yes Yes YES true True TRUE" =~ "$testval" ]] && { echo "True"; return; } echo "$default" } From 45ea08115074a78b2bb31cf9f880eddf1e7051aa Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Wed, 25 Sep 2013 10:00:29 +0100 Subject: [PATCH 0383/4704] Fix typo in lib/heat With the change https://review.openstack.org/43006 a typo was introduced. This change fixes it. 
Change-Id: Iebcbfe49d779552c17f6ab216976149f332b772c --- lib/heat | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/heat b/lib/heat index ac769162db..ff9473ecdb 100644 --- a/lib/heat +++ b/lib/heat @@ -1,4 +1,4 @@ -etup lib/heat +# lib/heat # Install and start **Heat** service # To enable, add the following to localrc From 93f3b8693af1250b4b6925e83c33662c4dcd9636 Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Tue, 24 Sep 2013 17:35:00 +0100 Subject: [PATCH 0384/4704] xenapi: enable user interaction with stack.sh In XenServer, devstack runs inside a virtual machine. This makes it hard for the user to interact with stack.sh should a parameter be missing. This change will create an upstart job (devstack) that runs stack.sh with console enabled so user can interact with it by running vncviewer or using XenCenter. Logging the output is also disabled, stamp files are used instead to detect the script run status in case install_os_domU.sh is used. As run.sh.log is removed, standard devstack logging should be used. The change also removes the environment settings from run.sh, as they are not needed, they should be specified in localrc. 
This way user cannot get different experiences by using unstack.sh/stack.sh or run.sh Also a proper unstack.sh is called instead of killing screen in run.sh Change-Id: I7eb12bd74746cc7a1db3aa9fd68ece645a50001d --- tools/xen/build_xva.sh | 40 ++++++++++++++++++++++++++++-------- tools/xen/install_os_domU.sh | 24 +++++++++------------- 2 files changed, 41 insertions(+), 23 deletions(-) diff --git a/tools/xen/build_xva.sh b/tools/xen/build_xva.sh index d0cdf17391..7272fe2664 100755 --- a/tools/xen/build_xva.sh +++ b/tools/xen/build_xva.sh @@ -93,13 +93,34 @@ mkdir -p $STAGING_DIR/opt/stack/devstack tar xf /tmp/devstack.tar -C $STAGING_DIR/opt/stack/devstack cd $TOP_DIR -# Run devstack on launch -cat <$STAGING_DIR/etc/rc.local -# network restart required for getting the right gateway -/etc/init.d/networking restart -chown -R $STACK_USER /opt/stack -su -c "/opt/stack/run.sh > /opt/stack/run.sh.log" $STACK_USER -exit 0 +# Create an upstart job (task) for devstack, which can interact with the console +cat >$STAGING_DIR/etc/init/devstack.conf << EOF +start on stopped rc RUNLEVEL=[2345] + +console output +task + +pre-start script + rm -f /var/run/devstack.succeeded +end script + +script + initctl stop hvc0 || true + + # Read any leftover characters from standard input + while read -n 1 -s -t 0.1 -r ignored; do + true + done + + clear + + chown -R $STACK_USER /opt/stack + + if su -c "/opt/stack/run.sh" $STACK_USER; then + touch /var/run/devstack.succeeded + fi + initctl start hvc0 > /dev/null 2>&1 +end script EOF # Configure the hostname @@ -138,8 +159,9 @@ fi # Configure run.sh cat <$STAGING_DIR/opt/stack/run.sh #!/bin/bash +set -eux cd /opt/stack/devstack -killall screen -VIRT_DRIVER=xenserver FORCE=yes MULTI_HOST=$MULTI_HOST HOST_IP_IFACE=$HOST_IP_IFACE $STACKSH_PARAMS ./stack.sh +./unstack.sh || true +./stack.sh EOF chmod 755 $STAGING_DIR/opt/stack/run.sh diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh index 110bbd998c..a0cfe27caf 100755 --- 
a/tools/xen/install_os_domU.sh +++ b/tools/xen/install_os_domU.sh @@ -350,25 +350,20 @@ COPYENV=${COPYENV:-1} if [ "$WAIT_TILL_LAUNCH" = "1" ] && [ -e ~/.ssh/id_rsa.pub ] && [ "$COPYENV" = "1" ]; then set +x - echo "VM Launched - Waiting for startup script" - # wait for log to appear - while ! ssh_no_check -q stack@$OS_VM_MANAGEMENT_ADDRESS "[ -e run.sh.log ]"; do + echo "VM Launched - Waiting for devstack to start" + while ! ssh_no_check -q stack@$OS_VM_MANAGEMENT_ADDRESS "service devstack status | grep -q running"; do sleep 10 done - echo -n "Running" - while [ `ssh_no_check -q stack@$OS_VM_MANAGEMENT_ADDRESS pgrep -c run.sh` -ge 1 ] - do + echo -n "devstack is running" + while ssh_no_check -q stack@$OS_VM_MANAGEMENT_ADDRESS "service devstack status | grep -q running"; do sleep 10 echo -n "." done echo "done!" set -x - # output the run.sh.log - ssh_no_check -q stack@$OS_VM_MANAGEMENT_ADDRESS 'cat run.sh.log' - - # Fail if the expected text is not found - ssh_no_check -q stack@$OS_VM_MANAGEMENT_ADDRESS 'cat run.sh.log' | grep -q 'stack.sh completed in' + # Fail if devstack did not succeed + ssh_no_check -q stack@$OS_VM_MANAGEMENT_ADDRESS 'test -e /var/run/devstack.succeeded' set +x echo "################################################################################" @@ -382,11 +377,12 @@ else echo "" echo "All Finished!" echo "Now, you can monitor the progress of the stack.sh installation by " - echo "tailing /opt/stack/run.sh.log from within your domU." + echo "looking at the console of your domU / checking the log files." echo "" echo "ssh into your domU now: 'ssh stack@$OS_VM_MANAGEMENT_ADDRESS' using your password" - echo "and then do: 'tail -f /opt/stack/run.sh.log'" + echo "and then do: 'sudo service devstack status' to check if devstack is still running." 
+ echo "Check that /var/run/devstack.succeeded exists" echo "" - echo "When the script completes, you can then visit the OpenStack Dashboard" + echo "When devstack completes, you can visit the OpenStack Dashboard" echo "at http://$OS_VM_SERVICES_ADDRESS, and contact other services at the usual ports." fi From 45a21f0e54def308f1d05440f030b49346b73fad Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 25 Sep 2013 10:27:27 -0400 Subject: [PATCH 0385/4704] change git show to | head -1 git show uses default system pager, which for people that have funky pagers, goes sideways. Pipe this through head -1 to ensure we only get the single change line we care about. Change-Id: Iff22612b555bf58fe12101701cfe593f37e8f8de --- functions | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/functions b/functions index f30c7adf15..8c0bc2893c 100644 --- a/functions +++ b/functions @@ -556,7 +556,7 @@ function git_clone { echo "Running in offline mode, clones already exist" # print out the results so we know what change was used in the logs cd $GIT_DEST - git show --oneline --quiet + git show --oneline | head -1 return fi @@ -604,7 +604,7 @@ function git_clone { # print out the results so we know what change was used in the logs cd $GIT_DEST - git show --oneline --quiet + git show --oneline | head -1 } From de60f48ad9d721bafb376a4b18516f3aad60527a Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Wed, 25 Sep 2013 15:38:24 +0100 Subject: [PATCH 0386/4704] fix tee errors Do not specify /dev/fd3 directly, use >&3 instead. This change enables to use stack.sh as an upstart script, and with VERBOSE=False, it will print the expected messages. 
Fixes bug 1230342 Change-Id: I6e3a81fd435e8c46d553bfdee08f8bf42d0f4387 --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 71e7317ee3..449eb06751 100755 --- a/stack.sh +++ b/stack.sh @@ -518,7 +518,7 @@ if [[ -n "$LOGFILE" ]]; then # Set fd 1 and 2 to primary logfile exec 1> "${LOGFILE}" 2>&1 # Set fd 6 to summary logfile and stdout - exec 6> >( tee "${SUMFILE}" /dev/fd/3 ) + exec 6> >( tee "${SUMFILE}" >&3 ) fi echo_summary "stack.sh log $LOGFILE" From 7b7bc9209a533c371a13946eac35f3fa6243f74a Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 23 Sep 2013 13:56:19 -0500 Subject: [PATCH 0387/4704] vSphere hypervisor plugin Convert vSphere hypervisor configuration in Nova to the new plugin setup. Change-Id: Ibf6f5918e6a8d8a7b7784dac832d806e993cff8f --- lib/nova_plugins/hypervisor-vsphere | 72 +++++++++++++++++++++++++++++ stack.sh | 16 ------- 2 files changed, 72 insertions(+), 16 deletions(-) create mode 100644 lib/nova_plugins/hypervisor-vsphere diff --git a/lib/nova_plugins/hypervisor-vsphere b/lib/nova_plugins/hypervisor-vsphere new file mode 100644 index 0000000000..1666246374 --- /dev/null +++ b/lib/nova_plugins/hypervisor-vsphere @@ -0,0 +1,72 @@ +# lib/nova_plugins/hypervisor-vsphere +# Configure the vSphere hypervisor + +# Enable with: +# VIRT_DRIVER=vsphere + +# Dependencies: +# ``functions`` file +# ``nova`` configuration + +# install_nova_hypervisor - install any external requirements +# configure_nova_hypervisor - make configuration changes, including those to other services +# start_nova_hypervisor - start any external services +# stop_nova_hypervisor - stop any external services +# cleanup_nova_hypervisor - remove transient data and cache + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + + +# Entry Points +# ------------ + +# clean_nova_hypervisor - Clean up an installation +function cleanup_nova_hypervisor() { + # This function intentionally 
left blank + : +} + +# configure_nova_hypervisor - Set config files, create data dirs, etc +function configure_nova_hypervisor() { + iniset $NOVA_CONF DEFAULT compute_driver "vmwareapi.VMwareVCDriver" + VMWAREAPI_USER=${VMWAREAPI_USER:-"root"} + iniset $NOVA_CONF vmware host_ip "$VMWAREAPI_IP" + iniset $NOVA_CONF vmware host_username "$VMWAREAPI_USER" + iniset $NOVA_CONF vmware host_password "$VMWAREAPI_PASSWORD" + iniset $NOVA_CONF vmware cluster_name "$VMWAREAPI_CLUSTER" + if is_service_enabled neutron; then + iniset $NOVA_CONF vmware integration_bridge $OVS_BRIDGE + fi +} + +# install_nova_hypervisor() - Install external components +function install_nova_hypervisor() { + # This function intentionally left blank + : +} + +# start_nova_hypervisor - Start any required external services +function start_nova_hypervisor() { + # This function intentionally left blank + : +} + +# stop_nova_hypervisor - Stop any external services +function stop_nova_hypervisor() { + # This function intentionally left blank + : +} + + +# Restore xtrace +$MY_XTRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/stack.sh b/stack.sh index 6bab2183d6..f42437104d 100755 --- a/stack.sh +++ b/stack.sh @@ -1089,22 +1089,6 @@ if is_service_enabled nova; then iniset $NOVA_CONF DEFAULT powervm_img_local_path $POWERVM_IMG_LOCAL_PATH - # vSphere API - # ----------- - - elif [ "$VIRT_DRIVER" = 'vsphere' ]; then - echo_summary "Using VMware vCenter driver" - iniset $NOVA_CONF DEFAULT compute_driver "vmwareapi.VMwareVCDriver" - VMWAREAPI_USER=${VMWAREAPI_USER:-"root"} - iniset $NOVA_CONF vmware host_ip "$VMWAREAPI_IP" - iniset $NOVA_CONF vmware host_username "$VMWAREAPI_USER" - iniset $NOVA_CONF vmware host_password "$VMWAREAPI_PASSWORD" - iniset $NOVA_CONF vmware cluster_name "$VMWAREAPI_CLUSTER" - if is_service_enabled neutron; then - iniset $NOVA_CONF vmware integration_bridge $OVS_BRIDGE - fi - - # Default libvirt # --------------- From da481d0d0a641c72fbc98c57711370f3f7309113 Mon 
Sep 17 00:00:00 2001 From: Mate Lakat Date: Thu, 26 Sep 2013 13:57:02 +0100 Subject: [PATCH 0388/4704] xenapi: display IP and DevStack result on console The devstack setup service can update /etc/issue, displaying the status of the installation and the VM's management IP. With this change, after the devstack service finsihed, the login prompt will look like this: OpenStack VM - Installed by DevStack Management IP: 10.219.3.108 Devstack run: SUCCEEDED DevStackOSDomU login: This helps people to log in to their system. Change-Id: Idd6bbd5faf9ced5618cd3e95191bfc3b89473fa2 --- tools/xen/build_xva.sh | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/tools/xen/build_xva.sh b/tools/xen/build_xva.sh index 7272fe2664..958102b29c 100755 --- a/tools/xen/build_xva.sh +++ b/tools/xen/build_xva.sh @@ -119,6 +119,20 @@ script if su -c "/opt/stack/run.sh" $STACK_USER; then touch /var/run/devstack.succeeded fi + + # Update /etc/issue + { + echo "OpenStack VM - Installed by DevStack" + IPADDR=\$(ip -4 address show eth0 | sed -n 's/.*inet \\([0-9\.]\\+\\).*/\1/p') + echo " Management IP: \$IPADDR" + echo -n " Devstack run: " + if [ -e /var/run/devstack.succeeded ]; then + echo "SUCCEEDED" + else + echo "FAILED" + fi + echo "" + } > /etc/issue initctl start hvc0 > /dev/null 2>&1 end script EOF From 3d84cf2d7c323750971cf2d27f3a4eaa26cb7a9f Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Thu, 12 Sep 2013 13:25:54 -0400 Subject: [PATCH 0389/4704] Enable tenant isolation to tempest for neutron This commit re-enables tenant isolation in tempest for neutron. This is a requirement for running tempest in parallel. 
This commit depends on tempest change I7587c85017cca09f7a67eae0670f67b2bceacb60 Fixes bug 1216076 Change-Id: I63a30bacd48cecd110fb90e1fc718249c2b1904b --- lib/tempest | 5 ----- 1 file changed, 5 deletions(-) diff --git a/lib/tempest b/lib/tempest index bc0b18d9f4..646d42b8bd 100644 --- a/lib/tempest +++ b/lib/tempest @@ -230,11 +230,6 @@ function configure_tempest() { # Compute iniset $TEMPEST_CONF compute change_password_available False - # Note(nati) current tempest don't create network for each tenant - # so reuse same tenant for now - if is_service_enabled neutron; then - TEMPEST_ALLOW_TENANT_ISOLATION=${TEMPEST_ALLOW_TENANT_ISOLATION:-False} - fi iniset $TEMPEST_CONF compute allow_tenant_isolation ${TEMPEST_ALLOW_TENANT_ISOLATION:-True} iniset $TEMPEST_CONF compute ssh_user ${DEFAULT_INSTANCE_USER:-cirros} # DEPRECATED iniset $TEMPEST_CONF compute network_for_ssh $PRIVATE_NETWORK_NAME From 93a7a50c1d4ff1a5cb5e6bd2162102c27fcdbe2d Mon Sep 17 00:00:00 2001 From: Vincent Hou Date: Fri, 27 Sep 2013 06:16:54 -0400 Subject: [PATCH 0390/4704] Add the creation of /var/run/openstack when zeromq is chosen Fixed Bug 1200539. Change-Id: I270623da7026e94d9ece4d5f510cad5a6c4d79ff --- lib/rpc_backend | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/rpc_backend b/lib/rpc_backend index ff87aae2af..63edc07460 100644 --- a/lib/rpc_backend +++ b/lib/rpc_backend @@ -131,6 +131,9 @@ function install_rpc_backend() { else exit_distro_not_supported "zeromq installation" fi + # Necessary directory for socket location. 
+ sudo mkdir -p /var/run/openstack + sudo chown $STACK_USER /var/run/openstack fi } From 384454de57299981f8020e75bab781f73bacae86 Mon Sep 17 00:00:00 2001 From: Giulio Fidente Date: Fri, 27 Sep 2013 13:17:34 +0200 Subject: [PATCH 0391/4704] ensure tgtd is running in debug mode this change enables on-the-fly tgtd debug before starting cinder Change-Id: I193bfd77c5a82e8347d75e2a7fe670a6e25f5558 --- lib/cinder | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/cinder b/lib/cinder index bec65ed234..ccf38b4dea 100644 --- a/lib/cinder +++ b/lib/cinder @@ -496,6 +496,8 @@ function start_cinder() { # name, and would need to be adjusted too exit_distro_not_supported "restarting tgt" fi + # NOTE(gfidente): ensure tgtd is running in debug mode + sudo tgtadm --mode system --op update --name debug --value on fi screen_it c-api "cd $CINDER_DIR && $CINDER_BIN_DIR/cinder-api --config-file $CINDER_CONF" From 53d6fa604df71ea7294ee9043e420d155c6fd846 Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Fri, 27 Sep 2013 12:30:28 +0100 Subject: [PATCH 0392/4704] xenapi: set dhcp timeout on VM installation Set the DHCP timeout to 120 seconds during virtual machine installation. Some users failed to run devstack, due to a low DHCP timeout setting. The default value is 60 seconds. This change sets the value to 120 secs, that should give enough time for most people. Change-Id: I15fde45ed0d005c1a8621134eee6c3c338b5be5d --- tools/xen/devstackubuntupreseed.cfg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/xen/devstackubuntupreseed.cfg b/tools/xen/devstackubuntupreseed.cfg index c559b1e9f5..6a1ae89fd9 100644 --- a/tools/xen/devstackubuntupreseed.cfg +++ b/tools/xen/devstackubuntupreseed.cfg @@ -34,7 +34,7 @@ d-i netcfg/choose_interface select auto # If you have a slow dhcp server and the installer times out waiting for # it, this might be useful. 
-#d-i netcfg/dhcp_timeout string 60 +d-i netcfg/dhcp_timeout string 120 # If you prefer to configure the network manually, uncomment this line and # the static network configuration below. From d9883407e910da0fc8307f12f76c0c8e594321fe Mon Sep 17 00:00:00 2001 From: Cyril Roelandt Date: Fri, 27 Sep 2013 15:16:51 +0000 Subject: [PATCH 0393/4704] Revert "Revert "Swift: configure Ceilometer when it is enabled"" Commit 6650fda680310e71b5dda7764bf4033f670d90f0 is no longer needed: https://review.openstack.org/#/c/46048 has been merged. This reverts commit 6650fda680310e71b5dda7764bf4033f670d90f0. Change-Id: I47d28a292667eb8ece2061c0ef19c7c925e5747c --- lib/swift | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/lib/swift b/lib/swift index 9c80802ba9..c0dec97c36 100644 --- a/lib/swift +++ b/lib/swift @@ -67,6 +67,10 @@ SWIFT_LOOPBACK_DISK_SIZE=${SWIFT_LOOPBACK_DISK_SIZE:-$SWIFT_LOOPBACK_DISK_SIZE_D # Default is ``staticweb, tempurl, formpost`` SWIFT_EXTRAS_MIDDLEWARE=${SWIFT_EXTRAS_MIDDLEWARE:-tempurl formpost staticweb} +# Set ``SWIFT_EXTRAS_MIDDLEWARE_LAST`` to extras middlewares that need to be at +# the end of the pipeline. +SWIFT_EXTRAS_MIDDLEWARE_LAST=${SWIFT_EXTRAS_MIDDLEWARE_LAST} + # The ring uses a configurable number of bits from a path’s MD5 hash as # a partition index that designates a device. The number of bits kept # from the hash is known as the partition power, and 2 to the partition @@ -255,6 +259,12 @@ function configure_swift() { iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port ${SWIFT_DEFAULT_BIND_PORT:-8080} + # Configure Ceilometer + if is_service_enabled ceilometer; then + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer use "egg:ceilometer#swift" + SWIFT_EXTRAS_MIDDLEWARE_LAST="${SWIFT_EXTRAS_MIDDLEWARE_LAST} ceilometer" + fi + # By default Swift will be installed with keystone and tempauth middleware # and add the swift3 middleware if its configured for it. 
The token for # tempauth would be prefixed with the reseller_prefix setting TEMPAUTH_ the @@ -264,6 +274,7 @@ function configure_swift() { fi swift_pipeline+=" authtoken keystoneauth tempauth " sed -i "/^pipeline/ { s/tempauth/${swift_pipeline} ${SWIFT_EXTRAS_MIDDLEWARE}/ ;}" ${SWIFT_CONFIG_PROXY_SERVER} + sed -i "/^pipeline/ { s/proxy-server/${SWIFT_EXTRAS_MIDDLEWARE_LAST} proxy-server/ ; }" ${SWIFT_CONFIG_PROXY_SERVER} iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} filter:tempauth account_autocreate iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server account_autocreate true From dc0bd1a88613b1659b780cc412527ee88f84c2e8 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Fri, 27 Sep 2013 07:45:56 +0200 Subject: [PATCH 0394/4704] Use the rdo havana repo with the RHEL family In devstack viewpoint there is not too much differences at the moment. But using the grizzly named repo close to havana release, was strange to me. Switching to the repo link which does not have a version like '-3'. Change-Id: Ib421d50d19baeeeff264aa0cb9c105fffcf572f8 --- stack.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stack.sh b/stack.sh index 46c3f443c5..4bd186fdc5 100755 --- a/stack.sh +++ b/stack.sh @@ -150,8 +150,8 @@ fi if [[ is_fedora && $DISTRO =~ (rhel6) ]]; then # Installing Open vSwitch on RHEL6 requires enabling the RDO repo. - RHEL6_RDO_REPO_RPM=${RHEL6_RDO_REPO_RPM:-"http://rdo.fedorapeople.org/openstack/openstack-grizzly/rdo-release-grizzly-3.noarch.rpm"} - RHEL6_RDO_REPO_ID=${RHEL6_RDO_REPO_ID:-"openstack-grizzly"} + RHEL6_RDO_REPO_RPM=${RHEL6_RDO_REPO_RPM:-"http://rdo.fedorapeople.org/openstack-havana/rdo-release-havana.rpm"} + RHEL6_RDO_REPO_ID=${RHEL6_RDO_REPO_ID:-"openstack-havana"} if ! 
yum repolist enabled $RHEL6_RDO_REPO_ID | grep -q $RHEL6_RDO_REPO_ID; then echo "RDO repo not detected; installing" yum_install $RHEL6_RDO_REPO_RPM || \ From 0aa8534ada621becb3a6bd14e4e6b5faabde9dd6 Mon Sep 17 00:00:00 2001 From: JUN JIE NAN Date: Fri, 13 Sep 2013 15:47:09 +0800 Subject: [PATCH 0395/4704] Using no proxy option to skip wget and curl proxy settings in config When end users specify proxy settings in config file for wget /etc/wgetrc: http_proxy = http://... or for curl ${HOME}/.curlrc: proxy = http://... Using `http_proxy="" wget' can not skip the proxy setting in the config files, also it can skip proxy settings in env viriables. In order to skip proxy setting in both env and config file, we pass --no-proxy option for wget, and --noproxy '*' for curl. Fixes bug #1224836 Change-Id: I2b25aeca9edf2ce4525fb1db325e5e24c18b4d55 --- functions | 2 +- lib/glance | 2 +- lib/ironic | 2 +- lib/keystone | 2 +- lib/neutron | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/functions b/functions index 83826f9327..fb2f3a3df1 100644 --- a/functions +++ b/functions @@ -1484,7 +1484,7 @@ function use_exclusive_service { function wait_for_service() { local timeout=$1 local url=$2 - timeout $timeout sh -c "while ! http_proxy= https_proxy= curl -s $url >/dev/null; do sleep 1; done" + timeout $timeout sh -c "while ! curl --noproxy '*' -s $url >/dev/null; do sleep 1; done" } diff --git a/lib/glance b/lib/glance index 7e6968200f..c6f11d06da 100644 --- a/lib/glance +++ b/lib/glance @@ -193,7 +193,7 @@ function start_glance() { screen_it g-reg "cd $GLANCE_DIR; $GLANCE_BIN_DIR/glance-registry --config-file=$GLANCE_CONF_DIR/glance-registry.conf" screen_it g-api "cd $GLANCE_DIR; $GLANCE_BIN_DIR/glance-api --config-file=$GLANCE_CONF_DIR/glance-api.conf" echo "Waiting for g-api ($GLANCE_HOSTPORT) to start..." - if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- http://$GLANCE_HOSTPORT; do sleep 1; done"; then + if ! 
timeout $SERVICE_TIMEOUT sh -c "while ! wget --no-proxy -q -O- http://$GLANCE_HOSTPORT; do sleep 1; done"; then die $LINENO "g-api did not start" fi } diff --git a/lib/ironic b/lib/ironic index 072d2ded82..f3b4a72f66 100644 --- a/lib/ironic +++ b/lib/ironic @@ -194,7 +194,7 @@ function start_ironic() { function start_ironic_api() { screen_it ir-api "cd $IRONIC_DIR; $IRONIC_BIN_DIR/ironic-api --config-file=$IRONIC_CONF_FILE" echo "Waiting for ir-api ($IRONIC_HOSTPORT) to start..." - if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- http://$IRONIC_HOSTPORT; do sleep 1; done"; then + if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget --no-proxy -q -O- http://$IRONIC_HOSTPORT; do sleep 1; done"; then die $LINENO "ir-api did not start" fi } diff --git a/lib/keystone b/lib/keystone index 699b94abb5..c93a4367d2 100755 --- a/lib/keystone +++ b/lib/keystone @@ -372,7 +372,7 @@ function start_keystone() { fi echo "Waiting for keystone to start..." - if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= curl -s http://$SERVICE_HOST:$service_port/v$IDENTITY_API_VERSION/ >/dev/null; do sleep 1; done"; then + if ! timeout $SERVICE_TIMEOUT sh -c "while ! curl --noproxy '*' -s http://$SERVICE_HOST:$service_port/v$IDENTITY_API_VERSION/ >/dev/null; do sleep 1; done"; then die $LINENO "keystone did not start" fi diff --git a/lib/neutron b/lib/neutron index 4a3d1b06a6..efbb45c16e 100644 --- a/lib/neutron +++ b/lib/neutron @@ -419,7 +419,7 @@ function start_neutron_service_and_check() { # Start the Neutron service screen_it q-svc "cd $NEUTRON_DIR && python $NEUTRON_BIN_DIR/neutron-server $CFG_FILE_OPTIONS" echo "Waiting for Neutron to start..." - if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- http://$Q_HOST:$Q_PORT; do sleep 1; done"; then + if ! timeout $SERVICE_TIMEOUT sh -c "while ! 
wget --no-proxy -q -O- http://$Q_HOST:$Q_PORT; do sleep 1; done"; then die $LINENO "Neutron did not start" fi } From aee9412b4bad788125e513c9d455283f14ed84de Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Mon, 30 Sep 2013 11:48:00 +0000 Subject: [PATCH 0396/4704] Allow openrc to be loaded in zsh This fix the test then detect OSX in GetOSVersion that break support of zsh. Fixes bug #1233118 Change-Id: If243fbe59f8f08041327057425018d7ae0d13ab2 --- functions | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/functions b/functions index 83826f9327..d9445fe6e7 100644 --- a/functions +++ b/functions @@ -364,7 +364,7 @@ function get_packages() { # GetOSVersion GetOSVersion() { # Figure out which vendor we are - if [[ -n "`which sw_vers 2>/dev/null`" ]]; then + if [[ -x "`which sw_vers 2>/dev/null`" ]]; then # OS/X os_VENDOR=`sw_vers -productName` os_RELEASE=`sw_vers -productVersion` From fa181c30fc7140b1549e955a6a26d11fe015d6ce Mon Sep 17 00:00:00 2001 From: Thomas Maddox Date: Wed, 25 Sep 2013 20:10:22 +0000 Subject: [PATCH 0397/4704] Add back rpc_notifier when ceilometer is enabled This is to reverse what looks like collateral damage from change id Ic375272b751159a64777ca73c1b64515195aacfb. When the Ceilometer service is enabled, we also need to tell nova what to use to send notifications. 
Change-Id: I0015194cfa819e89ef85eae5020fedd6e7d71894 Closes-Bug: #1231158 --- lib/nova | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/nova b/lib/nova index e5c78d8fe1..99cd843ea1 100644 --- a/lib/nova +++ b/lib/nova @@ -510,6 +510,7 @@ function create_nova_conf() { iniset $NOVA_CONF DEFAULT instance_usage_audit "True" iniset $NOVA_CONF DEFAULT instance_usage_audit_period "hour" iniset $NOVA_CONF DEFAULT notify_on_state_change "vm_and_task_state" + iniset $NOVA_CONF DEFAULT notification_driver "nova.openstack.common.notifier.rpc_notifier" fi # Provide some transition from ``EXTRA_FLAGS`` to ``EXTRA_OPTS`` From 78ef1f3b2ce978191955f59fcb63892a692c7173 Mon Sep 17 00:00:00 2001 From: Bob Ball Date: Sun, 29 Sep 2013 11:36:28 +0100 Subject: [PATCH 0398/4704] XenAPI: Get the management network dynamically xenbr0 is correct for most installations, but not all. Notable xenserver-core may use a differently named device. Since we can auto detect this, remove the config and do so. Change-Id: I989f6ddd5ffb526ab350f263ef6fc402c596304a --- tools/xen/install_os_domU.sh | 5 +++++ tools/xen/xenrc | 5 +---- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh index 110bbd998c..08e0f787b0 100755 --- a/tools/xen/install_os_domU.sh +++ b/tools/xen/install_os_domU.sh @@ -96,6 +96,10 @@ create_directory_for_images # # Configure Networking # + +MGT_NETWORK=`xe pif-list management=true params=network-uuid minimal=true` +MGT_BRIDGE_OR_NET_NAME=`xe network-list uuid=$MGT_NETWORK params=bridge minimal=true` + setup_network "$VM_BRIDGE_OR_NET_NAME" setup_network "$MGT_BRIDGE_OR_NET_NAME" setup_network "$PUB_BRIDGE_OR_NET_NAME" @@ -203,6 +207,7 @@ if [ -z "$templateuuid" ]; then # # Install Ubuntu over network # + UBUNTU_INST_BRIDGE_OR_NET_NAME=${UBUNTU_INST_BRIDGE_OR_NET_NAME:-"$MGT_BRIDGE_OR_NET_NAME"} # always update the preseed file, incase we have a newer one PRESEED_URL=${PRESEED_URL:-""} diff --git 
a/tools/xen/xenrc b/tools/xen/xenrc index f698be1085..82aa29821c 100644 --- a/tools/xen/xenrc +++ b/tools/xen/xenrc @@ -20,9 +20,7 @@ OSDOMU_VDI_GB=8 # differ across localised versions of XenServer. If a given bridge/network # was not found, a new network will be created with the specified name. -# The management network is specified by the bridge name. xenbr0 is usually -# the name of the bridge of the network associated with the hypervisor's eth0. -MGT_BRIDGE_OR_NET_NAME="xenbr0" +# Get the management network from the XS installation VM_BRIDGE_OR_NET_NAME="OpenStack VM Network" PUB_BRIDGE_OR_NET_NAME="OpenStack Public Network" XEN_INT_BRIDGE_OR_NET_NAME="OpenStack VM Integration Network" @@ -72,7 +70,6 @@ UBUNTU_INST_HTTP_PROXY="" UBUNTU_INST_LOCALE="en_US" UBUNTU_INST_KEYBOARD="us" # network configuration for ubuntu netinstall -UBUNTU_INST_BRIDGE_OR_NET_NAME=${UBUNTU_INST_BRIDGE_OR_NET_NAME:-"$MGT_BRIDGE_OR_NET_NAME"} UBUNTU_INST_IP="dhcp" UBUNTU_INST_NAMESERVERS="" UBUNTU_INST_NETMASK="" From 1c1aef0eb7796f0fe8b2502eb4aaa62369b7842a Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 1 Oct 2013 07:56:21 -0400 Subject: [PATCH 0399/4704] Revert "Enable tenant isolation to tempest for neutron" This reverts commit 3d84cf2d7c323750971cf2d27f3a4eaa26cb7a9f. This is believed to be the cause for the massive increase in neutron failures in the gate reseting other projects. Realize this is just a work around. 
Change-Id: Id3c59f3fe9ccbb869eb3200ef7ff2659409e2253 Partial-Bug: 1224001 --- lib/tempest | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/lib/tempest b/lib/tempest index 646d42b8bd..bc0b18d9f4 100644 --- a/lib/tempest +++ b/lib/tempest @@ -230,6 +230,11 @@ function configure_tempest() { # Compute iniset $TEMPEST_CONF compute change_password_available False + # Note(nati) current tempest don't create network for each tenant + # so reuse same tenant for now + if is_service_enabled neutron; then + TEMPEST_ALLOW_TENANT_ISOLATION=${TEMPEST_ALLOW_TENANT_ISOLATION:-False} + fi iniset $TEMPEST_CONF compute allow_tenant_isolation ${TEMPEST_ALLOW_TENANT_ISOLATION:-True} iniset $TEMPEST_CONF compute ssh_user ${DEFAULT_INSTANCE_USER:-cirros} # DEPRECATED iniset $TEMPEST_CONF compute network_for_ssh $PRIVATE_NETWORK_NAME From 5946b57bb2b43c1690d85d6423d0a7a56565c6a4 Mon Sep 17 00:00:00 2001 From: Adam Spiers Date: Mon, 30 Sep 2013 23:48:26 +0100 Subject: [PATCH 0400/4704] Show where files are copied for sudo switch to stack user The home directory for the 'stack' user defaults to /opt/stack, which is not obvious to devstack newbies, and can also be overridden by exporting a value for DEST. Therefore it's friendlier to be explicit about the location of this home directory, to which devstack is copied before being run as the 'stack' user. 
Change-Id: Ia1941a5f2f8cf86a06681e85da52b817a855b8ff --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 03d5632495..b39cd73bb9 100755 --- a/stack.sh +++ b/stack.sh @@ -200,8 +200,8 @@ if [[ $EUID -eq 0 ]]; then ( umask 226 && echo "$STACK_USER ALL=(ALL) NOPASSWD:ALL" \ > /etc/sudoers.d/50_stack_sh ) - echo "Copying files to $STACK_USER user" STACK_DIR="$DEST/${TOP_DIR##*/}" + echo "Copying files to $STACK_DIR" cp -r -f -T "$TOP_DIR" "$STACK_DIR" safe_chown -R $STACK_USER "$STACK_DIR" cd "$STACK_DIR" From c85ade77204af724ee04f7b7d6d406e50f25ead6 Mon Sep 17 00:00:00 2001 From: Adam Spiers Date: Tue, 1 Oct 2013 00:35:16 +0100 Subject: [PATCH 0401/4704] set errexit and xtrace in helper scripts stack.sh invokes some helper scripts as separate processes, rather than by source'ing them. As with stack.sh itself, abort immediately on the first error, so that errors don't compound and result in confusing error messages. If one of these helper scripts aborts, stack.sh itself will also abort in the usual manner. Due to the change in behaviour, tweak some mv invocations to ensure that they don't trigger false failures. As with stack.sh itself, also enable xtrace so we can see exactly what's happening. In particular this allows us to see the cause of any premature termination due to a command failing whilst errexit is enabled. 
Change-Id: I7a55784c31e5395e29ab9bbe2bb112b83b9be693 --- tools/create_userrc.sh | 27 +++++++++++++++++++++------ tools/fixup_stuff.sh | 2 ++ tools/install_pip.sh | 3 +++ 3 files changed, 26 insertions(+), 6 deletions(-) diff --git a/tools/create_userrc.sh b/tools/create_userrc.sh index 619d63f7ff..44b0f6bba0 100755 --- a/tools/create_userrc.sh +++ b/tools/create_userrc.sh @@ -6,6 +6,9 @@ # Warning: This script just for development purposes +set -o errexit +set -o xtrace + ACCOUNT_DIR=./accrc display_help() @@ -138,10 +141,14 @@ s3=`keystone endpoint-get --service s3 | awk '/\|[[:space:]]*s3.publicURL/ {prin mkdir -p "$ACCOUNT_DIR" ACCOUNT_DIR=`readlink -f "$ACCOUNT_DIR"` EUCALYPTUS_CERT=$ACCOUNT_DIR/cacert.pem -mv "$EUCALYPTUS_CERT" "$EUCALYPTUS_CERT.old" &>/dev/null +if [ -e "$EUCALYPTUS_CERT" ]; then + mv "$EUCALYPTUS_CERT" "$EUCALYPTUS_CERT.old" +fi if ! nova x509-get-root-cert "$EUCALYPTUS_CERT"; then echo "Failed to update the root certificate: $EUCALYPTUS_CERT" >&2 - mv "$EUCALYPTUS_CERT.old" "$EUCALYPTUS_CERT" &>/dev/null + if [ -e "$EUCALYPTUS_CERT.old" ]; then + mv "$EUCALYPTUS_CERT.old" "$EUCALYPTUS_CERT" + fi fi @@ -168,12 +175,20 @@ function add_entry(){ local ec2_cert="$rcfile-cert.pem" local ec2_private_key="$rcfile-pk.pem" # Try to preserve the original file on fail (best effort) - mv -f "$ec2_private_key" "$ec2_private_key.old" &>/dev/null - mv -f "$ec2_cert" "$ec2_cert.old" &>/dev/null + if [ -e "$ec2_private_key" ]; then + mv -f "$ec2_private_key" "$ec2_private_key.old" + fi + if [ -e "$ec2_cert" ]; then + mv -f "$ec2_cert" "$ec2_cert.old" + fi # It will not create certs when the password is incorrect if ! 
nova --os-password "$user_passwd" --os-username "$user_name" --os-tenant-name "$tenant_name" x509-create-cert "$ec2_private_key" "$ec2_cert"; then - mv -f "$ec2_private_key.old" "$ec2_private_key" &>/dev/null - mv -f "$ec2_cert.old" "$ec2_cert" &>/dev/null + if [ -e "$ec2_private_key.old" ]; then + mv -f "$ec2_private_key.old" "$ec2_private_key" + fi + if [ -e "$ec2_cert.old" ]; then + mv -f "$ec2_cert.old" "$ec2_cert" + fi fi cat >"$rcfile" < Date: Tue, 1 Oct 2013 00:56:54 +0100 Subject: [PATCH 0402/4704] Ensure SSL CA certificates are installed for curl On openSUSE, ensure that the ca-certificates-mozilla package is installed to avoid curl aborting with curl: (60) SSL certificate problem: unable to get local issuer certificate when trying to download the pip source tarball. Change-Id: Iaf74204ea5330e9abf56f6c9d5a0f9d83992aa59 --- files/rpms-suse/general | 1 + 1 file changed, 1 insertion(+) diff --git a/files/rpms-suse/general b/files/rpms-suse/general index 355af885d3..c8c234e54c 100644 --- a/files/rpms-suse/general +++ b/files/rpms-suse/general @@ -1,4 +1,5 @@ bridge-utils +ca-certificates-mozilla curl euca2ools git-core From 15aa0fc315e231ab3564eab646ca72a359964278 Mon Sep 17 00:00:00 2001 From: Adam Spiers Date: Tue, 1 Oct 2013 01:10:16 +0100 Subject: [PATCH 0403/4704] Uniquify unsupported distro error message Change a distro-not-supported error message in lib/horizon so that it can't be confused with a similar error case in lib/apache. 
Change-Id: I1197cb4de1497906e93a2c3ce09c3c06afe03b65 --- lib/horizon | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/horizon b/lib/horizon index 5973eb2a9f..f770ded42b 100644 --- a/lib/horizon +++ b/lib/horizon @@ -131,7 +131,7 @@ function init_horizon() { fi sudo sed '/^Listen/s/^.*$/Listen 0.0.0.0:80/' -i /etc/httpd/conf/httpd.conf else - exit_distro_not_supported "apache configuration" + exit_distro_not_supported "horizon apache configuration" fi # Remove old log files that could mess with how devstack detects whether Horizon From 3ac8612b55b9d79d214ce5a10eb37e3b017a74ad Mon Sep 17 00:00:00 2001 From: Adam Spiers Date: Tue, 1 Oct 2013 01:08:20 +0100 Subject: [PATCH 0404/4704] Don't bail when setting up horizon on openSUSE I85325179f1792d985b0375572abfe8c8a82fecc3 accidentally removed the conditional branch required to prevent setup of horizon aborting on openSUSE, so put it back in. Change-Id: Ia3e4464a2d718e402d84a0bcf60f13ef30404969 --- lib/horizon | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/horizon b/lib/horizon index 5973eb2a9f..048887ee10 100644 --- a/lib/horizon +++ b/lib/horizon @@ -130,6 +130,8 @@ function init_horizon() { HORIZON_REQUIRE='Require all granted' fi sudo sed '/^Listen/s/^.*$/Listen 0.0.0.0:80/' -i /etc/httpd/conf/httpd.conf + elif is_suse; then + : # nothing to do else exit_distro_not_supported "apache configuration" fi From 6d8fce732523c183fa307c6c5a685e257bdbd78a Mon Sep 17 00:00:00 2001 From: Adam Spiers Date: Tue, 1 Oct 2013 15:59:05 +0100 Subject: [PATCH 0405/4704] Fix handling of pip and virtualenv on openSUSE openSUSE's python-virtualenv rpm depends on the python-pip rpm, but tools/install_pip.sh prefers to deinstall the latter (if installed) and install pip directly from upstream source instead. This deinstallation of python-pip will break if attempted via rpm -e, since rpm does not transitively remove dependents (in this case python-virtualenv). In contrast, "zypper rm" does, so we switch to that. 
It is safe to remove the python-virtualenv package, since stack.sh will install virtualenv via pip instead. Change-Id: I5bc23de0f2de2e3940c4be3b76b7c0634836239b --- files/rpms-suse/general | 1 - functions | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/files/rpms-suse/general b/files/rpms-suse/general index 355af885d3..c5c41d7009 100644 --- a/files/rpms-suse/general +++ b/files/rpms-suse/general @@ -10,7 +10,6 @@ python-setuptools # instead of python-distribute; dist:sle11sp2 python-cmd2 # dist:opensuse-12.3 python-pylint python-unittest2 -python-virtualenv screen tar tcpdump diff --git a/functions b/functions index 83826f9327..bc4f05a90f 100644 --- a/functions +++ b/functions @@ -926,7 +926,7 @@ function uninstall_package() { elif is_fedora; then sudo yum remove -y "$@" elif is_suse; then - sudo rpm -e "$@" + sudo zypper rm "$@" else exit_distro_not_supported "uninstalling packages" fi From 1089b3a5f6ce7742f12842d0f1e30858cd9c1df8 Mon Sep 17 00:00:00 2001 From: Ed Cranford Date: Mon, 30 Sep 2013 11:36:55 -0500 Subject: [PATCH 0406/4704] Adds trove-conductor service to trove. 
Change-Id: Ibf14267c9a2125218c17fb34761548e339c8e784 --- lib/trove | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/lib/trove b/lib/trove index e64ca5f6ac..17c8c99835 100644 --- a/lib/trove +++ b/lib/trove @@ -109,12 +109,15 @@ function configure_trove() { # (Re)create trove conf files rm -f $TROVE_CONF_DIR/trove.conf rm -f $TROVE_CONF_DIR/trove-taskmanager.conf + rm -f $TROVE_CONF_DIR/trove-conductor.conf + iniset $TROVE_CONF_DIR/trove.conf DEFAULT rabbit_password $RABBIT_PASSWORD iniset $TROVE_CONF_DIR/trove.conf DEFAULT sql_connection `database_connection_url trove` iniset $TROVE_CONF_DIR/trove.conf DEFAULT add_addresses True iniset $TROVE_LOCAL_CONF_DIR/trove-guestagent.conf.sample DEFAULT rabbit_password $RABBIT_PASSWORD iniset $TROVE_LOCAL_CONF_DIR/trove-guestagent.conf.sample DEFAULT sql_connection `database_connection_url trove` + iniset $TROVE_LOCAL_CONF_DIR/trove-guestagent.conf.sample DEFAULT control_exchange trove sed -i "s/localhost/$NETWORK_GATEWAY/g" $TROVE_LOCAL_CONF_DIR/trove-guestagent.conf.sample # (Re)create trove taskmanager conf file if needed @@ -127,6 +130,17 @@ function configure_trove() { iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT nova_proxy_admin_pass $RADMIN_USER_PASS iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT trove_auth_url $TROVE_AUTH_ENDPOINT fi + + # (Re)create trove conductor conf file if needed + if is_service_enabled tr-cond; then + iniset $TROVE_CONF_DIR/trove-conductor.conf DEFAULT rabbit_password $RABBIT_PASSWORD + iniset $TROVE_CONF_DIR/trove-conductor.conf DEFAULT sql_connection `database_connection_url trove` + iniset $TROVE_CONF_DIR/trove-conductor.conf DEFAULT nova_proxy_admin_user radmin + iniset $TROVE_CONF_DIR/trove-conductor.conf DEFAULT nova_proxy_admin_tenant_name trove + iniset $TROVE_CONF_DIR/trove-conductor.conf DEFAULT nova_proxy_admin_pass $RADMIN_USER_PASS + iniset $TROVE_CONF_DIR/trove-conductor.conf DEFAULT trove_auth_url $TROVE_AUTH_ENDPOINT + iniset 
$TROVE_CONF_DIR/trove-conductor.conf DEFAULT control_exchange trove + fi } # install_troveclient() - Collect source and prepare @@ -152,12 +166,13 @@ function init_trove() { function start_trove() { screen_it tr-api "cd $TROVE_DIR; bin/trove-api --config-file=$TROVE_CONF_DIR/trove.conf --debug 2>&1" screen_it tr-tmgr "cd $TROVE_DIR; bin/trove-taskmanager --config-file=$TROVE_CONF_DIR/trove-taskmanager.conf --debug 2>&1" + screen_it tr-cond "cd $TROVE_DIR; bin/trove-conductor --config-file=$TROVE_CONF_DIR/trove-conductor.conf --debug 2>&1" } # stop_trove() - Stop running processes function stop_trove() { # Kill the trove screen windows - for serv in tr-api tr-tmgr; do + for serv in tr-api tr-tmgr tr-cond; do screen -S $SCREEN_NAME -p $serv -X kill done } From 87acc91fc67dd2c349008aad9a4f6c1770f3eb7e Mon Sep 17 00:00:00 2001 From: Joe Mills Date: Tue, 1 Oct 2013 08:13:06 +0000 Subject: [PATCH 0407/4704] Add MIDONET settings to dhcp.ini The midonet specific settings were not being added to the dhcp specific config file. This change adds those settings. 
Closes-bug: #1233941 Change-Id: I4155135528c6ba77cf57d30ac256580c7239794f Signed-off-by: Joe Mills --- lib/neutron_plugins/midonet | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/lib/neutron_plugins/midonet b/lib/neutron_plugins/midonet index 0ad760b289..f09c67527e 100644 --- a/lib/neutron_plugins/midonet +++ b/lib/neutron_plugins/midonet @@ -37,6 +37,18 @@ function neutron_plugin_configure_dhcp_agent() { iniset $Q_DHCP_CONF_FILE DEFAULT interface_driver $DHCP_INTERFACE_DRIVER iniset $Q_DHCP_CONF_FILE DEFAULT use_namespaces True iniset $Q_DHCP_CONF_FILE DEFAULT enable_isolated_metadata True + if [[ "$MIDONET_API_URI" != "" ]]; then + iniset $Q_DHCP_CONF_FILE MIDONET midonet_uri "$MIDONET_API_URI" + fi + if [[ "$MIDONET_USERNAME" != "" ]]; then + iniset $Q_DHCP_CONF_FILE MIDONET username "$MIDONET_USERNAME" + fi + if [[ "$MIDONET_PASSWORD" != "" ]]; then + iniset $Q_DHCP_CONF_FILE MIDONET password "$MIDONET_PASSWORD" + fi + if [[ "$MIDONET_PROJECT_ID" != "" ]]; then + iniset $Q_DHCP_CONF_FILE MIDONET project_id "$MIDONET_PROJECT_ID" + fi } function neutron_plugin_configure_l3_agent() { From bfb880d547d03e8eb2230b9c9ad6baf374f2d3c3 Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Wed, 2 Oct 2013 17:44:18 +0100 Subject: [PATCH 0408/4704] xenapi: increase default memory to 3G Devstack was swapping with 2G Change-Id: I8fe77591cb0ca0f946028d7219b43d77eea3419f --- tools/xen/xenrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/xen/xenrc b/tools/xen/xenrc index f698be1085..6372ea7faa 100644 --- a/tools/xen/xenrc +++ b/tools/xen/xenrc @@ -13,7 +13,7 @@ CLEAN_TEMPLATES=${CLEAN_TEMPLATES:-false} # Size of image VDI_MB=${VDI_MB:-5000} -OSDOMU_MEM_MB=2048 +OSDOMU_MEM_MB=3072 OSDOMU_VDI_GB=8 # Network mapping. Specify bridge names or network names. 
Network names may From 96ba6ec1bf0b7cc54f9968e4cc3aa80b8f2c368e Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Wed, 2 Oct 2013 11:08:56 -0700 Subject: [PATCH 0409/4704] Change DATABASE_HOST default to 127.0.0.1 Attempt to fix what is suspected to be a DNS resolution issue with postgresql check job. Closes-Bug: #1232748 Change-Id: Ic82e54b2af038e6c21d4f026f3da10f34c3c185c --- lib/database | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/database b/lib/database index 442ed56fbe..3c1560964c 100644 --- a/lib/database +++ b/lib/database @@ -64,7 +64,7 @@ function initialize_database_backends { # For backward-compatibility, read in the MYSQL_HOST/USER variables and use # them as the default values for the DATABASE_HOST/USER variables. - MYSQL_HOST=${MYSQL_HOST:-localhost} + MYSQL_HOST=${MYSQL_HOST:-127.0.0.1} MYSQL_USER=${MYSQL_USER:-root} DATABASE_HOST=${DATABASE_HOST:-${MYSQL_HOST}} From 49f4486f1caff209254f560deecd774246c91c79 Mon Sep 17 00:00:00 2001 From: Ryan Hsu Date: Thu, 3 Oct 2013 22:27:03 -0700 Subject: [PATCH 0410/4704] Fix typo in property passed to glance In the upload_image function, a property that is passed to glance in the vmdk conditional block has a typo. 
Changes "vmware-disktype" to "vmware_disktype" (dash to underscore) Change-Id: I6c4e1875b6ab4544f9742ab08893dae0e86965a0 Closes-Bug: #1235080 --- functions | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/functions b/functions index d9445fe6e7..cc5089f55c 100644 --- a/functions +++ b/functions @@ -1335,7 +1335,7 @@ function upload_image() { vmdk_net_adapter="${props[2]}" fi - glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --is-public=True --container-format bare --disk-format vmdk --property vmware-disktype="$vmdk_disktype" --property vmware_adaptertype="$vmdk_adapter_type" --property hw_vif_model="$vmdk_net_adapter" < "${IMAGE}" + glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --is-public=True --container-format bare --disk-format vmdk --property vmware_disktype="$vmdk_disktype" --property vmware_adaptertype="$vmdk_adapter_type" --property hw_vif_model="$vmdk_net_adapter" < "${IMAGE}" return fi From e6024413ae69bd0ec2abefe613b850680047a09c Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Sun, 15 Sep 2013 18:38:48 +0200 Subject: [PATCH 0411/4704] lib/swift variable changes and dd replaced by truncate - ${SWIFT_DATA_DIR}/drives/images/swift.img replaced by ${SWIFT_DISK_IMAGE}. - using truncate -s command instead of dd over seeking Change-Id: I0dd29af3247ba7819ef0c74775412074b6b62017 --- lib/swift | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/lib/swift b/lib/swift index 9c80802ba9..de52576e64 100644 --- a/lib/swift +++ b/lib/swift @@ -39,6 +39,7 @@ SWIFT3_DIR=$DEST/swift3 # Set ``SWIFT_DATA_DIR`` to the location of swift drives and objects. # Default is the common DevStack data directory. SWIFT_DATA_DIR=${SWIFT_DATA_DIR:-${DATA_DIR}/swift} +SWIFT_DISK_IMAGE=${SWIFT_DATA_DIR}/drives/images/swift.img # Set ``SWIFT_CONF_DIR`` to the location of the configuration files. 
# Default is ``/etc/swift``. @@ -55,10 +56,10 @@ fi # swift data. Set ``SWIFT_LOOPBACK_DISK_SIZE`` to the disk size in # kilobytes. # Default is 1 gigabyte. -SWIFT_LOOPBACK_DISK_SIZE_DEFAULT=1048576 +SWIFT_LOOPBACK_DISK_SIZE_DEFAULT=1G # if tempest enabled the default size is 4 Gigabyte. if is_service_enabled tempest; then - SWIFT_LOOPBACK_DISK_SIZE_DEFAULT=${SWIFT_LOOPBACK_DISK_SIZE:-4194304} + SWIFT_LOOPBACK_DISK_SIZE_DEFAULT=${SWIFT_LOOPBACK_DISK_SIZE:-4G} fi SWIFT_LOOPBACK_DISK_SIZE=${SWIFT_LOOPBACK_DISK_SIZE:-$SWIFT_LOOPBACK_DISK_SIZE_DEFAULT} @@ -103,8 +104,8 @@ function cleanup_swift() { if egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then sudo umount ${SWIFT_DATA_DIR}/drives/sdb1 fi - if [[ -e ${SWIFT_DATA_DIR}/drives/images/swift.img ]]; then - rm ${SWIFT_DATA_DIR}/drives/images/swift.img + if [[ -e ${SWIFT_DISK_IMAGE} ]]; then + rm ${SWIFT_DISK_IMAGE} fi rm -rf ${SWIFT_DATA_DIR}/run/ if is_apache_enabled_service swift; then @@ -409,28 +410,27 @@ function create_swift_disk() { sudo chown -R $USER:${USER_GROUP} ${SWIFT_DATA_DIR} # Create a loopback disk and format it to XFS. 
- if [[ -e ${SWIFT_DATA_DIR}/drives/images/swift.img ]]; then + if [[ -e ${SWIFT_DISK_IMAGE} ]]; then if egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then sudo umount ${SWIFT_DATA_DIR}/drives/sdb1 - sudo rm -f ${SWIFT_DATA_DIR}/drives/images/swift.img + sudo rm -f ${SWIFT_DISK_IMAGE} fi fi mkdir -p ${SWIFT_DATA_DIR}/drives/images - sudo touch ${SWIFT_DATA_DIR}/drives/images/swift.img - sudo chown $USER: ${SWIFT_DATA_DIR}/drives/images/swift.img + sudo touch ${SWIFT_DISK_IMAGE} + sudo chown $USER: ${SWIFT_DISK_IMAGE} - dd if=/dev/zero of=${SWIFT_DATA_DIR}/drives/images/swift.img \ - bs=1024 count=0 seek=${SWIFT_LOOPBACK_DISK_SIZE} + truncate -s ${SWIFT_LOOPBACK_DISK_SIZE} ${SWIFT_DISK_IMAGE} # Make a fresh XFS filesystem - mkfs.xfs -f -i size=1024 ${SWIFT_DATA_DIR}/drives/images/swift.img + mkfs.xfs -f -i size=1024 ${SWIFT_DISK_IMAGE} # Mount the disk with mount options to make it as efficient as possible mkdir -p ${SWIFT_DATA_DIR}/drives/sdb1 if ! egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then sudo mount -t xfs -o loop,noatime,nodiratime,nobarrier,logbufs=8 \ - ${SWIFT_DATA_DIR}/drives/images/swift.img ${SWIFT_DATA_DIR}/drives/sdb1 + ${SWIFT_DISK_IMAGE} ${SWIFT_DATA_DIR}/drives/sdb1 fi # Create a link to the above mount and From 9f878cbe6dcbd26e756546c1fc7a97994c7a311d Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Fri, 4 Oct 2013 09:56:24 +0100 Subject: [PATCH 0412/4704] xenapi: max out VCPU count Give as much VCPUs to the DevStack machine as possible. First asking xenapi about its CPU count, and as a fallback, count the CPUs in dom0. This should result in faster test runs. 
Change-Id: I1ffb99ecd435f1d7eb5754fe9cd99f0e8ceae6dc --- tools/xen/functions | 32 ++++++++++++++++++++++++++++++++ tools/xen/install_os_domU.sh | 3 +++ 2 files changed, 35 insertions(+) diff --git a/tools/xen/functions b/tools/xen/functions index a5c4b70bc3..c65d919e3f 100644 --- a/tools/xen/functions +++ b/tools/xen/functions @@ -287,3 +287,35 @@ function set_vm_memory() { dynamic-max=${memory}MiB \ uuid=$vm } + +function max_vcpus() { + local vm_name_label + + vm_name_label="$1" + + local vm + local host + local cpu_count + + host=$(xe host-list --minimal) + vm=$(_vm_uuid "$vm_name_label") + + cpu_count=$(xe host-param-get \ + param-name=cpu_info \ + uuid=$host | + sed -e 's/^.*cpu_count: \([0-9]*\);.*$/\1/g') + + if [ -z "$cpu_count" ]; then + # get dom0's vcpu count + cpu_count=$(cat /proc/cpuinfo | grep processor | wc -l) + fi + + # Assert cpu_count is not empty + [ -n "$cpu_count" ] + + # Assert ithas a numeric nonzero value + expr "$cpu_count" + 0 + + xe vm-param-set uuid=$vm VCPUs-max=$cpu_count + xe vm-param-set uuid=$vm VCPUs-at-startup=$cpu_count +} diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh index 110bbd998c..e69cdea04f 100755 --- a/tools/xen/install_os_domU.sh +++ b/tools/xen/install_os_domU.sh @@ -263,6 +263,9 @@ $THIS_DIR/prepare_guest_template.sh "$GUEST_NAME" # Set virtual machine parameters set_vm_memory "$GUEST_NAME" "$OSDOMU_MEM_MB" +# Max out VCPU count for better performance +max_vcpus "$GUEST_NAME" + # start the VM to run the prepare steps xe vm-start vm="$GUEST_NAME" From a2fd222ee976e93898a66372ef764b7756724321 Mon Sep 17 00:00:00 2001 From: Joe Mills Date: Fri, 4 Oct 2013 11:46:10 +0000 Subject: [PATCH 0413/4704] Change Midonet vif driver to generic Use generic vif driver for Midonet to support port bindings through the mm-ctl script. 
Change-Id: Iddc8a1c7b0128a76cd778c0245f2098bfb2c0145 Closes-Bug: 1235202 --- lib/neutron_plugins/midonet | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron_plugins/midonet b/lib/neutron_plugins/midonet index 0ad760b289..193055f7db 100644 --- a/lib/neutron_plugins/midonet +++ b/lib/neutron_plugins/midonet @@ -12,7 +12,7 @@ function is_neutron_ovs_base_plugin() { } function neutron_plugin_create_nova_conf() { - NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"midonet.nova.virt.libvirt.vif.MidonetVifDriver"} + NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"} } function neutron_plugin_install_agent_packages() { From ca5af8615e58b78dbb0242074bc35aec5de1dda5 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Fri, 4 Oct 2013 13:33:07 -0500 Subject: [PATCH 0414/4704] Remove general assumption in get_packages() get_packages() always included 'general' as a default 'service' file. Remove this assumption and add it explicitly to the primary package installation call. This allows get_package() to be used in other places where 'general' is not desired to be included. Change-Id: I1eed4386d073d6ae9534aedae32654208c6662e8 --- functions | 4 ++-- tools/install_prereqs.sh | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/functions b/functions index d9445fe6e7..6aee24008c 100644 --- a/functions +++ b/functions @@ -248,7 +248,7 @@ function _get_package_dir() { # - ``# dist:DISTRO`` or ``dist:DISTRO1,DISTRO2`` limits the selection # of the package to the distros listed. The distro names are case insensitive. 
function get_packages() { - local services=$1 + local services=$@ local package_dir=$(_get_package_dir) local file_to_parse local service @@ -260,7 +260,7 @@ function get_packages() { if [[ -z "$DISTRO" ]]; then GetDistro fi - for service in general ${services//,/ }; do + for service in ${services//,/ }; do # Allow individual services to specify dependencies if [[ -e ${package_dir}/${service} ]]; then file_to_parse="${file_to_parse} $service" diff --git a/tools/install_prereqs.sh b/tools/install_prereqs.sh index 68f11ce35e..0c65fd9b00 100755 --- a/tools/install_prereqs.sh +++ b/tools/install_prereqs.sh @@ -55,7 +55,7 @@ export_proxy_variables # ================ # Install package requirements -install_package $(get_packages $ENABLED_SERVICES) +install_package $(get_packages general $ENABLED_SERVICES) if [[ -n "$SYSLOG" && "$SYSLOG" != "False" ]]; then if is_ubuntu || is_fedora; then From 23f69d83e5564ece0308535117cc6d224fcc3557 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Fri, 4 Oct 2013 12:35:24 -0500 Subject: [PATCH 0415/4704] Split the creation of $STACK_USER account out of stack.sh Automatically creating a new user account is not always the right course of action when stack.sh is running as root. Plus, the re-exec did not work correctly in some cases. * Create tools/create-stack-user.sh to set up a suitable user for running DevStack * Abort stack.sh and unstack.sh if running as root and suggest creating a suitable user account. 
Change-Id: I5d967c00c89f32e861449234ea8fe19261cd9ae3 --- README.md | 8 +++- stack.sh | 80 ++++++++++++-------------------------- tools/create-stack-user.sh | 49 +++++++++++++++++++++++ unstack.sh | 6 +++ 4 files changed, 87 insertions(+), 56 deletions(-) create mode 100644 tools/create-stack-user.sh diff --git a/README.md b/README.md index 99e983887e..6dc9ecd1e3 100644 --- a/README.md +++ b/README.md @@ -34,7 +34,7 @@ You can also pick specific OpenStack project releases by setting the appropriate # Start A Dev Cloud -Installing in a dedicated disposable vm is safer than installing on your dev machine! To start a dev cloud: +Installing in a dedicated disposable vm is safer than installing on your dev machine! Plus you can pick one of the supported Linux distros for your VM. To start a dev cloud run the following NOT AS ROOT (see below for more): ./stack.sh @@ -57,6 +57,12 @@ If the EC2 API is your cup-o-tea, you can create credentials and use euca2ools: # list instances using ec2 api euca-describe-instances +# DevStack Execution Environment + +DevStack runs rampant over the system it runs on, installing things and uninstalling other things. Running this on a system you care about is a recipe for disappointment, or worse. Alas, we're all in the virtualization business here, so run it in a VM. And take advantage of the snapshot capabilities of your hypervisor of choice to reduce testing cycle times. You might even save enough time to write one more feature before the next feature freeze... + +``stack.sh`` needs to have root access for a lot of tasks, but it also needs to have not-root permissions for most of its work and for all of the OpenStack services. So ``stack.sh`` specifically does not run if you are root. This is a recent change (Oct 2013) from the previous behaviour of automatically creating a ``stack`` user. 
Automatically creating a user account is not always the right response to running as root, so that bit is now an explicit step using ``tools/create-stack-user.sh``. Run that (as root!) if you do not want to just use your normal login here, which works perfectly fine. + # Customizing You can override environment variables used in `stack.sh` by creating file name `localrc`. It is likely that you will need to do this to tweak your networking configuration should you need to access your cloud from a different host. diff --git a/stack.sh b/stack.sh index b39cd73bb9..86fe82a584 100755 --- a/stack.sh +++ b/stack.sh @@ -172,67 +172,37 @@ fi # ----------- # OpenStack is designed to be run as a non-root user; Horizon will fail to run -# as **root** since Apache will not serve content from **root** user). If -# ``stack.sh`` is run as **root**, it automatically creates a **stack** user with -# sudo privileges and runs as that user. +# as **root** since Apache will not serve content from **root** user). +# ``stack.sh`` must not be run as **root**. It aborts and suggests one course of +# action to create a suitable user account. if [[ $EUID -eq 0 ]]; then - ROOTSLEEP=${ROOTSLEEP:-10} echo "You are running this script as root." - echo "In $ROOTSLEEP seconds, we will create a user '$STACK_USER' and run as that user" - sleep $ROOTSLEEP - - # Give the non-root user the ability to run as **root** via ``sudo`` - is_package_installed sudo || install_package sudo - if ! getent group $STACK_USER >/dev/null; then - echo "Creating a group called $STACK_USER" - groupadd $STACK_USER - fi - if ! 
getent passwd $STACK_USER >/dev/null; then - echo "Creating a user called $STACK_USER" - useradd -g $STACK_USER -s /bin/bash -d $DEST -m $STACK_USER - fi - - echo "Giving stack user passwordless sudo privileges" - # UEC images ``/etc/sudoers`` does not have a ``#includedir``, add one - grep -q "^#includedir.*/etc/sudoers.d" /etc/sudoers || - echo "#includedir /etc/sudoers.d" >> /etc/sudoers - ( umask 226 && echo "$STACK_USER ALL=(ALL) NOPASSWD:ALL" \ - > /etc/sudoers.d/50_stack_sh ) - - STACK_DIR="$DEST/${TOP_DIR##*/}" - echo "Copying files to $STACK_DIR" - cp -r -f -T "$TOP_DIR" "$STACK_DIR" - safe_chown -R $STACK_USER "$STACK_DIR" - cd "$STACK_DIR" - if [[ "$SHELL_AFTER_RUN" != "no" ]]; then - exec sudo -u $STACK_USER bash -l -c "set -e; bash stack.sh; bash" - else - exec sudo -u $STACK_USER bash -l -c "set -e; source stack.sh" - fi + echo "Cut it out." + echo "Really." + echo "If you need an account to run DevStack, do this (as root, heh) to create $STACK_USER:" + echo "$TOP_DIR/tools/create-stack-user.sh" exit 1 -else - # We're not **root**, make sure ``sudo`` is available - is_package_installed sudo || die "Sudo is required. Re-run stack.sh as root ONE TIME ONLY to set up sudo." 
- - # UEC images ``/etc/sudoers`` does not have a ``#includedir``, add one - sudo grep -q "^#includedir.*/etc/sudoers.d" /etc/sudoers || - echo "#includedir /etc/sudoers.d" | sudo tee -a /etc/sudoers - - # Set up devstack sudoers - TEMPFILE=`mktemp` - echo "$STACK_USER ALL=(root) NOPASSWD:ALL" >$TEMPFILE - # Some binaries might be under /sbin or /usr/sbin, so make sure sudo will - # see them by forcing PATH - echo "Defaults:$STACK_USER secure_path=/sbin:/usr/sbin:/usr/bin:/bin:/usr/local/sbin:/usr/local/bin" >> $TEMPFILE - chmod 0440 $TEMPFILE - sudo chown root:root $TEMPFILE - sudo mv $TEMPFILE /etc/sudoers.d/50_stack_sh - - # Remove old file - sudo rm -f /etc/sudoers.d/stack_sh_nova fi +# We're not **root**, make sure ``sudo`` is available +is_package_installed sudo || install_package sudo + +# UEC images ``/etc/sudoers`` does not have a ``#includedir``, add one +sudo grep -q "^#includedir.*/etc/sudoers.d" /etc/sudoers || + echo "#includedir /etc/sudoers.d" | sudo tee -a /etc/sudoers + +# Set up devstack sudoers +TEMPFILE=`mktemp` +echo "$STACK_USER ALL=(root) NOPASSWD:ALL" >$TEMPFILE +# Some binaries might be under /sbin or /usr/sbin, so make sure sudo will +# see them by forcing PATH +echo "Defaults:$STACK_USER secure_path=/sbin:/usr/sbin:/usr/bin:/bin:/usr/local/sbin:/usr/local/bin" >> $TEMPFILE +chmod 0440 $TEMPFILE +sudo chown root:root $TEMPFILE +sudo mv $TEMPFILE /etc/sudoers.d/50_stack_sh + + # Create the destination directory and ensure it is writable by the user # and read/executable by everybody for daemons (e.g. 
apache run for horizon) sudo mkdir -p $DEST diff --git a/tools/create-stack-user.sh b/tools/create-stack-user.sh new file mode 100644 index 0000000000..2251d1e67c --- /dev/null +++ b/tools/create-stack-user.sh @@ -0,0 +1,49 @@ +#!/usr/bin/env bash + +# **create-stack-user.sh** + +# Create a user account suitable for running DevStack +# - create a group named $STACK_USER if it does not exist +# - create a user named $STACK_USER if it does not exist +# - home is $DEST +# - configure sudo for $STACK_USER + +# ``stack.sh`` was never intended to run as root. It had a hack to do what is +# now in this script and re-launch itself, but that hack was less than perfect +# and it was time for this nonsense to stop. Run this script as root to create +# the user and configure sudo. + + +# Keep track of the devstack directory +TOP_DIR=$(cd $(dirname "$0")/.. && pwd) + +# Import common functions +source $TOP_DIR/functions + +# Determine what system we are running on. This provides ``os_VENDOR``, +# ``os_RELEASE``, ``os_UPDATE``, ``os_PACKAGE``, ``os_CODENAME`` +# and ``DISTRO`` +GetDistro + +# Needed to get ``ENABLED_SERVICES`` +source $TOP_DIR/stackrc + +# Give the non-root user the ability to run as **root** via ``sudo`` +is_package_installed sudo || install_package sudo + +if ! getent group $STACK_USER >/dev/null; then + echo "Creating a group called $STACK_USER" + groupadd $STACK_USER +fi + +if ! 
getent passwd $STACK_USER >/dev/null; then + echo "Creating a user called $STACK_USER" + useradd -g $STACK_USER -s /bin/bash -d $DEST -m $STACK_USER +fi + +echo "Giving stack user passwordless sudo privileges" +# UEC images ``/etc/sudoers`` does not have a ``#includedir``, add one +grep -q "^#includedir.*/etc/sudoers.d" /etc/sudoers || + echo "#includedir /etc/sudoers.d" >> /etc/sudoers +( umask 226 && echo "$STACK_USER ALL=(ALL) NOPASSWD:ALL" \ + > /etc/sudoers.d/50_stack_sh ) diff --git a/unstack.sh b/unstack.sh index 05d9fb7c83..c944ccc0fb 100755 --- a/unstack.sh +++ b/unstack.sh @@ -24,6 +24,12 @@ source $TOP_DIR/stackrc # Destination path for service data DATA_DIR=${DATA_DIR:-${DEST}/data} +if [[ $EUID -eq 0 ]]; then + echo "You are running this script as root." + echo "It might work but you will have a better day running it as $STACK_USER" + exit 1 +fi + # Import apache functions source $TOP_DIR/lib/apache From d903476aa5270df703bd22bb58fed2c740042fbf Mon Sep 17 00:00:00 2001 From: Adam Spiers Date: Fri, 4 Oct 2013 23:20:24 +0100 Subject: [PATCH 0416/4704] Fix "instal_prereqs.sh" typo Change-Id: I745b159aea70412d424df506af0e3d1ca2d78034 --- HACKING.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/HACKING.rst b/HACKING.rst index dd665a2304..5f33d770f8 100644 --- a/HACKING.rst +++ b/HACKING.rst @@ -38,7 +38,7 @@ such as Grenade, to manage a DevStack installation. A number of additional scripts can be found in the ``tools`` directory that may be useful in supporting DevStack installations. Of particular note are ``info.sh`` -to collect and report information about the installed system, and ``instal_prereqs.sh`` +to collect and report information about the installed system, and ``install_prereqs.sh`` that handles installation of the prerequisite packages for DevStack. It is suitable, for example, to pre-load a system for making a snapshot. 
From 46ea7238682642990ef67dd73582e86a2d4e2a2d Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Mon, 7 Oct 2013 07:29:27 +0200 Subject: [PATCH 0417/4704] install_pip script fails if pip was not installed 'set -o errexit' recently added to the pip installer script, which causes the script fail when it does not able to find an already installed pip. This change handles the situation when pip is not installed. Change-Id: I18a42d13c4be6699db21ec5b6a095a88a199912d --- tools/install_pip.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tools/install_pip.sh b/tools/install_pip.sh index 04e18261ac..940bd8c84a 100755 --- a/tools/install_pip.sh +++ b/tools/install_pip.sh @@ -50,10 +50,12 @@ GetDistro echo "Distro: $DISTRO" function get_versions() { - PIP=$(which pip 2>/dev/null || which pip-python 2>/dev/null) + PIP=$(which pip 2>/dev/null || which pip-python 2>/dev/null || true) if [[ -n $PIP ]]; then PIP_VERSION=$($PIP --version | awk '{ print $2}') echo "pip: $PIP_VERSION" + else + echo "pip: Not Installed" fi } From ec0ff2acf8d0f58c3e2750cd94a1eb9949bcdad8 Mon Sep 17 00:00:00 2001 From: armando-migliaccio Date: Wed, 25 Sep 2013 17:29:58 -0700 Subject: [PATCH 0418/4704] Specify agent mode and service cluster uuid for nicira plugin Supports blueprint nsx-integrated-services Change-Id: Ib02716fe447f1d7f47f2f49d16f0d2ad7afe741f --- lib/neutron_plugins/nicira | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/lib/neutron_plugins/nicira b/lib/neutron_plugins/nicira index e9deb64e11..ca89d57fe7 100644 --- a/lib/neutron_plugins/nicira +++ b/lib/neutron_plugins/nicira @@ -119,6 +119,16 @@ function neutron_plugin_configure_service() { if [[ "$NVP_REDIRECTS" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE DEFAULT redirects $NVP_REDIRECTS fi + if [[ "$AGENT_MODE" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE nvp agent_mode $AGENT_MODE + if [[ "$AGENT_MODE" == "agentless" ]]; then + if [[ "$DEFAULT_SERVICE_CLUSTER_UUID" != "" ]]; then + iniset 
/$Q_PLUGIN_CONF_FILE DEFAULT default_service_cluster_uuid $DEFAULT_SERVICE_CLUSTER_UUID + else + die $LINENO "Agentless mode requires a service cluster." + fi + fi + fi } function neutron_plugin_setup_interface_driver() { From 976e418a037df3621cf15dfc15df68e2095b28c0 Mon Sep 17 00:00:00 2001 From: armando-migliaccio Date: Wed, 2 Oct 2013 12:59:07 -0700 Subject: [PATCH 0419/4704] Specify ip address for nova metadata server for nicira plugin Supports blueprint nsx-integrated-services Change-Id: I265b9714ca531731b0b2e1b37e64c912666aed80 --- lib/neutron_plugins/nicira | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/neutron_plugins/nicira b/lib/neutron_plugins/nicira index ca89d57fe7..082c84674d 100644 --- a/lib/neutron_plugins/nicira +++ b/lib/neutron_plugins/nicira @@ -127,6 +127,7 @@ function neutron_plugin_configure_service() { else die $LINENO "Agentless mode requires a service cluster." fi + iniset /$Q_PLUGIN_CONF_FILE nvp_metadata metadata_server_address $Q_META_DATA_IP fi fi } From 9732b57e3de7c24cb494c0f923d791a782ca9c9a Mon Sep 17 00:00:00 2001 From: Salvatore Orlando Date: Sat, 21 Sep 2013 01:17:06 +0200 Subject: [PATCH 0420/4704] Nicira plugin: do not die if NVP gateway IP is missing Devstack should not die if the IP and prefix len for establishing a connection to the public network are not provided. In this case, the public gateway IP address used to configure Neutron's public network should be used, together with the prefix length of the public network's CIDR. This patch also ensures $PUBLIC_BRIDGE is created, even if Q_USE_DEBUG_COMMAND is disabled. Finally this patch also adds the teardown operation for restoring the original IP addresses on the interface used for connectivity to the public network implemented on the NVP gateway. 
Bug #1227750 Change-Id: Ib58738a578c46f2183d503cabfdc6039bfbeb702 --- lib/neutron_thirdparty/nicira | 38 +++++++++++++++++++++++++++++++---- 1 file changed, 34 insertions(+), 4 deletions(-) diff --git a/lib/neutron_thirdparty/nicira b/lib/neutron_thirdparty/nicira index 5a20934a1b..3f2a5af11f 100644 --- a/lib/neutron_thirdparty/nicira +++ b/lib/neutron_thirdparty/nicira @@ -18,22 +18,38 @@ set +o xtrace # to an network that allows it to talk to the gateway for # testing purposes NVP_GATEWAY_NETWORK_INTERFACE=${NVP_GATEWAY_NETWORK_INTERFACE:-eth2} +# Re-declare floating range as it's needed also in stop_nicira, which +# is invoked by unstack.sh +FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.224/28} function configure_nicira() { : } function init_nicira() { - die_if_not_set $LINENO NVP_GATEWAY_NETWORK_CIDR "Please, specify CIDR for the gateway network interface." + if ! is_set NVP_GATEWAY_NETWORK_CIDR; then + NVP_GATEWAY_NETWORK_CIDR=$PUBLIC_NETWORK_GATEWAY/${FLOATING_RANGE#*/} + echo "The IP address to set on br-ex was not specified. 
" + echo "Defaulting to "$NVP_GATEWAY_NETWORK_CIDR + fi # Make sure the interface is up, but not configured - sudo ifconfig $NVP_GATEWAY_NETWORK_INTERFACE up + sudo ip link dev $NVP_GATEWAY_NETWORK_INTERFACE set up + # Save and then flush the IP addresses on the interface + addresses=$(ip addr show dev $NVP_GATEWAY_NETWORK_INTERFACE | grep inet | awk {'print $2'}) sudo ip addr flush $NVP_GATEWAY_NETWORK_INTERFACE # Use the PUBLIC Bridge to route traffic to the NVP gateway # NOTE(armando-migliaccio): if running in a nested environment this will work # only with mac learning enabled, portsecurity and security profiles disabled + # The public bridge might not exist for the NVP plugin if Q_USE_DEBUG_COMMAND is off + # Try to create it anyway + sudo ovs-vsctl --no-wait -- --may-exist add-br $PUBLIC_BRIDGE sudo ovs-vsctl -- --may-exist add-port $PUBLIC_BRIDGE $NVP_GATEWAY_NETWORK_INTERFACE nvp_gw_net_if_mac=$(ip link show $NVP_GATEWAY_NETWORK_INTERFACE | awk '/ether/ {print $2}') - sudo ifconfig $PUBLIC_BRIDGE $NVP_GATEWAY_NETWORK_CIDR hw ether $nvp_gw_net_if_mac + sudo ip link dev $PUBLIC_BRIDGE set address $nvp_gw_net_if_mac + for address in $addresses; do + sudo ip addr add dev $PUBLIC_BRIDGE $address + done + sudo ip addr add dev $PUBLIC_BRIDGE $NVP_GATEWAY_NETWORK_CIDR } function install_nicira() { @@ -45,7 +61,21 @@ function start_nicira() { } function stop_nicira() { - : + if ! is_set NVP_GATEWAY_NETWORK_CIDR; then + NVP_GATEWAY_NETWORK_CIDR=$PUBLIC_NETWORK_GATEWAY/${FLOATING_RANGE#*/} + echo "The IP address expected on br-ex was not specified. 
" + echo "Defaulting to "$NVP_GATEWAY_NETWORK_CIDR + fi + sudo ip addr del $NVP_GATEWAY_NETWORK_CIDR dev $PUBLIC_BRIDGE + # Save and then flush remaining addresses on the interface + addresses=$(ip addr show dev $PUBLIC_BRIDGE | grep inet | awk {'print $2'}) + sudo ip addr flush $PUBLIC_BRIDGE + # Try to detach physical interface from PUBLIC_BRIDGE + sudo ovs-vsctl del-port $NVP_GATEWAY_NETWORK_INTERFACE + # Restore addresses on NVP_GATEWAY_NETWORK_INTERFACE + for address in $addresses; do + sudo ip addr add dev $NVP_GATEWAY_NETWORK_INTERFACE $address + done } # Restore xtrace From 4897ff55d77cd957c57f9717785d12f86cd5b824 Mon Sep 17 00:00:00 2001 From: Arata Notsu Date: Wed, 9 Oct 2013 18:49:32 +0900 Subject: [PATCH 0421/4704] Correct logging_context_format_string for Heat We should use "tenant" and "user" instead of "project_name" and "user_name" by calling setup_colorized_logging with these parameters. Change-Id: I47820c890bf4585e7c8f64c41f48d7576ca56862 Closes-Bug: 1237314 --- lib/heat | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/heat b/lib/heat index ff9473ecdb..8acadb4ad1 100644 --- a/lib/heat +++ b/lib/heat @@ -86,7 +86,7 @@ function configure_heat() { iniset $HEAT_CONF DEFAULT use_syslog $SYSLOG if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then # Add color to logging output - setup_colorized_logging $HEAT_CONF DEFAULT + setup_colorized_logging $HEAT_CONF DEFAULT tenant user fi # keystone authtoken From b7fcf3f6c0b41bbba16dd52d124711e8e2b8bc9d Mon Sep 17 00:00:00 2001 From: Arata Notsu Date: Thu, 10 Oct 2013 17:56:21 +0900 Subject: [PATCH 0422/4704] Update diskimage-builder's URL diskimage-builder has moved from stackforge to openstack. 
Change-Id: I5bc8d5d162d7d671e062efd67992f15fbb4307b2 --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index 3a338d16f2..5c0baf7da9 100644 --- a/stackrc +++ b/stackrc @@ -160,7 +160,7 @@ TEMPEST_BRANCH=${TEMPEST_BRANCH:-master} # diskimage-builder -BM_IMAGE_BUILD_REPO=${BM_IMAGE_BUILD_REPO:-${GIT_BASE}/stackforge/diskimage-builder.git} +BM_IMAGE_BUILD_REPO=${BM_IMAGE_BUILD_REPO:-${GIT_BASE}/openstack/diskimage-builder.git} BM_IMAGE_BUILD_BRANCH=${BM_IMAGE_BUILD_BRANCH:-master} # bm_poseur From 84783c72fe31dbc7656cfb4b9ee0af947e5ce3ed Mon Sep 17 00:00:00 2001 From: Sergey Kraynev Date: Thu, 10 Oct 2013 09:08:48 -0400 Subject: [PATCH 0423/4704] Adding value for lock_path in configuration file Now oslo code include new lockutils. According this code if lock_path is not set in configuration file, will be raised Error message. So for updating lockutils in cinder project is needed lock_path definition in configuration file. Change-Id: I413f0a2ccec0f9d9e06acaa8cc06c41206d9dcc2 --- lib/cinder | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/cinder b/lib/cinder index ccf38b4dea..220488a07e 100644 --- a/lib/cinder +++ b/lib/cinder @@ -233,6 +233,7 @@ function configure_cinder() { iniset $CINDER_CONF DEFAULT rootwrap_config "$CINDER_CONF_DIR/rootwrap.conf" iniset $CINDER_CONF DEFAULT osapi_volume_extension cinder.api.contrib.standard_extensions iniset $CINDER_CONF DEFAULT state_path $CINDER_STATE_PATH + iniset $CINDER_CONF DEFAULT lock_path $CINDER_STATE_PATH iniset $CINDER_CONF DEFAULT periodic_interval $CINDER_PERIODIC_INTERVAL if is_service_enabled ceilometer; then From 3931573f2bdb542ff4299bd548cab3458c3b0c99 Mon Sep 17 00:00:00 2001 From: DennyZhang Date: Fri, 11 Oct 2013 00:12:22 -0500 Subject: [PATCH 0424/4704] mute useless console output, when run ./stack.sh Run ./stack.sh will dump ~400 lines of information, because of tar xvfz pip-*.tar.gz, and python setup.py install. 
We'd better mute stdout for the two steps, to make console cleaner Change-Id: Icf87947e020acb48d8cbe4cdcc1641f060e50f6d --- tools/install_pip.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/install_pip.sh b/tools/install_pip.sh index 940bd8c84a..455323e6fa 100755 --- a/tools/install_pip.sh +++ b/tools/install_pip.sh @@ -72,9 +72,9 @@ function install_get_pip() { function install_pip_tarball() { (cd $FILES; \ curl -O $PIP_TAR_URL; \ - tar xvfz pip-$INSTALL_PIP_VERSION.tar.gz; \ + tar xvfz pip-$INSTALL_PIP_VERSION.tar.gz 1>/dev/null; \ cd pip-$INSTALL_PIP_VERSION; \ - sudo python setup.py install; \ + sudo python setup.py install 1>/dev/null; \ ) } From af15d35414abea1e0dd9792d3fffcffab47afc1c Mon Sep 17 00:00:00 2001 From: Arata Notsu Date: Fri, 11 Oct 2013 21:56:56 +0900 Subject: [PATCH 0425/4704] baremetal: Allow BM_SECOND_MAC to be unset Currently DevStack registers the second nic of the baremetal node even if BM_SECOND_MAC is not set or empty. However an interface with an empty mac address causes dhcp to fail (bug 1238595). And such operation will get to return a error after the bug is resolved. So we should not register the second nic if BM_SECOND_MAC is not set. Related-Bug: #1238595 Change-Id: Ib3cc77686b72311403ccacbd70ae9cf43e6eb4c9 --- lib/baremetal | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/lib/baremetal b/lib/baremetal index 52af420853..f4d8589628 100644 --- a/lib/baremetal +++ b/lib/baremetal @@ -449,8 +449,10 @@ function add_baremetal_node() { "$mac_1" \ | grep ' id ' | get_field 2 ) [ $? -eq 0 ] || [ "$id" ] || die $LINENO "Error adding baremetal node" - id2=$(nova baremetal-interface-add "$id" "$mac_2" ) - [ $? -eq 0 ] || [ "$id2" ] || die $LINENO "Error adding interface to barmetal node $id" + if [ -n "$mac_2" ]; then + id2=$(nova baremetal-interface-add "$id" "$mac_2" ) + [ $? 
-eq 0 ] || [ "$id2" ] || die $LINENO "Error adding interface to barmetal node $id" + fi } From d5644f8b4f56b1aef0efc6ae869029df494c0a93 Mon Sep 17 00:00:00 2001 From: Florent Flament Date: Fri, 11 Oct 2013 15:39:09 +0200 Subject: [PATCH 0426/4704] Updates samples/localrc comment The SWIFT_DATA_DIR default value stated in the last comment of samples/localrc has been updated to match actual SWIFT_DATA_DIR default value ($DEST/data/swift instead of $DEST/swift/data). Addresses Bug: #1238665 Change-Id: I2510f72eb3eda467799202b356abb606930f4d94 --- samples/localrc | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/samples/localrc b/samples/localrc index fd7221a0ae..80cf0e75ac 100644 --- a/samples/localrc +++ b/samples/localrc @@ -83,7 +83,8 @@ SWIFT_HASH=66a3d6b56c1f479c8b4e70ab5c2000f5 # Set this to 1 to save some resources: SWIFT_REPLICAS=1 -# The data for Swift is stored in the source tree by default (``$DEST/swift/data``) -# and can be moved by setting ``SWIFT_DATA_DIR``. The directory will be created +# The data for Swift is stored by default in (``$DEST/data/swift``), +# or (``$DATA_DIR/swift``) if ``DATA_DIR`` has been set, and can be +# moved by setting ``SWIFT_DATA_DIR``. The directory will be created # if it does not exist. SWIFT_DATA_DIR=$DEST/data From 8c032d1635320ad9b5162136a8876cc48e7fa8bd Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 23 Sep 2013 13:53:13 -0500 Subject: [PATCH 0427/4704] Convert remaining hypervisors to plugin model Convert BareMetal, OpenVZ, PowerVM hnd libvirt ypervisor configurations in Nova to the new plugin setup. 
Change-Id: I47d744a2c9fdda0771f5b473ec3b26fb099f7439 --- lib/nova | 153 ++++-------------------- lib/nova_plugins/hypervisor-baremetal | 93 +++++++++++++++ lib/nova_plugins/hypervisor-libvirt | 165 ++++++++++++++++++++++++++ lib/nova_plugins/hypervisor-openvz | 67 +++++++++++ lib/nova_plugins/hypervisor-powervm | 76 ++++++++++++ stack.sh | 95 +-------------- 6 files changed, 426 insertions(+), 223 deletions(-) create mode 100644 lib/nova_plugins/hypervisor-baremetal create mode 100644 lib/nova_plugins/hypervisor-libvirt create mode 100644 lib/nova_plugins/hypervisor-openvz create mode 100644 lib/nova_plugins/hypervisor-powervm diff --git a/lib/nova b/lib/nova index 4c5520785f..8deb3a01a9 100644 --- a/lib/nova +++ b/lib/nova @@ -71,23 +71,24 @@ QEMU_CONF=/etc/libvirt/qemu.conf NOVNC_DIR=$DEST/noVNC SPICE_DIR=$DEST/spice-html5 +# Set default defaults here as some hypervisor drivers override these +PUBLIC_INTERFACE_DEFAULT=br100 +GUEST_INTERFACE_DEFAULT=eth0 +FLAT_NETWORK_BRIDGE_DEFAULT=br100 + +# Get hypervisor configuration +# ---------------------------- + +NOVA_PLUGINS=$TOP_DIR/lib/nova_plugins +if is_service_enabled nova && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then + # Load plugin + source $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER +fi + # Nova Network Configuration # -------------------------- -# Set defaults according to the virt driver -if [ "$VIRT_DRIVER" = 'baremetal' ]; then - NETWORK_MANAGER=${NETWORK_MANAGER:-FlatManager} - PUBLIC_INTERFACE_DEFAULT=eth0 - FLAT_INTERFACE=${FLAT_INTERFACE:-eth0} - FLAT_NETWORK_BRIDGE_DEFAULT=br100 - STUB_NETWORK=${STUB_NETWORK:-False} -else - PUBLIC_INTERFACE_DEFAULT=br100 - GUEST_INTERFACE_DEFAULT=eth0 - FLAT_NETWORK_BRIDGE_DEFAULT=br100 -fi - NETWORK_MANAGER=${NETWORK_MANAGER:-${NET_MAN:-FlatDHCPManager}} PUBLIC_INTERFACE=${PUBLIC_INTERFACE:-$PUBLIC_INTERFACE_DEFAULT} VLAN_INTERFACE=${VLAN_INTERFACE:-$GUEST_INTERFACE_DEFAULT} @@ -274,83 +275,6 @@ function configure_nova() { fi fi - # Prepare directories and packages 
for baremetal driver - if is_baremetal; then - configure_baremetal_nova_dirs - fi - - if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then - if is_service_enabled neutron && is_neutron_ovs_base_plugin && ! sudo grep -q '^cgroup_device_acl' $QEMU_CONF; then - # Add /dev/net/tun to cgroup_device_acls, needed for type=ethernet interfaces - cat </etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla -[libvirt Management Access] -Identity=unix-group:$LIBVIRT_GROUP -Action=org.libvirt.unix.manage -ResultAny=yes -ResultInactive=yes -ResultActive=yes -EOF" - elif is_suse && [[ $os_RELEASE = 12.2 || "$os_VENDOR" = "SUSE LINUX" ]]; then - # openSUSE < 12.3 or SLE - # Work around the fact that polkit-default-privs overrules pklas - # with 'unix-group:$group'. - sudo bash -c "cat </etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla -[libvirt Management Access] -Identity=unix-user:$USER -Action=org.libvirt.unix.manage -ResultAny=yes -ResultInactive=yes -ResultActive=yes -EOF" - else - # Starting with fedora 18 and opensuse-12.3 enable stack-user to - # virsh -c qemu:///system by creating a policy-kit rule for - # stack-user using the new Javascript syntax - rules_dir=/etc/polkit-1/rules.d - sudo mkdir -p $rules_dir - sudo bash -c "cat < $rules_dir/50-libvirt-$STACK_USER.rules -polkit.addRule(function(action, subject) { - if (action.id == 'org.libvirt.unix.manage' && - subject.user == '"$STACK_USER"') { - return polkit.Result.YES; - } -}); -EOF" - unset rules_dir - fi - fi - - # The user that nova runs as needs to be member of **libvirtd** group otherwise - # nova-compute will be unable to use libvirt. - if ! getent group $LIBVIRT_GROUP >/dev/null; then - sudo groupadd $LIBVIRT_GROUP - fi - add_user_to_group $STACK_USER $LIBVIRT_GROUP - - # libvirt detects various settings on startup, as we potentially changed - # the system configuration (modules, filesystems), we need to restart - # libvirt to detect those changes. 
- restart_service $LIBVIRT_DAEMON - fi - # Instance Storage # ---------------- @@ -368,6 +292,14 @@ EOF" fi fi fi + + # Rebuild the config file from scratch + create_nova_conf + + if [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then + # Configure hypervisor plugin + configure_nova_hypervisor + fi } # create_nova_accounts() - Set up common required nova accounts @@ -447,14 +379,6 @@ function create_nova_conf() { iniset $NOVA_CONF DEFAULT ec2_workers "4" iniset $NOVA_CONF DEFAULT metadata_workers "4" iniset $NOVA_CONF DEFAULT sql_connection `database_connection_url nova` - if is_baremetal; then - iniset $NOVA_CONF baremetal sql_connection `database_connection_url nova_bm` - fi - if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then - iniset $NOVA_CONF DEFAULT libvirt_type "$LIBVIRT_TYPE" - iniset $NOVA_CONF DEFAULT libvirt_cpu_mode "none" - iniset $NOVA_CONF DEFAULT use_usb_tablet "False" - fi iniset $NOVA_CONF DEFAULT instance_name_template "${INSTANCE_NAME_PREFIX}%08x" iniset $NOVA_CONF osapi_v3 enabled "True" @@ -646,37 +570,8 @@ function install_novaclient() { # install_nova() - Collect source and prepare function install_nova() { - if is_service_enabled n-cpu; then - if [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then - install_nova_hypervisor - elif [[ "$VIRT_DRIVER" = 'libvirt' ]]; then - if is_ubuntu; then - install_package kvm - install_package libvirt-bin - install_package python-libvirt - elif is_fedora || is_suse; then - install_package kvm - install_package libvirt - install_package libvirt-python - else - exit_distro_not_supported "libvirt installation" - fi - - # Install and configure **LXC** if specified. LXC is another approach to - # splitting a system into many smaller parts. LXC uses cgroups and chroot - # to simulate multiple systems. 
- if [[ "$LIBVIRT_TYPE" == "lxc" ]]; then - if is_ubuntu; then - if [[ "$DISTRO" > natty ]]; then - install_package cgroup-lite - fi - else - ### FIXME(dtroyer): figure this out - echo "RPM-based cgroup not implemented yet" - yum_install libcgroup-tools - fi - fi - fi + if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then + install_nova_hypervisor fi git_clone $NOVA_REPO $NOVA_DIR $NOVA_BRANCH diff --git a/lib/nova_plugins/hypervisor-baremetal b/lib/nova_plugins/hypervisor-baremetal new file mode 100644 index 0000000000..4e7c1734d1 --- /dev/null +++ b/lib/nova_plugins/hypervisor-baremetal @@ -0,0 +1,93 @@ +# lib/nova_plugins/hypervisor-baremetal +# Configure the baremetal hypervisor + +# Enable with: +# VIRT_DRIVER=baremetal + +# Dependencies: +# ``functions`` file +# ``nova`` configuration + +# install_nova_hypervisor - install any external requirements +# configure_nova_hypervisor - make configuration changes, including those to other services +# start_nova_hypervisor - start any external services +# stop_nova_hypervisor - stop any external services +# cleanup_nova_hypervisor - remove transient data and cache + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + +NETWORK_MANAGER=${NETWORK_MANAGER:-FlatManager} +PUBLIC_INTERFACE_DEFAULT=eth0 +FLAT_INTERFACE=${FLAT_INTERFACE:-eth0} +FLAT_NETWORK_BRIDGE_DEFAULT=br100 +STUB_NETWORK=${STUB_NETWORK:-False} + + +# Entry Points +# ------------ + +# clean_nova_hypervisor - Clean up an installation +function cleanup_nova_hypervisor() { + # This function intentionally left blank + : +} + +# configure_nova_hypervisor - Set config files, create data dirs, etc +function configure_nova_hypervisor() { + configure_baremetal_nova_dirs + + iniset $NOVA_CONF baremetal sql_connection `database_connection_url nova_bm` + LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.firewall.NoopFirewallDriver"} + iniset $NOVA_CONF DEFAULT compute_driver 
nova.virt.baremetal.driver.BareMetalDriver + iniset $NOVA_CONF DEFAULT firewall_driver $LIBVIRT_FIREWALL_DRIVER + iniset $NOVA_CONF DEFAULT scheduler_host_manager nova.scheduler.baremetal_host_manager.BaremetalHostManager + iniset $NOVA_CONF DEFAULT ram_allocation_ratio 1.0 + iniset $NOVA_CONF DEFAULT reserved_host_memory_mb 0 + iniset $NOVA_CONF baremetal instance_type_extra_specs cpu_arch:$BM_CPU_ARCH + iniset $NOVA_CONF baremetal driver $BM_DRIVER + iniset $NOVA_CONF baremetal power_manager $BM_POWER_MANAGER + iniset $NOVA_CONF baremetal tftp_root /tftpboot + if [[ "$BM_DNSMASQ_FROM_NOVA_NETWORK" = "True" ]]; then + BM_DNSMASQ_CONF=$NOVA_CONF_DIR/dnsmasq-for-baremetal-from-nova-network.conf + sudo cp "$FILES/dnsmasq-for-baremetal-from-nova-network.conf" "$BM_DNSMASQ_CONF" + iniset $NOVA_CONF DEFAULT dnsmasq_config_file "$BM_DNSMASQ_CONF" + fi + + # Define extra baremetal nova conf flags by defining the array ``EXTRA_BAREMETAL_OPTS``. + for I in "${EXTRA_BAREMETAL_OPTS[@]}"; do + # Attempt to convert flags to options + iniset $NOVA_CONF baremetal ${I/=/ } + done +} + +# install_nova_hypervisor() - Install external components +function install_nova_hypervisor() { + # This function intentionally left blank + : +} + +# start_nova_hypervisor - Start any required external services +function start_nova_hypervisor() { + # This function intentionally left blank + : +} + +# stop_nova_hypervisor - Stop any external services +function stop_nova_hypervisor() { + # This function intentionally left blank + : +} + + +# Restore xtrace +$MY_XTRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt new file mode 100644 index 0000000000..caf0296ad2 --- /dev/null +++ b/lib/nova_plugins/hypervisor-libvirt @@ -0,0 +1,165 @@ +# lib/nova_plugins/hypervisor-libvirt +# Configure the libvirt hypervisor + +# Enable with: +# VIRT_DRIVER=libvirt + +# Dependencies: +# ``functions`` file +# ``nova`` 
configuration + +# install_nova_hypervisor - install any external requirements +# configure_nova_hypervisor - make configuration changes, including those to other services +# start_nova_hypervisor - start any external services +# stop_nova_hypervisor - stop any external services +# cleanup_nova_hypervisor - remove transient data and cache + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + + +# Entry Points +# ------------ + +# clean_nova_hypervisor - Clean up an installation +function cleanup_nova_hypervisor() { + # This function intentionally left blank + : +} + +# configure_nova_hypervisor - Set config files, create data dirs, etc +function configure_nova_hypervisor() { + if is_service_enabled neutron && is_neutron_ovs_base_plugin && ! sudo grep -q '^cgroup_device_acl' $QEMU_CONF; then + # Add /dev/net/tun to cgroup_device_acls, needed for type=ethernet interfaces + cat </etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla +[libvirt Management Access] +Identity=unix-group:$LIBVIRT_GROUP +Action=org.libvirt.unix.manage +ResultAny=yes +ResultInactive=yes +ResultActive=yes +EOF" + elif is_suse && [[ $os_RELEASE = 12.2 || "$os_VENDOR" = "SUSE LINUX" ]]; then + # openSUSE < 12.3 or SLE + # Work around the fact that polkit-default-privs overrules pklas + # with 'unix-group:$group'. 
+ sudo bash -c "cat </etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla +[libvirt Management Access] +Identity=unix-user:$USER +Action=org.libvirt.unix.manage +ResultAny=yes +ResultInactive=yes +ResultActive=yes +EOF" + else + # Starting with fedora 18 and opensuse-12.3 enable stack-user to + # virsh -c qemu:///system by creating a policy-kit rule for + # stack-user using the new Javascript syntax + rules_dir=/etc/polkit-1/rules.d + sudo mkdir -p $rules_dir + sudo bash -c "cat < $rules_dir/50-libvirt-$STACK_USER.rules +polkit.addRule(function(action, subject) { + if (action.id == 'org.libvirt.unix.manage' && + subject.user == '"$STACK_USER"') { + return polkit.Result.YES; + } +}); +EOF" + unset rules_dir + fi + fi + + # The user that nova runs as needs to be member of **libvirtd** group otherwise + # nova-compute will be unable to use libvirt. + if ! getent group $LIBVIRT_GROUP >/dev/null; then + sudo groupadd $LIBVIRT_GROUP + fi + add_user_to_group $STACK_USER $LIBVIRT_GROUP + + # libvirt detects various settings on startup, as we potentially changed + # the system configuration (modules, filesystems), we need to restart + # libvirt to detect those changes. + restart_service $LIBVIRT_DAEMON + + iniset $NOVA_CONF DEFAULT libvirt_type "$LIBVIRT_TYPE" + iniset $NOVA_CONF DEFAULT libvirt_cpu_mode "none" + iniset $NOVA_CONF DEFAULT use_usb_tablet "False" + iniset $NOVA_CONF DEFAULT compute_driver "libvirt.LibvirtDriver" + LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.libvirt.firewall.IptablesFirewallDriver"} + iniset $NOVA_CONF DEFAULT firewall_driver "$LIBVIRT_FIREWALL_DRIVER" + # Power architecture currently does not support graphical consoles. 
+ if is_arch "ppc64"; then + iniset $NOVA_CONF DEFAULT vnc_enabled "false" + fi +} + +# install_nova_hypervisor() - Install external components +function install_nova_hypervisor() { + if is_ubuntu; then + install_package kvm + install_package libvirt-bin + install_package python-libvirt + elif is_fedora || is_suse; then + install_package kvm + install_package libvirt + install_package libvirt-python + fi + + # Install and configure **LXC** if specified. LXC is another approach to + # splitting a system into many smaller parts. LXC uses cgroups and chroot + # to simulate multiple systems. + if [[ "$LIBVIRT_TYPE" == "lxc" ]]; then + if is_ubuntu; then + if [[ "$DISTRO" > natty ]]; then + install_package cgroup-lite + fi + else + ### FIXME(dtroyer): figure this out + echo "RPM-based cgroup not implemented yet" + yum_install libcgroup-tools + fi + fi +} + +# start_nova_hypervisor - Start any required external services +function start_nova_hypervisor() { + # This function intentionally left blank + : +} + +# stop_nova_hypervisor - Stop any external services +function stop_nova_hypervisor() { + # This function intentionally left blank + : +} + + +# Restore xtrace +$MY_XTRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/nova_plugins/hypervisor-openvz b/lib/nova_plugins/hypervisor-openvz new file mode 100644 index 0000000000..fc5ed0cd11 --- /dev/null +++ b/lib/nova_plugins/hypervisor-openvz @@ -0,0 +1,67 @@ +# lib/nova_plugins/hypervisor-openvz +# Configure the openvz hypervisor + +# Enable with: +# VIRT_DRIVER=openvz + +# Dependencies: +# ``functions`` file +# ``nova`` configuration + +# install_nova_hypervisor - install any external requirements +# configure_nova_hypervisor - make configuration changes, including those to other services +# start_nova_hypervisor - start any external services +# stop_nova_hypervisor - stop any external services +# cleanup_nova_hypervisor - remove transient data and cache + +# Save trace setting +MY_XTRACE=$(set +o | 
grep xtrace) +set +o xtrace + + +# Defaults +# -------- + + +# Entry Points +# ------------ + +# clean_nova_hypervisor - Clean up an installation +function cleanup_nova_hypervisor() { + # This function intentionally left blank + : +} + +# configure_nova_hypervisor - Set config files, create data dirs, etc +function configure_nova_hypervisor() { + iniset $NOVA_CONF DEFAULT compute_driver "openvz.OpenVzDriver" + iniset $NOVA_CONF DEFAULT connection_type "openvz" + LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.libvirt.firewall.IptablesFirewallDriver"} + iniset $NOVA_CONF DEFAULT firewall_driver "$LIBVIRT_FIREWALL_DRIVER" +} + +# install_nova_hypervisor() - Install external components +function install_nova_hypervisor() { + # This function intentionally left blank + : +} + +# start_nova_hypervisor - Start any required external services +function start_nova_hypervisor() { + # This function intentionally left blank + : +} + +# stop_nova_hypervisor - Stop any external services +function stop_nova_hypervisor() { + # This function intentionally left blank + : +} + + +# Restore xtrace +$MY_XTRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/nova_plugins/hypervisor-powervm b/lib/nova_plugins/hypervisor-powervm new file mode 100644 index 0000000000..561dd9f00b --- /dev/null +++ b/lib/nova_plugins/hypervisor-powervm @@ -0,0 +1,76 @@ +# lib/nova_plugins/hypervisor-powervm +# Configure the PowerVM hypervisor + +# Enable with: +# VIRT_DRIVER=powervm + +# Dependencies: +# ``functions`` file +# ``nova`` configuration + +# install_nova_hypervisor - install any external requirements +# configure_nova_hypervisor - make configuration changes, including those to other services +# start_nova_hypervisor - start any external services +# stop_nova_hypervisor - stop any external services +# cleanup_nova_hypervisor - remove transient data and cache + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + + 
+# Entry Points +# ------------ + +# clean_nova_hypervisor - Clean up an installation +function cleanup_nova_hypervisor() { + # This function intentionally left blank + : +} + +# configure_nova_hypervisor - Set config files, create data dirs, etc +function configure_nova_hypervisor() { + POWERVM_MGR_TYPE=${POWERVM_MGR_TYPE:-"ivm"} + POWERVM_MGR_HOST=${POWERVM_MGR_HOST:-"powervm.host"} + POWERVM_MGR_USER=${POWERVM_MGR_USER:-"padmin"} + POWERVM_MGR_PASSWD=${POWERVM_MGR_PASSWD:-"password"} + POWERVM_IMG_REMOTE_PATH=${POWERVM_IMG_REMOTE_PATH:-"/tmp"} + POWERVM_IMG_LOCAL_PATH=${POWERVM_IMG_LOCAL_PATH:-"/tmp"} + iniset $NOVA_CONF DEFAULT compute_driver nova.virt.powervm.PowerVMDriver + iniset $NOVA_CONF DEFAULT powervm_mgr_type $POWERVM_MGR_TYPE + iniset $NOVA_CONF DEFAULT powervm_mgr $POWERVM_MGR_HOST + iniset $NOVA_CONF DEFAULT powervm_mgr_user $POWERVM_MGR_USER + iniset $NOVA_CONF DEFAULT powervm_mgr_passwd $POWERVM_MGR_PASSWD + iniset $NOVA_CONF DEFAULT powervm_img_remote_path $POWERVM_IMG_REMOTE_PATH + iniset $NOVA_CONF DEFAULT powervm_img_local_path $POWERVM_IMG_LOCAL_PATH +} + +# install_nova_hypervisor() - Install external components +function install_nova_hypervisor() { + # This function intentionally left blank + : +} + +# start_nova_hypervisor - Start any required external services +function start_nova_hypervisor() { + # This function intentionally left blank + : +} + +# stop_nova_hypervisor - Stop any external services +function stop_nova_hypervisor() { + # This function intentionally left blank + : +} + + +# Restore xtrace +$MY_XTRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/stack.sh b/stack.sh index 7cd7e30d70..112fbc081e 100755 --- a/stack.sh +++ b/stack.sh @@ -291,13 +291,6 @@ source $TOP_DIR/lib/ldap source $TOP_DIR/lib/ironic source $TOP_DIR/lib/trove -# Look for Nova hypervisor plugin -NOVA_PLUGINS=$TOP_DIR/lib/nova_plugins -if is_service_enabled nova && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then - # Load plugin - 
source $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER -fi - # Set the destination directories for other OpenStack projects OPENSTACKCLIENT_DIR=$DEST/python-openstackclient @@ -882,6 +875,7 @@ if is_service_enabled g-reg; then init_glance fi + # Ironic # ------ @@ -891,7 +885,6 @@ if is_service_enabled ir-api ir-cond; then fi - # Neutron # ------- @@ -917,11 +910,6 @@ fi # Nova # ---- -if is_service_enabled nova; then - echo_summary "Configuring Nova" - configure_nova -fi - if is_service_enabled n-net q-dhcp; then # Delete traces of nova networks from prior runs # Do not kill any dnsmasq instance spawned by NetworkManager @@ -964,8 +952,6 @@ fi if is_service_enabled nova; then echo_summary "Configuring Nova" - # Rebuild the config file from scratch - create_nova_conf init_nova # Additional Nova configuration that is dependent on other services @@ -975,85 +961,6 @@ if is_service_enabled nova; then create_nova_conf_nova_network fi - - if [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then - # Configure hypervisor plugin - configure_nova_hypervisor - - - # OpenVZ - # ------ - - elif [ "$VIRT_DRIVER" = 'openvz' ]; then - echo_summary "Using OpenVZ virtualization driver" - iniset $NOVA_CONF DEFAULT compute_driver "openvz.OpenVzDriver" - iniset $NOVA_CONF DEFAULT connection_type "openvz" - LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.libvirt.firewall.IptablesFirewallDriver"} - iniset $NOVA_CONF DEFAULT firewall_driver "$LIBVIRT_FIREWALL_DRIVER" - - - # Bare Metal - # ---------- - - elif [ "$VIRT_DRIVER" = 'baremetal' ]; then - echo_summary "Using BareMetal driver" - LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.firewall.NoopFirewallDriver"} - iniset $NOVA_CONF DEFAULT compute_driver nova.virt.baremetal.driver.BareMetalDriver - iniset $NOVA_CONF DEFAULT firewall_driver $LIBVIRT_FIREWALL_DRIVER - iniset $NOVA_CONF DEFAULT scheduler_host_manager nova.scheduler.baremetal_host_manager.BaremetalHostManager - iniset $NOVA_CONF DEFAULT 
ram_allocation_ratio 1.0 - iniset $NOVA_CONF DEFAULT reserved_host_memory_mb 0 - iniset $NOVA_CONF baremetal instance_type_extra_specs cpu_arch:$BM_CPU_ARCH - iniset $NOVA_CONF baremetal driver $BM_DRIVER - iniset $NOVA_CONF baremetal power_manager $BM_POWER_MANAGER - iniset $NOVA_CONF baremetal tftp_root /tftpboot - if [[ "$BM_DNSMASQ_FROM_NOVA_NETWORK" = "True" ]]; then - BM_DNSMASQ_CONF=$NOVA_CONF_DIR/dnsmasq-for-baremetal-from-nova-network.conf - sudo cp "$FILES/dnsmasq-for-baremetal-from-nova-network.conf" "$BM_DNSMASQ_CONF" - iniset $NOVA_CONF DEFAULT dnsmasq_config_file "$BM_DNSMASQ_CONF" - fi - - # Define extra baremetal nova conf flags by defining the array ``EXTRA_BAREMETAL_OPTS``. - for I in "${EXTRA_BAREMETAL_OPTS[@]}"; do - # Attempt to convert flags to options - iniset $NOVA_CONF baremetal ${I/=/ } - done - - - # PowerVM - # ------- - - elif [ "$VIRT_DRIVER" = 'powervm' ]; then - echo_summary "Using PowerVM driver" - POWERVM_MGR_TYPE=${POWERVM_MGR_TYPE:-"ivm"} - POWERVM_MGR_HOST=${POWERVM_MGR_HOST:-"powervm.host"} - POWERVM_MGR_USER=${POWERVM_MGR_USER:-"padmin"} - POWERVM_MGR_PASSWD=${POWERVM_MGR_PASSWD:-"password"} - POWERVM_IMG_REMOTE_PATH=${POWERVM_IMG_REMOTE_PATH:-"/tmp"} - POWERVM_IMG_LOCAL_PATH=${POWERVM_IMG_LOCAL_PATH:-"/tmp"} - iniset $NOVA_CONF DEFAULT compute_driver nova.virt.powervm.PowerVMDriver - iniset $NOVA_CONF DEFAULT powervm_mgr_type $POWERVM_MGR_TYPE - iniset $NOVA_CONF DEFAULT powervm_mgr $POWERVM_MGR_HOST - iniset $NOVA_CONF DEFAULT powervm_mgr_user $POWERVM_MGR_USER - iniset $NOVA_CONF DEFAULT powervm_mgr_passwd $POWERVM_MGR_PASSWD - iniset $NOVA_CONF DEFAULT powervm_img_remote_path $POWERVM_IMG_REMOTE_PATH - iniset $NOVA_CONF DEFAULT powervm_img_local_path $POWERVM_IMG_LOCAL_PATH - - - # Default libvirt - # --------------- - - else - echo_summary "Using libvirt virtualization driver" - iniset $NOVA_CONF DEFAULT compute_driver "libvirt.LibvirtDriver" - 
LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.libvirt.firewall.IptablesFirewallDriver"} - iniset $NOVA_CONF DEFAULT firewall_driver "$LIBVIRT_FIREWALL_DRIVER" - # Power architecture currently does not support graphical consoles. - if is_arch "ppc64"; then - iniset $NOVA_CONF DEFAULT vnc_enabled "false" - fi - fi - init_nova_cells fi From 893e66360caf3bcf0578d4541b3c17d089c33b02 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Fri, 13 Sep 2013 15:05:51 -0500 Subject: [PATCH 0428/4704] Add meta-config via local.conf This defines a new local.conf file that is designed to take the place of all of the 'pass-through'[1] configuration options that have been defined in DevStack. * new local.conf file can contain multiple config file settings to be merged in to existing project config files * localrc can be embedded into local.conf and will auto-extract if localrc does not exist * Adds functions get_meta_section(), get_meta_section_files(), merge_config_file() and merge_config_group() * Adds EXTRA_OPTS, EXTRA_BAREMETAL_OPTS, Q_DHCP_EXTRA_DEFAULT_OPTS and Q_SRV_EXTRA_DEFAULT_OPTS to the deprecated warning list at the end of stack.sh [1] Pass-through options are those that do not configure or change DevStack's behaviour but simply set a value in a project config file. This includes most of the EXTRA_XXX_OPTS configuration variables. 
Change-Id: I367cadc86116621e9574ac203aafdab483d810d3 --- README.md | 39 ++++++++++ functions | 16 ++++ lib/config | 130 +++++++++++++++++++++++++++++++ stack.sh | 102 ++++++++++++++++++++++++ stackrc | 6 +- tests/test_config.sh | 179 +++++++++++++++++++++++++++++++++++++++++++ 6 files changed, 471 insertions(+), 1 deletion(-) create mode 100644 lib/config create mode 100755 tests/test_config.sh diff --git a/README.md b/README.md index 99e983887e..329b94d96f 100644 --- a/README.md +++ b/README.md @@ -244,3 +244,42 @@ To setup a cells environment add the following to your `localrc`: enable_service n-cell Be aware that there are some features currently missing in cells, one notable one being security groups. The exercises have been patched to disable functionality not supported by cells. + + +# Local Configuration + +Historically DevStack has used ``localrc`` to contain all local configuration and customizations. More and more of the configuration variables available for DevStack are passed-through to the individual project configuration files. The old mechanism for this required specific code for each file and did not scale well. This is handled now by a master local configuration file. + +# local.conf + +The new config file ``local.conf`` is an extended-INI format that introduces a new meta-section header that provides some additional information such as a phase name and destination config filename: + + [[ | ]] + +where is one of a set of phase names defined by ``stack.sh`` and is the project config filename. The filename is eval'ed in the stack.sh context so all environment variables are available and may be used. Using the project config file variables in the header is strongly suggested (see example of NOVA_CONF below). If the path of the config file does not exist it is skipped. 
+ +The defined phases are: + +* local - extracts ``localrc`` from ``local.conf`` before ``stackrc`` is sourced +* post-config - runs after the layer 2 services are configured and before they are started +* extra - runs after services are started and before any files in ``extra.d`` are executes + +The file is processed strictly in sequence; meta-sections may be specified more than once but if any settings are duplicated the last to appear in the file will be used. + + [[post-config|$NOVA_CONF]] + [DEFAULT] + use_syslog = True + + [osapi_v3] + enabled = False + +A specific meta-section ``local:localrc`` is used to provide a default localrc file. This allows all custom settings for DevStack to be contained in a single file. ``localrc`` is not overwritten if it exists to preserve compatability. + + [[local|localrc]] + FIXED_RANGE=10.254.1.0/24 + ADMIN_PASSWORD=speciale + LOGFILE=$DEST/logs/stack.sh.log + +Note that ``Q_PLUGIN_CONF_FILE`` is unique in that it is assumed to _NOT_ start with a ``/`` (slash) character. A slash will need to be added: + + [[post-config|/$Q_PLUGIN_CONF_FILE]] diff --git a/functions b/functions index f996ba89ab..87586eb17c 100644 --- a/functions +++ b/functions @@ -155,6 +155,22 @@ function err_if_not_set() { } +# Prints line number and "message" in warning format +# warn $LINENO "message" +function warn() { + local exitcode=$? + errXTRACE=$(set +o | grep xtrace) + set +o xtrace + local msg="[WARNING] ${BASH_SOURCE[2]}:$1 $2" + echo $msg 1>&2; + if [[ -n ${SCREEN_LOGDIR} ]]; then + echo $msg >> "${SCREEN_LOGDIR}/error.log" + fi + $errXTRACE + return $exitcode +} + + # HTTP and HTTPS proxy servers are supported via the usual environment variables [1] # ``http_proxy``, ``https_proxy`` and ``no_proxy``. 
They can be set in # ``localrc`` or on the command line if necessary:: diff --git a/lib/config b/lib/config new file mode 100644 index 0000000000..6f686e9b5d --- /dev/null +++ b/lib/config @@ -0,0 +1,130 @@ +# lib/config - Configuration file manipulation functions + +# These functions have no external dependencies and the following side-effects: +# +# CONFIG_AWK_CMD is defined, default is ``awk`` + +# Meta-config files contain multiple INI-style configuration files +# using a specific new section header to delimit them: +# +# [[group-name|file-name]] +# +# group-name refers to the group of configuration file changes to be processed +# at a particular time. These are called phases in ``stack.sh`` but +# group here as these functions are not DevStack-specific. +# +# file-name is the destination of the config file + +# Save trace setting +C_XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +# Allow the awk command to be overridden on legacy platforms +CONFIG_AWK_CMD=${CONFIG_AWK_CMD:-awk} + +# Get the section for the specific group and config file +# get_meta_section infile group configfile +function get_meta_section() { + local file=$1 + local matchgroup=$2 + local configfile=$3 + + [[ -r $file ]] || return 0 + [[ -z $configfile ]] && return 0 + + $CONFIG_AWK_CMD -v matchgroup=$matchgroup -v configfile=$configfile ' + BEGIN { group = "" } + /^\[\[.+|.*\]\]/ { + if (group == "") { + gsub("[][]", "", $1); + split($1, a, "|"); + if (a[1] == matchgroup && a[2] == configfile) { + group=a[1] + } + } else { + group="" + } + next + } + { + if (group != "") + print $0 + } + ' $file +} + + +# Get a list of config files for a specific group +# get_meta_section_files infile group +function get_meta_section_files() { + local file=$1 + local matchgroup=$2 + + [[ -r $file ]] || return 0 + + $CONFIG_AWK_CMD -v matchgroup=$matchgroup ' + /^\[\[.+\|.*\]\]/ { + gsub("[][]", "", $1); + split($1, a, "|"); + if (a[1] == matchgroup) + print a[2] + } + ' $file +} + + +# Merge the contents 
of a meta-config file into its destination config file +# If configfile does not exist it will be created. +# merge_config_file infile group configfile +function merge_config_file() { + local file=$1 + local matchgroup=$2 + local configfile=$3 + + [[ -r $configfile ]] || touch $configfile + + get_meta_section $file $matchgroup $configfile | \ + $CONFIG_AWK_CMD -v configfile=$configfile ' + BEGIN { section = "" } + /^\[.+\]/ { + gsub("[][]", "", $1); + section=$1 + next + } + /^ *\#/ { + next + } + /^.+/ { + split($0, d, " *= *") + print "iniset " configfile " " section " " d[1] " \"" d[2] "\"" + } + ' | while read a; do eval "$a"; done + +} + + +# Merge all of the files specified by group +# merge_config_group infile group [group ...] +function merge_config_group() { + local localfile=$1; shift + local matchgroups=$@ + + [[ -r $localfile ]] || return 0 + + for group in $matchgroups; do + for configfile in $(get_meta_section_files $localfile $group); do + if [[ -d $(dirname $configfile) ]]; then + merge_config_file $localfile $group $configfile + fi + done + done +} + + +# Restore xtrace +$C_XTRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/stack.sh b/stack.sh index be04bedade..f6ab4c49f6 100755 --- a/stack.sh +++ b/stack.sh @@ -29,6 +29,9 @@ TOP_DIR=$(cd $(dirname "$0") && pwd) # Import common functions source $TOP_DIR/functions +# Import config functions +source $TOP_DIR/lib/config + # Determine what system we are running on. 
This provides ``os_VENDOR``, # ``os_RELEASE``, ``os_UPDATE``, ``os_PACKAGE``, ``os_CODENAME`` # and ``DISTRO`` @@ -38,6 +41,25 @@ GetDistro # Global Settings # =============== +# Check for a ``localrc`` section embedded in ``local.conf`` and extract if +# ``localrc`` does not already exist + +# Phase: local +rm -f $TOP_DIR/.localrc.auto +if [[ -r $TOP_DIR/local.conf ]]; then + LRC=$(get_meta_section_files $TOP_DIR/local.conf local) + for lfile in $LRC; do + if [[ "$lfile" == "localrc" ]]; then + if [[ -r $TOP_DIR/localrc ]]; then + warn $LINENO "localrc and local.conf:[[local]] both exist, using localrc" + else + echo "# Generated file, do not exit" >$TOP_DIR/.localrc.auto + get_meta_section $TOP_DIR/local.conf local $lfile >>$TOP_DIR/.localrc.auto + fi + fi + done +fi + # ``stack.sh`` is customizable by setting environment variables. Override a # default setting via export:: # @@ -842,6 +864,9 @@ if is_service_enabled sysstat;then fi +# Start Services +# ============== + # Keystone # -------- @@ -1153,6 +1178,14 @@ if is_service_enabled nova && is_baremetal; then fi +# Local Configuration +# =================== + +# Apply configuration from local.conf if it exists for layer 2 services +# Phase: post-config +merge_config_group $TOP_DIR/local.conf post-config + + # Launch Services # =============== @@ -1348,6 +1381,14 @@ for i in BASE_SQL_CONN ENABLED_SERVICES HOST_IP LOGFILE \ done +# Local Configuration +# =================== + +# Apply configuration from local.conf if it exists for layer 2 services +# Phase: extra +merge_config_group $TOP_DIR/local.conf extra + + # Run extras # ========== @@ -1420,5 +1461,66 @@ if [[ -n "$DEPRECATED_TEXT" ]]; then echo_summary "WARNING: $DEPRECATED_TEXT" fi +# Specific warning for deprecated configs +if [[ -n "$EXTRA_OPTS" ]]; then + echo "" + echo_summary "WARNING: EXTRA_OPTS is used" + echo "You are using EXTRA_OPTS to pass configuration into nova.conf." 
+ echo "Please convert that configuration in localrc to a nova.conf section in local.conf:" + echo " +[[post-config|\$NOVA_CONF]] +[DEFAULT] +" + for I in "${EXTRA_OPTS[@]}"; do + # Replace the first '=' with ' ' for iniset syntax + echo ${I} + done +fi + +if [[ -n "$EXTRA_BAREMETAL_OPTS" ]]; then + echo "" + echo_summary "WARNING: EXTRA_OPTS is used" + echo "You are using EXTRA_OPTS to pass configuration into nova.conf." + echo "Please convert that configuration in localrc to a nova.conf section in local.conf:" + echo " +[[post-config|\$NOVA_CONF]] +[baremetal] +" + for I in "${EXTRA_BAREMETAL_OPTS[@]}"; do + # Replace the first '=' with ' ' for iniset syntax + echo ${I} + done +fi + +if [[ -n "$Q_DHCP_EXTRA_DEFAULT_OPTS" ]]; then + echo "" + echo_summary "WARNING: Q_DHCP_EXTRA_DEFAULT_OPTS is used" + echo "You are using Q_DHCP_EXTRA_DEFAULT_OPTS to pass configuration into $Q_DHCP_CONF_FILE." + echo "Please convert that configuration in localrc to a $Q_DHCP_CONF_FILE section in local.conf:" + echo " +[[post-config|\$Q_DHCP_CONF_FILE]] +[DEFAULT] +" + for I in "${Q_DHCP_EXTRA_DEFAULT_OPTS[@]}"; do + # Replace the first '=' with ' ' for iniset syntax + echo ${I} + done +fi + +if [[ -n "$Q_SRV_EXTRA_DEFAULT_OPTS" ]]; then + echo "" + echo_summary "WARNING: Q_SRV_EXTRA_DEFAULT_OPTS is used" + echo "You are using Q_SRV_EXTRA_DEFAULT_OPTS to pass configuration into $NEUTRON_CONF." + echo "Please convert that configuration in localrc to a $NEUTRON_CONF section in local.conf:" + echo " +[[post-config|\$NEUTRON_CONF]] +[DEFAULT] +" + for I in "${Q_SRV_EXTRA_DEFAULT_OPTS[@]}"; do + # Replace the first '=' with ' ' for iniset syntax + echo ${I} + done +fi + # Indicate how long this took to run (bash maintained variable ``SECONDS``) echo_summary "stack.sh completed in $SECONDS seconds." 
diff --git a/stackrc b/stackrc index 3a338d16f2..e4a96160d1 100644 --- a/stackrc +++ b/stackrc @@ -48,8 +48,12 @@ IDENTITY_API_VERSION=2.0 USE_SCREEN=True # allow local overrides of env variables, including repo config -if [ -f $RC_DIR/localrc ]; then +if [[ -f $RC_DIR/localrc ]]; then + # Old-style user-supplied config source $RC_DIR/localrc +elif [[ -f $RC_DIR/.localrc.auto ]]; then + # New-style user-supplied config extracted from local.conf + source $RC_DIR/.localrc.auto fi diff --git a/tests/test_config.sh b/tests/test_config.sh new file mode 100755 index 0000000000..fed2e7d477 --- /dev/null +++ b/tests/test_config.sh @@ -0,0 +1,179 @@ +#!/usr/bin/env bash + +# Tests for DevStack meta-config functions + +TOP=$(cd $(dirname "$0")/.. && pwd) + +# Import common functions +source $TOP/functions + +# Import config functions +source $TOP/lib/config + +# check_result() tests and reports the result values +# check_result "actual" "expected" +function check_result() { + local actual=$1 + local expected=$2 + if [[ "$actual" == "$expected" ]]; then + echo "OK" + else + echo -e "failed: $actual != $expected\n" + fi +} + +TEST_1C_ADD="[eee] +type=new +multi = foo2" + +function create_test1c() { + cat >test1c.conf <test2a.conf <test.conf < Date: Mon, 14 Oct 2013 00:51:10 -0500 Subject: [PATCH 0429/4704] remove useless step in cleanup_rpc_backend It shall not make dir of /var/run/openstack for the cleanup operation. install_rpc_backend will make the directory, which is covered by another take care of this. Change-Id: I2bf1bfb4a6b409cc04f2d7b94dd58627e0134b71 --- lib/rpc_backend | 4 ---- 1 file changed, 4 deletions(-) diff --git a/lib/rpc_backend b/lib/rpc_backend index ff87aae2af..c05bd8cb2a 100644 --- a/lib/rpc_backend +++ b/lib/rpc_backend @@ -86,10 +86,6 @@ function cleanup_rpc_backend { else exit_distro_not_supported "zeromq installation" fi - - # Necessary directory for socket location. 
- sudo mkdir -p /var/run/openstack - sudo chown $STACK_USER /var/run/openstack fi } From b5e11ff87409a6cac67378715379f739daaa2b0b Mon Sep 17 00:00:00 2001 From: DennyZhang Date: Mon, 14 Oct 2013 00:53:37 -0500 Subject: [PATCH 0430/4704] fix typo in functions Change-Id: I0d09d6d4f4405d3dc96f7a9eed62f87e5d3f8bc1 --- functions | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/functions b/functions index 01e2dfc8fd..6f09630ebd 100644 --- a/functions +++ b/functions @@ -2,7 +2,7 @@ # # The following variables are assumed to be defined by certain functions: # ``ENABLED_SERVICES`` -# ``EROR_ON_CLONE`` +# ``ERROR_ON_CLONE`` # ``FILES`` # ``GLANCE_HOSTPORT`` # ``OFFLINE`` From fdc9ae8b9e52004a4fa0a4e0cf7df67f81cba955 Mon Sep 17 00:00:00 2001 From: Min Li Date: Wed, 9 Oct 2013 15:45:41 -0400 Subject: [PATCH 0431/4704] Fix bugs for installing docker, bug #1237581. -The change in install_docker.sh corrects a typo ('=' should be '-'). This typo resutls in 'unable to locate the packet' error when executing apt-get. -The second change is in hypervisor-docker fix the error for reporting docker is not installed when docker is actually set up. The original line missed the version part of the package name. Change-Id: Ic48f45158cf84f89080f095d53c355e9f6969bfd --- lib/nova_plugins/hypervisor-docker | 2 +- tools/docker/install_docker.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/nova_plugins/hypervisor-docker b/lib/nova_plugins/hypervisor-docker index 4c8fc279b0..427554b7db 100644 --- a/lib/nova_plugins/hypervisor-docker +++ b/lib/nova_plugins/hypervisor-docker @@ -72,7 +72,7 @@ function install_nova_hypervisor() { fi # Make sure Docker is installed - if ! is_package_installed lxc-docker; then + if ! is_package_installed lxc-docker-${DOCKER_PACKAGE_VERSION}; then die $LINENO "Docker is not installed. 
Please run tools/docker/install_docker.sh" fi diff --git a/tools/docker/install_docker.sh b/tools/docker/install_docker.sh index 289002e8e7..483955bfc2 100755 --- a/tools/docker/install_docker.sh +++ b/tools/docker/install_docker.sh @@ -38,7 +38,7 @@ curl https://get.docker.io/gpg | sudo apt-key add - install_package python-software-properties && \ sudo sh -c "echo deb $DOCKER_APT_REPO docker main > /etc/apt/sources.list.d/docker.list" apt_get update -install_package --force-yes lxc-docker=${DOCKER_PACKAGE_VERSION} socat +install_package --force-yes lxc-docker-${DOCKER_PACKAGE_VERSION} socat # Start the daemon - restart just in case the package ever auto-starts... restart_service docker From 83dcf2046060b275373993959b118bb2f3f3ff58 Mon Sep 17 00:00:00 2001 From: Bob Ball Date: Sun, 29 Sep 2013 21:45:49 +0100 Subject: [PATCH 0432/4704] XenAPI: Get pool default SR rather than "Local storage" Match the nova default of using the Pool default SR as the storage for VMs Change-Id: I3b681ae7062ba0db3d9eab70b410b7d9658e37c9 --- tools/xen/functions | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/tools/xen/functions b/tools/xen/functions index a5c4b70bc3..a1d56568a8 100644 --- a/tools/xen/functions +++ b/tools/xen/functions @@ -69,11 +69,17 @@ function install_xapi_plugins_from { } function get_local_sr { - xe sr-list name-label="Local storage" --minimal + xe pool-list params=default-SR minimal=true } function get_local_sr_path { - echo "/var/run/sr-mount/$(get_local_sr)" + pbd_path="/var/run/sr-mount/$(get_local_sr)" + pbd_device_config_path=`xe pbd-list sr-uuid=$(get_local_sr) params=device-config | grep " path: "` + if [ -n "$pbd_device_config_path" ]; then + pbd_uuid=`xe pbd-list sr-uuid=$(get_local_sr) minimal=true` + pbd_path=`xe pbd-param-get uuid=$pbd_uuid param-name=device-config param-key=path || echo ""` + fi + echo $pbd_path } function find_ip_by_name() { From 557744faecc2f6701d1babf5060a771069b22e94 Mon Sep 17 00:00:00 2001 From: 
DennyZhang Date: Mon, 14 Oct 2013 09:50:13 -0500 Subject: [PATCH 0433/4704] refine rabbit cleanup Beside it's good to killall -9 epmd, if killall epmd fails Change-Id: Ide90ef8ac3339bd70d991666ce0d3550a088670b --- lib/rpc_backend | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/rpc_backend b/lib/rpc_backend index ff87aae2af..61908c41f4 100644 --- a/lib/rpc_backend +++ b/lib/rpc_backend @@ -63,7 +63,7 @@ function cleanup_rpc_backend { if is_service_enabled rabbit; then # Obliterate rabbitmq-server uninstall_package rabbitmq-server - sudo killall epmd + sudo killall epmd || sudo killall -9 epmd if is_ubuntu; then # And the Erlang runtime too sudo aptitude purge -y ~nerlang From 4fb255cf41d367cd8cc16a0e2d090f1c0733aa84 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Mon, 14 Oct 2013 14:07:00 -0400 Subject: [PATCH 0434/4704] add bash8 tool (like pep8, but way hackier) unlike our python code, we have no automatic style checking for bash. For the most part, it's not a big deal, but errant whitespace or incorrect indenting is sometimes annoying to have to -1 people's patches for. Instead of constantly picking it up in manual review maybe we can do better. This is an uber hacky script which could be used to do just that. ./tools/bash8.py file1 file2 file3 ... And it will show issues found with the files at hand. Lightly tested in the existing devstack tree, it exposes a few issues that we might want to think about. This should be python 3 compatible, and includes argparse to provide a basic '-h' support to explain how the command should be run. 
Change-Id: I5009fa5852595c2953a548e430e5e1ce06ae94e0 --- tools/bash8.py | 84 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 84 insertions(+) create mode 100755 tools/bash8.py diff --git a/tools/bash8.py b/tools/bash8.py new file mode 100755 index 0000000000..82a10107e1 --- /dev/null +++ b/tools/bash8.py @@ -0,0 +1,84 @@ +#!/usr/bin/env python +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# bash8 - a pep8 equivalent for bash scripts +# +# this program attempts to be an automated style checker for bash scripts +# to fill the same part of code review that pep8 does in most OpenStack +# projects. It starts from humble beginnings, and will evolve over time. 
+# +# Currently Supported checks +# +# Errors +# - E001: check that lines do not end with trailing whitespace +# - E002: ensure that indents are only spaces, and not hard tabs +# - E003: ensure all indents are a multiple of 4 spaces + +import argparse +import fileinput +import re +import sys + + +ERRORS = 0 + + +def print_error(error, line): + global ERRORS + ERRORS = ERRORS + 1 + print("%s: '%s'" % (error, line.rstrip('\n'))) + print(" - %s: L%s" % (fileinput.filename(), fileinput.filelineno())) + + +def check_no_trailing_whitespace(line): + if re.search('[ \t]+$', line): + print_error('E001: Trailing Whitespace', line) + + +def check_indents(line): + m = re.search('^(?P[ \t]+)', line) + if m: + if re.search('\t', m.group('indent')): + print_error('E002: Tab indents', line) + if (len(m.group('indent')) % 4) != 0: + print_error('E003: Indent not multiple of 4', line) + + +def check_files(files): + for line in fileinput.input(files): + check_no_trailing_whitespace(line) + check_indents(line) + + +def get_options(): + parser = argparse.ArgumentParser( + description='A bash script style checker') + parser.add_argument('files', metavar='file', nargs='+', + help='files to scan for errors') + return parser.parse_args() + + +def main(): + opts = get_options() + check_files(opts.files) + + if ERRORS > 0: + print("%d bash8 error(s) found" % ERRORS) + return 1 + else: + return 0 + + +if __name__ == "__main__": + sys.exit(main()) From e0f4065afdd591d2511a4d8689dacab98392b331 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Mon, 14 Oct 2013 17:46:51 -0400 Subject: [PATCH 0435/4704] add a simple run_tests.sh to use bash8 this gives a simple way to run against all the files that we'd want to check with bash8. Currently clocking in at 300+ errors (no false pos so far that I see). 
Change-Id: Idd83b0bf61029b49bb28ad8b6e6261ecbf927555 --- run_tests.sh | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) create mode 100755 run_tests.sh diff --git a/run_tests.sh b/run_tests.sh new file mode 100755 index 0000000000..9d9d18661e --- /dev/null +++ b/run_tests.sh @@ -0,0 +1,29 @@ +#!/bin/bash +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# +# this runs a series of unit tests for devstack to ensure it's functioning + +if [[ -n $@ ]]; then + FILES=$@ +else + LIBS=`find lib -type f | grep -v \.md` + SCRIPTS=`find . -type f -name \*\.sh` + EXTRA="functions" + FILES="$SCRIPTS $LIBS $EXTRA" +fi + +echo "Running bash8..." 
+ +./tools/bash8.py $FILES From 48e1bab5423b8bfa5c5f48736fa0af99e6f0f8fc Mon Sep 17 00:00:00 2001 From: DennyZhang Date: Fri, 11 Oct 2013 22:06:25 -0500 Subject: [PATCH 0436/4704] Fix false negative, when HEAT_CREATE_TEST_IMAGE is unset Fix shell variable comparision bug Closes-Bug: #1239041 Change-Id: Ifbc8545f929eb7bbf9b85df889dfd9fa3a96b7c0 --- lib/tempest | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index bc0b18d9f4..9f41608187 100644 --- a/lib/tempest +++ b/lib/tempest @@ -266,7 +266,7 @@ function configure_tempest() { iniset $TEMPEST_CONF boto ssh_user ${DEFAULT_INSTANCE_USER:-cirros} # Orchestration test image - if [ $HEAT_CREATE_TEST_IMAGE == "True" ]; then + if [[ "$HEAT_CREATE_TEST_IMAGE" = "True" ]]; then disk_image_create /usr/share/tripleo-image-elements "vm fedora heat-cfntools" "i386" "fedora-vm-heat-cfntools-tempest" iniset $TEMPEST_CONF orchestration image_ref "fedora-vm-heat-cfntools-tempest" fi From c48c3124c87de2c233c2596e1a759106b598b22b Mon Sep 17 00:00:00 2001 From: Roman Prykhodchenko Date: Tue, 1 Oct 2013 17:19:05 +0300 Subject: [PATCH 0437/4704] Enable keystone authentication in Ironic Currently Ironic installation script leaves authenticaiton strategy in its default value which is noauth. This is not relevant for the most of development and testing environments. This patch sets authentication strategy for Ironic to keystone and specifies the path to the policy file. Closes-bug: #1233612 Change-Id: Idacbda05663e7ef949cbce0dbdf28eaa36b6a1a9 --- lib/ironic | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/ironic b/lib/ironic index f3b4a72f66..8c1f52b330 100644 --- a/lib/ironic +++ b/lib/ironic @@ -79,6 +79,8 @@ function configure_ironic() { # configure_ironic_api() - Is used by configure_ironic(). Performs # API specific configuration. 
function configure_ironic_api() { + iniset $IRONIC_CONF_FILE DEFAULT auth_strategy keystone + iniset $IRONIC_CONF_FILE DEFAULT policy_file $IRONIC_POLICY_JSON iniset $IRONIC_CONF_FILE keystone_authtoken auth_host $KEYSTONE_AUTH_HOST iniset $IRONIC_CONF_FILE keystone_authtoken auth_port $KEYSTONE_AUTH_PORT iniset $IRONIC_CONF_FILE keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL From 43e00660c30d5f7b78d9eacfe2540a0c92fe5bb9 Mon Sep 17 00:00:00 2001 From: Roman Prykhodchenko Date: Tue, 15 Oct 2013 17:03:15 +0300 Subject: [PATCH 0438/4704] Install Ironic client Since python-ironicclient was published to github it's reasonable to include it to the default Ironic set up. Change-Id: Id1d0209959a3b482977b5e710c0885c714ad7e10 --- lib/ironic | 20 ++++++++++++++------ stack.sh | 1 + stackrc | 4 ++++ 3 files changed, 19 insertions(+), 6 deletions(-) diff --git a/lib/ironic b/lib/ironic index f3b4a72f66..89d0edc1a4 100644 --- a/lib/ironic +++ b/lib/ironic @@ -11,6 +11,7 @@ # ``stack.sh`` calls the entry points in this order: # # install_ironic +# install_ironicclient # configure_ironic # init_ironic # start_ironic @@ -27,6 +28,7 @@ set +o xtrace # Set up default directories IRONIC_DIR=$DEST/ironic +IRONICCLIENT_DIR=$DEST/python-ironicclient IRONIC_AUTH_CACHE_DIR=${IRONIC_AUTH_CACHE_DIR:-/var/cache/ironic} IRONIC_CONF_DIR=${IRONIC_CONF_DIR:-/etc/ironic} IRONIC_CONF_FILE=$IRONIC_CONF_DIR/ironic.conf @@ -45,6 +47,18 @@ IRONIC_HOSTPORT=${IRONIC_HOSTPORT:-$SERVICE_HOST:6385} # Functions # --------- +# install_ironic() - Collect source and prepare +function install_ironic() { + git_clone $IRONIC_REPO $IRONIC_DIR $IRONIC_BRANCH + setup_develop $IRONIC_DIR +} + +# install_ironicclient() - Collect sources and prepare +function install_ironicclient() { + git_clone $IRONICCLIENT_REPO $IRONICCLIENT_DIR $IRONICCLIENT_BRANCH + setup_develop $IRONICCLIENT_DIR +} + # cleanup_ironic() - Remove residual data files, anything left over from previous # runs that would need to clean up. 
function cleanup_ironic() { @@ -170,12 +184,6 @@ function init_ironic() { create_ironic_accounts } -# install_ironic() - Collect source and prepare -function install_ironic() { - git_clone $IRONIC_REPO $IRONIC_DIR $IRONIC_BRANCH - setup_develop $IRONIC_DIR -} - # start_ironic() - Start running processes, including screen function start_ironic() { # Start Ironic API server, if enabled. diff --git a/stack.sh b/stack.sh index 14ec023a51..2501cd0eb4 100755 --- a/stack.sh +++ b/stack.sh @@ -722,6 +722,7 @@ fi if is_service_enabled ir-api ir-cond; then install_ironic + install_ironicclient configure_ironic fi diff --git a/stackrc b/stackrc index 3f740b5678..0151672c1d 100644 --- a/stackrc +++ b/stackrc @@ -104,6 +104,10 @@ HORIZON_BRANCH=${HORIZON_BRANCH:-master} IRONIC_REPO=${IRONIC_REPO:-${GIT_BASE}/openstack/ironic.git} IRONIC_BRANCH=${IRONIC_BRANCH:-master} +# ironic client +IRONICCLIENT_REPO=${IRONICCLIENT_REPO:-${GIT_BASE}/openstack/python-ironicclient.git} +IRONICCLIENT_BRANCH=${IRONICCLIENT_BRANCH:-master} + # unified auth system (manages accounts/tokens) KEYSTONE_REPO=${KEYSTONE_REPO:-${GIT_BASE}/openstack/keystone.git} KEYSTONE_BRANCH=${KEYSTONE_BRANCH:-master} From cdf3d766478d04e62a860754298e7d86f89b33a9 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Tue, 15 Oct 2013 09:42:43 -0500 Subject: [PATCH 0439/4704] Add stack phases to extras.d handling Add hooks to stack.sh, unstack.sh and clean.sh to call the extras.d scripts at multiple points in stack.sh. This allows these scripts to perform installation and startup tasks at similar times as they would if integrated into stack.sh. extras.d/70-tempest.sh is present as an example of the structure of these scripts. See extras.d/README.md for more information. 
Change-Id: Ic1fe522559b94d204d6c0319a2e3d23684c8d028 --- README.md | 4 ++++ clean.sh | 19 +++++++++++++++++++ extras.d/80-tempest.sh | 32 ++++++++++++++++++++------------ extras.d/README | 14 -------------- extras.d/README.md | 31 +++++++++++++++++++++++++++++++ stack.sh | 34 +++++++++++++++++++++++++++++++++- unstack.sh | 11 +++++++++++ 7 files changed, 118 insertions(+), 27 deletions(-) delete mode 100644 extras.d/README create mode 100644 extras.d/README.md diff --git a/README.md b/README.md index 66e36b22a8..514786c60f 100644 --- a/README.md +++ b/README.md @@ -215,6 +215,10 @@ If tempest has been successfully configured, a basic set of smoke tests can be r $ cd /opt/stack/tempest $ nosetests tempest/scenario/test_network_basic_ops.py +# Additional Projects + +DevStack has a hook mechanism to call out to a dispatch script at specific points in the execution if `stack.sh`, `unstack.sh` and `clean.sh`. This allows higher-level projects, especially those that the lower level projects have no dependency on, to be added to DevStack without modifying the scripts. Tempest is built this way as an example of how to structure the dispatch script, see `extras.d/80-tempest.sh`. See `extras.d/README.md` for more information. + # Multi-Node Setup A more interesting setup involves running multiple compute nodes, with Neutron networks connecting VMs on different compute nodes. diff --git a/clean.sh b/clean.sh index 6ceb5a4933..395941ae21 100755 --- a/clean.sh +++ b/clean.sh @@ -47,6 +47,15 @@ source $TOP_DIR/lib/neutron source $TOP_DIR/lib/baremetal source $TOP_DIR/lib/ldap +# Extras Source +# -------------- + +# Phase: source +if [[ -d $TOP_DIR/extras.d ]]; then + for i in $TOP_DIR/extras.d/*.sh; do + [[ -r $i ]] && source $i source + done +fi # See if there is anything running... 
# need to adapt when run_service is merged @@ -56,6 +65,16 @@ if [[ -n "$SESSION" ]]; then $TOP_DIR/unstack.sh --all fi +# Run extras +# ========== + +# Phase: clean +if [[ -d $TOP_DIR/extras.d ]]; then + for i in $TOP_DIR/extras.d/*.sh; do + [[ -r $i ]] && source $i clean + done +fi + # Clean projects cleanup_oslo cleanup_cinder diff --git a/extras.d/80-tempest.sh b/extras.d/80-tempest.sh index f159955726..75b702c700 100644 --- a/extras.d/80-tempest.sh +++ b/extras.d/80-tempest.sh @@ -1,21 +1,29 @@ # tempest.sh - DevStack extras script -source $TOP_DIR/lib/tempest - -if [[ "$1" == "stack" ]]; then - # Configure Tempest last to ensure that the runtime configuration of - # the various OpenStack services can be queried. - if is_service_enabled tempest; then - echo_summary "Configuring Tempest" +if is_service_enabled tempest; then + if [[ "$1" == "source" ]]; then + # Initial source + source $TOP_DIR/lib/tempest + elif [[ "$1" == "stack" && "$2" == "install" ]]; then + echo_summary "Installing Tempest" install_tempest + elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then + # Tempest config must come after layer 2 services are running + : + elif [[ "$1" == "stack" && "$2" == "extra" ]]; then + echo_summary "Initializing Tempest" configure_tempest init_tempest fi -fi -if [[ "$1" == "unstack" ]]; then - # no-op - : -fi + if [[ "$1" == "unstack" ]]; then + # no-op + : + fi + if [[ "$1" == "clean" ]]; then + # no-op + : + fi +fi diff --git a/extras.d/README b/extras.d/README deleted file mode 100644 index ffc6793abd..0000000000 --- a/extras.d/README +++ /dev/null @@ -1,14 +0,0 @@ -The extras.d directory contains project initialization scripts to be -sourced by stack.sh at the end of its run. This is expected to be -used by external projects that want to be configured, started and -stopped with DevStack. - -Order is controlled by prefixing the script names with the a two digit -sequence number. Script names must end with '.sh'. 
This provides a -convenient way to disable scripts by simoy renaming them. - -DevStack reserves the sequence numbers 00 through 09 and 90 through 99 -for its own use. - -The scripts are called with an argument of 'stack' by stack.sh and -with an argument of 'unstack' by unstack.sh. diff --git a/extras.d/README.md b/extras.d/README.md new file mode 100644 index 0000000000..591e438b02 --- /dev/null +++ b/extras.d/README.md @@ -0,0 +1,31 @@ +# Extras Hooks + +The `extras.d` directory contains project dispatch scripts that are called +at specific times by `stack.sh`, `unstack.sh` and `clean.sh`. These hooks are +used to install, configure and start additional projects during a DevStack run +without any modifications to the base DevStack scripts. + +When `stack.sh` reaches one of the hook points it sources the scripts in `extras.d` +that end with `.sh`. To control the order that the scripts are sourced their +names start with a two digit sequence number. DevStack reserves the sequence +numbers 00 through 09 and 90 through 99 for its own use. + +The scripts are sourced at each hook point so they should not declare anything +at the top level that would cause a problem, specifically, functions. This does +allow the entire `stack.sh` variable space to be available. The scripts are +sourced with one or more arguments, the first of which defines the hook phase: + +arg 1: source | stack | unstack | clean + + source: always called first in any of the scripts, used to set the + initial defaults in a lib/* script or similar + + stack: called by stack.sh. There are three possible values for + the second arg to distinguish the phase stack.sh is in: + + arg 2: install | post-config | extra + + unstack: called by unstack.sh + + clean: called by clean.sh. Remember, clean.sh also calls unstack.sh + so that work need not be repeated. 
diff --git a/stack.sh b/stack.sh index 14ec023a51..aa0efea487 100755 --- a/stack.sh +++ b/stack.sh @@ -313,6 +313,16 @@ source $TOP_DIR/lib/ldap source $TOP_DIR/lib/ironic source $TOP_DIR/lib/trove +# Extras Source +# -------------- + +# Phase: source +if [[ -d $TOP_DIR/extras.d ]]; then + for i in $TOP_DIR/extras.d/*.sh; do + [[ -r $i ]] && source $i source + done +fi + # Set the destination directories for other OpenStack projects OPENSTACKCLIENT_DIR=$DEST/python-openstackclient @@ -725,6 +735,16 @@ if is_service_enabled ir-api ir-cond; then configure_ironic fi +# Extras Install +# -------------- + +# Phase: install +if [[ -d $TOP_DIR/extras.d ]]; then + for i in $TOP_DIR/extras.d/*.sh; do + [[ -r $i ]] && source $i stack install + done +fi + if [[ $TRACK_DEPENDS = True ]]; then $DEST/.venv/bin/pip freeze > $DEST/requires-post-pip if ! diff -Nru $DEST/requires-pre-pip $DEST/requires-post-pip > $DEST/requires.diff; then @@ -1000,6 +1020,17 @@ if is_service_enabled nova && is_baremetal; then fi +# Extras Configuration +# ==================== + +# Phase: post-config +if [[ -d $TOP_DIR/extras.d ]]; then + for i in $TOP_DIR/extras.d/*.sh; do + [[ -r $i ]] && source $i stack post-config + done +fi + + # Local Configuration # =================== @@ -1214,9 +1245,10 @@ merge_config_group $TOP_DIR/local.conf extra # Run extras # ========== +# Phase: extra if [[ -d $TOP_DIR/extras.d ]]; then for i in $TOP_DIR/extras.d/*.sh; do - [[ -r $i ]] && source $i stack + [[ -r $i ]] && source $i stack extra done fi diff --git a/unstack.sh b/unstack.sh index c944ccc0fb..67c8b7c7b1 100755 --- a/unstack.sh +++ b/unstack.sh @@ -42,6 +42,16 @@ source $TOP_DIR/lib/neutron source $TOP_DIR/lib/ironic source $TOP_DIR/lib/trove +# Extras Source +# -------------- + +# Phase: source +if [[ -d $TOP_DIR/extras.d ]]; then + for i in $TOP_DIR/extras.d/*.sh; do + [[ -r $i ]] && source $i source + done +fi + # Determine what system we are running on. 
This provides ``os_VENDOR``, # ``os_RELEASE``, ``os_UPDATE``, ``os_PACKAGE``, ``os_CODENAME`` GetOSVersion @@ -53,6 +63,7 @@ fi # Run extras # ========== +# Phase: unstack if [[ -d $TOP_DIR/extras.d ]]; then for i in $TOP_DIR/extras.d/*.sh; do [[ -r $i ]] && source $i unstack From 75e851a6de99d57eaab3e682b249067cb6065cd0 Mon Sep 17 00:00:00 2001 From: Sahid Orentino Ferdjaoui Date: Wed, 16 Oct 2013 08:34:05 +0000 Subject: [PATCH 0440/4704] exercices: aggregates needs to be more flexible The actual regex checks a result in python format and because of the change in the bug 1132961, Jekins failed. I have update the regex to work with the old result and the new result. Change-Id: I393e1358f99be5f20d9ac8b3e214355a453ecfcb Closes-Bug: 1239726 --- exercises/aggregates.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/exercises/aggregates.sh b/exercises/aggregates.sh index e2baecdb11..e5fc7dec84 100755 --- a/exercises/aggregates.sh +++ b/exercises/aggregates.sh @@ -100,7 +100,7 @@ META_DATA_2_KEY=foo META_DATA_3_KEY=bar #ensure no additional metadata is set -nova aggregate-details $AGGREGATE_ID | egrep "{u'availability_zone': u'$AGGREGATE_A_ZONE'}|{}" +nova aggregate-details $AGGREGATE_ID | egrep "\|[{u ]*'availability_zone.+$AGGREGATE_A_ZONE'[ }]*\|" nova aggregate-set-metadata $AGGREGATE_ID ${META_DATA_1_KEY}=123 nova aggregate-details $AGGREGATE_ID | grep $META_DATA_1_KEY @@ -117,7 +117,7 @@ nova aggregate-details $AGGREGATE_ID | grep $META_DATA_3_KEY nova aggregate-details $AGGREGATE_ID | grep $META_DATA_2_KEY && die $LINENO "ERROR metadata was not cleared" nova aggregate-set-metadata $AGGREGATE_ID $META_DATA_3_KEY $META_DATA_1_KEY -nova aggregate-details $AGGREGATE_ID | egrep "{u'availability_zone': u'$AGGREGATE_A_ZONE'}|{}" +nova aggregate-details $AGGREGATE_ID | egrep "\|[{u ]*'availability_zone.+$AGGREGATE_A_ZONE'[ }]*\|" # Test aggregate-add/remove-host From bd8ac01b02cafba7cfd98364c8f3009c19042da4 Mon Sep 17 00:00:00 2001 From: Mike Perez 
Date: Tue, 20 Aug 2013 21:53:30 -0700 Subject: [PATCH 0441/4704] Default to Cinder REST API v2 Set OS_VOLUME_API_VERSION environment variable to 2 so we use specifically Cinder REST API v2. v1 is still enabled in the catalog, but we want more exposure to v2 for testing. Change-Id: I6c2f29edf44a0f58a7830fe4dd2db35f2db3658c --- openrc | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/openrc b/openrc index 3de7e3958f..d5b215603a 100644 --- a/openrc +++ b/openrc @@ -81,3 +81,8 @@ export OS_CACERT=$INT_CA_DIR/ca-chain.pem export NOVA_VERSION=${NOVA_VERSION:-1.1} # In the future this will change names: export COMPUTE_API_VERSION=${COMPUTE_API_VERSION:-$NOVA_VERSION} + +# Currently cinderclient needs you to specify the *volume api* version. This +# needs to match the config of your catalog returned by Keystone. +export CINDER_VERSION=${CINDER_VERSION:-2} +export OS_VOLUME_API_VERSION=${OS_VOLUME_API_VERSION:-$CINDER_VERSION} From 65f1af6dd3ea97803cbd6f910e5619cca3ac5173 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 16 Oct 2013 12:10:13 -0500 Subject: [PATCH 0442/4704] Fix fixup_stuff.sh package permissions fix There are a number of different attempts to fix this issue, specifcally on RHEL6. None of them actually get it right. * This does not upgrade an OS installed package because we trust them to not make these sorts of permissions mistakes. Also we do not have nor want to figure out the right version that the OpenStack projects will require. * This specfically targets the upstream package versions as we do not know how later versions behave. 
This should address the following reviews: * https://review.openstack.org/#/c/50540/ * https://review.openstack.org/#/c/51233/ (1238707) * https://review.openstack.org/#/c/51651/ (1239747) * https://review.openstack.org/#/c/51843/ * https://review.openstack.org/#/c/51838/ * https://review.openstack.org/#/c/52148/ (1236941) Change-Id: I99906451dc25654628187b383e8893cce0e276bf --- tools/fixup_stuff.sh | 40 +++++++++++++++++++++++++--------------- 1 file changed, 25 insertions(+), 15 deletions(-) diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index f3c0f9810d..9e65b7c21e 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -35,25 +35,35 @@ FILES=$TOP_DIR/files # Python Packages # --------------- -# Pre-install affected packages so we can fix the permissions -pip_install prettytable -pip_install httplib2 +# get_package_path python-package # in import notation +function get_package_path() { + local package=$1 + echo $(python -c "import os; import $package; print(os.path.split(os.path.realpath($package.__file__))[0])") +} -SITE_DIRS=$(python -c "import site; import os; print os.linesep.join(site.getsitepackages())") -for dir in $SITE_DIRS; do - # Fix prettytable 0.7.2 permissions - if [[ -r $dir/prettytable.py ]]; then - sudo chmod +r $dir/prettytable-0.7.2*/* - fi +# Pre-install affected packages so we can fix the permissions +# These can go away once we are confident that pip 1.4.1+ is available everywhere - # Fix httplib2 0.8 permissions - httplib_dir=httplib2-0.8.egg-info - if [[ -d $dir/$httplib_dir ]]; then - sudo chmod +r $dir/$httplib_dir/* - fi +# Fix prettytable 0.7.2 permissions +# Don't specify --upgrade so we use the existing package if present +pip_install prettytable +PACKAGE_DIR=$(get_package_path prettytable) +# Only fix version 0.7.2 +dir=$(echo $PACKAGE_DIR/prettytable-0.7.2*) +if [[ -d $dir ]]; then + sudo chmod +r $dir/* +fi -done +# Fix httplib2 0.8 permissions +# Don't specify --upgrade so we use the existing package if present 
+pip_install httplib2 +PACKAGE_DIR=$(get_package_path httplib2) +# Only fix version 0.8 +dir=$(echo $PACKAGE_DIR-0.8*) +if [[ -d $dir ]]; then + sudo chmod +r $dir/* +fi # RHEL6 From cbce1fa418ccb271879040b117b96038fefb479f Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Thu, 26 Sep 2013 09:20:04 +0000 Subject: [PATCH 0443/4704] Just reset the repo instead of trying to co files We can just reset the repo instead of trying to checkout specific files. This fix external repos which doesn't not have requirements.txt or test-requirements.txt. Closes-Bug: 1231334 Change-Id: Iab898f5e8a422cc0cbfe44839c938f22c7525fd8 --- functions | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/functions b/functions index d969677fc5..92b4ee11cf 100644 --- a/functions +++ b/functions @@ -1257,7 +1257,7 @@ function setup_develop() { # Undo requirements changes, if we made them if [ $update_requirements -eq 0 ]; then - (cd $project_dir && git checkout -- requirements.txt test-requirements.txt setup.py) + (cd $project_dir && git reset --hard) fi } From 8b5d3cf3df65682f94a1885ef71d2fb31bdfb3ba Mon Sep 17 00:00:00 2001 From: Arata Notsu Date: Thu, 17 Oct 2013 21:42:49 +0900 Subject: [PATCH 0444/4704] Do not install pip when OFFLINE=True install_pip.sh trys to fetch pip from the internet even if OFFLINE=True. It causes stack.sh to fail if the environment is actually disconnected from the internet. With this patch, stack.sh skips install_pip.sh if OFFLINE=True. 
Change-Id: Ica9e5cfa0a4ee684c05393896c2fd6ddbd9ccd06 Closes-Bug: 1240956 --- stack.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index aa0efea487..625eb5f956 100755 --- a/stack.sh +++ b/stack.sh @@ -588,7 +588,9 @@ echo_summary "Installing package prerequisites" source $TOP_DIR/tools/install_prereqs.sh # Configure an appropriate python environment -$TOP_DIR/tools/install_pip.sh +if [[ "$OFFLINE" != "True" ]]; then + $TOP_DIR/tools/install_pip.sh +fi # Do the ugly hacks for borken packages and distros $TOP_DIR/tools/fixup_stuff.sh From 741fc5c08496db1518a7698b093aa1f696f67c4f Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Wed, 16 Oct 2013 17:48:16 -0400 Subject: [PATCH 0445/4704] Use nova.conf for auth_token configs. Updates lib/nova so that we use the application config file (nova.conf) instead of the Nova api-paste.ini config file. Related-Bug #1240753 Change-Id: I393a67f1f005e775928130c9241aa7e25c391ae3 --- lib/nova | 31 +++++++++++++++++++++---------- 1 file changed, 21 insertions(+), 10 deletions(-) diff --git a/lib/nova b/lib/nova index 8deb3a01a9..5ff5099c6d 100644 --- a/lib/nova +++ b/lib/nova @@ -212,26 +212,24 @@ function configure_nova() { configure_nova_rootwrap if is_service_enabled n-api; then - # Use the sample http middleware configuration supplied in the - # Nova sources. This paste config adds the configuration required - # for Nova to validate Keystone tokens. - # Remove legacy paste config if present rm -f $NOVA_DIR/bin/nova-api-paste.ini # Get the sample configuration file in place cp $NOVA_DIR/etc/nova/api-paste.ini $NOVA_CONF_DIR - iniset $NOVA_API_PASTE_INI filter:authtoken auth_host $KEYSTONE_AUTH_HOST + # Comment out the keystone configs in Nova's api-paste.ini. + # We are using nova.conf to configure this instead. 
+ inicomment $NOVA_API_PASTE_INI filter:authtoken auth_host if is_service_enabled tls-proxy; then - iniset $NOVA_API_PASTE_INI filter:authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL + inicomment $NOVA_API_PASTE_INI filter:authtoken auth_protocol fi - iniset $NOVA_API_PASTE_INI filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME - iniset $NOVA_API_PASTE_INI filter:authtoken admin_user nova - iniset $NOVA_API_PASTE_INI filter:authtoken admin_password $SERVICE_PASSWORD + inicomment $NOVA_API_PASTE_INI filter:authtoken admin_tenant_name + inicomment $NOVA_API_PASTE_INI filter:authtoken admin_user + inicomment $NOVA_API_PASTE_INI filter:authtoken admin_password fi - iniset $NOVA_API_PASTE_INI filter:authtoken signing_dir $NOVA_AUTH_CACHE_DIR + inicomment $NOVA_API_PASTE_INI filter:authtoken signing_dir if is_service_enabled n-cpu; then # Force IP forwarding on, just on case @@ -394,7 +392,20 @@ function create_nova_conf() { # Set the service port for a proxy to take the original iniset $NOVA_CONF DEFAULT osapi_compute_listen_port "$NOVA_SERVICE_PORT_INT" fi + + # Add keystone authtoken configuration + + iniset $NOVA_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST + if is_service_enabled tls-proxy; then + iniset $NOVA_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL + fi + iniset $NOVA_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME + iniset $NOVA_CONF keystone_authtoken admin_user nova + iniset $NOVA_CONF keystone_authtoken admin_password $SERVICE_PASSWORD fi + + iniset $NOVA_CONF keystone_authtoken signing_dir $NOVA_AUTH_CACHE_DIR + if is_service_enabled cinder; then iniset $NOVA_CONF DEFAULT volume_api_class "nova.volume.cinder.API" fi From 82dea7c64a1a7ac81a1a02753e516bb1d67eebd2 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Wed, 16 Oct 2013 18:57:15 -0400 Subject: [PATCH 0446/4704] Use cinder.conf for auth_token configs. 
Updates lib/cinder so that we use the application config file (cinder.conf) instead of the Cinder api-paste.ini config file. Related-Bug #1240753 Change-Id: I6636d33ee522757145ac97fc354324a8b9379700 --- lib/cinder | 24 +++++++++++++++++------- 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/lib/cinder b/lib/cinder index 220488a07e..f6f137cabd 100644 --- a/lib/cinder +++ b/lib/cinder @@ -202,15 +202,25 @@ function configure_cinder() { sudo mv $TEMPFILE /etc/sudoers.d/cinder-rootwrap cp $CINDER_DIR/etc/cinder/api-paste.ini $CINDER_API_PASTE_INI - iniset $CINDER_API_PASTE_INI filter:authtoken auth_host $KEYSTONE_AUTH_HOST - iniset $CINDER_API_PASTE_INI filter:authtoken auth_port $KEYSTONE_AUTH_PORT - iniset $CINDER_API_PASTE_INI filter:authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL - iniset $CINDER_API_PASTE_INI filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME - iniset $CINDER_API_PASTE_INI filter:authtoken admin_user cinder - iniset $CINDER_API_PASTE_INI filter:authtoken admin_password $SERVICE_PASSWORD - iniset $CINDER_API_PASTE_INI filter:authtoken signing_dir $CINDER_AUTH_CACHE_DIR + + inicomment $CINDER_API_PASTE_INI filter:authtoken auth_host + inicomment $CINDER_API_PASTE_INI filter:authtoken auth_port + inicomment $CINDER_API_PASTE_INI filter:authtoken auth_protocol + inicomment $CINDER_API_PASTE_INI filter:authtoken admin_tenant_name + inicomment $CINDER_API_PASTE_INI filter:authtoken admin_user + inicomment $CINDER_API_PASTE_INI filter:authtoken admin_password + inicomment $CINDER_API_PASTE_INI filter:authtoken signing_dir cp $CINDER_DIR/etc/cinder/cinder.conf.sample $CINDER_CONF + + iniset $CINDER_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST + iniset $CINDER_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT + iniset $CINDER_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL + iniset $CINDER_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME + iniset $CINDER_CONF keystone_authtoken admin_user cinder + 
iniset $CINDER_CONF keystone_authtoken admin_password $SERVICE_PASSWORD + iniset $CINDER_CONF keystone_authtoken signing_dir $CINDER_AUTH_CACHE_DIR + iniset $CINDER_CONF DEFAULT auth_strategy keystone iniset $CINDER_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL iniset $CINDER_CONF DEFAULT verbose True From 6d4a9a87b7aebca2de7bfe034dff630d49f52883 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Mon, 14 Oct 2013 16:20:32 +0200 Subject: [PATCH 0447/4704] Don't kill ceilometer prematurally in devstackgate This change ensure that 'ceilometer' processes are not killed by stack.sh when USE_SCREEN=False Fixes bug #1234254 Change-Id: I48dbf18ea0b169cdb5295a709d82c025f6fb8930 --- lib/ceilometer | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/lib/ceilometer b/lib/ceilometer index 1b0431906a..cd4c4d8656 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -134,12 +134,12 @@ function install_ceilometerclient() { # start_ceilometer() - Start running processes, including screen function start_ceilometer() { - screen_it ceilometer-acompute "sg $LIBVIRT_GROUP \"ceilometer-agent-compute --config-file $CEILOMETER_CONF\"" - screen_it ceilometer-acentral "ceilometer-agent-central --config-file $CEILOMETER_CONF" - screen_it ceilometer-collector "ceilometer-collector --config-file $CEILOMETER_CONF" - screen_it ceilometer-api "ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF" - screen_it ceilometer-alarm-notifier "ceilometer-alarm-notifier --config-file $CEILOMETER_CONF" - screen_it ceilometer-alarm-evaluator "ceilometer-alarm-evaluator --config-file $CEILOMETER_CONF" + screen_it ceilometer-acompute "cd ; sg $LIBVIRT_GROUP \"ceilometer-agent-compute --config-file $CEILOMETER_CONF\"" + screen_it ceilometer-acentral "cd ; ceilometer-agent-central --config-file $CEILOMETER_CONF" + screen_it ceilometer-collector "cd ; ceilometer-collector --config-file $CEILOMETER_CONF" + screen_it ceilometer-api "cd ; ceilometer-api -d -v 
--log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF" + screen_it ceilometer-alarm-notifier "cd ; ceilometer-alarm-notifier --config-file $CEILOMETER_CONF" + screen_it ceilometer-alarm-evaluator "cd ; ceilometer-alarm-evaluator --config-file $CEILOMETER_CONF" } # stop_ceilometer() - Stop running processes From a20c620c7d323b8f489cb20ac64c7ab62c8bb213 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Mon, 14 Oct 2013 16:16:35 +0200 Subject: [PATCH 0448/4704] Don't kill sar prematurally in devstack-gate This change ensure that 'sar' is not killed by stack.sh when USE_SCREEN=False Fixes bug #1238482 Change-Id: Id354619a43c27eabbc57f61ba33be2a9493244aa --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index aa0efea487..89a03b5a9b 100755 --- a/stack.sh +++ b/stack.sh @@ -840,7 +840,7 @@ init_service_check # If enabled, systat has to start early to track OpenStack service startup. if is_service_enabled sysstat;then if [[ -n ${SCREEN_LOGDIR} ]]; then - screen_it sysstat "sar -o $SCREEN_LOGDIR/$SYSSTAT_FILE $SYSSTAT_INTERVAL" + screen_it sysstat "cd ; sar -o $SCREEN_LOGDIR/$SYSSTAT_FILE $SYSSTAT_INTERVAL" else screen_it sysstat "sar $SYSSTAT_INTERVAL" fi From c01e6a789e7e79e735ca3a66965db07622ab3bea Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Mon, 14 Oct 2013 16:26:02 +0200 Subject: [PATCH 0449/4704] Don't kill nova-bm-deploy-helper prematurally This change ensure that 'nova-baremetal-deploy-helper' process is not killed by stack.sh when USE_SCREEN=False Change-Id: I84f6f3c3d09bf0cd0d4c5d94eb486a1f7d0b1d0f --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 89a03b5a9b..cf3baa2de0 100755 --- a/stack.sh +++ b/stack.sh @@ -1222,7 +1222,7 @@ if is_service_enabled nova && is_baremetal; then fi # ensure callback daemon is running sudo pkill nova-baremetal-deploy-helper || true - screen_it baremetal "nova-baremetal-deploy-helper" + screen_it baremetal "cd ; 
nova-baremetal-deploy-helper" fi # Save some values we generated for later use From 8111ef0aa55d2bec3ca958940171a5c9992eaee9 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Fri, 18 Oct 2013 16:21:26 +0200 Subject: [PATCH 0450/4704] Decrease tempest BUILD_TIMEOUT The BUILD_TIMEOUT is the generally used timeout option in tempest. Almost never expected to any operation takes more than 60 sec, the 400 sec is too match for timeout. Changing the BUILD_TIMEOUT to 196 sec, it is still expected to be safe. It can make faster the failing test jobs. Change-Id: I7e7c767400ca448cb86d27b60a1229a2afa69726 --- lib/tempest | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index 9f41608187..e8edeb37ca 100644 --- a/lib/tempest +++ b/lib/tempest @@ -48,7 +48,7 @@ TEMPEST_STATE_PATH=${TEMPEST_STATE_PATH:=$DATA_DIR/tempest} NOVA_SOURCE_DIR=$DEST/nova BUILD_INTERVAL=1 -BUILD_TIMEOUT=400 +BUILD_TIMEOUT=196 BOTO_MATERIALS_PATH="$FILES/images/s3-materials/cirros-0.3.1" From 105c6e8718da2db50e48cb4a68be8522a80e101e Mon Sep 17 00:00:00 2001 From: Bob Ball Date: Fri, 18 Oct 2013 15:33:26 +0100 Subject: [PATCH 0451/4704] Create-stack-user script should have execute permissions Currently running stack.sh as root advises you about this script, which is not executable Change-Id: I674af044b8f3c31bcc86be5c6552e8086453d5cd --- tools/create-stack-user.sh | 0 1 file changed, 0 insertions(+), 0 deletions(-) mode change 100644 => 100755 tools/create-stack-user.sh diff --git a/tools/create-stack-user.sh b/tools/create-stack-user.sh old mode 100644 new mode 100755 From f43f3a59c26979f40510b7531b587b029088c871 Mon Sep 17 00:00:00 2001 From: DennyZhang Date: Fri, 11 Oct 2013 23:09:47 -0500 Subject: [PATCH 0452/4704] ./stack.sh complain no /etc/nova/nova.conf If nova.conf doesn't exist, mute error generated by grep. 
Closes-Bug: #1239044 Change-Id: Ia497e2a9d8395cc11850fb16fd4075af9855b2a5 --- functions | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/functions b/functions index 01e2dfc8fd..eca0f9be16 100644 --- a/functions +++ b/functions @@ -697,7 +697,8 @@ function iniset() { local section=$2 local option=$3 local value=$4 - if ! grep -q "^\[$section\]" "$file"; then + + if ! grep -q "^\[$section\]" "$file" 2>/dev/null; then # Add section at the end echo -e "\n[$section]" >>"$file" fi From acb52e5db6884e6d2eeef8351306736a70345556 Mon Sep 17 00:00:00 2001 From: DennyZhang Date: Fri, 11 Oct 2013 00:08:29 -0500 Subject: [PATCH 0453/4704] detect failure of 'keystone token-get' When 'keystone token-get' fails, the caller can't detect the failure. This cause troulbe shooting a bit complicated. Change-Id: I3c58c5fd0e92a87e87546ea797904e08646a1097 Closes-Bug: #1238412 --- exercises/neutron-adv-test.sh | 1 + stack.sh | 1 + tools/upload_image.sh | 1 + 3 files changed, 3 insertions(+) diff --git a/exercises/neutron-adv-test.sh b/exercises/neutron-adv-test.sh index abb29cf333..e0c37ef723 100755 --- a/exercises/neutron-adv-test.sh +++ b/exercises/neutron-adv-test.sh @@ -102,6 +102,7 @@ KEYSTONE="keystone" # and save it. 
TOKEN=`keystone token-get | grep ' id ' | awk '{print $4}'` +die_if_not_set $LINENO TOKEN "Keystone fail to get token" # Various functions # ----------------- diff --git a/stack.sh b/stack.sh index 7cd7e30d70..024c52ef98 100755 --- a/stack.sh +++ b/stack.sh @@ -1203,6 +1203,7 @@ fi if is_service_enabled g-reg; then TOKEN=$(keystone token-get | grep ' id ' | get_field 2) + die_if_not_set $LINENO TOKEN "Keystone fail to get token" if is_baremetal; then echo_summary "Creating and uploading baremetal images" diff --git a/tools/upload_image.sh b/tools/upload_image.sh index dd21c9f2a8..d81a5c8dab 100755 --- a/tools/upload_image.sh +++ b/tools/upload_image.sh @@ -33,6 +33,7 @@ fi # Get a token to authenticate to glance TOKEN=$(keystone token-get | grep ' id ' | get_field 2) +die_if_not_set $LINENO TOKEN "Keystone fail to get token" # Glance connection info. Note the port must be specified. GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$GLANCE_HOST:9292} From b8dd27bf457d1c7a7ad0f1b3a946529c8a1d073f Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Thu, 17 Oct 2013 12:03:55 -0500 Subject: [PATCH 0454/4704] Fix typos and thinkos in docs Updates for the new major features and some clarification Partial-Bug: #1235626 Change-Id: If2da63e62a14894e498b4163b5052d9b2b2069ed --- HACKING.rst | 28 ++++---- README.md | 165 ++++++++++++++++++++++++++++++--------------- extras.d/README.md | 7 +- stack.sh | 2 +- 4 files changed, 131 insertions(+), 71 deletions(-) diff --git a/HACKING.rst b/HACKING.rst index 5f33d770f8..3c08e679d9 100644 --- a/HACKING.rst +++ b/HACKING.rst @@ -5,10 +5,10 @@ Contributing to DevStack General ------- -DevStack is written in POSIX shell script. This choice was made because -it best illustrates the configuration steps that this implementation takes -on setting up and interacting with OpenStack components. DevStack specifically -uses Bash and is compatible with Bash 3. +DevStack is written in UNIX shell script. 
It uses a number of bash-isms +and so is limited to Bash (version 3 and up) and compatible shells. +Shell script was chosen because it best illustrates the steps used to +set up and interact with OpenStack components. DevStack's official repository is located on GitHub at https://github.com/openstack-dev/devstack.git. Besides the master branch that @@ -54,14 +54,14 @@ Sometimes the script needs to know the location of the DevStack install director ``TOP_DIR`` should always point there, even if the script itself is located in a subdirectory:: - # Keep track of the current devstack directory. + # Keep track of the current DevStack directory. TOP_DIR=$(cd $(dirname "$0") && pwd) Many scripts will utilize shared functions from the ``functions`` file. There are also rc files (``stackrc`` and ``openrc``) that are often included to set the primary configuration of the user environment:: - # Keep track of the current devstack directory. + # Keep track of the current DevStack directory. TOP_DIR=$(cd $(dirname "$0") && pwd) # Import common functions @@ -100,13 +100,14 @@ stackrc ------- ``stackrc`` is the global configuration file for DevStack. It is responsible for -calling ``localrc`` if it exists so configuration can be overridden by the user. +calling ``local.conf`` (or ``localrc`` if it exists) so local user configuration +is recognized. The criteria for what belongs in ``stackrc`` can be vaguely summarized as follows: -* All project respositories and branches (for historical reasons) -* Global configuration that may be referenced in ``localrc``, i.e. ``DEST``, ``DATA_DIR`` +* All project repositories and branches handled directly in ``stack.sh`` +* Global configuration that may be referenced in ``local.conf``, i.e. ``DEST``, ``DATA_DIR`` * Global service configuration like ``ENABLED_SERVICES`` * Variables used by multiple services that do not have a clear owner, i.e. 
``VOLUME_BACKING_FILE_SIZE`` (nova-volumes and cinder) or ``PUBLIC_NETWORK_NAME`` @@ -116,8 +117,9 @@ follows: not be changed for other reasons but the earlier file needs to dereference a variable set in the later file. This should be rare. -Also, variable declarations in ``stackrc`` do NOT allow overriding (the form -``FOO=${FOO:-baz}``); if they did then they can already be changed in ``localrc`` +Also, variable declarations in ``stackrc`` before ``local.conf`` is sourced +do NOT allow overriding (the form +``FOO=${FOO:-baz}``); if they did then they can already be changed in ``local.conf`` and can stay in the project file. @@ -139,7 +141,9 @@ verbose in the comments _ABOVE_ the code they pertain to. Shocco also supports Markdown formatting in the comments; use it sparingly. Specifically, ``stack.sh`` uses Markdown headers to divide the script into logical sections. -.. _shocco: http://rtomayko.github.com/shocco/ +.. _shocco: https://github.com/dtroyer/shocco/tree/rst_support + +The script used to drive shocco is tools/build_docs.sh. Exercises diff --git a/README.md b/README.md index 514786c60f..640fab65f9 100644 --- a/README.md +++ b/README.md @@ -6,35 +6,39 @@ DevStack is a set of scripts and utilities to quickly deploy an OpenStack cloud. * To describe working configurations of OpenStack (which code branches work together? what do config files look like for those branches?) * To make it easier for developers to dive into OpenStack so that they can productively contribute without having to understand every part of the system at once * To make it easy to prototype cross-project features -* To sanity-check OpenStack builds (used in gating commits to the primary repos) +* To provide an environment for the OpenStack CI testing on every commit to the projects -Read more at http://devstack.org (built from the gh-pages branch) +Read more at http://devstack.org. 
-IMPORTANT: Be sure to carefully read `stack.sh` and any other scripts you execute before you run them, as they install software and may alter your networking configuration. We strongly recommend that you run `stack.sh` in a clean and disposable vm when you are first getting started. - -# DevStack on Xenserver - -If you would like to use Xenserver as the hypervisor, please refer to the instructions in `./tools/xen/README.md`. - -# DevStack on Docker - -If you would like to use Docker as the hypervisor, please refer to the instructions in `./tools/docker/README.md`. +IMPORTANT: Be sure to carefully read `stack.sh` and any other scripts you +execute before you run them, as they install software and will alter your +networking configuration. We strongly recommend that you run `stack.sh` +in a clean and disposable vm when you are first getting started. # Versions -The devstack master branch generally points to trunk versions of OpenStack components. For older, stable versions, look for branches named stable/[release] in the DevStack repo. For example, you can do the following to create a diablo OpenStack cloud: +The DevStack master branch generally points to trunk versions of OpenStack +components. For older, stable versions, look for branches named +stable/[release] in the DevStack repo. For example, you can do the +following to create a grizzly OpenStack cloud: - git checkout stable/diablo + git checkout stable/grizzly ./stack.sh -You can also pick specific OpenStack project releases by setting the appropriate `*_BRANCH` variables in `localrc` (look in `stackrc` for the default set). Usually just before a release there will be milestone-proposed branches that need to be tested:: +You can also pick specific OpenStack project releases by setting the appropriate +`*_BRANCH` variables in the ``localrc`` section of `local.conf` (look in +`stackrc` for the default set). 
Usually just before a release there will be +milestone-proposed branches that need to be tested:: GLANCE_REPO=https://github.com/openstack/glance.git GLANCE_BRANCH=milestone-proposed # Start A Dev Cloud -Installing in a dedicated disposable vm is safer than installing on your dev machine! Plus you can pick one of the supported Linux distros for your VM. To start a dev cloud run the following NOT AS ROOT (see below for more): +Installing in a dedicated disposable VM is safer than installing on your +dev machine! Plus you can pick one of the supported Linux distros for +your VM. To start a dev cloud run the following NOT AS ROOT (see +**DevStack Execution Environment** below for more on user accounts): ./stack.sh @@ -45,7 +49,7 @@ When the script finishes executing, you should be able to access OpenStack endpo We also provide an environment file that you can use to interact with your cloud via CLI: - # source openrc file to load your environment with osapi and ec2 creds + # source openrc file to load your environment with OpenStack CLI creds . openrc # list instances nova list @@ -61,16 +65,37 @@ If the EC2 API is your cup-o-tea, you can create credentials and use euca2ools: DevStack runs rampant over the system it runs on, installing things and uninstalling other things. Running this on a system you care about is a recipe for disappointment, or worse. Alas, we're all in the virtualization business here, so run it in a VM. And take advantage of the snapshot capabilities of your hypervisor of choice to reduce testing cycle times. You might even save enough time to write one more feature before the next feature freeze... -``stack.sh`` needs to have root access for a lot of tasks, but it also needs to have not-root permissions for most of its work and for all of the OpenStack services. So ``stack.sh`` specifically does not run if you are root. This is a recent change (Oct 2013) from the previous behaviour of automatically creating a ``stack`` user. 
Automatically creating a user account is not always the right response to running as root, so that bit is now an explicit step using ``tools/create-stack-user.sh``. Run that (as root!) if you do not want to just use your normal login here, which works perfectly fine. +``stack.sh`` needs to have root access for a lot of tasks, but uses ``sudo`` +for all of those tasks. However, it needs to be not-root for most of its +work and for all of the OpenStack services. ``stack.sh`` specifically +does not run if started as root. + +This is a recent change (Oct 2013) from the previous behaviour of +automatically creating a ``stack`` user. Automatically creating +user accounts is not the right response to running as root, so +that bit is now an explicit step using ``tools/create-stack-user.sh``. +Run that (as root!) or just check it out to see what DevStack's +expectations are for the account it runs under. Many people simply +use their usual login (the default 'ubuntu' login on a UEC image +for example). # Customizing -You can override environment variables used in `stack.sh` by creating file name `localrc`. It is likely that you will need to do this to tweak your networking configuration should you need to access your cloud from a different host. +You can override environment variables used in `stack.sh` by creating file +name `local.conf` with a ``locarc`` section as shown below. It is likely +that you will need to do this to tweak your networking configuration should +you need to access your cloud from a different host. + + [[local|localrc]] + VARIABLE=value + +See the **Local Configuration** section below for more details. # Database Backend Multiple database backends are available. The available databases are defined in the lib/databases directory. 
-`mysql` is the default database, choose a different one by putting the following in `localrc`: +`mysql` is the default database, choose a different one by putting the +following in the `localrc` section: disable_service mysql enable_service postgresql @@ -81,7 +106,7 @@ Multiple database backends are available. The available databases are defined in Multiple RPC backends are available. Currently, this includes RabbitMQ (default), Qpid, and ZeroMQ. Your backend of -choice may be selected via the `localrc`. +choice may be selected via the `localrc` section. Note that selecting more than one RPC backend will result in a failure. @@ -95,9 +120,10 @@ Example (Qpid): # Apache Frontend -Apache web server is enabled for wsgi services by setting `APACHE_ENABLED_SERVICES` in your localrc. But remember to enable these services at first as above. +Apache web server is enabled for wsgi services by setting +`APACHE_ENABLED_SERVICES` in your ``localrc`` section. Remember to +enable these services at first as above. -Example: APACHE_ENABLED_SERVICES+=keystone,swift # Swift @@ -108,23 +134,23 @@ vm. When running with only one replica the account, container and object services will run directly in screen. The others services like replicator, updaters or auditor runs in background. -If you would like to enable Swift you can add this to your `localrc` : +If you would like to enable Swift you can add this to your `localrc` section: enable_service s-proxy s-object s-container s-account If you want a minimal Swift install with only Swift and Keystone you -can have this instead in your `localrc`: +can have this instead in your `localrc` section: disable_all_services enable_service key mysql s-proxy s-object s-container s-account If you only want to do some testing of a real normal swift cluster with multiple replicas you can do so by customizing the variable -`SWIFT_REPLICAS` in your `localrc` (usually to 3). +`SWIFT_REPLICAS` in your `localrc` section (usually to 3). 
# Swift S3 -If you are enabling `swift3` in `ENABLED_SERVICES` devstack will +If you are enabling `swift3` in `ENABLED_SERVICES` DevStack will install the swift3 middleware emulation. Swift will be configured to act as a S3 endpoint for Keystone so effectively replacing the `nova-objectstore`. @@ -137,7 +163,7 @@ services are started in background and managed by `swift-init` tool. Basic Setup In order to enable Neutron a single node setup, you'll need the -following settings in your `localrc` : +following settings in your `localrc` section: disable_service n-net enable_service q-svc @@ -146,12 +172,15 @@ following settings in your `localrc` : enable_service q-l3 enable_service q-meta enable_service neutron - # Optional, to enable tempest configuration as part of devstack + # Optional, to enable tempest configuration as part of DevStack enable_service tempest Then run `stack.sh` as normal. -devstack supports adding specific Neutron configuration flags to the service, Open vSwitch plugin and LinuxBridge plugin configuration files. To make use of this feature, the following variables are defined and can be configured in your `localrc` file: +DevStack supports setting specific Neutron configuration flags to the +service, Open vSwitch plugin and LinuxBridge plugin configuration files. 
+To make use of this feature, the following variables are defined and can +be configured in your `localrc` section: Variable Name Config File Section Modified ------------------------------------------------------------------------------------- @@ -160,12 +189,14 @@ devstack supports adding specific Neutron configuration flags to the service, Op Q_AGENT_EXTRA_SRV_OPTS Plugin `OVS` (for Open Vswitch) or `LINUX_BRIDGE` (for LinuxBridge) Q_SRV_EXTRA_DEFAULT_OPTS Service DEFAULT -An example of using the variables in your `localrc` is below: +An example of using the variables in your `localrc` section is below: Q_AGENT_EXTRA_AGENT_OPTS=(tunnel_type=vxlan vxlan_udp_port=8472) Q_SRV_EXTRA_OPTS=(tenant_network_type=vxlan) -devstack also supports configuring the Neutron ML2 plugin. The ML2 plugin can run with the OVS, LinuxBridge, or Hyper-V agents on compute hosts. A simple way to configure the ml2 plugin is shown below: +DevStack also supports configuring the Neutron ML2 plugin. The ML2 plugin +can run with the OVS, LinuxBridge, or Hyper-V agents on compute hosts. A +simple way to configure the ml2 plugin is shown below: # VLAN configuration Q_PLUGIN=ml2 @@ -179,7 +210,9 @@ devstack also supports configuring the Neutron ML2 plugin. The ML2 plugin can ru Q_PLUGIN=ml2 Q_ML2_TENANT_NETWORK_TYPE=vxlan -The above will default in devstack to using the OVS on each compute host. To change this, set the `Q_AGENT` variable to the agent you want to run (e.g. linuxbridge). +The above will default in DevStack to using the OVS on each compute host. +To change this, set the `Q_AGENT` variable to the agent you want to run +(e.g. linuxbridge). Variable Name Notes ------------------------------------------------------------------------------------- @@ -194,13 +227,13 @@ The above will default in devstack to using the OVS on each compute host. To cha # Heat Heat is disabled by default. 
To enable it you'll need the following settings -in your `localrc` : +in your `localrc` section: enable_service heat h-api h-api-cfn h-api-cw h-eng Heat can also run in standalone mode, and be configured to orchestrate on an external OpenStack cloud. To launch only Heat in standalone mode -you'll need the following settings in your `localrc` : +you'll need the following settings in your `localrc` section: disable_all_services enable_service rabbit mysql heat h-api h-api-cfn h-api-cw h-eng @@ -215,9 +248,23 @@ If tempest has been successfully configured, a basic set of smoke tests can be r $ cd /opt/stack/tempest $ nosetests tempest/scenario/test_network_basic_ops.py +# DevStack on Xenserver + +If you would like to use Xenserver as the hypervisor, please refer to the instructions in `./tools/xen/README.md`. + +# DevStack on Docker + +If you would like to use Docker as the hypervisor, please refer to the instructions in `./tools/docker/README.md`. + # Additional Projects -DevStack has a hook mechanism to call out to a dispatch script at specific points in the execution if `stack.sh`, `unstack.sh` and `clean.sh`. This allows higher-level projects, especially those that the lower level projects have no dependency on, to be added to DevStack without modifying the scripts. Tempest is built this way as an example of how to structure the dispatch script, see `extras.d/80-tempest.sh`. See `extras.d/README.md` for more information. +DevStack has a hook mechanism to call out to a dispatch script at specific +points in the execution of `stack.sh`, `unstack.sh` and `clean.sh`. This +allows upper-layer projects, especially those that the lower layer projects +have no dependency on, to be added to DevStack without modifying the core +scripts. Tempest is built this way as an example of how to structure the +dispatch script, see `extras.d/80-tempest.sh`. See `extras.d/README.md` +for more information. 
# Multi-Node Setup @@ -232,7 +279,8 @@ You should run at least one "controller node", which should have a `stackrc` tha enable_service q-meta enable_service neutron -You likely want to change your `localrc` to run a scheduler that will balance VMs across hosts: +You likely want to change your `localrc` section to run a scheduler that +will balance VMs across hosts: SCHEDULER=nova.scheduler.simple.SimpleScheduler @@ -249,7 +297,7 @@ You can then run many compute nodes, each of which should have a `stackrc` which Cells is a new scaling option with a full spec at http://wiki.openstack.org/blueprint-nova-compute-cells. -To setup a cells environment add the following to your `localrc`: +To setup a cells environment add the following to your `localrc` section: enable_service n-cell @@ -264,32 +312,41 @@ Historically DevStack has used ``localrc`` to contain all local configuration an The new config file ``local.conf`` is an extended-INI format that introduces a new meta-section header that provides some additional information such as a phase name and destination config filename: - [[ | ]] + [[ | ]] -where is one of a set of phase names defined by ``stack.sh`` and is the project config filename. The filename is eval'ed in the stack.sh context so all environment variables are available and may be used. Using the project config file variables in the header is strongly suggested (see example of NOVA_CONF below). If the path of the config file does not exist it is skipped. +where ```` is one of a set of phase names defined by ``stack.sh`` +and ```` is the configuration filename. The filename is +eval'ed in the ``stack.sh`` context so all environment variables are +available and may be used. Using the project config file variables in +the header is strongly suggested (see the ``NOVA_CONF`` example below). +If the path of the config file does not exist it is skipped. 
The defined phases are: -* local - extracts ``localrc`` from ``local.conf`` before ``stackrc`` is sourced -* post-config - runs after the layer 2 services are configured and before they are started -* extra - runs after services are started and before any files in ``extra.d`` are executes +* **local** - extracts ``localrc`` from ``local.conf`` before ``stackrc`` is sourced +* **post-config** - runs after the layer 2 services are configured and before they are started +* **extra** - runs after services are started and before any files in ``extra.d`` are executed The file is processed strictly in sequence; meta-sections may be specified more than once but if any settings are duplicated the last to appear in the file will be used. - [[post-config|$NOVA_CONF]] - [DEFAULT] - use_syslog = True + [[post-config|$NOVA_CONF]] + [DEFAULT] + use_syslog = True - [osapi_v3] - enabled = False + [osapi_v3] + enabled = False -A specific meta-section ``local:localrc`` is used to provide a default localrc file. This allows all custom settings for DevStack to be contained in a single file. ``localrc`` is not overwritten if it exists to preserve compatability. +A specific meta-section ``local|localrc`` is used to provide a default +``localrc`` file (actually ``.localrc.auto``). This allows all custom +settings for DevStack to be contained in a single file. If ``localrc`` +exists it will be used instead to preserve backward-compatibility. - [[local|localrc]] - FIXED_RANGE=10.254.1.0/24 - ADMIN_PASSWORD=speciale - LOGFILE=$DEST/logs/stack.sh.log + [[local|localrc]] + FIXED_RANGE=10.254.1.0/24 + ADMIN_PASSWORD=speciale + LOGFILE=$DEST/logs/stack.sh.log -Note that ``Q_PLUGIN_CONF_FILE`` is unique in that it is assumed to _NOT_ start with a ``/`` (slash) character. A slash will need to be added: +Note that ``Q_PLUGIN_CONF_FILE`` is unique in that it is assumed to *NOT* +start with a ``/`` (slash) character. 
A slash will need to be added: - [[post-config|/$Q_PLUGIN_CONF_FILE]] + [[post-config|/$Q_PLUGIN_CONF_FILE]] diff --git a/extras.d/README.md b/extras.d/README.md index 591e438b02..88e4265ced 100644 --- a/extras.d/README.md +++ b/extras.d/README.md @@ -10,12 +10,11 @@ that end with `.sh`. To control the order that the scripts are sourced their names start with a two digit sequence number. DevStack reserves the sequence numbers 00 through 09 and 90 through 99 for its own use. -The scripts are sourced at each hook point so they should not declare anything -at the top level that would cause a problem, specifically, functions. This does -allow the entire `stack.sh` variable space to be available. The scripts are +The scripts are sourced at the beginning of each script that calls them. The +entire `stack.sh` variable space is available. The scripts are sourced with one or more arguments, the first of which defines the hook phase: -arg 1: source | stack | unstack | clean + source | stack | unstack | clean source: always called first in any of the scripts, used to set the initial defaults in a lib/* script or similar diff --git a/stack.sh b/stack.sh index aa0efea487..b3380a8775 100755 --- a/stack.sh +++ b/stack.sh @@ -53,7 +53,7 @@ if [[ -r $TOP_DIR/local.conf ]]; then if [[ -r $TOP_DIR/localrc ]]; then warn $LINENO "localrc and local.conf:[[local]] both exist, using localrc" else - echo "# Generated file, do not exit" >$TOP_DIR/.localrc.auto + echo "# Generated file, do not edit" >$TOP_DIR/.localrc.auto get_meta_section $TOP_DIR/local.conf local $lfile >>$TOP_DIR/.localrc.auto fi fi From 2e159460126febc8be6d65477cc94ef6ef159649 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Mon, 21 Oct 2013 13:06:11 -0700 Subject: [PATCH 0455/4704] Allow starting nova-compute manually This breaks out the code that starts nova-compute into a separate function. 
This will be used for upgrade testing so that we can arrange for a nova-compute running on a different version of the code to be running alongside the rest of the stack. Change-Id: I88687cefdac7fa4a3c45789461a95fd8d061aba6 --- lib/nova | 35 +++++++++++++++++++++++------------ 1 file changed, 23 insertions(+), 12 deletions(-) diff --git a/lib/nova b/lib/nova index 5ff5099c6d..09c6a50dd5 100644 --- a/lib/nova +++ b/lib/nova @@ -610,20 +610,10 @@ function start_nova_api() { fi } -# start_nova() - Start running processes, including screen -function start_nova() { +# start_nova_compute() - Start the compute process +function start_nova_compute() { NOVA_CONF_BOTTOM=$NOVA_CONF - # ``screen_it`` checks ``is_service_enabled``, it is not needed here - screen_it n-cond "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-conductor" - - if is_service_enabled n-cell; then - NOVA_CONF_BOTTOM=$NOVA_CELLS_CONF - screen_it n-cond "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-conductor --config-file $NOVA_CELLS_CONF" - screen_it n-cell-region "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cells --config-file $NOVA_CONF" - screen_it n-cell-child "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cells --config-file $NOVA_CELLS_CONF" - fi - if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then # The group **$LIBVIRT_GROUP** is added to the current user in this script. # Use 'sg' to execute nova-compute as a member of the **$LIBVIRT_GROUP** group. 
@@ -639,6 +629,22 @@ function start_nova() { fi screen_it n-cpu "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-compute --config-file $NOVA_CONF_BOTTOM" fi +} + +# start_nova() - Start running processes, including screen +function start_nova_rest() { + NOVA_CONF_BOTTOM=$NOVA_CONF + + # ``screen_it`` checks ``is_service_enabled``, it is not needed here + screen_it n-cond "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-conductor" + + if is_service_enabled n-cell; then + NOVA_CONF_BOTTOM=$NOVA_CELLS_CONF + screen_it n-cond "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-conductor --config-file $NOVA_CELLS_CONF" + screen_it n-cell-region "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cells --config-file $NOVA_CONF" + screen_it n-cell-child "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cells --config-file $NOVA_CELLS_CONF" + fi + screen_it n-crt "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cert" screen_it n-net "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-network --config-file $NOVA_CONF_BOTTOM" screen_it n-sch "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-scheduler --config-file $NOVA_CONF_BOTTOM" @@ -655,6 +661,11 @@ function start_nova() { screen_it n-obj "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-objectstore" } +function start_nova() { + start_nova_compute + start_nova_rest +} + # stop_nova() - Stop running processes (non-screen) function stop_nova() { # Kill the nova screen windows From a45a0a0276f542ef5d624067e98dfa2de830fd84 Mon Sep 17 00:00:00 2001 From: Denis Egorenko Date: Tue, 1 Oct 2013 16:03:39 +0000 Subject: [PATCH 0456/4704] Added Savanna Project Added services Savanna, Savanna Dashboard, Savanna python client. 
Implements blueprint devstack-savanna-support Implements blueprint devstack-integration Change-Id: I8725f59a0cc9aef4817988470313136c56711cf1 --- exercises/savanna.sh | 43 +++++++++++++++++++ extras.d/70-savanna.sh | 31 ++++++++++++++ lib/savanna | 97 ++++++++++++++++++++++++++++++++++++++++++ lib/savanna-dashboard | 70 ++++++++++++++++++++++++++++++ 4 files changed, 241 insertions(+) create mode 100755 exercises/savanna.sh create mode 100644 extras.d/70-savanna.sh create mode 100644 lib/savanna create mode 100644 lib/savanna-dashboard diff --git a/exercises/savanna.sh b/exercises/savanna.sh new file mode 100755 index 0000000000..fc3f9760e5 --- /dev/null +++ b/exercises/savanna.sh @@ -0,0 +1,43 @@ +#!/usr/bin/env bash + +# **savanna.sh** + +# Sanity check that Savanna started if enabled + +echo "*********************************************************************" +echo "Begin DevStack Exercise: $0" +echo "*********************************************************************" + +# This script exits on an error so that errors don't compound and you see +# only the first error that occurred. +set -o errexit + +# Print the commands being run so that we can see the command that triggers +# an error. It is also useful for following allowing as the install occurs. +set -o xtrace + + +# Settings +# ======== + +# Keep track of the current directory +EXERCISE_DIR=$(cd $(dirname "$0") && pwd) +TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) + +# Import common functions +source $TOP_DIR/functions + +# Import configuration +source $TOP_DIR/openrc + +# Import exercise configuration +source $TOP_DIR/exerciserc + +is_service_enabled savanna || exit 55 + +curl http://$SERVICE_HOST:8386/ 2>/dev/null | grep -q 'Auth' || die $LINENO "Savanna API not functioning!" 
+ +set +o xtrace +echo "*********************************************************************" +echo "SUCCESS: End DevStack Exercise: $0" +echo "*********************************************************************" diff --git a/extras.d/70-savanna.sh b/extras.d/70-savanna.sh new file mode 100644 index 0000000000..f6881cc4f6 --- /dev/null +++ b/extras.d/70-savanna.sh @@ -0,0 +1,31 @@ +# savanna.sh - DevStack extras script to install Savanna + +if is_service_enabled savanna; then + if [[ "$1" == "source" ]]; then + # Initial source + source $TOP_DIR/lib/savanna + source $TOP_DIR/lib/savanna-dashboard + elif [[ "$1" == "stack" && "$2" == "install" ]]; then + echo_summary "Installing Savanna" + install_savanna + if is_service_enabled horizon; then + install_savanna_dashboard + fi + elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then + echo_summary "Configuring Savanna" + configure_savanna + if is_service_enabled horizon; then + configure_savanna_dashboard + fi + elif [[ "$1" == "stack" && "$2" == "extra" ]]; then + echo_summary "Initializing Savanna" + start_savanna + fi + + if [[ "$1" == "unstack" ]]; then + stop_savanna + if is_service_enabled horizon; then + cleanup_savanna_dashboard + fi + fi +fi diff --git a/lib/savanna b/lib/savanna new file mode 100644 index 0000000000..e9dbe72643 --- /dev/null +++ b/lib/savanna @@ -0,0 +1,97 @@ +# lib/savanna + +# Dependencies: +# ``functions`` file +# ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined +# ``ADMIN_{TENANT_NAME|PASSWORD}`` must be defined + +# ``stack.sh`` calls the entry points in this order: +# +# install_savanna +# configure_savanna +# start_savanna +# stop_savanna + +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + +# Set up default repos +SAVANNA_REPO=${SAVANNA_REPO:-${GIT_BASE}/openstack/savanna.git} +SAVANNA_BRANCH=${SAVANNA_BRANCH:-master} + +# Set up default directories +SAVANNA_DIR=$DEST/savanna 
+SAVANNA_CONF_DIR=${SAVANNA_CONF_DIR:-/etc/savanna} +SAVANNA_CONF_FILE=savanna.conf +ADMIN_TENANT_NAME=${ADMIN_TENANT_NAME:-admin} +ADMIN_NAME=${ADMIN_NAME:-admin} +ADMIN_PASSWORD=${ADMIN_PASSWORD:-nova} +SAVANNA_DEBUG=${SAVANNA_DEBUG:-True} + +# Support entry points installation of console scripts +if [[ -d $SAVANNA_DIR/bin ]]; then + SAVANNA_BIN_DIR=$SAVANNA_DIR/bin +else + SAVANNA_BIN_DIR=$(get_python_exec_prefix) +fi + +# Functions +# --------- + +# configure_savanna() - Set config files, create data dirs, etc +function configure_savanna() { + + if [[ ! -d $SAVANNA_CONF_DIR ]]; then + sudo mkdir -p $SAVANNA_CONF_DIR + fi + sudo chown $STACK_USER $SAVANNA_CONF_DIR + + # Copy over savanna configuration file and configure common parameters. + cp $SAVANNA_DIR/etc/savanna/savanna.conf.sample $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE + + iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT os_admin_password $ADMIN_PASSWORD + iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT os_admin_username $ADMIN_NAME + iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT os_admin_tenant_name $ADMIN_TENANT_NAME + iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT debug $SAVANNA_DEBUG + + recreate_database savanna utf8 + iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE database sql_connection `database_connection_url savanna` + inicomment $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE database connection + + if is_service_enabled neutron; then + iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT use_neutron true + iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT use_floating_ips true + fi + + iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT use_syslog $SYSLOG +} + +# install_savanna() - Collect source and prepare +function install_savanna() { + git_clone $SAVANNA_REPO $SAVANNA_DIR $SAVANNA_BRANCH + setup_develop $SAVANNA_DIR +} + +# start_savanna() - Start running processes, including screen +function start_savanna() { + screen_it savanna "cd $SAVANNA_DIR && $SAVANNA_BIN_DIR/savanna-api 
--config-file $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE" +} + +# stop_savanna() - Stop running processes +function stop_savanna() { + # Kill the Savanna screen windows + screen -S $SCREEN_NAME -p savanna -X kill +} + + +# Restore xtrace +$XTRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/savanna-dashboard b/lib/savanna-dashboard new file mode 100644 index 0000000000..9562db4e1c --- /dev/null +++ b/lib/savanna-dashboard @@ -0,0 +1,70 @@ +# lib/savanna-dashboard + +# Dependencies: +# ``functions`` file +# ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined +# ``SERVICE_HOST + +# ``stack.sh`` calls the entry points in this order: +# +# install_savanna_dashboard +# configure_savanna_dashboard +# cleanup_savanna_dashboard + +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace + +source $TOP_DIR/lib/horizon + +# Defaults +# -------- + +# Set up default repos +SAVANNA_DASHBOARD_REPO=${SAVANNA_DASHBOARD_REPO:-${GIT_BASE}/openstack/savanna-dashboard.git} +SAVANNA_DASHBOARD_BRANCH=${SAVANNA_DASHBOARD_BRANCH:-master} + +SAVANNA_PYTHONCLIENT_REPO=${SAVANNA_PYTHONCLIENT_REPO:-${GIT_BASE}/openstack/python-savannaclient.git} +SAVANNA_PYTHONCLIENT_BRANCH=${SAVANNA_PYTHONCLIENT_BRANCH:-master} + +# Set up default directories +SAVANNA_DASHBOARD_DIR=$DEST/savanna_dashboard +SAVANNA_PYTHONCLIENT_DIR=$DEST/python-savannaclient + +# Functions +# --------- + +function configure_savanna_dashboard() { + + echo -e "SAVANNA_URL = \"http://$SERVICE_HOST:8386/v1.1\"\nAUTO_ASSIGNMENT_ENABLED = False" >> $HORIZON_DIR/openstack_dashboard/local/local_settings.py + echo -e "HORIZON_CONFIG['dashboards'] += ('savanna',)\nINSTALLED_APPS += ('savannadashboard',)" >> $HORIZON_DIR/openstack_dashboard/settings.py + + if is_service_enabled neutron; then + echo -e "SAVANNA_USE_NEUTRON = True" >> $HORIZON_DIR/openstack_dashboard/local/local_settings.py + fi +} + +# install_savanna_dashboard() - Collect source and prepare +function install_savanna_dashboard() { + 
install_python_savannaclient + git_clone $SAVANNA_DASHBOARD_REPO $SAVANNA_DASHBOARD_DIR $SAVANNA_DASHBOARD_BRANCH + setup_develop $SAVANNA_DASHBOARD_DIR +} + +function install_python_savannaclient() { + git_clone $SAVANNA_PYTHONCLIENT_REPO $SAVANNA_PYTHONCLIENT_DIR $SAVANNA_PYTHONCLIENT_BRANCH + setup_develop $SAVANNA_PYTHONCLIENT_DIR +} + +# Cleanup file settings.py from Savanna +function cleanup_savanna_dashboard() { + sed -i '/savanna/d' $HORIZON_DIR/openstack_dashboard/settings.py +} + +# Restore xtrace +$XTRACE + +# Local variables: +# mode: shell-script +# End: + From e095daa73267cedbd3cc7b68f517bbe0624f770e Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Thu, 29 Aug 2013 15:45:20 -0400 Subject: [PATCH 0457/4704] Make nova use fatal_deprecations=true We should not be using deprecated config options here, so lets set fatal_deprecations=True to make sure. Stop using deprecated LibvirtHybridOVSBridgeDriver Change-Id: I0a43780270d092a42ede6c0667343f0d02b3aa67 --- lib/neutron_plugins/ovs_base | 8 +------- lib/nova | 1 + 2 files changed, 2 insertions(+), 7 deletions(-) diff --git a/lib/neutron_plugins/ovs_base b/lib/neutron_plugins/ovs_base index 2666d8e8ba..1214f3bcbd 100644 --- a/lib/neutron_plugins/ovs_base +++ b/lib/neutron_plugins/ovs_base @@ -73,13 +73,7 @@ function _neutron_ovs_base_configure_l3_agent() { } function _neutron_ovs_base_configure_nova_vif_driver() { - # The hybrid VIF driver needs to be specified when Neutron Security Group - # is enabled (until vif_security attributes are supported in VIF extension) - if [[ "$Q_USE_SECGROUP" == "True" ]]; then - NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver"} - else - NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"} - fi + NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"} } # Restore xtrace diff --git a/lib/nova b/lib/nova index 5ff5099c6d..615683437d 100644 --- a/lib/nova +++ b/lib/nova @@ -377,6 
+377,7 @@ function create_nova_conf() { iniset $NOVA_CONF DEFAULT ec2_workers "4" iniset $NOVA_CONF DEFAULT metadata_workers "4" iniset $NOVA_CONF DEFAULT sql_connection `database_connection_url nova` + iniset $NOVA_CONF DEFAULT fatal_deprecations "True" iniset $NOVA_CONF DEFAULT instance_name_template "${INSTANCE_NAME_PREFIX}%08x" iniset $NOVA_CONF osapi_v3 enabled "True" From dc30bd3eb457aaea66451621695cddfa8213a169 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 22 Oct 2013 07:30:47 -0400 Subject: [PATCH 0458/4704] exit cleanup in functions we should always use die instead of exit so that we know why we failed. Also remove instances where exit is called after die, as that is a noop. Change-Id: I8e08cce63d35c503c36ff1e09805f3db427d082d --- functions | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/functions b/functions index d969677fc5..0aef47ef78 100644 --- a/functions +++ b/functions @@ -580,7 +580,8 @@ function git_clone { if echo $GIT_REF | egrep -q "^refs"; then # If our branch name is a gerrit style refs/changes/... if [[ ! -d $GIT_DEST ]]; then - [[ "$ERROR_ON_CLONE" = "True" ]] && exit 1 + [[ "$ERROR_ON_CLONE" = "True" ]] && \ + die $LINENO "Cloning not allowed in this configuration" git clone $GIT_REMOTE $GIT_DEST fi cd $GIT_DEST @@ -588,7 +589,8 @@ function git_clone { else # do a full clone only if the directory doesn't exist if [[ ! 
-d $GIT_DEST ]]; then - [[ "$ERROR_ON_CLONE" = "True" ]] && exit 1 + [[ "$ERROR_ON_CLONE" = "True" ]] && \ + die $LINENO "Cloning not allowed in this configuration" git clone $GIT_REMOTE $GIT_DEST cd $GIT_DEST # This checkout syntax works for both branches and tags @@ -612,8 +614,7 @@ function git_clone { elif [[ -n "`git show-ref refs/remotes/origin/$GIT_REF`" ]]; then git_update_remote_branch $GIT_REF else - echo $GIT_REF is neither branch nor tag - exit 1 + die $LINENO "$GIT_REF is neither branch nor tag" fi fi @@ -1562,7 +1563,6 @@ function _ping_check_novanet() { else die $LINENO "[Fail] Could ping server" fi - exit 1 fi } @@ -1575,7 +1575,6 @@ function get_instance_ip(){ if [[ $ip = "" ]];then echo "$nova_result" die $LINENO "[Fail] Coudn't get ipaddress of VM" - exit 1 fi echo $ip } From 6832272a1816238d6671865771b92691dc65a205 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Mon, 21 Oct 2013 18:11:40 -0400 Subject: [PATCH 0459/4704] add lib/stackforge to let us test wsme / pecan from git wsme and pecan libraries have migrated to stackforge for development. If we support them in devstack, we can use their git version instead of the release version, which ensures that they won't break the rest of OpenStack when they cut a new release. This is similar to how oslo testing works. Long term we probably want a more generic mechanism to handle this, but for now, this should get us rolling, and get them gating. Change-Id: Icf3475f433081c7c625864107d7e118e214396e1 --- lib/stackforge | 67 ++++++++++++++++++++++++++++++++++++++++++++++++++ stack.sh | 4 +++ stackrc | 10 ++++++++ 3 files changed, 81 insertions(+) create mode 100644 lib/stackforge diff --git a/lib/stackforge b/lib/stackforge new file mode 100644 index 0000000000..4b79de0c94 --- /dev/null +++ b/lib/stackforge @@ -0,0 +1,67 @@ +# lib/stackforge +# +# Functions to install stackforge libraries that we depend on so +# that we can try their git versions during devstack gate. 
+# +# This is appropriate for python libraries that release to pypi and are +# expected to be used beyond OpenStack like, but are requirements +# for core services in global-requirements. +# * wsme +# * pecan +# +# This is not appropriate for stackforge projects which are early stage +# OpenStack tools + +# Dependencies: +# ``functions`` file + +# ``stack.sh`` calls the entry points in this order: +# +# install_stackforge + +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- +WSME_DIR=$DEST/wsme +PECAN_DIR=$DEST/pecan + +# Entry Points +# ------------ + +# install_stackforge() - Collect source and prepare +function install_stackforge() { + # TODO(sdague): remove this once we get to Icehouse, this just makes + # for a smoother transition of existing users. + cleanup_stackforge + + git_clone $WSME_REPO $WSME_DIR $WSME_BRANCH + setup_develop $WSME_DIR + + git_clone $PECAN_REPO $PECAN_DIR $PECAN_BRANCH + setup_develop $PECAN_DIR +} + +# cleanup_stackforge() - purge possibly old versions of stackforge libraries +function cleanup_stackforge() { + # this means we've got an old version installed, lets get rid of it + # otherwise python hates itself + for lib in wsme pecan; do + if ! python -c "import $lib" 2>/dev/null; then + echo "Found old $lib... 
removing to ensure consistency" + local PIP_CMD=$(get_pip_command) + pip_install $lib + sudo $PIP_CMD uninstall -y $lib + fi + done +} + +# Restore xtrace +$XTRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/stack.sh b/stack.sh index aa0efea487..df5cd4cc47 100755 --- a/stack.sh +++ b/stack.sh @@ -299,6 +299,7 @@ source $TOP_DIR/lib/apache source $TOP_DIR/lib/tls source $TOP_DIR/lib/infra source $TOP_DIR/lib/oslo +source $TOP_DIR/lib/stackforge source $TOP_DIR/lib/horizon source $TOP_DIR/lib/keystone source $TOP_DIR/lib/glance @@ -627,6 +628,9 @@ install_infra # Install oslo libraries that have graduated install_oslo +# Install stackforge libraries for testing +install_stackforge + # Install clients libraries install_keystoneclient install_glanceclient diff --git a/stackrc b/stackrc index 3f740b5678..b9d636a2b3 100644 --- a/stackrc +++ b/stackrc @@ -193,6 +193,16 @@ TROVE_BRANCH=${TROVE_BRANCH:-master} TROVECLIENT_REPO=${TROVECLIENT_REPO:-${GIT_BASE}/openstack/python-troveclient.git} TROVECLIENT_BRANCH=${TROVECLIENT_BRANCH:-master} +# stackforge libraries that are used by OpenStack core services +# wsme +WSME_REPO=${WSME_REPO:-${GIT_BASE}/stackforge/wsme.git} +WSME_BRANCH=${WSME_BRANCH:-master} + +# pecan +PECAN_REPO=${PECAN_REPO:-${GIT_BASE}/stackforge/pecan.git} +PECAN_BRANCH=${PECAN_BRANCH:-master} + + # Nova hypervisor configuration. We default to libvirt with **kvm** but will # drop back to **qemu** if we are unable to load the kvm module. ``stack.sh`` can # also install an **LXC**, **OpenVZ** or **XenAPI** based system. If xenserver-core From 537d4025c511d9b162726bb5c972da72028573ed Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 22 Oct 2013 07:43:22 -0400 Subject: [PATCH 0460/4704] whitespace cleanup on functions & lib/config fix some of the bash8 issues found in functions and lib/config, part of the long march towards fixing all the bash8 issues. 
Change-Id: Ia131f64870acb0f9d196fe1a9a45d633abb6fc4d --- functions | 50 +++++++++++++++++++++++++------------------------- lib/config | 14 +++++++------- 2 files changed, 32 insertions(+), 32 deletions(-) diff --git a/functions b/functions index 4afebe020e..0ab2afcafa 100644 --- a/functions +++ b/functions @@ -1372,9 +1372,9 @@ function upload_image() { IMAGE="$FILES/${IMAGE_FNAME}" IMAGE_NAME="${IMAGE_FNAME%.xen-raw.tgz}" glance \ - --os-auth-token $token \ - --os-image-url http://$GLANCE_HOSTPORT \ - image-create \ + --os-auth-token $token \ + --os-image-url http://$GLANCE_HOSTPORT \ + image-create \ --name "$IMAGE_NAME" --is-public=True \ --container-format=tgz --disk-format=raw \ --property vm_mode=xen < "${IMAGE}" @@ -1397,11 +1397,11 @@ function upload_image() { mkdir "$xdir" tar -zxf $FILES/$IMAGE_FNAME -C "$xdir" KERNEL=$(for f in "$xdir/"*-vmlinuz* "$xdir/"aki-*/image; do - [ -f "$f" ] && echo "$f" && break; done; true) + [ -f "$f" ] && echo "$f" && break; done; true) RAMDISK=$(for f in "$xdir/"*-initrd* "$xdir/"ari-*/image; do - [ -f "$f" ] && echo "$f" && break; done; true) + [ -f "$f" ] && echo "$f" && break; done; true) IMAGE=$(for f in "$xdir/"*.img "$xdir/"ami-*/image; do - [ -f "$f" ] && echo "$f" && break; done; true) + [ -f "$f" ] && echo "$f" && break; done; true) if [[ -z "$IMAGE_NAME" ]]; then IMAGE_NAME=$(basename "$IMAGE" ".img") fi @@ -1692,23 +1692,23 @@ function check_path_perm_sanity() { # # _vercmp_r sep ver1 ver2 function _vercmp_r { - typeset sep - typeset -a ver1=() ver2=() - sep=$1; shift - ver1=("${@:1:sep}") - ver2=("${@:sep+1}") + typeset sep + typeset -a ver1=() ver2=() + sep=$1; shift + ver1=("${@:1:sep}") + ver2=("${@:sep+1}") - if ((ver1 > ver2)); then - echo 1; return 0 - elif ((ver2 > ver1)); then - echo -1; return 0 - fi + if ((ver1 > ver2)); then + echo 1; return 0 + elif ((ver2 > ver1)); then + echo -1; return 0 + fi - if ((sep <= 1)); then - echo 0; return 0 - fi + if ((sep <= 1)); then + echo 0; return 0 + fi - 
_vercmp_r $((sep-1)) "${ver1[@]:1}" "${ver2[@]:1}" + _vercmp_r $((sep-1)) "${ver1[@]:1}" "${ver2[@]:1}" } @@ -1730,13 +1730,13 @@ function _vercmp_r { # # vercmp_numbers ver1 ver2 vercmp_numbers() { - typeset v1=$1 v2=$2 sep - typeset -a ver1 ver2 + typeset v1=$1 v2=$2 sep + typeset -a ver1 ver2 - IFS=. read -ra ver1 <<< "$v1" - IFS=. read -ra ver2 <<< "$v2" + IFS=. read -ra ver1 <<< "$v1" + IFS=. read -ra ver2 <<< "$v2" - _vercmp_r "${#ver1[@]}" "${ver1[@]}" "${ver2[@]}" + _vercmp_r "${#ver1[@]}" "${ver1[@]}" "${ver2[@]}" } diff --git a/lib/config b/lib/config index 6f686e9b5d..91cefe48cc 100644 --- a/lib/config +++ b/lib/config @@ -10,7 +10,7 @@ # [[group-name|file-name]] # # group-name refers to the group of configuration file changes to be processed -# at a particular time. These are called phases in ``stack.sh`` but +# at a particular time. These are called phases in ``stack.sh`` but # group here as these functions are not DevStack-specific. # # file-name is the destination of the config file @@ -64,12 +64,12 @@ function get_meta_section_files() { [[ -r $file ]] || return 0 $CONFIG_AWK_CMD -v matchgroup=$matchgroup ' - /^\[\[.+\|.*\]\]/ { - gsub("[][]", "", $1); - split($1, a, "|"); - if (a[1] == matchgroup) - print a[2] - } + /^\[\[.+\|.*\]\]/ { + gsub("[][]", "", $1); + split($1, a, "|"); + if (a[1] == matchgroup) + print a[2] + } ' $file } From 3bdb922c4054a55f03b3db94721997e52415e76d Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 22 Oct 2013 08:36:16 -0400 Subject: [PATCH 0461/4704] fix bash8 indent problems in lib/neutron and friends Change-Id: Ia83ce84b792494800fbfe7baa6423c8de9260014 --- lib/neutron | 26 +++++++++++++------------- lib/neutron_plugins/midonet | 4 ++-- lib/neutron_plugins/nec | 18 +++++++++--------- lib/neutron_plugins/nicira | 8 ++++---- 4 files changed, 28 insertions(+), 28 deletions(-) diff --git a/lib/neutron b/lib/neutron index 778717d7a9..44fb9e1005 100644 --- a/lib/neutron +++ b/lib/neutron @@ -79,8 +79,8 @@ 
NEUTRON_AUTH_CACHE_DIR=${NEUTRON_AUTH_CACHE_DIR:-/var/cache/neutron} # Support entry points installation of console scripts if [[ -d $NEUTRON_DIR/bin/neutron-server ]]; then NEUTRON_BIN_DIR=$NEUTRON_DIR/bin - else -NEUTRON_BIN_DIR=$(get_python_exec_prefix) +else + NEUTRON_BIN_DIR=$(get_python_exec_prefix) fi NEUTRON_CONF_DIR=/etc/neutron @@ -373,7 +373,7 @@ function create_neutron_initial_network() { iniset $Q_L3_CONF_FILE DEFAULT router_id $ROUTER_ID fi fi - fi + fi } # init_neutron() - Initialize databases, etc. @@ -404,7 +404,7 @@ function install_neutron_agent_packages() { fi if is_service_enabled q-lbaas; then - neutron_agent_lbaas_install_agent_packages + neutron_agent_lbaas_install_agent_packages fi } @@ -414,13 +414,13 @@ function start_neutron_service_and_check() { local cfg_file local CFG_FILE_OPTIONS="--config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE" for cfg_file in ${Q_PLUGIN_EXTRA_CONF_FILES[@]}; do - CFG_FILE_OPTIONS+=" --config-file /$cfg_file" + CFG_FILE_OPTIONS+=" --config-file /$cfg_file" done # Start the Neutron service screen_it q-svc "cd $NEUTRON_DIR && python $NEUTRON_BIN_DIR/neutron-server $CFG_FILE_OPTIONS" echo "Waiting for Neutron to start..." if ! timeout $SERVICE_TIMEOUT sh -c "while ! 
wget --no-proxy -q -O- http://$Q_HOST:$Q_PORT; do sleep 1; done"; then - die $LINENO "Neutron did not start" + die $LINENO "Neutron did not start" fi } @@ -712,9 +712,9 @@ function _neutron_setup_rootwrap() { # Set up ``rootwrap.conf``, pointing to ``$NEUTRON_CONF_DIR/rootwrap.d`` # location moved in newer versions, prefer new location if test -r $NEUTRON_DIR/etc/neutron/rootwrap.conf; then - sudo cp -p $NEUTRON_DIR/etc/neutron/rootwrap.conf $Q_RR_CONF_FILE + sudo cp -p $NEUTRON_DIR/etc/neutron/rootwrap.conf $Q_RR_CONF_FILE else - sudo cp -p $NEUTRON_DIR/etc/rootwrap.conf $Q_RR_CONF_FILE + sudo cp -p $NEUTRON_DIR/etc/rootwrap.conf $Q_RR_CONF_FILE fi sudo sed -e "s:^filters_path=.*$:filters_path=$Q_CONF_ROOTWRAP_D:" -i $Q_RR_CONF_FILE sudo chown root:root $Q_RR_CONF_FILE @@ -848,11 +848,11 @@ function _ssh_check_neutron() { # please refer to ``lib/neutron_thirdparty/README.md`` for details NEUTRON_THIRD_PARTIES="" for f in $TOP_DIR/lib/neutron_thirdparty/*; do - third_party=$(basename $f) - if is_service_enabled $third_party; then - source $TOP_DIR/lib/neutron_thirdparty/$third_party - NEUTRON_THIRD_PARTIES="$NEUTRON_THIRD_PARTIES,$third_party" - fi + third_party=$(basename $f) + if is_service_enabled $third_party; then + source $TOP_DIR/lib/neutron_thirdparty/$third_party + NEUTRON_THIRD_PARTIES="$NEUTRON_THIRD_PARTIES,$third_party" + fi done function _neutron_third_party_do() { diff --git a/lib/neutron_plugins/midonet b/lib/neutron_plugins/midonet index 074f847330..cf45a9d11d 100644 --- a/lib/neutron_plugins/midonet +++ b/lib/neutron_plugins/midonet @@ -52,11 +52,11 @@ function neutron_plugin_configure_dhcp_agent() { } function neutron_plugin_configure_l3_agent() { - die $LINENO "q-l3 must not be executed with MidoNet plugin!" + die $LINENO "q-l3 must not be executed with MidoNet plugin!" } function neutron_plugin_configure_plugin_agent() { - die $LINENO "q-agt must not be executed with MidoNet plugin!" 
+ die $LINENO "q-agt must not be executed with MidoNet plugin!" } function neutron_plugin_configure_service() { diff --git a/lib/neutron_plugins/nec b/lib/neutron_plugins/nec index 79d41dbf77..3806c32c75 100644 --- a/lib/neutron_plugins/nec +++ b/lib/neutron_plugins/nec @@ -101,15 +101,15 @@ function _neutron_setup_ovs_tunnels() { local id=0 GRE_LOCAL_IP=${GRE_LOCAL_IP:-$HOST_IP} if [ -n "$GRE_REMOTE_IPS" ]; then - for ip in ${GRE_REMOTE_IPS//:/ } - do - if [[ "$ip" == "$GRE_LOCAL_IP" ]]; then - continue - fi - sudo ovs-vsctl --no-wait add-port $bridge gre$id -- \ - set Interface gre$id type=gre options:remote_ip=$ip - id=`expr $id + 1` - done + for ip in ${GRE_REMOTE_IPS//:/ } + do + if [[ "$ip" == "$GRE_LOCAL_IP" ]]; then + continue + fi + sudo ovs-vsctl --no-wait add-port $bridge gre$id -- \ + set Interface gre$id type=gre options:remote_ip=$ip + id=`expr $id + 1` + done fi } diff --git a/lib/neutron_plugins/nicira b/lib/neutron_plugins/nicira index 082c84674d..7c99b692d6 100644 --- a/lib/neutron_plugins/nicira +++ b/lib/neutron_plugins/nicira @@ -58,13 +58,13 @@ function neutron_plugin_configure_dhcp_agent() { } function neutron_plugin_configure_l3_agent() { - # Nicira plugin does not run L3 agent - die $LINENO "q-l3 should must not be executed with Nicira plugin!" + # Nicira plugin does not run L3 agent + die $LINENO "q-l3 should must not be executed with Nicira plugin!" } function neutron_plugin_configure_plugin_agent() { - # Nicira plugin does not run L2 agent - die $LINENO "q-agt must not be executed with Nicira plugin!" + # Nicira plugin does not run L2 agent + die $LINENO "q-agt must not be executed with Nicira plugin!" 
} function neutron_plugin_configure_service() { From 101b4248428b4c3d7757e15ff4e19d3b4f85a51f Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 22 Oct 2013 08:47:11 -0400 Subject: [PATCH 0462/4704] fix whitespace in the rest of lib/* this brings this in line with bash8 checker Change-Id: Ib34a2292dd5bc259069457461041ec9cd4fd2957 --- lib/baremetal | 100 +++++++++++++------------- lib/glance | 2 +- lib/ironic | 2 +- lib/keystone | 2 +- lib/neutron_thirdparty/trema | 4 +- lib/nova | 41 ++++++----- lib/nova_plugins/hypervisor-baremetal | 4 +- lib/nova_plugins/hypervisor-libvirt | 8 +-- lib/rpc_backend | 6 +- lib/swift | 64 ++++++++--------- lib/tempest | 20 +++--- lib/trove | 15 ++-- 12 files changed, 134 insertions(+), 134 deletions(-) diff --git a/lib/baremetal b/lib/baremetal index f4d8589628..141c28d15f 100644 --- a/lib/baremetal +++ b/lib/baremetal @@ -256,19 +256,19 @@ function upload_baremetal_deploy() { # load them into glance BM_DEPLOY_KERNEL_ID=$(glance \ - --os-auth-token $token \ - --os-image-url http://$GLANCE_HOSTPORT \ - image-create \ - --name $BM_DEPLOY_KERNEL \ - --is-public True --disk-format=aki \ - < $TOP_DIR/files/$BM_DEPLOY_KERNEL | grep ' id ' | get_field 2) + --os-auth-token $token \ + --os-image-url http://$GLANCE_HOSTPORT \ + image-create \ + --name $BM_DEPLOY_KERNEL \ + --is-public True --disk-format=aki \ + < $TOP_DIR/files/$BM_DEPLOY_KERNEL | grep ' id ' | get_field 2) BM_DEPLOY_RAMDISK_ID=$(glance \ - --os-auth-token $token \ - --os-image-url http://$GLANCE_HOSTPORT \ - image-create \ - --name $BM_DEPLOY_RAMDISK \ - --is-public True --disk-format=ari \ - < $TOP_DIR/files/$BM_DEPLOY_RAMDISK | grep ' id ' | get_field 2) + --os-auth-token $token \ + --os-image-url http://$GLANCE_HOSTPORT \ + image-create \ + --name $BM_DEPLOY_RAMDISK \ + --is-public True --disk-format=ari \ + < $TOP_DIR/files/$BM_DEPLOY_RAMDISK | grep ' id ' | get_field 2) } # create a basic baremetal flavor, associated with deploy kernel & ramdisk @@ -278,11 +278,11 @@ 
function create_baremetal_flavor() { aki=$1 ari=$2 nova flavor-create $BM_FLAVOR_NAME $BM_FLAVOR_ID \ - $BM_FLAVOR_RAM $BM_FLAVOR_ROOT_DISK $BM_FLAVOR_CPU + $BM_FLAVOR_RAM $BM_FLAVOR_ROOT_DISK $BM_FLAVOR_CPU nova flavor-key $BM_FLAVOR_NAME set \ - "cpu_arch"="$BM_FLAVOR_ARCH" \ - "baremetal:deploy_kernel_id"="$aki" \ - "baremetal:deploy_ramdisk_id"="$ari" + "cpu_arch"="$BM_FLAVOR_ARCH" \ + "baremetal:deploy_kernel_id"="$aki" \ + "baremetal:deploy_ramdisk_id"="$ari" } @@ -311,19 +311,19 @@ function extract_and_upload_k_and_r_from_image() { # load them into glance KERNEL_ID=$(glance \ - --os-auth-token $token \ - --os-image-url http://$GLANCE_HOSTPORT \ - image-create \ - --name $image_name-kernel \ - --is-public True --disk-format=aki \ - < $TOP_DIR/files/$OUT_KERNEL | grep ' id ' | get_field 2) + --os-auth-token $token \ + --os-image-url http://$GLANCE_HOSTPORT \ + image-create \ + --name $image_name-kernel \ + --is-public True --disk-format=aki \ + < $TOP_DIR/files/$OUT_KERNEL | grep ' id ' | get_field 2) RAMDISK_ID=$(glance \ - --os-auth-token $token \ - --os-image-url http://$GLANCE_HOSTPORT \ - image-create \ - --name $image_name-initrd \ - --is-public True --disk-format=ari \ - < $TOP_DIR/files/$OUT_RAMDISK | grep ' id ' | get_field 2) + --os-auth-token $token \ + --os-image-url http://$GLANCE_HOSTPORT \ + image-create \ + --name $image_name-initrd \ + --is-public True --disk-format=ari \ + < $TOP_DIR/files/$OUT_RAMDISK | grep ' id ' | get_field 2) } @@ -365,11 +365,11 @@ function upload_baremetal_image() { mkdir "$xdir" tar -zxf $FILES/$IMAGE_FNAME -C "$xdir" KERNEL=$(for f in "$xdir/"*-vmlinuz* "$xdir/"aki-*/image; do - [ -f "$f" ] && echo "$f" && break; done; true) + [ -f "$f" ] && echo "$f" && break; done; true) RAMDISK=$(for f in "$xdir/"*-initrd* "$xdir/"ari-*/image; do - [ -f "$f" ] && echo "$f" && break; done; true) + [ -f "$f" ] && echo "$f" && break; done; true) IMAGE=$(for f in "$xdir/"*.img "$xdir/"ami-*/image; do - [ -f "$f" ] && echo "$f" && 
break; done; true) + [ -f "$f" ] && echo "$f" && break; done; true) if [[ -z "$IMAGE_NAME" ]]; then IMAGE_NAME=$(basename "$IMAGE" ".img") fi @@ -403,19 +403,19 @@ function upload_baremetal_image() { --container-format ari \ --disk-format ari < "$RAMDISK" | grep ' id ' | get_field 2) else - # TODO(deva): add support for other image types - return + # TODO(deva): add support for other image types + return fi glance \ - --os-auth-token $token \ - --os-image-url http://$GLANCE_HOSTPORT \ - image-create \ - --name "${IMAGE_NAME%.img}" --is-public True \ - --container-format $CONTAINER_FORMAT \ - --disk-format $DISK_FORMAT \ - ${KERNEL_ID:+--property kernel_id=$KERNEL_ID} \ - ${RAMDISK_ID:+--property ramdisk_id=$RAMDISK_ID} < "${IMAGE}" + --os-auth-token $token \ + --os-image-url http://$GLANCE_HOSTPORT \ + image-create \ + --name "${IMAGE_NAME%.img}" --is-public True \ + --container-format $CONTAINER_FORMAT \ + --disk-format $DISK_FORMAT \ + ${KERNEL_ID:+--property kernel_id=$KERNEL_ID} \ + ${RAMDISK_ID:+--property ramdisk_id=$RAMDISK_ID} < "${IMAGE}" # override DEFAULT_IMAGE_NAME so that tempest can find the image # that we just uploaded in glance @@ -439,15 +439,15 @@ function add_baremetal_node() { mac_2=${2:-$BM_SECOND_MAC} id=$(nova baremetal-node-create \ - --pm_address="$BM_PM_ADDR" \ - --pm_user="$BM_PM_USER" \ - --pm_password="$BM_PM_PASS" \ - "$BM_HOSTNAME" \ - "$BM_FLAVOR_CPU" \ - "$BM_FLAVOR_RAM" \ - "$BM_FLAVOR_ROOT_DISK" \ - "$mac_1" \ - | grep ' id ' | get_field 2 ) + --pm_address="$BM_PM_ADDR" \ + --pm_user="$BM_PM_USER" \ + --pm_password="$BM_PM_PASS" \ + "$BM_HOSTNAME" \ + "$BM_FLAVOR_CPU" \ + "$BM_FLAVOR_RAM" \ + "$BM_FLAVOR_ROOT_DISK" \ + "$mac_1" \ + | grep ' id ' | get_field 2 ) [ $? 
-eq 0 ] || [ "$id" ] || die $LINENO "Error adding baremetal node" if [ -n "$mac_2" ]; then id2=$(nova baremetal-interface-add "$id" "$mac_2" ) diff --git a/lib/glance b/lib/glance index c6f11d06da..75e3dd053d 100644 --- a/lib/glance +++ b/lib/glance @@ -194,7 +194,7 @@ function start_glance() { screen_it g-api "cd $GLANCE_DIR; $GLANCE_BIN_DIR/glance-api --config-file=$GLANCE_CONF_DIR/glance-api.conf" echo "Waiting for g-api ($GLANCE_HOSTPORT) to start..." if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget --no-proxy -q -O- http://$GLANCE_HOSTPORT; do sleep 1; done"; then - die $LINENO "g-api did not start" + die $LINENO "g-api did not start" fi } diff --git a/lib/ironic b/lib/ironic index 89d0edc1a4..649c1c2cd6 100644 --- a/lib/ironic +++ b/lib/ironic @@ -203,7 +203,7 @@ function start_ironic_api() { screen_it ir-api "cd $IRONIC_DIR; $IRONIC_BIN_DIR/ironic-api --config-file=$IRONIC_CONF_FILE" echo "Waiting for ir-api ($IRONIC_HOSTPORT) to start..." if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget --no-proxy -q -O- http://$IRONIC_HOSTPORT; do sleep 1; done"; then - die $LINENO "ir-api did not start" + die $LINENO "ir-api did not start" fi } diff --git a/lib/keystone b/lib/keystone index c93a4367d2..beddb1cd75 100755 --- a/lib/keystone +++ b/lib/keystone @@ -373,7 +373,7 @@ function start_keystone() { echo "Waiting for keystone to start..." if ! timeout $SERVICE_TIMEOUT sh -c "while ! 
curl --noproxy '*' -s http://$SERVICE_HOST:$service_port/v$IDENTITY_API_VERSION/ >/dev/null; do sleep 1; done"; then - die $LINENO "keystone did not start" + die $LINENO "keystone did not start" fi # Start proxies if enabled diff --git a/lib/neutron_thirdparty/trema b/lib/neutron_thirdparty/trema index 09dc46bd83..5b5c4590c3 100644 --- a/lib/neutron_thirdparty/trema +++ b/lib/neutron_thirdparty/trema @@ -66,8 +66,8 @@ function init_trema() { cp $TREMA_SS_DIR/sliceable_switch_null.conf $TREMA_SS_CONFIG sed -i -e "s|^\$apps_dir.*$|\$apps_dir = \"$TREMA_DIR/apps\"|" \ - -e "s|^\$db_dir.*$|\$db_dir = \"$TREMA_SS_DB_DIR\"|" \ - $TREMA_SS_CONFIG + -e "s|^\$db_dir.*$|\$db_dir = \"$TREMA_SS_DB_DIR\"|" \ + $TREMA_SS_CONFIG } function gem_install() { diff --git a/lib/nova b/lib/nova index 09332cf941..809f8e5896 100644 --- a/lib/nova +++ b/lib/nova @@ -465,27 +465,27 @@ function create_nova_conf() { fi if is_service_enabled n-novnc || is_service_enabled n-xvnc; then - # Address on which instance vncservers will listen on compute hosts. - # For multi-host, this should be the management ip of the compute host. - VNCSERVER_LISTEN=${VNCSERVER_LISTEN=127.0.0.1} - VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=127.0.0.1} - iniset $NOVA_CONF DEFAULT vnc_enabled true - iniset $NOVA_CONF DEFAULT vncserver_listen "$VNCSERVER_LISTEN" - iniset $NOVA_CONF DEFAULT vncserver_proxyclient_address "$VNCSERVER_PROXYCLIENT_ADDRESS" + # Address on which instance vncservers will listen on compute hosts. + # For multi-host, this should be the management ip of the compute host. 
+ VNCSERVER_LISTEN=${VNCSERVER_LISTEN=127.0.0.1} + VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=127.0.0.1} + iniset $NOVA_CONF DEFAULT vnc_enabled true + iniset $NOVA_CONF DEFAULT vncserver_listen "$VNCSERVER_LISTEN" + iniset $NOVA_CONF DEFAULT vncserver_proxyclient_address "$VNCSERVER_PROXYCLIENT_ADDRESS" else - iniset $NOVA_CONF DEFAULT vnc_enabled false + iniset $NOVA_CONF DEFAULT vnc_enabled false fi if is_service_enabled n-spice; then - # Address on which instance spiceservers will listen on compute hosts. - # For multi-host, this should be the management ip of the compute host. - SPICESERVER_PROXYCLIENT_ADDRESS=${SPICESERVER_PROXYCLIENT_ADDRESS=127.0.0.1} - SPICESERVER_LISTEN=${SPICESERVER_LISTEN=127.0.0.1} - iniset $NOVA_CONF spice enabled true - iniset $NOVA_CONF spice server_listen "$SPICESERVER_LISTEN" - iniset $NOVA_CONF spice server_proxyclient_address "$SPICESERVER_PROXYCLIENT_ADDRESS" + # Address on which instance spiceservers will listen on compute hosts. + # For multi-host, this should be the management ip of the compute host. + SPICESERVER_PROXYCLIENT_ADDRESS=${SPICESERVER_PROXYCLIENT_ADDRESS=127.0.0.1} + SPICESERVER_LISTEN=${SPICESERVER_LISTEN=127.0.0.1} + iniset $NOVA_CONF spice enabled true + iniset $NOVA_CONF spice server_listen "$SPICESERVER_LISTEN" + iniset $NOVA_CONF spice server_proxyclient_address "$SPICESERVER_PROXYCLIENT_ADDRESS" else - iniset $NOVA_CONF spice enabled false + iniset $NOVA_CONF spice enabled false fi iniset $NOVA_CONF DEFAULT ec2_dmz_host "$EC2_DMZ_HOST" @@ -602,7 +602,7 @@ function start_nova_api() { screen_it n-api "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-api" echo "Waiting for nova-api to start..." if ! 
wait_for_service $SERVICE_TIMEOUT http://$SERVICE_HOST:$service_port; then - die $LINENO "nova-api did not start" + die $LINENO "nova-api did not start" fi # Start proxies if enabled @@ -620,10 +620,9 @@ function start_nova_compute() { # Use 'sg' to execute nova-compute as a member of the **$LIBVIRT_GROUP** group. screen_it n-cpu "cd $NOVA_DIR && sg $LIBVIRT_GROUP '$NOVA_BIN_DIR/nova-compute --config-file $NOVA_CONF_BOTTOM'" elif [[ "$VIRT_DRIVER" = 'fake' ]]; then - for i in `seq 1 $NUMBER_FAKE_NOVA_COMPUTE` - do - screen_it n-cpu "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-compute --config-file $NOVA_CONF_BOTTOM" - done + for i in `seq 1 $NUMBER_FAKE_NOVA_COMPUTE`; do + screen_it n-cpu "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-compute --config-file $NOVA_CONF_BOTTOM" + done else if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then start_nova_hypervisor diff --git a/lib/nova_plugins/hypervisor-baremetal b/lib/nova_plugins/hypervisor-baremetal index 4e7c1734d1..660c977bde 100644 --- a/lib/nova_plugins/hypervisor-baremetal +++ b/lib/nova_plugins/hypervisor-baremetal @@ -61,8 +61,8 @@ function configure_nova_hypervisor() { # Define extra baremetal nova conf flags by defining the array ``EXTRA_BAREMETAL_OPTS``. 
for I in "${EXTRA_BAREMETAL_OPTS[@]}"; do - # Attempt to convert flags to options - iniset $NOVA_CONF baremetal ${I/=/ } + # Attempt to convert flags to options + iniset $NOVA_CONF baremetal ${I/=/ } done } diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt index caf0296ad2..6fae0b17d0 100644 --- a/lib/nova_plugins/hypervisor-libvirt +++ b/lib/nova_plugins/hypervisor-libvirt @@ -82,10 +82,10 @@ EOF" sudo mkdir -p $rules_dir sudo bash -c "cat < $rules_dir/50-libvirt-$STACK_USER.rules polkit.addRule(function(action, subject) { - if (action.id == 'org.libvirt.unix.manage' && - subject.user == '"$STACK_USER"') { - return polkit.Result.YES; - } + if (action.id == 'org.libvirt.unix.manage' && + subject.user == '"$STACK_USER"') { + return polkit.Result.YES; + } }); EOF" unset rules_dir diff --git a/lib/rpc_backend b/lib/rpc_backend index 44c1e44817..a323d649a7 100644 --- a/lib/rpc_backend +++ b/lib/rpc_backend @@ -102,9 +102,9 @@ function install_rpc_backend() { if is_fedora; then install_package qpid-cpp-server if [[ $DISTRO =~ (rhel6) ]]; then - # RHEL6 leaves "auth=yes" in /etc/qpidd.conf, it needs to - # be no or you get GSS authentication errors as it - # attempts to default to this. + # RHEL6 leaves "auth=yes" in /etc/qpidd.conf, it needs to + # be no or you get GSS authentication errors as it + # attempts to default to this. 
sudo sed -i.bak 's/^auth=yes$/auth=no/' /etc/qpidd.conf fi elif is_ubuntu; then diff --git a/lib/swift b/lib/swift index 6ab43c420f..8726f1e7fc 100644 --- a/lib/swift +++ b/lib/swift @@ -104,17 +104,17 @@ ACCOUNT_PORT_BASE=${ACCOUNT_PORT_BASE:-6012} # cleanup_swift() - Remove residual data files function cleanup_swift() { - rm -f ${SWIFT_CONF_DIR}{*.builder,*.ring.gz,backups/*.builder,backups/*.ring.gz} - if egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then - sudo umount ${SWIFT_DATA_DIR}/drives/sdb1 - fi - if [[ -e ${SWIFT_DISK_IMAGE} ]]; then - rm ${SWIFT_DISK_IMAGE} - fi - rm -rf ${SWIFT_DATA_DIR}/run/ - if is_apache_enabled_service swift; then - _cleanup_swift_apache_wsgi - fi + rm -f ${SWIFT_CONF_DIR}{*.builder,*.ring.gz,backups/*.builder,backups/*.ring.gz} + if egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then + sudo umount ${SWIFT_DATA_DIR}/drives/sdb1 + fi + if [[ -e ${SWIFT_DISK_IMAGE} ]]; then + rm ${SWIFT_DISK_IMAGE} + fi + rm -rf ${SWIFT_DATA_DIR}/run/ + if is_apache_enabled_service swift; then + _cleanup_swift_apache_wsgi + fi } # _cleanup_swift_apache_wsgi() - Remove wsgi files, disable and remove apache vhost file @@ -192,7 +192,7 @@ function _config_swift_apache_wsgi() { sudo cp ${SWIFT_DIR}/examples/apache2/account-server.template ${apache_vhost_dir}/account-server-${node_number} sudo sed -e " - /^#/d;/^$/d; + /^#/d;/^$/d; s/%PORT%/$account_port/g; s/%SERVICENAME%/account-server-${node_number}/g; s/%APACHE_NAME%/${APACHE_NAME}/g; @@ -202,7 +202,7 @@ function _config_swift_apache_wsgi() { sudo cp ${SWIFT_DIR}/examples/wsgi/account-server.wsgi.template ${SWIFT_APACHE_WSGI_DIR}/account-server-${node_number}.wsgi sudo sed -e " - /^#/d;/^$/d; + /^#/d;/^$/d; s/%SERVICECONF%/account-server\/${node_number}.conf/g; " -i ${SWIFT_APACHE_WSGI_DIR}/account-server-${node_number}.wsgi done @@ -577,26 +577,26 @@ function start_swift() { return 0 fi - # By default with only one replica we are launching the proxy, - # container, account and object 
server in screen in foreground and - # other services in background. If we have SWIFT_REPLICAS set to something - # greater than one we first spawn all the swift services then kill the proxy - # service so we can run it in foreground in screen. ``swift-init ... - # {stop|restart}`` exits with '1' if no servers are running, ignore it just - # in case - swift-init --run-dir=${SWIFT_DATA_DIR}/run all restart || true - if [[ ${SWIFT_REPLICAS} == 1 ]]; then + # By default with only one replica we are launching the proxy, + # container, account and object server in screen in foreground and + # other services in background. If we have SWIFT_REPLICAS set to something + # greater than one we first spawn all the swift services then kill the proxy + # service so we can run it in foreground in screen. ``swift-init ... + # {stop|restart}`` exits with '1' if no servers are running, ignore it just + # in case + swift-init --run-dir=${SWIFT_DATA_DIR}/run all restart || true + if [[ ${SWIFT_REPLICAS} == 1 ]]; then todo="object container account" - fi - for type in proxy ${todo}; do - swift-init --run-dir=${SWIFT_DATA_DIR}/run ${type} stop || true - done - screen_it s-proxy "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONF_DIR}/proxy-server.conf -v" - if [[ ${SWIFT_REPLICAS} == 1 ]]; then - for type in object container account; do - screen_it s-${type} "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-${type}-server ${SWIFT_CONF_DIR}/${type}-server/1.conf -v" - done - fi + fi + for type in proxy ${todo}; do + swift-init --run-dir=${SWIFT_DATA_DIR}/run ${type} stop || true + done + screen_it s-proxy "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONF_DIR}/proxy-server.conf -v" + if [[ ${SWIFT_REPLICAS} == 1 ]]; then + for type in object container account; do + screen_it s-${type} "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-${type}-server ${SWIFT_CONF_DIR}/${type}-server/1.conf -v" + done + fi } # stop_swift() - Stop running processes (non-screen) diff --git a/lib/tempest 
b/lib/tempest index 9f41608187..8e4e5210ea 100644 --- a/lib/tempest +++ b/lib/tempest @@ -193,7 +193,7 @@ function configure_tempest() { # If namespaces are disabled, devstack will create a single # public router that tempest should be configured to use. public_router_id=$(neutron router-list | awk "/ $Q_ROUTER_NAME / \ - { print \$2 }") + { print \$2 }") fi fi @@ -328,15 +328,15 @@ function init_tempest() { local disk_image="$image_dir/${base_image_name}-blank.img" # if the cirros uec downloaded and the system is uec capable if [ -f "$kernel" -a -f "$ramdisk" -a -f "$disk_image" -a "$VIRT_DRIVER" != "openvz" \ - -a \( "$LIBVIRT_TYPE" != "lxc" -o "$VIRT_DRIVER" != "libvirt" \) ]; then - echo "Prepare aki/ari/ami Images" - ( #new namespace - # tenant:demo ; user: demo - source $TOP_DIR/accrc/demo/demo - euca-bundle-image -i "$kernel" --kernel true -d "$BOTO_MATERIALS_PATH" - euca-bundle-image -i "$ramdisk" --ramdisk true -d "$BOTO_MATERIALS_PATH" - euca-bundle-image -i "$disk_image" -d "$BOTO_MATERIALS_PATH" - ) 2>&1 &1 Date: Tue, 22 Oct 2013 10:06:06 -0400 Subject: [PATCH 0463/4704] clean up whitespace issues on exercises and friends Change-Id: I812a73e46ddd4d5fed4d304d9ef92c1de243f497 --- exercises/boot_from_volume.sh | 2 +- exercises/docker.sh | 3 +- exercises/euca.sh | 52 +++++++++++++++++------------------ exercises/floating_ips.sh | 4 +-- exercises/neutron-adv-test.sh | 24 ++++++++-------- exercises/volumes.sh | 2 +- files/keystone_data.sh | 24 ++++++++-------- tests/functions.sh | 8 +++--- 8 files changed, 59 insertions(+), 60 deletions(-) diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh index fe27bd0956..634a6d526c 100755 --- a/exercises/boot_from_volume.sh +++ b/exercises/boot_from_volume.sh @@ -119,7 +119,7 @@ nova flavor-list INSTANCE_TYPE=$(nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | get_field 1) if [[ -z "$INSTANCE_TYPE" ]]; then # grab the first flavor in the list to launch if default doesn't exist - INSTANCE_TYPE=$(nova 
flavor-list | head -n 4 | tail -n 1 | get_field 1) + INSTANCE_TYPE=$(nova flavor-list | head -n 4 | tail -n 1 | get_field 1) fi # Clean-up from previous runs diff --git a/exercises/docker.sh b/exercises/docker.sh index 0672bc0087..10c5436c35 100755 --- a/exercises/docker.sh +++ b/exercises/docker.sh @@ -62,7 +62,7 @@ die_if_not_set $LINENO IMAGE "Failure getting image $DOCKER_IMAGE_NAME" INSTANCE_TYPE=$(nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | get_field 1) if [[ -z "$INSTANCE_TYPE" ]]; then # grab the first flavor in the list to launch if default doesn't exist - INSTANCE_TYPE=$(nova flavor-list | head -n 4 | tail -n 1 | get_field 1) + INSTANCE_TYPE=$(nova flavor-list | head -n 4 | tail -n 1 | get_field 1) fi # Clean-up from previous runs @@ -102,4 +102,3 @@ set +o xtrace echo "*********************************************************************" echo "SUCCESS: End DevStack Exercise: $0" echo "*********************************************************************" - diff --git a/exercises/euca.sh b/exercises/euca.sh index 64c0014236..ed521e4f7f 100755 --- a/exercises/euca.sh +++ b/exercises/euca.sh @@ -87,31 +87,31 @@ fi # Volumes # ------- if is_service_enabled c-vol && ! is_service_enabled n-cell; then - VOLUME_ZONE=`euca-describe-availability-zones | head -n1 | cut -f2` - die_if_not_set $LINENO VOLUME_ZONE "Failure to find zone for volume" - - VOLUME=`euca-create-volume -s 1 -z $VOLUME_ZONE | cut -f2` - die_if_not_set $LINENO VOLUME "Failure to create volume" - - # Test that volume has been created - VOLUME=`euca-describe-volumes $VOLUME | cut -f2` - die_if_not_set $LINENO VOLUME "Failure to get volume" - - # Test volume has become available - if ! timeout $RUNNING_TIMEOUT sh -c "while ! 
euca-describe-volumes $VOLUME | grep -q available; do sleep 1; done"; then - die $LINENO "volume didn't become available within $RUNNING_TIMEOUT seconds" - fi - - # Attach volume to an instance - euca-attach-volume -i $INSTANCE -d $ATTACH_DEVICE $VOLUME || \ - die $LINENO "Failure attaching volume $VOLUME to $INSTANCE" - if ! timeout $ACTIVE_TIMEOUT sh -c "while ! euca-describe-volumes $VOLUME | grep -A 1 in-use | grep -q attach; do sleep 1; done"; then - die $LINENO "Could not attach $VOLUME to $INSTANCE" - fi - - # Detach volume from an instance - euca-detach-volume $VOLUME || \ - die $LINENO "Failure detaching volume $VOLUME to $INSTANCE" + VOLUME_ZONE=`euca-describe-availability-zones | head -n1 | cut -f2` + die_if_not_set $LINENO VOLUME_ZONE "Failure to find zone for volume" + + VOLUME=`euca-create-volume -s 1 -z $VOLUME_ZONE | cut -f2` + die_if_not_set $LINENO VOLUME "Failure to create volume" + + # Test that volume has been created + VOLUME=`euca-describe-volumes $VOLUME | cut -f2` + die_if_not_set $LINENO VOLUME "Failure to get volume" + + # Test volume has become available + if ! timeout $RUNNING_TIMEOUT sh -c "while ! euca-describe-volumes $VOLUME | grep -q available; do sleep 1; done"; then + die $LINENO "volume didn't become available within $RUNNING_TIMEOUT seconds" + fi + + # Attach volume to an instance + euca-attach-volume -i $INSTANCE -d $ATTACH_DEVICE $VOLUME || \ + die $LINENO "Failure attaching volume $VOLUME to $INSTANCE" + if ! timeout $ACTIVE_TIMEOUT sh -c "while ! euca-describe-volumes $VOLUME | grep -A 1 in-use | grep -q attach; do sleep 1; done"; then + die $LINENO "Could not attach $VOLUME to $INSTANCE" + fi + + # Detach volume from an instance + euca-detach-volume $VOLUME || \ + die $LINENO "Failure detaching volume $VOLUME to $INSTANCE" if ! timeout $ACTIVE_TIMEOUT sh -c "while ! 
euca-describe-volumes $VOLUME | grep -q available; do sleep 1; done"; then die $LINENO "Could not detach $VOLUME to $INSTANCE" fi @@ -120,7 +120,7 @@ if is_service_enabled c-vol && ! is_service_enabled n-cell; then euca-delete-volume $VOLUME || \ die $LINENO "Failure to delete volume" if ! timeout $ACTIVE_TIMEOUT sh -c "while euca-describe-volumes | grep $VOLUME; do sleep 1; done"; then - die $LINENO "Could not delete $VOLUME" + die $LINENO "Could not delete $VOLUME" fi else echo "Volume Tests Skipped" diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh index 2833b650ba..1a1608c872 100755 --- a/exercises/floating_ips.sh +++ b/exercises/floating_ips.sh @@ -113,7 +113,7 @@ nova flavor-list INSTANCE_TYPE=$(nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | get_field 1) if [[ -z "$INSTANCE_TYPE" ]]; then # grab the first flavor in the list to launch if default doesn't exist - INSTANCE_TYPE=$(nova flavor-list | head -n 4 | tail -n 1 | get_field 1) + INSTANCE_TYPE=$(nova flavor-list | head -n 4 | tail -n 1 | get_field 1) fi # Clean-up from previous runs @@ -168,7 +168,7 @@ if ! is_service_enabled neutron; then # list floating addresses if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova floating-ip-list | grep $TEST_FLOATING_POOL | grep -q $TEST_FLOATING_IP; do sleep 1; done"; then die $LINENO "Floating IP not allocated" - fi + fi fi # Dis-allow icmp traffic (ping) diff --git a/exercises/neutron-adv-test.sh b/exercises/neutron-adv-test.sh index e0c37ef723..7dfa5dc161 100755 --- a/exercises/neutron-adv-test.sh +++ b/exercises/neutron-adv-test.sh @@ -273,12 +273,12 @@ function create_vms { } function ping_ip { - # Test agent connection. Assumes namespaces are disabled, and - # that DHCP is in use, but not L3 - local VM_NAME=$1 - local NET_NAME=$2 - IP=$(get_instance_ip $VM_NAME $NET_NAME) - ping_check $NET_NAME $IP $BOOT_TIMEOUT + # Test agent connection. 
Assumes namespaces are disabled, and + # that DHCP is in use, but not L3 + local VM_NAME=$1 + local NET_NAME=$2 + IP=$(get_instance_ip $VM_NAME $NET_NAME) + ping_check $NET_NAME $IP $BOOT_TIMEOUT } function check_vm { @@ -330,12 +330,12 @@ function delete_network { } function delete_networks { - foreach_tenant_net 'delete_network ${%TENANT%_NAME} %NUM%' - #TODO(nati) add secuirty group check after it is implemented - # source $TOP_DIR/openrc demo1 demo1 - # nova secgroup-delete-rule default icmp -1 -1 0.0.0.0/0 - # source $TOP_DIR/openrc demo2 demo2 - # nova secgroup-delete-rule default icmp -1 -1 0.0.0.0/0 + foreach_tenant_net 'delete_network ${%TENANT%_NAME} %NUM%' + # TODO(nati) add secuirty group check after it is implemented + # source $TOP_DIR/openrc demo1 demo1 + # nova secgroup-delete-rule default icmp -1 -1 0.0.0.0/0 + # source $TOP_DIR/openrc demo2 demo2 + # nova secgroup-delete-rule default icmp -1 -1 0.0.0.0/0 } function create_all { diff --git a/exercises/volumes.sh b/exercises/volumes.sh index e536d16249..9ee9fa910a 100755 --- a/exercises/volumes.sh +++ b/exercises/volumes.sh @@ -117,7 +117,7 @@ nova flavor-list INSTANCE_TYPE=$(nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | get_field 1) if [[ -z "$INSTANCE_TYPE" ]]; then # grab the first flavor in the list to launch if default doesn't exist - INSTANCE_TYPE=$(nova flavor-list | head -n 4 | tail -n 1 | get_field 1) + INSTANCE_TYPE=$(nova flavor-list | head -n 4 | tail -n 1 | get_field 1) fi # Clean-up from previous runs diff --git a/files/keystone_data.sh b/files/keystone_data.sh index 3f3137cb14..ea2d52d114 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -66,12 +66,12 @@ fi # Heat if [[ "$ENABLED_SERVICES" =~ "heat" ]]; then HEAT_USER=$(get_id keystone user-create --name=heat \ - --pass="$SERVICE_PASSWORD" \ - --tenant_id $SERVICE_TENANT \ - --email=heat@example.com) + --pass="$SERVICE_PASSWORD" \ + --tenant_id $SERVICE_TENANT \ + --email=heat@example.com) keystone user-role-add 
--tenant-id $SERVICE_TENANT \ - --user-id $HEAT_USER \ - --role-id $SERVICE_ROLE + --user-id $HEAT_USER \ + --role-id $SERVICE_ROLE # heat_stack_user role is for users created by Heat keystone role-create --name heat_stack_user if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then @@ -126,16 +126,16 @@ fi # Ceilometer if [[ "$ENABLED_SERVICES" =~ "ceilometer" ]]; then CEILOMETER_USER=$(get_id keystone user-create --name=ceilometer \ - --pass="$SERVICE_PASSWORD" \ - --tenant_id $SERVICE_TENANT \ - --email=ceilometer@example.com) + --pass="$SERVICE_PASSWORD" \ + --tenant_id $SERVICE_TENANT \ + --email=ceilometer@example.com) keystone user-role-add --tenant-id $SERVICE_TENANT \ - --user-id $CEILOMETER_USER \ - --role-id $ADMIN_ROLE + --user-id $CEILOMETER_USER \ + --role-id $ADMIN_ROLE # Ceilometer needs ResellerAdmin role to access swift account stats. keystone user-role-add --tenant-id $SERVICE_TENANT \ - --user-id $CEILOMETER_USER \ - --role-id $RESELLER_ROLE + --user-id $CEILOMETER_USER \ + --role-id $RESELLER_ROLE if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then CEILOMETER_SERVICE=$(get_id keystone service-create \ --name=ceilometer \ diff --git a/tests/functions.sh b/tests/functions.sh index 7d486d4cc5..40376aa63f 100755 --- a/tests/functions.sh +++ b/tests/functions.sh @@ -122,16 +122,16 @@ fi # test empty option if ini_has_option test.ini ddd empty; then - echo "OK: ddd.empty present" + echo "OK: ddd.empty present" else - echo "ini_has_option failed: ddd.empty not found" + echo "ini_has_option failed: ddd.empty not found" fi # test non-empty option if ini_has_option test.ini bbb handlers; then - echo "OK: bbb.handlers present" + echo "OK: bbb.handlers present" else - echo "ini_has_option failed: bbb.handlers not found" + echo "ini_has_option failed: bbb.handlers not found" fi # test changing empty option From b83c365cf540261c9455a41f4f96aa3c0695fa9c Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 22 Oct 2013 10:08:04 -0400 Subject: [PATCH 0464/4704] clean 
up whitespace on stack.sh Change-Id: If73435968cfbd0dd3cc519f0a30e02bec5fcb386 --- stack.sh | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/stack.sh b/stack.sh index f54d0f240b..5813a8ad09 100755 --- a/stack.sh +++ b/stack.sh @@ -1018,7 +1018,7 @@ if is_service_enabled nova && is_baremetal; then prepare_baremetal_toolchain configure_baremetal_nova_dirs if [[ "$BM_USE_FAKE_ENV" = "True" ]]; then - create_fake_baremetal_env + create_fake_baremetal_env fi fi @@ -1180,26 +1180,26 @@ if is_service_enabled g-reg; then die_if_not_set $LINENO TOKEN "Keystone fail to get token" if is_baremetal; then - echo_summary "Creating and uploading baremetal images" + echo_summary "Creating and uploading baremetal images" - # build and upload separate deploy kernel & ramdisk - upload_baremetal_deploy $TOKEN + # build and upload separate deploy kernel & ramdisk + upload_baremetal_deploy $TOKEN - # upload images, separating out the kernel & ramdisk for PXE boot - for image_url in ${IMAGE_URLS//,/ }; do - upload_baremetal_image $image_url $TOKEN - done + # upload images, separating out the kernel & ramdisk for PXE boot + for image_url in ${IMAGE_URLS//,/ }; do + upload_baremetal_image $image_url $TOKEN + done else - echo_summary "Uploading images" + echo_summary "Uploading images" - # Option to upload legacy ami-tty, which works with xenserver - if [[ -n "$UPLOAD_LEGACY_TTY" ]]; then - IMAGE_URLS="${IMAGE_URLS:+${IMAGE_URLS},}https://github.com/downloads/citrix-openstack/warehouse/tty.tgz" - fi + # Option to upload legacy ami-tty, which works with xenserver + if [[ -n "$UPLOAD_LEGACY_TTY" ]]; then + IMAGE_URLS="${IMAGE_URLS:+${IMAGE_URLS},}https://github.com/downloads/citrix-openstack/warehouse/tty.tgz" + fi - for image_url in ${IMAGE_URLS//,/ }; do - upload_image $image_url $TOKEN - done + for image_url in ${IMAGE_URLS//,/ }; do + upload_image $image_url $TOKEN + done fi fi @@ -1211,7 +1211,7 @@ fi if is_service_enabled nova && 
is_baremetal; then # create special flavor for baremetal if we know what images to associate [[ -n "$BM_DEPLOY_KERNEL_ID" ]] && [[ -n "$BM_DEPLOY_RAMDISK_ID" ]] && \ - create_baremetal_flavor $BM_DEPLOY_KERNEL_ID $BM_DEPLOY_RAMDISK_ID + create_baremetal_flavor $BM_DEPLOY_KERNEL_ID $BM_DEPLOY_RAMDISK_ID # otherwise user can manually add it later by calling nova-baremetal-manage [[ -n "$BM_FIRST_MAC" ]] && add_baremetal_node @@ -1233,7 +1233,7 @@ fi CURRENT_RUN_TIME=$(date "+$TIMESTAMP_FORMAT") echo "# $CURRENT_RUN_TIME" >$TOP_DIR/.stackenv for i in BASE_SQL_CONN ENABLED_SERVICES HOST_IP LOGFILE \ - SERVICE_HOST SERVICE_PROTOCOL STACK_USER TLS_IP; do + SERVICE_HOST SERVICE_PROTOCOL STACK_USER TLS_IP; do echo $i=${!i} >>$TOP_DIR/.stackenv done From 02d7fe13bb714c3c8c28fbe16ecbeac472a80094 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 22 Oct 2013 11:31:21 -0400 Subject: [PATCH 0465/4704] add support for heredoc folding of lines this change in the parser allows for us to have heredocs folded into logical lines. 
Change-Id: I51ebe6cd7b89b5f7194e947896f20b6750e972e3 --- tools/bash8.py | 35 +++++++++++++++++++++++++++++++++-- 1 file changed, 33 insertions(+), 2 deletions(-) diff --git a/tools/bash8.py b/tools/bash8.py index 82a10107e1..edf7da4645 100755 --- a/tools/bash8.py +++ b/tools/bash8.py @@ -55,10 +55,41 @@ def check_indents(line): print_error('E003: Indent not multiple of 4', line) +def starts_multiline(line): + m = re.search("[^<]<<\s*(?P\w+)", line) + if m: + return m.group('token') + else: + return False + + +def end_of_multiline(line, token): + if token: + return re.search("^%s\s*$" % token, line) is not None + return False + + def check_files(files): + in_multiline = False + logical_line = "" + token = False for line in fileinput.input(files): - check_no_trailing_whitespace(line) - check_indents(line) + # NOTE(sdague): multiline processing of heredocs is interesting + if not in_multiline: + logical_line = line + token = starts_multiline(line) + if token: + in_multiline = True + continue + else: + logical_line = logical_line + line + if not end_of_multiline(line, token): + continue + else: + in_multiline = False + + check_no_trailing_whitespace(logical_line) + check_indents(logical_line) def get_options(): From 0b865a55f2b6fa1435e8bf6df09218a9bf7a0ca0 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 22 Oct 2013 11:37:35 -0400 Subject: [PATCH 0466/4704] final bash8 files for the rest of devstack With this devstack/master is bash8 clean, and ready for enforcement Change-Id: I03fc89b401e6b7a23224d71472122c1bfa3ad0bd --- tools/build_bm_multi.sh | 4 +- tools/build_uec.sh | 6 +- tools/create_userrc.sh | 8 +- tools/jenkins/jenkins_home/build_jenkins.sh | 16 +-- tools/xen/install_os_domU.sh | 10 +- tools/xen/scripts/install-os-vpx.sh | 114 ++++++++++---------- tools/xen/scripts/uninstall-os-vpx.sh | 58 +++++----- 7 files changed, 108 insertions(+), 108 deletions(-) diff --git a/tools/build_bm_multi.sh b/tools/build_bm_multi.sh index 52b9b4ea32..328d5762fc 100755 --- 
a/tools/build_bm_multi.sh +++ b/tools/build_bm_multi.sh @@ -22,8 +22,8 @@ run_bm STACKMASTER $HEAD_HOST "ENABLED_SERVICES=g-api,g-reg,key,n-api,n-sch,n-vn if [ ! "$TERMINATE" = "1" ]; then echo "Waiting for head node ($HEAD_HOST) to start..." if ! timeout 60 sh -c "while ! wget -q -O- http://$HEAD_HOST | grep -q username; do sleep 1; done"; then - echo "Head node did not start" - exit 1 + echo "Head node did not start" + exit 1 fi fi diff --git a/tools/build_uec.sh b/tools/build_uec.sh index 6c4a26c2e3..bce051a0b7 100755 --- a/tools/build_uec.sh +++ b/tools/build_uec.sh @@ -229,8 +229,8 @@ EOF # (re)start a metadata service ( - pid=`lsof -iTCP@192.168.$GUEST_NETWORK.1:4567 -n | awk '{print $2}' | tail -1` - [ -z "$pid" ] || kill -9 $pid + pid=`lsof -iTCP@192.168.$GUEST_NETWORK.1:4567 -n | awk '{print $2}' | tail -1` + [ -z "$pid" ] || kill -9 $pid ) cd $vm_dir/uec python meta.py 192.168.$GUEST_NETWORK.1:4567 & @@ -268,7 +268,7 @@ if [ "$WAIT_TILL_LAUNCH" = "1" ]; then sleep 2 while [ ! -e "$vm_dir/console.log" ]; do - sleep 1 + sleep 1 done tail -F $vm_dir/console.log & diff --git a/tools/create_userrc.sh b/tools/create_userrc.sh index 44b0f6bba0..8383fe7d77 100755 --- a/tools/create_userrc.sh +++ b/tools/create_userrc.sh @@ -105,15 +105,15 @@ if [ -z "$OS_PASSWORD" ]; then fi if [ -z "$OS_TENANT_NAME" -a -z "$OS_TENANT_ID" ]; then - export OS_TENANT_NAME=admin + export OS_TENANT_NAME=admin fi if [ -z "$OS_USERNAME" ]; then - export OS_USERNAME=admin + export OS_USERNAME=admin fi if [ -z "$OS_AUTH_URL" ]; then - export OS_AUTH_URL=http://localhost:5000/v2.0/ + export OS_AUTH_URL=http://localhost:5000/v2.0/ fi USER_PASS=${USER_PASS:-$OS_PASSWORD} @@ -249,7 +249,7 @@ if [ $MODE != "create" ]; then for user_id_at_name in `keystone user-list --tenant-id $tenant_id | awk 'BEGIN {IGNORECASE = 1} /true[[:space:]]*\|[^|]*\|$/ {print $2 "@" $4}'`; do read user_id user_name <<< `echo "$user_id_at_name" | sed 's/@/ /'` if [ $MODE = one -a "$user_name" != "$USER_NAME" ]; then 
- continue; + continue; fi add_entry "$user_id" "$user_name" "$tenant_id" "$tenant_name" "$USER_PASS" done diff --git a/tools/jenkins/jenkins_home/build_jenkins.sh b/tools/jenkins/jenkins_home/build_jenkins.sh index e0e774ee9e..a556db0f1d 100755 --- a/tools/jenkins/jenkins_home/build_jenkins.sh +++ b/tools/jenkins/jenkins_home/build_jenkins.sh @@ -6,8 +6,8 @@ set -o errexit # Make sure only root can run our script if [[ $EUID -ne 0 ]]; then - echo "This script must be run as root" - exit 1 + echo "This script must be run as root" + exit 1 fi # This directory @@ -31,15 +31,15 @@ apt-get install -y --force-yes $DEPS # Install jenkins if [ ! -e /var/lib/jenkins ]; then - echo "Jenkins installation failed" - exit 1 + echo "Jenkins installation failed" + exit 1 fi # Make sure user has configured a jenkins ssh pubkey if [ ! -e /var/lib/jenkins/.ssh/id_rsa.pub ]; then - echo "Public key for jenkins is missing. This is used to ssh into your instances." - echo "Please run "su -c ssh-keygen jenkins" before proceeding" - exit 1 + echo "Public key for jenkins is missing. This is used to ssh into your instances." + echo "Please run "su -c ssh-keygen jenkins" before proceeding" + exit 1 fi # Setup sudo @@ -96,7 +96,7 @@ PLUGINS=http://hudson-ci.org/downloads/plugins/build-timeout/1.6/build-timeout.h # Configure plugins for plugin in ${PLUGINS//,/ }; do - name=`basename $plugin` + name=`basename $plugin` dest=/var/lib/jenkins/plugins/$name if [ ! -e $dest ]; then curl -L $plugin -o $dest diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh index 0f314bfa9a..9a2f5a8c03 100755 --- a/tools/xen/install_os_domU.sh +++ b/tools/xen/install_os_domU.sh @@ -44,9 +44,9 @@ source $THIS_DIR/xenrc xe_min() { - local cmd="$1" - shift - xe "$cmd" --minimal "$@" + local cmd="$1" + shift + xe "$cmd" --minimal "$@" } # @@ -132,8 +132,8 @@ HOST_IP=$(xenapi_ip_on "$MGT_BRIDGE_OR_NET_NAME") # Set up ip forwarding, but skip on xcp-xapi if [ -a /etc/sysconfig/network ]; then if ! 
grep -q "FORWARD_IPV4=YES" /etc/sysconfig/network; then - # FIXME: This doesn't work on reboot! - echo "FORWARD_IPV4=YES" >> /etc/sysconfig/network + # FIXME: This doesn't work on reboot! + echo "FORWARD_IPV4=YES" >> /etc/sysconfig/network fi fi # Also, enable ip forwarding in rc.local, since the above trick isn't working diff --git a/tools/xen/scripts/install-os-vpx.sh b/tools/xen/scripts/install-os-vpx.sh index 7469e0c10b..7b0d891493 100755 --- a/tools/xen/scripts/install-os-vpx.sh +++ b/tools/xen/scripts/install-os-vpx.sh @@ -42,69 +42,69 @@ EOF get_params() { - while getopts "hbn:r:l:t:" OPTION; - do - case $OPTION in - h) usage - exit 1 - ;; - n) - BRIDGE=$OPTARG - ;; - l) - NAME_LABEL=$OPTARG - ;; - t) - TEMPLATE_NAME=$OPTARG - ;; - ?) - usage - exit - ;; - esac - done - if [[ -z $BRIDGE ]] - then - BRIDGE=xenbr0 - fi - - if [[ -z $TEMPLATE_NAME ]]; then - echo "Please specify a template name" >&2 - exit 1 - fi - - if [[ -z $NAME_LABEL ]]; then - echo "Please specify a name-label for the new VM" >&2 - exit 1 - fi + while getopts "hbn:r:l:t:" OPTION; + do + case $OPTION in + h) usage + exit 1 + ;; + n) + BRIDGE=$OPTARG + ;; + l) + NAME_LABEL=$OPTARG + ;; + t) + TEMPLATE_NAME=$OPTARG + ;; + ?) 
+ usage + exit + ;; + esac + done + if [[ -z $BRIDGE ]] + then + BRIDGE=xenbr0 + fi + + if [[ -z $TEMPLATE_NAME ]]; then + echo "Please specify a template name" >&2 + exit 1 + fi + + if [[ -z $NAME_LABEL ]]; then + echo "Please specify a name-label for the new VM" >&2 + exit 1 + fi } xe_min() { - local cmd="$1" - shift - xe "$cmd" --minimal "$@" + local cmd="$1" + shift + xe "$cmd" --minimal "$@" } find_network() { - result=$(xe_min network-list bridge="$1") - if [ "$result" = "" ] - then - result=$(xe_min network-list name-label="$1") - fi - echo "$result" + result=$(xe_min network-list bridge="$1") + if [ "$result" = "" ] + then + result=$(xe_min network-list name-label="$1") + fi + echo "$result" } create_vif() { - local v="$1" - echo "Installing VM interface on [$BRIDGE]" - local out_network_uuid=$(find_network "$BRIDGE") - xe vif-create vm-uuid="$v" network-uuid="$out_network_uuid" device="0" + local v="$1" + echo "Installing VM interface on [$BRIDGE]" + local out_network_uuid=$(find_network "$BRIDGE") + xe vif-create vm-uuid="$v" network-uuid="$out_network_uuid" device="0" } @@ -112,20 +112,20 @@ create_vif() # Make the VM auto-start on server boot. 
set_auto_start() { - local v="$1" - xe vm-param-set uuid="$v" other-config:auto_poweron=true + local v="$1" + xe vm-param-set uuid="$v" other-config:auto_poweron=true } destroy_vifs() { - local v="$1" - IFS=, - for vif in $(xe_min vif-list vm-uuid="$v") - do - xe vif-destroy uuid="$vif" - done - unset IFS + local v="$1" + IFS=, + for vif in $(xe_min vif-list vm-uuid="$v") + do + xe vif-destroy uuid="$vif" + done + unset IFS } diff --git a/tools/xen/scripts/uninstall-os-vpx.sh b/tools/xen/scripts/uninstall-os-vpx.sh index ac260949c4..1ed249433a 100755 --- a/tools/xen/scripts/uninstall-os-vpx.sh +++ b/tools/xen/scripts/uninstall-os-vpx.sh @@ -22,63 +22,63 @@ set -ex # By default, don't remove the templates REMOVE_TEMPLATES=${REMOVE_TEMPLATES:-"false"} if [ "$1" = "--remove-templates" ]; then - REMOVE_TEMPLATES=true + REMOVE_TEMPLATES=true fi xe_min() { - local cmd="$1" - shift - xe "$cmd" --minimal "$@" + local cmd="$1" + shift + xe "$cmd" --minimal "$@" } destroy_vdi() { - local vbd_uuid="$1" - local type=$(xe_min vbd-list uuid=$vbd_uuid params=type) - local dev=$(xe_min vbd-list uuid=$vbd_uuid params=userdevice) - local vdi_uuid=$(xe_min vbd-list uuid=$vbd_uuid params=vdi-uuid) + local vbd_uuid="$1" + local type=$(xe_min vbd-list uuid=$vbd_uuid params=type) + local dev=$(xe_min vbd-list uuid=$vbd_uuid params=userdevice) + local vdi_uuid=$(xe_min vbd-list uuid=$vbd_uuid params=vdi-uuid) - if [ "$type" == 'Disk' ] && [ "$dev" != 'xvda' ] && [ "$dev" != '0' ]; then - xe vdi-destroy uuid=$vdi_uuid - fi + if [ "$type" == 'Disk' ] && [ "$dev" != 'xvda' ] && [ "$dev" != '0' ]; then + xe vdi-destroy uuid=$vdi_uuid + fi } uninstall() { - local vm_uuid="$1" - local power_state=$(xe_min vm-list uuid=$vm_uuid params=power-state) + local vm_uuid="$1" + local power_state=$(xe_min vm-list uuid=$vm_uuid params=power-state) - if [ "$power_state" != "halted" ]; then - xe vm-shutdown vm=$vm_uuid force=true - fi + if [ "$power_state" != "halted" ]; then + xe vm-shutdown vm=$vm_uuid 
force=true + fi - for v in $(xe_min vbd-list vm-uuid=$vm_uuid | sed -e 's/,/ /g'); do - destroy_vdi "$v" - done + for v in $(xe_min vbd-list vm-uuid=$vm_uuid | sed -e 's/,/ /g'); do + destroy_vdi "$v" + done - xe vm-uninstall vm=$vm_uuid force=true >/dev/null + xe vm-uninstall vm=$vm_uuid force=true >/dev/null } uninstall_template() { - local vm_uuid="$1" + local vm_uuid="$1" - for v in $(xe_min vbd-list vm-uuid=$vm_uuid | sed -e 's/,/ /g'); do - destroy_vdi "$v" - done + for v in $(xe_min vbd-list vm-uuid=$vm_uuid | sed -e 's/,/ /g'); do + destroy_vdi "$v" + done - xe template-uninstall template-uuid=$vm_uuid force=true >/dev/null + xe template-uninstall template-uuid=$vm_uuid force=true >/dev/null } # remove the VMs and their disks for u in $(xe_min vm-list other-config:os-vpx=true | sed -e 's/,/ /g'); do - uninstall "$u" + uninstall "$u" done # remove the templates if [ "$REMOVE_TEMPLATES" == "true" ]; then - for u in $(xe_min template-list other-config:os-vpx=true | sed -e 's/,/ /g'); do - uninstall_template "$u" - done + for u in $(xe_min template-list other-config:os-vpx=true | sed -e 's/,/ /g'); do + uninstall_template "$u" + done fi From 9b973670a6c200e5f6251bb21eb443be619694c6 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 16 Oct 2013 15:13:56 -0500 Subject: [PATCH 0467/4704] Add the doc build tools tools/build_docs.sh generates the devstack.org website from the static pages and generated pages created by running shocco against a DevStack checkout. Note that while this is the complete auto page generation of the devstack.org site, pushing the content back to GitHub is limited to those with push access to the current repo. 
Partial-bug 1235626 Change-Id: I61dc3d56e4a4832a9ddd1904dd8af65c15a17e50 --- tools/build_docs.sh | 135 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 135 insertions(+) create mode 100755 tools/build_docs.sh diff --git a/tools/build_docs.sh b/tools/build_docs.sh new file mode 100755 index 0000000000..216e557025 --- /dev/null +++ b/tools/build_docs.sh @@ -0,0 +1,135 @@ +#!/usr/bin/env bash + +# **build_docs.sh** - Build the gh-pages docs for DevStack +# +# - Install shocco if not found on PATH +# - Clone MASTER_REPO branch MASTER_BRANCH +# - Re-creates ``docs`` directory from existing repo + new generated script docs + +# Usage: +## build_docs.sh [[-b branch] [-p] repo] | . +## -b branch The DevStack branch to check out (default is master; ignored if +## repo is not specified) +## -p Push the resulting docs tree to the source repo; fatal error if +## repo is not specified +## repo The DevStack repository to clone (default is DevStack github repo) +## If a repo is not supplied use the current directory +## (assumed to be a DevStack checkout) as the source. +## . Use the current repo and branch (do not use with -p to +## prevent stray files in the workspace being added tot he docs) + +# Defaults +# -------- + +# Source repo/branch for DevStack +MASTER_REPO=${MASTER_REPO:-https://github.com/openstack-dev/devstack.git} +MASTER_BRANCH=${MASTER_BRANCH:-master} + +# http://devstack.org is a GitHub gh-pages site in the https://github.com/cloudbuilders/devtack.git repo +GH_PAGES_REPO=git@github.com:cloudbuilders/devstack.git + +# Uses this shocco branch: https://github.com/dtroyer/shocco/tree/rst_support +SHOCCO=${SHOCCO:-shocco} +if ! which shocco; then + if [[ ! -x shocco/shocco ]]; then + if [[ -z "$INSTALL_SHOCCO" ]]; then + echo "shocco not found in \$PATH, please set environment variable SHOCCO" + exit 1 + fi + echo "Installing local copy of shocco" + git clone -b rst_support https://github.com/dtroyer/shocco shocco + cd shocco + ./configure + make + cd .. 
+ fi + SHOCCO=shocco/shocco +fi + +# Process command-line args +while getopts b:p c; do + case $c in + b) MASTER_BRANCH=$OPTARG + ;; + p) PUSH_REPO=1 + ;; + esac +done +shift `expr $OPTIND - 1` + +# Sanity check the args +if [[ "$1" == "." ]]; then + REPO="" + if [[ -n $PUSH_REPO ]]; then + echo "Push not allowed from an active workspace" + unset PUSH_REPO + fi +else + if [[ -z "$1" ]]; then + REPO=$MASTER_REPO + else + REPO=$1 + fi +fi + +# Check out a specific DevStack branch +if [[ -n $REPO ]]; then + # Make a workspace + TMP_ROOT=$(mktemp -d devstack-docs-XXXX) + echo "Building docs in $TMP_ROOT" + cd $TMP_ROOT + + # Get the master branch + git clone $REPO devstack + cd devstack + git checkout $MASTER_BRANCH +fi + +# Processing +# ---------- + +# Assumption is we are now in the DevStack repo workspace to be processed + +# Pull the latest docs branch from devstack.org repo +rm -rf docs || true +git clone -b gh-pages $GH_PAGES_REPO docs + +# Build list of scripts to process +FILES="" +for f in $(find . -name .git -prune -o \( -type f -name \*.sh -not -path \*shocco/\* -print \)); do + echo $f + FILES+="$f " + mkdir -p docs/`dirname $f`; + $SHOCCO $f > docs/$f.html +done +for f in $(find functions lib samples -type f -name \*); do + echo $f + FILES+="$f " + mkdir -p docs/`dirname $f`; + $SHOCCO $f > docs/$f.html +done +echo "$FILES" >docs-files + +# Switch to the gh_pages repo +cd docs + +# Collect the new generated pages +find . -name \*.html -print0 | xargs -0 git add + +# Push our changes back up to the docs branch +if ! 
git diff-index HEAD --quiet; then + git commit -a -m "Update script docs" + if [[ -n $PUSH ]]; then + git push + fi +fi + +# Clean up or report the temp workspace +if [[ -n REPO && -n $PUSH_REPO ]]; then + rm -rf $TMP_ROOT +else + if [[ -z "$TMP_ROOT" ]]; then + TMP_ROOT="$(pwd)" + fi + echo "Built docs in $TMP_ROOT" +fi From 23178a997a3b0abd1922f356e572e2933f454dc1 Mon Sep 17 00:00:00 2001 From: DennyZhang Date: Tue, 22 Oct 2013 17:07:32 -0500 Subject: [PATCH 0468/4704] add assertions for blind grep blind grep is error-prone. Add assertions for errors we can not handle Change-Id: Ibe19085545ecc848498506e8b8ee14e71825b273 --- exercises/aggregates.sh | 3 +++ exercises/floating_ips.sh | 1 + exercises/neutron-adv-test.sh | 6 ++++++ exercises/sec_groups.sh | 1 + exercises/volumes.sh | 1 + lib/neutron | 10 ++++++++++ lib/swift | 5 +++++ stack.sh | 2 ++ tools/jenkins/adapters/euca.sh | 1 + tools/jenkins/adapters/floating_ips.sh | 1 + tools/jenkins/adapters/volumes.sh | 1 + 11 files changed, 32 insertions(+) diff --git a/exercises/aggregates.sh b/exercises/aggregates.sh index e5fc7dec84..6cc81ae11a 100755 --- a/exercises/aggregates.sh +++ b/exercises/aggregates.sh @@ -66,7 +66,10 @@ exit_if_aggregate_present() { exit_if_aggregate_present $AGGREGATE_NAME AGGREGATE_ID=$(nova aggregate-create $AGGREGATE_NAME $AGGREGATE_A_ZONE | grep " $AGGREGATE_NAME " | get_field 1) +die_if_not_set $LINENO AGGREGATE_ID "Failure creating AGGREGATE_ID for $AGGREGATE_NAME $AGGREGATE_A_ZONE" + AGGREGATE2_ID=$(nova aggregate-create $AGGREGATE2_NAME $AGGREGATE_A_ZONE | grep " $AGGREGATE2_NAME " | get_field 1) +die_if_not_set $LINENO AGGREGATE2_ID "Fail creating AGGREGATE2_ID for $AGGREGATE2_NAME $AGGREGATE_A_ZONE" # check aggregate created nova aggregate-list | grep -q " $AGGREGATE_NAME " || die $LINENO "Aggregate $AGGREGATE_NAME not created" diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh index 1a1608c872..4d71d49163 100755 --- a/exercises/floating_ips.sh +++ 
b/exercises/floating_ips.sh @@ -114,6 +114,7 @@ INSTANCE_TYPE=$(nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | get_field 1) if [[ -z "$INSTANCE_TYPE" ]]; then # grab the first flavor in the list to launch if default doesn't exist INSTANCE_TYPE=$(nova flavor-list | head -n 4 | tail -n 1 | get_field 1) + die_if_not_set $LINENO INSTANCE_TYPE "Failure retrieving INSTANCE_TYPE" fi # Clean-up from previous runs diff --git a/exercises/neutron-adv-test.sh b/exercises/neutron-adv-test.sh index 7dfa5dc161..28e0a3d441 100755 --- a/exercises/neutron-adv-test.sh +++ b/exercises/neutron-adv-test.sh @@ -139,24 +139,28 @@ function foreach_tenant_net { function get_image_id { local IMAGE_ID=$(glance image-list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1) + die_if_not_set $LINENO IMAGE_ID "Failure retrieving IMAGE_ID" echo "$IMAGE_ID" } function get_tenant_id { local TENANT_NAME=$1 local TENANT_ID=`keystone tenant-list | grep " $TENANT_NAME " | head -n 1 | get_field 1` + die_if_not_set $LINENO TENANT_ID "Failure retrieving TENANT_ID for $TENANT_NAME" echo "$TENANT_ID" } function get_user_id { local USER_NAME=$1 local USER_ID=`keystone user-list | grep $USER_NAME | awk '{print $2}'` + die_if_not_set $LINENO USER_ID "Failure retrieving USER_ID for $USER_NAME" echo "$USER_ID" } function get_role_id { local ROLE_NAME=$1 local ROLE_ID=`keystone role-list | grep $ROLE_NAME | awk '{print $2}'` + die_if_not_set $LINENO ROLE_ID "Failure retrieving ROLE_ID for $ROLE_NAME" echo "$ROLE_ID" } @@ -169,6 +173,7 @@ function get_network_id { function get_flavor_id { local INSTANCE_TYPE=$1 local FLAVOR_ID=`nova flavor-list | grep $INSTANCE_TYPE | awk '{print $2}'` + die_if_not_set $LINENO FLAVOR_ID "Failure retrieving FLAVOR_ID for $INSTANCE_TYPE" echo "$FLAVOR_ID" } @@ -234,6 +239,7 @@ function create_network { local TENANT_ID=$(get_tenant_id $TENANT) source $TOP_DIR/openrc $TENANT $TENANT local NET_ID=$(neutron net-create --tenant_id $TENANT_ID $NET_NAME $EXTRA| grep ' id ' | awk '{print $4}' ) 
+ die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $TENANT_ID $NET_NAME $EXTRA" neutron subnet-create --ip_version 4 --tenant_id $TENANT_ID --gateway $GATEWAY $NET_ID $CIDR neutron-debug probe-create --device-owner compute $NET_ID source $TOP_DIR/openrc demo demo diff --git a/exercises/sec_groups.sh b/exercises/sec_groups.sh index 7d80570326..eb32cc7aa7 100755 --- a/exercises/sec_groups.sh +++ b/exercises/sec_groups.sh @@ -56,6 +56,7 @@ done # Check to make sure rules were added SEC_GROUP_RULES=( $(nova secgroup-list-rules $SEC_GROUP_NAME | grep -v \- | grep -v 'Source Group' | cut -d '|' -f3 | tr -d ' ') ) +die_if_not_set $LINENO SEC_GROUP_RULES "Failure retrieving SEC_GROUP_RULES for $SEC_GROUP_NAME" for i in "${RULES_TO_ADD[@]}"; do skip= for j in "${SEC_GROUP_RULES[@]}"; do diff --git a/exercises/volumes.sh b/exercises/volumes.sh index 9ee9fa910a..77fa4ebc25 100755 --- a/exercises/volumes.sh +++ b/exercises/volumes.sh @@ -118,6 +118,7 @@ INSTANCE_TYPE=$(nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | get_field 1) if [[ -z "$INSTANCE_TYPE" ]]; then # grab the first flavor in the list to launch if default doesn't exist INSTANCE_TYPE=$(nova flavor-list | head -n 4 | tail -n 1 | get_field 1) + die_if_not_set $LINENO INSTANCE_TYPE "Failure retrieving INSTANCE_TYPE" fi # Clean-up from previous runs diff --git a/lib/neutron b/lib/neutron index 44fb9e1005..9227f19b35 100644 --- a/lib/neutron +++ b/lib/neutron @@ -322,6 +322,7 @@ function create_neutron_accounts() { function create_neutron_initial_network() { TENANT_ID=$(keystone tenant-list | grep " demo " | get_field 1) + die_if_not_set $LINENO TENANT_ID "Failure retrieving TENANT_ID for demo" # Create a small network # Since neutron command is executed in admin context at this point, @@ -336,12 +337,16 @@ function create_neutron_initial_network() { sudo ip addr add $IP dev $OVS_PHYSICAL_BRIDGE done NET_ID=$(neutron net-create $PHYSICAL_NETWORK --tenant_id $TENANT_ID --provider:network_type flat 
--provider:physical_network "$PHYSICAL_NETWORK" | grep ' id ' | get_field 2) + die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PHYSICAL_NETWORK $TENANT_ID" SUBNET_ID=$(neutron subnet-create --tenant_id $TENANT_ID --ip_version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} --gateway $NETWORK_GATEWAY --name $PRIVATE_SUBNET_NAME $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2) + die_if_not_set $LINENO SUBNET_ID "Failure creating SUBNET_ID for $TENANT_ID" sudo ifconfig $OVS_PHYSICAL_BRIDGE up sudo route add default gw $NETWORK_GATEWAY dev $OVS_PHYSICAL_BRIDGE else NET_ID=$(neutron net-create --tenant_id $TENANT_ID "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2) + die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PHYSICAL_NETWORK $TENANT_ID" SUBNET_ID=$(neutron subnet-create --tenant_id $TENANT_ID --ip_version 4 --gateway $NETWORK_GATEWAY --name $PRIVATE_SUBNET_NAME $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2) + die_if_not_set $LINENO SUBNET_ID "Failure creating SUBNET_ID for $TENANT_ID" fi if [[ "$Q_L3_ENABLED" == "True" ]]; then @@ -349,14 +354,18 @@ function create_neutron_initial_network() { if [[ "$Q_L3_ROUTER_PER_TENANT" == "True" ]]; then # create a tenant-owned router. ROUTER_ID=$(neutron router-create --tenant_id $TENANT_ID $Q_ROUTER_NAME | grep ' id ' | get_field 2) + die_if_not_set $LINENO ROUTER_ID "Failure creating ROUTER_ID for $TENANT_ID $Q_ROUTER_NAME" else # Plugin only supports creating a single router, which should be admin owned. ROUTER_ID=$(neutron router-create $Q_ROUTER_NAME | grep ' id ' | get_field 2) + die_if_not_set $LINENO ROUTER_ID "Failure creating ROUTER_ID for $Q_ROUTER_NAME" fi neutron router-interface-add $ROUTER_ID $SUBNET_ID # Create an external network, and a subnet. 
Configure the external network as router gw EXT_NET_ID=$(neutron net-create "$PUBLIC_NETWORK_NAME" -- --router:external=True | grep ' id ' | get_field 2) + die_if_not_set $LINENO EXT_NET_ID "Failure creating EXT_NET_ID for $PUBLIC_NETWORK_NAME" EXT_GW_IP=$(neutron subnet-create --ip_version 4 ${Q_FLOATING_ALLOCATION_POOL:+--allocation-pool $Q_FLOATING_ALLOCATION_POOL} --gateway $PUBLIC_NETWORK_GATEWAY --name $PUBLIC_SUBNET_NAME $EXT_NET_ID $FLOATING_RANGE -- --enable_dhcp=False | grep 'gateway_ip' | get_field 2) + die_if_not_set $LINENO EXT_GW_IP "Failure creating EXT_GW_IP" neutron router-gateway-set $ROUTER_ID $EXT_NET_ID if is_service_enabled q-l3; then @@ -366,6 +375,7 @@ function create_neutron_initial_network() { sudo ip addr add $EXT_GW_IP/$CIDR_LEN dev $PUBLIC_BRIDGE sudo ip link set $PUBLIC_BRIDGE up ROUTER_GW_IP=`neutron port-list -c fixed_ips -c device_owner | grep router_gateway | awk -F '"' '{ print $8; }'` + die_if_not_set $LINENO ROUTER_GW_IP "Failure retrieving ROUTER_GW_IP" sudo route add -net $FIXED_RANGE gw $ROUTER_GW_IP fi if [[ "$Q_USE_NAMESPACE" == "False" ]]; then diff --git a/lib/swift b/lib/swift index 8726f1e7fc..c338375f60 100644 --- a/lib/swift +++ b/lib/swift @@ -492,14 +492,19 @@ function create_swift_accounts() { fi SWIFT_TENANT_TEST1=$(keystone tenant-create --name=swifttenanttest1 | grep " id " | get_field 2) + die_if_not_set $LINENO SWIFT_TENANT_TEST1 "Failure creating SWIFT_TENANT_TEST1" SWIFT_USER_TEST1=$(keystone user-create --name=swiftusertest1 --pass=testing --email=test@example.com | grep " id " | get_field 2) + die_if_not_set $LINENO SWIFT_USER_TEST1 "Failure creating SWIFT_USER_TEST1" keystone user-role-add --user-id $SWIFT_USER_TEST1 --role-id $ADMIN_ROLE --tenant-id $SWIFT_TENANT_TEST1 SWIFT_USER_TEST3=$(keystone user-create --name=swiftusertest3 --pass=testing3 --email=test3@example.com | grep " id " | get_field 2) + die_if_not_set $LINENO SWIFT_USER_TEST3 "Failure creating SWIFT_USER_TEST3" keystone user-role-add 
--user-id $SWIFT_USER_TEST3 --role-id $ANOTHER_ROLE --tenant-id $SWIFT_TENANT_TEST1 SWIFT_TENANT_TEST2=$(keystone tenant-create --name=swifttenanttest2 | grep " id " | get_field 2) + die_if_not_set $LINENO SWIFT_TENANT_TEST2 "Failure creating SWIFT_TENANT_TEST2" SWIFT_USER_TEST2=$(keystone user-create --name=swiftusertest2 --pass=testing2 --email=test2@example.com | grep " id " | get_field 2) + die_if_not_set $LINENO SWIFT_USER_TEST2 "Failure creating SWIFT_USER_TEST2" keystone user-role-add --user-id $SWIFT_USER_TEST2 --role-id $ADMIN_ROLE --tenant-id $SWIFT_TENANT_TEST2 } diff --git a/stack.sh b/stack.sh index 5813a8ad09..3c4afd9fe7 100755 --- a/stack.sh +++ b/stack.sh @@ -1068,7 +1068,9 @@ fi # Create an access key and secret key for nova ec2 register image if is_service_enabled key && is_service_enabled swift3 && is_service_enabled nova; then NOVA_USER_ID=$(keystone user-list | grep ' nova ' | get_field 1) + die_if_not_set $LINENO NOVA_USER_ID "Failure retrieving NOVA_USER_ID for nova" NOVA_TENANT_ID=$(keystone tenant-list | grep " $SERVICE_TENANT_NAME " | get_field 1) + die_if_not_set $LINENO NOVA_TENANT_ID "Failure retrieving NOVA_TENANT_ID for $SERVICE_TENANT_NAME" CREDS=$(keystone ec2-credentials-create --user_id $NOVA_USER_ID --tenant_id $NOVA_TENANT_ID) ACCESS_KEY=$(echo "$CREDS" | awk '/ access / { print $4 }') SECRET_KEY=$(echo "$CREDS" | awk '/ secret / { print $4 }') diff --git a/tools/jenkins/adapters/euca.sh b/tools/jenkins/adapters/euca.sh index b49ce9f21f..a7e635c694 100755 --- a/tools/jenkins/adapters/euca.sh +++ b/tools/jenkins/adapters/euca.sh @@ -5,4 +5,5 @@ set -o errexit TOP_DIR=$(cd ../../.. 
&& pwd) HEAD_IP=`cat $TOP_DIR/addresses | grep HEAD | cut -d "=" -f2` +die_if_not_set $LINENO HEAD_IP "Failure retrieving HEAD_IP" ssh stack@$HEAD_IP 'cd devstack && source openrc && cd exercises && ./euca.sh' diff --git a/tools/jenkins/adapters/floating_ips.sh b/tools/jenkins/adapters/floating_ips.sh index a97f93578a..8da1eeb97a 100755 --- a/tools/jenkins/adapters/floating_ips.sh +++ b/tools/jenkins/adapters/floating_ips.sh @@ -5,4 +5,5 @@ set -o errexit TOP_DIR=$(cd ../../.. && pwd) HEAD_IP=`cat $TOP_DIR/addresses | grep HEAD | cut -d "=" -f2` +die_if_not_set $LINENO HEAD_IP "Failure retrieving HEAD_IP" ssh stack@$HEAD_IP 'cd devstack && source openrc && cd exercises && ./floating_ips.sh' diff --git a/tools/jenkins/adapters/volumes.sh b/tools/jenkins/adapters/volumes.sh index ec292097fa..0a0b6c0548 100755 --- a/tools/jenkins/adapters/volumes.sh +++ b/tools/jenkins/adapters/volumes.sh @@ -5,4 +5,5 @@ set -o errexit TOP_DIR=$(cd ../../.. && pwd) HEAD_IP=`cat $TOP_DIR/addresses | grep HEAD | cut -d "=" -f2` +die_if_not_set $LINENO HEAD_IP "Failure retrieving HEAD_IP" ssh stack@$HEAD_IP 'cd devstack && source openrc && cd exercises && ./volumes.sh' From 7a4ae3d24260cc2cd8eaed495829ec44ff121458 Mon Sep 17 00:00:00 2001 From: Akihiro MOTOKI Date: Thu, 10 Oct 2013 00:40:38 +0900 Subject: [PATCH 0469/4704] Modification for Ubuntu 13.10 and minor fixes in Neutron NEC plugin Modifications for Ubuntu 13.10: * Add .conf suffix to apache2 config files. In Ubuntu 13.10, files in sites-available should have ".conf" suffix. Otherwise it is not recognized by a2ensite. * libglib2.0-dev is added to lib/files/apt/trema. Trema is an OpenFlow controller framework used by Neutron NEC plugin Ubuntu package dependency seems to be changed. Minor cleanups are also done in OVS configuration: * Set datapath_id before connecting to the OpenFlow controller to ensure datapath_id changes after connected. Previously datapath_id is changed after connecting to the controller. 
* Drop "0x" prefix from datapath_id passed to OVS. OVS ignores datapath_id with 0x prefix. * Fix a bug that SKIP_OVS_BRIDGE_SETUP skips all configuration of the plugin agent. It should skip only OVS setup. Change-Id: Ifac3def8decda577b5740c82fe8d24e8520c7777 --- files/apts/trema | 1 + lib/neutron_plugins/nec | 11 ++++++++--- lib/neutron_thirdparty/trema | 2 +- 3 files changed, 10 insertions(+), 4 deletions(-) diff --git a/files/apts/trema b/files/apts/trema index e33ccd3004..09cb7c6794 100644 --- a/files/apts/trema +++ b/files/apts/trema @@ -6,6 +6,7 @@ rubygems1.8 ruby1.8-dev libpcap-dev libsqlite3-dev +libglib2.0-dev # Sliceable Switch sqlite3 diff --git a/lib/neutron_plugins/nec b/lib/neutron_plugins/nec index 3806c32c75..d8d8b7ce7e 100644 --- a/lib/neutron_plugins/nec +++ b/lib/neutron_plugins/nec @@ -55,21 +55,26 @@ function neutron_plugin_configure_l3_agent() { _neutron_ovs_base_configure_l3_agent } -function neutron_plugin_configure_plugin_agent() { +function _quantum_plugin_setup_bridge() { if [[ "$SKIP_OVS_BRIDGE_SETUP" = "True" ]]; then return fi # Set up integration bridge _neutron_ovs_base_setup_bridge $OVS_BRIDGE - sudo ovs-vsctl --no-wait set-controller $OVS_BRIDGE tcp:$OFC_OFP_HOST:$OFC_OFP_PORT # Generate datapath ID from HOST_IP - local dpid=$(printf "0x%07d%03d%03d%03d\n" ${HOST_IP//./ }) + local dpid=$(printf "%07d%03d%03d%03d\n" ${HOST_IP//./ }) sudo ovs-vsctl --no-wait set Bridge $OVS_BRIDGE other-config:datapath-id=$dpid sudo ovs-vsctl --no-wait set-fail-mode $OVS_BRIDGE secure + sudo ovs-vsctl --no-wait set-controller $OVS_BRIDGE tcp:$OFC_OFP_HOST:$OFC_OFP_PORT if [ -n "$OVS_INTERFACE" ]; then sudo ovs-vsctl --no-wait -- --may-exist add-port $OVS_BRIDGE $OVS_INTERFACE fi _neutron_setup_ovs_tunnels $OVS_BRIDGE +} + +function neutron_plugin_configure_plugin_agent() { + _quantum_plugin_setup_bridge + AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-nec-agent" _neutron_ovs_base_configure_firewall_driver diff --git a/lib/neutron_thirdparty/trema 
b/lib/neutron_thirdparty/trema index 5b5c4590c3..9efd3f6c39 100644 --- a/lib/neutron_thirdparty/trema +++ b/lib/neutron_thirdparty/trema @@ -28,7 +28,7 @@ TREMA_TMP_DIR=$TREMA_DATA_DIR/trema TREMA_LOG_LEVEL=${TREMA_LOG_LEVEL:-info} TREMA_SS_CONFIG=$TREMA_SS_ETC_DIR/sliceable.conf -TREMA_SS_APACHE_CONFIG=/etc/apache2/sites-available/sliceable_switch +TREMA_SS_APACHE_CONFIG=/etc/apache2/sites-available/sliceable_switch.conf # configure_trema - Set config files, create data dirs, etc function configure_trema() { From ae9c41727abcab19bed8aa5f72c052ccd442f4ea Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Thu, 7 Mar 2013 15:23:46 +0000 Subject: [PATCH 0470/4704] Allow configure cinder with the nfs driver This patch allow to use nfs as cinder driver. To use it, in the localrc we can now set CINDER_DRIVER=nfs CINDER_NFS_SERVERPATH=172.16.0.50:/export_cinder The nfs-server is not setup by devstack. Change-Id: I8e240d00b58f272d04ab2c0922c551b1f7266260 --- lib/cinder | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/lib/cinder b/lib/cinder index f6f137cabd..cab8b6e4fe 100644 --- a/lib/cinder +++ b/lib/cinder @@ -281,6 +281,11 @@ function configure_cinder() { iniset $CINDER_CONF DEFAULT xenapi_nfs_server "$CINDER_XENAPI_NFS_SERVER" iniset $CINDER_CONF DEFAULT xenapi_nfs_serverpath "$CINDER_XENAPI_NFS_SERVERPATH" ) + elif [ "$CINDER_DRIVER" == "nfs" ]; then + iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.nfs.NfsDriver" + iniset $CINDER_CONF DEFAULT nfs_shares_config "$CINDER_CONF_DIR/nfs_shares.conf" + echo "$CINDER_NFS_SERVERPATH" | sudo tee "$CINDER_CONF_DIR/nfs_shares.conf" + sudo chmod 666 $CINDER_CONF_DIR/nfs_shares.conf elif [ "$CINDER_DRIVER" == "sheepdog" ]; then iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.sheepdog.SheepdogDriver" elif [ "$CINDER_DRIVER" == "glusterfs" ]; then From 6db29904df63ae26a0f4a4b2e4e0c2e6f2cef669 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Tue, 22 Oct 2013 09:22:36 -0700 Subject: [PATCH 
0471/4704] Start nova-compute with child cell conf A recent commit broke the cells support and switched nova-compute to always start with the API cell .conf. This corrects the regression. Change-Id: I633344c8784c154f61e751cd0a408196e61525b3 Closes-bug: 1243961 --- lib/nova | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/lib/nova b/lib/nova index 809f8e5896..31f286d943 100644 --- a/lib/nova +++ b/lib/nova @@ -613,21 +613,25 @@ function start_nova_api() { # start_nova_compute() - Start the compute process function start_nova_compute() { - NOVA_CONF_BOTTOM=$NOVA_CONF + if is_service_enabled n-cell; then + local compute_cell_conf=$NOVA_CELLS_CONF + else + local compute_cell_conf=$NOVA_CONF + fi if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then # The group **$LIBVIRT_GROUP** is added to the current user in this script. # Use 'sg' to execute nova-compute as a member of the **$LIBVIRT_GROUP** group. - screen_it n-cpu "cd $NOVA_DIR && sg $LIBVIRT_GROUP '$NOVA_BIN_DIR/nova-compute --config-file $NOVA_CONF_BOTTOM'" + screen_it n-cpu "cd $NOVA_DIR && sg $LIBVIRT_GROUP '$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf'" elif [[ "$VIRT_DRIVER" = 'fake' ]]; then for i in `seq 1 $NUMBER_FAKE_NOVA_COMPUTE`; do - screen_it n-cpu "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-compute --config-file $NOVA_CONF_BOTTOM" + screen_it n-cpu "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf" done else if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then start_nova_hypervisor fi - screen_it n-cpu "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-compute --config-file $NOVA_CONF_BOTTOM" + screen_it n-cpu "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf" fi } From 386ae8c17162d8cc950c0f6c71fa364b9cbea9d4 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Mon, 21 Oct 2013 09:27:18 +0200 Subject: [PATCH 0472/4704] Additional ssh tests with tempest The run_ssh option is used on the default 'false', so 
several test cases and validation steps were disabled. It was disabled because: * Admin password injection with the cirros image is not supported. (We 'cannot' inject password/shadow to a ram disk.) * In the current system setup floating IP is required for connecting with neutron The run_ssh boolean option will be removed from tempest, it will be replaced with ssh_connect_method and ssh_auth_method. Since using a floating ip with nova flat network is not required in this case, the 'fixed'/private IPs (ssh_connect_method) will be used with nova network , and we will use the 'floating' IPs with neutron when the NAMESPACES are enabled(default). The default value of ssh_auth_method is keypair, it works in both cases. Change-Id: I3d47811d801985687526749a430ed6db64224f99 --- lib/tempest | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/lib/tempest b/lib/tempest index 8e4e5210ea..677d66b50d 100644 --- a/lib/tempest +++ b/lib/tempest @@ -75,6 +75,7 @@ function configure_tempest() { local public_router_id local tenant_networks_reachable local boto_instance_type="m1.tiny" + local ssh_connect_method="fixed" # TODO(afazekas): # sudo python setup.py deploy @@ -182,10 +183,13 @@ function configure_tempest() { if [ "$Q_USE_NAMESPACE" != "False" ]; then tenant_networks_reachable=false + ssh_connect_method="floating" else tenant_networks_reachable=true fi + ssh_connect_method=${TEMPEST_SSH_CONNECT_METHOD:-$ssh_connect_method} + if is_service_enabled q-l3; then public_network_id=$(neutron net-list | grep $PUBLIC_NETWORK_NAME | \ awk '{print $2}') @@ -248,6 +252,7 @@ function configure_tempest() { iniset $TEMPEST_CONF compute flavor_ref_alt $flavor_ref_alt iniset $TEMPEST_CONF compute live_migration_available ${LIVE_MIGRATION_AVAILABLE:-False} iniset $TEMPEST_CONF compute use_block_migration_for_live_migration ${USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION:-False} + iniset $TEMPEST_CONF compute ssh_connect_method $ssh_connect_method # Compute admin iniset $TEMPEST_CONF "compute-admin" 
password "$password" # DEPRECATED From 33eaa5698ca3ced12d7ab5a181cc381bdb19ce76 Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Thu, 24 Oct 2013 14:12:44 +0100 Subject: [PATCH 0473/4704] Use heat's default value for max_template_size Instead of aligning value with tempest, make tempest use heat's default value as that is what most people will be deploying with. Change-Id: I77549f2b5e953ff712c50a2b372f6b04725d5eb0 --- lib/heat | 3 --- 1 file changed, 3 deletions(-) diff --git a/lib/heat b/lib/heat index 8acadb4ad1..da4505e2f9 100644 --- a/lib/heat +++ b/lib/heat @@ -118,9 +118,6 @@ function configure_heat() { iniset $HEAT_CONF heat_api_cloudwatch bind_host $HEAT_API_CW_HOST iniset $HEAT_CONF heat_api_cloudwatch bind_port $HEAT_API_CW_PORT - # Set limits to match tempest defaults - iniset $HEAT_CONF DEFAULT max_template_size 10240 - # heat environment sudo mkdir -p $HEAT_ENV_DIR sudo chown $STACK_USER $HEAT_ENV_DIR From cb961597cc30f9d8ece17529f09a8291454827e3 Mon Sep 17 00:00:00 2001 From: Adam Spiers Date: Sat, 5 Oct 2013 12:11:07 +0100 Subject: [PATCH 0474/4704] Fix shocco errors and warnings A few Markdown-oriented issues were causing Docutils errors to leak into the end-user docs on http://devstack.org Change-Id: I51fa9698afb1bfb48596478d83bd1fdcd84ac52e --- exercises/swift.sh | 2 +- functions | 27 +++++++++++++++------------ lib/baremetal | 6 ++++-- lib/ceilometer | 2 ++ lib/database | 13 ++++++++----- lib/neutron | 3 ++- lib/swift | 4 ++-- tools/build_ramdisk.sh | 9 ++++----- tools/create-stack-user.sh | 2 ++ tools/fixup_stuff.sh | 6 +++++- 10 files changed, 45 insertions(+), 29 deletions(-) diff --git a/exercises/swift.sh b/exercises/swift.sh index b9f1b566bb..25ea6719c1 100755 --- a/exercises/swift.sh +++ b/exercises/swift.sh @@ -2,7 +2,7 @@ # **swift.sh** -# Test swift via the ``swift`` command line from ``python-swiftclient` +# Test swift via the ``swift`` command line from ``python-swiftclient`` echo 
"*********************************************************************" echo "Begin DevStack Exercise: $0" diff --git a/functions b/functions index af5a37da17..8d076b72e5 100644 --- a/functions +++ b/functions @@ -54,7 +54,7 @@ function address_in_net() { # Wrapper for ``apt-get`` to set cache and proxy environment variables -# Uses globals ``OFFLINE``, ``*_proxy` +# Uses globals ``OFFLINE``, ``*_proxy`` # apt_get operation package [package ...] function apt_get() { [[ "$OFFLINE" = "True" || -z "$@" ]] && return @@ -260,7 +260,8 @@ function _get_package_dir() { # # Only packages required for the services in 1st argument will be # included. Two bits of metadata are recognized in the prerequisite files: -# - ``# NOPRIME`` defers installation to be performed later in stack.sh +# +# - ``# NOPRIME`` defers installation to be performed later in `stack.sh` # - ``# dist:DISTRO`` or ``dist:DISTRO1,DISTRO2`` limits the selection # of the package to the distros listed. The distro names are case insensitive. function get_packages() { @@ -982,7 +983,7 @@ function is_set() { # Wrapper for ``pip install`` to set cache and proxy environment variables # Uses globals ``OFFLINE``, ``PIP_DOWNLOAD_CACHE``, ``PIP_USE_MIRRORS``, -# ``TRACK_DEPENDS``, ``*_proxy` +# ``TRACK_DEPENDS``, ``*_proxy`` # pip_install package [package ...] function pip_install { [[ "$OFFLINE" = "True" || -z "$@" ]] && return @@ -1011,8 +1012,7 @@ function pip_install { # /tmp/$USER-pip-build. Even if a later component specifies foo < # 1.1, the existing extracted build will be used and cause # confusing errors. By creating unique build directories we avoid - # this problem. See - # https://github.com/pypa/pip/issues/709 + # this problem. 
See https://github.com/pypa/pip/issues/709 local pip_build_tmp=$(mktemp --tmpdir -d pip-build.XXXXX) $SUDO_PIP PIP_DOWNLOAD_CACHE=${PIP_DOWNLOAD_CACHE:-/var/cache/pip} \ @@ -1146,8 +1146,8 @@ function screen_rc { } -# Helper to remove the *.failure files under $SERVICE_DIR/$SCREEN_NAME -# This is used for service_check when all the screen_it are called finished +# Helper to remove the ``*.failure`` files under ``$SERVICE_DIR/$SCREEN_NAME``. +# This is used for ``service_check`` when all the ``screen_it`` are called finished # init_service_check function init_service_check() { SCREEN_NAME=${SCREEN_NAME:-stack} @@ -1301,10 +1301,12 @@ function trueorfalse() { } -# Retrieve an image from a URL and upload into Glance +# Retrieve an image from a URL and upload into Glance. # Uses the following variables: -# ``FILES`` must be set to the cache dir -# ``GLANCE_HOSTPORT`` +# +# - ``FILES`` must be set to the cache dir +# - ``GLANCE_HOSTPORT`` +# # upload_image image-url glance-token function upload_image() { local image_url=$1 @@ -1466,7 +1468,8 @@ function upload_image() { # When called from stackrc/localrc DATABASE_BACKENDS has not been # initialized yet, just save the configuration selection and call back later # to validate it. -# $1 The name of the database backend to use (mysql, postgresql, ...) +# +# ``$1`` - the name of the database backend to use (mysql, postgresql, ...) function use_database { if [[ -z "$DATABASE_BACKENDS" ]]; then # No backends registered means this is likely called from ``localrc`` @@ -1507,7 +1510,7 @@ function wait_for_service() { # Wrapper for ``yum`` to set proxy environment variables -# Uses globals ``OFFLINE``, ``*_proxy` +# Uses globals ``OFFLINE``, ``*_proxy`` # yum_install package [package ...] 
function yum_install() { [[ "$OFFLINE" = "True" ]] && return diff --git a/lib/baremetal b/lib/baremetal index 141c28d15f..5606230eac 100644 --- a/lib/baremetal +++ b/lib/baremetal @@ -83,8 +83,10 @@ BM_POSEUR_EXTRA_OPTS=${BM_POSEUR_EXTRA_OPTS:-} # To provide PXE, configure nova-network's dnsmasq rather than run the one # dedicated to baremetal. When enable this, make sure these conditions are # fulfilled: +# # 1) nova-compute and nova-network runs on the same host # 2) nova-network uses FlatDHCPManager +# # NOTE: the other BM_DNSMASQ_* have no effect on the behavior if this option # is enabled. BM_DNSMASQ_FROM_NOVA_NETWORK=`trueorfalse False $BM_DNSMASQ_FROM_NOVA_NETWORK` @@ -198,8 +200,8 @@ function create_fake_baremetal_env() { BM_FIRST_MAC=$(sudo $bm_poseur get-macs) # NOTE: there is currently a limitation in baremetal driver - # that requires second MAC even if it is not used. - # Passing a fake value allows this to work. + # that requires second MAC even if it is not used. + # Passing a fake value allows this to work. # TODO(deva): remove this after driver issue is fixed. BM_SECOND_MAC='12:34:56:78:90:12' } diff --git a/lib/ceilometer b/lib/ceilometer index cd4c4d8656..a471d9c7e6 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -2,9 +2,11 @@ # Install and start **Ceilometer** service # To enable a minimal set of Ceilometer services, add the following to localrc: +# # enable_service ceilometer-acompute ceilometer-acentral ceilometer-collector ceilometer-api # # To ensure Ceilometer alarming services are enabled also, further add to the localrc: +# # enable_service ceilometer-alarm-notifier ceilometer-alarm-evaluator # Dependencies: diff --git a/lib/database b/lib/database index 3c1560964c..c3fd435eb2 100644 --- a/lib/database +++ b/lib/database @@ -9,10 +9,11 @@ # This is a wrapper for the specific database backends available. 
# Each database must implement four functions: -# recreate_database_$DATABASE_TYPE -# install_database_$DATABASE_TYPE -# configure_database_$DATABASE_TYPE -# database_connection_url_$DATABASE_TYPE +# +# - recreate_database_$DATABASE_TYPE +# - install_database_$DATABASE_TYPE +# - configure_database_$DATABASE_TYPE +# - database_connection_url_$DATABASE_TYPE # # and call register_database $DATABASE_TYPE @@ -22,7 +23,9 @@ set +o xtrace # Register a database backend -# $1 The name of the database backend +# +# $1 The name of the database backend +# # This is required to be defined before the specific database scripts are sourced function register_database { [ -z "$DATABASE_BACKENDS" ] && DATABASE_BACKENDS=$1 || DATABASE_BACKENDS+=" $1" diff --git a/lib/neutron b/lib/neutron index 44fb9e1005..00852df05a 100644 --- a/lib/neutron +++ b/lib/neutron @@ -208,7 +208,7 @@ source $TOP_DIR/lib/neutron_plugins/services/loadbalancer source $TOP_DIR/lib/neutron_plugins/services/vpn # Firewall Service Plugin functions -# -------------------------------- +# --------------------------------- source $TOP_DIR/lib/neutron_plugins/services/firewall # Use security group or not @@ -494,6 +494,7 @@ function _configure_neutron_common() { # For main plugin config file, set ``Q_PLUGIN_CONF_PATH``, ``Q_PLUGIN_CONF_FILENAME``. # For addition plugin config files, set ``Q_PLUGIN_EXTRA_CONF_PATH``, # ``Q_PLUGIN_EXTRA_CONF_FILES``. For example: + # # ``Q_PLUGIN_EXTRA_CONF_FILES=(file1, file2)`` neutron_plugin_configure_common diff --git a/lib/swift b/lib/swift index 8726f1e7fc..3c3b8b1d38 100644 --- a/lib/swift +++ b/lib/swift @@ -268,8 +268,8 @@ function configure_swift() { # By default Swift will be installed with keystone and tempauth middleware # and add the swift3 middleware if its configured for it. 
The token for - # tempauth would be prefixed with the reseller_prefix setting TEMPAUTH_ the - # token for keystoneauth would have the standard reseller_prefix AUTH_ + # tempauth would be prefixed with the reseller_prefix setting `TEMPAUTH_` the + # token for keystoneauth would have the standard reseller_prefix `AUTH_` if is_service_enabled swift3;then swift_pipeline=" swift3 s3token " fi diff --git a/tools/build_ramdisk.sh b/tools/build_ramdisk.sh index 2c45568531..3d9f76f4a5 100755 --- a/tools/build_ramdisk.sh +++ b/tools/build_ramdisk.sh @@ -84,11 +84,10 @@ if [ ! -r $CACHEDIR/$DIST_NAME-base.img ]; then $TOOLS_DIR/get_uec_image.sh $DIST_NAME $CACHEDIR/$DIST_NAME-base.img fi -# Finds the next available NBD device -# Exits script if error connecting or none free +# Finds and returns full device path for the next available NBD device. +# Exits script if error connecting or none free. # map_nbd image -# Returns full nbd device path -function map_nbd { +function map_nbd() { for i in `seq 0 15`; do if [ ! -e /sys/block/nbd$i/pid ]; then NBD=/dev/nbd$i @@ -156,7 +155,7 @@ if [ ! -r $IMG_FILE ]; then # Pre-create the image file # FIXME(dt): This should really get the partition size to - # pre-create the image file + # pre-create the image file dd if=/dev/zero of=$IMG_FILE_TMP bs=1 count=1 seek=$((2*1024*1024*1024)) # Create filesystem image for RAM disk dd if=${NBD}p1 of=$IMG_FILE_TMP bs=1M diff --git a/tools/create-stack-user.sh b/tools/create-stack-user.sh index 2251d1e67c..50f6592a3a 100755 --- a/tools/create-stack-user.sh +++ b/tools/create-stack-user.sh @@ -5,7 +5,9 @@ # Create a user account suitable for running DevStack # - create a group named $STACK_USER if it does not exist # - create a user named $STACK_USER if it does not exist +# # - home is $DEST +# # - configure sudo for $STACK_USER # ``stack.sh`` was never intended to run as root. 
It had a hack to do what is diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index 9e65b7c21e..325a6d6be1 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -5,11 +5,15 @@ # fixup_stuff.sh # # All distro and package specific hacks go in here +# # - prettytable 0.7.2 permissions are 600 in the package and # pip 1.4 doesn't fix it (1.3 did) +# # - httplib2 0.8 permissions are 600 in the package and # pip 1.4 doesn't fix it (1.3 did) +# # - RHEL6: +# # - set selinux not enforcing # - (re)start messagebus daemon # - remove distro packages python-crypto and python-lxml @@ -90,7 +94,7 @@ if [[ $DISTRO =~ (rhel6) ]]; then # fresh system via Anaconda and the dependency chain # ``cas`` -> ``python-paramiko`` -> ``python-crypto``. # ``pip uninstall pycrypto`` will remove the packaged ``.egg-info`` - # file but leave most of the actual library files behind in + # file but leave most of the actual library files behind in # ``/usr/lib64/python2.6/Crypto``. Later ``pip install pycrypto`` # will install over the packaged files resulting # in a useless mess of old, rpm-packaged files and pip-installed files. From 6730a9d1c67a8740611c972aad1e3d2c5feebebb Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Thu, 24 Oct 2013 15:28:34 +0000 Subject: [PATCH 0475/4704] Handle the CM service availability in tempest This patch handle the ceilometer service availability in tempest. 
Change-Id: Ib0d1d7b858ff327785ebbcc27d7f920fb4a32444 --- lib/tempest | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index 8e4e5210ea..b3df1398cc 100644 --- a/lib/tempest +++ b/lib/tempest @@ -296,7 +296,7 @@ function configure_tempest() { iniset $TEMPEST_CONF cli cli_dir $NOVA_BIN_DIR # service_available - for service in nova cinder glance neutron swift heat horizon ; do + for service in nova cinder glance neutron swift heat horizon ceilometer; do if is_service_enabled $service ; then iniset $TEMPEST_CONF service_available $service "True" else From 1ed64cbbf09d76df0b1ce0d5095373c2bf1053c6 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Wed, 23 Oct 2013 10:37:05 +0200 Subject: [PATCH 0476/4704] Ensure cm-api is ready before start cm-alarm The patch ensure that ceilometer-api is ready before starting the ceilometer-alarm-evaluator service. This ensure that ceilometer-alarm-evaluator doesn't log a error message on startup due to not yet available ceilometer-api. Closes bug: #1243249 Change-Id: Icff3e972ec485f26c014071f68079593a14b7240 --- lib/ceilometer | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/lib/ceilometer b/lib/ceilometer index cd4c4d8656..06f215e9d8 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -138,6 +138,12 @@ function start_ceilometer() { screen_it ceilometer-acentral "cd ; ceilometer-agent-central --config-file $CEILOMETER_CONF" screen_it ceilometer-collector "cd ; ceilometer-collector --config-file $CEILOMETER_CONF" screen_it ceilometer-api "cd ; ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF" + + echo "Waiting for ceilometer-api to start..." + if ! timeout $SERVICE_TIMEOUT sh -c "while ! 
curl --noproxy '*' -s http://localhost:8777/v2/ >/dev/null; do sleep 1; done"; then + die $LINENO "ceilometer-api did not start" + fi + screen_it ceilometer-alarm-notifier "cd ; ceilometer-alarm-notifier --config-file $CEILOMETER_CONF" screen_it ceilometer-alarm-evaluator "cd ; ceilometer-alarm-evaluator --config-file $CEILOMETER_CONF" } From 6fa67c99ba687f659fab0ad3f965993d833ca2b4 Mon Sep 17 00:00:00 2001 From: Adam Spiers Date: Thu, 24 Oct 2013 16:32:21 +0100 Subject: [PATCH 0477/4704] git-ignore files generated by "./tools/build_docs.sh ." Change-Id: Ibf190998e52e7814ddc7f7ab4cf174aee28df9bf --- .gitignore | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.gitignore b/.gitignore index 798b0814c9..0c22c6b62a 100644 --- a/.gitignore +++ b/.gitignore @@ -13,3 +13,5 @@ stack-screenrc accrc .stackenv .prereqs +docs/ +docs-files From 6a5aa7c6a20435bbd276a0f1823396b52a8f0daf Mon Sep 17 00:00:00 2001 From: Adam Spiers Date: Thu, 24 Oct 2013 11:27:02 +0100 Subject: [PATCH 0478/4704] Fix some Markdown formatting issues Address miscellaneous issues with Markdown formatting in comments which are consumed by shocco when generating the online documentation. 
Change-Id: I953075cdbddbf1f119c6c7e35f039e2e54b79078 --- exercises/aggregates.sh | 13 ++--- exercises/boot_from_volume.sh | 5 +- functions | 21 ++++---- lib/apache | 26 +++++----- lib/baremetal | 78 ++++++++++++++++-------------- lib/ceilometer | 20 ++++---- lib/cinder | 30 +++++++----- lib/database | 7 +-- lib/databases/mysql | 3 +- lib/databases/postgresql | 3 +- lib/glance | 30 ++++++------ lib/heat | 27 ++++++----- lib/horizon | 30 ++++++------ lib/infra | 14 +++--- lib/ironic | 31 ++++++------ lib/keystone | 40 +++++++-------- lib/ldap | 10 ++-- lib/neutron | 37 +++++++------- lib/nova | 34 +++++++------ lib/nova_plugins/hypervisor-docker | 8 +-- lib/oslo | 12 +++-- lib/rpc_backend | 20 ++++---- lib/savanna-dashboard | 13 ++--- lib/swift | 37 +++++++------- lib/tempest | 61 ++++++++++++----------- lib/template | 26 +++++----- lib/tls | 36 ++++++++------ lib/trove | 7 +-- 28 files changed, 367 insertions(+), 312 deletions(-) diff --git a/exercises/aggregates.sh b/exercises/aggregates.sh index e5fc7dec84..96241f9b34 100755 --- a/exercises/aggregates.sh +++ b/exercises/aggregates.sh @@ -3,12 +3,13 @@ # **aggregates.sh** # This script demonstrates how to use host aggregates: -# * Create an Aggregate -# * Updating Aggregate details -# * Testing Aggregate metadata -# * Testing Aggregate delete -# * Testing General Aggregates (https://blueprints.launchpad.net/nova/+spec/general-host-aggregates) -# * Testing add/remove hosts (with one host) +# +# * Create an Aggregate +# * Updating Aggregate details +# * Testing Aggregate metadata +# * Testing Aggregate delete +# * Testing General Aggregates (https://blueprints.launchpad.net/nova/+spec/general-host-aggregates) +# * Testing add/remove hosts (with one host) echo "**************************************************" echo "Begin DevStack Exercise: $0" diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh index 634a6d526c..3b3d3ba63b 100755 --- a/exercises/boot_from_volume.sh +++ 
b/exercises/boot_from_volume.sh @@ -3,8 +3,9 @@ # **boot_from_volume.sh** # This script demonstrates how to boot from a volume. It does the following: -# * Create a bootable volume -# * Boot a volume-backed instance +# +# * Create a bootable volume +# * Boot a volume-backed instance echo "*********************************************************************" echo "Begin DevStack Exercise: $0" diff --git a/functions b/functions index 8d076b72e5..f4fb8065db 100644 --- a/functions +++ b/functions @@ -1,16 +1,17 @@ # functions - Common functions used by DevStack components # # The following variables are assumed to be defined by certain functions: -# ``ENABLED_SERVICES`` -# ``ERROR_ON_CLONE`` -# ``FILES`` -# ``GLANCE_HOSTPORT`` -# ``OFFLINE`` -# ``PIP_DOWNLOAD_CACHE`` -# ``PIP_USE_MIRRORS`` -# ``RECLONE`` -# ``TRACK_DEPENDS`` -# ``http_proxy``, ``https_proxy``, ``no_proxy`` +# +# - ``ENABLED_SERVICES`` +# - ``ERROR_ON_CLONE`` +# - ``FILES`` +# - ``GLANCE_HOSTPORT`` +# - ``OFFLINE`` +# - ``PIP_DOWNLOAD_CACHE`` +# - ``PIP_USE_MIRRORS`` +# - ``RECLONE`` +# - ``TRACK_DEPENDS`` +# - ``http_proxy``, ``https_proxy``, ``no_proxy`` # Save trace setting diff --git a/lib/apache b/lib/apache index 3a1f6f1263..41d6fcc381 100644 --- a/lib/apache +++ b/lib/apache @@ -2,15 +2,16 @@ # Functions to control configuration and operation of apache web server # Dependencies: -# ``functions`` file -# is_apache_enabled_service -# install_apache_wsgi -# config_apache_wsgi -# enable_apache_site -# disable_apache_site -# start_apache_server -# stop_apache_server -# restart_apache_server +# +# - ``functions`` file +# - is_apache_enabled_service +# - install_apache_wsgi +# - config_apache_wsgi +# - enable_apache_site +# - disable_apache_site +# - start_apache_server +# - stop_apache_server +# - restart_apache_server # Save trace setting XTRACE=$(set +o | grep xtrace) @@ -116,6 +117,7 @@ function restart_apache_server() { # Restore xtrace $XTRACE -# Local variables: -# mode: shell-script -# End: +# 
Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: diff --git a/lib/baremetal b/lib/baremetal index 5606230eac..a0df85e700 100644 --- a/lib/baremetal +++ b/lib/baremetal @@ -1,19 +1,19 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright (c) 2012 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. +## vim: tabstop=4 shiftwidth=4 softtabstop=4 + +## Copyright (c) 2012 Hewlett-Packard Development Company, L.P. +## All Rights Reserved. +## +## Licensed under the Apache License, Version 2.0 (the "License"); you may +## not use this file except in compliance with the License. You may obtain +## a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, software +## distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +## WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +## License for the specific language governing permissions and limitations +## under the License. # This file provides devstack with the environment and utilities to @@ -24,7 +24,8 @@ # control physical hardware resources on the same network, if you know # the MAC address(es) and IPMI credentials. 
# -# At a minimum, to enable the baremetal driver, you must set these in loclarc: +# At a minimum, to enable the baremetal driver, you must set these in localrc: +# # VIRT_DRIVER=baremetal # ENABLED_SERVICES="$ENABLED_SERVICES,baremetal" # @@ -38,11 +39,13 @@ # Below that, various functions are defined, which are called by devstack # in the following order: # -# before nova-cpu starts: +# before nova-cpu starts: +# # - prepare_baremetal_toolchain # - configure_baremetal_nova_dirs # -# after nova and glance have started: +# after nova and glance have started: +# # - build_and_upload_baremetal_deploy_k_and_r $token # - create_baremetal_flavor $BM_DEPLOY_KERNEL_ID $BM_DEPLOY_RAMDISK_ID # - upload_baremetal_image $url $token @@ -58,11 +61,13 @@ set +o xtrace # ------------------- # sub-driver to use for kernel deployment -# - nova.virt.baremetal.pxe.PXE -# - nova.virt.baremetal.tilera.TILERA +# +# - nova.virt.baremetal.pxe.PXE +# - nova.virt.baremetal.tilera.TILERA BM_DRIVER=${BM_DRIVER:-nova.virt.baremetal.pxe.PXE} # sub-driver to use for remote power management +# # - nova.virt.baremetal.fake.FakePowerManager, for manual power control # - nova.virt.baremetal.ipmi.IPMI, for remote IPMI # - nova.virt.baremetal.tilera_pdu.Pdu, for TilePro hardware @@ -84,11 +89,11 @@ BM_POSEUR_EXTRA_OPTS=${BM_POSEUR_EXTRA_OPTS:-} # dedicated to baremetal. When enable this, make sure these conditions are # fulfilled: # -# 1) nova-compute and nova-network runs on the same host -# 2) nova-network uses FlatDHCPManager +# 1) nova-compute and nova-network runs on the same host +# 2) nova-network uses FlatDHCPManager # # NOTE: the other BM_DNSMASQ_* have no effect on the behavior if this option -# is enabled. +# is enabled. 
BM_DNSMASQ_FROM_NOVA_NETWORK=`trueorfalse False $BM_DNSMASQ_FROM_NOVA_NETWORK` # BM_DNSMASQ_IFACE should match FLAT_NETWORK_BRIDGE @@ -105,9 +110,9 @@ fi # BM_DNSMASQ_DNS provide dns server to bootstrap clients BM_DNSMASQ_DNS=${BM_DNSMASQ_DNS:-} -# BM_FIRST_MAC *must* be set to the MAC address of the node you will boot. -# This is passed to dnsmasq along with the kernel/ramdisk to -# deploy via PXE. +# BM_FIRST_MAC *must* be set to the MAC address of the node you will +# boot. This is passed to dnsmasq along with the kernel/ramdisk to +# deploy via PXE. BM_FIRST_MAC=${BM_FIRST_MAC:-} # BM_SECOND_MAC is only important if the host has >1 NIC. @@ -121,9 +126,9 @@ BM_PM_ADDR=${BM_PM_ADDR:-0.0.0.0} BM_PM_USER=${BM_PM_USER:-user} BM_PM_PASS=${BM_PM_PASS:-pass} -# BM_FLAVOR_* options are arbitrary and not necessarily related to physical -# hardware capacity. These can be changed if you are testing -# BaremetalHostManager with multiple nodes and different flavors. +# BM_FLAVOR_* options are arbitrary and not necessarily related to +# physical hardware capacity. These can be changed if you are testing +# BaremetalHostManager with multiple nodes and different flavors. BM_CPU_ARCH=${BM_CPU_ARCH:-x86_64} BM_FLAVOR_CPU=${BM_FLAVOR_CPU:-1} BM_FLAVOR_RAM=${BM_FLAVOR_RAM:-1024} @@ -288,8 +293,8 @@ function create_baremetal_flavor() { } -# pull run-time kernel/ramdisk out of disk image and load into glance -# note that $file is currently expected to be in qcow2 format +# Pull run-time kernel/ramdisk out of disk image and load into glance. +# Note that $file is currently expected to be in qcow2 format. # Sets KERNEL_ID and RAMDISK_ID # # Usage: extract_and_upload_k_and_r_from_image $token $file @@ -432,7 +437,7 @@ function clear_baremetal_of_all_nodes() { done } -# inform nova-baremetal about nodes, MACs, etc +# Inform nova-baremetal about nodes, MACs, etc. 
# Defaults to using BM_FIRST_MAC and BM_SECOND_MAC if parameters not specified # # Usage: add_baremetal_node @@ -461,6 +466,7 @@ function add_baremetal_node() { # Restore xtrace $XTRACE -# Local variables: -# mode: shell-script -# End: +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: diff --git a/lib/ceilometer b/lib/ceilometer index a471d9c7e6..9257611fc6 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -10,6 +10,7 @@ # enable_service ceilometer-alarm-notifier ceilometer-alarm-evaluator # Dependencies: +# # - functions # - OS_AUTH_URL for auth in api # - DEST set to the destination directory @@ -18,12 +19,12 @@ # stack.sh # --------- -# install_ceilometer -# configure_ceilometer -# init_ceilometer -# start_ceilometer -# stop_ceilometer -# cleanup_ceilometer +# - install_ceilometer +# - configure_ceilometer +# - init_ceilometer +# - start_ceilometer +# - stop_ceilometer +# - cleanup_ceilometer # Save trace setting XTRACE=$(set +o | grep xtrace) @@ -156,6 +157,7 @@ function stop_ceilometer() { # Restore xtrace $XTRACE -# Local variables: -# mode: shell-script -# End: +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: diff --git a/lib/cinder b/lib/cinder index f6f137cabd..ae0e28c544 100644 --- a/lib/cinder +++ b/lib/cinder @@ -2,19 +2,20 @@ # Install and start **Cinder** volume service # Dependencies: +# # - functions # - DEST, DATA_DIR, STACK_USER must be defined -# SERVICE_{TENANT_NAME|PASSWORD} must be defined -# ``KEYSTONE_TOKEN_FORMAT`` must be defined +# - SERVICE_{TENANT_NAME|PASSWORD} must be defined +# - ``KEYSTONE_TOKEN_FORMAT`` must be defined # stack.sh # --------- -# install_cinder -# configure_cinder -# init_cinder -# start_cinder -# stop_cinder -# cleanup_cinder +# - install_cinder +# - configure_cinder +# - init_cinder +# - start_cinder +# - stop_cinder +# - cleanup_cinder # Save trace setting XTRACE=$(set +o | grep xtrace) @@ -82,7 +83,8 @@ 
VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-} # Functions # --------- # _clean_lvm_lv removes all cinder LVM volumes -# _clean_lvm_lv $VOLUME_GROUP $VOLUME_NAME_PREFIX +# +# Usage: _clean_lvm_lv $VOLUME_GROUP $VOLUME_NAME_PREFIX function _clean_lvm_lv() { local vg=$1 local lv_prefix=$2 @@ -98,7 +100,8 @@ function _clean_lvm_lv() { # _clean_lvm_backing_file() removes the backing file of the # volume group used by cinder -# _clean_lvm_backing_file() $VOLUME_GROUP +# +# Usage: _clean_lvm_backing_file() $VOLUME_GROUP function _clean_lvm_backing_file() { local vg=$1 @@ -546,6 +549,7 @@ function stop_cinder() { # Restore xtrace $XTRACE -# Local variables: -# mode: shell-script -# End: +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: diff --git a/lib/database b/lib/database index c3fd435eb2..0661049e70 100644 --- a/lib/database +++ b/lib/database @@ -124,6 +124,7 @@ function database_connection_url { # Restore xtrace $XTRACE -# Local variables: -# mode: shell-script -# End: +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: diff --git a/lib/databases/mysql b/lib/databases/mysql index 41e3236f69..0eb8fdd7a2 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -2,7 +2,8 @@ # Functions to control the configuration and operation of the **MySQL** database backend # Dependencies: -# DATABASE_{HOST,USER,PASSWORD} must be defined +# +# - DATABASE_{HOST,USER,PASSWORD} must be defined # Save trace setting MY_XTRACE=$(set +o | grep xtrace) diff --git a/lib/databases/postgresql b/lib/databases/postgresql index b173772170..519479ad68 100644 --- a/lib/databases/postgresql +++ b/lib/databases/postgresql @@ -2,7 +2,8 @@ # Functions to control the configuration and operation of the **PostgreSQL** database backend # Dependencies: -# DATABASE_{HOST,USER,PASSWORD} must be defined +# +# - DATABASE_{HOST,USER,PASSWORD} must be defined # Save trace setting PG_XTRACE=$(set +o | grep xtrace) diff 
--git a/lib/glance b/lib/glance index 75e3dd053d..eb727f1e2a 100644 --- a/lib/glance +++ b/lib/glance @@ -2,20 +2,21 @@ # Functions to control the configuration and operation of the **Glance** service # Dependencies: -# ``functions`` file -# ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined -# ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined -# ``SERVICE_HOST`` -# ``KEYSTONE_TOKEN_FORMAT`` must be defined +# +# - ``functions`` file +# - ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined +# - ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined +# - ``SERVICE_HOST`` +# - ``KEYSTONE_TOKEN_FORMAT`` must be defined # ``stack.sh`` calls the entry points in this order: # -# install_glance -# configure_glance -# init_glance -# start_glance -# stop_glance -# cleanup_glance +# - install_glance +# - configure_glance +# - init_glance +# - start_glance +# - stop_glance +# - cleanup_glance # Save trace setting XTRACE=$(set +o | grep xtrace) @@ -209,6 +210,7 @@ function stop_glance() { # Restore xtrace $XTRACE -# Local variables: -# mode: shell-script -# End: +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: diff --git a/lib/heat b/lib/heat index 8acadb4ad1..bf4d4bce9c 100644 --- a/lib/heat +++ b/lib/heat @@ -2,21 +2,23 @@ # Install and start **Heat** service # To enable, add the following to localrc -# ENABLED_SERVICES+=,heat,h-api,h-api-cfn,h-api-cw,h-eng +# +# ENABLED_SERVICES+=,heat,h-api,h-api-cfn,h-api-cw,h-eng # Dependencies: +# # - functions # stack.sh # --------- -# install_heatclient -# install_heat -# configure_heatclient -# configure_heat -# init_heat -# start_heat -# stop_heat -# cleanup_heat +# - install_heatclient +# - install_heat +# - configure_heatclient +# - configure_heat +# - init_heat +# - start_heat +# - stop_heat +# - cleanup_heat # Save trace setting XTRACE=$(set +o | grep xtrace) @@ -198,6 +200,7 @@ function disk_image_create { # Restore xtrace $XTRACE -# Local variables: -# mode: shell-script -# End: 
+# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: diff --git a/lib/horizon b/lib/horizon index 63caf3c0f2..af0db49da5 100644 --- a/lib/horizon +++ b/lib/horizon @@ -1,21 +1,20 @@ # lib/horizon # Functions to control the configuration and operation of the horizon service -# # Dependencies: -# ``functions`` file -# ``apache`` file -# ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined -# +# +# - ``functions`` file +# - ``apache`` file +# - ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined # ``stack.sh`` calls the entry points in this order: # -# install_horizon -# configure_horizon -# init_horizon -# start_horizon -# stop_horizon -# cleanup_horizon +# - install_horizon +# - configure_horizon +# - init_horizon +# - start_horizon +# - stop_horizon +# - cleanup_horizon # Save trace setting XTRACE=$(set +o | grep xtrace) @@ -25,8 +24,6 @@ set +o xtrace # Defaults # -------- -# - # Set up default directories HORIZON_DIR=$DEST/horizon @@ -183,6 +180,7 @@ function stop_horizon() { # Restore xtrace $XTRACE -# Local variables: -# mode: shell-script -# End: +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: diff --git a/lib/infra b/lib/infra index 0b732598ff..0dcf0ad980 100644 --- a/lib/infra +++ b/lib/infra @@ -5,12 +5,13 @@ # requirements as a global list # Dependencies: -# ``functions`` file +# +# - ``functions`` file # ``stack.sh`` calls the entry points in this order: # -# unfubar_setuptools -# install_infra +# - unfubar_setuptools +# - install_infra # Save trace setting XTRACE=$(set +o | grep xtrace) @@ -51,6 +52,7 @@ function install_infra() { # Restore xtrace $XTRACE -# Local variables: -# mode: shell-script -# End: +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: diff --git a/lib/ironic b/lib/ironic index 649c1c2cd6..ff99e58c19 100644 --- a/lib/ironic +++ b/lib/ironic @@ -2,21 +2,21 @@ # Functions to control the configuration and operation of 
the **Ironic** service # Dependencies: -# ``functions`` file -# ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined -# ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined -# ``SERVICE_HOST`` -# ``KEYSTONE_TOKEN_FORMAT`` must be defined +# +# - ``functions`` file +# - ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined +# - ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined +# - ``SERVICE_HOST`` +# - ``KEYSTONE_TOKEN_FORMAT`` must be defined # ``stack.sh`` calls the entry points in this order: # -# install_ironic -# install_ironicclient -# configure_ironic -# init_ironic -# start_ironic -# stop_ironic -# cleanup_ironic +# - install_ironic +# - install_ironicclient +# - init_ironic +# - start_ironic +# - stop_ironic +# - cleanup_ironic # Save trace setting XTRACE=$(set +o | grep xtrace) @@ -225,6 +225,7 @@ function stop_ironic() { # Restore xtrace $XTRACE -# Local variables: -# mode: shell-script -# End: +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: diff --git a/lib/keystone b/lib/keystone index beddb1cd75..7011f66e99 100755 --- a/lib/keystone +++ b/lib/keystone @@ -2,25 +2,26 @@ # Functions to control the configuration and operation of **Keystone** # Dependencies: -# ``functions`` file -# ``DEST``, ``STACK_USER`` -# ``IDENTITY_API_VERSION`` -# ``BASE_SQL_CONN`` -# ``SERVICE_HOST``, ``SERVICE_PROTOCOL`` -# ``SERVICE_TOKEN`` -# ``S3_SERVICE_PORT`` (template backend only) +# +# - ``functions`` file +# - ``DEST``, ``STACK_USER`` +# - ``IDENTITY_API_VERSION`` +# - ``BASE_SQL_CONN`` +# - ``SERVICE_HOST``, ``SERVICE_PROTOCOL`` +# - ``SERVICE_TOKEN`` +# - ``S3_SERVICE_PORT`` (template backend only) # ``stack.sh`` calls the entry points in this order: # -# install_keystone -# configure_keystone -# _config_keystone_apache_wsgi -# init_keystone -# start_keystone -# create_keystone_accounts -# stop_keystone -# cleanup_keystone -# _cleanup_keystone_apache_wsgi +# - install_keystone +# - configure_keystone +# - 
_config_keystone_apache_wsgi +# - init_keystone +# - start_keystone +# - create_keystone_accounts +# - stop_keystone +# - cleanup_keystone +# - _cleanup_keystone_apache_wsgi # Save trace setting XTRACE=$(set +o | grep xtrace) @@ -393,6 +394,7 @@ function stop_keystone() { # Restore xtrace $XTRACE -# Local variables: -# mode: shell-script -# End: +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: diff --git a/lib/ldap b/lib/ldap index 2a24ccddf7..80992a7a09 100644 --- a/lib/ldap +++ b/lib/ldap @@ -2,7 +2,8 @@ # Functions to control the installation and configuration of **ldap** # ``lib/keystone`` calls the entry points in this order: -# install_ldap() +# +# - install_ldap() # Save trace setting XTRACE=$(set +o | grep xtrace) @@ -91,6 +92,7 @@ function clear_ldap_state() { # Restore xtrace $XTRACE -# Local variables: -# mode: shell-script -# End: +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: diff --git a/lib/neutron b/lib/neutron index 00852df05a..50bdb74217 100644 --- a/lib/neutron +++ b/lib/neutron @@ -7,24 +7,24 @@ # ``stack.sh`` calls the entry points in this order: # -# install_neutron -# install_neutronclient -# install_neutron_agent_packages -# install_neutron_third_party -# configure_neutron -# init_neutron -# configure_neutron_third_party -# init_neutron_third_party -# start_neutron_third_party -# create_nova_conf_neutron -# start_neutron_service_and_check -# create_neutron_initial_network -# setup_neutron_debug -# start_neutron_agents +# - install_neutron +# - install_neutronclient +# - install_neutron_agent_packages +# - install_neutron_third_party +# - configure_neutron +# - init_neutron +# - configure_neutron_third_party +# - init_neutron_third_party +# - start_neutron_third_party +# - create_nova_conf_neutron +# - start_neutron_service_and_check +# - create_neutron_initial_network +# - setup_neutron_debug +# - start_neutron_agents # # ``unstack.sh`` calls the entry 
points in this order: # -# stop_neutron +# - stop_neutron # Functions in lib/neutron are classified into the following categories: # @@ -891,6 +891,7 @@ function stop_neutron_third_party() { # Restore xtrace $XTRACE -# Local variables: -# mode: shell-script -# End: +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: diff --git a/lib/nova b/lib/nova index 809f8e5896..98b32ead18 100644 --- a/lib/nova +++ b/lib/nova @@ -2,22 +2,23 @@ # Functions to control the configuration and operation of the **Nova** service # Dependencies: -# ``functions`` file -# ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined -# ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined -# ``LIBVIRT_TYPE`` must be defined -# ``INSTANCE_NAME_PREFIX``, ``VOLUME_NAME_PREFIX`` must be defined -# ``KEYSTONE_TOKEN_FORMAT`` must be defined +# +# - ``functions`` file +# - ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined +# - ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined +# - ``LIBVIRT_TYPE`` must be defined +# - ``INSTANCE_NAME_PREFIX``, ``VOLUME_NAME_PREFIX`` must be defined +# - ``KEYSTONE_TOKEN_FORMAT`` must be defined # ``stack.sh`` calls the entry points in this order: # -# install_nova -# configure_nova -# create_nova_conf -# init_nova -# start_nova -# stop_nova -# cleanup_nova +# - install_nova +# - configure_nova +# - create_nova_conf +# - init_nova +# - start_nova +# - stop_nova +# - cleanup_nova # Save trace setting XTRACE=$(set +o | grep xtrace) @@ -683,6 +684,7 @@ function stop_nova() { # Restore xtrace $XTRACE -# Local variables: -# mode: shell-script -# End: +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: diff --git a/lib/nova_plugins/hypervisor-docker b/lib/nova_plugins/hypervisor-docker index 427554b7db..300522fb48 100644 --- a/lib/nova_plugins/hypervisor-docker +++ b/lib/nova_plugins/hypervisor-docker @@ -2,11 +2,13 @@ # Configure the Docker hypervisor # Enable with: -# VIRT_DRIVER=docker +# +# 
VIRT_DRIVER=docker # Dependencies: -# ``functions`` file -# ``nova`` and ``glance`` configurations +# +# - ``functions`` file +# - ``nova`` and ``glance`` configurations # install_nova_hypervisor - install any external requirements # configure_nova_hypervisor - make configuration changes, including those to other services diff --git a/lib/oslo b/lib/oslo index f77a4fa941..816ae9a48a 100644 --- a/lib/oslo +++ b/lib/oslo @@ -6,11 +6,12 @@ # pre-released versions of oslo libraries. # Dependencies: -# ``functions`` file +# +# - ``functions`` file # ``stack.sh`` calls the entry points in this order: # -# install_oslo +# - install_oslo # Save trace setting XTRACE=$(set +o | grep xtrace) @@ -52,6 +53,7 @@ function cleanup_oslo() { # Restore xtrace $XTRACE -# Local variables: -# mode: shell-script -# End: +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: diff --git a/lib/rpc_backend b/lib/rpc_backend index a323d649a7..ae83e85e89 100644 --- a/lib/rpc_backend +++ b/lib/rpc_backend @@ -3,15 +3,16 @@ # rpc backend settings # Dependencies: -# ``functions`` file -# ``RABBIT_{HOST|PASSWORD}`` must be defined when RabbitMQ is used +# +# - ``functions`` file +# - ``RABBIT_{HOST|PASSWORD}`` must be defined when RabbitMQ is used # ``stack.sh`` calls the entry points in this order: # -# check_rpc_backend -# install_rpc_backend -# restart_rpc_backend -# iniset_rpc_backend +# - check_rpc_backend +# - install_rpc_backend +# - restart_rpc_backend +# - iniset_rpc_backend # Save trace setting XTRACE=$(set +o | grep xtrace) @@ -200,6 +201,7 @@ function qpid_is_supported() { # Restore xtrace $XTRACE -# Local variables: -# mode: shell-script -# End: +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: diff --git a/lib/savanna-dashboard b/lib/savanna-dashboard index 9562db4e1c..e96762285c 100644 --- a/lib/savanna-dashboard +++ b/lib/savanna-dashboard @@ -1,15 +1,16 @@ # lib/savanna-dashboard # Dependencies: -# 
``functions`` file -# ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined -# ``SERVICE_HOST +# +# - ``functions`` file +# - ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined +# - ``SERVICE_HOST`` # ``stack.sh`` calls the entry points in this order: # -# install_savanna_dashboard -# configure_savanna_dashboard -# cleanup_savanna_dashboard +# - install_savanna_dashboard +# - configure_savanna_dashboard +# - cleanup_savanna_dashboard # Save trace setting XTRACE=$(set +o | grep xtrace) diff --git a/lib/swift b/lib/swift index 3c3b8b1d38..db6ae18bef 100644 --- a/lib/swift +++ b/lib/swift @@ -2,22 +2,24 @@ # Functions to control the configuration and operation of the **Swift** service # Dependencies: -# ``functions`` file -# ``apache`` file -# ``DEST``, ``SCREEN_NAME``, `SWIFT_HASH` must be defined -# ``STACK_USER`` must be defined -# ``SWIFT_DATA_DIR`` or ``DATA_DIR`` must be defined -# ``lib/keystone`` file +# +# - ``functions`` file +# - ``apache`` file +# - ``DEST``, ``SCREEN_NAME``, `SWIFT_HASH` must be defined +# - ``STACK_USER`` must be defined +# - ``SWIFT_DATA_DIR`` or ``DATA_DIR`` must be defined +# - ``lib/keystone`` file +# # ``stack.sh`` calls the entry points in this order: # -# install_swift -# _config_swift_apache_wsgi -# configure_swift -# init_swift -# start_swift -# stop_swift -# cleanup_swift -# _cleanup_swift_apache_wsgi +# - install_swift +# - _config_swift_apache_wsgi +# - configure_swift +# - init_swift +# - start_swift +# - stop_swift +# - cleanup_swift +# - _cleanup_swift_apache_wsgi # Save trace setting XTRACE=$(set +o | grep xtrace) @@ -617,6 +619,7 @@ function stop_swift() { # Restore xtrace $XTRACE -# Local variables: -# mode: shell-script -# End: +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: diff --git a/lib/tempest b/lib/tempest index 8e4e5210ea..25814b6250 100644 --- a/lib/tempest +++ b/lib/tempest @@ -2,34 +2,38 @@ # Install and configure Tempest # Dependencies: -# ``functions`` file 
-# ``lib/nova`` service is running -# -# - ``DEST``, ``FILES`` -# - ``ADMIN_PASSWORD`` -# - ``DEFAULT_IMAGE_NAME`` -# - ``S3_SERVICE_PORT`` -# - ``SERVICE_HOST`` -# - ``BASE_SQL_CONN`` ``lib/database`` declares -# - ``PUBLIC_NETWORK_NAME`` -# - ``Q_USE_NAMESPACE`` -# - ``Q_ROUTER_NAME`` -# - ``VIRT_DRIVER`` -# - ``LIBVIRT_TYPE`` -# - ``KEYSTONE_SERVICE_PROTOCOL``, ``KEYSTONE_SERVICE_HOST`` from lib/keystone +# +# - ``functions`` file +# - ``lib/nova`` service is running +# - Global vars that are assumed to be defined: +# - ``DEST``, ``FILES`` +# - ``ADMIN_PASSWORD`` +# - ``DEFAULT_IMAGE_NAME`` +# - ``S3_SERVICE_PORT`` +# - ``SERVICE_HOST`` +# - ``BASE_SQL_CONN`` ``lib/database`` declares +# - ``PUBLIC_NETWORK_NAME`` +# - ``Q_USE_NAMESPACE`` +# - ``Q_ROUTER_NAME`` +# - ``VIRT_DRIVER`` +# - ``LIBVIRT_TYPE`` +# - ``KEYSTONE_SERVICE_PROTOCOL``, ``KEYSTONE_SERVICE_HOST`` from lib/keystone +# # Optional Dependencies: -# ALT_* (similar vars exists in keystone_data.sh) -# ``LIVE_MIGRATION_AVAILABLE`` -# ``USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION`` -# ``DEFAULT_INSTANCE_TYPE`` -# ``DEFAULT_INSTANCE_USER`` -# ``CINDER_MULTI_LVM_BACKEND`` -# ``HEAT_CREATE_TEST_IMAGE`` +# +# - ``ALT_*`` (similar vars exists in keystone_data.sh) +# - ``LIVE_MIGRATION_AVAILABLE`` +# - ``USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION`` +# - ``DEFAULT_INSTANCE_TYPE`` +# - ``DEFAULT_INSTANCE_USER`` +# - ``CINDER_MULTI_LVM_BACKEND`` +# - ``HEAT_CREATE_TEST_IMAGE`` +# # ``stack.sh`` calls the entry points in this order: # -# install_tempest -# configure_tempest -# init_tempest +# - install_tempest +# - configure_tempest +# - init_tempest # Save trace setting XTRACE=$(set +o | grep xtrace) @@ -345,6 +349,7 @@ function init_tempest() { # Restore xtrace $XTRACE -# Local variables: -# mode: shell-script -# End: +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: diff --git a/lib/template b/lib/template index 72904fe1c6..629e110271 100644 --- a/lib/template +++ b/lib/template 
@@ -3,18 +3,19 @@ # # Dependencies: -# ``functions`` file -# ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined -# +# +# - ``functions`` file +# - ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined +# - # ``stack.sh`` calls the entry points in this order: # -# install_XXXX -# configure_XXXX -# init_XXXX -# start_XXXX -# stop_XXXX -# cleanup_XXXX +# - install_XXXX +# - configure_XXXX +# - init_XXXX +# - start_XXXX +# - stop_XXXX +# - cleanup_XXXX # Save trace setting XTRACE=$(set +o | grep xtrace) @@ -79,6 +80,7 @@ function stop_XXXX() { # Restore xtrace $XTRACE -# Local variables: -# mode: shell-script -# End: +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: diff --git a/lib/tls b/lib/tls index f7dcffa32d..a1a7fddc18 100644 --- a/lib/tls +++ b/lib/tls @@ -1,24 +1,27 @@ # lib/tls # Functions to control the configuration and operation of the TLS proxy service -# Dependencies: # !! source _before_ any services that use ``SERVICE_HOST`` -# ``functions`` file -# ``DEST``, ``DATA_DIR`` must be defined -# ``HOST_IP``, ``SERVICE_HOST`` -# ``KEYSTONE_TOKEN_FORMAT`` must be defined +# +# Dependencies: +# +# - ``functions`` file +# - ``DEST``, ``DATA_DIR`` must be defined +# - ``HOST_IP``, ``SERVICE_HOST`` +# - ``KEYSTONE_TOKEN_FORMAT`` must be defined # Entry points: -# configure_CA -# init_CA +# +# - configure_CA +# - init_CA -# configure_proxy -# start_tls_proxy +# - configure_proxy +# - start_tls_proxy -# make_root_ca -# make_int_ca -# new_cert $INT_CA_DIR int-server "abc" -# start_tls_proxy HOST_IP 5000 localhost 5000 +# - make_root_ca +# - make_int_ca +# - new_cert $INT_CA_DIR int-server "abc" +# - start_tls_proxy HOST_IP 5000 localhost 5000 # Defaults @@ -321,6 +324,7 @@ function start_tls_proxy() { } -# Local variables: -# mode: shell-script -# End: +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: diff --git a/lib/trove b/lib/trove index 0a19d031ac..c40006bf5d 100644 --- a/lib/trove 
+++ b/lib/trove @@ -181,6 +181,7 @@ function stop_trove() { # Restore xtrace $XTRACE -# Local variables: -# mode: shell-script -# End: +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: From 4b2c5ed209514534a0ead54cd3c91fc3b42d6194 Mon Sep 17 00:00:00 2001 From: Adam Spiers Date: Thu, 24 Oct 2013 17:40:13 +0100 Subject: [PATCH 0479/4704] only clone docs from gh-pages branch if they aren't there Allow the caller of this script to decide which commit from the gh-pages branch should be used to build the docs. This also avoids excessive repeated git clones during development. Change-Id: I3e58eef0ac03b15903c06d5632c0eb41413db02c --- tools/build_docs.sh | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tools/build_docs.sh b/tools/build_docs.sh index 216e557025..1c145e237f 100755 --- a/tools/build_docs.sh +++ b/tools/build_docs.sh @@ -91,8 +91,9 @@ fi # Assumption is we are now in the DevStack repo workspace to be processed # Pull the latest docs branch from devstack.org repo -rm -rf docs || true -git clone -b gh-pages $GH_PAGES_REPO docs +if ! [ -d docs ]; then + git clone -b gh-pages $GH_PAGES_REPO docs +fi # Build list of scripts to process FILES="" From fd98edb469884610031207695ec91c2db8c7ab93 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Thu, 24 Oct 2013 14:57:59 -0400 Subject: [PATCH 0480/4704] create an UNDO_REQUIREMENTS variable in the gate, we actually need to *not* roll back the requirements, otherwise tempest can wedge the world because it runs under tox. Make a variable that we can set to false in the gate to ensure that we don't roll back the requirements changes.
Change-Id: I2b842ecc3f6e8b917dd721729640000bd7e7fb78 --- functions | 15 ++++++++++++--- stackrc | 3 +++ 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/functions b/functions index c707e556c6..bece5a4a56 100644 --- a/functions +++ b/functions @@ -1257,9 +1257,18 @@ function setup_develop() { # ensure that further actions can do things like setup.py sdist safe_chown -R $STACK_USER $1/*.egg-info - # Undo requirements changes, if we made them - if [ $update_requirements -eq 0 ]; then - (cd $project_dir && git reset --hard) + # We've just gone and possibly modified the user's source tree in an + # automated way, which is considered bad form if it's a development + # tree because we've screwed up their next git checkin. So undo it. + # + # However... there are some circumstances, like running in the gate + # where we really really want the overridden version to stick. So provide + # a variable that tells us whether or not we should UNDO the requirements + # changes (this will be set to False in the OpenStack ci gate) + if [ $UNDO_REQUIREMENTS = "True"]; then + if [ $update_requirements -eq 0 ]; then + (cd $project_dir && git reset --hard) + fi fi } diff --git a/stackrc b/stackrc index 0151672c1d..7069327dd7 100644 --- a/stackrc +++ b/stackrc @@ -297,6 +297,9 @@ SCREEN_NAME=${SCREEN_NAME:-stack} # Do not install packages tagged with 'testonly' by default INSTALL_TESTONLY_PACKAGES=${INSTALL_TESTONLY_PACKAGES:-False} +# Undo requirements changes by global requirements +UNDO_REQUIREMENTS=${UNDO_REQUIREMENTS:-True} + # Local variables: # mode: shell-script # End: From 5a77d03addeab27704eeeb6a43ea75c1a9941764 Mon Sep 17 00:00:00 2001 From: Sam Alba Date: Mon, 21 Oct 2013 16:17:30 -0700 Subject: [PATCH 0481/4704] Removed docker version requirement Change-Id: I9f04db46706bf6453def09ca1e22e0db918d811b Closes-Bug: #1237581 --- lib/nova_plugins/hypervisor-docker | 3 +-- tools/docker/install_docker.sh | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git 
a/lib/nova_plugins/hypervisor-docker b/lib/nova_plugins/hypervisor-docker index 427554b7db..70b1b6bdc5 100644 --- a/lib/nova_plugins/hypervisor-docker +++ b/lib/nova_plugins/hypervisor-docker @@ -37,7 +37,6 @@ DOCKER_REGISTRY_IMAGE=${DOCKER_REGISTRY_IMAGE:-http://get.docker.io/images/opens DOCKER_REGISTRY_IMAGE_NAME=docker-registry DOCKER_REPOSITORY_NAME=${SERVICE_HOST}:${DOCKER_REGISTRY_PORT}/${DOCKER_IMAGE_NAME} -DOCKER_PACKAGE_VERSION=${DOCKER_PACKAGE_VERSION:-0.6.1} DOCKER_APT_REPO=${DOCKER_APT_REPO:-https://get.docker.io/ubuntu} @@ -72,7 +71,7 @@ function install_nova_hypervisor() { fi # Make sure Docker is installed - if ! is_package_installed lxc-docker-${DOCKER_PACKAGE_VERSION}; then + if ! is_package_installed lxc-docker; then die $LINENO "Docker is not installed. Please run tools/docker/install_docker.sh" fi diff --git a/tools/docker/install_docker.sh b/tools/docker/install_docker.sh index 483955bfc2..2e5b510c41 100755 --- a/tools/docker/install_docker.sh +++ b/tools/docker/install_docker.sh @@ -38,7 +38,7 @@ curl https://get.docker.io/gpg | sudo apt-key add - install_package python-software-properties && \ sudo sh -c "echo deb $DOCKER_APT_REPO docker main > /etc/apt/sources.list.d/docker.list" apt_get update -install_package --force-yes lxc-docker-${DOCKER_PACKAGE_VERSION} socat +install_package --force-yes lxc-docker socat # Start the daemon - restart just in case the package ever auto-starts... restart_service docker From 62e8a30abd9d2504bfca1c1c1c72151d729cc9c8 Mon Sep 17 00:00:00 2001 From: Peter Feiner Date: Thu, 24 Oct 2013 17:49:00 -0400 Subject: [PATCH 0482/4704] Fix apache horizon permissions on Ubuntu 13.10 Fixes bug 1241574. 
Change-Id: If6c30874267a6bf30c114146f83b2d3220f32c1a --- lib/horizon | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/lib/horizon b/lib/horizon index 63caf3c0f2..80f8df71f4 100644 --- a/lib/horizon +++ b/lib/horizon @@ -123,6 +123,11 @@ function init_horizon() { # Be a good citizen and use the distro tools here sudo touch $horizon_conf sudo a2ensite horizon.conf + if [[ "$DISTRO" == "saucy" ]]; then + # Ubuntu 13.10 has Require all denied in apache2.conf + # and requires explicit Require all granted + HORIZON_REQUIRE='Require all granted' + fi elif is_fedora; then if [[ "$os_RELEASE" -ge "18" ]]; then # fedora 18 has Require all denied in its httpd.conf From 20150864eb3ea1fbaa616f6a6cc022dc9bc13c14 Mon Sep 17 00:00:00 2001 From: Sam Alba Date: Thu, 24 Oct 2013 17:09:40 -0700 Subject: [PATCH 0483/4704] Removed dependency from dotcloud repos for the Nova docker driver Since the Nova driver is in Nova core from the Havana release, this will just clean the docker nova driver install. Change-Id: Ic98012b8b5e54e727a1b11f4d32f6623d2067621 --- lib/nova_plugins/hypervisor-docker | 5 ----- 1 file changed, 5 deletions(-) diff --git a/lib/nova_plugins/hypervisor-docker b/lib/nova_plugins/hypervisor-docker index 427554b7db..2451982ed9 100644 --- a/lib/nova_plugins/hypervisor-docker +++ b/lib/nova_plugins/hypervisor-docker @@ -24,7 +24,6 @@ set +o xtrace # Set up default directories DOCKER_DIR=$DEST/docker -DOCKER_REPO=${DOCKER_REPO:-https://github.com/dotcloud/openstack-docker.git} DOCKER_BRANCH=${DOCKER_BRANCH:-master} DOCKER_UNIX_SOCKET=/var/run/docker.sock @@ -54,10 +53,6 @@ function cleanup_nova_hypervisor() { # configure_nova_hypervisor - Set config files, create data dirs, etc function configure_nova_hypervisor() { - git_clone $DOCKER_REPO $DOCKER_DIR $DOCKER_BRANCH - - ln -snf ${DOCKER_DIR}/nova-driver $NOVA_DIR/nova/virt/docker - iniset $NOVA_CONF DEFAULT compute_driver docker.DockerDriver iniset $GLANCE_API_CONF DEFAULT container_formats 
ami,ari,aki,bare,ovf,docker From 4540d00ebdd3283de47d6546b6e7575e2e9041ff Mon Sep 17 00:00:00 2001 From: Aaron Rosen Date: Thu, 24 Oct 2013 13:59:33 -0700 Subject: [PATCH 0484/4704] All neutron plugins should now use LibvirtGenericVIFDriver Change-Id: I70015ae55fe6db9c6c4663a8d021fe9cfe2eddcf --- lib/neutron | 4 ++++ lib/neutron_plugins/bigswitch_floodlight | 2 +- lib/neutron_plugins/linuxbridge_agent | 2 +- lib/neutron_plugins/nicira | 1 - lib/neutron_plugins/ovs_base | 2 +- lib/neutron_plugins/plumgrid | 3 +-- 6 files changed, 8 insertions(+), 6 deletions(-) diff --git a/lib/neutron b/lib/neutron index 44fb9e1005..3f39d33f41 100644 --- a/lib/neutron +++ b/lib/neutron @@ -110,6 +110,10 @@ Q_ALLOW_OVERLAPPING_IP=${Q_ALLOW_OVERLAPPING_IP:-True} Q_USE_DEBUG_COMMAND=${Q_USE_DEBUG_COMMAND:-False} # The name of the default q-l3 router Q_ROUTER_NAME=${Q_ROUTER_NAME:-router1} +# nova vif driver that all plugins should use +NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"} + + # List of config file names in addition to the main plugin config file # See _configure_neutron_common() for details about setting it up declare -a Q_PLUGIN_EXTRA_CONF_FILES diff --git a/lib/neutron_plugins/bigswitch_floodlight b/lib/neutron_plugins/bigswitch_floodlight index 24507312c7..93ec497bb9 100644 --- a/lib/neutron_plugins/bigswitch_floodlight +++ b/lib/neutron_plugins/bigswitch_floodlight @@ -9,7 +9,7 @@ source $TOP_DIR/lib/neutron_plugins/ovs_base source $TOP_DIR/lib/neutron_thirdparty/bigswitch_floodlight # for third party service specific configuration values function neutron_plugin_create_nova_conf() { - NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"} + : } function neutron_plugin_install_agent_packages() { diff --git a/lib/neutron_plugins/linuxbridge_agent b/lib/neutron_plugins/linuxbridge_agent index 88c49c5b5e..85e8c085be 100644 --- a/lib/neutron_plugins/linuxbridge_agent +++ b/lib/neutron_plugins/linuxbridge_agent 
@@ -11,7 +11,7 @@ function is_neutron_ovs_base_plugin() { } function neutron_plugin_create_nova_conf() { - NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"} + : } function neutron_plugin_install_agent_packages() { diff --git a/lib/neutron_plugins/nicira b/lib/neutron_plugins/nicira index 7c99b692d6..87d3c3d17b 100644 --- a/lib/neutron_plugins/nicira +++ b/lib/neutron_plugins/nicira @@ -26,7 +26,6 @@ function is_neutron_ovs_base_plugin() { } function neutron_plugin_create_nova_conf() { - NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtOpenVswitchDriver"} # if n-cpu is enabled, then setup integration bridge if is_service_enabled n-cpu; then setup_integration_bridge diff --git a/lib/neutron_plugins/ovs_base b/lib/neutron_plugins/ovs_base index 1214f3bcbd..89db29d07f 100644 --- a/lib/neutron_plugins/ovs_base +++ b/lib/neutron_plugins/ovs_base @@ -73,7 +73,7 @@ function _neutron_ovs_base_configure_l3_agent() { } function _neutron_ovs_base_configure_nova_vif_driver() { - NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"} + : } # Restore xtrace diff --git a/lib/neutron_plugins/plumgrid b/lib/neutron_plugins/plumgrid index 9d3c92ff51..d4050bb951 100644 --- a/lib/neutron_plugins/plumgrid +++ b/lib/neutron_plugins/plumgrid @@ -9,8 +9,7 @@ set +o xtrace #source $TOP_DIR/lib/neutron_plugins/ovs_base function neutron_plugin_create_nova_conf() { - - NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"} + : } function neutron_plugin_setup_interface_driver() { From 246d9bbd4a13cc2848411eda81eac8b311850717 Mon Sep 17 00:00:00 2001 From: Thomas Maddox Date: Thu, 24 Oct 2013 18:57:40 +0000 Subject: [PATCH 0485/4704] Add PostgreSQL option for Ceilometer backend Adds the PostgreSQL case for a Ceilometer backend and initializes Ceilometer accordingly. 
Closes-Bug: #1244381 Change-Id: Iefc5bb7eea6e9efa1f2ad04f1f8dc714e3404c9c --- lib/ceilometer | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ceilometer b/lib/ceilometer index cd4c4d8656..a79ca55183 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -91,7 +91,7 @@ function configure_ceilometer() { iniset $CEILOMETER_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME iniset $CEILOMETER_CONF keystone_authtoken signing_dir $CEILOMETER_AUTH_CACHE_DIR - if [[ "$CEILOMETER_BACKEND" = 'mysql' ]]; then + if [ "$CEILOMETER_BACKEND" = 'mysql' ] || [ "$CEILOMETER_BACKEND" = 'postgresql' ] ; then iniset $CEILOMETER_CONF database connection `database_connection_url ceilometer` else iniset $CEILOMETER_CONF database connection mongodb://localhost:27017/ceilometer @@ -116,7 +116,7 @@ function init_ceilometer() { sudo chown $STACK_USER $CEILOMETER_AUTH_CACHE_DIR rm -f $CEILOMETER_AUTH_CACHE_DIR/* - if [[ "$CEILOMETER_BACKEND" = 'mysql' ]]; then + if [ "$CEILOMETER_BACKEND" = 'mysql' ] || [ "$CEILOMETER_BACKEND" = 'postgresql' ] ; then recreate_database ceilometer utf8 $CEILOMETER_BIN_DIR/ceilometer-dbsync fi From 1ce2ffd15fbb9423cd5f705e10d34dee5e23a4d5 Mon Sep 17 00:00:00 2001 From: "Joe H. Rahme" Date: Tue, 22 Oct 2013 15:19:09 +0200 Subject: [PATCH 0486/4704] Enables Swift crossdomain middleware by default This step is needed to enable its testing in Tempest. The patch adds a variable SWIFT_EXTRAS_MIDDLEWARE_NO_AUTH that holds a list of middlewares inserted in the pipeline before authentication middlewares (tempauth, keystoneauth, ...). Change-Id: I1927103feff997a354ccf82ccf12aa77db083ad3 --- lib/swift | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/lib/swift b/lib/swift index 6ab43c420f..2b23e44d78 100644 --- a/lib/swift +++ b/lib/swift @@ -72,6 +72,10 @@ SWIFT_EXTRAS_MIDDLEWARE=${SWIFT_EXTRAS_MIDDLEWARE:-tempurl formpost staticweb} # the end of the pipeline. 
SWIFT_EXTRAS_MIDDLEWARE_LAST=${SWIFT_EXTRAS_MIDDLEWARE_LAST} +# Set ``SWIFT_EXTRAS_MIDDLEWARE_NO_AUTH`` to extras middlewares that need to be at +# the beginning of the pipeline, before authentication middlewares. +SWIFT_EXTRAS_MIDDLEWARE_NO_AUTH=${SWIFT_EXTRAS_MIDDLEWARE_NO_AUTH:-crossdomain} + # The ring uses a configurable number of bits from a path’s MD5 hash as # a partition index that designates a device. The number of bits kept # from the hash is known as the partition power, and 2 to the partition @@ -210,7 +214,7 @@ function _config_swift_apache_wsgi() { # configure_swift() - Set config files, create data dirs and loop image function configure_swift() { - local swift_pipeline=" " + local swift_pipeline="${SWIFT_EXTRAS_MIDDLEWARE_NO_AUTH}" local node_number local swift_node_config local swift_log_dir @@ -271,7 +275,7 @@ function configure_swift() { # tempauth would be prefixed with the reseller_prefix setting TEMPAUTH_ the # token for keystoneauth would have the standard reseller_prefix AUTH_ if is_service_enabled swift3;then - swift_pipeline=" swift3 s3token " + swift_pipeline+=" swift3 s3token " fi swift_pipeline+=" authtoken keystoneauth tempauth " sed -i "/^pipeline/ { s/tempauth/${swift_pipeline} ${SWIFT_EXTRAS_MIDDLEWARE}/ ;}" ${SWIFT_CONFIG_PROXY_SERVER} @@ -283,6 +287,9 @@ function configure_swift() { iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} filter:tempauth reseller_prefix iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:tempauth reseller_prefix "TEMPAUTH" + # Configure Crossdomain + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:crossdomain use "egg:swift#crossdomain" + # Configure Keystone sed -i '/^# \[filter:authtoken\]/,/^# \[filter:keystoneauth\]$/ s/^#[ \t]*//' ${SWIFT_CONFIG_PROXY_SERVER} iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_host $KEYSTONE_AUTH_HOST From 388e36c98bf3efae3a1e25eabdd291f0b5b5e7c9 Mon Sep 17 00:00:00 2001 From: Peter Feiner Date: Thu, 24 Oct 2013 18:51:44 -0400 Subject: [PATCH 0487/4704] zsh openrc compatibility 
Replacing $BASH_SOURCE with ${BASH_SOURCE:-$0} makes devstack zsh friendly: in bash, $BASH_SOURCE is used per usual; in zsh, where $BASH_SOURCE isn't defined, $0 is used, which, unlike in bash, evaluates to the current source file. Now you can source devstack's openrc from a zsh shell. Tested with bash and zsh from directories other than the root devstack directory. Change-Id: Iab1a817b15d86144163b5094bb58f94b15c598a0 --- eucarc | 2 +- openrc | 2 +- stackrc | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/eucarc b/eucarc index 2b0f7dd143..350235106c 100644 --- a/eucarc +++ b/eucarc @@ -13,7 +13,7 @@ if [[ -n "$2" ]]; then fi # Find the other rc files -RC_DIR=$(cd $(dirname "$BASH_SOURCE") && pwd) +RC_DIR=$(cd $(dirname "${BASH_SOURCE:-$0}") && pwd) # Get user configuration source $RC_DIR/openrc diff --git a/openrc b/openrc index 3de7e3958f..5344d247bd 100644 --- a/openrc +++ b/openrc @@ -18,7 +18,7 @@ if [[ -n "$2" ]]; then fi # Find the other rc files -RC_DIR=$(cd $(dirname "$BASH_SOURCE") && pwd) +RC_DIR=$(cd $(dirname "${BASH_SOURCE:-$0}") && pwd) # Import common functions source $RC_DIR/functions diff --git a/stackrc b/stackrc index 0151672c1d..4da481e98e 100644 --- a/stackrc +++ b/stackrc @@ -1,7 +1,7 @@ # stackrc # # Find the other rc files -RC_DIR=$(cd $(dirname "$BASH_SOURCE") && pwd) +RC_DIR=$(cd $(dirname "${BASH_SOURCE:-$0}") && pwd) # Destination path for installation DEST=/opt/stack From f7cfa0c6e7a965949441ded6a789e12e5bc58039 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Fri, 25 Oct 2013 13:26:17 -0400 Subject: [PATCH 0488/4704] put the stackforge library installation behind a conditional we don't actually want to drag in stackforge libraries all the time, instead ensure that we have enabled stackforge_libs before doing it. 
Change-Id: Ic1c2e3d19e106a2aa0db9725d16a8b207546c23d --- stack.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 45604da140..6de7599bbf 100755 --- a/stack.sh +++ b/stack.sh @@ -631,7 +631,9 @@ install_infra install_oslo # Install stackforge libraries for testing -install_stackforge +if is_service_enabled stackforge_libs; then + install_stackforge +fi # Install clients libraries install_keystoneclient From f470d95b907b91e2879e389ea75dbdb1cb525b74 Mon Sep 17 00:00:00 2001 From: Chuck Short Date: Fri, 25 Oct 2013 15:08:44 -0700 Subject: [PATCH 0489/4704] Enable trusty Enable Trusty Tahr (14.04) the next Ubuntu LTS release. Change-Id: I48fe52345fefaf9ac7ba4be7d3f5675f72eea754 Signed-off-by: Chuck Short --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 6de7599bbf..36312ea619 100755 --- a/stack.sh +++ b/stack.sh @@ -131,7 +131,7 @@ disable_negated_services # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -if [[ ! ${DISTRO} =~ (oneiric|precise|quantal|raring|saucy|7.0|wheezy|sid|testing|jessie|f16|f17|f18|f19|opensuse-12.2|rhel6) ]]; then +if [[ ! ${DISTRO} =~ (oneiric|precise|quantal|raring|saucy|trusty|7.0|wheezy|sid|testing|jessie|f16|f17|f18|f19|opensuse-12.2|rhel6) ]]; then echo "WARNING: this script has not been tested on $DISTRO" if [[ "$FORCE" != "yes" ]]; then die $LINENO "If you wish to run this script anyway run with FORCE=yes" From e4a523f543e59d92ab227b5bcfaad09fc171b6a9 Mon Sep 17 00:00:00 2001 From: Joe Mills Date: Mon, 28 Oct 2013 07:38:55 +0000 Subject: [PATCH 0490/4704] Cleanup unused settings, use interface_driver setter * Remove unused MIDONET specific settings from nova.conf * Remove unused MIDONET specific settings from dhcp_agent.ini * Move the interface_driver settings to the proper setting function so that it can be used by lbaas_agent.ini as well.
Change-Id: Id686ff5f55db00bce42b8a2bd56e7655c73211fb Closes-Bug: #1245401 --- lib/neutron_plugins/midonet | 19 +++---------------- 1 file changed, 3 insertions(+), 16 deletions(-) diff --git a/lib/neutron_plugins/midonet b/lib/neutron_plugins/midonet index cf45a9d11d..e406146bbe 100644 --- a/lib/neutron_plugins/midonet +++ b/lib/neutron_plugins/midonet @@ -32,23 +32,10 @@ function neutron_plugin_configure_debug_command() { function neutron_plugin_configure_dhcp_agent() { DHCP_DRIVER=${DHCP_DRIVER:-"neutron.plugins.midonet.agent.midonet_driver.DhcpNoOpDriver"} - DHCP_INTERFACE_DRIVER=${DHCP_INTEFACE_DRIVER:-"neutron.plugins.midonet.agent.midonet_driver.MidonetInterfaceDriver"} + neutron_plugin_setup_interface_driver $Q_DHCP_CONF_FILE iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_driver $DHCP_DRIVER - iniset $Q_DHCP_CONF_FILE DEFAULT interface_driver $DHCP_INTERFACE_DRIVER iniset $Q_DHCP_CONF_FILE DEFAULT use_namespaces True iniset $Q_DHCP_CONF_FILE DEFAULT enable_isolated_metadata True - if [[ "$MIDONET_API_URI" != "" ]]; then - iniset $Q_DHCP_CONF_FILE MIDONET midonet_uri "$MIDONET_API_URI" - fi - if [[ "$MIDONET_USERNAME" != "" ]]; then - iniset $Q_DHCP_CONF_FILE MIDONET username "$MIDONET_USERNAME" - fi - if [[ "$MIDONET_PASSWORD" != "" ]]; then - iniset $Q_DHCP_CONF_FILE MIDONET password "$MIDONET_PASSWORD" - fi - if [[ "$MIDONET_PROJECT_ID" != "" ]]; then - iniset $Q_DHCP_CONF_FILE MIDONET project_id "$MIDONET_PROJECT_ID" - fi } function neutron_plugin_configure_l3_agent() { @@ -78,8 +65,8 @@ function neutron_plugin_configure_service() { } function neutron_plugin_setup_interface_driver() { - # May change in the future - : + local conf_file=$1 + iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.MidonetInterfaceDriver } function has_neutron_plugin_security_group() { From 8787e0fd0c4b2ec29e6016a13e913a9ef3ac5444 Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Mon, 28 Oct 2013 18:15:57 +0000 Subject: [PATCH 0491/4704] xenapi: always reset JeOS's 
network If the JeOS template contained an exotic network configuration, the VM prep step might fail. This patch resets the networking of the VM before starting it. Fixes bug 1245607 Change-Id: I921f1fdd0709d7a7760c4bb165e32f3898098bff --- tools/xen/install_os_domU.sh | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh index 9a2f5a8c03..33dc26f1bb 100755 --- a/tools/xen/install_os_domU.sh +++ b/tools/xen/install_os_domU.sh @@ -271,6 +271,12 @@ set_vm_memory "$GUEST_NAME" "$OSDOMU_MEM_MB" # Max out VCPU count for better performance max_vcpus "$GUEST_NAME" +# Wipe out all network cards +destroy_all_vifs_of "$GUEST_NAME" + +# Add only one interface to prepare the guest template +add_interface "$GUEST_NAME" "$MGT_BRIDGE_OR_NET_NAME" "0" + # start the VM to run the prepare steps xe vm-start vm="$GUEST_NAME" From b245c5d21c4eee77e4649e0115d579b7e28c9851 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Wed, 30 Oct 2013 15:11:08 -0400 Subject: [PATCH 0492/4704] Set Nova auth_proto regardless of tls-proxy. We actually want to comment out the keystone auth_proto in Nova's api-paste.ini file regardless of the tls-proxy setting. Likewise lets always set it in nova.conf as well. This should fix an issue in trying to get this in: https://review.openstack.org/#/c/52825/ Change-Id: I1b8202aa1666cbb6ca13d2f77d50fa0175969266 --- lib/nova | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/lib/nova b/lib/nova index 31f286d943..150025f35b 100644 --- a/lib/nova +++ b/lib/nova @@ -221,9 +221,7 @@ function configure_nova() { # Comment out the keystone configs in Nova's api-paste.ini. # We are using nova.conf to configure this instead. 
inicomment $NOVA_API_PASTE_INI filter:authtoken auth_host - if is_service_enabled tls-proxy; then - inicomment $NOVA_API_PASTE_INI filter:authtoken auth_protocol - fi + inicomment $NOVA_API_PASTE_INI filter:authtoken auth_protocol inicomment $NOVA_API_PASTE_INI filter:authtoken admin_tenant_name inicomment $NOVA_API_PASTE_INI filter:authtoken admin_user inicomment $NOVA_API_PASTE_INI filter:authtoken admin_password @@ -397,9 +395,7 @@ function create_nova_conf() { # Add keystone authtoken configuration iniset $NOVA_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST - if is_service_enabled tls-proxy; then - iniset $NOVA_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL - fi + iniset $NOVA_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL iniset $NOVA_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME iniset $NOVA_CONF keystone_authtoken admin_user nova iniset $NOVA_CONF keystone_authtoken admin_password $SERVICE_PASSWORD From 24f6efadbdef558655abc5f1052bb61ae87b55f0 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Thu, 31 Oct 2013 10:27:58 -0400 Subject: [PATCH 0493/4704] Add FORCE_CONFIG_DRIVE and make it the default Adds a new FORCE_CONFIG_DRIVE option to lib/nova which is by default enabled. Using config drive should speed things up a bit and is a more likely production default instead of file injection. Change-Id: I2388ef0df12a6289b619bfaf30cb952fcc48ef41 --- lib/nova | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/lib/nova b/lib/nova index e9fbd560b4..8ef5d9a427 100644 --- a/lib/nova +++ b/lib/nova @@ -63,6 +63,10 @@ NOVA_ROOTWRAP=$(get_rootwrap_location nova) # NOTE: Set API_RATE_LIMIT="False" to turn OFF rate limiting API_RATE_LIMIT=${API_RATE_LIMIT:-"True"} +# Option to enable/disable config drive +# NOTE: Set FORCE_CONFIG_DRIVE="False" to turn OFF config drive +FORCE_CONFIG_DRIVE=${FORCE_CONFIG_DRIVE:-"always"} + # Nova supports pluggable schedulers. The default ``FilterScheduler`` # should work in most cases. 
SCHEDULER=${SCHEDULER:-nova.scheduler.filter_scheduler.FilterScheduler} @@ -428,6 +432,9 @@ function create_nova_conf() { if [ "$API_RATE_LIMIT" != "True" ]; then iniset $NOVA_CONF DEFAULT api_rate_limit "False" fi + if [ "$FORCE_CONFIG_DRIVE" != "False" ]; then + iniset $NOVA_CONF DEFAULT force_config_drive "$FORCE_CONFIG_DRIVE" + fi # Format logging if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then setup_colorized_logging $NOVA_CONF DEFAULT From 861463fa591be56b5936777539d2349abc1cea00 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Thu, 31 Oct 2013 11:08:49 -0400 Subject: [PATCH 0494/4704] Remove docker exercise No other hypervisor has a dedicated exercise, docker should be tested just by testing the nova APIs, not on it's own. Change-Id: Ifc788815380e8502449171410dea8260786a1e79 --- exercises/docker.sh | 104 -------------------------------------------- 1 file changed, 104 deletions(-) delete mode 100755 exercises/docker.sh diff --git a/exercises/docker.sh b/exercises/docker.sh deleted file mode 100755 index 10c5436c35..0000000000 --- a/exercises/docker.sh +++ /dev/null @@ -1,104 +0,0 @@ -#!/usr/bin/env bash - -# **docker** - -# Test Docker hypervisor - -echo "*********************************************************************" -echo "Begin DevStack Exercise: $0" -echo "*********************************************************************" - -# This script exits on an error so that errors don't compound and you see -# only the first error that occurred. -set -o errexit - -# Print the commands being run so that we can see the command that triggers -# an error. It is also useful for following allowing as the install occurs. 
-set -o xtrace - - -# Settings -# ======== - -# Keep track of the current directory -EXERCISE_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) - -# Import common functions -source $TOP_DIR/functions - -# Import configuration -source $TOP_DIR/openrc - -# Import exercise configuration -source $TOP_DIR/exerciserc - -# Skip if the hypervisor is not Docker -[[ "$VIRT_DRIVER" == "docker" ]] || exit 55 - -# Import docker functions and declarations -source $TOP_DIR/lib/nova_plugins/hypervisor-docker - -# Image and flavor are ignored but the CLI requires them... - -# Instance type to create -DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} - -# Boot this image, use first AMI image if unset -DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami} - -# Instance name -VM_NAME=ex-docker - - -# Launching a server -# ================== - -# Grab the id of the image to launch -IMAGE=$(glance image-list | egrep " $DOCKER_IMAGE_NAME:latest " | get_field 1) -die_if_not_set $LINENO IMAGE "Failure getting image $DOCKER_IMAGE_NAME" - -# Select a flavor -INSTANCE_TYPE=$(nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | get_field 1) -if [[ -z "$INSTANCE_TYPE" ]]; then - # grab the first flavor in the list to launch if default doesn't exist - INSTANCE_TYPE=$(nova flavor-list | head -n 4 | tail -n 1 | get_field 1) -fi - -# Clean-up from previous runs -nova delete $VM_NAME || true -if ! timeout $ACTIVE_TIMEOUT sh -c "while nova show $VM_NAME; do sleep 1; done"; then - die $LINENO "server didn't terminate!" -fi - -# Boot instance -# ------------- - -VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE $VM_NAME | grep ' id ' | get_field 2) -die_if_not_set $LINENO VM_UUID "Failure launching $VM_NAME" - -# Check that the status is active within ACTIVE_TIMEOUT seconds -if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then - die $LINENO "server didn't become active!" 
-fi - -# Get the instance IP -IP=$(nova show $VM_UUID | grep "$PRIVATE_NETWORK_NAME" | get_field 2) -die_if_not_set $LINENO IP "Failure retrieving IP address" - -# Private IPs can be pinged in single node deployments -ping_check "$PRIVATE_NETWORK_NAME" $IP $BOOT_TIMEOUT - -# Clean up -# -------- - -# Delete instance -nova delete $VM_UUID || die $LINENO "Failure deleting instance $VM_NAME" -if ! timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q $VM_UUID; do sleep 1; done"; then - die $LINENO "Server $VM_NAME not deleted" -fi - -set +o xtrace -echo "*********************************************************************" -echo "SUCCESS: End DevStack Exercise: $0" -echo "*********************************************************************" From 1143f7e45fd2760b8d5fecc8fbd598078ba92fd3 Mon Sep 17 00:00:00 2001 From: Jeff Peeler Date: Thu, 31 Oct 2013 16:21:52 -0400 Subject: [PATCH 0495/4704] Turn off Nova firewall driver when using Neutron As referenced in the OpenStack documentation, the firewall_driver in nova.conf should be set to nova.virt.firewall.NoopFirewallDriver, "so that nova-compute does not perform iptables-based filtering itself". Without this change, the driver gets set to nova.virt.libvirt.firewall.IptablesFirewallDriver, which seems to make networking unusable. 
Change-Id: Id9cce0f5f4efe719683aaf3284b128188b61b919 Closes-bug: #1246888 --- lib/neutron | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/neutron b/lib/neutron index 9834b59f66..098a589592 100644 --- a/lib/neutron +++ b/lib/neutron @@ -272,6 +272,7 @@ function create_nova_conf_neutron() { if [[ "$Q_USE_SECGROUP" == "True" ]]; then LIBVIRT_FIREWALL_DRIVER=nova.virt.firewall.NoopFirewallDriver + iniset $NOVA_CONF DEFAULT firewall_driver $LIBVIRT_FIREWALL_DRIVER iniset $NOVA_CONF DEFAULT security_group_api neutron fi From d561b70930f7184ade05953faa11a47dc250a16c Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Tue, 22 Oct 2013 17:46:00 -0500 Subject: [PATCH 0496/4704] Set keystone.conf to mode 0600 Set keystone.conf readable only by owner Fixes CVE-2013-1977 Fixed bug: 1168252 Change-Id: Idd13b7a58e257565052c54f72c65d8dceb23f27a --- lib/keystone | 1 + 1 file changed, 1 insertion(+) mode change 100755 => 100644 lib/keystone diff --git a/lib/keystone b/lib/keystone old mode 100755 new mode 100644 index 7011f66e99..4353ebab1c --- a/lib/keystone +++ b/lib/keystone @@ -126,6 +126,7 @@ function configure_keystone() { if [[ "$KEYSTONE_CONF_DIR" != "$KEYSTONE_DIR/etc" ]]; then cp -p $KEYSTONE_DIR/etc/keystone.conf.sample $KEYSTONE_CONF + chmod 600 $KEYSTONE_CONF cp -p $KEYSTONE_DIR/etc/policy.json $KEYSTONE_CONF_DIR if [[ -f "$KEYSTONE_DIR/etc/keystone-paste.ini" ]]; then cp -p "$KEYSTONE_DIR/etc/keystone-paste.ini" "$KEYSTONE_PASTE_INI" From 89d41ca23941d864ac9b5665cd68423311029236 Mon Sep 17 00:00:00 2001 From: DennyZhang Date: Fri, 1 Nov 2013 15:41:01 -0500 Subject: [PATCH 0497/4704] Fix wrong variable comparison in functions A whitespace is missing.
Closes-Bug: #1247273 Change-Id: I4bbed6077813c5d7522650baf2e78d9c44c1bba4 --- functions | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/functions b/functions index 0a73b9f0ac..197b4cca17 100644 --- a/functions +++ b/functions @@ -1266,7 +1266,7 @@ function setup_develop() { # where we really really want the overridden version to stick. So provide # a variable that tells us whether or not we should UNDO the requirements # changes (this will be set to False in the OpenStack ci gate) - if [ $UNDO_REQUIREMENTS = "True"]; then + if [ $UNDO_REQUIREMENTS = "True" ]; then if [ $update_requirements -eq 0 ]; then (cd $project_dir && git reset --hard) fi From 86199fcdfead581a03b453484879d046729a0fcd Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Wed, 23 Oct 2013 02:54:53 -0700 Subject: [PATCH 0498/4704] Clean up use of global conf vars in start_nova Make it clear which services run in the API cell and compute (child) cells by using appropriately named local variables for the conf files. This should help save from future bugs. Also: When cells is enabled, there's no need to run nova-conductor in the API cell right now. Cells bypasses any use of conductor in the API cell. 
Change-Id: I3af17d3db028f5df36814cb83c7db4de8f141f84 --- lib/nova | 38 +++++++++++++++++++------------------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/lib/nova b/lib/nova index 8ef5d9a427..e734550b33 100644 --- a/lib/nova +++ b/lib/nova @@ -645,32 +645,32 @@ function start_nova_compute() { # start_nova() - Start running processes, including screen function start_nova_rest() { - NOVA_CONF_BOTTOM=$NOVA_CONF - - # ``screen_it`` checks ``is_service_enabled``, it is not needed here - screen_it n-cond "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-conductor" - + local api_cell_conf=$NOVA_CONF if is_service_enabled n-cell; then - NOVA_CONF_BOTTOM=$NOVA_CELLS_CONF - screen_it n-cond "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-conductor --config-file $NOVA_CELLS_CONF" - screen_it n-cell-region "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cells --config-file $NOVA_CONF" - screen_it n-cell-child "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cells --config-file $NOVA_CELLS_CONF" + local compute_cell_conf=$NOVA_CELLS_CONF + else + local compute_cell_conf=$NOVA_CONF fi - screen_it n-crt "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cert" - screen_it n-net "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-network --config-file $NOVA_CONF_BOTTOM" - screen_it n-sch "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-scheduler --config-file $NOVA_CONF_BOTTOM" - screen_it n-api-meta "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-api-metadata --config-file $NOVA_CONF_BOTTOM" + # ``screen_it`` checks ``is_service_enabled``, it is not needed here + screen_it n-cond "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-conductor --config-file $compute_cell_conf" + screen_it n-cell-region "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cells --config-file $api_cell_conf" + screen_it n-cell-child "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cells --config-file $compute_cell_conf" + + screen_it n-crt "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cert --config-file $api_cell_conf" + screen_it n-net "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-network --config-file $compute_cell_conf" + screen_it n-sch "cd 
$NOVA_DIR && $NOVA_BIN_DIR/nova-scheduler --config-file $compute_cell_conf" + screen_it n-api-meta "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-api-metadata --config-file $compute_cell_conf" - screen_it n-novnc "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-novncproxy --config-file $NOVA_CONF --web $NOVNC_DIR" - screen_it n-xvnc "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-xvpvncproxy --config-file $NOVA_CONF" - screen_it n-spice "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-spicehtml5proxy --config-file $NOVA_CONF --web $SPICE_DIR" - screen_it n-cauth "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-consoleauth" + screen_it n-novnc "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-novncproxy --config-file $api_cell_conf --web $NOVNC_DIR" + screen_it n-xvnc "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-xvpvncproxy --config-file $api_cell_conf" + screen_it n-spice "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-spicehtml5proxy --config-file $api_cell_conf --web $SPICE_DIR" + screen_it n-cauth "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-consoleauth --config-file $api_cell_conf" # Starting the nova-objectstore only if swift3 service is not enabled. # Swift will act as s3 objectstore. is_service_enabled swift3 || \ - screen_it n-obj "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-objectstore" + screen_it n-obj "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-objectstore --config-file $api_cell_conf" } function start_nova() { @@ -683,7 +683,7 @@ function stop_nova() { # Kill the nova screen windows # Some services are listed here twice since more than one instance # of a service may be running in certain configs. 
- for serv in n-api n-cpu n-crt n-net n-sch n-novnc n-xvnc n-cauth n-spice n-cond n-cond n-cell n-cell n-api-meta; do + for serv in n-api n-cpu n-crt n-net n-sch n-novnc n-xvnc n-cauth n-spice n-cond n-cell n-cell n-api-meta; do screen -S $SCREEN_NAME -p $serv -X kill done if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then From e8fa8537a577aeceef70f3d651522705df4353c5 Mon Sep 17 00:00:00 2001 From: DennyZhang Date: Sun, 3 Nov 2013 12:22:04 -0600 Subject: [PATCH 0499/4704] enhance logging for depend tracking mode Add some extra log, to make the console output more understandable Change-Id: If80cb9dba1031ad268bf9b6266ca83f93071a2ad --- stack.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/stack.sh b/stack.sh index 36312ea619..f6c4824ea8 100755 --- a/stack.sh +++ b/stack.sh @@ -757,6 +757,7 @@ fi if [[ $TRACK_DEPENDS = True ]]; then $DEST/.venv/bin/pip freeze > $DEST/requires-post-pip if ! diff -Nru $DEST/requires-pre-pip $DEST/requires-post-pip > $DEST/requires.diff; then + echo "Detect some changes for installed packages of pip, in depend tracking mode" cat $DEST/requires.diff fi echo "Ran stack.sh in depend tracking mode, bailing out now" From eca0a1c7b901e9602c7df89c32b35402360d22f6 Mon Sep 17 00:00:00 2001 From: Paul Czarkowski Date: Tue, 29 Oct 2013 17:15:22 -0500 Subject: [PATCH 0500/4704] removed unecessary lines for docker install * script was trying to copy file from a source that doesn't exist to destination that does. * variable DOCKER_BRANCH no longer used. 
Change-Id: I50fd55e97dfc4eaaa3c1f2bfc42e8cfc0e76cdf4 --- lib/nova_plugins/hypervisor-docker | 3 --- 1 file changed, 3 deletions(-) diff --git a/lib/nova_plugins/hypervisor-docker b/lib/nova_plugins/hypervisor-docker index 2451982ed9..ac509c59c2 100644 --- a/lib/nova_plugins/hypervisor-docker +++ b/lib/nova_plugins/hypervisor-docker @@ -24,7 +24,6 @@ set +o xtrace # Set up default directories DOCKER_DIR=$DEST/docker -DOCKER_BRANCH=${DOCKER_BRANCH:-master} DOCKER_UNIX_SOCKET=/var/run/docker.sock DOCKER_PID_FILE=/var/run/docker.pid @@ -55,8 +54,6 @@ function cleanup_nova_hypervisor() { function configure_nova_hypervisor() { iniset $NOVA_CONF DEFAULT compute_driver docker.DockerDriver iniset $GLANCE_API_CONF DEFAULT container_formats ami,ari,aki,bare,ovf,docker - - sudo cp -p ${DOCKER_DIR}/nova-driver/docker.filters $NOVA_CONF_DIR/rootwrap.d } # install_nova_hypervisor() - Install external components From 610af8cfa7ff9cadac80fdd37924ecd8fe0d546e Mon Sep 17 00:00:00 2001 From: Chris Buccella Date: Tue, 5 Nov 2013 12:56:34 +0000 Subject: [PATCH 0501/4704] Fix horizon config under Apache 2.4 Apache 2.4 now uses mod_authz_host for acces control. Horizon's Apache config needs an update to allow access to its directory, otherwise a 403 will be returned. This change replaces a similar previous fixes done for Fedora 18 and Ubuntu 13.10, since this is an Apache version issue, not a distro-specific one. Change-Id: Iecc17600d8e1aae6a7b0929b1493d712c307616f Closes-Bug: #1243075 --- functions | 12 ++++++++++++ lib/horizon | 15 +++++---------- 2 files changed, 17 insertions(+), 10 deletions(-) diff --git a/functions b/functions index 0a73b9f0ac..5ca5eee925 100644 --- a/functions +++ b/functions @@ -557,6 +557,18 @@ function is_arch { [ "($uname -m)" = "$ARCH_TYPE" ] } +# Checks if installed Apache is <= given version +# $1 = x.y.z (version string of Apache) +function check_apache_version { + local cmd="apachectl" + if ! 
[[ -x $(which apachectl 2>/dev/null) ]]; then + cmd="/usr/sbin/apachectl" + fi + + local version=$($cmd -v | grep version | grep -Po 'Apache/\K[^ ]*') + expr "$version" '>=' $1 > /dev/null +} + # git clone only if directory doesn't exist already. Since ``DEST`` might not # be owned by the installation user, we create the directory and change the # ownership to the proper user. diff --git a/lib/horizon b/lib/horizon index c116ec257a..4cb2828f10 100644 --- a/lib/horizon +++ b/lib/horizon @@ -112,7 +112,12 @@ function init_horizon() { # Create an empty directory that apache uses as docroot sudo mkdir -p $HORIZON_DIR/.blackhole + # Apache 2.4 uses mod_authz_host for access control now (instead of "Allow") HORIZON_REQUIRE='' + if check_apache_version "2.4" ; then + HORIZON_REQUIRE='Require all granted' + fi + local horizon_conf=/etc/$APACHE_NAME/$APACHE_CONF_DIR/horizon.conf if is_ubuntu; then # Clean up the old config name @@ -120,17 +125,7 @@ function init_horizon() { # Be a good citizen and use the distro tools here sudo touch $horizon_conf sudo a2ensite horizon.conf - if [[ "$DISTRO" == "saucy" ]]; then - # Ubuntu 13.10 has Require all denied in apache2.conf - # and requires explicit Require all granted - HORIZON_REQUIRE='Require all granted' - fi elif is_fedora; then - if [[ "$os_RELEASE" -ge "18" ]]; then - # fedora 18 has Require all denied in its httpd.conf - # and requires explicit Require all granted - HORIZON_REQUIRE='Require all granted' - fi sudo sed '/^Listen/s/^.*$/Listen 0.0.0.0:80/' -i /etc/httpd/conf/httpd.conf elif is_suse; then : # nothing to do From 237225dd394d43e17c5406c8c0549e3e7d5e4cd2 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Wed, 6 Nov 2013 15:41:04 +0100 Subject: [PATCH 0502/4704] Fixing space issues between " and ] When we are using '[' aka. ``test`` , before the closing ']' we need to use a space. Otherwise the commands return with '2' so the "expression" will be a constant false. 
Change-Id: I673762e802c28335e03390b6608cf6bbee6aaba6 --- functions | 2 +- tools/build_ramdisk.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/functions b/functions index 0a73b9f0ac..197b4cca17 100644 --- a/functions +++ b/functions @@ -1266,7 +1266,7 @@ function setup_develop() { # where we really really want the overridden version to stick. So provide # a variable that tells us whether or not we should UNDO the requirements # changes (this will be set to False in the OpenStack ci gate) - if [ $UNDO_REQUIREMENTS = "True"]; then + if [ $UNDO_REQUIREMENTS = "True" ]; then if [ $update_requirements -eq 0 ]; then (cd $project_dir && git reset --hard) fi diff --git a/tools/build_ramdisk.sh b/tools/build_ramdisk.sh index 3d9f76f4a5..737255578a 100755 --- a/tools/build_ramdisk.sh +++ b/tools/build_ramdisk.sh @@ -22,7 +22,7 @@ cleanup() { umount $MNTDIR rmdir $MNTDIR fi - if [ -n "$DEV_FILE_TMP" -a -e "$DEV_FILE_TMP "]; then + if [ -n "$DEV_FILE_TMP" -a -e "$DEV_FILE_TMP" ]; then rm -f $DEV_FILE_TMP fi if [ -n "$IMG_FILE_TMP" -a -e "$IMG_FILE_TMP" ]; then From c04ddbe8680dd91d2749e74b36728aee27036dea Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 6 Nov 2013 02:15:11 -0600 Subject: [PATCH 0503/4704] Robustify shocco install and config * shocco has some non-optional prereqs, make sure they are present if shocco is being installed * set the path to installed shocco correctly * add the working dir to .gitignore Change-Id: If786ea9e28d3595775f7b86d2fe760dff8047f49 --- .gitignore | 1 + tools/build_docs.sh | 13 +++++++++++-- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/.gitignore b/.gitignore index 0c22c6b62a..a3d5b0d02a 100644 --- a/.gitignore +++ b/.gitignore @@ -13,5 +13,6 @@ stack-screenrc accrc .stackenv .prereqs +devstack-docs-* docs/ docs-files diff --git a/tools/build_docs.sh b/tools/build_docs.sh index 216e557025..8dca524f9f 100755 --- a/tools/build_docs.sh +++ b/tools/build_docs.sh @@ -28,22 +28,31 @@ 
MASTER_BRANCH=${MASTER_BRANCH:-master} # http://devstack.org is a GitHub gh-pages site in the https://github.com/cloudbuilders/devtack.git repo GH_PAGES_REPO=git@github.com:cloudbuilders/devstack.git +# Keep track of the devstack directory +TOP_DIR=$(cd $(dirname "$0")/.. && pwd) + # Uses this shocco branch: https://github.com/dtroyer/shocco/tree/rst_support SHOCCO=${SHOCCO:-shocco} if ! which shocco; then - if [[ ! -x shocco/shocco ]]; then + if [[ ! -x $TOP_DIR/shocco/shocco ]]; then if [[ -z "$INSTALL_SHOCCO" ]]; then echo "shocco not found in \$PATH, please set environment variable SHOCCO" exit 1 fi echo "Installing local copy of shocco" + if ! which pygmentize; then + sudo pip install Pygments + fi + if ! which rst2html.py; then + sudo pip install docutils + fi git clone -b rst_support https://github.com/dtroyer/shocco shocco cd shocco ./configure make cd .. fi - SHOCCO=shocco/shocco + SHOCCO=$TOP_DIR/shocco/shocco fi # Process command-line args From 5a35e73b3419df571f55efd2a68ef565469e3d1b Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Tue, 29 Oct 2013 08:23:43 +0100 Subject: [PATCH 0504/4704] Option for installing spice-html5 and novnc from packages The novnc and spice-html5 is installed from git repository by default, but not from an openstack* repository. In order to add vnc/spice proxy related gating tests they should be installed from packages. 
New boolean variables added to control the installation source: NOVNC_FROM_PACKAGE and SPICE_FROM_PACKAGE Related changes: https://review.openstack.org/#/c/51790/ https://review.openstack.org/#/c/50822/ Change-Id: I1e55fd99edd30876924a13160afb74ff3e97c485 --- lib/nova | 29 ++++++++++++++++++++++++----- stack.sh | 10 ---------- 2 files changed, 24 insertions(+), 15 deletions(-) diff --git a/lib/nova b/lib/nova index 00f977d3db..eacd41599d 100644 --- a/lib/nova +++ b/lib/nova @@ -73,9 +73,6 @@ SCHEDULER=${SCHEDULER:-nova.scheduler.filter_scheduler.FilterScheduler} QEMU_CONF=/etc/libvirt/qemu.conf -NOVNC_DIR=$DEST/noVNC -SPICE_DIR=$DEST/spice-html5 - # Set default defaults here as some hypervisor drivers override these PUBLIC_INTERFACE_DEFAULT=br100 GUEST_INTERFACE_DEFAULT=eth0 @@ -590,6 +587,28 @@ function install_nova() { install_nova_hypervisor fi + if is_service_enabled n-novnc; then + # a websockets/html5 or flash powered VNC console for vm instances + if trueorfalse True "$NOVNC_FROM_PACKAGE"; then + NOVNC_WEB_DIR=/usr/share/novnc + install_package novnc + else + NOVNC_WEB_DIR=$DEST/noVNC + git_clone $NOVNC_REPO $NOVNC_WEB_DIR $NOVNC_BRANCH + fi + fi + + if is_service_enabled n-spice; then + # a websockets/html5 or flash powered SPICE console for vm instances + if trueorfalse True "$SPICE_FROM_PACKAGE"; then + SPICE_WEB_DIR=/usr/share/spice-html5 + install_package spice-html5 + else + SPICE_WEB_DIR=$DEST/spice-html5 + git_clone $SPICE_REPO $SPICE_WEB_DIR $SPICE_BRANCH + fi + fi + git_clone $NOVA_REPO $NOVA_DIR $NOVA_BRANCH setup_develop $NOVA_DIR sudo install -D -m 0644 -o $STACK_USER {$NOVA_DIR/tools/,/etc/bash_completion.d/}nova-manage.bash_completion @@ -658,9 +677,9 @@ function start_nova_rest() { screen_it n-sch "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-scheduler --config-file $compute_cell_conf" screen_it n-api-meta "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-api-metadata --config-file $compute_cell_conf" - screen_it n-novnc "cd $NOVA_DIR && 
$NOVA_BIN_DIR/nova-novncproxy --config-file $api_cell_conf --web $NOVNC_DIR" + screen_it n-novnc "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-novncproxy --config-file $api_cell_conf --web $NOVNC_WEB_DIR" screen_it n-xvnc "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-xvpvncproxy --config-file $api_cell_conf" - screen_it n-spice "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-spicehtml5proxy --config-file $api_cell_conf --web $SPICE_DIR" + screen_it n-spice "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-spicehtml5proxy --config-file $api_cell_conf --web $SPICE_WEB_DIR" screen_it n-cauth "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-consoleauth --config-file $api_cell_conf" # Starting the nova-objectstore only if swift3 service is not enabled. diff --git a/stack.sh b/stack.sh index 36312ea619..47d93bd642 100755 --- a/stack.sh +++ b/stack.sh @@ -694,16 +694,6 @@ if is_service_enabled nova; then configure_nova fi -if is_service_enabled n-novnc; then - # a websockets/html5 or flash powered VNC console for vm instances - git_clone $NOVNC_REPO $NOVNC_DIR $NOVNC_BRANCH -fi - -if is_service_enabled n-spice; then - # a websockets/html5 or flash powered SPICE console for vm instances - git_clone $SPICE_REPO $SPICE_DIR $SPICE_BRANCH -fi - if is_service_enabled horizon; then # dashboard install_horizon From feb28837f4db9177835f94b6b9899a90c45a685d Mon Sep 17 00:00:00 2001 From: Ryan Hsu Date: Thu, 7 Nov 2013 12:12:35 -0800 Subject: [PATCH 0505/4704] Add new stack phase: post-extra The current existing phases "post-config" and "extra" are not sufficient to allow local.conf overrides to extra type services because they run after the services are installed and configured. This commit introduces a new phase called "post-extra" that runs after these existing phases. With this change, users are able to leverage local.conf to provide overridden options to services like Tempest. 
Change-Id: I5d758eebfda804dd1d8cbc3d5cc35ef4dcc8c96f Closes-Bug: #1249085 --- README.md | 1 + extras.d/80-tempest.sh | 3 +++ extras.d/README.md | 4 ++-- stack.sh | 7 +++++++ 4 files changed, 13 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 640fab65f9..34cd2efc90 100644 --- a/README.md +++ b/README.md @@ -326,6 +326,7 @@ The defined phases are: * **local** - extracts ``localrc`` from ``local.conf`` before ``stackrc`` is sourced * **post-config** - runs after the layer 2 services are configured and before they are started * **extra** - runs after services are started and before any files in ``extra.d`` are executed +* **post-extra** - runs after files in ``extra.d`` are executed The file is processed strictly in sequence; meta-sections may be specified more than once but if any settings are duplicated the last to appear in the file will be used. diff --git a/extras.d/80-tempest.sh b/extras.d/80-tempest.sh index 75b702c700..0186e36aee 100644 --- a/extras.d/80-tempest.sh +++ b/extras.d/80-tempest.sh @@ -14,6 +14,9 @@ if is_service_enabled tempest; then echo_summary "Initializing Tempest" configure_tempest init_tempest + elif [[ "$1" == "stack" && "$2" == "post-extra" ]]; then + # local.conf Tempest option overrides + : fi if [[ "$1" == "unstack" ]]; then diff --git a/extras.d/README.md b/extras.d/README.md index 88e4265ced..1dd17da2d6 100644 --- a/extras.d/README.md +++ b/extras.d/README.md @@ -19,10 +19,10 @@ sourced with one or more arguments, the first of which defines the hook phase: source: always called first in any of the scripts, used to set the initial defaults in a lib/* script or similar - stack: called by stack.sh. There are three possible values for + stack: called by stack.sh. 
There are four possible values for the second arg to distinguish the phase stack.sh is in: - arg 2: install | post-config | extra + arg 2: install | post-config | extra | post-extra unstack: called by unstack.sh diff --git a/stack.sh b/stack.sh index 47d93bd642..c24257d815 100755 --- a/stack.sh +++ b/stack.sh @@ -1252,6 +1252,13 @@ if [[ -d $TOP_DIR/extras.d ]]; then done fi +# Local Configuration +# =================== + +# Apply configuration from local.conf if it exists for layer 2 services +# Phase: post-extra +merge_config_group $TOP_DIR/local.conf post-extra + # Run local script # ================ From add4ca3ef0f916e31a781e118c8c4d04a9bec5cf Mon Sep 17 00:00:00 2001 From: Ben Nemec Date: Fri, 8 Nov 2013 17:22:51 +0000 Subject: [PATCH 0506/4704] Fix FROM_PACKAGE checks for novnc and spice The logic for installing novnc and spice from packages is broken, which makes it impossible to install from git, which makes bug 1248923 more serious. Change-Id: I9ae722a5470a16555bca9018da342485f6d3e896 Related-Bug: 1248923 --- lib/nova | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/lib/nova b/lib/nova index eacd41599d..b9fa3e1b9c 100644 --- a/lib/nova +++ b/lib/nova @@ -589,7 +589,8 @@ function install_nova() { if is_service_enabled n-novnc; then # a websockets/html5 or flash powered VNC console for vm instances - if trueorfalse True "$NOVNC_FROM_PACKAGE"; then + NOVNC_FROM_PACKAGE=`trueorfalse True $NOVNC_FROM_PACKAGE` + if [ "$NOVNC_FROM_PACKAGE" = "True" ]; then NOVNC_WEB_DIR=/usr/share/novnc install_package novnc else @@ -600,7 +601,8 @@ function install_nova() { if is_service_enabled n-spice; then # a websockets/html5 or flash powered SPICE console for vm instances - if trueorfalse True "$SPICE_FROM_PACKAGE"; then + SPICE_FROM_PACKAGE=`trueorfalse True $SPICE_FROM_PACKAGE` + if [ "$SPICE_FROM_PACKAGE" = "True" ]; then SPICE_WEB_DIR=/usr/share/spice-html5 install_package spice-html5 else From 645171c5fdfa1d43441c411c00c8080218cb6bbe Mon Sep 17 
00:00:00 2001 From: Ilya Kharin Date: Tue, 12 Nov 2013 12:44:20 +0400 Subject: [PATCH 0507/4704] Don't install novnc from package by default By default the installation is failed because the novnc package has got the a dependency of the nova-common package. The last package provides "/etc/nova/*" files with appropriate privileges. It potentially brings to the problem of an access to those files. Change-Id: I1689bef817365b10bf972dc4a8033892ad5939d1 Related-Bug: 1248923 --- lib/nova | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/nova b/lib/nova index b9fa3e1b9c..5b6f50e9ec 100644 --- a/lib/nova +++ b/lib/nova @@ -589,7 +589,7 @@ function install_nova() { if is_service_enabled n-novnc; then # a websockets/html5 or flash powered VNC console for vm instances - NOVNC_FROM_PACKAGE=`trueorfalse True $NOVNC_FROM_PACKAGE` + NOVNC_FROM_PACKAGE=`trueorfalse False $NOVNC_FROM_PACKAGE` if [ "$NOVNC_FROM_PACKAGE" = "True" ]; then NOVNC_WEB_DIR=/usr/share/novnc install_package novnc From 4df4a15a7213f24c99506269acb0129689be56a8 Mon Sep 17 00:00:00 2001 From: Roman Bogorodskiy Date: Tue, 12 Nov 2013 12:09:40 +0000 Subject: [PATCH 0508/4704] Fix typo in config section name in README.md s/locarc/localrc/ Change-Id: Ia5cdfa8e6995e5cad80750372faa35927d4d8e48 --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 640fab65f9..93d139621f 100644 --- a/README.md +++ b/README.md @@ -82,7 +82,7 @@ for example). # Customizing You can override environment variables used in `stack.sh` by creating file -name `local.conf` with a ``locarc`` section as shown below. It is likely +name `local.conf` with a ``localrc`` section as shown below. It is likely that you will need to do this to tweak your networking configuration should you need to access your cloud from a different host. 
From 261852ddfca72882e3e1f8a0bd3011465b1e70ac Mon Sep 17 00:00:00 2001 From: Noorul Islam K M Date: Tue, 12 Nov 2013 20:24:57 +0530 Subject: [PATCH 0509/4704] Add pip folder to ignore list Change-Id: I26da80b9e09774b9940d382a32cf69e75c61a527 --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 0c22c6b62a..380eacfb5b 100644 --- a/.gitignore +++ b/.gitignore @@ -8,6 +8,7 @@ localrc local.sh files/*.gz files/images +files/pip-* stack-screenrc *.pem accrc From 4debfe2b2da5011a93b44d09283b8dfdaf40c0bc Mon Sep 17 00:00:00 2001 From: John Griffith Date: Fri, 1 Nov 2013 00:00:40 +0000 Subject: [PATCH 0510/4704] Add driver_cert wrapper for cinder This adds a simple wrapper to call tempest volume tests. The idea is to make it easy to execute and capture results from tempest.api.volume.test_* Concept is for drivers in Cinder to configure cinder.conf as needed and then run this script which will restart services and kick off the tempest tests, and capture the output to a logfile for submission. To run, 1. deploy devstack as normal with tempest included in enabled_services 2. modify cinder.conf appropriately for your driver 3. 
execute the script devstack/driver_certs/cinder_driver_cert.sh Change-Id: I98ec9e1e418a8416406db5e2e6ffd21992e392cf --- driver_certs/cinder_driver_cert.sh | 87 ++++++++++++++++++++++++++++++ 1 file changed, 87 insertions(+) create mode 100755 driver_certs/cinder_driver_cert.sh diff --git a/driver_certs/cinder_driver_cert.sh b/driver_certs/cinder_driver_cert.sh new file mode 100755 index 0000000000..18bef8b3b5 --- /dev/null +++ b/driver_certs/cinder_driver_cert.sh @@ -0,0 +1,87 @@ +#!/usr/bin/env bash + +# **cinder_cert.sh** + +CERT_DIR=$(cd $(dirname "$0") && pwd) +TOP_DIR=$(cd $CERT_DIR/..; pwd) + +source $TOP_DIR/functions +source $TOP_DIR/stackrc +source $TOP_DIR/openrc +source $TOP_DIR/lib/tempest +source $TOP_DIR/lib/cinder + +TEMPFILE=`mktemp` +RECLONE=True + +function log_message() { + MESSAGE=$1 + STEP_HEADER=$2 + if [[ "$STEP_HEADER" = "True" ]]; then + echo -e "\n========================================================" | tee -a $TEMPFILE + fi + echo -e `date +%m/%d/%y/%T:`"${MESSAGE}" | tee -a $TEMPFILE + if [[ "$STEP_HEADER" = "True" ]]; then + echo -e "========================================================" | tee -a $TEMPFILE + fi +} + +if [[ "$OFFLINE" = "True" ]]; then + echo "ERROR: Driver cert requires fresh clone/pull from ${CINDER_BRANCH}" + echo " Please set OFFLINE=False and retry." + exit 1 +fi + +log_message "RUNNING CINDER DRIVER CERTIFICATION CHECK", True +log_message "Output is being logged to: $TEMPFILE" + +cd $CINDER_DIR +log_message "Cloning to ${CINDER_REPO}...", True +install_cinder + +log_message "Pull a fresh Clone of cinder repo...", True +git status | tee -a $TEMPFILE +git log --pretty=oneline -n 1 | tee -a $TEMPFILE + +log_message "Gathering copy of cinder.conf file (passwords will be scrubbed)...", True +cat /etc/cinder/cinder.conf | egrep -v "(^#.*|^$)" | tee -a $TEMPFILE +sed -i "s/\(.*password.*=\).*$/\1 xxx/i" $TEMPFILE +log_message "End of cinder.conf.", True + +cd $TOP_DIR +# Verify tempest is installed/enabled +if ! 
is_service_enabled tempest; then + log_message "ERROR!!! Cert requires tempest in enabled_services!", True + log_message" Please add tempest to enabled_services and retry." + exit 1 +fi + +cd $TEMPEST_DIR +install_tempest + +log_message "Verify tempest is current....", True +git status | tee -a $TEMPFILE +log_message "Check status and get latest commit..." +git log --pretty=oneline -n 1 | tee -a $TEMPFILE + + +#stop and restart cinder services +log_message "Restart Cinder services...", True +stop_cinder +sleep 1 +start_cinder +sleep 5 + +# run tempest api/volume/test_* +log_message "Run the actual tempest volume tests (run_tests.sh -N tempest.api.volume.test_*)...", True +exec 2> >(tee -a $TEMPFILE) +`./run_tests.sh -N tempest.api.volume.test_*` +if [[ $? = 0 ]]; then + log_message "CONGRATULATIONS!!! Device driver PASSED!", True + log_message "Submit output: ($TEMPFILE)" + exit 0 +else + log_message "SORRY!!! Device driver FAILED!", True + log_message "Check output in $TEMPFILE" + exit 1 +fi From 5ea53ee5f710076eba80ee9677afd3769fd2f36a Mon Sep 17 00:00:00 2001 From: Arnaud Legendre Date: Fri, 1 Nov 2013 16:42:54 -0700 Subject: [PATCH 0511/4704] Use vmdk descriptor to populate image properties image_upload.sh doesn't use the descriptor properties embedded inside the vmdk file. This requires the user to manually change the filename of the vmdk file to add the properties (disk type, storage adapter and network adapter). In case of a sparse monolithic sparse or stream-optimized sparse, these properties are extracted from the descriptor. The user can still override these values by modifying the filename. 
Change-Id: I1734311c66efe60a1a30e3ea63cc2a9da9cdb5b4 Closes-Bug: #1247300 --- functions | 30 +++++++++++++++++++++++++++--- 1 file changed, 27 insertions(+), 3 deletions(-) diff --git a/functions b/functions index 83826f9327..bec76b77ed 100644 --- a/functions +++ b/functions @@ -1320,18 +1320,42 @@ function upload_image() { # Before we can upload vmdk type images to glance, we need to know it's # disk type, storage adapter, and networking adapter. These values are - # passed to glance as custom properties. We take these values from the + # passed to glance as custom properties. + # We take these values from the vmdk file if populated. Otherwise, we use # vmdk filename, which is expected in the following format: # # -:: # # If the filename does not follow the above format then the vsphere # driver will supply default values. + + # vmdk adapter type + vmdk_adapter_type="$(head -25 $IMAGE | grep -a -F -m 1 'ddb.adapterType =' $IMAGE)" + vmdk_adapter_type="${vmdk_adapter_type#*\"}" + vmdk_adapter_type="${vmdk_adapter_type%?}" + + # vmdk disk type + vmdk_create_type="$(head -25 $IMAGE | grep -a -F -m 1 'createType=' $IMAGE)" + vmdk_create_type="${vmdk_create_type#*\"}" + vmdk_create_type="${vmdk_create_type%?}" + if [[ "$vmdk_create_type" = "monolithicSparse" ]]; then + vmdk_disktype="sparse" + elif [[ "$vmdk_create_type" = "monolithicFlat" ]]; then + die $LINENO "Monolithic flat disks should use a descriptor-data pair." \ + "Please provide the disk and not the descriptor." + else + #TODO(alegendre): handle streamOptimized once supported by VMware driver. + vmdk_disktype="preallocated" + fi property_string=`echo "$IMAGE_NAME" | grep -oP '(?<=-)(?!.*-).+:.+:.+$'` if [[ ! -z "$property_string" ]]; then IFS=':' read -a props <<< "$property_string" - vmdk_disktype="${props[0]}" - vmdk_adapter_type="${props[1]}" + if [[ ! -z "${props[0]}" ]]; then + vmdk_disktype="${props[0]}" + fi + if [[ ! 
-z "${props[1]}" ]]; then + vmdk_adapter_type="${props[1]}" + fi vmdk_net_adapter="${props[2]}" fi From 66c54249805c9a6e863c81b754f4abae71aa1b2b Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Tue, 12 Nov 2013 16:24:14 -0800 Subject: [PATCH 0512/4704] Bump SWIFT_LOOPBACK_DISK_SIZE_DEFAULT over swift max_file_size Swift is returning 50x error codes because its disk is too small, set size bigger then max_file_size in an attempt to fix the problem, or at least reduce it. "we create a 4GB device, but swift thinks it can write 5GB, hence fail" --sdague This patch based off of Iccd6368e4df71abb5ccfe7d361c64d86e1071d35 Change-Id: Ib56a98cd74e7edf1fa90facc25c72632d43180f1 Related-Bug: #1225664 --- lib/swift | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/swift b/lib/swift index b46537f5a3..83c4ebb49d 100644 --- a/lib/swift +++ b/lib/swift @@ -59,9 +59,9 @@ fi # kilobytes. # Default is 1 gigabyte. SWIFT_LOOPBACK_DISK_SIZE_DEFAULT=1G -# if tempest enabled the default size is 4 Gigabyte. +# if tempest enabled the default size is 6 Gigabyte. 
if is_service_enabled tempest; then - SWIFT_LOOPBACK_DISK_SIZE_DEFAULT=${SWIFT_LOOPBACK_DISK_SIZE:-4G} + SWIFT_LOOPBACK_DISK_SIZE_DEFAULT=${SWIFT_LOOPBACK_DISK_SIZE:-6G} fi SWIFT_LOOPBACK_DISK_SIZE=${SWIFT_LOOPBACK_DISK_SIZE:-$SWIFT_LOOPBACK_DISK_SIZE_DEFAULT} From fc1b7786eb68f8df254804590f2809c0e342a3ab Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Wed, 23 Oct 2013 06:46:43 +0000 Subject: [PATCH 0513/4704] cm-cpu-agent only support libvirt driver ceilometer-compute-agent only support libvirt driver Change-Id: I8b92ef10f52388ead11f8ce51c9ab119f953efae --- lib/ceilometer | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/ceilometer b/lib/ceilometer index f95ed302ce..dcadb07899 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -137,7 +137,9 @@ function install_ceilometerclient() { # start_ceilometer() - Start running processes, including screen function start_ceilometer() { - screen_it ceilometer-acompute "cd ; sg $LIBVIRT_GROUP \"ceilometer-agent-compute --config-file $CEILOMETER_CONF\"" + if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then + screen_it ceilometer-acompute "cd ; sg $LIBVIRT_GROUP \"ceilometer-agent-compute --config-file $CEILOMETER_CONF\"" + fi screen_it ceilometer-acentral "cd ; ceilometer-agent-central --config-file $CEILOMETER_CONF" screen_it ceilometer-collector "cd ; ceilometer-collector --config-file $CEILOMETER_CONF" screen_it ceilometer-api "cd ; ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF" From 8ceb794c65742c573ca555ff6b8c9cd470a52304 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Wed, 23 Oct 2013 09:26:25 +0200 Subject: [PATCH 0514/4704] Allow users to configure the CM pipeline interval The patch allows users to configure the ceilometer pipeline interval. In localrc, we can add CEILOMETER_PIPELINE_INTERVAL=10 to handle the pipeline each 10 seconds instead of the default 10 minutes. 
Change-Id: Ic5216adbdfd70ade38912871ac6df3be732bf780 --- lib/ceilometer | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/ceilometer b/lib/ceilometer index dcadb07899..e626427777 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -82,6 +82,10 @@ function configure_ceilometer() { cp $CEILOMETER_DIR/etc/ceilometer/pipeline.yaml $CEILOMETER_CONF_DIR iniset $CEILOMETER_CONF DEFAULT policy_file $CEILOMETER_CONF_DIR/policy.json + if [ "$CEILOMETER_PIPELINE_INTERVAL" ]; then + sed -i "s/interval:.*/interval: ${CEILOMETER_PIPELINE_INTERVAL}/" $CEILOMETER_CONF_DIR/pipeline.yaml + fi + # the compute and central agents need these credentials in order to # call out to the public nova and glance APIs iniset $CEILOMETER_CONF DEFAULT os_username ceilometer From e231438bf62adb9014e644a443c2165a89812fd3 Mon Sep 17 00:00:00 2001 From: Angus Salkeld Date: Thu, 14 Nov 2013 14:36:46 +1100 Subject: [PATCH 0515/4704] Use the oslo db database connection for Heat This is the preferred configuration now, and sql_connection is deprecated. 
Change-Id: I2d8342b5025ab481e1db0521a3e0610b73bda9de --- lib/heat | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/heat b/lib/heat index 8f123ea212..7a9ef0da26 100644 --- a/lib/heat +++ b/lib/heat @@ -80,7 +80,7 @@ function configure_heat() { iniset $HEAT_CONF DEFAULT heat_metadata_server_url http://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT iniset $HEAT_CONF DEFAULT heat_waitcondition_server_url http://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1/waitcondition iniset $HEAT_CONF DEFAULT heat_watch_server_url http://$HEAT_API_CW_HOST:$HEAT_API_CW_PORT - iniset $HEAT_CONF DEFAULT sql_connection `database_connection_url heat` + iniset $HEAT_CONF database connection `database_connection_url heat` iniset $HEAT_CONF DEFAULT auth_encryption_key `hexdump -n 16 -v -e '/1 "%02x"' /dev/random` # logging From d0059595529883719726ec146534a6639dbae65e Mon Sep 17 00:00:00 2001 From: Roman Prykhodchenko Date: Thu, 14 Nov 2013 09:58:53 +0200 Subject: [PATCH 0516/4704] Enable/disable ironic in tempest config In order to run tempest tests for Ironic in devstack the availability of the Ironic service must be set in the tempest config. This patch adds a shortcut for Ironic services and sets availability of Ironic in tempest config. 
Change-Id: I206fc2ea13412ceb128f8bfe90c153348d6f2f3e --- functions | 1 + lib/tempest | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/functions b/functions index afb75cce9c..9c65cd1209 100644 --- a/functions +++ b/functions @@ -841,6 +841,7 @@ function is_service_enabled() { [[ ${service} == "cinder" && ${ENABLED_SERVICES} =~ "c-" ]] && return 0 [[ ${service} == "ceilometer" && ${ENABLED_SERVICES} =~ "ceilometer-" ]] && return 0 [[ ${service} == "glance" && ${ENABLED_SERVICES} =~ "g-" ]] && return 0 + [[ ${service} == "ironic" && ${ENABLED_SERVICES} =~ "ir-" ]] && return 0 [[ ${service} == "neutron" && ${ENABLED_SERVICES} =~ "q-" ]] && return 0 [[ ${service} == "trove" && ${ENABLED_SERVICES} =~ "tr-" ]] && return 0 [[ ${service} == "swift" && ${ENABLED_SERVICES} =~ "s-" ]] && return 0 diff --git a/lib/tempest b/lib/tempest index ec1fc90b76..fca3884e7f 100644 --- a/lib/tempest +++ b/lib/tempest @@ -300,7 +300,7 @@ function configure_tempest() { iniset $TEMPEST_CONF cli cli_dir $NOVA_BIN_DIR # service_available - for service in nova cinder glance neutron swift heat horizon ceilometer; do + for service in nova cinder glance neutron swift heat horizon ceilometer ironic; do if is_service_enabled $service ; then iniset $TEMPEST_CONF service_available $service "True" else From 394c11c72191fff6eed1543600260b9de7f55676 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gon=C3=A9ri=20Le=20Bouder?= Date: Tue, 5 Nov 2013 10:35:55 +0100 Subject: [PATCH 0517/4704] horizon: drop nodejs dependency since commit a0739c9423a4c559b45af96fa4cdb30539dcdbd7, horizon use a pure Python alternative to nodejs Change-Id: I231b453e42c303c3cc29e8bea4d7b935fecdccd2 --- files/apts/horizon | 2 -- files/rpms-suse/horizon | 1 - files/rpms/horizon | 1 - lib/horizon | 9 --------- tools/fixup_stuff.sh | 3 +-- 5 files changed, 1 insertion(+), 15 deletions(-) diff --git a/files/apts/horizon b/files/apts/horizon index 0865931d44..8969046355 100644 --- a/files/apts/horizon +++ b/files/apts/horizon 
@@ -19,5 +19,3 @@ python-kombu python-coverage python-cherrypy3 # why? python-migrate -nodejs -nodejs-legacy # dist:quantal diff --git a/files/rpms-suse/horizon b/files/rpms-suse/horizon index 73932acc1d..d3bde2690c 100644 --- a/files/rpms-suse/horizon +++ b/files/rpms-suse/horizon @@ -1,6 +1,5 @@ apache2 # NOPRIME apache2-mod_wsgi # NOPRIME -nodejs python-CherryPy # why? (coming from apts) python-Paste python-PasteDeploy diff --git a/files/rpms/horizon b/files/rpms/horizon index 0ca18cadb7..aa27ab4e97 100644 --- a/files/rpms/horizon +++ b/files/rpms/horizon @@ -3,7 +3,6 @@ django-registration gcc httpd # NOPRIME mod_wsgi # NOPRIME -nodejs # NOPRIME pylint python-anyjson python-BeautifulSoup diff --git a/lib/horizon b/lib/horizon index 4cb2828f10..5bff712743 100644 --- a/lib/horizon +++ b/lib/horizon @@ -153,15 +153,6 @@ function install_horizon() { # Apache installation, because we mark it NOPRIME install_apache_wsgi - # NOTE(sdague) quantal changed the name of the node binary - if is_ubuntu; then - if [[ ! 
-e "/usr/bin/node" ]]; then - install_package nodejs-legacy - fi - elif is_fedora && [[ $DISTRO =~ (rhel6) || "$os_RELEASE" -ge "18" ]]; then - install_package nodejs - fi - git_clone $HORIZON_REPO $HORIZON_DIR $HORIZON_BRANCH $HORIZON_TAG } diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index 325a6d6be1..f9362307d8 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -76,8 +76,7 @@ fi if [[ $DISTRO =~ (rhel6) ]]; then # Disable selinux to avoid configuring to allow Apache access - # to Horizon files or run nodejs (LP#1175444) - # FIXME(dtroyer): see if this can be skipped without node or if Horizon is not enabled + # to Horizon files (LP#1175444) if selinuxenabled; then sudo setenforce 0 fi From 2b8814d0ecbca897f4bcfdf1117e773bc4b45e77 Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Wed, 25 Sep 2013 17:07:06 +0100 Subject: [PATCH 0518/4704] xenapi: enable user to specify FLAT_NETWORK_BRIDGE install_os_domU.sh failed, if the FLAT_NETWORK_BRIDGE is found in localrc. As nova looks up the network by either name-label or bridge name, it makes sense to enable the user to specify this parameter. As an example, if the user wants to use name-labels to specify networks, and those name-labels could be used in domU to create bridges: VM_BRIDGE_OR_NET_NAME="osvmnet" FLAT_NETWORK_BRIDGE="osvmnet" In this case, the domU will know only about a name label, so it could be decoupled from which xapi bridges used. This change also adds some fixes (missing double quotes). Change-Id: I045e367ef441be20c4e8cb8af3c1149392db796b --- tools/xen/functions | 4 ++-- tools/xen/install_os_domU.sh | 15 +++++++++------ 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/tools/xen/functions b/tools/xen/functions index b0b077d8d1..563303da21 100644 --- a/tools/xen/functions +++ b/tools/xen/functions @@ -137,14 +137,14 @@ function _network_exists() { local name_label name_label=$1 - ! [ -z $(xe network-list name-label="$name_label" --minimal) ] + ! 
[ -z "$(xe network-list name-label="$name_label" --minimal)" ] } function _bridge_exists() { local bridge bridge=$1 - ! [ -z $(xe network-list bridge="$bridge" --minimal) ] + ! [ -z "$(xe network-list bridge="$bridge" --minimal)" ] } function _network_uuid() { diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh index 33dc26f1bb..6ce334bc00 100755 --- a/tools/xen/install_os_domU.sh +++ b/tools/xen/install_os_domU.sh @@ -111,12 +111,15 @@ if is_service_enabled neutron; then fi if parameter_is_specified "FLAT_NETWORK_BRIDGE"; then - cat >&2 << EOF -ERROR: FLAT_NETWORK_BRIDGE is specified in localrc file -This is considered as an error, as its value will be derived from the -VM_BRIDGE_OR_NET_NAME variable's value. + if [ "$(bridge_for "$VM_BRIDGE_OR_NET_NAME")" != "$(bridge_for "$FLAT_NETWORK_BRIDGE")" ]; then + cat >&2 << EOF +ERROR: FLAT_NETWORK_BRIDGE is specified in localrc file, and either no network +found on XenServer by searching for networks by that value as name-label or +bridge name or the network found does not match the network specified by +VM_BRIDGE_OR_NET_NAME. Please check your localrc file. EOF - exit 1 + exit 1 + fi fi if ! 
xenapi_is_listening_on "$MGT_BRIDGE_OR_NET_NAME"; then @@ -310,7 +313,7 @@ if is_service_enabled neutron; then "xen_integration_bridge=${XEN_INTEGRATION_BRIDGE}" fi -FLAT_NETWORK_BRIDGE=$(bridge_for "$VM_BRIDGE_OR_NET_NAME") +FLAT_NETWORK_BRIDGE="${FLAT_NETWORK_BRIDGE:-$(bridge_for "$VM_BRIDGE_OR_NET_NAME")}" append_kernel_cmdline "$GUEST_NAME" "flat_network_bridge=${FLAT_NETWORK_BRIDGE}" # Add a separate xvdb, if it was requested From 61ae7c166c59f6dae28e9f9437cfe4468c600808 Mon Sep 17 00:00:00 2001 From: Peter Feiner Date: Fri, 15 Nov 2013 10:42:30 -0500 Subject: [PATCH 0519/4704] support memcache for keystone token backend Change-Id: I0c85a64932c39264b73cff4f9d952d0dbdf49e5b --- lib/keystone | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/lib/keystone b/lib/keystone index 4353ebab1c..0521bd3eac 100644 --- a/lib/keystone +++ b/lib/keystone @@ -190,6 +190,8 @@ function configure_keystone() { if [[ "$KEYSTONE_TOKEN_BACKEND" = "sql" ]]; then iniset $KEYSTONE_CONF token driver keystone.token.backends.sql.Token + elif [[ "$KEYSTONE_TOKEN_BACKEND" = "memcache" ]]; then + iniset $KEYSTONE_CONF token driver keystone.token.backends.memcache.Token else iniset $KEYSTONE_CONF token driver keystone.token.backends.kvs.Token fi @@ -350,6 +352,17 @@ function install_keystone() { if is_service_enabled ldap; then install_ldap fi + if [[ "$KEYSTONE_TOKEN_BACKEND" = "memcache" ]]; then + # Install memcached and the memcache Python library that keystone uses. + # Unfortunately the Python library goes by different names in the .deb + # and .rpm circles. 
+ install_package memcached + if is_ubuntu; then + install_package python-memcache + else + install_package python-memcached + fi + fi git_clone $KEYSTONE_REPO $KEYSTONE_DIR $KEYSTONE_BRANCH setup_develop $KEYSTONE_DIR if is_apache_enabled_service key; then From 3e439448b5ab1ea1cf2bfaef5d08c6ce41819912 Mon Sep 17 00:00:00 2001 From: Arnaud Legendre Date: Fri, 15 Nov 2013 16:06:03 -0800 Subject: [PATCH 0520/4704] upload_image.sh should handle file URLs upload_image.sh doesn't handle correctly file URLs: a file URL works only if the file is already in the cache. This patch provides support for file URLs of local files (RFC 1738) http://tools.ietf.org/html/rfc1738 Change-Id: I107299c543cfa189e32848c32eefdbeb51a5e1f5 Closes-Bug: #1251752 --- functions | 31 ++++++++++++++++++------------- 1 file changed, 18 insertions(+), 13 deletions(-) diff --git a/functions b/functions index effdc53afb..a9363f8c3e 100644 --- a/functions +++ b/functions @@ -1337,11 +1337,24 @@ function upload_image() { # Create a directory for the downloaded image tarballs. mkdir -p $FILES/images - # Downloads the image (uec ami+aki style), then extracts it. - IMAGE_FNAME=`basename "$image_url"` - if [[ ! -f $FILES/$IMAGE_FNAME || "$(stat -c "%s" $FILES/$IMAGE_FNAME)" = "0" ]]; then - wget -c $image_url -O $FILES/$IMAGE_FNAME - if [[ $? -ne 0 ]]; then + if [[ $image_url != file* ]]; then + # Downloads the image (uec ami+aki style), then extracts it. + IMAGE_FNAME=`basename "$image_url"` + if [[ ! -f $FILES/$IMAGE_FNAME || "$(stat -c "%s" $FILES/$IMAGE_FNAME)" = "0" ]]; then + wget -c $image_url -O $FILES/$IMAGE_FNAME + if [[ $? -ne 0 ]]; then + echo "Not found: $image_url" + return + fi + fi + IMAGE="$FILES/${IMAGE_FNAME}" + else + # File based URL (RFC 1738): file://host/path + # Remote files are not considered here. + # *nix: file:///home/user/path/file + # windows: file:///C:/Documents%20and%20Settings/user/path/file + IMAGE=$(echo $image_url | sed "s/^file:\/\///g") + if [[ ! 
-f $IMAGE || "$(stat -c "%s" $IMAGE)" == "0" ]]; then echo "Not found: $image_url" return fi @@ -1349,7 +1362,6 @@ function upload_image() { # OpenVZ-format images are provided as .tar.gz, but not decompressed prior to loading if [[ "$image_url" =~ 'openvz' ]]; then - IMAGE="$FILES/${IMAGE_FNAME}" IMAGE_NAME="${IMAGE_FNAME%.tar.gz}" glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --is-public=True --container-format ami --disk-format ami < "${IMAGE}" return @@ -1357,7 +1369,6 @@ function upload_image() { # vmdk format images if [[ "$image_url" =~ '.vmdk' ]]; then - IMAGE="$FILES/${IMAGE_FNAME}" IMAGE_NAME="${IMAGE_FNAME%.vmdk}" # Before we can upload vmdk type images to glance, we need to know it's @@ -1408,7 +1419,6 @@ function upload_image() { # XenServer-vhd-ovf-format images are provided as .vhd.tgz # and should not be decompressed prior to loading if [[ "$image_url" =~ '.vhd.tgz' ]]; then - IMAGE="$FILES/${IMAGE_FNAME}" IMAGE_NAME="${IMAGE_FNAME%.vhd.tgz}" glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --is-public=True --container-format=ovf --disk-format=vhd < "${IMAGE}" return @@ -1418,7 +1428,6 @@ function upload_image() { # and should not be decompressed prior to loading. # Setting metadata, so PV mode is used. 
if [[ "$image_url" =~ '.xen-raw.tgz' ]]; then - IMAGE="$FILES/${IMAGE_FNAME}" IMAGE_NAME="${IMAGE_FNAME%.xen-raw.tgz}" glance \ --os-auth-token $token \ @@ -1456,7 +1465,6 @@ function upload_image() { fi ;; *.img) - IMAGE="$FILES/$IMAGE_FNAME"; IMAGE_NAME=$(basename "$IMAGE" ".img") format=$(qemu-img info ${IMAGE} | awk '/^file format/ { print $3; exit }') if [[ ",qcow2,raw,vdi,vmdk,vpc," =~ ",$format," ]]; then @@ -1467,20 +1475,17 @@ function upload_image() { CONTAINER_FORMAT=bare ;; *.img.gz) - IMAGE="$FILES/${IMAGE_FNAME}" IMAGE_NAME=$(basename "$IMAGE" ".img.gz") DISK_FORMAT=raw CONTAINER_FORMAT=bare UNPACK=zcat ;; *.qcow2) - IMAGE="$FILES/${IMAGE_FNAME}" IMAGE_NAME=$(basename "$IMAGE" ".qcow2") DISK_FORMAT=qcow2 CONTAINER_FORMAT=bare ;; *.iso) - IMAGE="$FILES/${IMAGE_FNAME}" IMAGE_NAME=$(basename "$IMAGE" ".iso") DISK_FORMAT=iso CONTAINER_FORMAT=bare From 047cac56e14552eb6b2d374a35f3a092c5f2a5d4 Mon Sep 17 00:00:00 2001 From: Steve Kowalik Date: Thu, 7 Nov 2013 22:36:10 +1100 Subject: [PATCH 0521/4704] Switch the base URL to git.openstack.org The git repositories on github for openstack are mirrors of the primary repositories, which are hosted on git.openstack.org, so switch as much as I can to using the primary, rather than the github mirror. Change-Id: Idcfda49a691582055256b830c61e098f4a271339 --- README.md | 2 +- stackrc | 4 ++-- tools/build_tempest.sh | 2 +- tools/xen/test_functions.sh | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 93d139621f..b2603e75fc 100644 --- a/README.md +++ b/README.md @@ -30,7 +30,7 @@ You can also pick specific OpenStack project releases by setting the appropriate `stackrc` for the default set). 
Usually just before a release there will be milestone-proposed branches that need to be tested:: - GLANCE_REPO=https://github.com/openstack/glance.git + GLANCE_REPO=git://git.openstack.org/openstack/glance.git GLANCE_BRANCH=milestone-proposed # Start A Dev Cloud diff --git a/stackrc b/stackrc index 6adb676866..7eda5a5671 100644 --- a/stackrc +++ b/stackrc @@ -62,7 +62,7 @@ fi # Base GIT Repo URL # Another option is http://review.openstack.org/p -GIT_BASE=${GIT_BASE:-https://github.com} +GIT_BASE=${GIT_BASE:-git://git.openstack.org} # metering service CEILOMETER_REPO=${CEILOMETER_REPO:-${GIT_BASE}/openstack/ceilometer.git} @@ -182,7 +182,7 @@ NOVNC_REPO=${NOVNC_REPO:-${GIT_BASE}/kanaka/noVNC.git} NOVNC_BRANCH=${NOVNC_BRANCH:-master} # ryu service -RYU_REPO=${RYU_REPO:-${GIT_BASE}/osrg/ryu.git} +RYU_REPO=${RYU_REPO:-https://github.com/osrg/ryu.git} RYU_BRANCH=${RYU_BRANCH:-master} # a websockets/html5 or flash powered SPICE console for vm instances diff --git a/tools/build_tempest.sh b/tools/build_tempest.sh index 1758e7da33..6c527f5962 100755 --- a/tools/build_tempest.sh +++ b/tools/build_tempest.sh @@ -2,7 +2,7 @@ # # **build_tempest.sh** -# Checkout and prepare a Tempest repo: https://github.com/openstack/tempest.git +# Checkout and prepare a Tempest repo: git://git.openstack.org/openstack/tempest.git function usage { echo "$0 - Check out and prepare a Tempest repo" diff --git a/tools/xen/test_functions.sh b/tools/xen/test_functions.sh index 534723833d..0ae2cb7f9a 100755 --- a/tools/xen/test_functions.sh +++ b/tools/xen/test_functions.sh @@ -111,8 +111,8 @@ function test_no_plugin_directory_found { function test_zip_snapshot_location { diff \ - <(zip_snapshot_location "https://github.com/openstack/nova.git" "master") \ - <(echo "https://github.com/openstack/nova/zipball/master") + <(zip_snapshot_location "git://git.openstack.org/openstack/nova.git" "master") \ + <(echo "git://git.openstack.org/openstack/nova/zipball/master") } function 
test_create_directory_for_kernels { From f653419b15d244fa7e01a101de93174d282185ed Mon Sep 17 00:00:00 2001 From: Brant Knudson Date: Sun, 17 Nov 2013 13:03:52 -0600 Subject: [PATCH 0522/4704] Keystone don't use deprecated token_format option devstack was setting the token_format option in the keystone configuration file. This option is deprecated so should not be used. Change-Id: I047de155f0d9d2a1c009533c2f97f505cc80c6de --- lib/keystone | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/keystone b/lib/keystone index 4353ebab1c..978577f55e 100644 --- a/lib/keystone +++ b/lib/keystone @@ -179,7 +179,6 @@ function configure_keystone() { fi iniset $KEYSTONE_CONF DEFAULT admin_token "$SERVICE_TOKEN" - iniset $KEYSTONE_CONF signing token_format "$KEYSTONE_TOKEN_FORMAT" if [[ "$KEYSTONE_TOKEN_FORMAT" = "UUID" ]]; then iniset $KEYSTONE_CONF token provider keystone.token.providers.uuid.Provider From ca1b85283b2d53e5e6e52a90a57a9310dd948d5c Mon Sep 17 00:00:00 2001 From: DennyZhang Date: Sun, 17 Nov 2013 15:44:32 -0600 Subject: [PATCH 0523/4704] Mute false alarm when installing docker In install_docker.sh, we will restart docker service, then connect to /var/run/docker.sock with retry mechanism. At the first contacting with /var/run/docker.sock, when docker service is not ready, it may complain some error. Mute this false alarm. Change-Id: If00a18d2e3ddee951662e272d47ae84215f16ad2 Closes-Bug: #1252087 --- tools/docker/install_docker.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/docker/install_docker.sh b/tools/docker/install_docker.sh index 2e5b510c41..375cfe958b 100755 --- a/tools/docker/install_docker.sh +++ b/tools/docker/install_docker.sh @@ -45,7 +45,7 @@ restart_service docker echo "Waiting for docker daemon to start..." DOCKER_GROUP=$(groups | cut -d' ' -f1) -CONFIGURE_CMD="while ! /bin/echo -e 'GET /v1.3/version HTTP/1.0\n\n' | socat - unix-connect:$DOCKER_UNIX_SOCKET | grep -q '200 OK'; do +CONFIGURE_CMD="while ! 
/bin/echo -e 'GET /v1.3/version HTTP/1.0\n\n' | socat - unix-connect:$DOCKER_UNIX_SOCKET 2>/dev/null | grep -q '200 OK'; do # Set the right group on docker unix socket before retrying sudo chgrp $DOCKER_GROUP $DOCKER_UNIX_SOCKET sudo chmod g+rw $DOCKER_UNIX_SOCKET From aaac4eede998e6601c879fd359e0cb91c83ba77a Mon Sep 17 00:00:00 2001 From: Doug Hellmann Date: Mon, 18 Nov 2013 22:12:46 +0000 Subject: [PATCH 0524/4704] Fix stackforge_libs installation step When stackforge_libs is enabled, the WSME and Pecan libraries are checked out from stackforge and installed from source instead of pip. This change introduces a new function to perform the installation without attempting to sync the global requirements list, since the version of setup.py in the global requirements repository breaks the dependencies for WSME (there is no ipaddr library in python 2, so we need to install it, but under python 3 where it is part of the stdlib we cannot include it in the requirements). Fixes bug 1252488 Change-Id: I58357757ac67a919bf70178b76f65fa0a9e16242 --- functions | 22 ++++++++++++++++++---- lib/stackforge | 4 ++-- 2 files changed, 20 insertions(+), 6 deletions(-) diff --git a/functions b/functions index effdc53afb..ebccb592b7 100644 --- a/functions +++ b/functions @@ -1250,7 +1250,11 @@ function safe_chmod() { # ``pip install -e`` the package, which processes the dependencies # using pip before running `setup.py develop` -# Uses globals ``STACK_USER``, ``TRACK_DEPENDS``, ``REQUIREMENTS_DIR`` +# +# Updates the dependencies in project_dir from the +# openstack/requirements global list before installing anything. 
+# +# Uses globals ``TRACK_DEPENDS``, ``REQUIREMENTS_DIR`` # setup_develop directory function setup_develop() { local project_dir=$1 @@ -1266,9 +1270,7 @@ function setup_develop() { $SUDO_CMD python update.py $project_dir) fi - pip_install -e $project_dir - # ensure that further actions can do things like setup.py sdist - safe_chown -R $STACK_USER $1/*.egg-info + setup_develop_no_requirements_update $project_dir # We've just gone and possibly modified the user's source tree in an # automated way, which is considered bad form if it's a development @@ -1285,6 +1287,18 @@ function setup_develop() { fi } +# ``pip install -e`` the package, which processes the dependencies +# using pip before running `setup.py develop` +# Uses globals ``STACK_USER`` +# setup_develop_no_requirements_update directory +function setup_develop_no_requirements_update() { + local project_dir=$1 + + pip_install -e $project_dir + # ensure that further actions can do things like setup.py sdist + safe_chown -R $STACK_USER $1/*.egg-info +} + # Service wrapper to start services # start_service service-name diff --git a/lib/stackforge b/lib/stackforge index 4b79de0c94..718b818ff6 100644 --- a/lib/stackforge +++ b/lib/stackforge @@ -39,10 +39,10 @@ function install_stackforge() { cleanup_stackforge git_clone $WSME_REPO $WSME_DIR $WSME_BRANCH - setup_develop $WSME_DIR + setup_develop_no_requirements_update $WSME_DIR git_clone $PECAN_REPO $PECAN_DIR $PECAN_BRANCH - setup_develop $PECAN_DIR + setup_develop_no_requirements_update $PECAN_DIR } # cleanup_stackforge() - purge possibly old versions of stackforge libraries From e578effb330f6ee0adf3b2b19fba1a7f5c64b2f0 Mon Sep 17 00:00:00 2001 From: Stephan Renatus Date: Tue, 19 Nov 2013 13:31:04 +0100 Subject: [PATCH 0525/4704] Make use of STACK_USER instead of relying on USER Quite easily one ends up calling ./stack.sh in an environment that, albeit being user "stack" (for example), doesn't quite meet the expectations of devstack. 
The errors that follow can be rather hard to track down, as the dependency on `USER` is not mentioned. To remedy this situation, this commit - uses STACK_USER instead of USER and - mentions that dependency in the script headers of lib/* Change-Id: If4cdc39b922ea64b4c0893a0e695ec06349fccc5 --- lib/apache | 6 +++++- lib/ceilometer | 4 ++-- lib/cinder | 2 +- lib/neutron | 3 ++- lib/nova | 2 +- lib/nova_plugins/hypervisor-libvirt | 3 ++- lib/swift | 22 +++++++++++----------- 7 files changed, 24 insertions(+), 18 deletions(-) diff --git a/lib/apache b/lib/apache index 41d6fcc381..8ae78b2181 100644 --- a/lib/apache +++ b/lib/apache @@ -4,6 +4,10 @@ # Dependencies: # # - ``functions`` file +# -``STACK_USER`` must be defined + +# lib/apache exports the following functions: +# # - is_apache_enabled_service # - install_apache_wsgi # - config_apache_wsgi @@ -19,7 +23,7 @@ set +o xtrace # Allow overriding the default Apache user and group, default to # current user and his default group. -APACHE_USER=${APACHE_USER:-$USER} +APACHE_USER=${APACHE_USER:-$STACK_USER} APACHE_GROUP=${APACHE_GROUP:-$(id -gn $APACHE_USER)} diff --git a/lib/ceilometer b/lib/ceilometer index dcadb07899..87bb656642 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -67,10 +67,10 @@ function configure_ceilometer() { setup_develop $CEILOMETER_DIR [ ! -d $CEILOMETER_CONF_DIR ] && sudo mkdir -m 755 -p $CEILOMETER_CONF_DIR - sudo chown $USER $CEILOMETER_CONF_DIR + sudo chown $STACK_USER $CEILOMETER_CONF_DIR [ ! 
-d $CEILOMETER_API_LOG_DIR ] && sudo mkdir -m 755 -p $CEILOMETER_API_LOG_DIR - sudo chown $USER $CEILOMETER_API_LOG_DIR + sudo chown $STACK_USER $CEILOMETER_API_LOG_DIR iniset_rpc_backend ceilometer $CEILOMETER_CONF DEFAULT diff --git a/lib/cinder b/lib/cinder index 20d6e615f6..96d25058ce 100644 --- a/lib/cinder +++ b/lib/cinder @@ -199,7 +199,7 @@ function configure_cinder() { fi TEMPFILE=`mktemp` - echo "$USER ALL=(root) NOPASSWD: $ROOTWRAP_CINDER_SUDOER_CMD" >$TEMPFILE + echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_CINDER_SUDOER_CMD" >$TEMPFILE chmod 0440 $TEMPFILE sudo chown root:root $TEMPFILE sudo mv $TEMPFILE /etc/sudoers.d/cinder-rootwrap diff --git a/lib/neutron b/lib/neutron index 098a589592..c4d9abcadc 100644 --- a/lib/neutron +++ b/lib/neutron @@ -4,6 +4,7 @@ # Dependencies: # ``functions`` file # ``DEST`` must be defined +# ``STACK_USER`` must be defined # ``stack.sh`` calls the entry points in this order: # @@ -730,7 +731,7 @@ function _neutron_setup_rootwrap() { # Set up the rootwrap sudoers for neutron TEMPFILE=`mktemp` - echo "$USER ALL=(root) NOPASSWD: $ROOTWRAP_SUDOER_CMD" >$TEMPFILE + echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_SUDOER_CMD" >$TEMPFILE chmod 0440 $TEMPFILE sudo chown root:root $TEMPFILE sudo mv $TEMPFILE /etc/sudoers.d/neutron-rootwrap diff --git a/lib/nova b/lib/nova index 5b6f50e9ec..6ab2000111 100644 --- a/lib/nova +++ b/lib/nova @@ -195,7 +195,7 @@ function configure_nova_rootwrap() { # Set up the rootwrap sudoers for nova TEMPFILE=`mktemp` - echo "$USER ALL=(root) NOPASSWD: $ROOTWRAP_SUDOER_CMD" >$TEMPFILE + echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_SUDOER_CMD" >$TEMPFILE chmod 0440 $TEMPFILE sudo chown root:root $TEMPFILE sudo mv $TEMPFILE /etc/sudoers.d/nova-rootwrap diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt index 6fae0b17d0..6f90f4ac17 100644 --- a/lib/nova_plugins/hypervisor-libvirt +++ b/lib/nova_plugins/hypervisor-libvirt @@ -7,6 +7,7 @@ # Dependencies: # 
``functions`` file # ``nova`` configuration +# ``STACK_USER`` has to be defined # install_nova_hypervisor - install any external requirements # configure_nova_hypervisor - make configuration changes, including those to other services @@ -68,7 +69,7 @@ EOF" # with 'unix-group:$group'. sudo bash -c "cat </etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla [libvirt Management Access] -Identity=unix-user:$USER +Identity=unix-user:$STACK_USER Action=org.libvirt.unix.manage ResultAny=yes ResultInactive=yes diff --git a/lib/swift b/lib/swift index 83c4ebb49d..c932ea7907 100644 --- a/lib/swift +++ b/lib/swift @@ -225,7 +225,7 @@ function configure_swift() { swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true sudo mkdir -p ${SWIFT_CONF_DIR}/{object,container,account}-server - sudo chown -R $USER: ${SWIFT_CONF_DIR} + sudo chown -R ${STACK_USER}: ${SWIFT_CONF_DIR} if [[ "$SWIFT_CONF_DIR" != "/etc/swift" ]]; then # Some swift tools are hard-coded to use ``/etc/swift`` and are apparently not going to be fixed. @@ -238,7 +238,7 @@ function configure_swift() { # setup) we configure it with our version of rsync. 
sed -e " s/%GROUP%/${USER_GROUP}/; - s/%USER%/$USER/; + s/%USER%/${STACK_USER}/; s,%SWIFT_DATA_DIR%,$SWIFT_DATA_DIR,; " $FILES/swift/rsyncd.conf | sudo tee /etc/rsyncd.conf # rsyncd.conf just prepared for 4 nodes @@ -252,7 +252,7 @@ function configure_swift() { cp ${SWIFT_DIR}/etc/proxy-server.conf-sample ${SWIFT_CONFIG_PROXY_SERVER} iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT user - iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT user ${USER} + iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT user ${STACK_USER} iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT swift_dir iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT swift_dir ${SWIFT_CONF_DIR} @@ -339,7 +339,7 @@ EOF node_path=${SWIFT_DATA_DIR}/${node_number} iniuncomment ${swift_node_config} DEFAULT user - iniset ${swift_node_config} DEFAULT user ${USER} + iniset ${swift_node_config} DEFAULT user ${STACK_USER} iniuncomment ${swift_node_config} DEFAULT bind_port iniset ${swift_node_config} DEFAULT bind_port ${bind_port} @@ -410,7 +410,7 @@ EOF swift_log_dir=${SWIFT_DATA_DIR}/logs rm -rf ${swift_log_dir} mkdir -p ${swift_log_dir}/hourly - sudo chown -R $USER:adm ${swift_log_dir} + sudo chown -R ${STACK_USER}:adm ${swift_log_dir} sed "s,%SWIFT_LOGDIR%,${swift_log_dir}," $FILES/swift/rsyslog.conf | sudo \ tee /etc/rsyslog.d/10-swift.conf if is_apache_enabled_service swift; then @@ -425,9 +425,9 @@ function create_swift_disk() { # First do a bit of setup by creating the directories and # changing the permissions so we can run it as our user. - USER_GROUP=$(id -g) + USER_GROUP=$(id -g ${STACK_USER}) sudo mkdir -p ${SWIFT_DATA_DIR}/{drives,cache,run,logs} - sudo chown -R $USER:${USER_GROUP} ${SWIFT_DATA_DIR} + sudo chown -R ${STACK_USER}:${USER_GROUP} ${SWIFT_DATA_DIR} # Create a loopback disk and format it to XFS. 
if [[ -e ${SWIFT_DISK_IMAGE} ]]; then @@ -439,7 +439,7 @@ function create_swift_disk() { mkdir -p ${SWIFT_DATA_DIR}/drives/images sudo touch ${SWIFT_DISK_IMAGE} - sudo chown $USER: ${SWIFT_DISK_IMAGE} + sudo chown ${STACK_USER}: ${SWIFT_DISK_IMAGE} truncate -s ${SWIFT_LOOPBACK_DISK_SIZE} ${SWIFT_DISK_IMAGE} @@ -462,9 +462,9 @@ function create_swift_disk() { node_device=${node}/sdb1 [[ -d $node ]] && continue [[ -d $drive ]] && continue - sudo install -o ${USER} -g $USER_GROUP -d $drive - sudo install -o ${USER} -g $USER_GROUP -d $node_device - sudo chown -R $USER: ${node} + sudo install -o ${STACK_USER} -g $USER_GROUP -d $drive + sudo install -o ${STACK_USER} -g $USER_GROUP -d $node_device + sudo chown -R ${STACK_USER}: ${node} done } # create_swift_accounts() - Set up standard swift accounts and extra From bfb3e5ec9cfb6f06bfc4373e3c795e2918602c8e Mon Sep 17 00:00:00 2001 From: Ryan Hsu Date: Mon, 11 Nov 2013 21:20:14 -0800 Subject: [PATCH 0526/4704] Update vsphere image filename pattern The vsphere image filename pattern has been updated so that semi- colons are used to delimit image properties rather than colons, which are not permitted in Windows filesystems. To support back- wards compatibility, colons can still be used. Change-Id: I29a3ac03dcae294326dc8813a66512a79f705f81 Closes-Bug: #1250319 --- functions | 28 +++++++++++++++------------- 1 file changed, 15 insertions(+), 13 deletions(-) diff --git a/functions b/functions index effdc53afb..d3de1427ba 100644 --- a/functions +++ b/functions @@ -1362,15 +1362,19 @@ function upload_image() { # Before we can upload vmdk type images to glance, we need to know it's # disk type, storage adapter, and networking adapter. These values are - # passed to glance as custom properties. + # passed to glance as custom properties. # We take these values from the vmdk file if populated. 
Otherwise, we use # vmdk filename, which is expected in the following format: # - # -:: + # -;; # # If the filename does not follow the above format then the vsphere # driver will supply default values. + vmdk_adapter_type="" + vmdk_disktype="" + vmdk_net_adapter="" + # vmdk adapter type vmdk_adapter_type="$(head -25 $IMAGE | grep -a -F -m 1 'ddb.adapterType =' $IMAGE)" vmdk_adapter_type="${vmdk_adapter_type#*\"}" @@ -1389,17 +1393,15 @@ function upload_image() { #TODO(alegendre): handle streamOptimized once supported by VMware driver. vmdk_disktype="preallocated" fi - property_string=`echo "$IMAGE_NAME" | grep -oP '(?<=-)(?!.*-).+:.+:.+$'` - if [[ ! -z "$property_string" ]]; then - IFS=':' read -a props <<< "$property_string" - if [[ ! -z "${props[0]}" ]]; then - vmdk_disktype="${props[0]}" - fi - if [[ ! -z "${props[1]}" ]]; then - vmdk_adapter_type="${props[1]}" - fi - vmdk_net_adapter="${props[2]}" - fi + + # NOTE: For backwards compatibility reasons, colons may be used in place + # of semi-colons for property delimiters but they are not permitted + # characters in NTFS filesystems. 
+ property_string=`echo "$IMAGE_NAME" | grep -oP '(?<=-)(?!.*-).+[:;].+[:;].+$'` + IFS=':;' read -a props <<< "$property_string" + vmdk_disktype="${props[0]:-$vmdk_disktype}" + vmdk_adapter_type="${props[1]:-$vmdk_adapter_type}" + vmdk_net_adapter="${props[2]:-$vmdk_net_adapter}" glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --is-public=True --container-format bare --disk-format vmdk --property vmware_disktype="$vmdk_disktype" --property vmware_adaptertype="$vmdk_adapter_type" --property hw_vif_model="$vmdk_net_adapter" < "${IMAGE}" return From 75cb61ba39e17f3e3fb0d8a99b9aecf877e88819 Mon Sep 17 00:00:00 2001 From: Sergey Lukjanov Date: Wed, 20 Nov 2013 00:19:59 +0400 Subject: [PATCH 0527/4704] Handle Savanna service availability in tempest Change-Id: I51300304655803f114d3bb911086cd88aa09638f --- lib/tempest | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index fca3884e7f..f3578b17c3 100644 --- a/lib/tempest +++ b/lib/tempest @@ -300,7 +300,7 @@ function configure_tempest() { iniset $TEMPEST_CONF cli cli_dir $NOVA_BIN_DIR # service_available - for service in nova cinder glance neutron swift heat horizon ceilometer ironic; do + for service in nova cinder glance neutron swift heat horizon ceilometer ironic savanna; do if is_service_enabled $service ; then iniset $TEMPEST_CONF service_available $service "True" else From 40546f79e0e504d2d1470019a61a24da217e14fc Mon Sep 17 00:00:00 2001 From: Emilien Macchi Date: Tue, 24 Sep 2013 15:10:25 +0200 Subject: [PATCH 0528/4704] Add Neutron Metering Agent support In Havana, Neutron now has a Metering Agent which gets meters from virtual routers. This patch aims to allow devstack to use this new service.
Change-Id: I17ad83799d60384247b98cc8a93ac032f641c721 Signed-off-by: Emilien Macchi --- README.md | 1 + lib/neutron | 18 ++++++++++++++++ lib/neutron_plugins/services/metering | 30 +++++++++++++++++++++++++++ 3 files changed, 49 insertions(+) create mode 100644 lib/neutron_plugins/services/metering diff --git a/README.md b/README.md index 99e983887e..c94d8bd23a 100644 --- a/README.md +++ b/README.md @@ -139,6 +139,7 @@ following settings in your `localrc` : enable_service q-dhcp enable_service q-l3 enable_service q-meta + enable_service q-metering enable_service neutron # Optional, to enable tempest configuration as part of devstack enable_service tempest diff --git a/lib/neutron b/lib/neutron index 4a3d1b06a6..5334be613b 100644 --- a/lib/neutron +++ b/lib/neutron @@ -202,6 +202,12 @@ source $TOP_DIR/lib/neutron_plugins/$Q_PLUGIN # Hardcoding for 1 service plugin for now source $TOP_DIR/lib/neutron_plugins/services/loadbalancer +# Agent metering service plugin functions +# ------------------------------------------- + +# Hardcoding for 1 service plugin for now +source $TOP_DIR/lib/neutron_plugins/services/metering + # VPN service plugin functions # ------------------------------------------- # Hardcoding for 1 service plugin for now @@ -231,6 +237,9 @@ function configure_neutron() { if is_service_enabled q-lbaas; then _configure_neutron_lbaas fi + if is_service_enabled q-metering; then + _configure_neutron_metering + fi if is_service_enabled q-vpn; then _configure_neutron_vpn fi @@ -451,6 +460,10 @@ function start_neutron_agents() { if is_service_enabled q-lbaas; then screen_it q-lbaas "cd $NEUTRON_DIR && python $AGENT_LBAAS_BINARY --config-file $NEUTRON_CONF --config-file=$LBAAS_AGENT_CONF_FILENAME" fi + + if is_service_enabled q-metering; then + screen_it q-metering "cd $NEUTRON_DIR && python $AGENT_METERING_BINARY --config-file $NEUTRON_CONF --config-file $METERING_AGENT_CONF_FILENAME" + fi } # stop_neutron() - Stop running processes (non-screen) @@ -630,6 
+643,11 @@ function _configure_neutron_lbaas() { neutron_agent_lbaas_configure_agent } +function _configure_neutron_metering() { + neutron_agent_metering_configure_common + neutron_agent_metering_configure_agent +} + function _configure_neutron_fwaas() { neutron_fwaas_configure_common neutron_fwaas_configure_driver diff --git a/lib/neutron_plugins/services/metering b/lib/neutron_plugins/services/metering new file mode 100644 index 0000000000..629f3b788a --- /dev/null +++ b/lib/neutron_plugins/services/metering @@ -0,0 +1,30 @@ +# Neutron metering plugin +# --------------------------- + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +AGENT_METERING_BINARY="$NEUTRON_BIN_DIR/neutron-metering-agent" +METERING_PLUGIN="neutron.services.metering.metering_plugin.MeteringPlugin" + +function neutron_agent_metering_configure_common() { + if [[ $Q_SERVICE_PLUGIN_CLASSES == '' ]]; then + Q_SERVICE_PLUGIN_CLASSES=$METERING_PLUGIN + else + Q_SERVICE_PLUGIN_CLASSES="$Q_SERVICE_PLUGIN_CLASSES,$METERING_PLUGIN" + fi +} + +function neutron_agent_metering_configure_agent() { + METERING_AGENT_CONF_PATH=/etc/neutron/services/metering + mkdir -p $METERING_AGENT_CONF_PATH + + METERING_AGENT_CONF_FILENAME="$METERING_AGENT_CONF_PATH/metering_agent.ini" + + cp $NEUTRON_DIR/etc/metering_agent.ini $METERING_AGENT_CONF_FILENAME +} + +# Restore xtrace +$MY_XTRACE From d254da5213bf0868663b630dbb1ee99fe9157c6f Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Tue, 19 Nov 2013 21:06:29 -0800 Subject: [PATCH 0529/4704] Set swift timeouts higher Devstack is commonly run in a small slow environment, so bump the timeouts up. 
node_timeout is how long between read operations a node takes to respond to the proxy server conn_timeout is all about how long it takes a connect() system call to return Change-Id: Ib437466a3fc9274b8aa49b19e4fe7fa26f553419 Co-Authored-By: Peter Portante Related-Bug: #1252514 --- lib/swift | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/lib/swift b/lib/swift index 83c4ebb49d..927194d8d7 100644 --- a/lib/swift +++ b/lib/swift @@ -266,6 +266,15 @@ function configure_swift() { iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port ${SWIFT_DEFAULT_BIND_PORT:-8080} + # Devstack is commonly run in a small slow environment, so bump the + # timeouts up. + # node_timeout is how long between read operations a node takes to + # respond to the proxy server + # conn_timeout is all about how long it takes a connect() system call to + # return + iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server node_timeout 120 + iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server conn_timeout 20 + # Configure Ceilometer if is_service_enabled ceilometer; then iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer use "egg:ceilometer#swift" From 3b80bde8c7345a5e8c217b6c5c256c2f83aa7900 Mon Sep 17 00:00:00 2001 From: Clark Boylan Date: Wed, 20 Nov 2013 17:51:50 -0800 Subject: [PATCH 0530/4704] Check if flavors exist before creating them. * lib/tempest: When creating the m1.nano and m1.micro flavors ensure that they don't exist first. This is important for Grenade where code may be run multiple times and should expect that some things preexist.
Change-Id: I1772d4334f39d612f8a187eb5311a1b2caee3953 --- lib/tempest | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/lib/tempest b/lib/tempest index ec1fc90b76..cbf6a76610 100644 --- a/lib/tempest +++ b/lib/tempest @@ -73,6 +73,7 @@ function configure_tempest() { local password local line local flavors + local available_flavors local flavors_ref local flavor_lines local public_network_id @@ -142,10 +143,15 @@ function configure_tempest() { # If the ``DEFAULT_INSTANCE_TYPE`` not declared, use the new behavior # Tempest creates instane types for himself if [[ -z "$DEFAULT_INSTANCE_TYPE" ]]; then - nova flavor-create m1.nano 42 64 0 1 + available_flavors=$(nova flavor-list) + if [[ ! ( $available_flavors =~ 'm1.nano' ) ]]; then + nova flavor-create m1.nano 42 64 0 1 + fi flavor_ref=42 boto_instance_type=m1.nano - nova flavor-create m1.micro 84 128 0 1 + if [[ ! ( $available_flavors =~ 'm1.micro' ) ]]; then + nova flavor-create m1.micro 84 128 0 1 + fi flavor_ref_alt=84 else # Check Nova for existing flavors and, if set, look for the From 001c7b6c11574f60aecd47a6fc3b8ca54a393105 Mon Sep 17 00:00:00 2001 From: Matt Odden Date: Thu, 21 Nov 2013 22:12:56 +0000 Subject: [PATCH 0531/4704] Remove powervm nova virt driver support The powervm virt driver was removed from nova in a recent change. This functionality is no longer needed in devstack. 
Change-Id: Iec620938a8cce63e0830fc7b9e9a679b361b4389 --- lib/nova_plugins/hypervisor-powervm | 76 ----------------------------- 1 file changed, 76 deletions(-) delete mode 100644 lib/nova_plugins/hypervisor-powervm diff --git a/lib/nova_plugins/hypervisor-powervm b/lib/nova_plugins/hypervisor-powervm deleted file mode 100644 index 561dd9f00b..0000000000 --- a/lib/nova_plugins/hypervisor-powervm +++ /dev/null @@ -1,76 +0,0 @@ -# lib/nova_plugins/hypervisor-powervm -# Configure the PowerVM hypervisor - -# Enable with: -# VIRT_DRIVER=powervm - -# Dependencies: -# ``functions`` file -# ``nova`` configuration - -# install_nova_hypervisor - install any external requirements -# configure_nova_hypervisor - make configuration changes, including those to other services -# start_nova_hypervisor - start any external services -# stop_nova_hypervisor - stop any external services -# cleanup_nova_hypervisor - remove transient data and cache - -# Save trace setting -MY_XTRACE=$(set +o | grep xtrace) -set +o xtrace - - -# Defaults -# -------- - - -# Entry Points -# ------------ - -# clean_nova_hypervisor - Clean up an installation -function cleanup_nova_hypervisor() { - # This function intentionally left blank - : -} - -# configure_nova_hypervisor - Set config files, create data dirs, etc -function configure_nova_hypervisor() { - POWERVM_MGR_TYPE=${POWERVM_MGR_TYPE:-"ivm"} - POWERVM_MGR_HOST=${POWERVM_MGR_HOST:-"powervm.host"} - POWERVM_MGR_USER=${POWERVM_MGR_USER:-"padmin"} - POWERVM_MGR_PASSWD=${POWERVM_MGR_PASSWD:-"password"} - POWERVM_IMG_REMOTE_PATH=${POWERVM_IMG_REMOTE_PATH:-"/tmp"} - POWERVM_IMG_LOCAL_PATH=${POWERVM_IMG_LOCAL_PATH:-"/tmp"} - iniset $NOVA_CONF DEFAULT compute_driver nova.virt.powervm.PowerVMDriver - iniset $NOVA_CONF DEFAULT powervm_mgr_type $POWERVM_MGR_TYPE - iniset $NOVA_CONF DEFAULT powervm_mgr $POWERVM_MGR_HOST - iniset $NOVA_CONF DEFAULT powervm_mgr_user $POWERVM_MGR_USER - iniset $NOVA_CONF DEFAULT powervm_mgr_passwd $POWERVM_MGR_PASSWD - iniset 
$NOVA_CONF DEFAULT powervm_img_remote_path $POWERVM_IMG_REMOTE_PATH - iniset $NOVA_CONF DEFAULT powervm_img_local_path $POWERVM_IMG_LOCAL_PATH -} - -# install_nova_hypervisor() - Install external components -function install_nova_hypervisor() { - # This function intentionally left blank - : -} - -# start_nova_hypervisor - Start any required external services -function start_nova_hypervisor() { - # This function intentionally left blank - : -} - -# stop_nova_hypervisor - Stop any external services -function stop_nova_hypervisor() { - # This function intentionally left blank - : -} - - -# Restore xtrace -$MY_XTRACE - -# Local variables: -# mode: shell-script -# End: From 6db28923263b1d99f03069ccac6126a13bac0b5e Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Fri, 22 Nov 2013 12:16:02 -0500 Subject: [PATCH 0532/4704] Add hacking rules for shell scripts This is an attempt to collect the rules that we live by in devstack that are generally held. Writing these down helps us figure out ways to put them into bash8 over time. These are a starting point for conversation. Change-Id: Id2b750665871ebbeddf4694ba080c75d2f6f443e --- HACKING.rst | 48 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) diff --git a/HACKING.rst b/HACKING.rst index 3c08e679d9..103b579621 100644 --- a/HACKING.rst +++ b/HACKING.rst @@ -227,3 +227,51 @@ These scripts are executed serially by ``exercise.sh`` in testing situations. or graciously handle possible artifacts left over from previous runs if executed again. It is acceptable to require a reboot or even a re-install of DevStack to restore a clean test environment. + + +Bash Style Guidelines +~~~~~~~~~~~~~~~~~~~~~ +Devstack defines a bash set of best practices for maintaining large +collections of bash scripts. These should be considered as part of the +review process. + +We have a preliminary enforcing script for this called bash8 (only a +small number of these rules are enforced).
+ +Whitespace Rules +---------------- + +- lines should not include trailing whitespace +- there should be no hard tabs in the file +- indents are 4 spaces, and all indentation should be some multiple of + them + +Control Structure Rules +----------------------- +- then should be on the same line as the if +- do should be on the same line as the for + +Example:: + + if [[ -r $TOP_DIR/local.conf ]]; then + LRC=$(get_meta_section_files $TOP_DIR/local.conf local) + for lfile in $LRC; do + if [[ "$lfile" == "localrc" ]]; then + if [[ -r $TOP_DIR/localrc ]]; then + warn $LINENO "localrc and local.conf:[[local]] both exist, using localrc" + else + echo "# Generated file, do not edit" >$TOP_DIR/.localrc.auto + get_meta_section $TOP_DIR/local.conf local $lfile >>$TOP_DIR/.localrc.auto + fi + fi + done + fi + +Variables and Functions +----------------------- +- functions should be used whenever possible for clarity +- functions should use ``local`` variables as much as possible to + ensure they are isolated from the rest of the environment +- local variables should be lower case, global variables should be + upper case +- function names should_have_underscores, NotCamelCase. From 3a82319ad7172de938cb1e7e01a270f41d09fe3d Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Sun, 24 Nov 2013 18:53:20 +0100 Subject: [PATCH 0533/4704] fixup_stuff prettytable min version The minimum prettytable version is changed from 0.6 to 0.7 in the global requirements. If the system has an older prettytable version the fixup_stuff does not take effect in time, because at fixup time the system has the old version. Ensure the fixup installs the minimum required version in time.
Change-Id: If1737dacb25db73b68e707953d05576ad8a97da7 --- tools/fixup_stuff.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index f9362307d8..5fb47dc29b 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -51,7 +51,7 @@ function get_package_path() { # Fix prettytable 0.7.2 permissions # Don't specify --upgrade so we use the existing package if present -pip_install prettytable +pip_install 'prettytable>0.7' PACKAGE_DIR=$(get_package_path prettytable) # Only fix version 0.7.2 dir=$(echo $PACKAGE_DIR/prettytable-0.7.2*) From 480309eea527d7721148f66f557772da0e9b5941 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Sat, 23 Nov 2013 13:02:45 -0500 Subject: [PATCH 0534/4704] Allow overriding USE_GET_PIP via env vars devstack-gate wants to pre-cache and then use get-pip, but we can't throw the flag currently. Make the flag default settable via env vars. Change-Id: I661b52670b6ce494666cbdd611e4eee6b96c8321 Partial-Bug: #1254275 --- tools/install_pip.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/install_pip.sh b/tools/install_pip.sh index 455323e6fa..6b9b25e3e9 100755 --- a/tools/install_pip.sh +++ b/tools/install_pip.sh @@ -26,6 +26,7 @@ FILES=$TOP_DIR/files # Handle arguments +USE_GET_PIP=${USE_GET_PIP:-0} INSTALL_PIP_VERSION=${INSTALL_PIP_VERSION:-"1.4.1"} while [[ -n "$1" ]]; do case $1 in @@ -63,7 +64,7 @@ function get_versions() { function install_get_pip() { if [[ ! 
-r $FILES/get-pip.py ]]; then (cd $FILES; \ - curl $PIP_GET_PIP_URL; \ + curl -O $PIP_GET_PIP_URL; \ ) fi sudo python $FILES/get-pip.py From 674ee84ec6c6cd2e802e132db64855d2f36c16e1 Mon Sep 17 00:00:00 2001 From: Robert Myers Date: Mon, 25 Nov 2013 13:15:35 -0600 Subject: [PATCH 0535/4704] Adding optional colorized log output for trove Change-Id: Ibdaed9c2d1527b4c38551efbc147597e2a668b1a --- lib/trove | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/lib/trove b/lib/trove index c40006bf5d..3f9b1be5f9 100644 --- a/lib/trove +++ b/lib/trove @@ -33,6 +33,17 @@ TROVE_AUTH_ENDPOINT=$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH TROVE_AUTH_CACHE_DIR=${TROVE_AUTH_CACHE_DIR:-/var/cache/trove} TROVE_BIN_DIR=/usr/local/bin +# setup_trove_logging() - Adds logging configuration to conf files +function setup_trove_logging() { + local CONF=$1 + iniset $CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL + iniset $CONF DEFAULT use_syslog $SYSLOG + if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then + # Add color to logging output + setup_colorized_logging $CONF DEFAULT tenant user + fi +} + # create_trove_accounts() - Set up common required trove accounts # Tenant User Roles @@ -121,6 +132,9 @@ function configure_trove() { iniset $TROVE_LOCAL_CONF_DIR/trove-guestagent.conf.sample DEFAULT control_exchange trove sed -i "s/localhost/$NETWORK_GATEWAY/g" $TROVE_LOCAL_CONF_DIR/trove-guestagent.conf.sample + setup_trove_logging $TROVE_CONF_DIR/trove.conf + setup_trove_logging $TROVE_LOCAL_CONF_DIR/trove-guestagent.conf.sample + # (Re)create trove taskmanager conf file if needed if is_service_enabled tr-tmgr; then iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT rabbit_password $RABBIT_PASSWORD @@ -130,6 +144,7 @@ function configure_trove() { iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT nova_proxy_admin_tenant_name trove iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT nova_proxy_admin_pass $RADMIN_USER_PASS iniset 
$TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT trove_auth_url $TROVE_AUTH_ENDPOINT + setup_trove_logging $TROVE_CONF_DIR/trove-taskmanager.conf fi # (Re)create trove conductor conf file if needed @@ -141,6 +156,7 @@ function configure_trove() { iniset $TROVE_CONF_DIR/trove-conductor.conf DEFAULT nova_proxy_admin_pass $RADMIN_USER_PASS iniset $TROVE_CONF_DIR/trove-conductor.conf DEFAULT trove_auth_url $TROVE_AUTH_ENDPOINT iniset $TROVE_CONF_DIR/trove-conductor.conf DEFAULT control_exchange trove + setup_trove_logging $TROVE_CONF_DIR/trove-conductor.conf fi } From bd24a8d0f884d27f47834c917c047b54271c1179 Mon Sep 17 00:00:00 2001 From: Jamie Lennox Date: Fri, 20 Sep 2013 16:26:42 +1000 Subject: [PATCH 0536/4704] Allow deploying keystone with SSL certificates Allow providing certificates through environment variables to be used for keystone, and provide the basis for doing this for other services. It cannot be used in conjunction with tls-proxy as the service provides its own encrypted endpoint.
Implementing: blueprint devstack-https Change-Id: I8cf4c9c8c8a6911ae56ebcd14600a9d24cca99a0 --- lib/cinder | 2 ++ lib/glance | 2 ++ lib/heat | 1 + lib/ironic | 1 + lib/keystone | 19 +++++++++++++++- lib/nova | 2 ++ lib/swift | 2 ++ lib/tls | 50 +++++++++++++++++++++++++++++++++++++++++- lib/trove | 4 +++- openrc | 5 +++-- stack.sh | 26 ++++++++++++++++++++-- tools/create_userrc.sh | 5 ++++- 12 files changed, 111 insertions(+), 8 deletions(-) diff --git a/lib/cinder b/lib/cinder index 96d25058ce..9288685365 100644 --- a/lib/cinder +++ b/lib/cinder @@ -209,6 +209,7 @@ function configure_cinder() { inicomment $CINDER_API_PASTE_INI filter:authtoken auth_host inicomment $CINDER_API_PASTE_INI filter:authtoken auth_port inicomment $CINDER_API_PASTE_INI filter:authtoken auth_protocol + inicomment $CINDER_API_PASTE_INI filter:authtoken cafile inicomment $CINDER_API_PASTE_INI filter:authtoken admin_tenant_name inicomment $CINDER_API_PASTE_INI filter:authtoken admin_user inicomment $CINDER_API_PASTE_INI filter:authtoken admin_password @@ -219,6 +220,7 @@ function configure_cinder() { iniset $CINDER_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST iniset $CINDER_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT iniset $CINDER_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL + iniset $CINDER_CONF keystone_authtoken cafile $KEYSTONE_SSL_CA iniset $CINDER_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME iniset $CINDER_CONF keystone_authtoken admin_user cinder iniset $CINDER_CONF keystone_authtoken admin_password $SERVICE_PASSWORD diff --git a/lib/glance b/lib/glance index eb727f1e2a..c88f2dc472 100644 --- a/lib/glance +++ b/lib/glance @@ -82,6 +82,7 @@ function configure_glance() { iniset $GLANCE_REGISTRY_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST iniset $GLANCE_REGISTRY_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT iniset $GLANCE_REGISTRY_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL + iniset
$GLANCE_REGISTRY_CONF keystone_authtoken cafile $KEYSTONE_SSL_CA iniset $GLANCE_REGISTRY_CONF keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/ iniset $GLANCE_REGISTRY_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME iniset $GLANCE_REGISTRY_CONF keystone_authtoken admin_user glance @@ -99,6 +100,7 @@ function configure_glance() { iniset $GLANCE_API_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST iniset $GLANCE_API_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT iniset $GLANCE_API_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL + iniset $GLANCE_API_CONF keystone_authtoken cafile $KEYSTONE_SSL_CA iniset $GLANCE_API_CONF keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/ iniset $GLANCE_API_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME iniset $GLANCE_API_CONF keystone_authtoken admin_user glance diff --git a/lib/heat b/lib/heat index 7a9ef0da26..e44a618162 100644 --- a/lib/heat +++ b/lib/heat @@ -96,6 +96,7 @@ function configure_heat() { iniset $HEAT_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT iniset $HEAT_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL iniset $HEAT_CONF keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0 + iniset $HEAT_CONF keystone_authtoken cafile $KEYSTONE_SSL_CA iniset $HEAT_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME iniset $HEAT_CONF keystone_authtoken admin_user heat iniset $HEAT_CONF keystone_authtoken admin_password $SERVICE_PASSWORD diff --git a/lib/ironic b/lib/ironic index 9f86e841d8..099746ae22 100644 --- a/lib/ironic +++ b/lib/ironic @@ -98,6 +98,7 @@ function configure_ironic_api() { iniset $IRONIC_CONF_FILE keystone_authtoken auth_host $KEYSTONE_AUTH_HOST iniset $IRONIC_CONF_FILE keystone_authtoken auth_port $KEYSTONE_AUTH_PORT iniset $IRONIC_CONF_FILE keystone_authtoken 
auth_protocol $KEYSTONE_AUTH_PROTOCOL + iniset $IRONIC_CONF_FILE keystone_authtoken cafile $KEYSTONE_SSL_CA iniset $IRONIC_CONF_FILE keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/ iniset $IRONIC_CONF_FILE keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME iniset $IRONIC_CONF_FILE keystone_authtoken admin_user ironic diff --git a/lib/keystone b/lib/keystone index 978577f55e..4a7d7bb717 100644 --- a/lib/keystone +++ b/lib/keystone @@ -4,6 +4,7 @@ # Dependencies: # # - ``functions`` file +# - ``tls`` file # - ``DEST``, ``STACK_USER`` # - ``IDENTITY_API_VERSION`` # - ``BASE_SQL_CONN`` @@ -79,6 +80,13 @@ KEYSTONE_VALID_IDENTITY_BACKENDS=kvs,ldap,pam,sql # valid assignment backends as per dir keystone/identity/backends KEYSTONE_VALID_ASSIGNMENT_BACKENDS=kvs,ldap,sql +# if we are running with SSL use https protocols +if is_ssl_enabled_service "key"; then + KEYSTONE_AUTH_PROTOCOL="https" + KEYSTONE_SERVICE_PROTOCOL="https" +fi + + # Functions # --------- # cleanup_keystone() - Remove residual data files, anything left over from previous @@ -172,6 +180,15 @@ function configure_keystone() { iniset $KEYSTONE_CONF DEFAULT public_endpoint "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:%(public_port)s/" iniset $KEYSTONE_CONF DEFAULT admin_endpoint "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:%(admin_port)s/" + # Register SSL certificates if provided + if is_ssl_enabled_service key; then + ensure_certificates KEYSTONE + + iniset $KEYSTONE_CONF ssl enable True + iniset $KEYSTONE_CONF ssl certfile $KEYSTONE_SSL_CERT + iniset $KEYSTONE_CONF ssl keyfile $KEYSTONE_SSL_KEY + fi + if is_service_enabled tls-proxy; then # Set the service ports for a proxy to take the originals iniset $KEYSTONE_CONF DEFAULT public_port $KEYSTONE_SERVICE_PORT_INT @@ -373,7 +390,7 @@ function start_keystone() { fi echo "Waiting for keystone to start..." - if ! timeout $SERVICE_TIMEOUT sh -c "while ! 
curl --noproxy '*' -s http://$SERVICE_HOST:$service_port/v$IDENTITY_API_VERSION/ >/dev/null; do sleep 1; done"; then + if ! timeout $SERVICE_TIMEOUT sh -c "while ! curl --noproxy '*' -s $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$service_port/v$IDENTITY_API_VERSION/ >/dev/null; do sleep 1; done"; then die $LINENO "keystone did not start" fi diff --git a/lib/nova b/lib/nova index 6ab2000111..5fd0bebf65 100644 --- a/lib/nova +++ b/lib/nova @@ -225,6 +225,7 @@ function configure_nova() { inicomment $NOVA_API_PASTE_INI filter:authtoken auth_host inicomment $NOVA_API_PASTE_INI filter:authtoken auth_protocol inicomment $NOVA_API_PASTE_INI filter:authtoken admin_tenant_name + inicomment $NOVA_API_PASTE_INI filter:authtoken cafile inicomment $NOVA_API_PASTE_INI filter:authtoken admin_user inicomment $NOVA_API_PASTE_INI filter:authtoken admin_password fi @@ -399,6 +400,7 @@ function create_nova_conf() { iniset $NOVA_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST iniset $NOVA_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL iniset $NOVA_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME + iniset $NOVA_CONF keystone_authtoken cafile $KEYSTONE_SSL_CA iniset $NOVA_CONF keystone_authtoken admin_user nova iniset $NOVA_CONF keystone_authtoken admin_password $SERVICE_PASSWORD fi diff --git a/lib/swift b/lib/swift index c103b5ba5f..c0493110b9 100644 --- a/lib/swift +++ b/lib/swift @@ -306,6 +306,7 @@ function configure_swift() { iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_host $KEYSTONE_AUTH_HOST iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_port $KEYSTONE_AUTH_PORT iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken cafile $KEYSTONE_SSL_CA iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/ iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken 
admin_tenant_name $SERVICE_TENANT_NAME iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_user swift @@ -325,6 +326,7 @@ paste.filter_factory = keystone.middleware.s3_token:filter_factory auth_port = ${KEYSTONE_AUTH_PORT} auth_host = ${KEYSTONE_AUTH_HOST} auth_protocol = ${KEYSTONE_AUTH_PROTOCOL} +cafile = ${KEYSTONE_SSL_CA} auth_token = ${SERVICE_TOKEN} admin_token = ${SERVICE_TOKEN} diff --git a/lib/tls b/lib/tls index a1a7fddc18..6134fa1bad 100644 --- a/lib/tls +++ b/lib/tls @@ -22,7 +22,8 @@ # - make_int_ca # - new_cert $INT_CA_DIR int-server "abc" # - start_tls_proxy HOST_IP 5000 localhost 5000 - +# - ensure_certificates +# - is_ssl_enabled_service # Defaults # -------- @@ -309,6 +310,53 @@ function make_root_CA() { } +# Certificate Input Configuration +# =============================== + +# check to see if the service(s) specified are to be SSL enabled. +# +# Multiple services specified as arguments are ``OR``'ed together; the test +# is a short-circuit boolean, i.e it returns on the first match. +# +# Uses global ``SSL_ENABLED_SERVICES`` +function is_ssl_enabled_service() { + services=$@ + for service in ${services}; do + [[ ,${SSL_ENABLED_SERVICES}, =~ ,${service}, ]] && return 0 + done + return 1 +} + + +# Ensure that the certificates for a service are in place. This function does +# not check that a service is SSL enabled, this should already have been +# completed. +# +# The function expects to find a certificate, key and CA certificate in the +# variables {service}_SSL_CERT, {service}_SSL_KEY and {service}_SSL_CA. For +# example for keystone this would be KEYSTONE_SSL_CERT, KEYSTONE_SSL_KEY and +# KEYSTONE_SSL_CA. If it does not find these certificates the program will +# quit. 
+function ensure_certificates() { + local service=$1 + + local cert_var="${service}_SSL_CERT" + local key_var="${service}_SSL_KEY" + local ca_var="${service}_SSL_CA" + + local cert=${!cert_var} + local key=${!key_var} + local ca=${!ca_var} + + if [[ !($cert && $key && $ca) ]]; then + die $LINENO "Missing either the ${cert_var} ${key_var} or ${ca_var}" \ + "variable to enable SSL for ${service}" + fi + + cat $ca >> $SSL_BUNDLE_FILE +} + + # Proxy Functions # =============== diff --git a/lib/trove b/lib/trove index c40006bf5d..5ba4de5a4f 100644 --- a/lib/trove +++ b/lib/trove @@ -29,7 +29,6 @@ TROVE_DIR=$DEST/trove TROVECLIENT_DIR=$DEST/python-troveclient TROVE_CONF_DIR=/etc/trove TROVE_LOCAL_CONF_DIR=$TROVE_DIR/etc/trove -TROVE_AUTH_ENDPOINT=$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT//v$IDENTITY_API_VERSION TROVE_AUTH_CACHE_DIR=${TROVE_AUTH_CACHE_DIR:-/var/cache/trove} TROVE_BIN_DIR=/usr/local/bin @@ -102,6 +101,7 @@ function configure_trove() { iniset $TROVE_API_PASTE_INI filter:tokenauth auth_host $KEYSTONE_AUTH_HOST iniset $TROVE_API_PASTE_INI filter:tokenauth auth_port $KEYSTONE_AUTH_PORT iniset $TROVE_API_PASTE_INI filter:tokenauth auth_protocol $KEYSTONE_AUTH_PROTOCOL + iniset $TROVE_API_PASTE_INI filter:tokenauth cafile $KEYSTONE_SSL_CA iniset $TROVE_API_PASTE_INI filter:tokenauth admin_tenant_name $SERVICE_TENANT_NAME iniset $TROVE_API_PASTE_INI filter:tokenauth admin_user trove iniset $TROVE_API_PASTE_INI filter:tokenauth admin_password $SERVICE_PASSWORD @@ -123,6 +123,8 @@ function configure_trove() { # (Re)create trove taskmanager conf file if needed if is_service_enabled tr-tmgr; then + TROVE_AUTH_ENDPOINT=$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT//v$IDENTITY_API_VERSION + iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT rabbit_password $RABBIT_PASSWORD iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT sql_connection `database_connection_url trove` iniset $TROVE_CONF_DIR/trove-taskmanager.conf 
DEFAULT taskmanager_manager trove.taskmanager.manager.Manager diff --git a/openrc b/openrc index 804bb3f3d7..784b00e51b 100644 --- a/openrc +++ b/openrc @@ -58,6 +58,7 @@ export OS_NO_CACHE=${OS_NO_CACHE:-1} HOST_IP=${HOST_IP:-127.0.0.1} SERVICE_HOST=${SERVICE_HOST:-$HOST_IP} SERVICE_PROTOCOL=${SERVICE_PROTOCOL:-http} +KEYSTONE_AUTH_PROTOCOL=${KEYSTONE_AUTH_PROTOCOL:-$SERVICE_PROTOCOL} # Some exercises call glance directly. On a single-node installation, Glance # should be listening on HOST_IP. If its running elsewhere, it can be set here @@ -71,10 +72,10 @@ export OS_IDENTITY_API_VERSION=${IDENTITY_API_VERSION:-2.0} # the user/tenant has access to - including nova, glance, keystone, swift, ... # We currently recommend using the 2.0 *identity api*. # -export OS_AUTH_URL=$SERVICE_PROTOCOL://$SERVICE_HOST:5000/v${OS_IDENTITY_API_VERSION} +export OS_AUTH_URL=$KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:5000/v${OS_IDENTITY_API_VERSION} # Set the pointer to our CA certificate chain. Harmless if TLS is not used. -export OS_CACERT=$INT_CA_DIR/ca-chain.pem +export OS_CACERT=${OS_CACERT:-$INT_CA_DIR/ca-chain.pem} # Currently novaclient needs you to specify the *compute api* version. This # needs to match the config of your catalog returned by Keystone. diff --git a/stack.sh b/stack.sh index 47d93bd642..28032def37 100755 --- a/stack.sh +++ b/stack.sh @@ -290,6 +290,10 @@ LOG_COLOR=`trueorfalse True $LOG_COLOR` # Service startup timeout SERVICE_TIMEOUT=${SERVICE_TIMEOUT:-60} +# Reset the bundle of CA certificates +SSL_BUNDLE_FILE="$DATA_DIR/ca-bundle.pem" +rm -f $SSL_BUNDLE_FILE + # Configure Projects # ================== @@ -798,6 +802,17 @@ fi restart_rpc_backend +# Export Certicate Authority Bundle +# --------------------------------- + +# If certificates were used and written to the SSL bundle file then these +# should be exported so clients can validate their connections. 
+ +if [ -f $SSL_BUNDLE_FILE ]; then + export OS_CACERT=$SSL_BUNDLE_FILE +fi + + # Configure database # ------------------ @@ -1145,6 +1160,7 @@ if is_service_enabled trove; then start_trove fi + # Create account rc files # ======================= @@ -1153,7 +1169,13 @@ fi # which is helpful in image bundle steps. if is_service_enabled nova && is_service_enabled key; then - $TOP_DIR/tools/create_userrc.sh -PA --target-dir $TOP_DIR/accrc + USERRC_PARAMS="-PA --target-dir $TOP_DIR/accrc" + + if [ -f $SSL_BUNDLE_FILE ]; then + USERRC_PARAMS="$USERRC_PARAMS --os-cacert $SSL_BUNDLE_FILE" + fi + + $TOP_DIR/tools/create_userrc.sh $USERRC_PARAMS fi @@ -1229,7 +1251,7 @@ fi CURRENT_RUN_TIME=$(date "+$TIMESTAMP_FORMAT") echo "# $CURRENT_RUN_TIME" >$TOP_DIR/.stackenv for i in BASE_SQL_CONN ENABLED_SERVICES HOST_IP LOGFILE \ - SERVICE_HOST SERVICE_PROTOCOL STACK_USER TLS_IP; do + SERVICE_HOST SERVICE_PROTOCOL STACK_USER TLS_IP KEYSTONE_AUTH_PROTOCOL OS_CACERT; do echo $i=${!i} >>$TOP_DIR/.stackenv done diff --git a/tools/create_userrc.sh b/tools/create_userrc.sh index 8383fe7d77..5f4c48660b 100755 --- a/tools/create_userrc.sh +++ b/tools/create_userrc.sh @@ -43,6 +43,7 @@ Optional Arguments --os-tenant-name --os-tenant-id --os-auth-url +--os-cacert --target-dir --skip-tenant --debug @@ -53,7 +54,7 @@ $0 -P -C mytenant -u myuser -p mypass EOF } -if ! options=$(getopt -o hPAp:u:r:C: -l os-username:,os-password:,os-tenant-name:,os-tenant-id:,os-auth-url:,target-dir:,skip-tenant:,help,debug -- "$@") +if ! 
options=$(getopt -o hPAp:u:r:C: -l os-username:,os-password:,os-tenant-name:,os-tenant-id:,os-auth-url:,target-dir:,skip-tenant:,os-cacert:,help,debug -- "$@") then #parse error display_help @@ -80,6 +81,7 @@ do --os-tenant-id) export OS_TENANT_ID=$2; shift ;; --skip-tenant) SKIP_TENANT="$SKIP_TENANT$2,"; shift ;; --os-auth-url) export OS_AUTH_URL=$2; shift ;; + --os-cacert) export OS_CACERT=$2; shift ;; --target-dir) ACCOUNT_DIR=$2; shift ;; --debug) set -o xtrace ;; -u) MODE=${MODE:-one}; USER_NAME=$2; shift ;; @@ -201,6 +203,7 @@ export OS_USERNAME="$user_name" # Openstack Tenant ID = $tenant_id export OS_TENANT_NAME="$tenant_name" export OS_AUTH_URL="$OS_AUTH_URL" +export OS_CACERT="$OS_CACERT" export EC2_CERT="$ec2_cert" export EC2_PRIVATE_KEY="$ec2_private_key" export EC2_USER_ID=42 #not checked by nova (can be a 12-digit id) From a677b7fe828445968cdc714a630c74d35321c8fb Mon Sep 17 00:00:00 2001 From: Emilien Macchi Date: Mon, 25 Nov 2013 23:40:20 +0100 Subject: [PATCH 0537/4704] Move neutron cache dir into a function Taking the model of Nova, this patch aims to move the cache directory management into a function with the goal to reuse it somewhere else like Grenade. 
Change-Id: I93df52f69ef339e6528b88d88d4ea70e0b725893 Signed-off-by: Emilien Macchi --- lib/neutron | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/lib/neutron b/lib/neutron index 70417be5d3..8b0656bc26 100644 --- a/lib/neutron +++ b/lib/neutron @@ -17,6 +17,7 @@ # - configure_neutron_third_party # - init_neutron_third_party # - start_neutron_third_party +# - create_neutron_cache_dir # - create_nova_conf_neutron # - start_neutron_service_and_check # - create_neutron_initial_network @@ -296,6 +297,14 @@ function create_nova_conf_neutron() { fi } +# create_neutron_cache_dir() - Part of the _neutron_setup_keystone() process +function create_neutron_cache_dir() { + # Create cache dir + sudo mkdir -p $NEUTRON_AUTH_CACHE_DIR + sudo chown $STACK_USER $NEUTRON_AUTH_CACHE_DIR + rm -f $NEUTRON_AUTH_CACHE_DIR/* +} + # create_neutron_accounts() - Set up common required neutron accounts # Tenant User Roles @@ -782,9 +791,7 @@ function _neutron_setup_keystone() { if [[ -z $skip_auth_cache ]]; then iniset $conf_file $section signing_dir $NEUTRON_AUTH_CACHE_DIR # Create cache dir - sudo mkdir -p $NEUTRON_AUTH_CACHE_DIR - sudo chown $STACK_USER $NEUTRON_AUTH_CACHE_DIR - rm -f $NEUTRON_AUTH_CACHE_DIR/* + create_neutron_cache_dir fi } From cee4b3bddff851d875562bf9ce27b2754b75b36a Mon Sep 17 00:00:00 2001 From: Peter Portante Date: Wed, 20 Nov 2013 14:33:16 -0500 Subject: [PATCH 0538/4704] Shorten PKI token logging Log only the first 12 characters of auth-token for the Swift API, since PKI based auth-tokens from keystone can huge (>> 2K). Also tidy up a comment. 
Change-Id: Ib784e8ecdcb7e371fe03458c7fd82b4460fa82b9 Signed-off-by: Peter Portante --- lib/swift | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/lib/swift b/lib/swift index c103b5ba5f..ce13868f4a 100644 --- a/lib/swift +++ b/lib/swift @@ -96,6 +96,13 @@ SWIFT_PARTITION_POWER_SIZE=${SWIFT_PARTITION_POWER_SIZE:-9} SWIFT_REPLICAS=${SWIFT_REPLICAS:-1} SWIFT_REPLICAS_SEQ=$(seq ${SWIFT_REPLICAS}) +# Set ``SWIFT_LOG_TOKEN_LENGTH`` to configure how many characters of an auth +# token should be placed in the logs. When keystone is used with PKI tokens, +# the token values can be huge, seemingly larger the 2K, at the least. We +# restrict it here to a default of 12 characters, which should be enough to +# trace through the logs when looking for its use. +SWIFT_LOG_TOKEN_LENGTH=${SWIFT_LOG_TOKEN_LENGTH:-12} + # Set ``OBJECT_PORT_BASE``, ``CONTAINER_PORT_BASE``, ``ACCOUNT_PORT_BASE`` # Port bases used in port number calclution for the service "nodes" # The specified port number will be used, the additinal ports calculated by @@ -281,6 +288,9 @@ function configure_swift() { SWIFT_EXTRAS_MIDDLEWARE_LAST="${SWIFT_EXTRAS_MIDDLEWARE_LAST} ceilometer" fi + # Restrict the length of auth tokens in the swift proxy-server logs. + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:proxy-logging reveal_sensitive_prefix ${SWIFT_LOG_TOKEN_LENGTH} + # By default Swift will be installed with keystone and tempauth middleware # and add the swift3 middleware if its configured for it. 
The token for # tempauth would be prefixed with the reseller_prefix setting `TEMPAUTH_` the @@ -336,7 +346,7 @@ EOF cp ${SWIFT_DIR}/etc/swift.conf-sample ${SWIFT_CONF_DIR}/swift.conf iniset ${SWIFT_CONF_DIR}/swift.conf swift-hash swift_hash_path_suffix ${SWIFT_HASH} - # This function generates an object/account/proxy configuration + # This function generates an object/container/account configuration # emulating 4 nodes on different ports function generate_swift_config() { local swift_node_config=$1 From 8afc8935362388c54101e4d34b3310aa2e57c412 Mon Sep 17 00:00:00 2001 From: Peter Portante Date: Wed, 20 Nov 2013 17:34:39 -0500 Subject: [PATCH 0539/4704] Use the swift logging adapter for txn IDs Change-Id: I2b2308eb9606279cffc1965fc3b86e9597d63e87 Signed-off-by: Peter Portante --- lib/swift | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/swift b/lib/swift index ce13868f4a..40722ab030 100644 --- a/lib/swift +++ b/lib/swift @@ -321,6 +321,10 @@ function configure_swift() { iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_user swift iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_password $SERVICE_PASSWORD iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken signing_dir $SWIFT_AUTH_CACHE_DIR + # This causes the authtoken middleware to use the same python logging + # adapter provided by the swift proxy-server, so that request transaction + # IDs will included in all of its log messages. + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken log_name swift iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} filter:keystoneauth use iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} filter:keystoneauth operator_roles From b9e25135c51ee29edbdf48d41e1cb637188cc358 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Tue, 1 Oct 2013 14:45:04 -0500 Subject: [PATCH 0540/4704] freshen the LDAP support * Build the base DN from a given domain name * Remove all hard-coded names to allow configuration of base DN * Fix manager DN (cn=Manager,dc=...) 
* Add ldap init_ldap() * Add support for clean.sh Change-Id: Ieb69be9740653645b8e000574ad3fe59a0f97540 --- clean.sh | 6 + files/apts/ldap | 2 +- .../ldap/{openstack.ldif => keystone.ldif.in} | 16 +- files/ldap/manager.ldif.in | 9 +- ...e-config.ldif => suse-base-config.ldif.in} | 4 +- lib/keystone | 14 +- lib/ldap | 146 +++++++++++++----- 7 files changed, 141 insertions(+), 56 deletions(-) rename files/ldap/{openstack.ldif => keystone.ldif.in} (54%) rename files/ldap/{base-config.ldif => suse-base-config.ldif.in} (77%) diff --git a/clean.sh b/clean.sh index 395941ae21..480a81214f 100755 --- a/clean.sh +++ b/clean.sh @@ -15,6 +15,8 @@ TOP_DIR=$(cd $(dirname "$0") && pwd) # Import common functions source $TOP_DIR/functions +FILES=$TOP_DIR/files + # Load local configuration source $TOP_DIR/stackrc @@ -84,6 +86,10 @@ cleanup_nova cleanup_neutron cleanup_swift +if is_service_enabled ldap; then + cleanup_ldap +fi + # Do the hypervisor cleanup until this can be moved back into lib/nova if [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then cleanup_nova_hypervisor diff --git a/files/apts/ldap b/files/apts/ldap index 81a00f27bf..26f7aeffe3 100644 --- a/files/apts/ldap +++ b/files/apts/ldap @@ -1,3 +1,3 @@ ldap-utils -slapd # NOPRIME +slapd python-ldap diff --git a/files/ldap/openstack.ldif b/files/ldap/keystone.ldif.in similarity index 54% rename from files/ldap/openstack.ldif rename to files/ldap/keystone.ldif.in index 02caf3f368..cf51907cf6 100644 --- a/files/ldap/openstack.ldif +++ b/files/ldap/keystone.ldif.in @@ -1,26 +1,26 @@ -dn: dc=openstack,dc=org -dc: openstack +dn: ${BASE_DN} objectClass: dcObject objectClass: organizationalUnit -ou: openstack +dc: ${BASE_DC} +ou: ${BASE_DC} -dn: ou=UserGroups,dc=openstack,dc=org +dn: ou=UserGroups,${BASE_DN} objectClass: organizationalUnit ou: UserGroups -dn: ou=Users,dc=openstack,dc=org +dn: ou=Users,${BASE_DN} objectClass: organizationalUnit ou: Users -dn: ou=Roles,dc=openstack,dc=org +dn: ou=Roles,${BASE_DN} objectClass: 
organizationalUnit ou: Roles -dn: ou=Projects,dc=openstack,dc=org +dn: ou=Projects,${BASE_DN} objectClass: organizationalUnit ou: Projects -dn: cn=9fe2ff9ee4384b1894a90878d3e92bab,ou=Roles,dc=openstack,dc=org +dn: cn=9fe2ff9ee4384b1894a90878d3e92bab,ou=Roles,${BASE_DN} objectClass: organizationalRole ou: _member_ cn: 9fe2ff9ee4384b1894a90878d3e92bab diff --git a/files/ldap/manager.ldif.in b/files/ldap/manager.ldif.in index e522150f2e..de3b69de7c 100644 --- a/files/ldap/manager.ldif.in +++ b/files/ldap/manager.ldif.in @@ -1,10 +1,15 @@ dn: olcDatabase={${LDAP_OLCDB_NUMBER}}hdb,cn=config changetype: modify replace: olcSuffix -olcSuffix: dc=openstack,dc=org +olcSuffix: ${BASE_DN} - replace: olcRootDN -olcRootDN: dc=Manager,dc=openstack,dc=org +olcRootDN: ${MANAGER_DN} - ${LDAP_ROOTPW_COMMAND}: olcRootPW olcRootPW: ${SLAPPASS} +- +replace: olcDbIndex +olcDbIndex: objectClass eq +olcDbIndex: default pres,eq +olcDbIndex: cn,sn,givenName,co diff --git a/files/ldap/base-config.ldif b/files/ldap/suse-base-config.ldif.in similarity index 77% rename from files/ldap/base-config.ldif rename to files/ldap/suse-base-config.ldif.in index 026d8bc0fc..00256ee9d8 100644 --- a/files/ldap/base-config.ldif +++ b/files/ldap/suse-base-config.ldif.in @@ -12,8 +12,10 @@ objectClass: olcSchemaConfig cn: schema include: file:///etc/openldap/schema/core.ldif +include: file:///etc/openldap/schema/cosine.ldif +include: file:///etc/openldap/schema/inetorgperson.ldif dn: olcDatabase={1}hdb,cn=config objectClass: olcHdbConfig olcDbDirectory: /var/lib/ldap -olcSuffix: dc=openstack,dc=org +olcSuffix: ${BASE_DN} diff --git a/lib/keystone b/lib/keystone index c1fa0af8af..76eff54e35 100644 --- a/lib/keystone +++ b/lib/keystone @@ -143,17 +143,17 @@ function configure_keystone() { if is_service_enabled ldap; then #Set all needed ldap values - iniset $KEYSTONE_CONF ldap password $LDAP_PASSWORD - iniset $KEYSTONE_CONF ldap user "dc=Manager,dc=openstack,dc=org" - iniset $KEYSTONE_CONF ldap suffix 
"dc=openstack,dc=org" + iniset $KEYSTONE_CONF ldap password $LDAP_PASSWORD + iniset $KEYSTONE_CONF ldap user $LDAP_MANAGER_DN + iniset $KEYSTONE_CONF ldap suffix $LDAP_BASE_DN iniset $KEYSTONE_CONF ldap use_dumb_member "True" iniset $KEYSTONE_CONF ldap user_attribute_ignore "enabled,email,tenants,default_project_id" iniset $KEYSTONE_CONF ldap tenant_attribute_ignore "enabled" iniset $KEYSTONE_CONF ldap tenant_domain_id_attribute "businessCategory" iniset $KEYSTONE_CONF ldap tenant_desc_attribute "description" - iniset $KEYSTONE_CONF ldap tenant_tree_dn "ou=Projects,dc=openstack,dc=org" + iniset $KEYSTONE_CONF ldap tenant_tree_dn "ou=Projects,$LDAP_BASE_DN" iniset $KEYSTONE_CONF ldap user_domain_id_attribute "businessCategory" - iniset $KEYSTONE_CONF ldap user_tree_dn "ou=Users,dc=openstack,dc=org" + iniset $KEYSTONE_CONF ldap user_tree_dn "ou=Users,$LDAP_BASE_DN" iniset $KEYSTONE_CONF DEFAULT member_role_id "9fe2ff9ee4384b1894a90878d3e92bab" iniset $KEYSTONE_CONF DEFAULT member_role_name "_member_" fi @@ -320,6 +320,10 @@ create_keystone_accounts() { # init_keystone() - Initialize databases, etc. 
function init_keystone() { + if is_service_enabled ldap; then + init_ldap + fi + # (Re)create keystone database recreate_database keystone utf8 diff --git a/lib/ldap b/lib/ldap index 80992a7a09..e4bd41624d 100644 --- a/lib/ldap +++ b/lib/ldap @@ -9,68 +9,137 @@ XTRACE=$(set +o | grep xtrace) set +o xtrace + +LDAP_DOMAIN=${LDAP_DOMAIN:-openstack.org} +# Make an array of domain components +DC=(${LDAP_DOMAIN/./ }) + +# Leftmost domain component used in top-level entry +LDAP_BASE_DC=${DC[0]} + +# Build the base DN +dn="" +for dc in ${DC[*]}; do + dn="$dn,dc=$dc" +done +LDAP_BASE_DN=${dn#,} + +LDAP_MANAGER_DN="${LDAP_MANAGER_DN:-cn=Manager,${LDAP_BASE_DN}}" +LDAP_URL=${LDAP_URL:-ldap://localhost} + LDAP_SERVICE_NAME=slapd +if is_ubuntu; then + LDAP_OLCDB_NUMBER=1 + LDAP_ROOTPW_COMMAND=replace +elif is_fedora; then + LDAP_OLCDB_NUMBER=2 + LDAP_ROOTPW_COMMAND=add +elif is_suse; then + # SUSE has slappasswd in /usr/sbin/ + PATH=$PATH:/usr/sbin/ + LDAP_OLCDB_NUMBER=1 + LDAP_ROOTPW_COMMAND=add + LDAP_SERVICE_NAME=ldap +fi + + # Functions # --------- +# Perform common variable substitutions on the data files +# _ldap_varsubst file +function _ldap_varsubst() { + local infile=$1 + sed -e " + s|\${LDAP_OLCDB_NUMBER}|$LDAP_OLCDB_NUMBER| + s|\${SLAPPASS}|$SLAPPASS| + s|\${LDAP_ROOTPW_COMMAND}|$LDAP_ROOTPW_COMMAND| + s|\${BASE_DC}|$LDAP_BASE_DC| + s|\${BASE_DN}|$LDAP_BASE_DN| + s|\${MANAGER_DN}|$LDAP_MANAGER_DN| + " $infile +} + +# clean_ldap() - Remove ldap server +function cleanup_ldap() { + uninstall_package $(get_packages ldap) + if is_ubuntu; then + uninstall_package slapd ldap-utils libslp1 + sudo rm -rf /etc/ldap/ldap.conf /var/lib/ldap + elif is_fedora; then + sudo rm -rf /etc/openldap /var/lib/ldap + elif is_suse; then + sudo rm -rf /var/lib/ldap + fi +} + +# init_ldap +# init_ldap() - Initialize databases, etc. 
+function init_ldap() { + local keystone_ldif + + TMP_LDAP_DIR=$(mktemp -d -t ldap.$$.XXXXXXXXXX) + + # Remove data but not schemas + clear_ldap_state + + # Add our top level ldap nodes + if ldapsearch -x -w $LDAP_PASSWORD -D "$LDAP_MANAGER_DN" -H $LDAP_URL -b "$LDAP_BASE_DN" | grep -q "Success"; then + printf "LDAP already configured for $LDAP_BASE_DC\n" + else + printf "Configuring LDAP for $LDAP_BASE_DC\n" + # If BASE_DN is changed, the user may override the default file + if [[ -r $FILES/ldap/${LDAP_BASE_DC}.ldif.in ]]; then + keystone_ldif=${LDAP_BASE_DC}.ldif + else + keystone_ldif=keystone.ldif + fi + _ldap_varsubst $FILES/ldap/${keystone_ldif}.in >$TMP_LDAP_DIR/${keystone_ldif} + if [[ -r $TMP_LDAP_DIR/${keystone_ldif} ]]; then + ldapadd -x -w $LDAP_PASSWORD -D "$LDAP_MANAGER_DN" -H $LDAP_URL -c -f $TMP_LDAP_DIR/${keystone_ldif} + fi + fi + + rm -rf TMP_LDAP_DIR +} + # install_ldap # install_ldap() - Collect source and prepare function install_ldap() { echo "Installing LDAP inside function" - echo "LDAP_PASSWORD is $LDAP_PASSWORD" echo "os_VENDOR is $os_VENDOR" - printf "installing" + + TMP_LDAP_DIR=$(mktemp -d -t ldap.$$.XXXXXXXXXX) + + printf "installing OpenLDAP" if is_ubuntu; then - LDAP_OLCDB_NUMBER=1 - LDAP_ROOTPW_COMMAND=replace - sudo DEBIAN_FRONTEND=noninteractive apt-get install slapd ldap-utils - #automatically starts LDAP on ubuntu so no need to call start_ldap + # Ubuntu automatically starts LDAP so no need to call start_ldap() + : elif is_fedora; then - LDAP_OLCDB_NUMBER=2 - LDAP_ROOTPW_COMMAND=add start_ldap elif is_suse; then - LDAP_OLCDB_NUMBER=1 - LDAP_ROOTPW_COMMAND=add - LDAP_SERVICE_NAME=ldap - # SUSE has slappasswd in /usr/sbin/ - PATH=$PATH:/usr/sbin/ - sudo slapadd -F /etc/openldap/slapd.d/ -bcn=config -l $FILES/ldap/base-config.ldif + _ldap_varsubst $FILES/ldap/suse-base-config.ldif.in >$TMP_LDAP_DIR/suse-base-config.ldif + sudo slapadd -F /etc/openldap/slapd.d/ -bcn=config -l $TMP_LDAP_DIR/suse-base-config.ldif sudo sed -i 
'/^OPENLDAP_START_LDAPI=/s/"no"/"yes"/g' /etc/sysconfig/openldap start_ldap fi - printf "generate password file" - SLAPPASS=`slappasswd -s $LDAP_PASSWORD` - - printf "secret is $SLAPPASS\n" - #create manager.ldif - TMP_MGR_DIFF_FILE=`mktemp -t manager_ldiff.$$.XXXXXXXXXX.ldif` - sed -e "s|\${LDAP_OLCDB_NUMBER}|$LDAP_OLCDB_NUMBER|" -e "s|\${SLAPPASS}|$SLAPPASS|" -e "s|\${LDAP_ROOTPW_COMMAND}|$LDAP_ROOTPW_COMMAND|" $FILES/ldap/manager.ldif.in >> $TMP_MGR_DIFF_FILE + echo "LDAP_PASSWORD is $LDAP_PASSWORD" + SLAPPASS=$(slappasswd -s $LDAP_PASSWORD) + printf "LDAP secret is $SLAPPASS\n" - #update ldap olcdb - sudo ldapmodify -Y EXTERNAL -H ldapi:/// -f $TMP_MGR_DIFF_FILE + # Create manager.ldif and add to olcdb + _ldap_varsubst $FILES/ldap/manager.ldif.in >$TMP_LDAP_DIR/manager.ldif + sudo ldapmodify -Y EXTERNAL -H ldapi:/// -f $TMP_LDAP_DIR/manager.ldif # On fedora we need to manually add cosine and inetorgperson schemas - if is_fedora || is_suse; then + if is_fedora; then sudo ldapadd -Y EXTERNAL -H ldapi:/// -f /etc/openldap/schema/cosine.ldif sudo ldapadd -Y EXTERNAL -H ldapi:/// -f /etc/openldap/schema/inetorgperson.ldif fi - # add our top level ldap nodes - if ldapsearch -x -w $LDAP_PASSWORD -H ldap://localhost -D dc=Manager,dc=openstack,dc=org -x -b dc=openstack,dc=org | grep -q "Success"; then - printf "LDAP already configured for OpenStack\n" - if [[ "$KEYSTONE_CLEAR_LDAP" == "yes" ]]; then - # clear LDAP state - clear_ldap_state - # reconfigure LDAP for OpenStack - ldapadd -c -x -H ldap://localhost -D dc=Manager,dc=openstack,dc=org -w $LDAP_PASSWORD -f $FILES/ldap/openstack.ldif - fi - else - printf "Configuring LDAP for OpenStack\n" - ldapadd -c -x -H ldap://localhost -D dc=Manager,dc=openstack,dc=org -w $LDAP_PASSWORD -f $FILES/ldap/openstack.ldif - fi + rm -rf TMP_LDAP_DIR } # start_ldap() - Start LDAP @@ -78,7 +147,6 @@ function start_ldap() { sudo service $LDAP_SERVICE_NAME restart } - # stop_ldap() - Stop LDAP function stop_ldap() { sudo service 
$LDAP_SERVICE_NAME stop @@ -86,7 +154,7 @@ function stop_ldap() { # clear_ldap_state() - Clear LDAP State function clear_ldap_state() { - ldapdelete -x -w $LDAP_PASSWORD -H ldap://localhost -D dc=Manager,dc=openstack,dc=org -x -r "dc=openstack,dc=org" + ldapdelete -x -w $LDAP_PASSWORD -D "$LDAP_MANAGER_DN" -H $LDAP_URL -r "$LDAP_BASE_DN" } # Restore xtrace From 1c402286cff1dfda5182020e4956f73e7d063d71 Mon Sep 17 00:00:00 2001 From: Gordon Chung Date: Tue, 26 Nov 2013 13:30:11 -0500 Subject: [PATCH 0541/4704] split collector service the ceilometer collector service has been split into two: ceilometer-collector and ceilometer-agent-notification Change-Id: I6114fd7f3e063abfa74d48d402dc863bccd249b6 Blueprint: split-collector --- lib/ceilometer | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/lib/ceilometer b/lib/ceilometer index 8e2970c652..fac3be14a9 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -3,7 +3,7 @@ # To enable a minimal set of Ceilometer services, add the following to localrc: # -# enable_service ceilometer-acompute ceilometer-acentral ceilometer-collector ceilometer-api +# enable_service ceilometer-acompute ceilometer-acentral ceilometer-anotification ceilometer-collector ceilometer-api # # To ensure Ceilometer alarming services are enabled also, further add to the localrc: # @@ -145,6 +145,7 @@ function start_ceilometer() { screen_it ceilometer-acompute "cd ; sg $LIBVIRT_GROUP \"ceilometer-agent-compute --config-file $CEILOMETER_CONF\"" fi screen_it ceilometer-acentral "cd ; ceilometer-agent-central --config-file $CEILOMETER_CONF" + screen_it ceilometer-anotification "cd ; ceilometer-agent-notification --config-file $CEILOMETER_CONF" screen_it ceilometer-collector "cd ; ceilometer-collector --config-file $CEILOMETER_CONF" screen_it ceilometer-api "cd ; ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF" @@ -160,7 +161,7 @@ function start_ceilometer() { # stop_ceilometer() - Stop running 
processes function stop_ceilometer() { # Kill the ceilometer screen windows - for serv in ceilometer-acompute ceilometer-acentral ceilometer-collector ceilometer-api ceilometer-alarm-notifier ceilometer-alarm-evaluator; do + for serv in ceilometer-acompute ceilometer-acentral ceilometer-anotification ceilometer-collector ceilometer-api ceilometer-alarm-notifier ceilometer-alarm-evaluator; do screen -S $SCREEN_NAME -p $serv -X kill done } From afbc631cb8c89316bbecbf0f2c601103304e1994 Mon Sep 17 00:00:00 2001 From: Tomoe Sugihara Date: Thu, 14 Nov 2013 20:02:47 +0000 Subject: [PATCH 0542/4704] Make tempest L3 capable plugin aware. With this patch, the public network config in tempest.conf will be done for the plugins that support L3. Change-Id: I820fe300fac45ff92d1281ff0c43ebc137783210 --- lib/neutron | 7 +++++++ lib/tempest | 3 ++- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/lib/neutron b/lib/neutron index 8b0656bc26..6eabef5b3e 100644 --- a/lib/neutron +++ b/lib/neutron @@ -115,6 +115,13 @@ Q_ROUTER_NAME=${Q_ROUTER_NAME:-router1} # nova vif driver that all plugins should use NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"} +# The next two variables are configured by plugin +# e.g. _configure_neutron_l3_agent or lib/neutron_plugins/* +# +# The plugin supports L3. 
+Q_L3_ENABLED=${Q_L3_ENABLED:-False} +# L3 routers exist per tenant +Q_L3_ROUTER_PER_TENANT=${Q_L3_ROUTER_PER_TENANT:-False} # List of config file names in addition to the main plugin config file # See _configure_neutron_common() for details about setting it up diff --git a/lib/tempest b/lib/tempest index 803b740221..7932fe69a3 100644 --- a/lib/tempest +++ b/lib/tempest @@ -15,6 +15,7 @@ # - ``PUBLIC_NETWORK_NAME`` # - ``Q_USE_NAMESPACE`` # - ``Q_ROUTER_NAME`` +# - ``Q_L3_ENABLED`` # - ``VIRT_DRIVER`` # - ``LIBVIRT_TYPE`` # - ``KEYSTONE_SERVICE_PROTOCOL``, ``KEYSTONE_SERVICE_HOST`` from lib/keystone @@ -200,7 +201,7 @@ function configure_tempest() { ssh_connect_method=${TEMPEST_SSH_CONNECT_METHOD:-$ssh_connect_method} - if is_service_enabled q-l3; then + if [ "$Q_L3_ENABLED" = "True" ]; then public_network_id=$(neutron net-list | grep $PUBLIC_NETWORK_NAME | \ awk '{print $2}') if [ "$Q_USE_NAMESPACE" == "False" ]; then From 3d94736b60d9f3c2f159e81eab5841dba255515a Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Wed, 27 Nov 2013 10:06:58 +0100 Subject: [PATCH 0543/4704] Assign unique name to each fake nova-compute Without a unique name, the scheduler (and anything else, really) will consider each of these nova-compute processes as being one and the same, so only one entry in the services table, only one hypervisor seen by the scheduler, etc. Assigning unique names lets us simulate an arbitrary amount of nova-computes which is very handy for benchmarking the scheduler. 
Change-Id: Ie03aad81bd2a8e73b876a9eae934bc00bf2f71e9 --- lib/nova | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/nova b/lib/nova index 6ab2000111..43a33127a9 100644 --- a/lib/nova +++ b/lib/nova @@ -650,7 +650,7 @@ function start_nova_compute() { screen_it n-cpu "cd $NOVA_DIR && sg $LIBVIRT_GROUP '$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf'" elif [[ "$VIRT_DRIVER" = 'fake' ]]; then for i in `seq 1 $NUMBER_FAKE_NOVA_COMPUTE`; do - screen_it n-cpu "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf" + screen_it n-cpu "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf --config-file <(echo -e '[DEFAULT]\nhost=${HOSTNAME}${i}')" done else if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then From 06b345e509f7fa213be17715f9ac581a51f2fd56 Mon Sep 17 00:00:00 2001 From: Flaper Fesp Date: Wed, 4 Sep 2013 15:35:47 +0200 Subject: [PATCH 0544/4704] Add marconi support to devstack Marconi has an optional dependency on keystone for authentication. 
This code was tested with everything enabled and also with the following localrc: STACK_USER=fedora SERVICE_TOKEN=secrete ADMIN_PASSWORD=secrete MYSQL_PASSWORD=secrete RABBIT_PASSWORD=secrete SERVICE_PASSWORD=secrete disable_all_services enable_service qpid enable_service key enable_service mysql enable_service marconi-server Implements blueprint marconi-devstack-integration Implements blueprint devstack-support Change-Id: I13495bcc5c5eb66cee641894e9f84a0089460c8b --- exercises/marconi.sh | 43 ++++++++++ extras.d/70-marconi.sh | 29 +++++++ files/apts/marconi-server | 3 + files/rpms/marconi-server | 3 + lib/marconi | 171 ++++++++++++++++++++++++++++++++++++++ 5 files changed, 249 insertions(+) create mode 100755 exercises/marconi.sh create mode 100644 extras.d/70-marconi.sh create mode 100644 files/apts/marconi-server create mode 100644 files/rpms/marconi-server create mode 100644 lib/marconi diff --git a/exercises/marconi.sh b/exercises/marconi.sh new file mode 100755 index 0000000000..1b9788dce6 --- /dev/null +++ b/exercises/marconi.sh @@ -0,0 +1,43 @@ +#!/usr/bin/env bash + +# **marconi.sh** + +# Sanity check that Marconi started if enabled + +echo "*********************************************************************" +echo "Begin DevStack Exercise: $0" +echo "*********************************************************************" + +# This script exits on an error so that errors don't compound and you see +# only the first error that occurred. +set -o errexit + +# Print the commands being run so that we can see the command that triggers +# an error. It is also useful for following allowing as the install occurs. 
+set -o xtrace + + +# Settings +# ======== + +# Keep track of the current directory +EXERCISE_DIR=$(cd $(dirname "$0") && pwd) +TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) + +# Import common functions +source $TOP_DIR/functions + +# Import configuration +source $TOP_DIR/openrc + +# Import exercise configuration +source $TOP_DIR/exerciserc + +is_service_enabled marconi-server || exit 55 + +curl http://$SERVICE_HOST:8888/v1/ 2>/dev/null | grep -q 'Auth' || die $LINENO "Marconi API not functioning!" + +set +o xtrace +echo "*********************************************************************" +echo "SUCCESS: End DevStack Exercise: $0" +echo "*********************************************************************" diff --git a/extras.d/70-marconi.sh b/extras.d/70-marconi.sh new file mode 100644 index 0000000000..a96a4c546c --- /dev/null +++ b/extras.d/70-marconi.sh @@ -0,0 +1,29 @@ +# marconi.sh - Devstack extras script to install Marconi + +if is_service_enabled marconi-server; then + if [[ "$1" == "source" ]]; then + # Initial source + source $TOP_DIR/lib/marconi + elif [[ "$1" == "stack" && "$2" == "install" ]]; then + echo_summary "Installing Marconi" + install_marconiclient + install_marconi + elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then + echo_summary "Configuring Marconi" + configure_marconi + configure_marconiclient + + if is_service_enabled key; then + create_marconi_accounts + fi + + elif [[ "$1" == "stack" && "$2" == "extra" ]]; then + echo_summary "Initializing Marconi" + init_marconi + start_marconi + fi + + if [[ "$1" == "unstack" ]]; then + stop_marconi + fi +fi diff --git a/files/apts/marconi-server b/files/apts/marconi-server new file mode 100644 index 0000000000..bc7ef22445 --- /dev/null +++ b/files/apts/marconi-server @@ -0,0 +1,3 @@ +python-pymongo +mongodb-server +pkg-config diff --git a/files/rpms/marconi-server b/files/rpms/marconi-server new file mode 100644 index 0000000000..d7b7ea89c1 --- /dev/null +++ b/files/rpms/marconi-server @@ -0,0 
+1,3 @@ +selinux-policy-targeted +mongodb-server +pymongo diff --git a/lib/marconi b/lib/marconi new file mode 100644 index 0000000000..8e0b82b49e --- /dev/null +++ b/lib/marconi @@ -0,0 +1,171 @@ +# lib/marconi +# Install and start **Marconi** service + +# To enable a minimal set of Marconi services, add the following to localrc: +# enable_service marconi-server +# +# Dependencies: +# - functions +# - OS_AUTH_URL for auth in api +# - DEST set to the destination directory +# - SERVICE_PASSWORD, SERVICE_TENANT_NAME for auth in api +# - STACK_USER service user + +# stack.sh +# --------- +# install_marconi +# configure_marconi +# init_marconi +# start_marconi +# stop_marconi +# cleanup_marconi + +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + +# Set up default directories +MARCONI_DIR=$DEST/marconi +MARCONICLIENT_DIR=$DEST/python-marconiclient +MARCONI_CONF_DIR=/etc/marconi +MARCONI_CONF=$MARCONI_CONF_DIR/marconi.conf +MARCONI_API_LOG_DIR=/var/log/marconi-api +MARCONI_AUTH_CACHE_DIR=${MARCONI_AUTH_CACHE_DIR:-/var/cache/marconi} + +# Support potential entry-points console scripts +MARCONI_BIN_DIR=$(get_python_exec_prefix) + +# Set up database backend +MARCONI_BACKEND=${MARCONI_BACKEND:-mongodb} + + +# Set Marconi repository +MARCONI_REPO=${MARCONI_REPO:-${GIT_BASE}/openstack/marconi.git} +MARCONI_BRANCH=${MARCONI_BRANCH:-master} + +# Set client library repository +MARCONICLIENT_REPO=${MARCONICLIENT_REPO:-${GIT_BASE}/openstack/python-marconiclient.git} +MARCONICLIENT_BRANCH=${MARCONICLIENT_BRANCH:-master} + +# Functions +# --------- + +# cleanup_marconi() - Remove residual data files, anything left over from previous +# runs that a clean run would need to clean up +function cleanup_marconi() { + mongo marconi --eval "db.dropDatabase();" +} + +# configure_marconiclient() - Set config files, create data dirs, etc +function configure_marconiclient() { + setup_develop $MARCONICLIENT_DIR +} + +# configure_marconi() - Set 
config files, create data dirs, etc +function configure_marconi() { + setup_develop $MARCONI_DIR + + [ ! -d $MARCONI_CONF_DIR ] && sudo mkdir -m 755 -p $MARCONI_CONF_DIR + sudo chown $USER $MARCONI_CONF_DIR + + [ ! -d $MARCONI_API_LOG_DIR ] && sudo mkdir -m 755 -p $MARCONI_API_LOG_DIR + sudo chown $USER $MARCONI_API_LOG_DIR + + iniset $MARCONI_CONF DEFAULT verbose True + iniset $MARCONI_CONF 'drivers:transport:wsgi' bind '0.0.0.0' + + # Install the policy file for the API server + cp $MARCONI_DIR/etc/marconi/policy.json $MARCONI_CONF_DIR + iniset $MARCONI_CONF DEFAULT policy_file $MARCONI_CONF_DIR/policy.json + + iniset $MARCONI_CONF keystone_authtoken auth_protocol http + iniset $MARCONI_CONF keystone_authtoken admin_user marconi + iniset $MARCONI_CONF keystone_authtoken admin_password $SERVICE_PASSWORD + iniset $MARCONI_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME + iniset $MARCONI_CONF keystone_authtoken signing_dir $MARCONI_AUTH_CACHE_DIR + + if [[ "$MARCONI_BACKEND" = 'mongodb' ]]; then + iniset $MARCONI_CONF database connection mongodb://localhost:27017/marconi + configure_mongodb + cleanup_marconi + fi +} + +function configure_mongodb() { + # Set nssize to 2GB. This increases the number of namespaces supported + # # per database. + sudo sed -i '/--nssize/!s/OPTIONS=\"/OPTIONS=\"--nssize 2047 /' /etc/sysconfig/mongod + + restart_service mongod +} + +# init_marconi() - Initialize etc. 
+function init_marconi() { + # Create cache dir + sudo mkdir -p $MARCONI_AUTH_CACHE_DIR + sudo chown $STACK_USER $MARCONI_AUTH_CACHE_DIR + rm -f $MARCONI_AUTH_CACHE_DIR/* +} + +# install_marconi() - Collect source and prepare +function install_marconi() { + git_clone $MARCONI_REPO $MARCONI_DIR $MARCONI_BRANCH + setup_develop $MARCONI_DIR +} + +# install_marconiclient() - Collect source and prepare +function install_marconiclient() { + git_clone $MARCONICLIENT_REPO $MARCONICLIENT_DIR $MARCONICLIENT_BRANCH + setup_develop $MARCONICLIENT_DIR +} + +# start_marconi() - Start running processes, including screen +function start_marconi() { + screen_it marconi-server "marconi-server --config-file $MARCONI_CONF" +} + +# stop_marconi() - Stop running processes +function stop_marconi() { + # Kill the marconi screen windows + for serv in marconi-server; do + screen -S $SCREEN_NAME -p $serv -X kill + done +} + +function create_marconi_accounts() { + SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") + ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }") + + MARCONI_USER=$(get_id keystone user-create --name=marconi \ + --pass="$SERVICE_PASSWORD" \ + --tenant_id $SERVICE_TENANT \ + --email=marconi@example.com) + keystone user-role-add --tenant-id $SERVICE_TENANT \ + --user-id $MARCONI_USER \ + --role-id $ADMIN_ROLE + if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then + MARCONI_SERVICE=$(get_id keystone service-create \ + --name=marconi \ + --type=queuing \ + --description="Marconi Service") + keystone endpoint-create \ + --region RegionOne \ + --service_id $MARCONI_SERVICE \ + --publicurl "http://$SERVICE_HOST:8888" \ + --adminurl "http://$SERVICE_HOST:8888" \ + --internalurl "http://$SERVICE_HOST:8888" + fi + +} + + +# Restore xtrace +$XTRACE + +# Local variables: +# mode: shell-script +# End: From bc7f643bb7c7fe704cf436b9d96d878adaadf3c4 Mon Sep 17 00:00:00 2001 From: Salvatore Orlando Date: Mon, 25 Nov 2013 10:11:14 -0800 Subject: 
[PATCH 0545/4704] Fix neutron log format with colorization Closes-Bug: #1254817 Change-Id: I6de17ef15c18e2f8ab246934461a2b7c6ae4f95f --- lib/neutron | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron b/lib/neutron index 70417be5d3..1c01d05e5f 100644 --- a/lib/neutron +++ b/lib/neutron @@ -556,7 +556,7 @@ function _configure_neutron_common() { # Format logging if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then - setup_colorized_logging $NEUTRON_CONF DEFAULT + setup_colorized_logging $NEUTRON_CONF DEFAULT project_id fi _neutron_setup_rootwrap From 6cc0c12dd04e7d4f5f78c492ee46df0bd6bd8c43 Mon Sep 17 00:00:00 2001 From: Sushil Kumar Date: Thu, 28 Nov 2013 07:35:11 +0000 Subject: [PATCH 0546/4704] Updates .gitignore Closes-Bug: #1255854 Change-Id: I22b6fa351dd5b654783f432c01785c706eff8397 --- .gitignore | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index a3d5b0d02a..49eb188dd8 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,6 @@ proto *~ -.*.sw[nop] +.*.sw? *.log *.log.[1-9] src From 130c90ed0b6111946e4004b125b5ae1a92772a08 Mon Sep 17 00:00:00 2001 From: Akihiro Motoki Date: Thu, 28 Nov 2013 16:56:51 +0900 Subject: [PATCH 0547/4704] Do not create an unnecessary file "ml2" Previously when running devstack with Neutron ML2 plugin, an unnecessary file "ml2" is created in devstack directory. It is because when the first argument is not defined the second argument becomes the first one. This change moves the first "options" argument of populate_ml2_config to the last and checks the given options has a value before adding them to a file. 
Change-Id: I9ff40456798c42216d414d5f8d443e671ab7d497 Close-Bug: #1255853 --- lib/neutron_plugins/ml2 | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2 index 8d2e303854..b5b1873f3f 100644 --- a/lib/neutron_plugins/ml2 +++ b/lib/neutron_plugins/ml2 @@ -34,10 +34,13 @@ Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS=${Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS:-} ML2_L3_PLUGIN=${ML2_L3_PLUGIN:-neutron.services.l3_router.l3_router_plugin.L3RouterPlugin} function populate_ml2_config() { - OPTS=$1 - CONF=$2 - SECTION=$3 + CONF=$1 + SECTION=$2 + OPTS=$3 + if [ -z "$OPTS" ]; then + return + fi for I in "${OPTS[@]}"; do # Replace the first '=' with ' ' for iniset syntax iniset $CONF $SECTION ${I/=/ } @@ -102,19 +105,17 @@ function neutron_plugin_configure_service() { # Since we enable the tunnel TypeDrivers, also enable a local_ip iniset /$Q_PLUGIN_CONF_FILE ovs local_ip $HOST_IP - populate_ml2_config mechanism_drivers=$Q_ML2_PLUGIN_MECHANISM_DRIVERS /$Q_PLUGIN_CONF_FILE ml2 + populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 mechanism_drivers=$Q_ML2_PLUGIN_MECHANISM_DRIVERS - populate_ml2_config type_drivers=$Q_ML2_PLUGIN_TYPE_DRIVERS /$Q_PLUGIN_CONF_FILE ml2 + populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 type_drivers=$Q_ML2_PLUGIN_TYPE_DRIVERS - populate_ml2_config $Q_SRV_EXTRA_OPTS /$Q_PLUGIN_CONF_FILE ml2 + populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 $Q_SRV_EXTRA_OPTS - populate_ml2_config $Q_ML2_PLUGIN_GRE_TYPE_OPTIONS /$Q_PLUGIN_CONF_FILE ml2_type_gre + populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_type_gre $Q_ML2_PLUGIN_GRE_TYPE_OPTIONS - populate_ml2_config $Q_ML2_PLUGIN_VXLAN_TYPE_OPTIONS /$Q_PLUGIN_CONF_FILE ml2_type_vxlan + populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_type_vxlan $Q_ML2_PLUGIN_VXLAN_TYPE_OPTIONS - if [ "$Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS" != "" ]; then - populate_ml2_config $Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS /$Q_PLUGIN_CONF_FILE ml2_type_vlan - fi + populate_ml2_config /$Q_PLUGIN_CONF_FILE 
ml2_type_vlan $Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS } function has_neutron_plugin_security_group() { From 500a32f4f8e8526ba075b61f336cf91dc9d8c652 Mon Sep 17 00:00:00 2001 From: Edgar Magana Date: Mon, 2 Dec 2013 14:27:31 -0800 Subject: [PATCH 0548/4704] Adds entries for missing parameters in PLUMgrid plugin Three configuration parameters were missing for the PLUMgrid plugin. In this patch all those three have been properly added. Change-Id: If070aa5eb35678d0984470ebcd43fd99e08bcc8a Closes-Bug: #1255808 --- lib/neutron_plugins/plumgrid | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/lib/neutron_plugins/plumgrid b/lib/neutron_plugins/plumgrid index d4050bb951..bccd301011 100644 --- a/lib/neutron_plugins/plumgrid +++ b/lib/neutron_plugins/plumgrid @@ -6,8 +6,6 @@ MY_XTRACE=$(set +o | grep xtrace) set +o xtrace -#source $TOP_DIR/lib/neutron_plugins/ovs_base - function neutron_plugin_create_nova_conf() { : } @@ -23,11 +21,17 @@ function neutron_plugin_configure_common() { Q_PLUGIN_CLASS="neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin.NeutronPluginPLUMgridV2" PLUMGRID_DIRECTOR_IP=${PLUMGRID_DIRECTOR_IP:-localhost} PLUMGRID_DIRECTOR_PORT=${PLUMGRID_DIRECTOR_PORT:-7766} + PLUMGRID_ADMIN=${PLUMGRID_ADMIN:-username} + PLUMGRID_PASSWORD=${PLUMGRID_PASSWORD:-password} + PLUMGRID_TIMEOUT=${PLUMGRID_TIMEOUT:-70} } function neutron_plugin_configure_service() { iniset /$Q_PLUGIN_CONF_FILE PLUMgridDirector director_server $PLUMGRID_DIRECTOR_IP iniset /$Q_PLUGIN_CONF_FILE PLUMgridDirector director_server_port $PLUMGRID_DIRECTOR_PORT + iniset /$Q_PLUGIN_CONF_FILE PLUMgridDirector username $PLUMGRID_ADMIN + iniset /$Q_PLUGIN_CONF_FILE PLUMgridDirector password $PLUMGRID_PASSWORD + iniset /$Q_PLUGIN_CONF_FILE PLUMgridDirector servertimeout $PLUMGRID_TIMEOUT } function neutron_plugin_configure_debug_command() { From f9e773982a905517d78ccaf51ef00ce1860bf591 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Tue, 3 Dec 2013 06:17:16 +0100 Subject: [PATCH 
0549/4704] Use fixed network for ssh when n-net is enabled Server rebuild test has stability issues with n-net + floating ip. Change-Id: I8a921fddbca49c8499938a25f9722ea40cee76cc --- lib/tempest | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index 803b740221..5a2c78d5e5 100644 --- a/lib/tempest +++ b/lib/tempest @@ -193,7 +193,9 @@ function configure_tempest() { if [ "$Q_USE_NAMESPACE" != "False" ]; then tenant_networks_reachable=false - ssh_connect_method="floating" + if ! is_service_enabled n-net; then + ssh_connect_method="floating" + fi else tenant_networks_reachable=true fi From 7858510ba7d8fa44878374ad71b14e21618adc17 Mon Sep 17 00:00:00 2001 From: Sergey Lukjanov Date: Wed, 4 Dec 2013 01:41:11 +0400 Subject: [PATCH 0550/4704] Fix savanna-dashboard git repo clone dest It breaks savanna d-g jobs due to the ERROR_ON_CLONE=True in d-g (enforce that nothing will be cloned during the installation). Change-Id: I0531e1baf7252c31eb63ee5b46c28d1dfa7d0a1b --- lib/savanna-dashboard | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/savanna-dashboard b/lib/savanna-dashboard index e96762285c..7713a78637 100644 --- a/lib/savanna-dashboard +++ b/lib/savanna-dashboard @@ -29,7 +29,7 @@ SAVANNA_PYTHONCLIENT_REPO=${SAVANNA_PYTHONCLIENT_REPO:-${GIT_BASE}/openstack/pyt SAVANNA_PYTHONCLIENT_BRANCH=${SAVANNA_PYTHONCLIENT_BRANCH:-master} # Set up default directories -SAVANNA_DASHBOARD_DIR=$DEST/savanna_dashboard +SAVANNA_DASHBOARD_DIR=$DEST/savanna-dashboard SAVANNA_PYTHONCLIENT_DIR=$DEST/python-savannaclient # Functions From 90bcd2ff4d4ea11883a58521e58b67f2d981693b Mon Sep 17 00:00:00 2001 From: Arnaud Legendre Date: Fri, 22 Nov 2013 16:05:39 -0800 Subject: [PATCH 0551/4704] Attempt to retrieve the vmdk descriptor data-pair VMDK formats such as monolithicFlat and vmfs require two files to be fully consumable by the Nova drivers (a descriptor-data pair: *.vmdk and *-flat.vmdk). 
On the upload of the descriptor (*.vmdk), upload_image.sh should attempt to retrieve the *-flat.vmdk. The same way, the descriptor should be retrieved when a flat disk is uploaded. On success, the upload script will be able to use the flat disk as the image content and the relevant descriptor settings as the image metadata. Change-Id: I9214754029c46dd60b9e7d606d84d8819a498a8d Closes-Bug: #1252443 --- functions | 90 +++++++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 84 insertions(+), 6 deletions(-) diff --git a/functions b/functions index 6137aafd6e..4d5b4b574f 100644 --- a/functions +++ b/functions @@ -1351,10 +1351,9 @@ function upload_image() { # Create a directory for the downloaded image tarballs. mkdir -p $FILES/images - + IMAGE_FNAME=`basename "$image_url"` if [[ $image_url != file* ]]; then # Downloads the image (uec ami+aki style), then extracts it. - IMAGE_FNAME=`basename "$image_url"` if [[ ! -f $FILES/$IMAGE_FNAME || "$(stat -c "%s" $FILES/$IMAGE_FNAME)" = "0" ]]; then wget -c $image_url -O $FILES/$IMAGE_FNAME if [[ $? -ne 0 ]]; then @@ -1410,13 +1409,92 @@ function upload_image() { vmdk_create_type="$(head -25 $IMAGE | grep -a -F -m 1 'createType=' $IMAGE)" vmdk_create_type="${vmdk_create_type#*\"}" vmdk_create_type="${vmdk_create_type%?}" + + descriptor_data_pair_msg="Monolithic flat and VMFS disks "` + `"should use a descriptor-data pair." if [[ "$vmdk_create_type" = "monolithicSparse" ]]; then vmdk_disktype="sparse" - elif [[ "$vmdk_create_type" = "monolithicFlat" ]]; then - die $LINENO "Monolithic flat disks should use a descriptor-data pair." \ - "Please provide the disk and not the descriptor." 
+ elif [[ "$vmdk_create_type" = "monolithicFlat" || \ + "$vmdk_create_type" = "vmfs" ]]; then + # Attempt to retrieve the *-flat.vmdk + flat_fname="$(head -25 $IMAGE | grep -G 'RW\|RDONLY [0-9]+ FLAT\|VMFS' $IMAGE)" + flat_fname="${flat_fname#*\"}" + flat_fname="${flat_fname%?}" + if [[ -z "$flat_name" ]]; then + flat_fname="$IMAGE_NAME-flat.vmdk" + fi + path_len=`expr ${#image_url} - ${#IMAGE_FNAME}` + flat_url="${image_url:0:$path_len}$flat_fname" + warn $LINENO "$descriptor_data_pair_msg"` + `" Attempt to retrieve the *-flat.vmdk: $flat_url" + if [[ $flat_url != file* ]]; then + if [[ ! -f $FILES/$flat_fname || \ + "$(stat -c "%s" $FILES/$flat_fname)" = "0" ]]; then + wget -c $flat_url -O $FILES/$flat_fname + if [[ $? -ne 0 ]]; then + echo "Flat disk not found: $flat_url" + flat_found=false + fi + fi + if $flat_found; then + IMAGE="$FILES/${flat_fname}" + fi + else + IMAGE=$(echo $flat_url | sed "s/^file:\/\///g") + if [[ ! -f $IMAGE || "$(stat -c "%s" $IMAGE)" == "0" ]]; then + echo "Flat disk not found: $flat_url" + flat_found=false + fi + if ! $flat_found; then + IMAGE=$(echo $image_url | sed "s/^file:\/\///g") + fi + fi + if $flat_found; then + IMAGE_NAME="${flat_fname}" + fi + vmdk_disktype="preallocated" + elif [[ -z "$vmdk_create_type" ]]; then + # *-flat.vmdk provided: attempt to retrieve the descriptor (*.vmdk) + # to retrieve appropriate metadata + if [[ ${IMAGE_NAME: -5} != "-flat" ]]; then + warn $LINENO "Expected filename suffix: '-flat'."` + `" Filename provided: ${IMAGE_NAME}" + else + descriptor_fname="${IMAGE_NAME:0:${#IMAGE_NAME} - 5}.vmdk" + path_len=`expr ${#image_url} - ${#IMAGE_FNAME}` + flat_path="${image_url:0:$path_len}" + descriptor_url=$flat_path$descriptor_fname + warn $LINENO "$descriptor_data_pair_msg"` + `" Attempt to retrieve the descriptor *.vmdk: $descriptor_url" + if [[ $flat_path != file* ]]; then + if [[ ! 
-f $FILES/$descriptor_fname || \ + "$(stat -c "%s" $FILES/$descriptor_fname)" = "0" ]]; then + wget -c $descriptor_url -O $FILES/$descriptor_fname + if [[ $? -ne 0 ]]; then + warn $LINENO "Descriptor not found $descriptor_url" + descriptor_found=false + fi + fi + descriptor_url="$FILES/$descriptor_fname" + else + descriptor_url=$(echo $descriptor_url | sed "s/^file:\/\///g") + if [[ ! -f $descriptor_url || \ + "$(stat -c "%s" $descriptor_url)" == "0" ]]; then + warn $LINENO "Descriptor not found $descriptor_url" + descriptor_found=false + fi + fi + if $descriptor_found; then + vmdk_adapter_type="$(head -25 $descriptor_url |"` + `"grep -a -F -m 1 'ddb.adapterType =' $descriptor_url)" + vmdk_adapter_type="${vmdk_adapter_type#*\"}" + vmdk_adapter_type="${vmdk_adapter_type%?}" + fi + fi + #TODO(alegendre): handle streamOptimized once supported by the VMware driver. + vmdk_disktype="preallocated" else - #TODO(alegendre): handle streamOptimized once supported by VMware driver. + #TODO(alegendre): handle streamOptimized once supported by the VMware driver. vmdk_disktype="preallocated" fi From 1bbb0ca9c6126b726ed21738b76befe40345c773 Mon Sep 17 00:00:00 2001 From: Ana Krivokapic Date: Wed, 4 Dec 2013 15:25:45 +0100 Subject: [PATCH 0552/4704] Fix noVNC git repo Recent change switched base git URL to git.openstack.org. However, noVNC is only hosted on GitHub. Change git repo URL for noVNC back to the GitHub one. 
Change-Id: Iaa9f570639301be1c29cc400c1c73afcbf637b70 --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index 7eda5a5671..410f9d8d05 100644 --- a/stackrc +++ b/stackrc @@ -178,7 +178,7 @@ BM_POSEUR_REPO=${BM_POSEUR_REPO:-${GIT_BASE}/tripleo/bm_poseur.git} BM_POSEUR_BRANCH=${BM_POSEUR_BRANCH:-master} # a websockets/html5 or flash powered VNC console for vm instances -NOVNC_REPO=${NOVNC_REPO:-${GIT_BASE}/kanaka/noVNC.git} +NOVNC_REPO=${NOVNC_REPO:-https://github.com/kanaka/noVNC.git} NOVNC_BRANCH=${NOVNC_BRANCH:-master} # ryu service From ab77587a371dea59055484b6f9e4ee1a434fcaf0 Mon Sep 17 00:00:00 2001 From: Rafael Folco Date: Mon, 2 Dec 2013 14:04:32 -0200 Subject: [PATCH 0553/4704] Generate glance image with SCSI bus type for ppc arch This patch fixes wrong assumptions for bus types of disk and cdrom on ppc64. Qemu driver assumes IDE bus type for cdrom device, which is not supported on ppc arch. Adds capability to add --property key-value to the glance image-create command. Using double brackets for portability reasons. 
Change-Id: I9f55fa0b6a894a93926e4f8c3d0ea410b5283f9c --- functions | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/functions b/functions index 6137aafd6e..78c676fe3e 100644 --- a/functions +++ b/functions @@ -554,7 +554,7 @@ function exit_distro_not_supported { function is_arch { ARCH_TYPE=$1 - [ "($uname -m)" = "$ARCH_TYPE" ] + [[ "$(uname -m)" == "$ARCH_TYPE" ]] } # Checks if installed Apache is <= given version @@ -1510,11 +1510,15 @@ function upload_image() { *) echo "Do not know what to do with $IMAGE_FNAME"; false;; esac + if is_arch "ppc64"; then + IMG_PROPERTY="--property hw_disk_bus=scsi --property hw_cdrom_bus=scsi" + fi + if [ "$CONTAINER_FORMAT" = "bare" ]; then if [ "$UNPACK" = "zcat" ]; then - glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --is-public True --container-format=$CONTAINER_FORMAT --disk-format $DISK_FORMAT < <(zcat --force "${IMAGE}") + glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" $IMG_PROPERTY --is-public True --container-format=$CONTAINER_FORMAT --disk-format $DISK_FORMAT < <(zcat --force "${IMAGE}") else - glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --is-public True --container-format=$CONTAINER_FORMAT --disk-format $DISK_FORMAT < "${IMAGE}" + glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" $IMG_PROPERTY --is-public True --container-format=$CONTAINER_FORMAT --disk-format $DISK_FORMAT < "${IMAGE}" fi else # Use glance client to add the kernel the root filesystem. @@ -1522,12 +1526,12 @@ function upload_image() { # kernel for use when uploading the root filesystem. 
KERNEL_ID=""; RAMDISK_ID=""; if [ -n "$KERNEL" ]; then - KERNEL_ID=$(glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME-kernel" --is-public True --container-format aki --disk-format aki < "$KERNEL" | grep ' id ' | get_field 2) + KERNEL_ID=$(glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME-kernel" $IMG_PROPERTY --is-public True --container-format aki --disk-format aki < "$KERNEL" | grep ' id ' | get_field 2) fi if [ -n "$RAMDISK" ]; then - RAMDISK_ID=$(glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME-ramdisk" --is-public True --container-format ari --disk-format ari < "$RAMDISK" | grep ' id ' | get_field 2) + RAMDISK_ID=$(glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME-ramdisk" $IMG_PROPERTY --is-public True --container-format ari --disk-format ari < "$RAMDISK" | grep ' id ' | get_field 2) fi - glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "${IMAGE_NAME%.img}" --is-public True --container-format ami --disk-format ami ${KERNEL_ID:+--property kernel_id=$KERNEL_ID} ${RAMDISK_ID:+--property ramdisk_id=$RAMDISK_ID} < "${IMAGE}" + glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "${IMAGE_NAME%.img}" $IMG_PROPERTY --is-public True --container-format ami --disk-format ami ${KERNEL_ID:+--property kernel_id=$KERNEL_ID} ${RAMDISK_ID:+--property ramdisk_id=$RAMDISK_ID} < "${IMAGE}" fi } From 9a48a351fbb3cadb02cf0162d2bc66054cb6def1 Mon Sep 17 00:00:00 2001 From: Bob Ball Date: Wed, 4 Dec 2013 23:13:57 +0000 Subject: [PATCH 0554/4704] XenAPI: Update DomU to Ubuntu Saucy Driven by a race condition in LVM being hit in the tempest tests. This is a locking race between a file lock and a semaphore which is fixed in 2.02.96-5. 
Change-Id: I8a8c215c90a0602288292ffd06b7694d2db6219e --- tools/xen/xenrc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/xen/xenrc b/tools/xen/xenrc index bdcaf992b2..5796268aaa 100644 --- a/tools/xen/xenrc +++ b/tools/xen/xenrc @@ -57,8 +57,8 @@ PUB_IP=${PUB_IP:-172.24.4.10} PUB_NETMASK=${PUB_NETMASK:-255.255.255.0} # Ubuntu install settings -UBUNTU_INST_RELEASE="precise" -UBUNTU_INST_TEMPLATE_NAME="Ubuntu 12.04 (64-bit) for DevStack" +UBUNTU_INST_RELEASE="saucy" +UBUNTU_INST_TEMPLATE_NAME="Ubuntu 13.10 (64-bit) for DevStack" # For 12.04 use "precise" and update template name # However, for 12.04, you should be using # XenServer 6.1 and later or XCP 1.6 or later From a49422e33ec08c2e8391168dd71689674ad8fc7c Mon Sep 17 00:00:00 2001 From: sbauza Date: Thu, 5 Dec 2013 14:56:14 +0100 Subject: [PATCH 0555/4704] Fix install_get_pip in order to work behind a proxy Proxy envvars are not passed to pip when sudo, we need to export them. Change-Id: I67622f5ea8ecb948006e032bdc395ecf36914146 Closes-Bug: #1258155 --- tools/install_pip.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/install_pip.sh b/tools/install_pip.sh index 6b9b25e3e9..a65a77e079 100755 --- a/tools/install_pip.sh +++ b/tools/install_pip.sh @@ -67,7 +67,7 @@ function install_get_pip() { curl -O $PIP_GET_PIP_URL; \ ) fi - sudo python $FILES/get-pip.py + sudo -E python $FILES/get-pip.py } function install_pip_tarball() { @@ -75,7 +75,7 @@ function install_pip_tarball() { curl -O $PIP_TAR_URL; \ tar xvfz pip-$INSTALL_PIP_VERSION.tar.gz 1>/dev/null; \ cd pip-$INSTALL_PIP_VERSION; \ - sudo python setup.py install 1>/dev/null; \ + sudo -E python setup.py install 1>/dev/null; \ ) } From 526b79f98825963c5fbb157bca5a54750bd045af Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Fri, 22 Nov 2013 11:30:44 -0600 Subject: [PATCH 0556/4704] Eradicate last of underscores in options The --ip_range in stack.sh remains due to nova-manage needing to be fixed. 
(Rebased 05Dec2013) Change-Id: Ic0f93d41b6edfdc5deb82ae820e2c0c5a8bce24e --- exercises/boot_from_volume.sh | 6 +++--- exercises/client-args.sh | 16 +++++++--------- exercises/floating_ips.sh | 2 +- exercises/neutron-adv-test.sh | 4 ++-- exercises/volumes.sh | 4 ++-- stack.sh | 2 +- 6 files changed, 16 insertions(+), 18 deletions(-) diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh index 3b3d3ba63b..ed8ba6310e 100755 --- a/exercises/boot_from_volume.sh +++ b/exercises/boot_from_volume.sh @@ -149,7 +149,7 @@ fi # Create the bootable volume start_time=$(date +%s) -cinder create --image-id $IMAGE --display_name=$VOL_NAME --display_description "test bootable volume: $VOL_NAME" $DEFAULT_VOLUME_SIZE || \ +cinder create --image-id $IMAGE --display-name=$VOL_NAME --display-description "test bootable volume: $VOL_NAME" $DEFAULT_VOLUME_SIZE || \ die $LINENO "Failure creating volume $VOL_NAME" if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then echo "Volume $VOL_NAME not created" @@ -165,10 +165,10 @@ die_if_not_set $LINENO VOL_ID "Failure retrieving volume ID for $VOL_NAME" # Boot instance # ------------- -# Boot using the --block_device_mapping param. The format of mapping is: +# Boot using the --block-device-mapping param. 
The format of mapping is: # =::: # Leaving the middle two fields blank appears to do-the-right-thing -VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE --block-device-mapping vda=$VOL_ID --security_groups=$SECGROUP --key_name $KEY_NAME $VM_NAME | grep ' id ' | get_field 2) +VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE --block-device-mapping vda=$VOL_ID --security-groups=$SECGROUP --key-name $KEY_NAME $VM_NAME | grep ' id ' | get_field 2) die_if_not_set $LINENO VM_UUID "Failure launching $VM_NAME" # Check that the status is active within ACTIVE_TIMEOUT seconds diff --git a/exercises/client-args.sh b/exercises/client-args.sh index 1e68042cec..e79774f98c 100755 --- a/exercises/client-args.sh +++ b/exercises/client-args.sh @@ -56,10 +56,8 @@ unset OS_PASSWORD unset OS_AUTH_URL # Common authentication args -TENANT_ARG="--os_tenant_name=$x_TENANT_NAME" -TENANT_ARG_DASH="--os-tenant-name=$x_TENANT_NAME" -ARGS="--os_username=$x_USERNAME --os_password=$x_PASSWORD --os_auth_url=$x_AUTH_URL" -ARGS_DASH="--os-username=$x_USERNAME --os-password=$x_PASSWORD --os-auth-url=$x_AUTH_URL" +TENANT_ARG="--os-tenant-name=$x_TENANT_NAME" +ARGS="--os-username=$x_USERNAME --os-password=$x_PASSWORD --os-auth-url=$x_AUTH_URL" # Set global return RETURN=0 @@ -71,7 +69,7 @@ if [[ "$ENABLED_SERVICES" =~ "key" ]]; then STATUS_KEYSTONE="Skipped" else echo -e "\nTest Keystone" - if keystone $TENANT_ARG_DASH $ARGS_DASH catalog --service identity; then + if keystone $TENANT_ARG $ARGS catalog --service identity; then STATUS_KEYSTONE="Succeeded" else STATUS_KEYSTONE="Failed" @@ -90,7 +88,7 @@ if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then else # Test OSAPI echo -e "\nTest Nova" - if nova $TENANT_ARG_DASH $ARGS_DASH flavor-list; then + if nova $TENANT_ARG $ARGS flavor-list; then STATUS_NOVA="Succeeded" else STATUS_NOVA="Failed" @@ -107,7 +105,7 @@ if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then STATUS_CINDER="Skipped" else echo -e "\nTest Cinder" - if cinder $TENANT_ARG_DASH 
$ARGS_DASH list; then + if cinder $TENANT_ARG $ARGS list; then STATUS_CINDER="Succeeded" else STATUS_CINDER="Failed" @@ -124,7 +122,7 @@ if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then STATUS_GLANCE="Skipped" else echo -e "\nTest Glance" - if glance $TENANT_ARG_DASH $ARGS_DASH image-list; then + if glance $TENANT_ARG $ARGS image-list; then STATUS_GLANCE="Succeeded" else STATUS_GLANCE="Failed" @@ -141,7 +139,7 @@ if [[ "$ENABLED_SERVICES" =~ "swift" || "$ENABLED_SERVICES" =~ "s-proxy" ]]; the STATUS_SWIFT="Skipped" else echo -e "\nTest Swift" - if swift $TENANT_ARG_DASH $ARGS_DASH stat; then + if swift $TENANT_ARG $ARGS stat; then STATUS_SWIFT="Succeeded" else STATUS_SWIFT="Failed" diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh index 4d71d49163..7055278f35 100755 --- a/exercises/floating_ips.sh +++ b/exercises/floating_ips.sh @@ -127,7 +127,7 @@ fi # Boot instance # ------------- -VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE --security_groups=$SECGROUP $VM_NAME | grep ' id ' | get_field 2) +VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE --security-groups=$SECGROUP $VM_NAME | grep ' id ' | get_field 2) die_if_not_set $LINENO VM_UUID "Failure launching $VM_NAME" # Check that the status is active within ACTIVE_TIMEOUT seconds diff --git a/exercises/neutron-adv-test.sh b/exercises/neutron-adv-test.sh index 28e0a3d441..0a100c0fe8 100755 --- a/exercises/neutron-adv-test.sh +++ b/exercises/neutron-adv-test.sh @@ -238,9 +238,9 @@ function create_network { source $TOP_DIR/openrc admin admin local TENANT_ID=$(get_tenant_id $TENANT) source $TOP_DIR/openrc $TENANT $TENANT - local NET_ID=$(neutron net-create --tenant_id $TENANT_ID $NET_NAME $EXTRA| grep ' id ' | awk '{print $4}' ) + local NET_ID=$(neutron net-create --tenant-id $TENANT_ID $NET_NAME $EXTRA| grep ' id ' | awk '{print $4}' ) die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $TENANT_ID $NET_NAME $EXTRA" - neutron subnet-create --ip_version 4 --tenant_id $TENANT_ID 
--gateway $GATEWAY $NET_ID $CIDR + neutron subnet-create --ip-version 4 --tenant-id $TENANT_ID --gateway $GATEWAY $NET_ID $CIDR neutron-debug probe-create --device-owner compute $NET_ID source $TOP_DIR/openrc demo demo } diff --git a/exercises/volumes.sh b/exercises/volumes.sh index 77fa4ebc25..21b5d21c04 100755 --- a/exercises/volumes.sh +++ b/exercises/volumes.sh @@ -130,7 +130,7 @@ fi # Boot instance # ------------- -VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE --security_groups=$SECGROUP $VM_NAME | grep ' id ' | get_field 2) +VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE --security-groups=$SECGROUP $VM_NAME | grep ' id ' | get_field 2) die_if_not_set $LINENO VM_UUID "Failure launching $VM_NAME" # Check that the status is active within ACTIVE_TIMEOUT seconds @@ -156,7 +156,7 @@ fi # Create a new volume start_time=$(date +%s) -cinder create --display_name $VOL_NAME --display_description "test volume: $VOL_NAME" $DEFAULT_VOLUME_SIZE || \ +cinder create --display-name $VOL_NAME --display-description "test volume: $VOL_NAME" $DEFAULT_VOLUME_SIZE || \ die $LINENO "Failure creating volume $VOL_NAME" if ! timeout $ACTIVE_TIMEOUT sh -c "while ! 
cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then die $LINENO "Volume $VOL_NAME not created" diff --git a/stack.sh b/stack.sh index af01faa01e..22d184efd9 100755 --- a/stack.sh +++ b/stack.sh @@ -1083,7 +1083,7 @@ if is_service_enabled key && is_service_enabled swift3 && is_service_enabled nov die_if_not_set $LINENO NOVA_USER_ID "Failure retrieving NOVA_USER_ID for nova" NOVA_TENANT_ID=$(keystone tenant-list | grep " $SERVICE_TENANT_NAME " | get_field 1) die_if_not_set $LINENO NOVA_TENANT_ID "Failure retrieving NOVA_TENANT_ID for $SERVICE_TENANT_NAME" - CREDS=$(keystone ec2-credentials-create --user_id $NOVA_USER_ID --tenant_id $NOVA_TENANT_ID) + CREDS=$(keystone ec2-credentials-create --user-id $NOVA_USER_ID --tenant-id $NOVA_TENANT_ID) ACCESS_KEY=$(echo "$CREDS" | awk '/ access / { print $4 }') SECRET_KEY=$(echo "$CREDS" | awk '/ secret / { print $4 }') iniset $NOVA_CONF DEFAULT s3_access_key "$ACCESS_KEY" From 0c08e7b2b978b71fbb25cea6a9949cea0081db5c Mon Sep 17 00:00:00 2001 From: Sergey Lukjanov Date: Wed, 4 Dec 2013 18:03:25 +0400 Subject: [PATCH 0557/4704] Setup user and endpoints for Savanna * create savanna user with admin role for auth token checks * create service data_processing * create savanna endpoint * use savanna user for auth token checks It's needed for running tempest tests. 
Change-Id: Iff26960746e36012c275f43c0de0dedcaebc8b0a --- extras.d/70-savanna.sh | 1 + lib/savanna | 50 ++++++++++++++++++++++++++++++++++++------ 2 files changed, 44 insertions(+), 7 deletions(-) diff --git a/extras.d/70-savanna.sh b/extras.d/70-savanna.sh index f6881cc4f6..6bbe113fa7 100644 --- a/extras.d/70-savanna.sh +++ b/extras.d/70-savanna.sh @@ -14,6 +14,7 @@ if is_service_enabled savanna; then elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then echo_summary "Configuring Savanna" configure_savanna + create_savanna_accounts if is_service_enabled horizon; then configure_savanna_dashboard fi diff --git a/lib/savanna b/lib/savanna index e9dbe72643..6794e36dfd 100644 --- a/lib/savanna +++ b/lib/savanna @@ -3,7 +3,6 @@ # Dependencies: # ``functions`` file # ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined -# ``ADMIN_{TENANT_NAME|PASSWORD}`` must be defined # ``stack.sh`` calls the entry points in this order: # @@ -28,11 +27,12 @@ SAVANNA_BRANCH=${SAVANNA_BRANCH:-master} SAVANNA_DIR=$DEST/savanna SAVANNA_CONF_DIR=${SAVANNA_CONF_DIR:-/etc/savanna} SAVANNA_CONF_FILE=savanna.conf -ADMIN_TENANT_NAME=${ADMIN_TENANT_NAME:-admin} -ADMIN_NAME=${ADMIN_NAME:-admin} -ADMIN_PASSWORD=${ADMIN_PASSWORD:-nova} SAVANNA_DEBUG=${SAVANNA_DEBUG:-True} +SAVANNA_SERVICE_HOST=${SAVANNA_SERVICE_HOST:-$SERVICE_HOST} +SAVANNA_SERVICE_PORT=${SAVANNA_SERVICE_PORT:-8386} +SAVANNA_SERVICE_PROTOCOL=${SAVANNA_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} + # Support entry points installation of console scripts if [[ -d $SAVANNA_DIR/bin ]]; then SAVANNA_BIN_DIR=$SAVANNA_DIR/bin @@ -43,6 +43,42 @@ fi # Functions # --------- +# create_savanna_accounts() - Set up common required savanna accounts +# +# Tenant User Roles +# ------------------------------ +# service savanna admin +function create_savanna_accounts() { + + SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") + ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }") + + SAVANNA_USER=$(keystone 
user-create \ + --name=savanna \ + --pass="$SERVICE_PASSWORD" \ + --tenant_id $SERVICE_TENANT \ + --email=savanna@example.com \ + | grep " id " | get_field 2) + keystone user-role-add \ + --tenant-id $SERVICE_TENANT \ + --user-id $SAVANNA_USER \ + --role-id $ADMIN_ROLE + + if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then + SAVANNA_SERVICE=$(keystone service-create \ + --name=savanna \ + --type=data_processing \ + --description="Savanna Data Processing" \ + | grep " id " | get_field 2) + keystone endpoint-create \ + --region RegionOne \ + --service_id $SAVANNA_SERVICE \ + --publicurl "$SAVANNA_SERVICE_PROTOCOL://$SAVANNA_SERVICE_HOST:$SAVANNA_SERVICE_PORT/v1.1/\$(tenant_id)s" \ + --adminurl "$SAVANNA_SERVICE_PROTOCOL://$SAVANNA_SERVICE_HOST:$SAVANNA_SERVICE_PORT/v1.1/\$(tenant_id)s" \ + --internalurl "$SAVANNA_SERVICE_PROTOCOL://$SAVANNA_SERVICE_HOST:$SAVANNA_SERVICE_PORT/v1.1/\$(tenant_id)s" + fi +} + # configure_savanna() - Set config files, create data dirs, etc function configure_savanna() { @@ -54,9 +90,9 @@ function configure_savanna() { # Copy over savanna configuration file and configure common parameters. 
cp $SAVANNA_DIR/etc/savanna/savanna.conf.sample $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE - iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT os_admin_password $ADMIN_PASSWORD - iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT os_admin_username $ADMIN_NAME - iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT os_admin_tenant_name $ADMIN_TENANT_NAME + iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT os_admin_password $SERVICE_PASSWORD + iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT os_admin_username savanna + iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT os_admin_tenant_name $SERVICE_TENANT_NAME iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT debug $SAVANNA_DEBUG recreate_database savanna utf8 From 485dd811f6da244c794171362c8ae59d2e6f2d38 Mon Sep 17 00:00:00 2001 From: Geronimo Orozco Date: Fri, 29 Nov 2013 23:53:32 +0000 Subject: [PATCH 0558/4704] Adds python2 to general packages to be installed devstack works only for python2 python3 will break the install. This commit adds python2 to the general dependencies of ubuntu Change-Id: I7721ff85a63f635ff798407a5ee1d6766405c683 Closes-Bug: #1188215 --- files/apts/general | 1 + 1 file changed, 1 insertion(+) diff --git a/files/apts/general b/files/apts/general index fcf0b5b06e..aff687fab4 100644 --- a/files/apts/general +++ b/files/apts/general @@ -20,3 +20,4 @@ tcpdump euca2ools # only for testing client tar python-cmd2 # dist:precise +python2.7 From 7103a84e3900502648e81bd4313b777f2da63f92 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Sat, 7 Dec 2013 13:53:33 -0800 Subject: [PATCH 0559/4704] Workaround Cinder "wishlist" bug 1255593 for multi-backend volume_clear Cinder currently only applies the volume_clear setting from the DEFAULT section of cinder.conf if you're using a single backend. The Cinder team has determined this is a 'wishlist' item to propagate volume_clear to each backend, but it does impact usability and performance. 
To improve the performance of running Tempest with multi-backends in the gate, workaround the bug in devstack. Related-Bug: #1255593 Change-Id: Ia0ff5422f53eeda9a3ac4336eefec3b9bdea6da2 --- lib/cinder | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/lib/cinder b/lib/cinder index 9288685365..ef3bd81a1f 100644 --- a/lib/cinder +++ b/lib/cinder @@ -237,6 +237,11 @@ function configure_cinder() { iniset $CINDER_CONF lvmdriver-2 volume_group $VOLUME_GROUP2 iniset $CINDER_CONF lvmdriver-2 volume_driver cinder.volume.drivers.lvm.LVMISCSIDriver iniset $CINDER_CONF lvmdriver-2 volume_backend_name LVM_iSCSI_2 + # NOTE(mriedem): Work around Cinder "wishlist" bug 1255593 + if [[ "$CINDER_SECURE_DELETE" == "False" ]]; then + iniset $CINDER_CONF lvmdriver-1 volume_clear none + iniset $CINDER_CONF lvmdriver-2 volume_clear none + fi else iniset $CINDER_CONF DEFAULT volume_group $VOLUME_GROUP iniset $CINDER_CONF DEFAULT volume_name_template ${VOLUME_NAME_PREFIX}%s From ba0f1d36971fa59a5cc64d4508bc381a26964124 Mon Sep 17 00:00:00 2001 From: Rafael Folco Date: Fri, 6 Dec 2013 17:56:24 -0200 Subject: [PATCH 0560/4704] Qemu emulator requires at least 128MB of memory to boot on ppc64 The default nano and micro flavors need more memory to boot on ppc64. New flavors are 128MB and 256MB, respectively. Trailing spaces removed, again. Change-Id: Ic6740bda959754380982e67f753876dc6d984685 --- lib/tempest | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/lib/tempest b/lib/tempest index 5ee4e8a372..4400b31751 100644 --- a/lib/tempest +++ b/lib/tempest @@ -147,12 +147,21 @@ function configure_tempest() { if [[ -z "$DEFAULT_INSTANCE_TYPE" ]]; then available_flavors=$(nova flavor-list) if [[ ! 
( $available_flavors =~ 'm1.nano' ) ]]; then - nova flavor-create m1.nano 42 64 0 1 + if is_arch "ppc64"; then + # qemu needs at least 128MB of memory to boot on ppc64 + nova flavor-create m1.nano 42 128 0 1 + else + nova flavor-create m1.nano 42 64 0 1 + fi fi flavor_ref=42 boto_instance_type=m1.nano if [[ ! ( $available_flavors =~ 'm1.micro' ) ]]; then - nova flavor-create m1.micro 84 128 0 1 + if is_arch "ppc64"; then + nova flavor-create m1.micro 84 256 0 1 + else + nova flavor-create m1.micro 84 128 0 1 + fi fi flavor_ref_alt=84 else From bf36e8e4cf89a9de75746ce3e2ae1c98c3948993 Mon Sep 17 00:00:00 2001 From: Darragh O'Reilly Date: Mon, 9 Dec 2013 13:16:16 +0000 Subject: [PATCH 0561/4704] Make rejoin-stack.sh resume file logging if enabled This patch ensures that screen will resume logging to files after rejoin-stack.sh when SCREEN_LOGDIR is set. Change-Id: I4c3eae0df7755b700dd8acf4bf14b7e383372ca3 Closes-bug: 1192568 --- functions | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/functions b/functions index 5ff4a9b7ca..5fa265bd7d 100644 --- a/functions +++ b/functions @@ -1156,6 +1156,11 @@ function screen_rc { NL=`echo -ne '\015'` echo "screen -t $1 bash" >> $SCREENRC echo "stuff \"$2$NL\"" >> $SCREENRC + + if [[ -n ${SCREEN_LOGDIR} ]]; then + echo "logfile ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log" >>$SCREENRC + echo "log on" >>$SCREENRC + fi fi } From 57bf097e5a10e16b7d0cf5bf6c48bc86d78a1553 Mon Sep 17 00:00:00 2001 From: Bob Ball Date: Tue, 3 Dec 2013 17:35:02 +0000 Subject: [PATCH 0562/4704] XenAPI: Increase DomU's memory Devstack has been increasing in complexity, and if we use too much of our memory it can lead to fragmentation which in turn causes a DomU error and a failure of random tests. 
Change-Id: Ide9cc84625caed4e35a64a47ee1e92a6cd567651 --- tools/xen/xenrc | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/tools/xen/xenrc b/tools/xen/xenrc index bdcaf992b2..c0ea3bc85e 100644 --- a/tools/xen/xenrc +++ b/tools/xen/xenrc @@ -13,7 +13,13 @@ CLEAN_TEMPLATES=${CLEAN_TEMPLATES:-false} # Size of image VDI_MB=${VDI_MB:-5000} -OSDOMU_MEM_MB=3072 + +# Devstack now contains many components. 3GB ram is not enough to prevent +# swapping and memory fragmentation - the latter of which can cause failures +# such as blkfront failing to plug a VBD and lead to random test fails. +# +# Set to 4GB so an 8GB XenServer VM can have a 1GB Dom0 and leave 3GB for VMs +OSDOMU_MEM_MB=4096 OSDOMU_VDI_GB=8 # Network mapping. Specify bridge names or network names. Network names may From 2ac8b3f3c2ebe586802d7789cf152b13fe0d0497 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 4 Dec 2013 17:20:28 -0600 Subject: [PATCH 0563/4704] Fix a couple of INI whitespace bugs * iniset() bails if no section or option (attribute) is supplied * merge_config_file() properly skips lines with only whitespace * Also split the ini-tests into their own script Bug 1257954 Change-Id: Ie31c5bd0df8dfed129fbcf1e37228aaf25e9305d --- functions | 2 + lib/config | 2 +- tests/functions.sh | 189 ------------------------------------- tests/test_config.sh | 18 +++- tests/test_ini.sh | 220 +++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 240 insertions(+), 191 deletions(-) create mode 100755 tests/test_ini.sh diff --git a/functions b/functions index 5ff4a9b7ca..0280b2bcc4 100644 --- a/functions +++ b/functions @@ -729,6 +729,8 @@ function iniset() { local option=$3 local value=$4 + [[ -z $section || -z $option ]] && return + if ! 
grep -q "^\[$section\]" "$file" 2>/dev/null; then # Add section at the end echo -e "\n[$section]" >>"$file" diff --git a/lib/config b/lib/config index 91cefe48cc..c28072fc08 100644 --- a/lib/config +++ b/lib/config @@ -95,7 +95,7 @@ function merge_config_file() { /^ *\#/ { next } - /^.+/ { + /^[^ \t]+/ { split($0, d, " *= *") print "iniset " configfile " " section " " d[1] " \"" d[2] "\"" } diff --git a/tests/functions.sh b/tests/functions.sh index 40376aa63f..95dafe1028 100755 --- a/tests/functions.sh +++ b/tests/functions.sh @@ -38,195 +38,6 @@ if [[ $? = 0 ]]; then fi -echo "Testing INI functions" - -cat >test.ini <test.ini < Date: Mon, 9 Dec 2013 15:40:22 +1100 Subject: [PATCH 0564/4704] Added keystone auth port to the nova config Added the $KEYSTONE_AUTH_PORT to the keystone_authtoken section of the create_nova_conf function. This is required as without it nova doesn't communicate to the keystone server. Generating an "Unauthorised (HTTP 401)" page when accessing /admin/. Change-Id: Ibf4d9d1c21081a1e3de4ea765f4db6de5fbdb237 --- lib/nova | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/nova b/lib/nova index 5fd0bebf65..e4dae7c7df 100644 --- a/lib/nova +++ b/lib/nova @@ -398,6 +398,7 @@ function create_nova_conf() { # Add keystone authtoken configuration iniset $NOVA_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST + iniset $NOVA_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT iniset $NOVA_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL iniset $NOVA_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME iniset $NOVA_CONF keystone_authtoken cafile $KEYSTONE_SSL_CA From 86d9aed67dd16022fdd688edaf099e42ca761444 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Tue, 10 Dec 2013 12:24:16 +0000 Subject: [PATCH 0565/4704] Fix the amount of workers spawned for non proxies We were running as auto so swift would spawn a lot of processes consuming memory and CPU which are not really needed in a devstack env (and bad for the
jenkins vm). Closes-Bug: 1259548 Change-Id: I6b5266186168fe99568dda5453b436c2f9cfedb3 --- lib/swift | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/swift b/lib/swift index 40722ab030..03aa8f4a7c 100644 --- a/lib/swift +++ b/lib/swift @@ -376,6 +376,9 @@ EOF iniuncomment ${swift_node_config} DEFAULT log_facility iniset ${swift_node_config} DEFAULT log_facility LOG_LOCAL${log_facility} + iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT workers + iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT workers 1 + iniuncomment ${swift_node_config} DEFAULT disable_fallocate iniset ${swift_node_config} DEFAULT disable_fallocate true From 19a47a49a98931ab311fe22ec78ffa4900013b2c Mon Sep 17 00:00:00 2001 From: armando-migliaccio Date: Tue, 10 Dec 2013 07:41:26 -0800 Subject: [PATCH 0566/4704] Neutron/NVP plugin: fix 'ip link' usage Closes-bug: #1258141 Change-Id: Id26eca6c3174a108d1822440956ab7f66cc3ebd3 --- lib/neutron_thirdparty/nicira | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/neutron_thirdparty/nicira b/lib/neutron_thirdparty/nicira index 3f2a5af11f..3efb5a93b3 100644 --- a/lib/neutron_thirdparty/nicira +++ b/lib/neutron_thirdparty/nicira @@ -33,7 +33,7 @@ function init_nicira() { echo "Defaulting to "$NVP_GATEWAY_NETWORK_CIDR fi # Make sure the interface is up, but not configured - sudo ip link dev $NVP_GATEWAY_NETWORK_INTERFACE set up + sudo ip link set $NVP_GATEWAY_NETWORK_INTERFACE up # Save and then flush the IP addresses on the interface addresses=$(ip addr show dev $NVP_GATEWAY_NETWORK_INTERFACE | grep inet | awk {'print $2'}) sudo ip addr flush $NVP_GATEWAY_NETWORK_INTERFACE @@ -45,7 +45,7 @@ function init_nicira() { sudo ovs-vsctl --no-wait -- --may-exist add-br $PUBLIC_BRIDGE sudo ovs-vsctl -- --may-exist add-port $PUBLIC_BRIDGE $NVP_GATEWAY_NETWORK_INTERFACE nvp_gw_net_if_mac=$(ip link show $NVP_GATEWAY_NETWORK_INTERFACE | awk '/ether/ {print $2}') - sudo ip link dev $PUBLIC_BRIDGE set address $nvp_gw_net_if_mac + sudo ip link 
set address $nvp_gw_net_if_mac dev $PUBLIC_BRIDGE for address in $addresses; do sudo ip addr add dev $PUBLIC_BRIDGE $address done From 81fe5f54981e5627bc876ff02753e95705d9d4a1 Mon Sep 17 00:00:00 2001 From: Tomoe Sugihara Date: Thu, 14 Nov 2013 20:04:44 +0000 Subject: [PATCH 0567/4704] Define Q_L3_ENABLED=True for MidoNet plugin Change-Id: Iabf7a5ff2e53b1822a327600da9acac8cf6a59f7 --- lib/neutron_plugins/midonet | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/neutron_plugins/midonet b/lib/neutron_plugins/midonet index e406146bbe..f95fcb75b9 100644 --- a/lib/neutron_plugins/midonet +++ b/lib/neutron_plugins/midonet @@ -62,6 +62,9 @@ function neutron_plugin_configure_service() { if [[ "$MIDONET_PROVIDER_ROUTER_ID" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE MIDONET provider_router_id $MIDONET_PROVIDER_ROUTER_ID fi + + Q_L3_ENABLED=True + Q_L3_ROUTER_PER_TENANT=True } function neutron_plugin_setup_interface_driver() { From a515a70e2b58912877bdf2952e7812410da647f3 Mon Sep 17 00:00:00 2001 From: KIYOHIRO ADACHI Date: Wed, 11 Dec 2013 16:11:28 +0900 Subject: [PATCH 0568/4704] Fixed check method of $USE_GET_PIP '[[ -n "$USE_GET_PIP" ]]' always TRUE because $USE_GET_PIP is '0' or '1'. 
Change-Id: I73c4c6befe2126882ef21991b2a3fe712b2ac388 Closes-Bug: #1259824 --- tools/install_pip.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/install_pip.sh b/tools/install_pip.sh index a65a77e079..d714d33530 100755 --- a/tools/install_pip.sh +++ b/tools/install_pip.sh @@ -87,7 +87,7 @@ get_versions # Eradicate any and all system packages uninstall_package python-pip -if [[ -n "$USE_GET_PIP" ]]; then +if [[ "$USE_GET_PIP" == "1" ]]; then install_get_pip else install_pip_tarball From 0c5a04267458271fb1010cc7dad4226bec7238e7 Mon Sep 17 00:00:00 2001 From: Steve Baker Date: Wed, 4 Dec 2013 17:01:01 +1300 Subject: [PATCH 0569/4704] Allow heat tests to use already uploaded test image If $HEAT_FETCHED_TEST_IMAGE is set then tempest is configured to use the image named $HEAT_FETCHED_TEST_IMAGE for any orchestration tests which require an image. Fallback to checking $HEAT_CREATE_TEST_IMAGE and invoking diskimage-builder if necessary. The intent is to use Fedora 20 as the test image for gating since this image has heat-cfntools already installed. Change-Id: I177ae091a641ba99fd4c618e30a39c5148ae617f --- lib/tempest | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index 803b740221..d2e3b0a27f 100644 --- a/lib/tempest +++ b/lib/tempest @@ -281,7 +281,9 @@ function configure_tempest() { iniset $TEMPEST_CONF boto ssh_user ${DEFAULT_INSTANCE_USER:-cirros} # Orchestration test image - if [[ "$HEAT_CREATE_TEST_IMAGE" = "True" ]]; then + if [[ ! 
-z "$HEAT_FETCHED_TEST_IMAGE" ]]; then + iniset $TEMPEST_CONF orchestration image_ref "$HEAT_FETCHED_TEST_IMAGE" + elif [[ "$HEAT_CREATE_TEST_IMAGE" = "True" ]]; then disk_image_create /usr/share/tripleo-image-elements "vm fedora heat-cfntools" "i386" "fedora-vm-heat-cfntools-tempest" iniset $TEMPEST_CONF orchestration image_ref "fedora-vm-heat-cfntools-tempest" fi From cd7d956fbc30eae3c1694b187ea605a5f0d960d3 Mon Sep 17 00:00:00 2001 From: Andrea Frittoli Date: Thu, 5 Dec 2013 08:09:12 +0000 Subject: [PATCH 0570/4704] Handle the case of pipe char in value for iniset iniset did not handle the case of "|" in the value to be injected. Fix this by replacing | with \000 (NULL). Fixes bug #1258050 Change-Id: I8882c2f3f177ebdfa0c66270dbbc7fd50f30b065 --- functions | 3 ++- tests/test_ini.sh | 20 ++++++++++++++++++++ 2 files changed, 22 insertions(+), 1 deletion(-) diff --git a/functions b/functions index 0280b2bcc4..995be576c7 100644 --- a/functions +++ b/functions @@ -741,8 +741,9 @@ function iniset() { $option = $value " "$file" else + local sep=$(echo -ne "\x01") # Replace it - sed -i -e "/^\[$section\]/,/^\[.*\]/ s|^\($option[ \t]*=[ \t]*\).*$|\1$value|" "$file" + sed -i -e '/^\['${section}'\]/,/^\[.*\]/ s'${sep}'^\('${option}'[ \t]*=[ \t]*\).*$'${sep}'\1'"${value}"${sep} "$file" fi } diff --git a/tests/test_ini.sh b/tests/test_ini.sh index b0dc6b176b..598cd578f6 100755 --- a/tests/test_ini.sh +++ b/tests/test_ini.sh @@ -136,6 +136,26 @@ else echo "iniget failed: $VAL" fi +# test pipe in option +iniset test.ini aaa handlers "a|b" + +VAL=$(iniget test.ini aaa handlers) +if [[ "$VAL" == "a|b" ]]; then + echo "OK: $VAL" +else + echo "iniget failed: $VAL" +fi + +# test space in option +iniset test.ini aaa handlers "a b" + +VAL="$(iniget test.ini aaa handlers)" +if [[ "$VAL" == "a b" ]]; then + echo "OK: $VAL" +else + echo "iniget failed: $VAL" +fi + # Test section not exist VAL=$(iniget test.ini zzz handlers) From 0718568b1203bd11058d3cd28402f84841c01dda Mon Sep 17 
00:00:00 2001 From: Thierry Carrez Date: Fri, 13 Dec 2013 15:20:26 +0100 Subject: [PATCH 0571/4704] Support oslo-rootwrap in lib/cinder Make lib/cinder support both cinder-rootwrap (current case) and oslo-rootwrap (future case) to handle the Cinder transition towards oslo-rootwrap usage peacefully. Related blueprint: https://blueprints.launchpad.net/cinder/+spec/cinder-oslo-rootwrap Change-Id: I663986304bd74cb6d72d51c553540fb5f9db1d1d --- lib/cinder | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/lib/cinder b/lib/cinder index ef3bd81a1f..cbe732e9b0 100644 --- a/lib/cinder +++ b/lib/cinder @@ -174,6 +174,12 @@ function configure_cinder() { # Set the paths of certain binaries CINDER_ROOTWRAP=$(get_rootwrap_location cinder) + if [[ ! -x $CINDER_ROOTWRAP ]]; then + CINDER_ROOTWRAP=$(get_rootwrap_location oslo) + if [[ ! -x $CINDER_ROOTWRAP ]]; then + die $LINENO "No suitable rootwrap found." + fi + fi # If Cinder ships the new rootwrap filters files, deploy them # (owned by root) and add a parameter to $CINDER_ROOTWRAP @@ -189,11 +195,16 @@ function configure_cinder() { sudo chown -R root:root $CINDER_CONF_DIR/rootwrap.d sudo chmod 644 $CINDER_CONF_DIR/rootwrap.d/* # Set up rootwrap.conf, pointing to /etc/cinder/rootwrap.d - sudo cp $CINDER_DIR/etc/cinder/rootwrap.conf $CINDER_CONF_DIR/ + if [[ -f $CINDER_DIR/etc/cinder/rootwrap.conf ]]; then + sudo cp $CINDER_DIR/etc/cinder/rootwrap.conf $CINDER_CONF_DIR/ + else + # rootwrap.conf is no longer shipped in Cinder itself + echo "filters_path=" | sudo tee $CINDER_CONF_DIR/rootwrap.conf > /dev/null + fi sudo sed -e "s:^filters_path=.*$:filters_path=$CINDER_CONF_DIR/rootwrap.d:" -i $CINDER_CONF_DIR/rootwrap.conf sudo chown root:root $CINDER_CONF_DIR/rootwrap.conf sudo chmod 0644 $CINDER_CONF_DIR/rootwrap.conf - # Specify rootwrap.conf as first parameter to cinder-rootwrap + # Specify rootwrap.conf as first parameter to rootwrap CINDER_ROOTWRAP="$CINDER_ROOTWRAP 
$CINDER_CONF_DIR/rootwrap.conf" ROOTWRAP_CINDER_SUDOER_CMD="$CINDER_ROOTWRAP *" fi From 0f7ad6bba6fe451c69cdc27fadfbb8ed8fdc7b71 Mon Sep 17 00:00:00 2001 From: Malini Kamalambal Date: Fri, 13 Dec 2013 12:42:31 -0500 Subject: [PATCH 0572/4704] Fix Issues in Marconi integration This patch fixes a couple of issues, that prevents marconi from running on devstack. Change-Id: I47060a0334ad6f90f1402b34c83bb6ad22f723d4 Closes-Bug: #1260820 --- exercises/marconi.sh | 2 +- lib/marconi | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/exercises/marconi.sh b/exercises/marconi.sh index 1b9788dce6..9d83a99f02 100755 --- a/exercises/marconi.sh +++ b/exercises/marconi.sh @@ -35,7 +35,7 @@ source $TOP_DIR/exerciserc is_service_enabled marconi-server || exit 55 -curl http://$SERVICE_HOST:8888/v1/ 2>/dev/null | grep -q 'Auth' || die $LINENO "Marconi API not functioning!" +curl http://$SERVICE_HOST:8888/v1/ 2>/dev/null | grep -q 'queue_name' || die $LINENO "Marconi API not functioning!" set +o xtrace echo "*********************************************************************" diff --git a/lib/marconi b/lib/marconi index 8e0b82b49e..742f866e7d 100644 --- a/lib/marconi +++ b/lib/marconi @@ -148,10 +148,11 @@ function create_marconi_accounts() { --user-id $MARCONI_USER \ --role-id $ADMIN_ROLE if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - MARCONI_SERVICE=$(get_id keystone service-create \ + MARCONI_SERVICE=$(keystone service-create \ --name=marconi \ --type=queuing \ - --description="Marconi Service") + --description="Marconi Service" \ + | grep " id " | get_field 2) keystone endpoint-create \ --region RegionOne \ --service_id $MARCONI_SERVICE \ From 055cdee2bf4582e39fa91b96de745783850f082d Mon Sep 17 00:00:00 2001 From: Brant Knudson Date: Fri, 13 Dec 2013 13:51:25 -0600 Subject: [PATCH 0573/4704] Remove duplicate debug option from keystone-all The keystone server was started with the options like keystone-all ... -d --debug ... 
The -d and --debug options are the same so one of them is redundant. This will make it less confusing if someone removes --debug and thinks that debug is off, but debug is still on because they didn't notice there was an extra -d. Change-Id: I1ac977e6b12f1cc44f02b636c1bfb5c115b5b3e4 --- lib/keystone | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/keystone b/lib/keystone index 712a509fde..29b9604efe 100644 --- a/lib/keystone +++ b/lib/keystone @@ -403,7 +403,7 @@ function start_keystone() { screen_it key "cd $KEYSTONE_DIR && sudo tail -f /var/log/$APACHE_NAME/keystone" else # Start Keystone in a screen window - screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone-all --config-file $KEYSTONE_CONF $KEYSTONE_LOG_CONFIG -d --debug" + screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone-all --config-file $KEYSTONE_CONF $KEYSTONE_LOG_CONFIG --debug" fi echo "Waiting for keystone to start..." From 90234ac4b03426d844b72d251d4cae13fa09cde5 Mon Sep 17 00:00:00 2001 From: Salvatore Orlando Date: Mon, 25 Nov 2013 05:44:10 -0800 Subject: [PATCH 0574/4704] Default to /24 prefix for floating IP range with neutron When running Tempest parallel tests with neutron, several long-running tests might create routers and floating IPs, which will result in IP allocations over the public network. Increasing the public network size should ensure tests do not fail due to IP address shortage; this patch also updates the public network gateway IP address. 
Related-Bug: 1253966 Change-Id: Ie075b3c4d14a07b06c42fd29b09770dd1972aa45 --- lib/neutron | 2 +- lib/neutron_thirdparty/nicira | 2 +- stack.sh | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/neutron b/lib/neutron index 70417be5d3..786e8f8712 100644 --- a/lib/neutron +++ b/lib/neutron @@ -68,7 +68,7 @@ set +o xtrace # Gateway and subnet defaults, in case they are not customized in localrc NETWORK_GATEWAY=${NETWORK_GATEWAY:-10.0.0.1} -PUBLIC_NETWORK_GATEWAY=${PUBLIC_NETWORK_GATEWAY:-172.24.4.225} +PUBLIC_NETWORK_GATEWAY=${PUBLIC_NETWORK_GATEWAY:-172.24.4.1} PRIVATE_SUBNET_NAME=${PRIVATE_SUBNET_NAME:-"private-subnet"} PUBLIC_SUBNET_NAME=${PUBLIC_SUBNET_NAME:-"public-subnet"} diff --git a/lib/neutron_thirdparty/nicira b/lib/neutron_thirdparty/nicira index 3f2a5af11f..5b034e0c62 100644 --- a/lib/neutron_thirdparty/nicira +++ b/lib/neutron_thirdparty/nicira @@ -20,7 +20,7 @@ set +o xtrace NVP_GATEWAY_NETWORK_INTERFACE=${NVP_GATEWAY_NETWORK_INTERFACE:-eth2} # Re-declare floating range as it's needed also in stop_nicira, which # is invoked by unstack.sh -FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.224/28} +FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.0/24} function configure_nicira() { : diff --git a/stack.sh b/stack.sh index 47d93bd642..d0674066b5 100755 --- a/stack.sh +++ b/stack.sh @@ -260,7 +260,7 @@ safe_chown -R $STACK_USER $DATA_DIR # from either range when attempting to guess the IP to use for the host. # Note that setting FIXED_RANGE may be necessary when running DevStack # in an OpenStack cloud that uses either of these address ranges internally. 
-FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.224/28} +FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.0/24} FIXED_RANGE=${FIXED_RANGE:-10.0.0.0/24} FIXED_NETWORK_SIZE=${FIXED_NETWORK_SIZE:-256} From 8c1b95eef119837428993d32a05f97a231f44b9e Mon Sep 17 00:00:00 2001 From: Steve Baker Date: Mon, 16 Dec 2013 11:04:03 +1300 Subject: [PATCH 0575/4704] Add Fedora 20 to IMAGE_URLS Fedora 20 is only added to IMAGE_URLS if $HEAT_FETCHED_TEST_IMAGE contains the exact known image name This image is used for running heat tempest tests which require an image which has heat-cfntools installed. Change-Id: Ic6cdea932a5d5f3de138da96d27e407775b3e84b --- stackrc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/stackrc b/stackrc index 410f9d8d05..e89e64dc65 100644 --- a/stackrc +++ b/stackrc @@ -282,6 +282,9 @@ case "$VIRT_DRIVER" in IMAGE_URLS=${IMAGE_URLS:-"http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-uec.tar.gz"};; esac +if [[ "$HEAT_FETCHED_TEST_IMAGE" == "Fedora-i386-20-20131211.1-sda" ]]; then + IMAGE_URLS+=",https://dl.fedoraproject.org/pub/alt/stage/20-RC1.1/Images/i386/$HEAT_FETCHED_TEST_IMAGE.qcow2" +fi # 10Gb default volume backing file size VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-10250M} From edddb1fddf6b571286d85057abe8aa1cd21e67a6 Mon Sep 17 00:00:00 2001 From: Akihiro Motoki Date: Mon, 9 Dec 2013 20:21:06 +0900 Subject: [PATCH 0576/4704] Stop Neutron advanced service external processes Some of Neutron advanced services (LBaaS, VPNaaS, ....) creates external processes and they should be stopped in unstack. This commit defines neutron__stop functions for all services and implements the cleanup logics if necessary. Also cleanup_neutron removes netns used by LBaaS haproxy. 
Change-Id: Ied3a2c374ffcb6b59ecaf1027fb6e6083eded2ae --- lib/neutron | 15 ++++++++++++++- lib/neutron_plugins/services/firewall | 4 ++++ lib/neutron_plugins/services/loadbalancer | 5 +++++ lib/neutron_plugins/services/metering | 4 ++++ lib/neutron_plugins/services/vpn | 11 +++++++++++ 5 files changed, 38 insertions(+), 1 deletion(-) diff --git a/lib/neutron b/lib/neutron index b05b16d72e..851b2ac65e 100644 --- a/lib/neutron +++ b/lib/neutron @@ -508,6 +508,19 @@ function stop_neutron() { pid=$(ps aux | awk '/neutron-ns-metadata-proxy/ { print $2 }') [ ! -z "$pid" ] && sudo kill -9 $pid fi + + if is_service_enabled q-lbaas; then + neutron_lbaas_stop + fi + if is_service_enabled q-fwaas; then + neutron_fwaas_stop + fi + if is_service_enabled q-vpn; then + neutron_vpn_stop + fi + if is_service_enabled q-metering; then + neutron_metering_stop + fi } # cleanup_neutron() - Remove residual data files, anything left over from previous @@ -518,7 +531,7 @@ function cleanup_neutron() { fi # delete all namespaces created by neutron - for ns in $(sudo ip netns list | grep -o -e qdhcp-[0-9a-f\-]* -e qrouter-[0-9a-f\-]*); do + for ns in $(sudo ip netns list | grep -o -E '(qdhcp|qrouter|qlbaas)-[0-9a-f-]*'); do sudo ip netns delete ${ns} done } diff --git a/lib/neutron_plugins/services/firewall b/lib/neutron_plugins/services/firewall index 1597e8577d..580071ff71 100644 --- a/lib/neutron_plugins/services/firewall +++ b/lib/neutron_plugins/services/firewall @@ -23,5 +23,9 @@ function neutron_fwaas_configure_driver() { iniset_multiline $FWAAS_DRIVER_CONF_FILENAME fwaas driver "neutron.services.firewall.drivers.linux.iptables_fwaas.IptablesFwaasDriver" } +function neutron_fwaas_stop() { + : +} + # Restore xtrace $MY_XTRACE diff --git a/lib/neutron_plugins/services/loadbalancer b/lib/neutron_plugins/services/loadbalancer index c38f904b69..2699a9b698 100644 --- a/lib/neutron_plugins/services/loadbalancer +++ b/lib/neutron_plugins/services/loadbalancer @@ -45,5 +45,10 @@ function 
neutron_agent_lbaas_configure_agent() { fi } +function neutron_lbaas_stop() { + pids=$(ps aux | awk '/haproxy/ { print $2 }') + [ ! -z "$pids" ] && sudo kill $pids +} + # Restore xtrace $MY_XTRACE diff --git a/lib/neutron_plugins/services/metering b/lib/neutron_plugins/services/metering index 629f3b788a..b105429bfd 100644 --- a/lib/neutron_plugins/services/metering +++ b/lib/neutron_plugins/services/metering @@ -26,5 +26,9 @@ function neutron_agent_metering_configure_agent() { cp $NEUTRON_DIR/etc/metering_agent.ini $METERING_AGENT_CONF_FILENAME } +function neutron_metering_stop() { + : +} + # Restore xtrace $MY_XTRACE diff --git a/lib/neutron_plugins/services/vpn b/lib/neutron_plugins/services/vpn index b8f5c7d56b..55d0a76363 100644 --- a/lib/neutron_plugins/services/vpn +++ b/lib/neutron_plugins/services/vpn @@ -22,5 +22,16 @@ function neutron_vpn_configure_common() { fi } +function neutron_vpn_stop() { + local ipsec_data_dir=$DATA_DIR/neutron/ipsec + local pids + if [ -d $ipsec_data_dir ]; then + pids=$(find $ipsec_data_dir -name 'pluto.pid' -exec cat {} \;) + fi + if [ -n "$pids" ]; then + sudo kill $pids + fi +} + # Restore xtrace $MY_XTRACE From 1692bda49264e35757c0f2f8d9264681256657b6 Mon Sep 17 00:00:00 2001 From: armando-migliaccio Date: Fri, 13 Dec 2013 10:15:34 -0800 Subject: [PATCH 0577/4704] Add ability to configure tempest tests based on network extensions This patch introduces a Devstack variable to specify which network extensions are enabled; this is useful for configuring tempest runs when third-party plugins that do not support certain extensions. 
Closes-bug: #1247778 Closes-bug: #1231152 Change-Id: Iee170993cb164502774f9ac4201b963d9a2715ba --- lib/tempest | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/tempest b/lib/tempest index ec1fc90b76..0af93103a4 100644 --- a/lib/tempest +++ b/lib/tempest @@ -299,6 +299,9 @@ function configure_tempest() { # cli iniset $TEMPEST_CONF cli cli_dir $NOVA_BIN_DIR + # Networking + iniset $TEMPEST_CONF network-feature-enabled api_extensions "${NETWORK_API_EXTENSIONS:-all}" + # service_available for service in nova cinder glance neutron swift heat horizon ceilometer; do if is_service_enabled $service ; then From db54311552d6c1efad7d9958a539848b3aeea775 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Mon, 16 Dec 2013 16:35:44 -0500 Subject: [PATCH 0578/4704] add libxslt1-dev to tempest required files tempest actually needs a pretty new lxml, which means it's going to build it from source. To do that it needs libxslt1-dev to compile. We should be good and let devstack do this out of the box so it works on minimal environments. Change-Id: Ia527905c1c15fb8c6793f0ce543ad05e25a88179 --- files/apts/tempest | 1 + files/rpms/tempest | 1 + 2 files changed, 2 insertions(+) create mode 100644 files/apts/tempest create mode 100644 files/rpms/tempest diff --git a/files/apts/tempest b/files/apts/tempest new file mode 100644 index 0000000000..f244e4e783 --- /dev/null +++ b/files/apts/tempest @@ -0,0 +1 @@ +libxslt1-dev \ No newline at end of file diff --git a/files/rpms/tempest b/files/rpms/tempest new file mode 100644 index 0000000000..de32b81504 --- /dev/null +++ b/files/rpms/tempest @@ -0,0 +1 @@ +libxslt-dev \ No newline at end of file From be2ff9a745c46bd6627b68d9b6fbdbb724f3854d Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Tue, 17 Dec 2013 16:26:21 +1100 Subject: [PATCH 0579/4704] Match RHEL Beta release strings RHEL7 (in beta) has "Beta" after the release number and before the code-name. 
Add a number-match to the regex so everything between that match and the codename in parenthesis is considered the release. Change-Id: I992f20eedcefc6aeed6bd3ad57fc4cf20c8ef15d --- functions | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/functions b/functions index 7e081907aa..e79e1d58af 100644 --- a/functions +++ b/functions @@ -422,6 +422,7 @@ GetOSVersion() { os_CODENAME=$(lsb_release -c -s) elif [[ -r /etc/redhat-release ]]; then # Red Hat Enterprise Linux Server release 5.5 (Tikanga) + # Red Hat Enterprise Linux Server release 7.0 Beta (Maipo) # CentOS release 5.5 (Final) # CentOS Linux release 6.0 (Final) # Fedora release 16 (Verne) @@ -430,7 +431,7 @@ GetOSVersion() { for r in "Red Hat" CentOS Fedora XenServer; do os_VENDOR=$r if [[ -n "`grep \"$r\" /etc/redhat-release`" ]]; then - ver=`sed -e 's/^.* \(.*\) (\(.*\)).*$/\1\|\2/' /etc/redhat-release` + ver=`sed -e 's/^.* \([0-9].*\) (\(.*\)).*$/\1\|\2/' /etc/redhat-release` os_CODENAME=${ver#*|} os_RELEASE=${ver%|*} os_UPDATE=${os_RELEASE##*.} From cff1c7d3700278263da1a6f0daa381e97df15f28 Mon Sep 17 00:00:00 2001 From: Bob Ball Date: Wed, 18 Dec 2013 13:20:40 +0000 Subject: [PATCH 0580/4704] HEAT_FETCHED_TEST_IMAGE may not be defined in some cases. 
Some scripts that source stackrc use set -u, which will error when unknown variables are accessed Change-Id: I5af2d58475e1793dfa728b7ce9180fcbba1145e9 --- stackrc | 1 + 1 file changed, 1 insertion(+) diff --git a/stackrc b/stackrc index e89e64dc65..b129197e8f 100644 --- a/stackrc +++ b/stackrc @@ -282,6 +282,7 @@ case "$VIRT_DRIVER" in IMAGE_URLS=${IMAGE_URLS:-"http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-uec.tar.gz"};; esac +HEAT_FETCHED_TEST_IMAGE=${HEAT_FETCHED_TEST_IMAGE:-""} if [[ "$HEAT_FETCHED_TEST_IMAGE" == "Fedora-i386-20-20131211.1-sda" ]]; then IMAGE_URLS+=",https://dl.fedoraproject.org/pub/alt/stage/20-RC1.1/Images/i386/$HEAT_FETCHED_TEST_IMAGE.qcow2" fi From 9e136b4adee6ce33fdbf01e0a8614c186c5f20b7 Mon Sep 17 00:00:00 2001 From: Isaku Yamahata Date: Mon, 16 Dec 2013 15:52:03 +0900 Subject: [PATCH 0581/4704] Neutron: Define a utility function to add neutron service class When enabling neutron service (i.e. enable_service q-xxx), related code unconditionally adds a necessary plugin class to Q_SERVICE_PLUGIN_CLASSES. Which may cause duplication in Q_SERVICE_PLUGIN_CLASSES when Q_SERVICE_PLUGIN_CLASSES is explicitly specified in localrc. As a result, neutron server fails to start. This patch introduces a utility function to add service class, and check duplication. 
Closes-Bug: #1261291 Change-Id: Id2880c7647babfccc3e8d9fc60dd93c4b3997ed9 --- lib/neutron | 10 ++++++++++ lib/neutron_plugins/ml2 | 6 +----- lib/neutron_plugins/services/firewall | 6 +----- lib/neutron_plugins/services/loadbalancer | 6 +----- lib/neutron_plugins/services/metering | 6 +----- lib/neutron_plugins/services/vpn | 6 +----- 6 files changed, 15 insertions(+), 25 deletions(-) diff --git a/lib/neutron b/lib/neutron index b05b16d72e..38081653e4 100644 --- a/lib/neutron +++ b/lib/neutron @@ -744,6 +744,16 @@ function _configure_neutron_service() { # Utility Functions #------------------ +# _neutron_service_plugin_class_add() - add service plugin class +function _neutron_service_plugin_class_add() { + local service_plugin_class=$1 + if [[ $Q_SERVICE_PLUGIN_CLASSES == '' ]]; then + Q_SERVICE_PLUGIN_CLASSES=$service_plugin_class + elif [[ ! ,${Q_SERVICE_PLUGIN_CLASSES}, =~ ,${service_plugin_class}, ]]; then + Q_SERVICE_PLUGIN_CLASSES="$Q_SERVICE_PLUGIN_CLASSES,$service_plugin_class" + fi +} + # _neutron_setup_rootwrap() - configure Neutron's rootwrap function _neutron_setup_rootwrap() { if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2 index b5b1873f3f..ab4e3474a6 100644 --- a/lib/neutron_plugins/ml2 +++ b/lib/neutron_plugins/ml2 @@ -54,11 +54,7 @@ function neutron_plugin_configure_common() { Q_PLUGIN_CLASS="neutron.plugins.ml2.plugin.Ml2Plugin" # The ML2 plugin delegates L3 routing/NAT functionality to # the L3 service plugin which must therefore be specified. 
- if [[ $Q_SERVICE_PLUGIN_CLASSES == '' ]]; then - Q_SERVICE_PLUGIN_CLASSES=$ML2_L3_PLUGIN - else - Q_SERVICE_PLUGIN_CLASSES="$Q_SERVICE_PLUGIN_CLASSES,$ML2_L3_PLUGIN" - fi + _neutron_service_plugin_class_add $ML2_L3_PLUGIN } function neutron_plugin_configure_service() { diff --git a/lib/neutron_plugins/services/firewall b/lib/neutron_plugins/services/firewall index 1597e8577d..97cc5a28fd 100644 --- a/lib/neutron_plugins/services/firewall +++ b/lib/neutron_plugins/services/firewall @@ -8,11 +8,7 @@ set +o xtrace FWAAS_PLUGIN=neutron.services.firewall.fwaas_plugin.FirewallPlugin function neutron_fwaas_configure_common() { - if [[ $Q_SERVICE_PLUGIN_CLASSES == '' ]]; then - Q_SERVICE_PLUGIN_CLASSES=$FWAAS_PLUGIN - else - Q_SERVICE_PLUGIN_CLASSES="$Q_SERVICE_PLUGIN_CLASSES,$FWAAS_PLUGIN" - fi + _neutron_service_plugin_class_add $FWAAS_PLUGIN } function neutron_fwaas_configure_driver() { diff --git a/lib/neutron_plugins/services/loadbalancer b/lib/neutron_plugins/services/loadbalancer index c38f904b69..6ff991c855 100644 --- a/lib/neutron_plugins/services/loadbalancer +++ b/lib/neutron_plugins/services/loadbalancer @@ -19,11 +19,7 @@ function neutron_agent_lbaas_install_agent_packages() { } function neutron_agent_lbaas_configure_common() { - if [[ $Q_SERVICE_PLUGIN_CLASSES == '' ]]; then - Q_SERVICE_PLUGIN_CLASSES=$LBAAS_PLUGIN - else - Q_SERVICE_PLUGIN_CLASSES="$Q_SERVICE_PLUGIN_CLASSES,$LBAAS_PLUGIN" - fi + _neutron_service_plugin_class_add $LBAAS_PLUGIN } function neutron_agent_lbaas_configure_agent() { diff --git a/lib/neutron_plugins/services/metering b/lib/neutron_plugins/services/metering index 629f3b788a..5cabfbfc3b 100644 --- a/lib/neutron_plugins/services/metering +++ b/lib/neutron_plugins/services/metering @@ -10,11 +10,7 @@ AGENT_METERING_BINARY="$NEUTRON_BIN_DIR/neutron-metering-agent" METERING_PLUGIN="neutron.services.metering.metering_plugin.MeteringPlugin" function neutron_agent_metering_configure_common() { - if [[ $Q_SERVICE_PLUGIN_CLASSES == '' ]]; 
then - Q_SERVICE_PLUGIN_CLASSES=$METERING_PLUGIN - else - Q_SERVICE_PLUGIN_CLASSES="$Q_SERVICE_PLUGIN_CLASSES,$METERING_PLUGIN" - fi + _neutron_service_plugin_class_add $METERING_PLUGIN } function neutron_agent_metering_configure_agent() { diff --git a/lib/neutron_plugins/services/vpn b/lib/neutron_plugins/services/vpn index b8f5c7d56b..1ab07cb93c 100644 --- a/lib/neutron_plugins/services/vpn +++ b/lib/neutron_plugins/services/vpn @@ -15,11 +15,7 @@ function neutron_vpn_install_agent_packages() { } function neutron_vpn_configure_common() { - if [[ $Q_SERVICE_PLUGIN_CLASSES == '' ]]; then - Q_SERVICE_PLUGIN_CLASSES=$VPN_PLUGIN - else - Q_SERVICE_PLUGIN_CLASSES="$Q_SERVICE_PLUGIN_CLASSES,$VPN_PLUGIN" - fi + _neutron_service_plugin_class_add $VPN_PLUGIN } # Restore xtrace From af72b68ab0bb69178084d27374a3ec96ced40e98 Mon Sep 17 00:00:00 2001 From: Steve Baker Date: Fri, 20 Dec 2013 08:39:12 +1300 Subject: [PATCH 0582/4704] Use final release of Fedora 20 for heat test image The image file has not changed, but the URL now points to the release directory for Fedora 20. 
Change-Id: Ie1a9bcc7da634996b25ef7f6fc694398c632549d --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index e89e64dc65..2c770bc7a7 100644 --- a/stackrc +++ b/stackrc @@ -283,7 +283,7 @@ case "$VIRT_DRIVER" in esac if [[ "$HEAT_FETCHED_TEST_IMAGE" == "Fedora-i386-20-20131211.1-sda" ]]; then - IMAGE_URLS+=",https://dl.fedoraproject.org/pub/alt/stage/20-RC1.1/Images/i386/$HEAT_FETCHED_TEST_IMAGE.qcow2" + IMAGE_URLS+=",https://dl.fedoraproject.org/pub/fedora/linux/releases/20/Images/i386/$HEAT_FETCHED_TEST_IMAGE.qcow2" fi # 10Gb default volume backing file size From bff001456cc5a804f752722d1c406bbb880dd542 Mon Sep 17 00:00:00 2001 From: Isaku Yamahata Date: Fri, 20 Dec 2013 11:55:08 +0900 Subject: [PATCH 0583/4704] config: get_meta_section() misunderstands the beginning of metasection For example, the line, "if [[ -n $no_proxy ]]; then" is misparsed as the beginning of metasection because get_meta_section() misses escaping of "|" unlike get_meta_section_files(). This patch adds necessary escape as "|" -> "\|". Change-Id: Ic14b2ac167037c4f5db89492f0e8a4c5b13c7b6d Closes-Bug: #1262960 --- lib/config | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/config b/lib/config index c28072fc08..1678aec3fc 100644 --- a/lib/config +++ b/lib/config @@ -35,7 +35,7 @@ function get_meta_section() { $CONFIG_AWK_CMD -v matchgroup=$matchgroup -v configfile=$configfile ' BEGIN { group = "" } - /^\[\[.+|.*\]\]/ { + /^\[\[.+\|.*\]\]/ { if (group == "") { gsub("[][]", "", $1); split($1, a, "|"); From 74ba66dd3f3d54d6a2bec925e8c6573810976b5b Mon Sep 17 00:00:00 2001 From: Yuiko Takada Date: Fri, 20 Dec 2013 08:25:10 +0000 Subject: [PATCH 0584/4704] Fix the option of a2ensite command devstack fails with trema when executing the "sudo a2ensite sliceable_switch" command, without the ".conf" filename extension, with Apache2.4, Apache2.22. With Apache 2.2, it succeeds.
Because in versions newer than version 2.2, file checking by the a2ensite command is more severe. So, the a2ensite command forbids "sliceable_switch" without ".conf". Added ".conf" filename extension. Change-Id: I29a03cb59ee493345b7df0f1a9189eb3516c86e2 Closes-Bug: #1263017 --- lib/neutron_thirdparty/trema | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron_thirdparty/trema b/lib/neutron_thirdparty/trema index 9efd3f6c39..bdc23568fb 100644 --- a/lib/neutron_thirdparty/trema +++ b/lib/neutron_thirdparty/trema @@ -62,7 +62,7 @@ function init_trema() { sudo sed -i -e "s|/home/sliceable_switch/script|$TREMA_SS_SCRIPT_DIR|" \ $TREMA_SS_APACHE_CONFIG sudo a2enmod rewrite actions - sudo a2ensite sliceable_switch + sudo a2ensite sliceable_switch.conf cp $TREMA_SS_DIR/sliceable_switch_null.conf $TREMA_SS_CONFIG sed -i -e "s|^\$apps_dir.*$|\$apps_dir = \"$TREMA_DIR/apps\"|" \ From 355fc866833e0bd83796da1c45e4f94b58d5f500 Mon Sep 17 00:00:00 2001 From: Flavio Percoco Date: Fri, 29 Nov 2013 14:27:35 +0100 Subject: [PATCH 0585/4704] Explicitly enable the stores used by devstack Devstack currently relies on the default value of the `known_stores` configuration option. This patch enables explicitly the default stores used by devstack. The real fix for the issue below will land in Glance. However, since the default stores will be FS and HTTP we need devstack to enable Swift's as well, which is required in the gates, hence this patch.
Partially-fixes: #1255556 Change-Id: Id9aab356b36b2150312324a0349d120bbbbd4e63 --- lib/glance | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/glance b/lib/glance index 2e29a8f77c..b278796d21 100644 --- a/lib/glance +++ b/lib/glance @@ -124,6 +124,8 @@ function configure_glance() { iniset $GLANCE_API_CONF DEFAULT swift_store_user $SERVICE_TENANT_NAME:glance iniset $GLANCE_API_CONF DEFAULT swift_store_key $SERVICE_PASSWORD iniset $GLANCE_API_CONF DEFAULT swift_store_create_container_on_put True + + iniset_multiline DEFAULT known_stores glance.store.filesystem.Store glance.store.http.Store glance.store.swift.Store fi cp -p $GLANCE_DIR/etc/glance-registry-paste.ini $GLANCE_REGISTRY_PASTE_INI From 63d9f3e550e0918ae59ed76bd5cf0fe6ef15353b Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Sat, 21 Dec 2013 01:19:09 -0800 Subject: [PATCH 0586/4704] Fix bad copy and paste in lib/swift Change-Id: I3b7526b52867525b1d7aa634aa8163c520a92f97 --- lib/swift | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/swift b/lib/swift index 3bf2b7804f..96929db557 100644 --- a/lib/swift +++ b/lib/swift @@ -378,8 +378,8 @@ EOF iniuncomment ${swift_node_config} DEFAULT log_facility iniset ${swift_node_config} DEFAULT log_facility LOG_LOCAL${log_facility} - iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT workers - iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT workers 1 + iniuncomment ${swift_node_config} DEFAULT workers + iniset ${swift_node_config} DEFAULT workers 1 iniuncomment ${swift_node_config} DEFAULT disable_fallocate iniset ${swift_node_config} DEFAULT disable_fallocate true From 16312738d1a8302537e76e1e6cdeac85d63b64aa Mon Sep 17 00:00:00 2001 From: Jianing Yang Date: Sun, 22 Dec 2013 10:47:39 +0800 Subject: [PATCH 0587/4704] Correct glance db_sync command Closes-Bug: #1263431 Change-Id: I30a53adfdd8e00a9995595af2e090190bac241a0 --- lib/glance | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/glance b/lib/glance index 
b278796d21..135136db7e 100644 --- a/lib/glance +++ b/lib/glance @@ -176,7 +176,7 @@ function init_glance() { recreate_database glance utf8 # Migrate glance database - $GLANCE_BIN_DIR/glance-manage db sync + $GLANCE_BIN_DIR/glance-manage db_sync create_glance_cache_dir } From 6fbb28d021d168271bb2a0643059e8c65c8ce74b Mon Sep 17 00:00:00 2001 From: Salvatore Orlando Date: Sun, 22 Dec 2013 07:59:37 -0800 Subject: [PATCH 0588/4704] Neutron: create network resources when agents are started Creating network resources before the agents start with the ml2 plugin might result in binding failures for some resources such as DHCP ports because the resources are created before the agents report to the server. This patch should ensure all agents have started and reported their state to the server before creating network resources. Change-Id: Ifafb73bd3c5409a555a573ad9a94b96d79061c38 Related-Bug: #1253896 --- stack.sh | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/stack.sh b/stack.sh index ce5fbd47e5..d54a652928 100755 --- a/stack.sh +++ b/stack.sh @@ -1104,10 +1104,7 @@ fi if is_service_enabled q-svc; then echo_summary "Starting Neutron" - start_neutron_service_and_check - create_neutron_initial_network - setup_neutron_debug elif is_service_enabled $DATABASE_BACKENDS && is_service_enabled n-net; then NM_CONF=${NOVA_CONF} if is_service_enabled n-cell; then @@ -1127,6 +1124,12 @@ fi if is_service_enabled neutron; then start_neutron_agents fi +# Once neutron agents are started setup initial network elements +if is_service_enabled q-svc; then + echo_summary "Creating initial neutron network elements" + create_neutron_initial_network + setup_neutron_debug fi if is_service_enabled nova; then echo_summary "Starting Nova" start_nova From 60fcfb5c91063bb71252b7077a363092d8bebe2b Mon Sep 17 00:00:00 2001 From: Bob Ball Date: Mon, 23 Dec 2013 17:23:47 +0000 Subject: [PATCH 0589/4704] XenAPI: Fix bug with Xen ext4-using guests Ubuntu saucy is using ext4, which
means it hits a barrier bug with certain versions of Xen, leading to a read only filesystem. This is bug https://bugs.launchpad.net/ubuntu/+source/linux/+bug/824089 Change-Id: I9a72b203d473dc555324d44ad7c240c80dccda15 --- tools/xen/prepare_guest_template.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tools/xen/prepare_guest_template.sh b/tools/xen/prepare_guest_template.sh index 6ea6f6321d..546ac99cd9 100755 --- a/tools/xen/prepare_guest_template.sh +++ b/tools/xen/prepare_guest_template.sh @@ -79,3 +79,7 @@ bash /opt/stack/prepare_guest.sh \\ "$GUEST_PASSWORD" "$XS_TOOLS_PATH" "$STACK_USER" \\ > /opt/stack/prepare_guest.log 2>&1 EOF + +# Need to set barrier=0 to avoid a Xen bug +# https://bugs.launchpad.net/ubuntu/+source/linux/+bug/824089 +sed -i -e 's/errors=/barrier=0,errors=/' $STAGING_DIR/etc/fstab From e4b85590037974b04487be5b4e23166a8a35d9dc Mon Sep 17 00:00:00 2001 From: Ben Nemec Date: Fri, 20 Dec 2013 19:51:04 +0000 Subject: [PATCH 0590/4704] Set default_network in tempest.conf This is to support testing of change Ia78582cac3790653c2281a5b63d953cd46d5c290 in Tempest. 
Change-Id: Ibb812e2598fb11b7eef21a0868ee9baeea73186c --- lib/tempest | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/tempest b/lib/tempest index 0969b2df1f..95b300ce77 100644 --- a/lib/tempest +++ b/lib/tempest @@ -283,6 +283,7 @@ function configure_tempest() { iniset $TEMPEST_CONF network tenant_networks_reachable "$tenant_networks_reachable" iniset $TEMPEST_CONF network public_network_id "$public_network_id" iniset $TEMPEST_CONF network public_router_id "$public_router_id" + iniset $TEMPEST_CONF network default_network "$FIXED_RANGE" # boto iniset $TEMPEST_CONF boto ec2_url "http://$SERVICE_HOST:8773/services/Cloud" From 3bd85c9d6e257fc952cb3c6d0c09e199685bd5ed Mon Sep 17 00:00:00 2001 From: Ken'ichi Ohmichi Date: Wed, 25 Dec 2013 22:14:11 +0900 Subject: [PATCH 0591/4704] Change the libvirtd log level to DEBUG Gate tests fail sometimes due to libvirt problems, but it is difficult to investigate their reasons or workarounds because there is not any log about libvirt. This patch changes the log level of libvirtd to DEBUG for investigating libvirt problems. Change-Id: Ib6559ff978fa813d0332f2339d241dd3437196ee Related-Bug: #1254872 --- lib/nova_plugins/hypervisor-libvirt | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt index 6f90f4ac17..ef40e7ab4c 100644 --- a/lib/nova_plugins/hypervisor-libvirt +++ b/lib/nova_plugins/hypervisor-libvirt @@ -93,6 +93,9 @@ EOF" fi fi + # Change the libvirtd log level to DEBUG. + sudo sed -i s/"#log_level = 3"/"log_level = 1"/ /etc/libvirt/libvirtd.conf + # The user that nova runs as needs to be member of **libvirtd** group otherwise # nova-compute will be unable to use libvirt. if ! 
getent group $LIBVIRT_GROUP >/dev/null; then From 9aadec380605e4b2aab0fb159c4186618a284853 Mon Sep 17 00:00:00 2001 From: Ken'ichi Ohmichi Date: Fri, 27 Dec 2013 19:08:26 +0900 Subject: [PATCH 0592/4704] Remove some keystone resource parsers Current "keystone" command can parse the specified resources(tenant, user, role, service) by itself. Then it is unnecessary to translate resource names to resource ids in devstack. This patch removes these resource parsers from devstack for cleanup. Change-Id: Ibae06581b471f02168b559b4ca0c10f14996d661 --- files/keystone_data.sh | 113 +++++++++++++++++++---------------------- 1 file changed, 51 insertions(+), 62 deletions(-) diff --git a/files/keystone_data.sh b/files/keystone_data.sh index ea2d52d114..07b6b601d2 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -28,16 +28,6 @@ export SERVICE_TOKEN=$SERVICE_TOKEN export SERVICE_ENDPOINT=$SERVICE_ENDPOINT SERVICE_TENANT_NAME=${SERVICE_TENANT_NAME:-service} -function get_id () { - echo `"$@" | awk '/ id / { print $4 }'` -} - -# Lookups -SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") -ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }") -MEMBER_ROLE=$(keystone role-list | awk "/ Member / { print \$2 }") - - # Roles # ----- @@ -45,53 +35,52 @@ MEMBER_ROLE=$(keystone role-list | awk "/ Member / { print \$2 }") # The admin role in swift allows a user to act as an admin for their tenant, # but ResellerAdmin is needed for a user to act as any tenant. 
The name of this # role is also configurable in swift-proxy.conf -RESELLER_ROLE=$(get_id keystone role-create --name=ResellerAdmin) +keystone role-create --name=ResellerAdmin # Service role, so service users do not have to be admins -SERVICE_ROLE=$(get_id keystone role-create --name=service) +keystone role-create --name=service # Services # -------- if [[ "$ENABLED_SERVICES" =~ "n-api" ]] && [[ "$ENABLED_SERVICES" =~ "s-proxy" || "$ENABLED_SERVICES" =~ "swift" ]]; then - NOVA_USER=$(keystone user-list | awk "/ nova / { print \$2 }") # Nova needs ResellerAdmin role to download images when accessing # swift through the s3 api. keystone user-role-add \ - --tenant-id $SERVICE_TENANT \ - --user-id $NOVA_USER \ - --role-id $RESELLER_ROLE + --tenant $SERVICE_TENANT_NAME \ + --user nova \ + --role ResellerAdmin fi # Heat if [[ "$ENABLED_SERVICES" =~ "heat" ]]; then - HEAT_USER=$(get_id keystone user-create --name=heat \ + keystone user-create --name=heat \ --pass="$SERVICE_PASSWORD" \ - --tenant_id $SERVICE_TENANT \ - --email=heat@example.com) - keystone user-role-add --tenant-id $SERVICE_TENANT \ - --user-id $HEAT_USER \ - --role-id $SERVICE_ROLE + --tenant $SERVICE_TENANT_NAME \ + --email=heat@example.com + keystone user-role-add --tenant $SERVICE_TENANT_NAME \ + --user heat \ + --role service # heat_stack_user role is for users created by Heat keystone role-create --name heat_stack_user if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - HEAT_CFN_SERVICE=$(get_id keystone service-create \ + keystone service-create \ --name=heat-cfn \ --type=cloudformation \ - --description="Heat CloudFormation Service") + --description="Heat CloudFormation Service" keystone endpoint-create \ --region RegionOne \ - --service_id $HEAT_CFN_SERVICE \ + --service heat-cfn \ --publicurl "http://$SERVICE_HOST:$HEAT_API_CFN_PORT/v1" \ --adminurl "http://$SERVICE_HOST:$HEAT_API_CFN_PORT/v1" \ --internalurl "http://$SERVICE_HOST:$HEAT_API_CFN_PORT/v1" - HEAT_SERVICE=$(get_id keystone 
service-create \ + keystone service-create \ --name=heat \ --type=orchestration \ - --description="Heat Service") + --description="Heat Service" keystone endpoint-create \ --region RegionOne \ - --service_id $HEAT_SERVICE \ + --service heat \ --publicurl "http://$SERVICE_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" \ --adminurl "http://$SERVICE_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" \ --internalurl "http://$SERVICE_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" @@ -100,23 +89,23 @@ fi # Glance if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then - GLANCE_USER=$(get_id keystone user-create \ + keystone user-create \ --name=glance \ --pass="$SERVICE_PASSWORD" \ - --tenant_id $SERVICE_TENANT \ - --email=glance@example.com) + --tenant $SERVICE_TENANT_NAME \ + --email=glance@example.com keystone user-role-add \ - --tenant-id $SERVICE_TENANT \ - --user-id $GLANCE_USER \ - --role-id $ADMIN_ROLE + --tenant $SERVICE_TENANT_NAME \ + --user glance \ + --role admin if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - GLANCE_SERVICE=$(get_id keystone service-create \ + keystone service-create \ --name=glance \ --type=image \ - --description="Glance Image Service") + --description="Glance Image Service" keystone endpoint-create \ --region RegionOne \ - --service_id $GLANCE_SERVICE \ + --service glance \ --publicurl "http://$SERVICE_HOST:9292" \ --adminurl "http://$SERVICE_HOST:9292" \ --internalurl "http://$SERVICE_HOST:9292" @@ -125,25 +114,25 @@ fi # Ceilometer if [[ "$ENABLED_SERVICES" =~ "ceilometer" ]]; then - CEILOMETER_USER=$(get_id keystone user-create --name=ceilometer \ + keystone user-create --name=ceilometer \ --pass="$SERVICE_PASSWORD" \ - --tenant_id $SERVICE_TENANT \ - --email=ceilometer@example.com) - keystone user-role-add --tenant-id $SERVICE_TENANT \ - --user-id $CEILOMETER_USER \ - --role-id $ADMIN_ROLE + --tenant $SERVICE_TENANT_NAME \ + --email=ceilometer@example.com + keystone user-role-add --tenant $SERVICE_TENANT_NAME \ + --user ceilometer \ + --role admin # Ceilometer needs 
ResellerAdmin role to access swift account stats. - keystone user-role-add --tenant-id $SERVICE_TENANT \ - --user-id $CEILOMETER_USER \ - --role-id $RESELLER_ROLE + keystone user-role-add --tenant $SERVICE_TENANT_NAME \ + --user ceilometer \ + --role ResellerAdmin if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - CEILOMETER_SERVICE=$(get_id keystone service-create \ + keystone service-create \ --name=ceilometer \ --type=metering \ - --description="Ceilometer Service") + --description="Ceilometer Service" keystone endpoint-create \ --region RegionOne \ - --service_id $CEILOMETER_SERVICE \ + --service ceilometer \ --publicurl "http://$SERVICE_HOST:8777" \ --adminurl "http://$SERVICE_HOST:8777" \ --internalurl "http://$SERVICE_HOST:8777" @@ -153,13 +142,13 @@ fi # EC2 if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - EC2_SERVICE=$(get_id keystone service-create \ + keystone service-create \ --name=ec2 \ --type=ec2 \ - --description="EC2 Compatibility Layer") + --description="EC2 Compatibility Layer" keystone endpoint-create \ --region RegionOne \ - --service_id $EC2_SERVICE \ + --service ec2 \ --publicurl "http://$SERVICE_HOST:8773/services/Cloud" \ --adminurl "http://$SERVICE_HOST:8773/services/Admin" \ --internalurl "http://$SERVICE_HOST:8773/services/Cloud" @@ -169,13 +158,13 @@ fi # S3 if [[ "$ENABLED_SERVICES" =~ "n-obj" || "$ENABLED_SERVICES" =~ "swift3" ]]; then if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - S3_SERVICE=$(get_id keystone service-create \ + keystone service-create \ --name=s3 \ --type=s3 \ - --description="S3") + --description="S3" keystone endpoint-create \ --region RegionOne \ - --service_id $S3_SERVICE \ + --service s3 \ --publicurl "http://$SERVICE_HOST:$S3_SERVICE_PORT" \ --adminurl "http://$SERVICE_HOST:$S3_SERVICE_PORT" \ --internalurl "http://$SERVICE_HOST:$S3_SERVICE_PORT" @@ -185,14 +174,14 @@ fi if [[ "$ENABLED_SERVICES" =~ "tempest" ]]; then # Tempest has some tests that 
validate various authorization checks # between two regular users in separate tenants - ALT_DEMO_TENANT=$(get_id keystone tenant-create \ - --name=alt_demo) - ALT_DEMO_USER=$(get_id keystone user-create \ + keystone tenant-create \ + --name=alt_demo + keystone user-create \ --name=alt_demo \ --pass="$ADMIN_PASSWORD" \ - --email=alt_demo@example.com) + --email=alt_demo@example.com keystone user-role-add \ - --tenant-id $ALT_DEMO_TENANT \ - --user-id $ALT_DEMO_USER \ - --role-id $MEMBER_ROLE + --tenant alt_demo \ + --user alt_demo \ + --role Member fi From 0f9a1b058423b293935b414b2035713d8ead3e71 Mon Sep 17 00:00:00 2001 From: Michael Basnight Date: Sat, 28 Dec 2013 03:42:07 +0000 Subject: [PATCH 0593/4704] Migrating trove to entry points partially implements blueprint entrypoints-for-binscripts Change-Id: Iaafde0ab7f27598d566fc008fba7eddc582139c9 --- lib/trove | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/lib/trove b/lib/trove index 6d5a56e456..f8e3eddfe2 100644 --- a/lib/trove +++ b/lib/trove @@ -30,7 +30,13 @@ TROVECLIENT_DIR=$DEST/python-troveclient TROVE_CONF_DIR=/etc/trove TROVE_LOCAL_CONF_DIR=$TROVE_DIR/etc/trove TROVE_AUTH_CACHE_DIR=${TROVE_AUTH_CACHE_DIR:-/var/cache/trove} -TROVE_BIN_DIR=/usr/local/bin + +# Support entry points installation of console scripts +if [[ -d $TROVE_DIR/bin ]]; then + TROVE_BIN_DIR=$TROVE_DIR/bin +else + TROVE_BIN_DIR=$(get_python_exec_prefix) +fi # setup_trove_logging() - Adds logging configuration to conf files function setup_trove_logging() { @@ -178,14 +184,14 @@ function init_trove() { recreate_database trove utf8 #Initialize the trove database - $TROVE_DIR/bin/trove-manage db_sync + $TROVE_BIN_DIR/trove-manage db_sync } # start_trove() - Start running processes, including screen function start_trove() { - screen_it tr-api "cd $TROVE_DIR; bin/trove-api --config-file=$TROVE_CONF_DIR/trove.conf --debug 2>&1" - screen_it tr-tmgr "cd $TROVE_DIR; bin/trove-taskmanager 
--config-file=$TROVE_CONF_DIR/trove-taskmanager.conf --debug 2>&1" - screen_it tr-cond "cd $TROVE_DIR; bin/trove-conductor --config-file=$TROVE_CONF_DIR/trove-conductor.conf --debug 2>&1" + screen_it tr-api "cd $TROVE_DIR; $TROVE_BIN_DIR/trove-api --config-file=$TROVE_CONF_DIR/trove.conf --debug 2>&1" + screen_it tr-tmgr "cd $TROVE_DIR; $TROVE_BIN_DIR/trove-taskmanager --config-file=$TROVE_CONF_DIR/trove-taskmanager.conf --debug 2>&1" + screen_it tr-cond "cd $TROVE_DIR; $TROVE_BIN_DIR/trove-conductor --config-file=$TROVE_CONF_DIR/trove-conductor.conf --debug 2>&1" } # stop_trove() - Stop running processes From 3ee52c81a12f1b823c1bc22e39d9f09a8d8b2ca8 Mon Sep 17 00:00:00 2001 From: Ben Nemec Date: Thu, 12 Dec 2013 19:26:12 +0000 Subject: [PATCH 0594/4704] Ensure hostname resolves correctly rabbitmq hangs on startup if the unqualified hostname for the system doesn't resolve properly. This change ensures that the hostname is added to /etc/hosts so that will never happen with devstack. Change-Id: I2c250f38f9feb18d1a59f3a457c6d01c1d98499c --- stack.sh | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/stack.sh b/stack.sh index ce5fbd47e5..3ee6b1cde2 100755 --- a/stack.sh +++ b/stack.sh @@ -234,6 +234,13 @@ safe_chmod 0755 $DEST # a basic test for $DEST path permissions (fatal on error unless skipped) check_path_perm_sanity ${DEST} +# Certain services such as rabbitmq require that the local hostname resolves +# correctly. Make sure it exists in /etc/hosts so that is always true. +LOCAL_HOSTNAME=`hostname -s` +if [ -z "`grep ^127.0.0.1 /etc/hosts | grep $LOCAL_HOSTNAME`" ]; then + sudo sed -i "s/\(^127.0.0.1.*\)/\1 $LOCAL_HOSTNAME/" /etc/hosts +fi + # Set ``OFFLINE`` to ``True`` to configure ``stack.sh`` to run cleanly without # Internet access. ``stack.sh`` must have been previously run with Internet # access to install prerequisites and fetch repositories. 
From 00b434182e3c04976e03b94490359fa26e71ef69 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Thu, 2 Jan 2014 10:33:21 +0000 Subject: [PATCH 0595/4704] Handle more nicely when role root is already here When using postgresql we were handling the fallback if the role root was already here but this was still printing an error message, try to make it a bit smarter. Closes-Bug: #1265477 Change-Id: Ib3768dd182ab968e81038f900550f641b9a2af5c --- lib/databases/postgresql | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/lib/databases/postgresql b/lib/databases/postgresql index 519479ad68..60e5a33715 100644 --- a/lib/databases/postgresql +++ b/lib/databases/postgresql @@ -64,9 +64,13 @@ function configure_database_postgresql { sudo sed -i "/^host/s/all\s\+::1\/128\s\+ident/$DATABASE_USER\t::0\/0\tpassword/" $PG_HBA restart_service postgresql - # If creating the role fails, chances are it already existed. Try to alter it. - sudo -u root sudo -u postgres -i psql -c "CREATE ROLE $DATABASE_USER WITH SUPERUSER LOGIN PASSWORD '$DATABASE_PASSWORD'" || \ - sudo -u root sudo -u postgres -i psql -c "ALTER ROLE $DATABASE_USER WITH SUPERUSER LOGIN PASSWORD '$DATABASE_PASSWORD'" + # Create the role if it's not here or else alter it. + root_roles=$(sudo -u root sudo -u postgres -i psql -t -c "SELECT 'HERE' from pg_roles where rolname='root'") + if [[ ${root_roles} == *HERE ]];then + sudo -u root sudo -u postgres -i psql -c "ALTER ROLE $DATABASE_USER WITH SUPERUSER LOGIN PASSWORD '$DATABASE_PASSWORD'" + else + sudo -u root sudo -u postgres -i psql -c "CREATE ROLE $DATABASE_USER WITH SUPERUSER LOGIN PASSWORD '$DATABASE_PASSWORD'" + fi } function install_database_postgresql { From 0915e0c6bd9d9d370fbf05963704690580af62ec Mon Sep 17 00:00:00 2001 From: Thierry Carrez Date: Thu, 2 Jan 2014 15:05:41 +0100 Subject: [PATCH 0596/4704] Add oslo.rootwrap to devstack gate oslo.rootwrap recently graduated but was not made part of the devstack-gate. 
This change is part of a series of changes affecting devstack-gate, config and devstack which will collectively fix this: https://review.openstack.org/#/q/status:open+topic:rootwrap-gate,n,z This should probably be merged once the config and devstack-gate changes are in, so that it can be self-testing. Change-Id: I7b1332c8004845a0dd76e27d871370d41d4524ac --- lib/oslo | 4 ++++ stackrc | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/lib/oslo b/lib/oslo index 816ae9a48a..f644ed76c3 100644 --- a/lib/oslo +++ b/lib/oslo @@ -22,6 +22,7 @@ set +o xtrace # -------- OSLOCFG_DIR=$DEST/oslo.config OSLOMSG_DIR=$DEST/oslo.messaging +OSLORWRAP_DIR=$DEST/oslo.rootwrap # Entry Points # ------------ @@ -37,6 +38,9 @@ function install_oslo() { git_clone $OSLOMSG_REPO $OSLOMSG_DIR $OSLOMSG_BRANCH setup_develop $OSLOMSG_DIR + + git_clone $OSLORWRAP_REPO $OSLORWRAP_DIR $OSLORWRAP_BRANCH + setup_develop $OSLORWRAP_DIR } # cleanup_oslo() - purge possibly old versions of oslo diff --git a/stackrc b/stackrc index 695bdb15d6..3fdc566ed2 100644 --- a/stackrc +++ b/stackrc @@ -136,6 +136,10 @@ OSLOCFG_BRANCH=${OSLOCFG_BRANCH:-master} OSLOMSG_REPO=${OSLOMSG_REPO:-${GIT_BASE}/openstack/oslo.messaging.git} OSLOMSG_BRANCH=${OSLOMSG_BRANCH:-master} +# oslo.rootwrap +OSLORWRAP_REPO=${OSLORWRAP_REPO:-${GIT_BASE}/openstack/oslo.rootwrap.git} +OSLORWRAP_BRANCH=${OSLORWRAP_BRANCH:-master} + # pbr drives the setuptools configs PBR_REPO=${PBR_REPO:-${GIT_BASE}/openstack-dev/pbr.git} PBR_BRANCH=${PBR_BRANCH:-master} From 05952e3fcc6bdd9ccd1c7980e6a73c527711c08c Mon Sep 17 00:00:00 2001 From: armando-migliaccio Date: Sun, 5 Jan 2014 07:59:06 -0800 Subject: [PATCH 0597/4704] Configuration changes required to support VMware NSX plugin (Formerly known as Nicira NVP plugin). Following Neutron change 79fbeb7ebebc0dfbe143aee96fbc250d1b9e7582, this patch introduces the new naming scheme for Neutron VMware NSX plugin configuration. 
Related-blueprint: nvp-third-part-support (aka bp vmware-nsx-third-party) Partial-implements blueprint: nicira-plugin-renaming Change-Id: If7790887661507bfdec6d2b97c0f99609039aa73 --- exercises/neutron-adv-test.sh | 4 +- lib/neutron_plugins/{nicira => vmware_nsx} | 77 ++++++++++--------- lib/neutron_thirdparty/{nicira => vmware_nsx} | 62 +++++++-------- 3 files changed, 72 insertions(+), 71 deletions(-) rename lib/neutron_plugins/{nicira => vmware_nsx} (59%) rename lib/neutron_thirdparty/{nicira => vmware_nsx} (50%) diff --git a/exercises/neutron-adv-test.sh b/exercises/neutron-adv-test.sh index 0a100c0fe8..0c0d42f458 100755 --- a/exercises/neutron-adv-test.sh +++ b/exercises/neutron-adv-test.sh @@ -400,10 +400,10 @@ main() { echo Description echo echo Copyright 2012, Cisco Systems - echo Copyright 2012, Nicira Networks, Inc. + echo Copyright 2012, VMware, Inc. echo Copyright 2012, NTT MCL, Inc. echo - echo Please direct any questions to dedutta@cisco.com, dan@nicira.com, nachi@nttmcl.com + echo Please direct any questions to dedutta@cisco.com, dwendlandt@vmware.com, nachi@nttmcl.com echo diff --git a/lib/neutron_plugins/nicira b/lib/neutron_plugins/vmware_nsx similarity index 59% rename from lib/neutron_plugins/nicira rename to lib/neutron_plugins/vmware_nsx index 87d3c3d17b..d506cb6f8d 100644 --- a/lib/neutron_plugins/nicira +++ b/lib/neutron_plugins/vmware_nsx @@ -1,5 +1,5 @@ -# Neutron Nicira NVP plugin -# --------------------------- +# Neutron VMware NSX plugin +# ------------------------- # Save trace setting MY_XTRACE=$(set +o | grep xtrace) @@ -9,10 +9,10 @@ source $TOP_DIR/lib/neutron_plugins/ovs_base function setup_integration_bridge() { _neutron_ovs_base_setup_bridge $OVS_BRIDGE - # Set manager to NVP controller (1st of list) - if [[ "$NVP_CONTROLLERS" != "" ]]; then + # Set manager to NSX controller (1st of list) + if [[ "$NSX_CONTROLLERS" != "" ]]; then # Get the first controller - controllers=(${NVP_CONTROLLERS//,/ }) + 
controllers=(${NSX_CONTROLLERS//,/ }) OVS_MGR_IP=${controllers[0]} else die $LINENO "Error - No controller specified. Unable to set a manager for OVS" @@ -21,7 +21,7 @@ function setup_integration_bridge() { } function is_neutron_ovs_base_plugin() { - # NVP uses OVS, but not the l3-agent + # NSX uses OVS, but not the l3-agent return 0 } @@ -33,14 +33,15 @@ function neutron_plugin_create_nova_conf() { } function neutron_plugin_install_agent_packages() { - # Nicira Plugin does not run q-agt, but it currently needs dhcp and metadata agents + # VMware NSX Plugin does not run q-agt, but it currently needs dhcp and metadata agents _neutron_ovs_base_install_agent_packages } function neutron_plugin_configure_common() { - Q_PLUGIN_CONF_PATH=etc/neutron/plugins/nicira - Q_PLUGIN_CONF_FILENAME=nvp.ini - Q_DB_NAME="neutron_nvp" + Q_PLUGIN_CONF_PATH=etc/neutron/plugins/vmware + Q_PLUGIN_CONF_FILENAME=nsx.ini + Q_DB_NAME="neutron_nsx" + # TODO(armando-migliaccio): rename this once the code rename is complete Q_PLUGIN_CLASS="neutron.plugins.nicira.NeutronPlugin.NvpPluginV2" } @@ -57,76 +58,76 @@ function neutron_plugin_configure_dhcp_agent() { } function neutron_plugin_configure_l3_agent() { - # Nicira plugin does not run L3 agent - die $LINENO "q-l3 should must not be executed with Nicira plugin!" + # VMware NSX plugin does not run L3 agent + die $LINENO "q-l3 should must not be executed with VMware NSX plugin!" } function neutron_plugin_configure_plugin_agent() { - # Nicira plugin does not run L2 agent - die $LINENO "q-agt must not be executed with Nicira plugin!" + # VMware NSX plugin does not run L2 agent + die $LINENO "q-agt must not be executed with VMware NSX plugin!" 
} function neutron_plugin_configure_service() { if [[ "$MAX_LP_PER_BRIDGED_LS" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE nvp max_lp_per_bridged_ls $MAX_LP_PER_BRIDGED_LS + iniset /$Q_PLUGIN_CONF_FILE nsx max_lp_per_bridged_ls $MAX_LP_PER_BRIDGED_LS fi if [[ "$MAX_LP_PER_OVERLAY_LS" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE nvp max_lp_per_overlay_ls $MAX_LP_PER_OVERLAY_LS + iniset /$Q_PLUGIN_CONF_FILE nsx max_lp_per_overlay_ls $MAX_LP_PER_OVERLAY_LS fi if [[ "$FAILOVER_TIME" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE nvp failover_time $FAILOVER_TIME + iniset /$Q_PLUGIN_CONF_FILE nsx failover_time $FAILOVER_TIME fi if [[ "$CONCURRENT_CONNECTIONS" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE nvp concurrent_connections $CONCURRENT_CONNECTIONS + iniset /$Q_PLUGIN_CONF_FILE nsx concurrent_connections $CONCURRENT_CONNECTIONS fi if [[ "$DEFAULT_TZ_UUID" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE DEFAULT default_tz_uuid $DEFAULT_TZ_UUID else - die $LINENO "The nicira plugin won't work without a default transport zone." + die $LINENO "The VMware NSX plugin won't work without a default transport zone." fi if [[ "$DEFAULT_L3_GW_SVC_UUID" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE DEFAULT default_l3_gw_service_uuid $DEFAULT_L3_GW_SVC_UUID Q_L3_ENABLED=True Q_L3_ROUTER_PER_TENANT=True - iniset /$Q_PLUGIN_CONF_FILE nvp metadata_mode access_network + iniset /$Q_PLUGIN_CONF_FILE nsx metadata_mode access_network fi if [[ "$DEFAULT_L2_GW_SVC_UUID" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE DEFAULT default_l2_gw_service_uuid $DEFAULT_L2_GW_SVC_UUID fi - # NVP_CONTROLLERS must be a comma separated string - if [[ "$NVP_CONTROLLERS" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE DEFAULT nvp_controllers $NVP_CONTROLLERS + # NSX_CONTROLLERS must be a comma separated string + if [[ "$NSX_CONTROLLERS" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE DEFAULT nsx_controllers $NSX_CONTROLLERS else - die $LINENO "The nicira plugin needs at least an NVP controller." 
+ die $LINENO "The VMware NSX plugin needs at least an NSX controller." fi - if [[ "$NVP_USER" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE DEFAULT nvp_user $NVP_USER + if [[ "$NSX_USER" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE DEFAULT nsx_user $NSX_USER fi - if [[ "$NVP_PASSWORD" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE DEFAULT nvp_password $NVP_PASSWORD + if [[ "$NSX_PASSWORD" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE DEFAULT nsx_password $NSX_PASSWORD fi - if [[ "$NVP_REQ_TIMEOUT" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE DEFAULT req_timeout $NVP_REQ_TIMEOUT + if [[ "$NSX_REQ_TIMEOUT" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE DEFAULT req_timeout $NSX_REQ_TIMEOUT fi - if [[ "$NVP_HTTP_TIMEOUT" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE DEFAULT http_timeout $NVP_HTTP_TIMEOUT + if [[ "$NSX_HTTP_TIMEOUT" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE DEFAULT http_timeout $NSX_HTTP_TIMEOUT fi - if [[ "$NVP_RETRIES" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE DEFAULT retries $NVP_RETRIES + if [[ "$NSX_RETRIES" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE DEFAULT retries $NSX_RETRIES fi - if [[ "$NVP_REDIRECTS" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE DEFAULT redirects $NVP_REDIRECTS + if [[ "$NSX_REDIRECTS" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE DEFAULT redirects $NSX_REDIRECTS fi if [[ "$AGENT_MODE" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE nvp agent_mode $AGENT_MODE + iniset /$Q_PLUGIN_CONF_FILE nsx agent_mode $AGENT_MODE if [[ "$AGENT_MODE" == "agentless" ]]; then if [[ "$DEFAULT_SERVICE_CLUSTER_UUID" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE DEFAULT default_service_cluster_uuid $DEFAULT_SERVICE_CLUSTER_UUID else die $LINENO "Agentless mode requires a service cluster." 
fi - iniset /$Q_PLUGIN_CONF_FILE nvp_metadata metadata_server_address $Q_META_DATA_IP + iniset /$Q_PLUGIN_CONF_FILE nsx_metadata metadata_server_address $Q_META_DATA_IP fi fi } diff --git a/lib/neutron_thirdparty/nicira b/lib/neutron_thirdparty/vmware_nsx similarity index 50% rename from lib/neutron_thirdparty/nicira rename to lib/neutron_thirdparty/vmware_nsx index a24392cd4d..70d348274f 100644 --- a/lib/neutron_thirdparty/nicira +++ b/lib/neutron_thirdparty/vmware_nsx @@ -1,14 +1,14 @@ -# Nicira NVP +# VMware NSX # ---------- # This third-party addition can be used to configure connectivity between a DevStack instance -# and an NVP Gateway in dev/test environments. In order to use this correctly, the following +# and an NSX Gateway in dev/test environments. In order to use this correctly, the following # env variables need to be set (e.g. in your localrc file): # -# * enable_service nicira --> to execute this third-party addition +# * enable_service vmware_nsx --> to execute this third-party addition # * PUBLIC_BRIDGE --> bridge used for external connectivity, typically br-ex -# * NVP_GATEWAY_NETWORK_INTERFACE --> interface used to communicate with the NVP Gateway -# * NVP_GATEWAY_NETWORK_CIDR --> CIDR to configure br-ex, e.g. 172.24.4.211/24 +# * NSX_GATEWAY_NETWORK_INTERFACE --> interface used to communicate with the NSX Gateway +# * NSX_GATEWAY_NETWORK_CIDR --> CIDR to configure br-ex, e.g. 
172.24.4.211/24 # Save trace setting MY_XTRACE=$(set +o | grep xtrace) @@ -17,64 +17,64 @@ set +o xtrace # This is the interface that connects the Devstack instance # to an network that allows it to talk to the gateway for # testing purposes -NVP_GATEWAY_NETWORK_INTERFACE=${NVP_GATEWAY_NETWORK_INTERFACE:-eth2} -# Re-declare floating range as it's needed also in stop_nicira, which +NSX_GATEWAY_NETWORK_INTERFACE=${NSX_GATEWAY_NETWORK_INTERFACE:-eth2} +# Re-declare floating range as it's needed also in stop_vmware_nsx, which # is invoked by unstack.sh FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.0/24} -function configure_nicira() { +function configure_vmware_nsx() { : } -function init_nicira() { - if ! is_set NVP_GATEWAY_NETWORK_CIDR; then - NVP_GATEWAY_NETWORK_CIDR=$PUBLIC_NETWORK_GATEWAY/${FLOATING_RANGE#*/} +function init_vmware_nsx() { + if ! is_set NSX_GATEWAY_NETWORK_CIDR; then + NSX_GATEWAY_NETWORK_CIDR=$PUBLIC_NETWORK_GATEWAY/${FLOATING_RANGE#*/} echo "The IP address to set on br-ex was not specified. 
" - echo "Defaulting to "$NVP_GATEWAY_NETWORK_CIDR + echo "Defaulting to "$NSX_GATEWAY_NETWORK_CIDR fi # Make sure the interface is up, but not configured - sudo ip link set $NVP_GATEWAY_NETWORK_INTERFACE up + sudo ip link set $NSX_GATEWAY_NETWORK_INTERFACE up # Save and then flush the IP addresses on the interface - addresses=$(ip addr show dev $NVP_GATEWAY_NETWORK_INTERFACE | grep inet | awk {'print $2'}) - sudo ip addr flush $NVP_GATEWAY_NETWORK_INTERFACE - # Use the PUBLIC Bridge to route traffic to the NVP gateway + addresses=$(ip addr show dev $NSX_GATEWAY_NETWORK_INTERFACE | grep inet | awk {'print $2'}) + sudo ip addr flush $NSX_GATEWAY_NETWORK_INTERFACE + # Use the PUBLIC Bridge to route traffic to the NSX gateway # NOTE(armando-migliaccio): if running in a nested environment this will work # only with mac learning enabled, portsecurity and security profiles disabled - # The public bridge might not exist for the NVP plugin if Q_USE_DEBUG_COMMAND is off + # The public bridge might not exist for the NSX plugin if Q_USE_DEBUG_COMMAND is off # Try to create it anyway sudo ovs-vsctl --no-wait -- --may-exist add-br $PUBLIC_BRIDGE - sudo ovs-vsctl -- --may-exist add-port $PUBLIC_BRIDGE $NVP_GATEWAY_NETWORK_INTERFACE - nvp_gw_net_if_mac=$(ip link show $NVP_GATEWAY_NETWORK_INTERFACE | awk '/ether/ {print $2}') - sudo ip link set address $nvp_gw_net_if_mac dev $PUBLIC_BRIDGE + sudo ovs-vsctl -- --may-exist add-port $PUBLIC_BRIDGE $NSX_GATEWAY_NETWORK_INTERFACE + nsx_gw_net_if_mac=$(ip link show $NSX_GATEWAY_NETWORK_INTERFACE | awk '/ether/ {print $2}') + sudo ip link set address $nsx_gw_net_if_mac dev $PUBLIC_BRIDGE for address in $addresses; do sudo ip addr add dev $PUBLIC_BRIDGE $address done - sudo ip addr add dev $PUBLIC_BRIDGE $NVP_GATEWAY_NETWORK_CIDR + sudo ip addr add dev $PUBLIC_BRIDGE $NSX_GATEWAY_NETWORK_CIDR } -function install_nicira() { +function install_vmware_nsx() { : } -function start_nicira() { +function start_vmware_nsx() { : } -function 
stop_nicira() { - if ! is_set NVP_GATEWAY_NETWORK_CIDR; then - NVP_GATEWAY_NETWORK_CIDR=$PUBLIC_NETWORK_GATEWAY/${FLOATING_RANGE#*/} +function stop_vmware_nsx() { + if ! is_set NSX_GATEWAY_NETWORK_CIDR; then + NSX_GATEWAY_NETWORK_CIDR=$PUBLIC_NETWORK_GATEWAY/${FLOATING_RANGE#*/} echo "The IP address expected on br-ex was not specified. " - echo "Defaulting to "$NVP_GATEWAY_NETWORK_CIDR + echo "Defaulting to "$NSX_GATEWAY_NETWORK_CIDR fi - sudo ip addr del $NVP_GATEWAY_NETWORK_CIDR dev $PUBLIC_BRIDGE + sudo ip addr del $NSX_GATEWAY_NETWORK_CIDR dev $PUBLIC_BRIDGE # Save and then flush remaining addresses on the interface addresses=$(ip addr show dev $PUBLIC_BRIDGE | grep inet | awk {'print $2'}) sudo ip addr flush $PUBLIC_BRIDGE # Try to detach physical interface from PUBLIC_BRIDGE - sudo ovs-vsctl del-port $NVP_GATEWAY_NETWORK_INTERFACE - # Restore addresses on NVP_GATEWAY_NETWORK_INTERFACE + sudo ovs-vsctl del-port $NSX_GATEWAY_NETWORK_INTERFACE + # Restore addresses on NSX_GATEWAY_NETWORK_INTERFACE for address in $addresses; do - sudo ip addr add dev $NVP_GATEWAY_NETWORK_INTERFACE $address + sudo ip addr add dev $NSX_GATEWAY_NETWORK_INTERFACE $address done } From 21fe4e76d5453a252e802c5d5f487f88b896decf Mon Sep 17 00:00:00 2001 From: Vincent Hou Date: Thu, 21 Nov 2013 03:10:27 -0500 Subject: [PATCH 0598/4704] Add a flexible API version choice for Cinder, Glance and Heat The version of the authentication url is set to v1.0 for some projects by default. We can make it configurable via the parameter "$IDENTITY_API_VERSION". 
Closes-Bug: #1253539 Change-Id: I6640e345d1317b1308403c95b13f8a998320241b --- lib/cinder | 2 +- lib/glance | 4 ++-- lib/heat | 2 +- lib/keystone | 8 ++++++++ 4 files changed, 12 insertions(+), 4 deletions(-) diff --git a/lib/cinder b/lib/cinder index cbe732e9b0..099cfda7fd 100644 --- a/lib/cinder +++ b/lib/cinder @@ -341,7 +341,7 @@ function configure_cinder() { -e 's/snapshot_autoextend_percent =.*/snapshot_autoextend_percent = 20/' \ /etc/lvm/lvm.conf fi - iniset $CINDER_CONF keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT + configure_API_version $CINDER_CONF $IDENTITY_API_VERSION iniset $CINDER_CONF keystone_authtoken admin_user cinder iniset $CINDER_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME iniset $CINDER_CONF keystone_authtoken admin_password $SERVICE_PASSWORD diff --git a/lib/glance b/lib/glance index 135136db7e..321174e619 100644 --- a/lib/glance +++ b/lib/glance @@ -83,7 +83,7 @@ function configure_glance() { iniset $GLANCE_REGISTRY_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT iniset $GLANCE_REGISTRY_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL iniset $GLANCE_REGISTRY_CONF keystone_authtoken cafile $KEYSTONE_SSL_CA - iniset $GLANCE_REGISTRY_CONF keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/ + configure_API_version $GLANCE_REGISTRY_CONF $IDENTITY_API_VERSION iniset $GLANCE_REGISTRY_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME iniset $GLANCE_REGISTRY_CONF keystone_authtoken admin_user glance iniset $GLANCE_REGISTRY_CONF keystone_authtoken admin_password $SERVICE_PASSWORD @@ -101,7 +101,7 @@ function configure_glance() { iniset $GLANCE_API_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT iniset $GLANCE_API_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL iniset $GLANCE_API_CONF keystone_authtoken cafile $KEYSTONE_SSL_CA - iniset $GLANCE_API_CONF keystone_authtoken 
auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/ + configure_API_version $GLANCE_API_CONF $IDENTITY_API_VERSION iniset $GLANCE_API_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME iniset $GLANCE_API_CONF keystone_authtoken admin_user glance iniset $GLANCE_API_CONF keystone_authtoken admin_password $SERVICE_PASSWORD diff --git a/lib/heat b/lib/heat index e44a618162..59fd3d7a7a 100644 --- a/lib/heat +++ b/lib/heat @@ -95,7 +95,7 @@ function configure_heat() { iniset $HEAT_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST iniset $HEAT_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT iniset $HEAT_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL - iniset $HEAT_CONF keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0 + configure_API_version $HEAT_CONF $IDENTITY_API_VERSION iniset $HEAT_CONF keystone_authtoken cafile $KEYSTONE_SSL_CA iniset $HEAT_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME iniset $HEAT_CONF keystone_authtoken admin_user heat diff --git a/lib/keystone b/lib/keystone index 29b9604efe..79f1fd9e84 100644 --- a/lib/keystone +++ b/lib/keystone @@ -335,6 +335,14 @@ create_keystone_accounts() { fi } +# Configure the API version for the OpenStack projects. +# configure_API_version conf_file version +function configure_API_version() { + local conf_file=$1 + local api_version=$2 + iniset $conf_file keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v$api_version +} + # init_keystone() - Initialize databases, etc. function init_keystone() { if is_service_enabled ldap; then From 74103f2b3ffd047a4582ae9d37a057534cb6cce7 Mon Sep 17 00:00:00 2001 From: Nikhil Manchanda Date: Fri, 3 Jan 2014 13:53:14 -0800 Subject: [PATCH 0599/4704] Handle trove service availabilty in tempest. 
Partially implements blueprint: trove-tempest Change-Id: I5413a7afeffe670f6972b41d61dd27ed05da5ba2 --- lib/tempest | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index 95b300ce77..08c0553f03 100644 --- a/lib/tempest +++ b/lib/tempest @@ -329,7 +329,7 @@ function configure_tempest() { iniset $TEMPEST_CONF network-feature-enabled api_extensions "${NETWORK_API_EXTENSIONS:-all}" # service_available - for service in nova cinder glance neutron swift heat horizon ceilometer ironic savanna; do + for service in nova cinder glance neutron swift heat horizon ceilometer ironic savanna trove; do if is_service_enabled $service ; then iniset $TEMPEST_CONF service_available $service "True" else From def4c141f1d917705ac1dbdbfe7525f47382dada Mon Sep 17 00:00:00 2001 From: Kaitlin Farr Date: Mon, 6 Jan 2014 08:52:49 -0500 Subject: [PATCH 0600/4704] Adds default value for fixed_key Adds a default value for fixed_key, for use by a key manager implementation that reads the key from the configuration settings. This single, fixed key proffers no protection if the key is compromised. The current implementation of the key manager does not work correctly if the key is not set, so including this option is helpful for Tempest testing and volume encryption within DevStack. 
Implements: blueprint encrypt-cinder-volumes Change-Id: Id83060afc862c793b79b5429355b213cb4c173fd https://blueprints.launchpad.net/nova/+spec/encrypt-cinder-volumes --- stack.sh | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/stack.sh b/stack.sh index 2438f9fffc..558f71a3a4 100755 --- a/stack.sh +++ b/stack.sh @@ -1098,6 +1098,15 @@ if is_service_enabled key && is_service_enabled swift3 && is_service_enabled nov iniset $NOVA_CONF DEFAULT s3_affix_tenant "True" fi +# Create a randomized default value for the keymgr's fixed_key +if is_service_enabled nova; then + FIXED_KEY="" + for i in $(seq 1 64); + do FIXED_KEY+=$(echo "obase=16; $(($RANDOM % 16))" | bc); + done; + iniset $NOVA_CONF keymgr fixed_key "$FIXED_KEY" +fi + if is_service_enabled zeromq; then echo_summary "Starting zermomq receiver" screen_it zeromq "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-rpc-zmq-receiver" From 4ad37630a2f938b19697f6e310def046a4dcca48 Mon Sep 17 00:00:00 2001 From: Juan Manuel Olle Date: Mon, 6 Jan 2014 15:07:09 -0300 Subject: [PATCH 0601/4704] Remove duplicated name services Due to the fact that keystone will not allow services with duplicated names, cinder and nova services names were changed Closes-Bug: #1259425 Change-Id: I988aef477b418a289426e02e5e108aa57dd1076b --- lib/cinder | 2 +- lib/nova | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/cinder b/lib/cinder index cbe732e9b0..a0b33c8c88 100644 --- a/lib/cinder +++ b/lib/cinder @@ -385,7 +385,7 @@ create_cinder_accounts() { --adminurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s" \ --internalurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s" CINDER_V2_SERVICE=$(keystone service-create \ - --name=cinder \ + --name=cinderv2 \ --type=volumev2 \ --description="Cinder Volume Service V2" \ | grep " id " | get_field 2) diff --git a/lib/nova b/lib/nova index e754341bad..e9f87fce1f 100644 --- a/lib/nova +++ b/lib/nova @@ 
-338,7 +338,7 @@ create_nova_accounts() { --adminurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s" \ --internalurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s" NOVA_V3_SERVICE=$(keystone service-create \ - --name=nova \ + --name=novav3 \ --type=computev3 \ --description="Nova Compute Service V3" \ | grep " id " | get_field 2) From 085abd8eb7c744170cd92429b9aea9d07fd4458b Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Wed, 11 Dec 2013 12:21:12 +0000 Subject: [PATCH 0602/4704] Fix xenapi functions' tests The tests got outdated, this fix makes the tests pass again. Change-Id: Iadddfbf34bf79ba455811645e766c2f3d0fcca84 --- tools/xen/mocks | 2 +- tools/xen/test_functions.sh | 7 +++---- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/tools/xen/mocks b/tools/xen/mocks index 94b0ca4d02..ec8679e816 100644 --- a/tools/xen/mocks +++ b/tools/xen/mocks @@ -73,7 +73,7 @@ function [ { done return 1 fi - echo "Mock test does not implement the requested function" + echo "Mock test does not implement the requested function: ${1:-}" exit 1 } diff --git a/tools/xen/test_functions.sh b/tools/xen/test_functions.sh index 0ae2cb7f9a..14551868e1 100755 --- a/tools/xen/test_functions.sh +++ b/tools/xen/test_functions.sh @@ -118,7 +118,7 @@ function test_zip_snapshot_location { function test_create_directory_for_kernels { ( . mocks - mock_out get_local_sr uuid1 + mock_out get_local_sr_path /var/run/sr-mount/uuid1 create_directory_for_kernels ) @@ -141,7 +141,7 @@ EOF function test_create_directory_for_images { ( . 
mocks - mock_out get_local_sr uuid1 + mock_out get_local_sr_path /var/run/sr-mount/uuid1 create_directory_for_images ) @@ -199,8 +199,7 @@ function test_get_local_sr { [ "$RESULT" == "uuid123" ] - assert_xe_min - assert_xe_param "sr-list" "name-label=Local storage" + assert_xe_param "pool-list" params=default-SR minimal=true } function test_get_local_sr_path { From 2781f3bfc3e0ceca29457f65adfddb63f01d8059 Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Wed, 11 Dec 2013 13:41:54 +0000 Subject: [PATCH 0603/4704] Workaround missing zip snapshot At the moment, xenserver installation depends on github snapshots. Unfortunately, git.openstack.org does not have that capability. This fix includes: - Exit with error code, if a download fails - create proper urls, even if they are using the git protocol - set git base to github - so we are able to do snapshots Fixes bug: 1259905 Change-Id: I8d0cf8bf8abb16ee0a4b138a6719409c75e7a146 --- tools/xen/README.md | 3 +++ tools/xen/functions | 15 +++++++++++++-- tools/xen/mocks | 6 +++++- tools/xen/test_functions.sh | 21 +++++++++++++++++---- 4 files changed, 38 insertions(+), 7 deletions(-) diff --git a/tools/xen/README.md b/tools/xen/README.md index 06192ed2b7..ee1abcc091 100644 --- a/tools/xen/README.md +++ b/tools/xen/README.md @@ -70,6 +70,9 @@ the `XENAPI_PASSWORD` must be your dom0 root password. Of course, use real passwords if this machine is exposed. 
cat > ./localrc <&2 + exit 1 +} + function xapi_plugin_location { for PLUGIN_DIR in "/etc/xapi.d/plugins/" "/usr/lib/xcp/plugins/" "/usr/lib/xapi/plugins"; do if [ -d $PLUGIN_DIR ]; then @@ -11,7 +20,7 @@ function xapi_plugin_location { } function zip_snapshot_location { - echo $1 | sed "s:\.git$::;s:$:/zipball/$2:g" + echo $1 | sed "s,^git://,http://,g;s:\.git$::;s:$:/zipball/$2:g" } function create_directory_for_kernels { @@ -41,7 +50,9 @@ function extract_remote_zipball { local EXTRACTED_FILES=$(mktemp -d) { - wget -nv $ZIPBALL_URL -O $LOCAL_ZIPBALL --no-check-certificate + if ! wget -nv $ZIPBALL_URL -O $LOCAL_ZIPBALL --no-check-certificate; then + die_with_error "Failed to download [$ZIPBALL_URL]" + fi unzip -q -o $LOCAL_ZIPBALL -d $EXTRACTED_FILES rm -f $LOCAL_ZIPBALL } >&2 diff --git a/tools/xen/mocks b/tools/xen/mocks index ec8679e816..3b9b05c747 100644 --- a/tools/xen/mocks +++ b/tools/xen/mocks @@ -35,7 +35,7 @@ function mktemp { function wget { if [[ $@ =~ "failurl" ]]; then - exit 1 + return 1 fi echo "wget $@" >> $LIST_OF_ACTIONS } @@ -77,6 +77,10 @@ function [ { exit 1 } +function die_with_error { + echo "$1" >> $DEAD_MESSAGES +} + function xe { cat $XE_RESPONSE { diff --git a/tools/xen/test_functions.sh b/tools/xen/test_functions.sh index 14551868e1..373d996760 100755 --- a/tools/xen/test_functions.sh +++ b/tools/xen/test_functions.sh @@ -29,6 +29,9 @@ function before_each_test { XE_CALLS=$(mktemp) truncate -s 0 $XE_CALLS + + DEAD_MESSAGES=$(mktemp) + truncate -s 0 $DEAD_MESSAGES } # Teardown @@ -64,6 +67,10 @@ function assert_xe_param { grep -qe "^$1\$" $XE_CALLS } +function assert_died_with { + diff -u <(echo "$1") $DEAD_MESSAGES +} + function mock_out { local FNNAME="$1" local OUTPUT="$2" @@ -109,10 +116,16 @@ function test_no_plugin_directory_found { grep "[ -d /usr/lib/xcp/plugins/ ]" $LIST_OF_ACTIONS } -function test_zip_snapshot_location { +function test_zip_snapshot_location_http { diff \ - <(zip_snapshot_location 
"git://git.openstack.org/openstack/nova.git" "master") \ - <(echo "git://git.openstack.org/openstack/nova/zipball/master") + <(zip_snapshot_location "http://github.com/openstack/nova.git" "master") \ + <(echo "http://github.com/openstack/nova/zipball/master") +} + +function test_zip_snapsot_location_git { + diff \ + <(zip_snapshot_location "git://github.com/openstack/nova.git" "master") \ + <(echo "http://github.com/openstack/nova/zipball/master") } function test_create_directory_for_kernels { @@ -179,7 +192,7 @@ function test_extract_remote_zipball_wget_fail { local IGNORE IGNORE=$(. mocks && extract_remote_zipball "failurl") - assert_previous_command_failed + assert_died_with "Failed to download [failurl]" } function test_find_nova_plugins { From f93b98ac7309e3ebd106b44843650a161fad4616 Mon Sep 17 00:00:00 2001 From: Isaku Yamahata Date: Wed, 8 Jan 2014 18:15:14 +0900 Subject: [PATCH 0604/4704] gitignore: add .localrc.auto and local.conf The changeset of 893e66360caf3bcf0578d4541b3c17d089c33b02, Change-Id of I367cadc86116621e9574ac203aafdab483d810d3 introduced local.conf and generates .localrc.auto. But they aren't in .gitignore. This patch adds them into .gitignore. Change-Id: I7d4dc99d980d9c5b5156cf915646bc96163a3dc4 Closes-Bug: #1267027 --- .gitignore | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.gitignore b/.gitignore index c49b4a3287..43652024f3 100644 --- a/.gitignore +++ b/.gitignore @@ -17,3 +17,5 @@ accrc devstack-docs-* docs/ docs-files +.localrc.auto +local.conf From 96f8e34c38f172689f09842761dd20600a60fc5a Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Mon, 9 Sep 2013 14:22:07 -0700 Subject: [PATCH 0605/4704] Enable multi-threaded nova-conductor Just like I09f4c6f57e71982b8c7fc92645b3ebec12ff1348, enable multi-threaded nova-conductor. This feature was merged into nova in I8698997d211d7617ee14a1c6113056a694d70620. 
Change-Id: Id7042284e81bd64092a400d24a3170ce07beb08c --- lib/nova | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/nova b/lib/nova index e754341bad..39685a835a 100644 --- a/lib/nova +++ b/lib/nova @@ -377,6 +377,7 @@ function create_nova_conf() { iniset $NOVA_CONF DEFAULT osapi_compute_workers "4" iniset $NOVA_CONF DEFAULT ec2_workers "4" iniset $NOVA_CONF DEFAULT metadata_workers "4" + iniset $NOVA_CONF conductor workers "4" iniset $NOVA_CONF DEFAULT sql_connection `database_connection_url nova` iniset $NOVA_CONF DEFAULT fatal_deprecations "True" iniset $NOVA_CONF DEFAULT instance_name_template "${INSTANCE_NAME_PREFIX}%08x" From 25049cd23de0e8055326c668ff119dd8cdf0bae4 Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Thu, 9 Jan 2014 13:53:52 +0100 Subject: [PATCH 0606/4704] Use --tenant-id, not --tenant_id Change-Id: I0e3d65d5b69ac82cbf7ee6ffc41ead369af8c126 --- lib/cinder | 2 +- lib/ironic | 4 ++-- lib/marconi | 2 +- lib/neutron | 14 +++++++------- lib/nova | 2 +- lib/savanna | 2 +- lib/swift | 2 +- lib/trove | 2 +- 8 files changed, 15 insertions(+), 15 deletions(-) diff --git a/lib/cinder b/lib/cinder index cbe732e9b0..fe278f60bb 100644 --- a/lib/cinder +++ b/lib/cinder @@ -365,7 +365,7 @@ create_cinder_accounts() { CINDER_USER=$(keystone user-create \ --name=cinder \ --pass="$SERVICE_PASSWORD" \ - --tenant_id $SERVICE_TENANT \ + --tenant-id $SERVICE_TENANT \ --email=cinder@example.com \ | grep " id " | get_field 2) keystone user-role-add \ diff --git a/lib/ironic b/lib/ironic index 099746ae22..1ff3c81f06 100644 --- a/lib/ironic +++ b/lib/ironic @@ -149,11 +149,11 @@ create_ironic_accounts() { IRONIC_USER=$(keystone user-create \ --name=ironic \ --pass="$SERVICE_PASSWORD" \ - --tenant_id $SERVICE_TENANT \ + --tenant-id $SERVICE_TENANT \ --email=ironic@example.com \ | grep " id " | get_field 2) keystone user-role-add \ - --tenant_id $SERVICE_TENANT \ + --tenant-id $SERVICE_TENANT \ --user_id $IRONIC_USER \ --role_id $ADMIN_ROLE if [[ 
"$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then diff --git a/lib/marconi b/lib/marconi index 742f866e7d..6b9ffdc0b3 100644 --- a/lib/marconi +++ b/lib/marconi @@ -142,7 +142,7 @@ function create_marconi_accounts() { MARCONI_USER=$(get_id keystone user-create --name=marconi \ --pass="$SERVICE_PASSWORD" \ - --tenant_id $SERVICE_TENANT \ + --tenant-id $SERVICE_TENANT \ --email=marconi@example.com) keystone user-role-add --tenant-id $SERVICE_TENANT \ --user-id $MARCONI_USER \ diff --git a/lib/neutron b/lib/neutron index a7519ad328..43f43f951a 100644 --- a/lib/neutron +++ b/lib/neutron @@ -328,7 +328,7 @@ function create_neutron_accounts() { NEUTRON_USER=$(keystone user-create \ --name=neutron \ --pass="$SERVICE_PASSWORD" \ - --tenant_id $SERVICE_TENANT \ + --tenant-id $SERVICE_TENANT \ --email=neutron@example.com \ | grep " id " | get_field 2) keystone user-role-add \ @@ -357,7 +357,7 @@ function create_neutron_initial_network() { # Create a small network # Since neutron command is executed in admin context at this point, - # ``--tenant_id`` needs to be specified. + # ``--tenant-id`` needs to be specified. if is_baremetal; then if [[ "$PUBLIC_INTERFACE" == '' || "$OVS_PHYSICAL_BRIDGE" == '' ]]; then die $LINENO "Neutron settings for baremetal not set.. 
exiting" @@ -367,16 +367,16 @@ function create_neutron_initial_network() { sudo ip addr del $IP dev $PUBLIC_INTERFACE sudo ip addr add $IP dev $OVS_PHYSICAL_BRIDGE done - NET_ID=$(neutron net-create $PHYSICAL_NETWORK --tenant_id $TENANT_ID --provider:network_type flat --provider:physical_network "$PHYSICAL_NETWORK" | grep ' id ' | get_field 2) + NET_ID=$(neutron net-create $PHYSICAL_NETWORK --tenant-id $TENANT_ID --provider:network_type flat --provider:physical_network "$PHYSICAL_NETWORK" | grep ' id ' | get_field 2) die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PHYSICAL_NETWORK $TENANT_ID" - SUBNET_ID=$(neutron subnet-create --tenant_id $TENANT_ID --ip_version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} --gateway $NETWORK_GATEWAY --name $PRIVATE_SUBNET_NAME $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2) + SUBNET_ID=$(neutron subnet-create --tenant-id $TENANT_ID --ip_version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} --gateway $NETWORK_GATEWAY --name $PRIVATE_SUBNET_NAME $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2) die_if_not_set $LINENO SUBNET_ID "Failure creating SUBNET_ID for $TENANT_ID" sudo ifconfig $OVS_PHYSICAL_BRIDGE up sudo route add default gw $NETWORK_GATEWAY dev $OVS_PHYSICAL_BRIDGE else - NET_ID=$(neutron net-create --tenant_id $TENANT_ID "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2) + NET_ID=$(neutron net-create --tenant-id $TENANT_ID "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2) die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PHYSICAL_NETWORK $TENANT_ID" - SUBNET_ID=$(neutron subnet-create --tenant_id $TENANT_ID --ip_version 4 --gateway $NETWORK_GATEWAY --name $PRIVATE_SUBNET_NAME $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2) + SUBNET_ID=$(neutron subnet-create --tenant-id $TENANT_ID --ip_version 4 --gateway $NETWORK_GATEWAY --name $PRIVATE_SUBNET_NAME $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2) die_if_not_set $LINENO SUBNET_ID "Failure creating SUBNET_ID for 
$TENANT_ID" fi @@ -384,7 +384,7 @@ function create_neutron_initial_network() { # Create a router, and add the private subnet as one of its interfaces if [[ "$Q_L3_ROUTER_PER_TENANT" == "True" ]]; then # create a tenant-owned router. - ROUTER_ID=$(neutron router-create --tenant_id $TENANT_ID $Q_ROUTER_NAME | grep ' id ' | get_field 2) + ROUTER_ID=$(neutron router-create --tenant-id $TENANT_ID $Q_ROUTER_NAME | grep ' id ' | get_field 2) die_if_not_set $LINENO ROUTER_ID "Failure creating ROUTER_ID for $TENANT_ID $Q_ROUTER_NAME" else # Plugin only supports creating a single router, which should be admin owned. diff --git a/lib/nova b/lib/nova index e754341bad..367ec83072 100644 --- a/lib/nova +++ b/lib/nova @@ -318,7 +318,7 @@ create_nova_accounts() { NOVA_USER=$(keystone user-create \ --name=nova \ --pass="$SERVICE_PASSWORD" \ - --tenant_id $SERVICE_TENANT \ + --tenant-id $SERVICE_TENANT \ --email=nova@example.com \ | grep " id " | get_field 2) keystone user-role-add \ diff --git a/lib/savanna b/lib/savanna index 6794e36dfd..bb4dfe693d 100644 --- a/lib/savanna +++ b/lib/savanna @@ -56,7 +56,7 @@ function create_savanna_accounts() { SAVANNA_USER=$(keystone user-create \ --name=savanna \ --pass="$SERVICE_PASSWORD" \ - --tenant_id $SERVICE_TENANT \ + --tenant-id $SERVICE_TENANT \ --email=savanna@example.com \ | grep " id " | get_field 2) keystone user-role-add \ diff --git a/lib/swift b/lib/swift index 96929db557..44c230be93 100644 --- a/lib/swift +++ b/lib/swift @@ -514,7 +514,7 @@ function create_swift_accounts() { ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }") SWIFT_USER=$(keystone user-create --name=swift --pass="$SERVICE_PASSWORD" \ - --tenant_id $SERVICE_TENANT --email=swift@example.com | grep " id " | get_field 2) + --tenant-id $SERVICE_TENANT --email=swift@example.com | grep " id " | get_field 2) keystone user-role-add --tenant-id $SERVICE_TENANT --user-id $SWIFT_USER --role-id $ADMIN_ROLE if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then diff 
--git a/lib/trove b/lib/trove index f8e3eddfe2..4efdb5d669 100644 --- a/lib/trove +++ b/lib/trove @@ -64,7 +64,7 @@ create_trove_accounts() { TROVE_USER=$(keystone user-create \ --name=trove \ --pass="$SERVICE_PASSWORD" \ - --tenant_id $SERVICE_TENANT \ + --tenant-id $SERVICE_TENANT \ --email=trove@example.com \ | grep " id " | get_field 2) keystone user-role-add --tenant-id $SERVICE_TENANT \ From 72dc98ed6bcdaa1cdd81c1b655b5cbdf5490291d Mon Sep 17 00:00:00 2001 From: Akihiro Motoki Date: Thu, 9 Jan 2014 21:57:22 +0900 Subject: [PATCH 0607/4704] Correct Qpid package name in files/apts/neutron Ubuntu qpid server package is named as "qpidd", but files/apts/neutron has an entry "qpid". Change-Id: Ie3f8391a7404bdeb222acfcce77ca80a14ea8693 Closes-Bug: #1267459 --- files/apts/neutron | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/apts/neutron b/files/apts/neutron index 0f4b69f8ef..4e9f0f7dfd 100644 --- a/files/apts/neutron +++ b/files/apts/neutron @@ -20,6 +20,6 @@ python-qpid # dist:precise dnsmasq-base dnsmasq-utils # for dhcp_release only available in dist:oneiric,precise,quantal rabbitmq-server # NOPRIME -qpid # NOPRIME +qpidd # NOPRIME sqlite3 vlan From fa5ccfff1098bb85eb7810ad5146fbdfee83fb15 Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Thu, 9 Jan 2014 13:27:35 +0100 Subject: [PATCH 0608/4704] Setup Keystone catalog information for Ceilometer Change-Id: I3f536f38fe7862ee41b06d1d48b848cc07492c8d Closes-Bug: #1267322 --- files/default_catalog.templates | 5 ++++ lib/ceilometer | 42 +++++++++++++++++++++++++++++++++ stack.sh | 4 ++++ 3 files changed, 51 insertions(+) diff --git a/files/default_catalog.templates b/files/default_catalog.templates index 277904a8e3..430c42a337 100644 --- a/files/default_catalog.templates +++ b/files/default_catalog.templates @@ -51,3 +51,8 @@ catalog.RegionOne.orchestration.publicURL = http://%SERVICE_HOST%:8000/v1 catalog.RegionOne.orchestration.adminURL = http://%SERVICE_HOST%:8000/v1 
catalog.RegionOne.orchestration.internalURL = http://%SERVICE_HOST%:8000/v1 catalog.RegionOne.orchestration.name = Heat Service + +catalog.RegionOne.metering.publicURL = http://%SERVICE_HOST%:8777/v1 +catalog.RegionOne.metering.adminURL = http://%SERVICE_HOST%:8777/v1 +catalog.RegionOne.metering.internalURL = http://%SERVICE_HOST%:8777/v1 +catalog.RegionOne.metering.name = Telemetry Service diff --git a/lib/ceilometer b/lib/ceilometer index fac3be14a9..fe72fcdb11 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -48,8 +48,50 @@ CEILOMETER_BIN_DIR=$(get_python_exec_prefix) # Set up database backend CEILOMETER_BACKEND=${CEILOMETER_BACKEND:-mysql} +# Ceilometer connection info. +CEILOMETER_SERVICE_PROTOCOL=http +CEILOMETER_SERVICE_HOST=$SERVICE_HOST +CEILOMETER_SERVICE_PORT=${CEILOMETER_SERVICE_PORT:-8777} +# + # Functions # --------- +# +# create_ceilometer_accounts() - Set up common required ceilometer accounts + +create_ceilometer_accounts() { + + SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") + ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }") + + # Ceilometer + if [[ "$ENABLED_SERVICES" =~ "ceilometer-api" ]]; then + CEILOMETER_USER=$(keystone user-create \ + --name=ceilometer \ + --pass="$SERVICE_PASSWORD" \ + --tenant_id $SERVICE_TENANT \ + --email=ceilometer@example.com \ + | grep " id " | get_field 2) + keystone user-role-add \ + --tenant-id $SERVICE_TENANT \ + --user-id $CEILOMETER_USER \ + --role-id $ADMIN_ROLE + if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then + CEILOMETER_SERVICE=$(keystone service-create \ + --name=ceilometer \ + --type=metering \ + --description="OpenStack Telemetry Service" \ + | grep " id " | get_field 2) + keystone endpoint-create \ + --region RegionOne \ + --service_id $CEILOMETER_SERVICE \ + --publicurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT/" \ + --adminurl 
"$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT/" \ + --internalurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT/" + fi + fi +} + # cleanup_ceilometer() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up diff --git a/stack.sh b/stack.sh index 2438f9fffc..bf782bc047 100755 --- a/stack.sh +++ b/stack.sh @@ -901,6 +901,10 @@ if is_service_enabled key; then create_trove_accounts fi + if is_service_enabled ceilometer; then + create_ceilometer_accounts + fi + if is_service_enabled swift || is_service_enabled s-proxy; then create_swift_accounts fi From 6681a4fae9df92cee77900f2248b8e98c501626f Mon Sep 17 00:00:00 2001 From: Isaku Yamahata Date: Fri, 10 Jan 2014 15:28:29 +0900 Subject: [PATCH 0609/4704] bash8: fix bash8 warning This patch removes the following bash8 warnings. > /devstack/ $ ./run_tests.sh > Running bash8... > E003: Indent not multiple of 4: ' wget -c $image_url -O $FILES/$IMAGE_FNAME' > - functions: L1367 > E003: Indent not multiple of 4: ' if [[ $? 
-ne 0 ]]; then' > - functions: L1368 > E003: Indent not multiple of 4: ' echo "Not found: $image_url"' > - functions: L1369 > E003: Indent not multiple of 4: ' return' > - functions: L1370 > E003: Indent not multiple of 4: ' fi' > - functions: L1371 > E003: Indent not multiple of 4: ' `"should use a descriptor-data pair."' > - functions: L1423 > E003: Indent not multiple of 4: ' `" Attempt to retrieve the *-flat.vmdk: $flat_url"' > - functions: L1438 > E003: Indent not multiple of 4: ' `" Attempt to retrieve the descriptor *.vmdk: $descriptor_url"' > - functions: L1477 > E003: Indent not multiple of 4: ' warn $LINENO "Descriptor not found $descriptor_url"' > - functions: L1492 > E003: Indent not multiple of 4: ' descriptor_found=false' > - functions: L1493 > E003: Indent not multiple of 4: ' fi' > - functions: L1501 > E003: Indent not multiple of 4: ' fi' > - functions: L1502 > E003: Indent not multiple of 4: ' #TODO(alegendre): handle streamOptimized once supported by the VMware driver.' > - functions: L1503 > E003: Indent not multiple of 4: ' vmdk_disktype="preallocated"' > - functions: L1504 > 14 bash8 error(s) found Change-Id: Icf2cddf283192a50253ccfa697c2d32eec75b4ba Closes-Bug: #1267716 --- functions | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/functions b/functions index e79e1d58af..6f09685efb 100644 --- a/functions +++ b/functions @@ -1364,11 +1364,11 @@ function upload_image() { if [[ $image_url != file* ]]; then # Downloads the image (uec ami+aki style), then extracts it. if [[ ! -f $FILES/$IMAGE_FNAME || "$(stat -c "%s" $FILES/$IMAGE_FNAME)" = "0" ]]; then - wget -c $image_url -O $FILES/$IMAGE_FNAME - if [[ $? -ne 0 ]]; then - echo "Not found: $image_url" - return - fi + wget -c $image_url -O $FILES/$IMAGE_FNAME + if [[ $? 
-ne 0 ]]; then + echo "Not found: $image_url" + return + fi fi IMAGE="$FILES/${IMAGE_FNAME}" else @@ -1420,7 +1420,7 @@ function upload_image() { vmdk_create_type="${vmdk_create_type%?}" descriptor_data_pair_msg="Monolithic flat and VMFS disks "` - `"should use a descriptor-data pair." + `"should use a descriptor-data pair." if [[ "$vmdk_create_type" = "monolithicSparse" ]]; then vmdk_disktype="sparse" elif [[ "$vmdk_create_type" = "monolithicFlat" || \ @@ -1435,7 +1435,7 @@ function upload_image() { path_len=`expr ${#image_url} - ${#IMAGE_FNAME}` flat_url="${image_url:0:$path_len}$flat_fname" warn $LINENO "$descriptor_data_pair_msg"` - `" Attempt to retrieve the *-flat.vmdk: $flat_url" + `" Attempt to retrieve the *-flat.vmdk: $flat_url" if [[ $flat_url != file* ]]; then if [[ ! -f $FILES/$flat_fname || \ "$(stat -c "%s" $FILES/$flat_fname)" = "0" ]]; then @@ -1474,7 +1474,7 @@ function upload_image() { flat_path="${image_url:0:$path_len}" descriptor_url=$flat_path$descriptor_fname warn $LINENO "$descriptor_data_pair_msg"` - `" Attempt to retrieve the descriptor *.vmdk: $descriptor_url" + `" Attempt to retrieve the descriptor *.vmdk: $descriptor_url" if [[ $flat_path != file* ]]; then if [[ ! -f $FILES/$descriptor_fname || \ "$(stat -c "%s" $FILES/$descriptor_fname)" = "0" ]]; then @@ -1489,8 +1489,8 @@ function upload_image() { descriptor_url=$(echo $descriptor_url | sed "s/^file:\/\///g") if [[ ! -f $descriptor_url || \ "$(stat -c "%s" $descriptor_url)" == "0" ]]; then - warn $LINENO "Descriptor not found $descriptor_url" - descriptor_found=false + warn $LINENO "Descriptor not found $descriptor_url" + descriptor_found=false fi fi if $descriptor_found; then @@ -1498,10 +1498,10 @@ function upload_image() { `"grep -a -F -m 1 'ddb.adapterType =' $descriptor_url)" vmdk_adapter_type="${vmdk_adapter_type#*\"}" vmdk_adapter_type="${vmdk_adapter_type%?}" - fi - fi - #TODO(alegendre): handle streamOptimized once supported by the VMware driver. 
- vmdk_disktype="preallocated" + fi + fi + #TODO(alegendre): handle streamOptimized once supported by the VMware driver. + vmdk_disktype="preallocated" else #TODO(alegendre): handle streamOptimized once supported by the VMware driver. vmdk_disktype="preallocated" From d7f6090f29786f091773497bc3597142d94619ec Mon Sep 17 00:00:00 2001 From: Alvaro Lopez Ortega Date: Sun, 22 Dec 2013 17:03:47 +0100 Subject: [PATCH 0610/4704] Add support for Fedora 20 The list of RPM packages have been updated to support the recently released Fedora 20 distribution. Closes-Bug: #1263291 Co-Authored: Alvaro Lopez Ortega Change-Id: Ia66abef1a1a54e6d5ee6eebc12908cef3f1d211d --- files/rpms/cinder | 1 + files/rpms/general | 1 + files/rpms/glance | 5 +++-- files/rpms/horizon | 4 ++-- files/rpms/keystone | 10 +++++----- files/rpms/neutron | 4 ++-- files/rpms/nova | 6 +++--- files/rpms/swift | 2 +- files/rpms/tempest | 2 +- files/rpms/trove | 2 +- stack.sh | 4 ++-- 11 files changed, 22 insertions(+), 19 deletions(-) diff --git a/files/rpms/cinder b/files/rpms/cinder index c4edb68f14..623c13e676 100644 --- a/files/rpms/cinder +++ b/files/rpms/cinder @@ -4,3 +4,4 @@ qemu-img python-devel postgresql-devel iscsi-initiator-utils +python-lxml #dist:f18,f19,f20 diff --git a/files/rpms/general b/files/rpms/general index 2db31d1db0..40246ea4ab 100644 --- a/files/rpms/general +++ b/files/rpms/general @@ -20,6 +20,7 @@ tar tcpdump unzip wget +which # [1] : some of installed tools have unversioned dependencies on this, # but others have versioned (<=0.7). 
So if a later version (0.7.1) diff --git a/files/rpms/glance b/files/rpms/glance index dd66171f7a..fffd9c85b4 100644 --- a/files/rpms/glance +++ b/files/rpms/glance @@ -1,6 +1,6 @@ gcc libffi-devel # testonly -libxml2-devel +libxml2-devel # testonly libxslt-devel # testonly mysql-devel # testonly openssl-devel # testonly @@ -9,7 +9,8 @@ python-argparse python-devel python-eventlet python-greenlet -python-paste-deploy #dist:f16,f17,f18,f19 +python-lxml #dist:f18,f19,f20 +python-paste-deploy #dist:f18,f19,f20 python-routes python-sqlalchemy python-wsgiref diff --git a/files/rpms/horizon b/files/rpms/horizon index aa27ab4e97..59503cc9aa 100644 --- a/files/rpms/horizon +++ b/files/rpms/horizon @@ -16,8 +16,8 @@ python-kombu python-migrate python-mox python-nose -python-paste #dist:f16,f17,f18,f19 -python-paste-deploy #dist:f16,f17,f18,f19 +python-paste #dist:f18,f19,f20 +python-paste-deploy #dist:f18,f19,f20 python-routes python-sphinx python-sqlalchemy diff --git a/files/rpms/keystone b/files/rpms/keystone index 52dbf477d8..99e8524628 100644 --- a/files/rpms/keystone +++ b/files/rpms/keystone @@ -1,11 +1,11 @@ python-greenlet -python-lxml #dist:f16,f17,f18,f19 -python-paste #dist:f16,f17,f18,f19 -python-paste-deploy #dist:f16,f17,f18,f19 -python-paste-script #dist:f16,f17,f18,f19 +libxslt-devel # dist:f20 +python-lxml #dist:f18,f19,f20 +python-paste #dist:f18,f19,f20 +python-paste-deploy #dist:f18,f19,f20 +python-paste-script #dist:f18,f19,f20 python-routes python-sqlalchemy -python-sqlite2 python-webob sqlite diff --git a/files/rpms/neutron b/files/rpms/neutron index a7700f77d4..67bf52350a 100644 --- a/files/rpms/neutron +++ b/files/rpms/neutron @@ -11,8 +11,8 @@ python-greenlet python-iso8601 python-kombu #rhel6 gets via pip -python-paste # dist:f16,f17,f18,f19 -python-paste-deploy # dist:f16,f17,f18,f19 +python-paste # dist:f18,f19,f20 +python-paste-deploy # dist:f18,f19,f20 python-qpid python-routes python-sqlalchemy diff --git a/files/rpms/nova b/files/rpms/nova 
index c99f3defc8..ac70ac5d6f 100644 --- a/files/rpms/nova +++ b/files/rpms/nova @@ -28,11 +28,11 @@ python-kombu python-lockfile python-migrate python-mox -python-paramiko # dist:f16,f17,f18,f19 +python-paramiko # dist:f18,f19,f20 # ^ on RHEL, brings in python-crypto which conflicts with version from # pip we need -python-paste # dist:f16,f17,f18,f19 -python-paste-deploy # dist:f16,f17,f18,f19 +python-paste # dist:f18,f19,f20 +python-paste-deploy # dist:f18,f19,f20 python-qpid python-routes python-sqlalchemy diff --git a/files/rpms/swift b/files/rpms/swift index b137f30dce..32432bca9b 100644 --- a/files/rpms/swift +++ b/files/rpms/swift @@ -9,7 +9,7 @@ python-eventlet python-greenlet python-netifaces python-nose -python-paste-deploy # dist:f16,f17,f18,f19 +python-paste-deploy # dist:f18,f19,f20 python-simplejson python-webob pyxattr diff --git a/files/rpms/tempest b/files/rpms/tempest index de32b81504..e7bbd43cd6 100644 --- a/files/rpms/tempest +++ b/files/rpms/tempest @@ -1 +1 @@ -libxslt-dev \ No newline at end of file +libxslt-devel diff --git a/files/rpms/trove b/files/rpms/trove index 09dcee8104..c5cbdea012 100644 --- a/files/rpms/trove +++ b/files/rpms/trove @@ -1 +1 @@ -libxslt1-dev # testonly +libxslt-devel # testonly diff --git a/stack.sh b/stack.sh index ce5fbd47e5..4e12c45523 100755 --- a/stack.sh +++ b/stack.sh @@ -12,7 +12,7 @@ # developer install. # To keep this script simple we assume you are running on a recent **Ubuntu** -# (12.04 Precise or newer) or **Fedora** (F16 or newer) machine. (It may work +# (12.04 Precise or newer) or **Fedora** (F18 or newer) machine. (It may work # on other platforms but support for those platforms is left to those who added # them to DevStack.) It should work in a VM or physical server. 
Additionally # we maintain a list of ``apt`` and ``rpm`` dependencies and other configuration @@ -131,7 +131,7 @@ disable_negated_services # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -if [[ ! ${DISTRO} =~ (oneiric|precise|quantal|raring|saucy|trusty|7.0|wheezy|sid|testing|jessie|f16|f17|f18|f19|opensuse-12.2|rhel6) ]]; then +if [[ ! ${DISTRO} =~ (oneiric|precise|quantal|raring|saucy|trusty|7.0|wheezy|sid|testing|jessie|f18|f19|f20|opensuse-12.2|rhel6) ]]; then echo "WARNING: this script has not been tested on $DISTRO" if [[ "$FORCE" != "yes" ]]; then die $LINENO "If you wish to run this script anyway run with FORCE=yes" From 1b0eccdf75cf70a26c1b2ae6b9beaa75ebaf7a6a Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Fri, 10 Jan 2014 11:51:01 +0100 Subject: [PATCH 0611/4704] Fix Heat/Cloud formation catalog template Cloud formation and Heat API ports were mixed. Change-Id: I029592c4821bb93c8a1dd91519f30908efd56627 Closes-Bug: #1267355 --- files/default_catalog.templates | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/files/default_catalog.templates b/files/default_catalog.templates index 277904a8e3..debcedfb5b 100644 --- a/files/default_catalog.templates +++ b/files/default_catalog.templates @@ -47,7 +47,12 @@ catalog.RegionOne.image.adminURL = http://%SERVICE_HOST%:9292 catalog.RegionOne.image.internalURL = http://%SERVICE_HOST%:9292 catalog.RegionOne.image.name = Image Service -catalog.RegionOne.orchestration.publicURL = http://%SERVICE_HOST%:8000/v1 -catalog.RegionOne.orchestration.adminURL = http://%SERVICE_HOST%:8000/v1 -catalog.RegionOne.orchestration.internalURL = http://%SERVICE_HOST%:8000/v1 +catalog.RegionOne.cloudformation.publicURL = http://%SERVICE_HOST%:8000/v1 +catalog.RegionOne.cloudformation.adminURL = http://%SERVICE_HOST%:8000/v1 +catalog.RegionOne.cloudformation.internalURL = http://%SERVICE_HOST%:8000/v1
+catalog.RegionOne.cloudformation.name = Heat CloudFormation Service + +catalog.RegionOne.orchestration.publicURL = http://%SERVICE_HOST%:8004/v1/$(tenant_id)s +catalog.RegionOne.orchestration.adminURL = http://%SERVICE_HOST%:8004/v1/$(tenant_id)s +catalog.RegionOne.orchestration.internalURL = http://%SERVICE_HOST%:8004/v1/$(tenant_id)s catalog.RegionOne.orchestration.name = Heat Service From f69c6f16d21ce51eb5939ea6fecd99a8b28b426b Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Thu, 9 Jan 2014 19:47:54 -0500 Subject: [PATCH 0612/4704] Enable server-side and client-side logs for libvirt Need this to diagnose libvirt Errors in the gate Change-Id: Id46137a71d17abc8bfab66b14ab567d81a31f018 Related-Bug: #1254872 --- lib/nova | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/lib/nova b/lib/nova index e754341bad..162212da59 100644 --- a/lib/nova +++ b/lib/nova @@ -648,6 +648,14 @@ function start_nova_compute() { fi if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then + # Enable client side traces for libvirt + export LIBVIRT_LOG_FILTERS="1:libvirt" + export LIBVIRT_LOG_OUTPUTS="1:file:/var/log/libvirt/libvirtd-nova.log" + + # Enable server side traces for libvirtd + echo "log_filters=\"1:libvirt 1:qemu\"" | sudo tee -a /etc/libvirt/libvirtd.conf + echo "log_outputs=\"1:file:/var/log/libvirt/libvirtd.log\"" | sudo tee -a /etc/libvirt/libvirtd.conf + # The group **$LIBVIRT_GROUP** is added to the current user in this script. # Use 'sg' to execute nova-compute as a member of the **$LIBVIRT_GROUP** group. screen_it n-cpu "cd $NOVA_DIR && sg $LIBVIRT_GROUP '$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf'" From 63e1784354a49ca45bb4ae9465d2cb6dfb31db12 Mon Sep 17 00:00:00 2001 From: Thierry Carrez Date: Fri, 10 Jan 2014 14:23:03 +0100 Subject: [PATCH 0613/4704] Put cinder rootwrap config in separate function Separate out Cinder's rootwrap configuration so that it can be called from Grenade's upgrade scripts. 
This follows the same model as Nova uses with configure_nova_rootwrap() which can be called from Grenade to refresh rootwrap config. Change-Id: Id808abc2b5754443362b3de4b3453e305d3720f3 --- lib/cinder | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/lib/cinder b/lib/cinder index cbe732e9b0..6f5fb188c9 100644 --- a/lib/cinder +++ b/lib/cinder @@ -163,15 +163,8 @@ function cleanup_cinder() { fi } -# configure_cinder() - Set config files, create data dirs, etc -function configure_cinder() { - if [[ ! -d $CINDER_CONF_DIR ]]; then - sudo mkdir -p $CINDER_CONF_DIR - fi - sudo chown $STACK_USER $CINDER_CONF_DIR - - cp -p $CINDER_DIR/etc/cinder/policy.json $CINDER_CONF_DIR - +# configure_cinder_rootwrap() - configure Cinder's rootwrap +function configure_cinder_rootwrap() { # Set the paths of certain binaries CINDER_ROOTWRAP=$(get_rootwrap_location cinder) if [[ ! -x $CINDER_ROOTWRAP ]]; then @@ -214,6 +207,18 @@ function configure_cinder() { chmod 0440 $TEMPFILE sudo chown root:root $TEMPFILE sudo mv $TEMPFILE /etc/sudoers.d/cinder-rootwrap +} + +# configure_cinder() - Set config files, create data dirs, etc +function configure_cinder() { + if [[ ! 
-d $CINDER_CONF_DIR ]]; then + sudo mkdir -p $CINDER_CONF_DIR + fi + sudo chown $STACK_USER $CINDER_CONF_DIR + + cp -p $CINDER_DIR/etc/cinder/policy.json $CINDER_CONF_DIR + + configure_cinder_rootwrap cp $CINDER_DIR/etc/cinder/api-paste.ini $CINDER_API_PASTE_INI From 9fc8792b0ac7525b4c353b0a55b8b80eabf76e2a Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 22 May 2013 17:19:06 -0500 Subject: [PATCH 0614/4704] Robustify service shutdown * Save PID when using screen in screen_it() * Add screen_stop() * Call out service stop_*() in unstack.sh functions so screen_stop() can do its thing Closes-bug: 1183449 Change-Id: Iac84231cfda960c4197de5b6e8ba6eb19225169a --- functions | 33 +++++++++++++++++++++++++++++++-- lib/ceilometer | 2 +- lib/cinder | 2 +- lib/glance | 4 ++-- lib/heat | 2 +- lib/keystone | 2 +- lib/nova | 2 +- lib/trove | 2 +- stackrc | 3 +++ unstack.sh | 47 ++++++++++++++++++++++++++++++++++------------- 10 files changed, 76 insertions(+), 23 deletions(-) diff --git a/functions b/functions index 6f09685efb..92b61ed974 100644 --- a/functions +++ b/functions @@ -1132,10 +1132,39 @@ function screen_it { sleep 1.5 NL=`echo -ne '\015'` - screen -S $SCREEN_NAME -p $1 -X stuff "$2 || echo \"$1 failed to start\" | tee \"$SERVICE_DIR/$SCREEN_NAME/$1.failure\"$NL" + # This fun command does the following: + # - the passed server command is backgrounded + # - the pid of the background process is saved in the usual place + # - the server process is brought back to the foreground + # - if the server process exits prematurely the fg command errors + # and a message is written to stdout and the service failure file + # The pid saved can be used in screen_stop() as a process group + # id to kill off all child processes + screen -S $SCREEN_NAME -p $1 -X stuff "$2 & echo \$! 
>$SERVICE_DIR/$SCREEN_NAME/$1.pid; fg || echo \"$1 failed to start\" | tee \"$SERVICE_DIR/$SCREEN_NAME/$1.failure\"$NL" else # Spawn directly without screen - run_process "$1" "$2" >$SERVICE_DIR/$SCREEN_NAME/$service.pid + run_process "$1" "$2" >$SERVICE_DIR/$SCREEN_NAME/$1.pid + fi + fi +} + + +# Stop a service in screen +# screen_stop service +function screen_stop() { + SCREEN_NAME=${SCREEN_NAME:-stack} + SERVICE_DIR=${SERVICE_DIR:-${DEST}/status} + USE_SCREEN=$(trueorfalse True $USE_SCREEN) + + if is_service_enabled $1; then + # Kill via pid if we have one available + if [[ -r $SERVICE_DIR/$SCREEN_NAME/$1.pid ]]; then + pkill -TERM -P $(cat $SERVICE_DIR/$SCREEN_NAME/$1.pid) + rm $SERVICE_DIR/$SCREEN_NAME/$1.pid + fi + if [[ "$USE_SCREEN" = "True" ]]; then + # Clean up the screen window + screen -S $SCREEN_NAME -p $1 -X kill fi fi } diff --git a/lib/ceilometer b/lib/ceilometer index fac3be14a9..211303f57c 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -162,7 +162,7 @@ function start_ceilometer() { function stop_ceilometer() { # Kill the ceilometer screen windows for serv in ceilometer-acompute ceilometer-acentral ceilometer-anotification ceilometer-collector ceilometer-api ceilometer-alarm-notifier ceilometer-alarm-evaluator; do - screen -S $SCREEN_NAME -p $serv -X kill + screen_stop $serv done } diff --git a/lib/cinder b/lib/cinder index cbe732e9b0..11414bedd3 100644 --- a/lib/cinder +++ b/lib/cinder @@ -556,7 +556,7 @@ function start_cinder() { function stop_cinder() { # Kill the cinder screen windows for serv in c-api c-bak c-sch c-vol; do - screen -S $SCREEN_NAME -p $serv -X kill + screen_stop $serv done if is_service_enabled c-vol; then diff --git a/lib/glance b/lib/glance index 135136db7e..80868ae5c5 100644 --- a/lib/glance +++ b/lib/glance @@ -206,8 +206,8 @@ function start_glance() { # stop_glance() - Stop running processes function stop_glance() { # Kill the Glance screen windows - screen -S $SCREEN_NAME -p g-api -X kill - screen -S $SCREEN_NAME -p 
g-reg -X kill + screen_stop g-api + screen_stop g-reg } diff --git a/lib/heat b/lib/heat index e44a618162..29cd967fe1 100644 --- a/lib/heat +++ b/lib/heat @@ -175,7 +175,7 @@ function start_heat() { function stop_heat() { # Kill the screen windows for serv in h-eng h-api h-api-cfn h-api-cw; do - screen -S $SCREEN_NAME -p $serv -X kill + screen_stop $serv done } diff --git a/lib/keystone b/lib/keystone index 29b9604efe..dc6a730f16 100644 --- a/lib/keystone +++ b/lib/keystone @@ -421,7 +421,7 @@ function start_keystone() { # stop_keystone() - Stop running processes function stop_keystone() { # Kill the Keystone screen window - screen -S $SCREEN_NAME -p key -X kill + screen_stop key } diff --git a/lib/nova b/lib/nova index 39685a835a..178f8ee19c 100644 --- a/lib/nova +++ b/lib/nova @@ -705,7 +705,7 @@ function stop_nova() { # Some services are listed here twice since more than one instance # of a service may be running in certain configs. for serv in n-api n-cpu n-crt n-net n-sch n-novnc n-xvnc n-cauth n-spice n-cond n-cell n-cell n-api-meta; do - screen -S $SCREEN_NAME -p $serv -X kill + screen_stop $serv done if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then stop_nova_hypervisor diff --git a/lib/trove b/lib/trove index f8e3eddfe2..870afbe7bd 100644 --- a/lib/trove +++ b/lib/trove @@ -198,7 +198,7 @@ function start_trove() { function stop_trove() { # Kill the trove screen windows for serv in tr-api tr-tmgr tr-cond; do - screen -S $SCREEN_NAME -p $serv -X kill + screen_stop $serv done } diff --git a/stackrc b/stackrc index 3fdc566ed2..49fb26b2c7 100644 --- a/stackrc +++ b/stackrc @@ -9,6 +9,9 @@ DEST=/opt/stack # Destination for working data DATA_DIR=${DEST}/data +# Destination for status files +SERVICE_DIR=${DEST}/status + # Determine stack user if [[ $EUID -eq 0 ]]; then STACK_USER=stack diff --git a/unstack.sh b/unstack.sh index 67c8b7c7b1..77dbe074d2 100755 --- a/unstack.sh +++ b/unstack.sh @@ -36,6 +36,9 @@ source 
$TOP_DIR/lib/apache # Get project function libraries source $TOP_DIR/lib/baremetal source $TOP_DIR/lib/cinder +source $TOP_DIR/lib/keystone +source $TOP_DIR/lib/glance +source $TOP_DIR/lib/nova source $TOP_DIR/lib/horizon source $TOP_DIR/lib/swift source $TOP_DIR/lib/neutron @@ -75,21 +78,29 @@ if [[ "$Q_USE_DEBUG_COMMAND" == "True" ]]; then teardown_neutron_debug fi -# Shut down devstack's screen to get the bulk of OpenStack services in one shot -SCREEN=$(which screen) -if [[ -n "$SCREEN" ]]; then - SESSION=$(screen -ls | awk '/[0-9].stack/ { print $1 }') - if [[ -n "$SESSION" ]]; then - screen -X -S $SESSION quit - fi +# Call service stop +if is_service_enabled trove; then + stop_trove +fi + +if is_service_enabled heat; then + stop_heat fi -# Shut down Nova hypervisor plugins after Nova -NOVA_PLUGINS=$TOP_DIR/lib/nova_plugins -if is_service_enabled nova && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then - # Load plugin - source $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER - stop_nova_hypervisor +if is_service_enabled ceilometer; then + stop_ceilometer +fi + +if is_service_enabled nova; then + stop_nova +fi + +if is_service_enabled g-api g-reg; then + stop_glance +fi + +if is_service_enabled key; then + stop_keystone fi # Swift runs daemons @@ -123,6 +134,7 @@ SCSI_PERSIST_DIR=$CINDER_STATE_PATH/volumes/* # Get the iSCSI volumes if is_service_enabled cinder; then + stop_cinder cleanup_cinder fi @@ -152,4 +164,13 @@ if is_service_enabled trove; then cleanup_trove fi +# Clean up the remainder of the screen processes +SCREEN=$(which screen) +if [[ -n "$SCREEN" ]]; then + SESSION=$(screen -ls | awk '/[0-9].stack/ { print $1 }') + if [[ -n "$SESSION" ]]; then + screen -X -S $SESSION quit + fi +fi + cleanup_tmp From 2bb483d32ec0876f071550a3fc755436d1661681 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Fri, 3 Jan 2014 09:41:27 -0500 Subject: [PATCH 0615/4704] clean up ubuntu versions oneiric is long dead, remove references to it whenever possible (one more subtle issue 
in cinder should be a separate patch). This includes removing the oneiric only tool build_uec.sh. also remove the bulk of references to quantal, which is 8 months out of support. note: raring only has support for the rest of the month. Change-Id: Ib17502be7572af76dc95560615221b48b970a547 --- files/apts/cinder | 2 +- files/apts/glance | 1 - files/apts/n-cpu | 2 +- files/apts/neutron | 2 +- files/apts/tls-proxy | 2 +- lib/rpc_backend | 5 +- stack.sh | 3 +- tools/build_uec.sh | 302 ----------------------------------------- tools/get_uec_image.sh | 6 +- 9 files changed, 9 insertions(+), 316 deletions(-) delete mode 100755 tools/build_uec.sh diff --git a/files/apts/cinder b/files/apts/cinder index f8e3b6d06d..712fee99ec 100644 --- a/files/apts/cinder +++ b/files/apts/cinder @@ -4,4 +4,4 @@ qemu-utils libpq-dev python-dev open-iscsi -open-iscsi-utils # Deprecated since quantal dist:lucid,oneiric,precise +open-iscsi-utils # Deprecated since quantal dist:precise diff --git a/files/apts/glance b/files/apts/glance index 26826a53c7..22787bc5a2 100644 --- a/files/apts/glance +++ b/files/apts/glance @@ -9,7 +9,6 @@ python-dev python-eventlet python-routes python-greenlet -python-argparse # dist:oneiric python-sqlalchemy python-wsgiref python-pastedeploy diff --git a/files/apts/n-cpu b/files/apts/n-cpu index 88e0144079..29e37603b7 100644 --- a/files/apts/n-cpu +++ b/files/apts/n-cpu @@ -2,7 +2,7 @@ nbd-client lvm2 open-iscsi -open-iscsi-utils # Deprecated since quantal dist:lucid,oneiric,precise +open-iscsi-utils # Deprecated since quantal dist:precise genisoimage sysfsutils sg3-utils diff --git a/files/apts/neutron b/files/apts/neutron index 0f4b69f8ef..5760113c8c 100644 --- a/files/apts/neutron +++ b/files/apts/neutron @@ -18,7 +18,7 @@ python-mysqldb python-pyudev python-qpid # dist:precise dnsmasq-base -dnsmasq-utils # for dhcp_release only available in dist:oneiric,precise,quantal +dnsmasq-utils # for dhcp_release only available in dist:precise rabbitmq-server # NOPRIME
qpid # NOPRIME sqlite3 diff --git a/files/apts/tls-proxy b/files/apts/tls-proxy index 0a44015925..8fca42d124 100644 --- a/files/apts/tls-proxy +++ b/files/apts/tls-proxy @@ -1 +1 @@ -stud # only available in dist:precise,quantal +stud # only available in dist:precise diff --git a/lib/rpc_backend b/lib/rpc_backend index ae83e85e89..f59c80096f 100644 --- a/lib/rpc_backend +++ b/lib/rpc_backend @@ -192,9 +192,8 @@ function qpid_is_supported() { GetDistro fi - # Qpid was introduced to Ubuntu in precise, disallow it on oneiric; it is - # not in openSUSE either right now. - ( ! ([[ "$DISTRO" = "oneiric" ]] || is_suse) ) + # Qpid is not in openSUSE + ( ! is_suse ) } diff --git a/stack.sh b/stack.sh index 7c065719c4..c303dc3927 100755 --- a/stack.sh +++ b/stack.sh @@ -131,7 +131,7 @@ disable_negated_services # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -if [[ ! ${DISTRO} =~ (oneiric|precise|quantal|raring|saucy|trusty|7.0|wheezy|sid|testing|jessie|f18|f19|f20|opensuse-12.2|rhel6) ]]; then +if [[ ! ${DISTRO} =~ (precise|raring|saucy|trusty|7.0|wheezy|sid|testing|jessie|f18|f19|f20|opensuse-12.2|rhel6) ]]; then echo "WARNING: this script has not been tested on $DISTRO" if [[ "$FORCE" != "yes" ]]; then die $LINENO "If you wish to run this script anyway run with FORCE=yes" @@ -1203,7 +1203,6 @@ fi # See https://help.ubuntu.com/community/CloudInit for more on cloud-init # # Override ``IMAGE_URLS`` with a comma-separated list of UEC images. 
-# * **oneiric**: http://uec-images.ubuntu.com/oneiric/current/oneiric-server-cloudimg-amd64.tar.gz # * **precise**: http://uec-images.ubuntu.com/precise/current/precise-server-cloudimg-amd64.tar.gz if is_service_enabled g-reg; then diff --git a/tools/build_uec.sh b/tools/build_uec.sh deleted file mode 100755 index bce051a0b7..0000000000 --- a/tools/build_uec.sh +++ /dev/null @@ -1,302 +0,0 @@ -#!/usr/bin/env bash - -# **build_uec.sh** - -# Make sure that we have the proper version of ubuntu (only works on oneiric) -if ! egrep -q "oneiric" /etc/lsb-release; then - echo "This script only works with ubuntu oneiric." - exit 1 -fi - -# Keep track of the current directory -TOOLS_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=$(cd $TOOLS_DIR/..; pwd) - -# Import common functions -. $TOP_DIR/functions - -cd $TOP_DIR - -# Source params -source ./stackrc - -# Ubuntu distro to install -DIST_NAME=${DIST_NAME:-oneiric} - -# Configure how large the VM should be -GUEST_SIZE=${GUEST_SIZE:-10G} - -# exit on error to stop unexpected errors -set -o errexit -set -o xtrace - -# Abort if localrc is not set -if [ ! -e $TOP_DIR/localrc ]; then - echo "You must have a localrc with ALL necessary passwords defined before proceeding." - echo "See stack.sh for required passwords." - exit 1 -fi - -# Install deps if needed -DEPS="kvm libvirt-bin kpartx cloud-utils curl" -apt_get install -y --force-yes $DEPS || true # allow this to fail gracefully for concurrent builds - -# Where to store files and instances -WORK_DIR=${WORK_DIR:-/opt/uecstack} - -# Where to store images -image_dir=$WORK_DIR/images/$DIST_NAME -mkdir -p $image_dir - -# Start over with a clean base image, if desired -if [ $CLEAN_BASE ]; then - rm -f $image_dir/disk -fi - -# Get the base image if it does not yet exist -if [ ! -e $image_dir/disk ]; then - $TOOLS_DIR/get_uec_image.sh -r $GUEST_SIZE $DIST_NAME $image_dir/disk $image_dir/kernel -fi - -# Copy over dev environment if COPY_ENV is set. 
-# This will also copy over your current devstack. -if [ $COPY_ENV ]; then - cd $TOOLS_DIR - ./copy_dev_environment_to_uec.sh $image_dir/disk -fi - -# Option to warm the base image with software requirements. -if [ $WARM_CACHE ]; then - cd $TOOLS_DIR - ./warm_apts_for_uec.sh $image_dir/disk -fi - -# Name of our instance, used by libvirt -GUEST_NAME=${GUEST_NAME:-devstack} - -# Mop up after previous runs -virsh destroy $GUEST_NAME || true - -# Where this vm is stored -vm_dir=$WORK_DIR/instances/$GUEST_NAME - -# Create vm dir and remove old disk -mkdir -p $vm_dir -rm -f $vm_dir/disk - -# Create a copy of the base image -qemu-img create -f qcow2 -b $image_dir/disk $vm_dir/disk - -# Back to devstack -cd $TOP_DIR - -GUEST_NETWORK=${GUEST_NETWORK:-1} -GUEST_RECREATE_NET=${GUEST_RECREATE_NET:-yes} -GUEST_IP=${GUEST_IP:-192.168.$GUEST_NETWORK.50} -GUEST_CIDR=${GUEST_CIDR:-$GUEST_IP/24} -GUEST_NETMASK=${GUEST_NETMASK:-255.255.255.0} -GUEST_GATEWAY=${GUEST_GATEWAY:-192.168.$GUEST_NETWORK.1} -GUEST_MAC=${GUEST_MAC:-"02:16:3e:07:69:`printf '%02X' $GUEST_NETWORK`"} -GUEST_RAM=${GUEST_RAM:-1524288} -GUEST_CORES=${GUEST_CORES:-1} - -# libvirt.xml configuration -NET_XML=$vm_dir/net.xml -NET_NAME=${NET_NAME:-devstack-$GUEST_NETWORK} -cat > $NET_XML < - $NET_NAME - - - - - - - - -EOF - -if [[ "$GUEST_RECREATE_NET" == "yes" ]]; then - virsh net-destroy $NET_NAME || true - # destroying the network isn't enough to delete the leases - rm -f /var/lib/libvirt/dnsmasq/$NET_NAME.leases - virsh net-create $vm_dir/net.xml -fi - -# libvirt.xml configuration -LIBVIRT_XML=$vm_dir/libvirt.xml -cat > $LIBVIRT_XML < - $GUEST_NAME - $GUEST_RAM - - hvm - $image_dir/kernel - root=/dev/vda ro console=ttyS0 init=/usr/lib/cloud-init/uncloud-init ds=nocloud-net;s=http://192.168.$GUEST_NETWORK.1:4567/ ubuntu-pass=ubuntu - - - - - - $GUEST_CORES - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -EOF - - -rm -rf $vm_dir/uec -cp -r $TOOLS_DIR/uec $vm_dir/uec - -# set metadata -cat > 
$vm_dir/uec/meta-data< $vm_dir/uec/user-data<> $vm_dir/uec/user-data< localrc < /opt/stack/.ssh/authorized_keys -chown -R $STACK_USER /opt/stack -chmod 700 /opt/stack/.ssh -chmod 600 /opt/stack/.ssh/authorized_keys - -grep -q "^#includedir.*/etc/sudoers.d" /etc/sudoers || - echo "#includedir /etc/sudoers.d" >> /etc/sudoers -( umask 226 && echo "stack ALL=(ALL) NOPASSWD:ALL" \ - > /etc/sudoers.d/50_stack_sh ) -EOF -fi - -# Run stack.sh -cat >> $vm_dir/uec/user-data< Date: Sun, 12 Jan 2014 19:35:43 +0000 Subject: [PATCH 0616/4704] Skip Nova exercises if Nova is not enabled This allows for ./exercises.sh to complete sucessfully when nova is not enabled / installed. Change-Id: If969e14f5106c15007146e8fad1da27d131828c8 --- exercises/aggregates.sh | 4 ++++ exercises/bundle.sh | 4 ++++ exercises/euca.sh | 4 ++++ exercises/floating_ips.sh | 4 ++++ exercises/sec_groups.sh | 4 ++++ 5 files changed, 20 insertions(+) diff --git a/exercises/aggregates.sh b/exercises/aggregates.sh index 1b1ac06678..d223301f35 100755 --- a/exercises/aggregates.sh +++ b/exercises/aggregates.sh @@ -43,6 +43,10 @@ source $TOP_DIR/exerciserc # Test as the admin user . $TOP_DIR/openrc admin admin +# If nova api is not enabled we exit with exitcode 55 so that +# the exercise is skipped +is_service_enabled n-api || exit 55 + # Cells does not support aggregates. 
is_service_enabled n-cell && exit 55 diff --git a/exercises/bundle.sh b/exercises/bundle.sh index b83678ab1f..5470960b91 100755 --- a/exercises/bundle.sh +++ b/exercises/bundle.sh @@ -39,6 +39,10 @@ rm -f $TOP_DIR/cacert.pem rm -f $TOP_DIR/cert.pem rm -f $TOP_DIR/pk.pem +# If nova api is not enabled we exit with exitcode 55 so that +# the exercise is skipped +is_service_enabled n-api || exit 55 + # Get Certificates nova x509-get-root-cert $TOP_DIR/cacert.pem nova x509-create-cert $TOP_DIR/pk.pem $TOP_DIR/cert.pem diff --git a/exercises/euca.sh b/exercises/euca.sh index ed521e4f7f..51b2644458 100755 --- a/exercises/euca.sh +++ b/exercises/euca.sh @@ -41,6 +41,10 @@ fi # Import exercise configuration source $TOP_DIR/exerciserc +# If nova api is not enabled we exit with exitcode 55 so that +# the exercise is skipped +is_service_enabled n-api || exit 55 + # Skip if the hypervisor is Docker [[ "$VIRT_DRIVER" == "docker" ]] && exit 55 diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh index 7055278f35..4ca90a5c35 100755 --- a/exercises/floating_ips.sh +++ b/exercises/floating_ips.sh @@ -38,6 +38,10 @@ fi # Import exercise configuration source $TOP_DIR/exerciserc +# If nova api is not enabled we exit with exitcode 55 so that +# the exercise is skipped +is_service_enabled n-api || exit 55 + # Skip if the hypervisor is Docker [[ "$VIRT_DRIVER" == "docker" ]] && exit 55 diff --git a/exercises/sec_groups.sh b/exercises/sec_groups.sh index eb32cc7aa7..d71a1e0755 100755 --- a/exercises/sec_groups.sh +++ b/exercises/sec_groups.sh @@ -33,6 +33,10 @@ source $TOP_DIR/openrc # Import exercise configuration source $TOP_DIR/exerciserc +# If nova api is not enabled we exit with exitcode 55 so that +# the exercise is skipped +is_service_enabled n-api || exit 55 + # Skip if the hypervisor is Docker [[ "$VIRT_DRIVER" == "docker" ]] && exit 55 From 38d1f2339a88c389e4be44fc00e59f25a62fec14 Mon Sep 17 00:00:00 2001 From: Malini Kamalambal Date: Wed, 8 Jan 2014 09:54:13 -0500 
Subject: [PATCH 0617/4704] Add Marconi to Tempest config This patch adds queuing to tempest config, provided queuing is available in devstack. Change-Id: I2925a07d312c1f8ab2fe465f74f0bef9299eef40 Implements: blueprint add-basic-marconi-tests --- lib/tempest | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index 08c0553f03..ef9dfe218b 100644 --- a/lib/tempest +++ b/lib/tempest @@ -329,7 +329,7 @@ function configure_tempest() { iniset $TEMPEST_CONF network-feature-enabled api_extensions "${NETWORK_API_EXTENSIONS:-all}" # service_available - for service in nova cinder glance neutron swift heat horizon ceilometer ironic savanna trove; do + for service in nova cinder glance neutron swift heat horizon ceilometer ironic savanna trove marconi; do if is_service_enabled $service ; then iniset $TEMPEST_CONF service_available $service "True" else From d2bcbea5f95377043b0dcdba330501d7b81a4561 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 13 Jan 2014 11:22:41 -0600 Subject: [PATCH 0618/4704] Updates for tools/info.sh * Handle local.conf localrc section * remove blank lines * rather than removing password lines, just remove the password itself to at least show which password vars have been set Change-Id: Ieca9baaf03e53b23e336944ad0ed2581c9bee460 --- tools/info.sh | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/tools/info.sh b/tools/info.sh index 14ab8f6306..3ab7966ab4 100755 --- a/tools/info.sh +++ b/tools/info.sh @@ -85,8 +85,8 @@ done # Packages # -------- -# - We are going to check packages only for the services needed. -# - We are parsing the packages files and detecting metadatas. 
+# - Only check packages for the services enabled +# - Parse version info from the package metadata, not the package/file names for p in $(get_packages $ENABLED_SERVICES); do if [[ "$os_PACKAGE" = "deb" ]]; then @@ -141,9 +141,15 @@ rm $FREEZE_FILE # Dump localrc with 'localrc|' prepended and comments and passwords left out if [[ -r $TOP_DIR/localrc ]]; then + RC=$TOP_DIR/localrc +elif [[ -f $RC_DIR/.localrc.auto ]]; then + RC=$TOP_DIR/.localrc.auto +fi +if [[ -n $RC ]]; then sed -e ' - /PASSWORD/d; + /^[ \t]*$/d; + /PASSWORD/s/=.*$/=\/; /^#/d; s/^/localrc\|/; - ' $TOP_DIR/localrc + ' $RC fi From 279295c72c4e7028fc6eac75412b9b5f92cd630b Mon Sep 17 00:00:00 2001 From: Lucas Alvares Gomes Date: Tue, 14 Jan 2014 11:37:51 +0000 Subject: [PATCH 0619/4704] Fix duplicated rootwrap.d in lib/ironic The Ironic setup of devstack is duplicating the rootwrap.d directory at /etc/ironic/rootwrap.d/rootwrap.d, this will cause the ironic-rootwrap command to fail to execute. This patch is removing the duplicated rootwrap.d directory. Change-Id: I24844c24620b5b33ad1a6acd0d872e9df11d6d89 Closes-Bug: #1268930 --- lib/ironic | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lib/ironic b/lib/ironic index 1ff3c81f06..afbc3e09e4 100644 --- a/lib/ironic +++ b/lib/ironic @@ -33,7 +33,6 @@ IRONIC_AUTH_CACHE_DIR=${IRONIC_AUTH_CACHE_DIR:-/var/cache/ironic} IRONIC_CONF_DIR=${IRONIC_CONF_DIR:-/etc/ironic} IRONIC_CONF_FILE=$IRONIC_CONF_DIR/ironic.conf IRONIC_ROOTWRAP_CONF=$IRONIC_CONF_DIR/rootwrap.conf -IRONIC_ROOTWRAP_FILTERS=$IRONIC_CONF_DIR/rootwrap.d IRONIC_POLICY_JSON=$IRONIC_CONF_DIR/policy.json # Support entry points installation of console scripts @@ -118,7 +117,7 @@ function configure_ironic_api() { # Sets conductor specific settings. 
function configure_ironic_conductor() { cp $IRONIC_DIR/etc/ironic/rootwrap.conf $IRONIC_ROOTWRAP_CONF - cp -r $IRONIC_DIR/etc/ironic/rootwrap.d $IRONIC_ROOTWRAP_FILTERS + cp -r $IRONIC_DIR/etc/ironic/rootwrap.d $IRONIC_CONF_DIR iniset $IRONIC_CONF DEFAULT rootwrap_config $IRONIC_ROOTWRAP_CONF } From ef1e08022b9553b07757005e7a5103fbdc0d99f0 Mon Sep 17 00:00:00 2001 From: armando-migliaccio Date: Thu, 2 Jan 2014 16:33:53 -0800 Subject: [PATCH 0620/4704] Add sanity check framework to verify neutron server/backend integration Some Neutron plugins require controllers and multiple backend services to operate correctly. This patch adds the framework for third party plugins to run sanity checks after Neutron Server has started. This simple addition may reveal potential configuration pitfalls much earlier in the dev/test cycle, thus speeding up the build churn process. The first plugin that uses this framework is the VMware NSX one. Closes-bug: #1265671 Change-Id: I17f9c5c8e828316ff03f0eff42ae4ae6c6c58733 --- lib/neutron | 5 +++++ lib/neutron_thirdparty/README.md | 3 +++ lib/neutron_thirdparty/bigswitch_floodlight | 4 ++++ lib/neutron_thirdparty/midonet | 4 ++++ lib/neutron_thirdparty/ryu | 4 ++++ lib/neutron_thirdparty/trema | 4 ++++ lib/neutron_thirdparty/vmware_nsx | 4 ++++ stack.sh | 1 + 8 files changed, 29 insertions(+) diff --git a/lib/neutron b/lib/neutron index 43f43f951a..81faa103b5 100644 --- a/lib/neutron +++ b/lib/neutron @@ -958,6 +958,11 @@ function stop_neutron_third_party() { _neutron_third_party_do stop } +# check_neutron_third_party_integration() - Check that third party integration is sane +function check_neutron_third_party_integration() { + _neutron_third_party_do check +} + # Restore xtrace $XTRACE diff --git a/lib/neutron_thirdparty/README.md b/lib/neutron_thirdparty/README.md index b289f58c5d..2460e5cac7 100644 --- a/lib/neutron_thirdparty/README.md +++ b/lib/neutron_thirdparty/README.md @@ -34,3 +34,6 @@ functions to be implemented * ``stop_``: stop 
running processes (non-screen) + +* ``check_``: + verify that the integration between neutron server and third-party components is sane diff --git a/lib/neutron_thirdparty/bigswitch_floodlight b/lib/neutron_thirdparty/bigswitch_floodlight index ebde0673b8..1fd4fd801a 100644 --- a/lib/neutron_thirdparty/bigswitch_floodlight +++ b/lib/neutron_thirdparty/bigswitch_floodlight @@ -45,5 +45,9 @@ function stop_bigswitch_floodlight() { : } +function check_bigswitch_floodlight() { + : +} + # Restore xtrace $MY_XTRACE diff --git a/lib/neutron_thirdparty/midonet b/lib/neutron_thirdparty/midonet index 7928bca31f..e672528a2d 100644 --- a/lib/neutron_thirdparty/midonet +++ b/lib/neutron_thirdparty/midonet @@ -56,5 +56,9 @@ function stop_midonet() { : } +function check_midonet() { + : +} + # Restore xtrace $MY_XTRACE diff --git a/lib/neutron_thirdparty/ryu b/lib/neutron_thirdparty/ryu index 3b825a10c1..5edf273361 100644 --- a/lib/neutron_thirdparty/ryu +++ b/lib/neutron_thirdparty/ryu @@ -75,5 +75,9 @@ function stop_ryu() { : } +function check_ryu() { + : +} + # Restore xtrace $MY_XTRACE diff --git a/lib/neutron_thirdparty/trema b/lib/neutron_thirdparty/trema index bdc23568fb..2b125646dc 100644 --- a/lib/neutron_thirdparty/trema +++ b/lib/neutron_thirdparty/trema @@ -109,5 +109,9 @@ function stop_trema() { sudo TREMA_TMP=$TREMA_TMP_DIR trema killall } +function check_trema() { + : +} + # Restore xtrace $MY_XTRACE diff --git a/lib/neutron_thirdparty/vmware_nsx b/lib/neutron_thirdparty/vmware_nsx index 70d348274f..7c6202723f 100644 --- a/lib/neutron_thirdparty/vmware_nsx +++ b/lib/neutron_thirdparty/vmware_nsx @@ -78,5 +78,9 @@ function stop_vmware_nsx() { done } +function check_vmware_nsx() { + : +} + # Restore xtrace $MY_XTRACE diff --git a/stack.sh b/stack.sh index 7da41a98c8..621a058444 100755 --- a/stack.sh +++ b/stack.sh @@ -1116,6 +1116,7 @@ fi if is_service_enabled q-svc; then echo_summary "Starting Neutron" start_neutron_service_and_check + 
check_neutron_third_party_integration elif is_service_enabled $DATABASE_BACKENDS && is_service_enabled n-net; then NM_CONF=${NOVA_CONF} if is_service_enabled n-cell; then From 5eec5b6b80401842ad1f7275d9c7a6949cc6f848 Mon Sep 17 00:00:00 2001 From: Gordon Chung Date: Tue, 14 Jan 2014 11:05:31 -0500 Subject: [PATCH 0621/4704] command not found errors on unstack - add lib/ceilometer and lib/heat to source list for when stop_heat and stop_ceilometer functions are called. - add lib/tls source to lib/keystone for when is_ssl_enabled_service function called. Change-Id: Ief05766e9cfda71fb6392c8a757d04751283414e Closes-Bug: #1269047 --- lib/keystone | 1 + unstack.sh | 2 ++ 2 files changed, 3 insertions(+) diff --git a/lib/keystone b/lib/keystone index a7e5d66808..ceefe6a144 100644 --- a/lib/keystone +++ b/lib/keystone @@ -28,6 +28,7 @@ XTRACE=$(set +o | grep xtrace) set +o xtrace +source $TOP_DIR/lib/tls # Defaults # -------- diff --git a/unstack.sh b/unstack.sh index 77dbe074d2..4445f1fb31 100755 --- a/unstack.sh +++ b/unstack.sh @@ -35,10 +35,12 @@ source $TOP_DIR/lib/apache # Get project function libraries source $TOP_DIR/lib/baremetal +source $TOP_DIR/lib/ceilometer source $TOP_DIR/lib/cinder source $TOP_DIR/lib/keystone source $TOP_DIR/lib/glance source $TOP_DIR/lib/nova +source $TOP_DIR/lib/heat source $TOP_DIR/lib/horizon source $TOP_DIR/lib/swift source $TOP_DIR/lib/neutron From 52a7b6ecbad11c08dcd77a6fcd8bfef6a20324a9 Mon Sep 17 00:00:00 2001 From: Jakub Libosvar Date: Tue, 14 Jan 2014 18:52:51 +0100 Subject: [PATCH 0622/4704] Run neutron-debug with admin tenant in neutron-adv-test Because neutron-debug create-probe needs admin role only, demo tenants cannot create ports. neutron-debug is wrapped in order to run it only with admin tenant. 
Change-Id: Ib65e8639858c597345c6a5fdc0192b40f34a0300 Closes-Bug: #1269090 --- exercises/neutron-adv-test.sh | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/exercises/neutron-adv-test.sh b/exercises/neutron-adv-test.sh index 0c0d42f458..1343f11553 100755 --- a/exercises/neutron-adv-test.sh +++ b/exercises/neutron-adv-test.sh @@ -185,6 +185,14 @@ function confirm_server_active { fi } +function neutron_debug_admin { + local os_username=$OS_USERNAME + local os_tenant_id=$OS_TENANT_ID + source $TOP_DIR/openrc admin admin + neutron-debug $@ + source $TOP_DIR/openrc $os_username $os_tenant_id +} + function add_tenant { local TENANT=$1 local USER=$2 @@ -241,7 +249,7 @@ function create_network { local NET_ID=$(neutron net-create --tenant-id $TENANT_ID $NET_NAME $EXTRA| grep ' id ' | awk '{print $4}' ) die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $TENANT_ID $NET_NAME $EXTRA" neutron subnet-create --ip-version 4 --tenant-id $TENANT_ID --gateway $GATEWAY $NET_ID $CIDR - neutron-debug probe-create --device-owner compute $NET_ID + neutron_debug_admin probe-create --device-owner compute $NET_ID source $TOP_DIR/openrc demo demo } From 55d9b9a9517ebe8c37f82136ff5eb7b781929325 Mon Sep 17 00:00:00 2001 From: Shiv Haris Date: Tue, 14 Jan 2014 11:33:28 -0800 Subject: [PATCH 0623/4704] Fix typo NEUTON to NEUTRON Fixes bug: #1269111 Change-Id: Icf66b4d474698b5f3ca22bc656ecd12d03164bce --- lib/neutron_plugins/brocade | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron_plugins/brocade b/lib/neutron_plugins/brocade index f9275cacc2..8e18d04984 100644 --- a/lib/neutron_plugins/brocade +++ b/lib/neutron_plugins/brocade @@ -38,7 +38,7 @@ function neutron_plugin_configure_l3_agent() { } function neutron_plugin_configure_plugin_agent() { - AGENT_BINARY="$NEUTON_BIN_DIR/neutron-linuxbridge-agent" + AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-linuxbridge-agent" } function neutron_plugin_setup_interface_driver() { From 
b4a215cce2c649ce811893f5e57b7ee6c55158e8 Mon Sep 17 00:00:00 2001 From: YAMAMOTO Takashi Date: Fri, 10 Jan 2014 16:39:32 +0900 Subject: [PATCH 0624/4704] Sanitize language settings To avoid commands bailing out with "unsupported locale setting" errors. Change-Id: I54ae4cd84a0a4b4875533181b1d96563a1604775 --- stack.sh | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/stack.sh b/stack.sh index 7da41a98c8..c52514413c 100755 --- a/stack.sh +++ b/stack.sh @@ -23,6 +23,13 @@ # Make sure custom grep options don't get in the way unset GREP_OPTIONS +# Sanitize language settings to avoid commands bailing out +# with "unsupported locale setting" errors. +unset LANG +unset LANGUAGE +LC_ALL=C +export LC_ALL + # Keep track of the devstack directory TOP_DIR=$(cd $(dirname "$0") && pwd) From d5a5460888869eb22cc6f2622c3adbf492680971 Mon Sep 17 00:00:00 2001 From: Steven Dake Date: Wed, 15 Jan 2014 10:56:51 -0700 Subject: [PATCH 0625/4704] Revert "Change the libvirtd log level to DEBUG" Suggested by Daniel Berrange in this thread: http://lists.openstack.org/pipermail/openstack-dev/2014-January/024407.html This reverts commit 3bd85c9d6e257fc952cb3c6d0c09e199685bd5ed. Change-Id: I370ba61cf8a00b51684cd504fed4ba4078d868be --- lib/nova_plugins/hypervisor-libvirt | 3 --- 1 file changed, 3 deletions(-) diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt index ef40e7ab4c..6f90f4ac17 100644 --- a/lib/nova_plugins/hypervisor-libvirt +++ b/lib/nova_plugins/hypervisor-libvirt @@ -93,9 +93,6 @@ EOF" fi fi - # Change the libvirtd log level to DEBUG. - sudo sed -i s/"#log_level = 3"/"log_level = 1"/ /etc/libvirt/libvirtd.conf - # The user that nova runs as needs to be member of **libvirtd** group otherwise # nova-compute will be unable to use libvirt. if ! 
getent group $LIBVIRT_GROUP >/dev/null; then From 2394605a635c86c9a90f683f1f3a3ee718d17d5f Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Wed, 15 Jan 2014 21:42:32 +0000 Subject: [PATCH 0626/4704] Typo: funstions=>functions Change-Id: I59caf62b049d09450ce3236648cf1ede2f48e7f5 --- lib/neutron | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron b/lib/neutron index 43f43f951a..5dc5703f3c 100644 --- a/lib/neutron +++ b/lib/neutron @@ -1,5 +1,5 @@ # lib/neutron -# functions - funstions specific to neutron +# functions - functions specific to neutron # Dependencies: # ``functions`` file From 14daa57d67fed6dc98b833f4c3698fef8ff7f312 Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Wed, 15 Jan 2014 21:43:25 +0000 Subject: [PATCH 0627/4704] Remove old DEFAULT.root_helper setting root_helper is now under the agent group and not DEFAULT Change-Id: I11867f7ceff1f3b8b0bc2ef8aa508b6ecee653fc --- lib/neutron | 3 --- 1 file changed, 3 deletions(-) diff --git a/lib/neutron b/lib/neutron index 43f43f951a..fd61d140d3 100644 --- a/lib/neutron +++ b/lib/neutron @@ -611,9 +611,6 @@ function _configure_neutron_debug_command() { iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT verbose False iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT debug False iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE - iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT root_helper "$Q_RR_COMMAND" - # Intermediate fix until Neutron patch lands and then line above will - # be cleaned. iniset $NEUTRON_TEST_CONFIG_FILE agent root_helper "$Q_RR_COMMAND" _neutron_setup_interface_driver $NEUTRON_TEST_CONFIG_FILE From fe4c4f7a9e6d1a4f26c67b8e1609fc5e80c5ef83 Mon Sep 17 00:00:00 2001 From: john-griffith Date: Wed, 15 Jan 2014 11:24:03 -0700 Subject: [PATCH 0628/4704] Update cinder cert script to use run_tempest Changes to tempest run_tests.sh (commit: 17520e49a7e69b3817856a739121a1fb2906f2cc) breaks the cinder_driver_cert script. 
A backward compatible run_tempest.sh script was added, so for now we should update the cinder_driver_cert script to use that Change-Id: I611a01dd4788ae01da8a6167a530f9e44733dfc6 Closes-Bug: #1269531 --- driver_certs/cinder_driver_cert.sh | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/driver_certs/cinder_driver_cert.sh b/driver_certs/cinder_driver_cert.sh index 18bef8b3b5..edcc6d4800 100755 --- a/driver_certs/cinder_driver_cert.sh +++ b/driver_certs/cinder_driver_cert.sh @@ -2,6 +2,22 @@ # **cinder_cert.sh** +# This script is a simple wrapper around the tempest volume api tests +# It requires that you have a working and functional devstack install +# and that you've enabled your device driver by making the necessary +# modifications to /etc/cinder/cinder.conf + +# This script will refresh your openstack repo's and restart the cinder +# services to pick up your driver changes. +# please NOTE; this script assumes your devstack install is functional +# and includes tempest. A good first step is to make sure you can +# create volumes on your device before you even try and run this script. + +# It also assumes default install location (/opt/stack/xxx) +# to aid in debug, you should also verify that you've added +# an output directory for screen logs: +# SCREEN_LOGDIR=/opt/stack/screen-logs + CERT_DIR=$(cd $(dirname "$0") && pwd) TOP_DIR=$(cd $CERT_DIR/..; pwd) @@ -73,9 +89,9 @@ start_cinder sleep 5 # run tempest api/volume/test_* -log_message "Run the actual tempest volume tests (run_tests.sh -N tempest.api.volume.test_*)...", True +log_message "Run the actual tempest volume tests (./tools/pretty_tox.sh api.volume_*)...", True exec 2> >(tee -a $TEMPFILE) -`./run_tests.sh -N tempest.api.volume.test_*` +`./tools/pretty_tox.sh api.volume` if [[ $? = 0 ]]; then log_message "CONGRATULATIONS!!! 
Device driver PASSED!", True log_message "Submit output: ($TEMPFILE)" From a0a23311c3c40f631663468e1ba45d5e84790019 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 15 Jan 2014 15:24:30 -0500 Subject: [PATCH 0629/4704] updated sar options to collect more data in order to have better data on the load state of the test nodes we should track things beyond just cpu time. Add in load time, process creation rates, and io rates during the tests. also add a sar filter that makes it report on one line reading sar input with multiple flags is somewhat problematic, because it's tons of interspersed headers. So build something with does a pivot filter to make it possible to get this all on one line. Change-Id: I8f085cedda65dfc37ad530eb97ba1fc5577314c3 --- stack.sh | 12 +++++-- tools/sar_filter.py | 82 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 91 insertions(+), 3 deletions(-) create mode 100755 tools/sar_filter.py diff --git a/stack.sh b/stack.sh index 7da41a98c8..382b75e7fc 100755 --- a/stack.sh +++ b/stack.sh @@ -860,11 +860,17 @@ init_service_check # ------- # If enabled, systat has to start early to track OpenStack service startup. 
-if is_service_enabled sysstat;then +if is_service_enabled sysstat; then + # what we want to measure + # -u : cpu statitics + # -q : load + # -b : io load rates + # -w : process creation and context switch rates + SYSSTAT_OPTS="-u -q -b -w" if [[ -n ${SCREEN_LOGDIR} ]]; then - screen_it sysstat "cd ; sar -o $SCREEN_LOGDIR/$SYSSTAT_FILE $SYSSTAT_INTERVAL" + screen_it sysstat "cd $TOP_DIR; ./tools/sar_filter.py $SYSSTAT_OPTS -o $SCREEN_LOGDIR/$SYSSTAT_FILE $SYSSTAT_INTERVAL" else - screen_it sysstat "sar $SYSSTAT_INTERVAL" + screen_it sysstat "./tools/sar_filter.py $SYSSTAT_OPTS $SYSSTAT_INTERVAL" fi fi diff --git a/tools/sar_filter.py b/tools/sar_filter.py new file mode 100755 index 0000000000..ed8c19687c --- /dev/null +++ b/tools/sar_filter.py @@ -0,0 +1,82 @@ +#!/usr/bin/env python +# +# Copyright 2014 Samsung Electronics Corp. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import re +import subprocess +import sys + + +def is_data_line(line): + timestamp, data = parse_line(line) + return re.search('\d\.d', data) + + +def parse_line(line): + m = re.search('(\d\d:\d\d:\d\d \w\w)(\s+((\S+)\s*)+)', line) + if m: + date = m.group(1) + data = m.group(2).rstrip() + return date, data + else: + return None, None + + +process = subprocess.Popen( + "sar %s" % " ".join(sys.argv[1:]), + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) + +# Poll process for new output until finished + +start_time = "" +header = "" +data_line = "" +printed_header = False +current_ts = None +while True: + nextline = process.stdout.readline() + if nextline == '' and process.poll() is not None: + break + + date, data = parse_line(nextline) + # stop until we get to the first set of real lines + if not date: + continue + + # now we eat the header lines, and only print out the header + # if we've never seen them before + if not start_time: + start_time = date + header += "%s %s" % (date, data) + elif date == start_time: + header += " %s" % data + elif not printed_header: + printed_header = True + print header + + # now we know this is a data line, printing out if the timestamp + # has changed, and stacking up otherwise. + nextline = process.stdout.readline() + date, data = parse_line(nextline) + if date != current_ts: + current_ts = date + print data_line + data_line = "%s %s" % (date, data) + else: + data_line += " %s" % data + + sys.stdout.flush() From 0049c0c434b4672963b6622486c6c638259bdfda Mon Sep 17 00:00:00 2001 From: Brant Knudson Date: Thu, 16 Jan 2014 18:16:48 -0600 Subject: [PATCH 0630/4704] Make unstack.sh more like stack.sh unstack.sh and stack.sh both have to "configure projects", but the code was different. This change makes it so the 2 sections of the files are the same. 
Change-Id: Ia06f8bbfbe2a6e87fb406e34e13a39bd7fa9e5af --- lib/keystone | 2 -- stack.sh | 6 +++++- unstack.sh | 23 +++++++++++++++++------ 3 files changed, 22 insertions(+), 9 deletions(-) diff --git a/lib/keystone b/lib/keystone index ceefe6a144..71ac668ce5 100644 --- a/lib/keystone +++ b/lib/keystone @@ -28,8 +28,6 @@ XTRACE=$(set +o | grep xtrace) set +o xtrace -source $TOP_DIR/lib/tls - # Defaults # -------- diff --git a/stack.sh b/stack.sh index 7da41a98c8..50a4cd2af9 100755 --- a/stack.sh +++ b/stack.sh @@ -305,9 +305,13 @@ rm -f $SSL_BUNDLE_FILE # Configure Projects # ================== -# Source project function libraries +# Import apache functions source $TOP_DIR/lib/apache + +# Import TLS functions source $TOP_DIR/lib/tls + +# Source project function libraries source $TOP_DIR/lib/infra source $TOP_DIR/lib/oslo source $TOP_DIR/lib/stackforge diff --git a/unstack.sh b/unstack.sh index 4445f1fb31..31f6f01c8f 100755 --- a/unstack.sh +++ b/unstack.sh @@ -30,20 +30,31 @@ if [[ $EUID -eq 0 ]]; then exit 1 fi + +# Configure Projects +# ================== + # Import apache functions source $TOP_DIR/lib/apache -# Get project function libraries -source $TOP_DIR/lib/baremetal -source $TOP_DIR/lib/ceilometer -source $TOP_DIR/lib/cinder +# Import TLS functions +source $TOP_DIR/lib/tls + +# Source project function libraries +source $TOP_DIR/lib/infra +source $TOP_DIR/lib/oslo +source $TOP_DIR/lib/stackforge +source $TOP_DIR/lib/horizon source $TOP_DIR/lib/keystone source $TOP_DIR/lib/glance source $TOP_DIR/lib/nova -source $TOP_DIR/lib/heat -source $TOP_DIR/lib/horizon +source $TOP_DIR/lib/cinder source $TOP_DIR/lib/swift +source $TOP_DIR/lib/ceilometer +source $TOP_DIR/lib/heat source $TOP_DIR/lib/neutron +source $TOP_DIR/lib/baremetal +source $TOP_DIR/lib/ldap source $TOP_DIR/lib/ironic source $TOP_DIR/lib/trove From 04f6dc24a7845ee139977fa5b0c5e53aad8e99bd Mon Sep 17 00:00:00 2001 From: Emilien Macchi Date: Thu, 16 Jan 2014 18:03:38 -0500 Subject: [PATCH 0631/4704] Fix 
stop_neutron metadata agent function Currently, stop_neutron fails in Jenkins because it kills itself. This patch ensure we kill only neutron metadata agent, and not the awk process in itself. Change-Id: I25d1d90e002fa9eb3c5bc366cc74cb70a2daa69f Closes-bug: #1269982 Signed-off-by: Emilien Macchi --- lib/neutron | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lib/neutron b/lib/neutron index 43f43f951a..a909b8b81c 100644 --- a/lib/neutron +++ b/lib/neutron @@ -505,8 +505,7 @@ function stop_neutron() { [ ! -z "$pid" ] && sudo kill -9 $pid fi if is_service_enabled q-meta; then - pid=$(ps aux | awk '/neutron-ns-metadata-proxy/ { print $2 }') - [ ! -z "$pid" ] && sudo kill -9 $pid + pkill -9 -f neutron-ns-metadata-proxy fi if is_service_enabled q-lbaas; then From 39d500335ad2bff0ffdf1d543d0d7528b3812480 Mon Sep 17 00:00:00 2001 From: Ana Krivokapic Date: Mon, 6 Jan 2014 21:46:35 +0100 Subject: [PATCH 0632/4704] Add missing mongodb client package on Fedora On Fedora, when ceilometer is enabled and mongodb is used as backend, devstack installation would fail due to missing mongodb client package. This patch ensures the package gets installed. 
Change-Id: I981bb55f86541e5ff19c52160269a7789b94423f --- files/rpms/ceilometer-collector | 1 + lib/ceilometer | 2 ++ 2 files changed, 3 insertions(+) diff --git a/files/rpms/ceilometer-collector b/files/rpms/ceilometer-collector index d7b7ea89c1..c91bac36a2 100644 --- a/files/rpms/ceilometer-collector +++ b/files/rpms/ceilometer-collector @@ -1,3 +1,4 @@ selinux-policy-targeted mongodb-server pymongo +mongodb # NOPRIME diff --git a/lib/ceilometer b/lib/ceilometer index 75058c05a5..d0f00c07eb 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -151,6 +151,8 @@ function configure_ceilometer() { function configure_mongodb() { if is_fedora; then + # install mongodb client + install_package mongodb # ensure smallfiles selected to minimize freespace requirements sudo sed -i '/--smallfiles/!s/OPTIONS=\"/OPTIONS=\"--smallfiles /' /etc/sysconfig/mongod From 9acb965e572d672f1d5632ee92768b4708b03fbd Mon Sep 17 00:00:00 2001 From: Steve Baker Date: Sun, 19 Jan 2014 11:05:08 +1300 Subject: [PATCH 0633/4704] Do not set bind_host for heat APIs This results in the APIs binding to 0.0.0.0 which is what other devstack services bind to anyway. 
Change-Id: Ic229dbed02b224fe7c5e14f20998bb5d5987aa39 Closes-Bug: #1172991 --- lib/heat | 3 --- 1 file changed, 3 deletions(-) diff --git a/lib/heat b/lib/heat index e35305b843..0307c64ae1 100644 --- a/lib/heat +++ b/lib/heat @@ -110,15 +110,12 @@ function configure_heat() { [[ "$HEAT_STANDALONE" = "True" ]] && iniset $HEAT_CONF paste_deploy flavor standalone # OpenStack API - iniset $HEAT_CONF heat_api bind_host $HEAT_API_HOST iniset $HEAT_CONF heat_api bind_port $HEAT_API_PORT # Cloudformation API - iniset $HEAT_CONF heat_api_cfn bind_host $HEAT_API_CFN_HOST iniset $HEAT_CONF heat_api_cfn bind_port $HEAT_API_CFN_PORT # Cloudwatch API - iniset $HEAT_CONF heat_api_cloudwatch bind_host $HEAT_API_CW_HOST iniset $HEAT_CONF heat_api_cloudwatch bind_port $HEAT_API_CW_PORT # heat environment From cf903938eceb0188c9ecd405e6c89b63b1c8910d Mon Sep 17 00:00:00 2001 From: Jakub Libosvar Date: Mon, 20 Jan 2014 18:18:58 +0100 Subject: [PATCH 0634/4704] Added missing sudo when killing ns-metadata Closes-bug: #1269982 Change-Id: Ib6b641a8d5c92fb4a8aaed6b5d7b63e66acd6bd9 --- lib/neutron | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron b/lib/neutron index 465b57cc35..4b280d1d53 100644 --- a/lib/neutron +++ b/lib/neutron @@ -505,7 +505,7 @@ function stop_neutron() { [ ! -z "$pid" ] && sudo kill -9 $pid fi if is_service_enabled q-meta; then - pkill -9 -f neutron-ns-metadata-proxy + sudo pkill -9 neutron-ns-metadata-proxy || : fi if is_service_enabled q-lbaas; then From c75c78ad5d0473bc97bf859810ddfc18bf270aa2 Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Tue, 21 Jan 2014 15:01:01 +0000 Subject: [PATCH 0635/4704] Add xenserver image By adding a separate entry for xenserver, it will enforce the gate to cache cirros-0.3.0-x86_64-disk.vhd.tgz. 
Change-Id: Ibfd4618e98f079a53fc286f5e95f18a3d658e4d2 --- stackrc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/stackrc b/stackrc index 49fb26b2c7..8a0280ecfa 100644 --- a/stackrc +++ b/stackrc @@ -284,6 +284,9 @@ case "$VIRT_DRIVER" in vsphere) DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-debian-2.6.32-i686} IMAGE_URLS=${IMAGE_URLS:-"http://partnerweb.vmware.com/programs/vmdkimage/debian-2.6.32-i686.vmdk"};; + xenserver) + DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.0-x86_64-disk} + IMAGE_URLS=${IMAGE_URLS:-"https://github.com/downloads/citrix-openstack/warehouse/cirros-0.3.0-x86_64-disk.vhd.tgz"};; *) # Default to Cirros with kernel, ramdisk and disk image DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.1-x86_64-uec} IMAGE_URLS=${IMAGE_URLS:-"http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-uec.tar.gz"};; From e7a94efe77bf6738fcb778f36cf18ceb82a0fae6 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 21 Jan 2014 13:17:24 -0500 Subject: [PATCH 0636/4704] disable client side libvirt debug logging and tune server side libvirt logging to the values that danpb suggested would be useful on the openstack-dev mailing list. 
Change-Id: I4b1c780d1dd4d2eecc81fabe42c07cc2a9e0e3f4 --- lib/nova | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/lib/nova b/lib/nova index a50878950c..a4edb53cf8 100644 --- a/lib/nova +++ b/lib/nova @@ -650,12 +650,11 @@ function start_nova_compute() { if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then # Enable client side traces for libvirt - export LIBVIRT_LOG_FILTERS="1:libvirt" - export LIBVIRT_LOG_OUTPUTS="1:file:/var/log/libvirt/libvirtd-nova.log" - + local log_filters="1:libvirt 1:qemu 1:conf 1:security 3:event 3:json 3:file 1:util" + local log_outputs="1:file:/var/log/libvirt/libvirtd.log" # Enable server side traces for libvirtd - echo "log_filters=\"1:libvirt 1:qemu\"" | sudo tee -a /etc/libvirt/libvirtd.conf - echo "log_outputs=\"1:file:/var/log/libvirt/libvirtd.log\"" | sudo tee -a /etc/libvirt/libvirtd.conf + echo "log_filters=\"$log_filters\"" | sudo tee -a /etc/libvirt/libvirtd.conf + echo "log_outputs=\"$log_outputs\"" | sudo tee -a /etc/libvirt/libvirtd.conf # The group **$LIBVIRT_GROUP** is added to the current user in this script. # Use 'sg' to execute nova-compute as a member of the **$LIBVIRT_GROUP** group. From 1b6ba540887ab73432488f5d81339227052c423c Mon Sep 17 00:00:00 2001 From: ZhiQiang Fan Date: Wed, 22 Jan 2014 22:56:59 +0800 Subject: [PATCH 0637/4704] Remove unnecessary slash from ceilometer endpoint The last slash in ceilometer endpoint is not needed, it should be removed because it will generate redundant slash which has been treated as a bug in ceilometer. 
Change-Id: Ifcff9b63921f5b1dda667d8e77aab22ca2928a8b Closes-Bug: #1271556 ref: https://review.openstack.org/#/c/63279/ --- lib/ceilometer | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/ceilometer b/lib/ceilometer index 75058c05a5..18f146eb90 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -85,9 +85,9 @@ create_ceilometer_accounts() { keystone endpoint-create \ --region RegionOne \ --service_id $CEILOMETER_SERVICE \ - --publicurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT/" \ - --adminurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT/" \ - --internalurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT/" + --publicurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT" \ + --adminurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT" \ + --internalurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT" fi fi } From 4968d1ad5d8d6b0537c68548eb5f8c08bc33f63a Mon Sep 17 00:00:00 2001 From: Brant Knudson Date: Wed, 22 Jan 2014 19:06:44 -0600 Subject: [PATCH 0638/4704] Keystone use common logging setup The Keystone setup was using logging.conf to configure logging, unlike other projects. This may have been left over from before Keystone switched to oslo logging. Switching to common logging configuration allows: - Common format for logs for easier parsing - Pretty colorized logs - Keystone can control the default logging levels for libraries that are used by setting the defaults in keystone. - Potentially using a function to setup logging for all components using oslo-logging (e.g., share with lib/nova). 
Change-Id: I4e9b1e6cffce30f16a1e039224312852b8abda07 Closes-Bug: #1271775 Closes-Bug: #1269987 --- lib/keystone | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/lib/keystone b/lib/keystone index ceefe6a144..7f0bcf24a7 100644 --- a/lib/keystone +++ b/lib/keystone @@ -247,14 +247,14 @@ function configure_keystone() { fi # Set up logging - LOGGING_ROOT="devel" if [ "$SYSLOG" != "False" ]; then - LOGGING_ROOT="$LOGGING_ROOT,production" + iniset $KEYSTONE_CONF DEFAULT use_syslog "True" + fi + + # Format logging + if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then + setup_colorized_logging $KEYSTONE_CONF DEFAULT fi - KEYSTONE_LOG_CONFIG="--log-config $KEYSTONE_CONF_DIR/logging.conf" - cp $KEYSTONE_DIR/etc/logging.conf.sample $KEYSTONE_CONF_DIR/logging.conf - iniset $KEYSTONE_CONF_DIR/logging.conf logger_root level "DEBUG" - iniset $KEYSTONE_CONF_DIR/logging.conf logger_root handlers "devel,production" if is_apache_enabled_service key; then _config_keystone_apache_wsgi @@ -412,7 +412,7 @@ function start_keystone() { screen_it key "cd $KEYSTONE_DIR && sudo tail -f /var/log/$APACHE_NAME/keystone" else # Start Keystone in a screen window - screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone-all --config-file $KEYSTONE_CONF $KEYSTONE_LOG_CONFIG --debug" + screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone-all --config-file $KEYSTONE_CONF --debug" fi echo "Waiting for keystone to start..." From 0b8f6e0fbba961de04d95ff0e06e515d1ea3ea8b Mon Sep 17 00:00:00 2001 From: IWAMOTO Toshihiro Date: Thu, 23 Jan 2014 12:02:34 +0900 Subject: [PATCH 0639/4704] Make sure not to revert local changes. "git diff --quiet" has a bug ignoring local changes if there's an unchanged file with a newer timestamp. This patch works around the bug.
Change-Id: I0ddc24e0f7af21287c43c1e04dd166ebff6f2dca Closes-Bug: 1264422 --- functions | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/functions b/functions index 92b61ed974..eb92a6c615 100644 --- a/functions +++ b/functions @@ -1301,7 +1301,8 @@ function setup_develop() { echo "cd $REQUIREMENTS_DIR; $SUDO_CMD python update.py $project_dir" # Don't update repo if local changes exist - (cd $project_dir && git diff --quiet) + # Don't use buggy "git diff --quiet" + (cd $project_dir && git diff --exit-code >/dev/null) local update_requirements=$? if [ $update_requirements -eq 0 ]; then From ab491bcc88acd83e9fa21de1d4a3fe60bfba577a Mon Sep 17 00:00:00 2001 From: Masayuki Igawa Date: Tue, 21 Jan 2014 11:18:11 +0900 Subject: [PATCH 0640/4704] Add get-pip.py/*.qcow2 to .gitignore files/get-pip.py and *.qcow2 are installed by DevStack itself. So we shouldn't manage them in the git repository. Change-Id: Ib22ed814d3d3eb33ef3ff45874b0ff36b2036cf5 --- .gitignore | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.gitignore b/.gitignore index 43652024f3..1840352636 100644 --- a/.gitignore +++ b/.gitignore @@ -7,8 +7,10 @@ src localrc local.sh files/*.gz +files/*.qcow2 files/images files/pip-* +files/get-pip.py stack-screenrc *.pem accrc From 55c468c422ae7bc48f46847d6fa21e53d4673259 Mon Sep 17 00:00:00 2001 From: Sergey Lukjanov Date: Thu, 23 Jan 2014 15:01:50 +0400 Subject: [PATCH 0641/4704] Use DATABASE/connection opt for db url in Savanna DATABASE/sql_connection opt is now deprecated.
Change-Id: I58058f0d51e16de53e6472c8c01065438d709edc --- lib/savanna | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lib/savanna b/lib/savanna index bb4dfe693d..57d8ac39ce 100644 --- a/lib/savanna +++ b/lib/savanna @@ -96,8 +96,7 @@ function configure_savanna() { iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT debug $SAVANNA_DEBUG recreate_database savanna utf8 - iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE database sql_connection `database_connection_url savanna` - inicomment $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE database connection + iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE database connection `database_connection_url savanna` if is_service_enabled neutron; then iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT use_neutron true From fe42255bfac23a74890c2c7d8cfef385428cef32 Mon Sep 17 00:00:00 2001 From: Sergey Lukjanov Date: Thu, 23 Jan 2014 14:18:54 +0400 Subject: [PATCH 0642/4704] Use savanna-db-manage to init db for Savanna It uses alembic migrations to initialize database. 
Change-Id: I6cf01f69c6bc7c9e403040607dd397cfc3b574a4 --- lib/savanna | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/savanna b/lib/savanna index 57d8ac39ce..c7d59f79c4 100644 --- a/lib/savanna +++ b/lib/savanna @@ -95,7 +95,6 @@ function configure_savanna() { iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT os_admin_tenant_name $SERVICE_TENANT_NAME iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT debug $SAVANNA_DEBUG - recreate_database savanna utf8 iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE database connection `database_connection_url savanna` if is_service_enabled neutron; then @@ -104,6 +103,9 @@ function configure_savanna() { fi iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT use_syslog $SYSLOG + + recreate_database savanna utf8 + $SAVANNA_BIN_DIR/savanna-db-manage --config-file $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE upgrade head } # install_savanna() - Collect source and prepare From 579af5d6786f62008807a473749600e88cea21fc Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Thu, 23 Jan 2014 11:32:22 -0600 Subject: [PATCH 0643/4704] Kill process groups in screen_stop() Previously only the top child process was killed, killing the process group also takes all of the child processes with it. 
Closes-bug: 1271889 Change-Id: If1864cc4f1944f417ea3473d81d8b6e8e40030c2 --- functions | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/functions b/functions index 92b61ed974..13d021e147 100644 --- a/functions +++ b/functions @@ -1150,6 +1150,9 @@ function screen_it { # Stop a service in screen +# If a PID is available use it, kill the whole process group via TERM +# If screen is being used kill the screen window; this will catch processes +# that did not leave a PID behind # screen_stop service function screen_stop() { SCREEN_NAME=${SCREEN_NAME:-stack} @@ -1159,7 +1162,7 @@ function screen_stop() { if is_service_enabled $1; then # Kill via pid if we have one available if [[ -r $SERVICE_DIR/$SCREEN_NAME/$1.pid ]]; then - pkill -TERM -P $(cat $SERVICE_DIR/$SCREEN_NAME/$1.pid) + pkill -TERM -P -$(cat $SERVICE_DIR/$SCREEN_NAME/$1.pid) rm $SERVICE_DIR/$SCREEN_NAME/$1.pid fi if [[ "$USE_SCREEN" = "True" ]]; then From c3e5b77b45068ed07e53fdda1276f5c863de5973 Mon Sep 17 00:00:00 2001 From: Flavio Percoco Date: Thu, 23 Jan 2014 13:48:16 +0100 Subject: [PATCH 0644/4704] Add missing file argument to iniset_multiline Change Id9aab356b36b2150312324a0349d120bbbbd4e63 introduced a call to iniset_multiline to enable swift stores explicitly. However, the call has a missing file argument which resulted in this call setting the values to the wrong file, section and param. This patch fixes that. 
Change-Id: Ib17048e05c467bc8ca2c13fe4297d6bac6c8a880 --- lib/glance | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/glance b/lib/glance index 21c1fa595a..55d5fb37ec 100644 --- a/lib/glance +++ b/lib/glance @@ -125,7 +125,7 @@ function configure_glance() { iniset $GLANCE_API_CONF DEFAULT swift_store_key $SERVICE_PASSWORD iniset $GLANCE_API_CONF DEFAULT swift_store_create_container_on_put True - iniset_multiline DEFAULT known_stores glance.store.filesystem.Store glance.store.http.Store glance.store.swift.Store + iniset $GLANCE_API_CONF DEFAULT known_stores "glance.store.filesystem.Store, glance.store.http.Store, glance.store.swift.Store" fi cp -p $GLANCE_DIR/etc/glance-registry-paste.ini $GLANCE_REGISTRY_PASTE_INI From bdeadf59d4273515df0f47edb820ff159bbc5380 Mon Sep 17 00:00:00 2001 From: Russell Bryant Date: Thu, 23 Jan 2014 17:41:18 +0000 Subject: [PATCH 0645/4704] Add pidstat support pidstat is a script that comes from sysstat, but will give us per-process information. Allow enabling "pidstat" that will run pidstat to give info every 5 seconds by default. 
Change-Id: I5ec7d5abce81125b55985bba3ccaf8073ccdfa2a --- stack.sh | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/stack.sh b/stack.sh index efdee64b34..1d02c16ff8 100755 --- a/stack.sh +++ b/stack.sh @@ -291,6 +291,9 @@ SYSLOG_PORT=${SYSLOG_PORT:-516} SYSSTAT_FILE=${SYSSTAT_FILE:-"sysstat.dat"} SYSSTAT_INTERVAL=${SYSSTAT_INTERVAL:-"1"} +PIDSTAT_FILE=${PIDSTAT_FILE:-"pidstat.txt"} +PIDSTAT_INTERVAL=${PIDSTAT_INTERVAL:-"5"} + # Use color for logging output (only available if syslog is not used) LOG_COLOR=`trueorfalse True $LOG_COLOR` @@ -874,6 +877,16 @@ if is_service_enabled sysstat; then fi fi +if is_service_enabled pidstat; then + # Per-process stats + PIDSTAT_OPTS="-l -p ALL -T ALL" + if [[ -n ${SCREEN_LOGDIR} ]]; then + screen_it pidstat "cd $TOP_DIR; pidstat $PIDSTAT_OPTS $PIDSTAT_INTERVAL > $SCREEN_LOGDIR/$PIDSTAT_FILE" + else + screen_it pidstat "pidstat $PIDSTAT_OPTS $PIDSTAT_INTERVAL" + fi +fi + # Start Services # ============== From b93cd643432d3633c48bec02fcd7cb4f354f67ed Mon Sep 17 00:00:00 2001 From: Arnaud Legendre Date: Thu, 23 Jan 2014 17:12:21 -0800 Subject: [PATCH 0646/4704] upload_image.sh should parse filenames correctly The upload_image script gives the ability to the user to provide specific metadata using the filename: file-adapter_type;disk_type;network_type.vmdk Currently, the regex expects each of these types to be populated. This patch fixes this issue by making the regex more flexible and accepts only one of these metadata to be populated. Change-Id: If74cb06cc640864e7e91fd88943cdb37e05935d6 Closes-Bug: #1272126 --- functions | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/functions b/functions index 92b61ed974..276cea1e04 100644 --- a/functions +++ b/functions @@ -1539,7 +1539,7 @@ function upload_image() { # NOTE: For backwards compatibility reasons, colons may be used in place # of semi-colons for property delimiters but they are not permitted # characters in NTFS filesystems. 
- property_string=`echo "$IMAGE_NAME" | grep -oP '(?<=-)(?!.*-).+[:;].+[:;].+$'` + property_string=`echo "$IMAGE_NAME" | grep -oP '(?<=-)(?!.*-).*[:;].*[:;].*$'` IFS=':;' read -a props <<< "$property_string" vmdk_disktype="${props[0]:-$vmdk_disktype}" vmdk_adapter_type="${props[1]:-$vmdk_adapter_type}" From ab0595e9cd8f9bc77a3bb7e6c9611c2c771b0781 Mon Sep 17 00:00:00 2001 From: Gordon Chung Date: Thu, 16 Jan 2014 09:44:57 -0500 Subject: [PATCH 0647/4704] ERRORs in ceilometer-acentral log after succesful tempest run recent merge added duplicate creation of ceilometer user. remove ceilometer user creation from keystone_data so we can correctly add ResellerAdmin role to ceilometer user which it needs to interact with swift Change-Id: I043c6b9337dfb147c3c8f364b462708a4030b41c Closes-Bug: #1268730 --- files/keystone_data.sh | 22 +--------------------- 1 file changed, 1 insertion(+), 21 deletions(-) diff --git a/files/keystone_data.sh b/files/keystone_data.sh index 07b6b601d2..d477c42906 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -6,7 +6,6 @@ # ------------------------------------------------------------------ # service glance admin # service heat service # if enabled -# service ceilometer admin # if enabled # Tempest Only: # alt_demo alt_demo Member # @@ -113,30 +112,11 @@ if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then fi # Ceilometer -if [[ "$ENABLED_SERVICES" =~ "ceilometer" ]]; then - keystone user-create --name=ceilometer \ - --pass="$SERVICE_PASSWORD" \ - --tenant $SERVICE_TENANT_NAME \ - --email=ceilometer@example.com - keystone user-role-add --tenant $SERVICE_TENANT_NAME \ - --user ceilometer \ - --role admin +if [[ "$ENABLED_SERVICES" =~ "ceilometer" ]] && [[ "$ENABLED_SERVICES" =~ "s-proxy" || "$ENABLED_SERVICES" =~ "swift" ]]; then # Ceilometer needs ResellerAdmin role to access swift account stats. 
keystone user-role-add --tenant $SERVICE_TENANT_NAME \ --user ceilometer \ --role ResellerAdmin - if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - keystone service-create \ - --name=ceilometer \ - --type=metering \ - --description="Ceilometer Service" - keystone endpoint-create \ - --region RegionOne \ - --service ceilometer \ - --publicurl "http://$SERVICE_HOST:8777" \ - --adminurl "http://$SERVICE_HOST:8777" \ - --internalurl "http://$SERVICE_HOST:8777" - fi fi # EC2 From dc4dc7f03335e26ea3d86b6184f0475cc5f3d51b Mon Sep 17 00:00:00 2001 From: john-griffith Date: Wed, 22 Jan 2014 18:09:32 -0700 Subject: [PATCH 0648/4704] Fix up tempest conf settings The tempest api.volume.test_volume_types test won't work with non-default drivers configured for cinder's backend any more. The reason is that we create a type using capability scheduler keywords in the extra-specs for the test; (vendor_name and storage_protocol). The result is the extra-spec uses the filters: "vendor_name=Open Source" and "storage_protocol=iSCSI", but for example if you have another backend say SolidFire, EMC, NetApp, IBM etc the capabilities filter will fail the create with a "No valid host available". This is intended to work by simply setting these values in your tempest.conf file. That's fine, however upon setting this up in my localrc I found that the tempest config variables being set via devstack were never picked up Currently devstack doesn't use the same variable names for configuration variables as tempest expects. Devstack is using the variable "TEMPEST_CONF" however the Tempest project is expecting the variable "TEMPEST_CONFIG", so currently the devstack lib/tempest rc variables are never picked up by tempest properly. This change modifies devstack's naming of TEMPEST_CONF, my thought being that since this doesn't work in devstack currently that changing it here would be better than changing it in Tempest where it's possible people had their own customizations already outside of devstack.
In addition this change creates rc variables in devstack to actually set these via devstack. The idea here is that Cinder 3'rd party testing needs to be a simple devstack config and run stack.sh. By fixing up the configuration file variable naming and adding the variables for the vendor and protocol settings that's now possible. An example localrc for a custom config is shown below. The example sets the tempest config file to /etc/tempest/tempest.conf, and configures tempest to use the SolidFire driver as the cinder backend. TEMPEST_VOLUME_VENDOR ==> tempest.conf.volume_vendor TEMPEST_STORAGE_PROTOCOL ==> tempest.conf.storage_protocol relevant example localrc entries: TEMPEST_CONFIG=/etc/tempest/tempest.conf TEMPEST_CONFIG_DIR=/etc/tempest TEMPEST_VOLUME_DRIVER=solidfire TEMPEST_VOLUME_VENDOR="SolidFire Inc" ***NOTE*** storage_protocol and vendor_name MUST match what the backend device reports from get capabilities. Change-Id: I28dfa90c877b27f5d4919f2748fae092bb2f87fa Closes-Bug: 1271781 --- lib/tempest | 141 +++++++++++++++++++++++++++++----------------------- 1 file changed, 78 insertions(+), 63 deletions(-) diff --git a/lib/tempest b/lib/tempest index ef9dfe218b..a13cf10e84 100644 --- a/lib/tempest +++ b/lib/tempest @@ -46,8 +46,8 @@ set +o xtrace # Set up default directories TEMPEST_DIR=$DEST/tempest -TEMPEST_CONF_DIR=$TEMPEST_DIR/etc -TEMPEST_CONF=$TEMPEST_CONF_DIR/tempest.conf +TEMPEST_CONFIG_DIR=${TEMPEST_CONFIG_DIR:-$TEMPEST_DIR/etc} +TEMPEST_CONFIG=$TEMPEST_CONFIG_DIR/tempest.conf TEMPEST_STATE_PATH=${TEMPEST_STATE_PATH:=$DATA_DIR/tempest} NOVA_SOURCE_DIR=$DEST/nova @@ -58,6 +58,10 @@ BUILD_TIMEOUT=196 BOTO_MATERIALS_PATH="$FILES/images/s3-materials/cirros-0.3.1" +# Cinder/Volume variables +TEMPEST_VOLUME_DRIVER=${TEMPEST_VOLUME_DRIVER:-default} +TEMPEST_VOLUME_VENDOR=${TEMPEST_VOLUME_VENDOR:-"Open Source"} +TEMPEST_STORAGE_PROTOCOL=${TEMPEST_STORAGE_PROTOCOL:-iSCSI} # Functions # --------- @@ -83,6 +87,11 @@ function configure_tempest() { local 
boto_instance_type="m1.tiny" local ssh_connect_method="fixed" + if [[ ! -d $TEMPEST_CONFIG_DIR ]]; then + sudo mkdir -p $TEMPEST_CONFIG_DIR + fi + sudo chown $STACK_USER $TEMPEST_CONFIG_DIR + # TODO(afazekas): # sudo python setup.py deploy @@ -133,7 +142,8 @@ function configure_tempest() { # Create tempest.conf from tempest.conf.sample # copy every time, because the image UUIDS are going to change - cp $TEMPEST_CONF.sample $TEMPEST_CONF + sudo cp $TEMPEST_DIR/etc/tempest.conf.sample $TEMPEST_CONFIG + sudo chmod 644 $TEMPEST_CONFIG password=${ADMIN_PASSWORD:-secrete} @@ -224,121 +234,126 @@ function configure_tempest() { fi # Oslo - iniset $TEMPEST_CONF DEFAULT lock_path $TEMPEST_STATE_PATH + iniset $TEMPEST_CONFIG DEFAULT lock_path $TEMPEST_STATE_PATH mkdir -p $TEMPEST_STATE_PATH - iniset $TEMPEST_CONF DEFAULT use_stderr False - iniset $TEMPEST_CONF DEFAULT log_file tempest.log - iniset $TEMPEST_CONF DEFAULT debug True + iniset $TEMPEST_CONFIG DEFAULT use_stderr False + iniset $TEMPEST_CONFIG DEFAULT log_file tempest.log + iniset $TEMPEST_CONFIG DEFAULT debug True # Timeouts - iniset $TEMPEST_CONF compute build_timeout $BUILD_TIMEOUT - iniset $TEMPEST_CONF volume build_timeout $BUILD_TIMEOUT - iniset $TEMPEST_CONF boto build_timeout $BUILD_TIMEOUT - iniset $TEMPEST_CONF compute build_interval $BUILD_INTERVAL - iniset $TEMPEST_CONF volume build_interval $BUILD_INTERVAL - iniset $TEMPEST_CONF boto build_interval $BUILD_INTERVAL - iniset $TEMPEST_CONF boto http_socket_timeout 5 + iniset $TEMPEST_CONFIG compute build_timeout $BUILD_TIMEOUT + iniset $TEMPEST_CONFIG volume build_timeout $BUILD_TIMEOUT + iniset $TEMPEST_CONFIG boto build_timeout $BUILD_TIMEOUT + iniset $TEMPEST_CONFIG compute build_interval $BUILD_INTERVAL + iniset $TEMPEST_CONFIG volume build_interval $BUILD_INTERVAL + iniset $TEMPEST_CONFIG boto build_interval $BUILD_INTERVAL + iniset $TEMPEST_CONFIG boto http_socket_timeout 5 # Identity - iniset $TEMPEST_CONF identity uri 
"$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:5000/v2.0/" - iniset $TEMPEST_CONF identity password "$password" - iniset $TEMPEST_CONF identity alt_username $ALT_USERNAME - iniset $TEMPEST_CONF identity alt_password "$password" - iniset $TEMPEST_CONF identity alt_tenant_name $ALT_TENANT_NAME - iniset $TEMPEST_CONF identity admin_password "$password" + iniset $TEMPEST_CONFIG identity uri "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:5000/v2.0/" + iniset $TEMPEST_CONFIG identity password "$password" + iniset $TEMPEST_CONFIG identity alt_username $ALT_USERNAME + iniset $TEMPEST_CONFIG identity alt_password "$password" + iniset $TEMPEST_CONFIG identity alt_tenant_name $ALT_TENANT_NAME + iniset $TEMPEST_CONFIG identity admin_password "$password" # Image # for the gate we want to be able to override this variable so we aren't # doing an HTTP fetch over the wide internet for this test if [[ ! -z "$TEMPEST_HTTP_IMAGE" ]]; then - iniset $TEMPEST_CONF image http_image $TEMPEST_HTTP_IMAGE + iniset $TEMPEST_CONFIG image http_image $TEMPEST_HTTP_IMAGE fi # Compute - iniset $TEMPEST_CONF compute change_password_available False + iniset $TEMPEST_CONFIG compute change_password_available False # Note(nati) current tempest don't create network for each tenant # so reuse same tenant for now if is_service_enabled neutron; then TEMPEST_ALLOW_TENANT_ISOLATION=${TEMPEST_ALLOW_TENANT_ISOLATION:-False} fi - iniset $TEMPEST_CONF compute allow_tenant_isolation ${TEMPEST_ALLOW_TENANT_ISOLATION:-True} - iniset $TEMPEST_CONF compute ssh_user ${DEFAULT_INSTANCE_USER:-cirros} # DEPRECATED - iniset $TEMPEST_CONF compute network_for_ssh $PRIVATE_NETWORK_NAME - iniset $TEMPEST_CONF compute ip_version_for_ssh 4 - iniset $TEMPEST_CONF compute ssh_timeout $BUILD_TIMEOUT - iniset $TEMPEST_CONF compute image_ref $image_uuid - iniset $TEMPEST_CONF compute image_ssh_user ${DEFAULT_INSTANCE_USER:-cirros} - iniset $TEMPEST_CONF compute image_ref_alt $image_uuid_alt - iniset $TEMPEST_CONF compute 
image_alt_ssh_user ${DEFAULT_INSTANCE_USER:-cirros} - iniset $TEMPEST_CONF compute flavor_ref $flavor_ref - iniset $TEMPEST_CONF compute flavor_ref_alt $flavor_ref_alt - iniset $TEMPEST_CONF compute live_migration_available ${LIVE_MIGRATION_AVAILABLE:-False} - iniset $TEMPEST_CONF compute use_block_migration_for_live_migration ${USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION:-False} - iniset $TEMPEST_CONF compute ssh_connect_method $ssh_connect_method + iniset $TEMPEST_CONFIG compute allow_tenant_isolation ${TEMPEST_ALLOW_TENANT_ISOLATION:-True} + iniset $TEMPEST_CONFIG compute ssh_user ${DEFAULT_INSTANCE_USER:-cirros} # DEPRECATED + iniset $TEMPEST_CONFIG compute network_for_ssh $PRIVATE_NETWORK_NAME + iniset $TEMPEST_CONFIG compute ip_version_for_ssh 4 + iniset $TEMPEST_CONFIG compute ssh_timeout $BUILD_TIMEOUT + iniset $TEMPEST_CONFIG compute image_ref $image_uuid + iniset $TEMPEST_CONFIG compute image_ssh_user ${DEFAULT_INSTANCE_USER:-cirros} + iniset $TEMPEST_CONFIG compute image_ref_alt $image_uuid_alt + iniset $TEMPEST_CONFIG compute image_alt_ssh_user ${DEFAULT_INSTANCE_USER:-cirros} + iniset $TEMPEST_CONFIG compute flavor_ref $flavor_ref + iniset $TEMPEST_CONFIG compute flavor_ref_alt $flavor_ref_alt + iniset $TEMPEST_CONFIG compute live_migration_available ${LIVE_MIGRATION_AVAILABLE:-False} + iniset $TEMPEST_CONFIG compute use_block_migration_for_live_migration ${USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION:-False} + iniset $TEMPEST_CONFIG compute ssh_connect_method $ssh_connect_method # Compute admin - iniset $TEMPEST_CONF "compute-admin" password "$password" # DEPRECATED + iniset $TEMPEST_CONFIG "compute-admin" password "$password" # DEPRECATED - iniset $TEMPEST_CONF network api_version 2.0 - iniset $TEMPEST_CONF network tenant_networks_reachable "$tenant_networks_reachable" - iniset $TEMPEST_CONF network public_network_id "$public_network_id" - iniset $TEMPEST_CONF network public_router_id "$public_router_id" - iniset $TEMPEST_CONF network default_network 
"$FIXED_RANGE" + iniset $TEMPEST_CONFIG network api_version 2.0 + iniset $TEMPEST_CONFIG network tenant_networks_reachable "$tenant_networks_reachable" + iniset $TEMPEST_CONFIG network public_network_id "$public_network_id" + iniset $TEMPEST_CONFIG network public_router_id "$public_router_id" + iniset $TEMPEST_CONFIG network default_network "$FIXED_RANGE" # boto - iniset $TEMPEST_CONF boto ec2_url "http://$SERVICE_HOST:8773/services/Cloud" - iniset $TEMPEST_CONF boto s3_url "http://$SERVICE_HOST:${S3_SERVICE_PORT:-3333}" - iniset $TEMPEST_CONF boto s3_materials_path "$BOTO_MATERIALS_PATH" - iniset $TEMPEST_CONF boto instance_type "$boto_instance_type" - iniset $TEMPEST_CONF boto http_socket_timeout 30 - iniset $TEMPEST_CONF boto ssh_user ${DEFAULT_INSTANCE_USER:-cirros} + iniset $TEMPEST_CONFIG boto ec2_url "http://$SERVICE_HOST:8773/services/Cloud" + iniset $TEMPEST_CONFIG boto s3_url "http://$SERVICE_HOST:${S3_SERVICE_PORT:-3333}" + iniset $TEMPEST_CONFIG boto s3_materials_path "$BOTO_MATERIALS_PATH" + iniset $TEMPEST_CONFIG boto instance_type "$boto_instance_type" + iniset $TEMPEST_CONFIG boto http_socket_timeout 30 + iniset $TEMPEST_CONFIG boto ssh_user ${DEFAULT_INSTANCE_USER:-cirros} # Orchestration test image if [[ ! 
-z "$HEAT_FETCHED_TEST_IMAGE" ]]; then - iniset $TEMPEST_CONF orchestration image_ref "$HEAT_FETCHED_TEST_IMAGE" + iniset $TEMPEST_CONFIG orchestration image_ref "$HEAT_FETCHED_TEST_IMAGE" elif [[ "$HEAT_CREATE_TEST_IMAGE" = "True" ]]; then disk_image_create /usr/share/tripleo-image-elements "vm fedora heat-cfntools" "i386" "fedora-vm-heat-cfntools-tempest" - iniset $TEMPEST_CONF orchestration image_ref "fedora-vm-heat-cfntools-tempest" + iniset $TEMPEST_CONFIG orchestration image_ref "fedora-vm-heat-cfntools-tempest" fi # Scenario - iniset $TEMPEST_CONF scenario img_dir "$FILES/images/cirros-0.3.1-x86_64-uec" + iniset $TEMPEST_CONFIG scenario img_dir "$FILES/images/cirros-0.3.1-x86_64-uec" # Large Ops Number - iniset $TEMPEST_CONF scenario large_ops_number ${TEMPEST_LARGE_OPS_NUMBER:-0} + iniset $TEMPEST_CONFIG scenario large_ops_number ${TEMPEST_LARGE_OPS_NUMBER:-0} # Volume if is_service_enabled c-bak; then - iniset $TEMPEST_CONF volume volume_backup_enabled "True" + iniset $TEMPEST_CONFIG volume volume_backup_enabled "True" fi CINDER_MULTI_LVM_BACKEND=$(trueorfalse False $CINDER_MULTI_LVM_BACKEND) if [ $CINDER_MULTI_LVM_BACKEND == "True" ]; then - iniset $TEMPEST_CONF volume multi_backend_enabled "True" - iniset $TEMPEST_CONF volume backend1_name "LVM_iSCSI" - iniset $TEMPEST_CONF volume backend2_name "LVM_iSCSI_2" + iniset $TEMPEST_CONFIG volume multi_backend_enabled "True" + iniset $TEMPEST_CONFIG volume backend1_name "LVM_iSCSI" + iniset $TEMPEST_CONFIG volume backend2_name "LVM_iSCSI_2" + fi + + if [ $TEMPEST_VOLUME_DRIVER != "default" ]; then + iniset $TEMPEST_CONFIG volume vendor_name $TEMPEST_VOLUME_VENDOR + iniset $TEMPEST_CONFIG volume storage_protocol $TEMPEST_STORAGE_PROTOCOL fi # Dashboard - iniset $TEMPEST_CONF dashboard dashboard_url "http://$SERVICE_HOST/" - iniset $TEMPEST_CONF dashboard login_url "http://$SERVICE_HOST/auth/login/" + iniset $TEMPEST_CONFIG dashboard dashboard_url "http://$SERVICE_HOST/" + iniset $TEMPEST_CONFIG dashboard 
login_url "http://$SERVICE_HOST/auth/login/" # cli - iniset $TEMPEST_CONF cli cli_dir $NOVA_BIN_DIR + iniset $TEMPEST_CONFIG cli cli_dir $NOVA_BIN_DIR # Networking - iniset $TEMPEST_CONF network-feature-enabled api_extensions "${NETWORK_API_EXTENSIONS:-all}" + iniset $TEMPEST_CONFIG network-feature-enabled api_extensions "${NETWORK_API_EXTENSIONS:-all}" # service_available for service in nova cinder glance neutron swift heat horizon ceilometer ironic savanna trove marconi; do if is_service_enabled $service ; then - iniset $TEMPEST_CONF service_available $service "True" + iniset $TEMPEST_CONFIG service_available $service "True" else - iniset $TEMPEST_CONF service_available $service "False" + iniset $TEMPEST_CONFIG service_available $service "False" fi done echo "Created tempest configuration file:" - cat $TEMPEST_CONF + cat $TEMPEST_CONFIG # Restore IFS IFS=$ifs From db20cd5436ec6301b134f2d92053cb98fb15717b Mon Sep 17 00:00:00 2001 From: armando-migliaccio Date: Sun, 5 Jan 2014 07:41:30 -0800 Subject: [PATCH 0649/4704] Add Neutron/NSX plugin sanity check Supports-blueprint: nvp-third-party-support (aka bp vmware-nsx-third-party) Related-bug: #1265671 Change-Id: Ifa4e1d36b8735e81f24b8852103a9c433d736e84 --- lib/neutron_thirdparty/vmware_nsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron_thirdparty/vmware_nsx b/lib/neutron_thirdparty/vmware_nsx index 7c6202723f..4eb177a458 100644 --- a/lib/neutron_thirdparty/vmware_nsx +++ b/lib/neutron_thirdparty/vmware_nsx @@ -79,7 +79,7 @@ function stop_vmware_nsx() { } function check_vmware_nsx() { - : + neutron-check-nsx-config $NEUTRON_CONF_DIR/plugins/vmware/nsx.ini } # Restore xtrace From 53ffc713b1d352a9ecf701b452e8e6659daf9748 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Tue, 17 Dec 2013 11:13:40 -0600 Subject: [PATCH 0650/4704] clean.sh updates * Clean out data, log and state dirs * Include lib/apache to clear is_apache_enabled_service not found error * Clean errors removing tgt config 
files * Clean errors removing VG backing file in lib/cinder Change-Id: I33dfde17eb8daaaed7f7e76337fe6a8085a266bf --- clean.sh | 26 ++++++++++++++++---------- lib/cinder | 4 ++-- 2 files changed, 18 insertions(+), 12 deletions(-) diff --git a/clean.sh b/clean.sh index 480a81214f..e16bdb7f36 100755 --- a/clean.sh +++ b/clean.sh @@ -30,13 +30,17 @@ fi # and ``DISTRO`` GetDistro +# Import apache functions +source $TOP_DIR/lib/apache +source $TOP_DIR/lib/ldap # Import database library source $TOP_DIR/lib/database source $TOP_DIR/lib/rpc_backend -source $TOP_DIR/lib/oslo source $TOP_DIR/lib/tls + +source $TOP_DIR/lib/oslo source $TOP_DIR/lib/horizon source $TOP_DIR/lib/keystone source $TOP_DIR/lib/glance @@ -47,7 +51,9 @@ source $TOP_DIR/lib/ceilometer source $TOP_DIR/lib/heat source $TOP_DIR/lib/neutron source $TOP_DIR/lib/baremetal -source $TOP_DIR/lib/ldap +source $TOP_DIR/lib/ironic +source $TOP_DIR/lib/trove + # Extras Source # -------------- @@ -95,13 +101,6 @@ if [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then cleanup_nova_hypervisor fi -# cinder doesn't always clean up the volume group as it might be used elsewhere... -# clean it up if it is a loop device -VG_DEV=$(sudo losetup -j $DATA_DIR/${VOLUME_GROUP}-backing-file | awk -F':' '/backing-file/ { print $1}') -if [[ -n "$VG_DEV" ]]; then - sudo losetup -d $VG_DEV -fi - #if mount | grep $DATA_DIR/swift/drives; then # sudo umount $DATA_DIR/swift/drives/sdb1 #fi @@ -111,12 +110,19 @@ fi sudo rm -rf /etc/keystone /etc/glance /etc/nova /etc/cinder /etc/swift # Clean out tgt -sudo rm /etc/tgt/conf.d/* +sudo rm -f /etc/tgt/conf.d/* # Clean up the message queue cleanup_rpc_backend cleanup_database +# Clean out data, logs and status +LOGDIR=$(dirname "$LOGFILE") +sudo rm -rf $DATA_DIR $LOGDIR $DEST/status +if [[ -n "$SCREEN_LOGDIR" ]] && [[ -d "$SCREEN_LOGDIR" ]]; then + sudo rm -rf $SCREEN_LOGDIR +fi + # Clean up networking... # should this be in nova? 
# FIXED_IP_ADDR in br100 diff --git a/lib/cinder b/lib/cinder index cbe732e9b0..45a9a25dc8 100644 --- a/lib/cinder +++ b/lib/cinder @@ -109,8 +109,8 @@ function _clean_lvm_backing_file() { # of the backing file if [ -z "`sudo lvs --noheadings -o lv_name $vg`" ]; then # if the backing physical device is a loop device, it was probably setup by devstack - VG_DEV=$(sudo losetup -j $DATA_DIR/${vg}-backing-file | awk -F':' '/backing-file/ { print $1}') - if [[ -n "$VG_DEV" ]]; then + if [[ -n "$VG_DEV" ]] && [[ -e "$VG_DEV" ]]; then + VG_DEV=$(sudo losetup -j $DATA_DIR/${vg}-backing-file | awk -F':' '/backing-file/ { print $1}') sudo losetup -d $VG_DEV rm -f $DATA_DIR/${vg}-backing-file fi From 38e38fb16d5d597e41c486812ae7ba480696b31c Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Fri, 10 Jan 2014 12:05:51 -0600 Subject: [PATCH 0651/4704] Update samples * Skip commands for services that are not started in local.sh * Rename localrc to local.conf Change-Id: Ida3a8cc836d56db94da4a133fbeb81c7f5fc5f26 --- samples/{localrc => local.conf} | 13 ++++--- samples/local.sh | 60 +++++++++++++++++---------------- 2 files changed, 39 insertions(+), 34 deletions(-) rename samples/{localrc => local.conf} (87%) diff --git a/samples/localrc b/samples/local.conf similarity index 87% rename from samples/localrc rename to samples/local.conf index 80cf0e75ac..c8126c22af 100644 --- a/samples/localrc +++ b/samples/local.conf @@ -1,19 +1,22 @@ -# Sample ``localrc`` for user-configurable variables in ``stack.sh`` +# Sample ``local.conf`` for user-configurable variables in ``stack.sh`` # NOTE: Copy this file to the root ``devstack`` directory for it to # work properly. -# ``localrc`` is a user-maintained setings file that is sourced from ``stackrc``. +# ``local.conf`` is a user-maintained setings file that is sourced from ``stackrc``. # This gives it the ability to override any variables set in ``stackrc``. 
# Also, most of the settings in ``stack.sh`` are written to only be set if no -# value has already been set; this lets ``localrc`` effectively override the +# value has already been set; this lets ``local.conf`` effectively override the # default values. # This is a collection of some of the settings we have found to be useful # in our DevStack development environments. Additional settings are described -# in http://devstack.org/localrc.html +# in http://devstack.org/local.conf.html # These should be considered as samples and are unsupported DevStack code. +# The ``localrc`` section replaces the old ``localrc`` configuration file. +# Note that if ``localrc`` is present it will be used in favor of this section. +[[local|localrc]] # Minimal Contents # ---------------- @@ -22,7 +25,7 @@ # there are a few minimal variables set: # If the ``*_PASSWORD`` variables are not set here you will be prompted to enter -# values for them by ``stack.sh`` and they will be added to ``localrc``. +# values for them by ``stack.sh`` and they will be added to ``local.conf``. 
ADMIN_PASSWORD=nomoresecrete MYSQL_PASSWORD=stackdb RABBIT_PASSWORD=stackqueue diff --git a/samples/local.sh b/samples/local.sh index 970cbb97e0..664cb663fe 100755 --- a/samples/local.sh +++ b/samples/local.sh @@ -23,45 +23,47 @@ source $TOP_DIR/stackrc # Destination path for installation ``DEST`` DEST=${DEST:-/opt/stack} +if is_service_enabled nova; then -# Import ssh keys -# --------------- + # Import ssh keys + # --------------- -# Import keys from the current user into the default OpenStack user (usually -# ``demo``) + # Import keys from the current user into the default OpenStack user (usually + # ``demo``) -# Get OpenStack auth -source $TOP_DIR/openrc + # Get OpenStack user auth + source $TOP_DIR/openrc -# Add first keypair found in localhost:$HOME/.ssh -for i in $HOME/.ssh/id_rsa.pub $HOME/.ssh/id_dsa.pub; do - if [[ -r $i ]]; then - nova keypair-add --pub_key=$i `hostname` - break - fi -done + # Add first keypair found in localhost:$HOME/.ssh + for i in $HOME/.ssh/id_rsa.pub $HOME/.ssh/id_dsa.pub; do + if [[ -r $i ]]; then + nova keypair-add --pub_key=$i `hostname` + break + fi + done -# Create A Flavor -# --------------- + # Create A Flavor + # --------------- -# Get OpenStack admin auth -source $TOP_DIR/openrc admin admin + # Get OpenStack admin auth + source $TOP_DIR/openrc admin admin -# Name of new flavor -# set in ``localrc`` with ``DEFAULT_INSTANCE_TYPE=m1.micro`` -MI_NAME=m1.micro + # Name of new flavor + # set in ``localrc`` with ``DEFAULT_INSTANCE_TYPE=m1.micro`` + MI_NAME=m1.micro -# Create micro flavor if not present -if [[ -z $(nova flavor-list | grep $MI_NAME) ]]; then - nova flavor-create $MI_NAME 6 128 0 1 -fi + # Create micro flavor if not present + if [[ -z $(nova flavor-list | grep $MI_NAME) ]]; then + nova flavor-create $MI_NAME 6 128 0 1 + fi -# Other Uses -# ---------- + # Other Uses + # ---------- -# Add tcp/22 and icmp to default security group -nova secgroup-add-rule default tcp 22 22 0.0.0.0/0 -nova secgroup-add-rule default icmp 
-1 -1 0.0.0.0/0 + # Add tcp/22 and icmp to default security group + nova secgroup-add-rule default tcp 22 22 0.0.0.0/0 + nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0 +fi From fbe12f988cd1026b2f074a5b5bfe15ff19171b90 Mon Sep 17 00:00:00 2001 From: Akihiro Motoki Date: Sun, 26 Jan 2014 22:57:47 +0900 Subject: [PATCH 0652/4704] Add 'bc' to files/apts/general After commit def4c141, "bc" command is used in stack.sh, but 'bc' command is not available in very minimal Ubuntu installation (without any tasks installed). We need to add 'bc' to required package list. Closes-Bug: #1272914 Change-Id: I5797707e8eaa9dd2a21d1a1fc3af028d1951a2ee --- files/apts/general | 1 + 1 file changed, 1 insertion(+) diff --git a/files/apts/general b/files/apts/general index aff687fab4..32d31f0642 100644 --- a/files/apts/general +++ b/files/apts/general @@ -21,3 +21,4 @@ euca2ools # only for testing client tar python-cmd2 # dist:precise python2.7 +bc From c38d864cfb43592a4985441cc5c3de89d572c32e Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Sun, 26 Jan 2014 13:01:30 -0500 Subject: [PATCH 0653/4704] remove setting up fatal_deprecations using fatal deprecations only means you can't add new deprecations to the code base, which isn't helpful in actually deprecating features in a user friendly way. 
Change-Id: I26468f4c221a14f2eea746439d46e5fa192cfc57 --- lib/nova | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/nova b/lib/nova index a4edb53cf8..b85f0941f3 100644 --- a/lib/nova +++ b/lib/nova @@ -379,7 +379,6 @@ function create_nova_conf() { iniset $NOVA_CONF DEFAULT metadata_workers "4" iniset $NOVA_CONF conductor workers "4" iniset $NOVA_CONF DEFAULT sql_connection `database_connection_url nova` - iniset $NOVA_CONF DEFAULT fatal_deprecations "True" iniset $NOVA_CONF DEFAULT instance_name_template "${INSTANCE_NAME_PREFIX}%08x" iniset $NOVA_CONF osapi_v3 enabled "True" From e61bc61a31ba05c9af5d0801d2f120e919e0bd5f Mon Sep 17 00:00:00 2001 From: Steve Baker Date: Mon, 27 Jan 2014 15:21:29 +1300 Subject: [PATCH 0654/4704] Use HOST_IP instead of SERVICE_HOST for heat API conf Heat config values heat_metadata_server_url, heat_waitcondition_server_url and heat_waitcondition_server_url currently derive their host from devstack SERVICE_HOST. In gating this is set to 127.0.0.1, which would explain why nova servers are not reaching heat with waitcondition signalling. This change uses HOST_IP as the default instead of SERVICE_HOST. 
Change-Id: I373b086e3a36a3484cfd34f0d1c8c168ac6d465d --- lib/heat | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/heat b/lib/heat index 0307c64ae1..b9b8aa66ca 100644 --- a/lib/heat +++ b/lib/heat @@ -60,13 +60,13 @@ function configure_heat() { # remove old config files rm -f $HEAT_CONF_DIR/heat-*.conf - HEAT_API_CFN_HOST=${HEAT_API_CFN_HOST:-$SERVICE_HOST} + HEAT_API_CFN_HOST=${HEAT_API_CFN_HOST:-$HOST_IP} HEAT_API_CFN_PORT=${HEAT_API_CFN_PORT:-8000} HEAT_ENGINE_HOST=${HEAT_ENGINE_HOST:-$SERVICE_HOST} HEAT_ENGINE_PORT=${HEAT_ENGINE_PORT:-8001} - HEAT_API_CW_HOST=${HEAT_API_CW_HOST:-$SERVICE_HOST} + HEAT_API_CW_HOST=${HEAT_API_CW_HOST:-$HOST_IP} HEAT_API_CW_PORT=${HEAT_API_CW_PORT:-8003} - HEAT_API_HOST=${HEAT_API_HOST:-$SERVICE_HOST} + HEAT_API_HOST=${HEAT_API_HOST:-$HOST_IP} HEAT_API_PORT=${HEAT_API_PORT:-8004} HEAT_API_PASTE_FILE=$HEAT_CONF_DIR/api-paste.ini HEAT_POLICY_FILE=$HEAT_CONF_DIR/policy.json From daa9a734e2fe008a32ed0f98501e2ce2f80167c8 Mon Sep 17 00:00:00 2001 From: Hirofumi Ichihara Date: Mon, 27 Jan 2014 14:54:02 +0900 Subject: [PATCH 0655/4704] Repeatedly add log_filters,log_outputs to libvirtd.conf when restart Change-Id: I14f07f3164f9201305ed1e94e9277a5a5792e850 Closes-bug: 1273058 --- lib/nova | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/lib/nova b/lib/nova index b85f0941f3..dbaa3f53d9 100644 --- a/lib/nova +++ b/lib/nova @@ -652,8 +652,12 @@ function start_nova_compute() { local log_filters="1:libvirt 1:qemu 1:conf 1:security 3:event 3:json 3:file 1:util" local log_outputs="1:file:/var/log/libvirt/libvirtd.log" # Enable server side traces for libvirtd - echo "log_filters=\"$log_filters\"" | sudo tee -a /etc/libvirt/libvirtd.conf - echo "log_outputs=\"$log_outputs\"" | sudo tee -a /etc/libvirt/libvirtd.conf + if ! grep -q "log_filters=\"$log_filters\"" /etc/libvirt/libvirtd.conf; then + echo "log_filters=\"$log_filters\"" | sudo tee -a /etc/libvirt/libvirtd.conf + fi + if ! 
grep -q "log_outputs=\"$log_outputs\"" /etc/libvirt/libvirtd.conf; then + echo "log_outputs=\"$log_outputs\"" | sudo tee -a /etc/libvirt/libvirtd.conf + fi # The group **$LIBVIRT_GROUP** is added to the current user in this script. # Use 'sg' to execute nova-compute as a member of the **$LIBVIRT_GROUP** group. From 315f7b0747effbd490ff3b25d85bc6399ed290a1 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Mon, 27 Jan 2014 09:40:29 +0100 Subject: [PATCH 0656/4704] Use service postgresql initdb with el6 postgresql-setup does not exists on el6, the service postgresql initdb is the documented db init command. Change-Id: I2b92a3c8e7db603eb13378e46893fc81f507405b --- lib/databases/postgresql | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/lib/databases/postgresql b/lib/databases/postgresql index 60e5a33715..c459feb9e0 100644 --- a/lib/databases/postgresql +++ b/lib/databases/postgresql @@ -43,7 +43,13 @@ function configure_database_postgresql { if is_fedora; then PG_HBA=/var/lib/pgsql/data/pg_hba.conf PG_CONF=/var/lib/pgsql/data/postgresql.conf - sudo [ -e $PG_HBA ] || sudo postgresql-setup initdb + if ! sudo [ -e $PG_HBA ]; then + if ! [[ $DISTRO =~ (rhel6) ]]; then + sudo postgresql-setup initdb + else + sudo service postgresql initdb + fi + fi elif is_ubuntu; then PG_DIR=`find /etc/postgresql -name pg_hba.conf|xargs dirname` PG_HBA=$PG_DIR/pg_hba.conf From e7b6399d455ea3f44c46448449cc90d55356f23e Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Mon, 27 Jan 2014 11:44:03 +0100 Subject: [PATCH 0657/4704] Install bc with all distribution After commit def4c141 the bc is requred for devstack install on minimal image, commit fbe12f98 fixed the issue with ubuntu, but not with other distribution. Adding bc to the files/rpms-suse/general and files/rpms/general. 
Change-Id: Ieb2e3e2af454bca03bb3d7565ff731dc357e699f --- files/rpms-suse/general | 1 + files/rpms/general | 1 + 2 files changed, 2 insertions(+) diff --git a/files/rpms-suse/general b/files/rpms-suse/general index 98c279581e..704947ea53 100644 --- a/files/rpms-suse/general +++ b/files/rpms-suse/general @@ -17,6 +17,7 @@ tcpdump unzip vim-enhanced wget +bc findutils-locate # useful when debugging lsof # useful when debugging diff --git a/files/rpms/general b/files/rpms/general index 40246ea4ab..6cfe31eaf1 100644 --- a/files/rpms/general +++ b/files/rpms/general @@ -21,6 +21,7 @@ tcpdump unzip wget which +bc # [1] : some of installed tools have unversioned dependencies on this, # but others have versioned (<=0.7). So if a later version (0.7.1) From d8416d7c1c71c82fa9c0f0e7a6518ce043bff120 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Mon, 27 Jan 2014 15:36:06 -0500 Subject: [PATCH 0658/4704] allow for upgrade of the precise kernel we are getting kernel crashes in the OpenStack gate, to test getting around this we'd like devstack to be able to upgrade the precise kernel to the latest lts backported kernel. default to off Change-Id: I5d47aa8d15b1b1c0386a13b65022f6b8108c5c49 --- tools/fixup_stuff.sh | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index 5fb47dc29b..a28e10ef2d 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -69,6 +69,22 @@ if [[ -d $dir ]]; then sudo chmod +r $dir/* fi +# Ubuntu 12.04 +# ----- +# We can regularly get kernel crashes on the 12.04 default kernel, so attempt +# to install a new kernel +if [[ ${DISTRO} =~ (precise) ]]; then + # Finally, because we suspect the Precise kernel is problematic, install a new kernel + UPGRADE_KERNEL=$(trueorfalse False $UPGRADE_KERNEL) + if [[ $UPGRADE_KERNEL == "True" ]]; then + if [[ ! 
`uname -r` =~ (^3\.11) ]]; then + apt_get install linux-generic-lts-saucy + echo "Installing Saucy LTS kernel, please reboot before proceeding" + exit 1 + fi + fi +fi + # RHEL6 # ----- From bb8227ce69b9b040b98dbe339e4f5c02172d19ac Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 27 Jan 2014 12:21:52 -0600 Subject: [PATCH 0659/4704] Fix Swift process kill stop_swift() was not killing all swift processes properly. Change to manually clean up all screen services with pkill. Closes-bug: 1268794 Change-Id: Ibb7a2e0dd10a313609f05963264087f82f6f00e2 --- lib/swift | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/lib/swift b/lib/swift index 44c230be93..37b630c3fa 100644 --- a/lib/swift +++ b/lib/swift @@ -652,8 +652,10 @@ function stop_swift() { if type -p swift-init >/dev/null; then swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true fi - # Dump the proxy server - sudo pkill -f swift-proxy-server + for type in proxy object container account; do + # Dump all of the servers + pkill -f swift- + done } # Restore xtrace From fc744f9713fcccfebeb52e35c7fc1ce955b89200 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 27 Jan 2014 13:45:21 -0600 Subject: [PATCH 0660/4704] Convert trove to plugin Also adds an is_trove_enabled() function to match https://review.openstack.org/69497 changes for is_service_enabled(). 
Change-Id: Ic0408ff6d9816aec8a3506931470470342a5dcd7 --- extras.d/70-trove | 33 +++++++++++++++++++++++++++++++++ lib/trove | 10 ++++++++++ stack.sh | 26 +------------------------- unstack.sh | 4 ---- 4 files changed, 44 insertions(+), 29 deletions(-) create mode 100644 extras.d/70-trove diff --git a/extras.d/70-trove b/extras.d/70-trove new file mode 100644 index 0000000000..a4dc7fbc5b --- /dev/null +++ b/extras.d/70-trove @@ -0,0 +1,33 @@ +# trove.sh - Devstack extras script to install Trove + +if is_service_enabled trove; then + if [[ "$1" == "source" ]]; then + # Initial source + source $TOP_DIR/lib/trove + elif [[ "$1" == "stack" && "$2" == "install" ]]; then + echo_summary "Installing Trove" + install_trove + install_troveclient + cleanup_trove + elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then + echo_summary "Configuring Trove" + configure_troveclient + configure_trove + + if is_service_enabled key; then + create_trove_accounts + fi + + elif [[ "$1" == "stack" && "$2" == "extra" ]]; then + # Initialize trove + init_trove + + # Start the trove API and trove taskmgr components + echo_summary "Starting Trove" + start_trove + fi + + if [[ "$1" == "unstack" ]]; then + stop_trove + fi +fi diff --git a/lib/trove b/lib/trove index 8e817f5145..9c91024211 100644 --- a/lib/trove +++ b/lib/trove @@ -38,6 +38,16 @@ else TROVE_BIN_DIR=$(get_python_exec_prefix) fi +# Functions +# --------- + +# Test if any Trove services are enabled +# is_trove_enabled +function is_trove_enabled { + [[ ,${ENABLED_SERVICES} =~ ,"tr-" ]] && return 0 + return 1 +} + # setup_trove_logging() - Adds logging configuration to conf files function setup_trove_logging() { local CONF=$1 diff --git a/stack.sh b/stack.sh index a2469f1868..45d47c819c 100755 --- a/stack.sh +++ b/stack.sh @@ -3,7 +3,7 @@ # ``stack.sh`` is an opinionated OpenStack developer installation. 
It # installs and configures various combinations of **Ceilometer**, **Cinder**, # **Glance**, **Heat**, **Horizon**, **Keystone**, **Nova**, **Neutron**, -# **Swift**, and **Trove** +# and **Swift** # This script allows you to specify configuration options of what git # repositories to use, enabled services, network configuration and various @@ -337,7 +337,6 @@ source $TOP_DIR/lib/neutron source $TOP_DIR/lib/baremetal source $TOP_DIR/lib/ldap source $TOP_DIR/lib/ironic -source $TOP_DIR/lib/trove # Extras Source # -------------- @@ -739,12 +738,6 @@ if is_service_enabled heat; then configure_heat fi -if is_service_enabled trove; then - install_trove - install_troveclient - cleanup_trove -fi - if is_service_enabled tls-proxy; then configure_CA init_CA @@ -927,10 +920,6 @@ if is_service_enabled key; then create_cinder_accounts create_neutron_accounts - if is_service_enabled trove; then - create_trove_accounts - fi - if is_service_enabled ceilometer; then create_ceilometer_accounts fi @@ -1204,19 +1193,6 @@ if is_service_enabled heat; then start_heat fi -# Configure and launch the trove service api, and taskmanager -if is_service_enabled trove; then - # Initialize trove - echo_summary "Configuring Trove" - configure_troveclient - configure_trove - init_trove - - # Start the trove API and trove taskmgr components - echo_summary "Starting Trove" - start_trove -fi - # Create account rc files # ======================= diff --git a/unstack.sh b/unstack.sh index 31f6f01c8f..92d0642c38 100755 --- a/unstack.sh +++ b/unstack.sh @@ -56,7 +56,6 @@ source $TOP_DIR/lib/neutron source $TOP_DIR/lib/baremetal source $TOP_DIR/lib/ldap source $TOP_DIR/lib/ironic -source $TOP_DIR/lib/trove # Extras Source # -------------- @@ -92,9 +91,6 @@ if [[ "$Q_USE_DEBUG_COMMAND" == "True" ]]; then fi # Call service stop -if is_service_enabled trove; then - stop_trove -fi if is_service_enabled heat; then stop_heat From abb7df152328fd83924070c4c40843847fb6d87a Mon Sep 17 00:00:00 2001 From: Sergey 
Lukjanov Date: Tue, 28 Jan 2014 22:38:06 +0400 Subject: [PATCH 0661/4704] Include SAVANNA_CONF_DIR into SAVANNA_CONF_FILE It's the commom way of using X_CONF_FILE variable. Change-Id: Ibc284be44ffdd25be3191913c78424cbf06b2bb0 --- lib/savanna | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/lib/savanna b/lib/savanna index c7d59f79c4..de2044318b 100644 --- a/lib/savanna +++ b/lib/savanna @@ -26,7 +26,7 @@ SAVANNA_BRANCH=${SAVANNA_BRANCH:-master} # Set up default directories SAVANNA_DIR=$DEST/savanna SAVANNA_CONF_DIR=${SAVANNA_CONF_DIR:-/etc/savanna} -SAVANNA_CONF_FILE=savanna.conf +SAVANNA_CONF_FILE=${SAVANNA_CONF_DIR}/savanna.conf SAVANNA_DEBUG=${SAVANNA_DEBUG:-True} SAVANNA_SERVICE_HOST=${SAVANNA_SERVICE_HOST:-$SERVICE_HOST} @@ -88,24 +88,24 @@ function configure_savanna() { sudo chown $STACK_USER $SAVANNA_CONF_DIR # Copy over savanna configuration file and configure common parameters. - cp $SAVANNA_DIR/etc/savanna/savanna.conf.sample $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE + cp $SAVANNA_DIR/etc/savanna/savanna.conf.sample $SAVANNA_CONF_FILE - iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT os_admin_password $SERVICE_PASSWORD - iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT os_admin_username savanna - iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT os_admin_tenant_name $SERVICE_TENANT_NAME - iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT debug $SAVANNA_DEBUG + iniset $SAVANNA_CONF_FILE DEFAULT os_admin_password $SERVICE_PASSWORD + iniset $SAVANNA_CONF_FILE DEFAULT os_admin_username savanna + iniset $SAVANNA_CONF_FILE DEFAULT os_admin_tenant_name $SERVICE_TENANT_NAME + iniset $SAVANNA_CONF_FILE DEFAULT debug $SAVANNA_DEBUG - iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE database connection `database_connection_url savanna` + iniset $SAVANNA_CONF_FILE database connection `database_connection_url savanna` if is_service_enabled neutron; then - iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT use_neutron true - 
iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT use_floating_ips true + iniset $SAVANNA_CONF_FILE DEFAULT use_neutron true + iniset $SAVANNA_CONF_FILE DEFAULT use_floating_ips true fi - iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT use_syslog $SYSLOG + iniset $SAVANNA_CONF_FILE DEFAULT use_syslog $SYSLOG recreate_database savanna utf8 - $SAVANNA_BIN_DIR/savanna-db-manage --config-file $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE upgrade head + $SAVANNA_BIN_DIR/savanna-db-manage --config-file $SAVANNA_CONF_FILE upgrade head } # install_savanna() - Collect source and prepare @@ -116,7 +116,7 @@ function install_savanna() { # start_savanna() - Start running processes, including screen function start_savanna() { - screen_it savanna "cd $SAVANNA_DIR && $SAVANNA_BIN_DIR/savanna-api --config-file $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE" + screen_it savanna "cd $SAVANNA_DIR && $SAVANNA_BIN_DIR/savanna-api --config-file $SAVANNA_CONF_FILE" } # stop_savanna() - Stop running processes From 1f76328027bb5cee0b0ea7077f4c59c919f1c4ae Mon Sep 17 00:00:00 2001 From: Jakub Libosvar Date: Tue, 28 Jan 2014 23:01:38 +0100 Subject: [PATCH 0662/4704] Stop all neutron-ns-metadata-proxy with stop_neutron Process name is actually python therefore neutron-ns-metadata-proxy pattern didn't match wanted process. Closes-bug: #1269982 Change-Id: Ib4439b0d32f103253b461841fa903c65763ff280 --- lib/neutron | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron b/lib/neutron index 960f11b154..f9ee484607 100644 --- a/lib/neutron +++ b/lib/neutron @@ -505,7 +505,7 @@ function stop_neutron() { [ ! 
-z "$pid" ] && sudo kill -9 $pid fi if is_service_enabled q-meta; then - sudo pkill -9 neutron-ns-metadata-proxy || : + sudo pkill -9 -f neutron-ns-metadata-proxy || : fi if is_service_enabled q-lbaas; then From 4a0cd374e2911adb33af44fa6643d6323ea523e6 Mon Sep 17 00:00:00 2001 From: shalini khandelwal Date: Wed, 29 Jan 2014 09:48:15 +0000 Subject: [PATCH 0663/4704] Renamed file 70-trove to 70-trove.sh Reason: Devstack not installing trove stack.sh ignores the trove installation script(70-trove) Change-Id: I3f179a6b5ded46e9f96a1c4bcc673ec52fa8bf0e Closes-Bug: #1274022 --- extras.d/{70-trove => 70-trove.sh} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename extras.d/{70-trove => 70-trove.sh} (100%) diff --git a/extras.d/70-trove b/extras.d/70-trove.sh similarity index 100% rename from extras.d/70-trove rename to extras.d/70-trove.sh From f2c1a712e82ac1d347b0fb6526c79471a9ef8d55 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Wed, 29 Jan 2014 21:38:14 +0000 Subject: [PATCH 0664/4704] Copy container-sync-realms.conf in /etc/swift We need the new container-sync realms configuration or we will get a nasty harmless error opening file at swift proxy startup. 
Change-Id: If939da305dcb9403c418219032ac6b50b0099bd3 Closes-Bug: 1274295 --- lib/swift | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/swift b/lib/swift index 37b630c3fa..baa03ec5b8 100644 --- a/lib/swift +++ b/lib/swift @@ -258,6 +258,8 @@ function configure_swift() { SWIFT_CONFIG_PROXY_SERVER=${SWIFT_CONF_DIR}/proxy-server.conf cp ${SWIFT_DIR}/etc/proxy-server.conf-sample ${SWIFT_CONFIG_PROXY_SERVER} + cp ${SWIFT_DIR}/etc/container-sync-realms.conf-sample ${SWIFT_CONF_DIR}/container-sync-realms.conf + iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT user iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT user ${STACK_USER} From 4237f590b7b93117e59f9f777bc70d212969f61a Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 29 Jan 2014 16:22:11 -0600 Subject: [PATCH 0665/4704] Generate Tempest service list rather than hard-code it The list of services that Tempest used to set its 'service_available' config values was hard-coded. To be plugin-friendly have each service (project) add its name to the TEMPEST_SERVICES variable and use that for setting the 'service_avilable' values. 
Change-Id: I208efd7fd0798b18ac2e6353ee70b773e84a2683 --- lib/ceilometer | 5 ++++- lib/cinder | 3 +++ lib/glance | 3 +++ lib/heat | 4 ++++ lib/horizon | 3 +++ lib/ironic | 3 +++ lib/marconi | 4 ++++ lib/neutron | 4 ++++ lib/nova | 3 +++ lib/savanna | 4 ++++ lib/swift | 3 +++ lib/tempest | 2 +- lib/trove | 4 ++++ stackrc | 6 ++++++ 14 files changed, 49 insertions(+), 2 deletions(-) diff --git a/lib/ceilometer b/lib/ceilometer index 6f3896f2d4..30bf3aed50 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -52,7 +52,10 @@ CEILOMETER_BACKEND=${CEILOMETER_BACKEND:-mysql} CEILOMETER_SERVICE_PROTOCOL=http CEILOMETER_SERVICE_HOST=$SERVICE_HOST CEILOMETER_SERVICE_PORT=${CEILOMETER_SERVICE_PORT:-8777} -# + +# Tell Tempest this project is present +TEMPEST_SERVICES+=,ceilometer + # Functions # --------- diff --git a/lib/cinder b/lib/cinder index d76a41d4b8..9f70b2a0c9 100644 --- a/lib/cinder +++ b/lib/cinder @@ -79,6 +79,9 @@ VOLUME_BACKING_DEVICE2=${VOLUME_BACKING_DEVICE2:-} VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-} +# Tell Tempest this project is present +TEMPEST_SERVICES+=,cinder + # Functions # --------- diff --git a/lib/glance b/lib/glance index 55d5fb37ec..2d41ea4653 100644 --- a/lib/glance +++ b/lib/glance @@ -52,6 +52,9 @@ fi # Glance connection info. Note the port must be specified. GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$SERVICE_HOST:9292} +# Tell Tempest this project is present +TEMPEST_SERVICES+=,glance + # Functions # --------- diff --git a/lib/heat b/lib/heat index b9b8aa66ca..467619f3c6 100644 --- a/lib/heat +++ b/lib/heat @@ -38,6 +38,10 @@ HEAT_CONF=$HEAT_CONF_DIR/heat.conf HEAT_ENV_DIR=$HEAT_CONF_DIR/environment.d HEAT_TEMPLATES_DIR=$HEAT_CONF_DIR/templates +# Tell Tempest this project is present +TEMPEST_SERVICES+=,heat + + # Functions # --------- diff --git a/lib/horizon b/lib/horizon index 5bff712743..c64d8502ba 100644 --- a/lib/horizon +++ b/lib/horizon @@ -31,6 +31,9 @@ HORIZON_DIR=$DEST/horizon # The example file in Horizon repo is used by default. 
HORIZON_SETTINGS=${HORIZON_SETTINGS:-$HORIZON_DIR/openstack_dashboard/local/local_settings.py.example} +# Tell Tempest this project is present +TEMPEST_SERVICES+=,horizon + # Functions # --------- diff --git a/lib/ironic b/lib/ironic index afbc3e09e4..b8838f59fb 100644 --- a/lib/ironic +++ b/lib/ironic @@ -42,6 +42,9 @@ IRONIC_BIN_DIR=$(get_python_exec_prefix) IRONIC_SERVICE_PROTOCOL=http IRONIC_HOSTPORT=${IRONIC_HOSTPORT:-$SERVICE_HOST:6385} +# Tell Tempest this project is present +TEMPEST_SERVICES+=,ironic + # Functions # --------- diff --git a/lib/marconi b/lib/marconi index 6b9ffdc0b3..1eaebbdf16 100644 --- a/lib/marconi +++ b/lib/marconi @@ -51,6 +51,10 @@ MARCONI_BRANCH=${MARCONI_BRANCH:-master} MARCONICLIENT_REPO=${MARCONICLIENT_REPO:-${GIT_BASE}/openstack/python-marconiclient.git} MARCONICLIENT_BRANCH=${MARCONICLIENT_BRANCH:-master} +# Tell Tempest this project is present +TEMPEST_SERVICES+=,marconi + + # Functions # --------- diff --git a/lib/neutron b/lib/neutron index 960f11b154..68dfd4a6a3 100644 --- a/lib/neutron +++ b/lib/neutron @@ -237,6 +237,10 @@ else Q_USE_SECGROUP=False fi +# Tell Tempest this project is present +TEMPEST_SERVICES+=,neutron + + # Functions # --------- diff --git a/lib/nova b/lib/nova index dbaa3f53d9..9db19ed532 100644 --- a/lib/nova +++ b/lib/nova @@ -122,6 +122,9 @@ MULTI_HOST=`trueorfalse False $MULTI_HOST` TEST_FLOATING_POOL=${TEST_FLOATING_POOL:-test} TEST_FLOATING_RANGE=${TEST_FLOATING_RANGE:-192.168.253.0/29} +# Tell Tempest this project is present +TEMPEST_SERVICES+=,nova + # Functions # --------- diff --git a/lib/savanna b/lib/savanna index c7d59f79c4..176f290c35 100644 --- a/lib/savanna +++ b/lib/savanna @@ -40,6 +40,10 @@ else SAVANNA_BIN_DIR=$(get_python_exec_prefix) fi +# Tell Tempest this project is present +TEMPEST_SERVICES+=,savanna + + # Functions # --------- diff --git a/lib/swift b/lib/swift index 37b630c3fa..afdf995d2e 100644 --- a/lib/swift +++ b/lib/swift @@ -111,6 +111,9 @@ 
OBJECT_PORT_BASE=${OBJECT_PORT_BASE:-6013} CONTAINER_PORT_BASE=${CONTAINER_PORT_BASE:-6011} ACCOUNT_PORT_BASE=${ACCOUNT_PORT_BASE:-6012} +# Tell Tempest this project is present +TEMPEST_SERVICES+=,swift + # Functions # --------- diff --git a/lib/tempest b/lib/tempest index ef9dfe218b..ee996657c2 100644 --- a/lib/tempest +++ b/lib/tempest @@ -329,7 +329,7 @@ function configure_tempest() { iniset $TEMPEST_CONF network-feature-enabled api_extensions "${NETWORK_API_EXTENSIONS:-all}" # service_available - for service in nova cinder glance neutron swift heat horizon ceilometer ironic savanna trove marconi; do + for service in ${TEMPEST_SERVICES//,/ }; do if is_service_enabled $service ; then iniset $TEMPEST_CONF service_available $service "True" else diff --git a/lib/trove b/lib/trove index 9c91024211..1fd011a530 100644 --- a/lib/trove +++ b/lib/trove @@ -38,6 +38,10 @@ else TROVE_BIN_DIR=$(get_python_exec_prefix) fi +# Tell Tempest this project is present +TEMPEST_SERVICES+=,trove + + # Functions # --------- diff --git a/stackrc b/stackrc index 8a0280ecfa..197b4cfc46 100644 --- a/stackrc +++ b/stackrc @@ -37,6 +37,12 @@ fi # enable_service tempest ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,n-cond,cinder,c-sch,c-api,c-vol,n-sch,n-novnc,n-xvnc,n-cauth,horizon,rabbit,tempest,mysql +# Tell Tempest which services are available. The default is set here as +# Tempest falls late in the configuration sequence. This differs from +# ``ENABLED_SERVICES`` in that the project names are used here rather than +# the service names, i.e.: TEMPEST_SERVICES="key,glance,nova" +TEMPEST_SERVICES="" + # Set the default Nova APIs to enable NOVA_ENABLED_APIS=ec2,osapi_compute,metadata From 6c57fbab26e40af5c5b19b46fb3da39341f34dab Mon Sep 17 00:00:00 2001 From: Clark Boylan Date: Wed, 20 Nov 2013 17:00:21 -0800 Subject: [PATCH 0666/4704] Set keystone admin_bind_host to KEYSTONE_SERVICE_HOST On Linux ports 32768-61000 can be used by just about anything needing a socket. 
Keystone's IANA assigned port is 35357. Occasionally something else will be using port 35357 first because Linux allows this. Workaround is to bind to port 127.0.0.1 instead of 0.0.0.0. $KEYSTONE_SERVICE_HOST gets its value from $SERVICE_HOST which is set to 127.0.0.1 in the gate. "Ephemeral (client) ports will *never* be sourced from 0.0.0.0, and are uniquely identified by the full connection five-tuple (proto, src IP, src port, dst IP, dst port) anyway, allowing them to overlap src IP/src port as long as proto/dst IP/dst port are different. Thus it is up to keystone/devstack to bind more appropriately and not use wildcard bind addresses unless explicitly necessary for some reason. For example, in the log output, the URLs are configured with dst IPs of 127.0.0.1 anyway, so binding explicitly to localhost would change nothing, while skirting this particular edge case nicely." ~Evan Callicoat This doesn't fix bug 1253482 it works around it while a better solution is prepared (running keystone behind apache in devstack). 
Co-Authored-By: Joe Gordon Change-Id: I112309661dadf8b753c3311182f82464d9d3595e Related-bug: #1253482 --- lib/keystone | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/keystone b/lib/keystone index 0850fb219e..4f7f68b57f 100644 --- a/lib/keystone +++ b/lib/keystone @@ -178,6 +178,7 @@ function configure_keystone() { # Set the URL advertised in the ``versions`` structure returned by the '/' route iniset $KEYSTONE_CONF DEFAULT public_endpoint "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:%(public_port)s/" iniset $KEYSTONE_CONF DEFAULT admin_endpoint "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:%(admin_port)s/" + iniset $KEYSTONE_CONF DEFAULT admin_bind_host "$KEYSTONE_SERVICE_HOST" # Register SSL certificates if provided if is_ssl_enabled_service key; then From ec5918f2f6ee54c3384e85866e98b67ef01e1e1e Mon Sep 17 00:00:00 2001 From: Ben Nemec Date: Thu, 30 Jan 2014 16:07:23 +0000 Subject: [PATCH 0667/4704] Retry rabbitmq password change Due to the bug referenced below, on Fedora it is possible for the rabbitmq password change to fail the first time rabbitmq is started. This change adds a retry loop to avoid the problem in devstack. One retry should be enough in most (all?) cases, but this will retry up to ten times just to be safe. Note that just retrying the password change is not enough. The rabbitmq-server service must be restarted as well. 
Change-Id: I403dcd503aa8e74e2ba6312a0decf0d4fd0d8795 bz: https://bugzilla.redhat.com/show_bug.cgi?id=1059028 --- lib/rpc_backend | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/lib/rpc_backend b/lib/rpc_backend index f59c80096f..3651bc0d20 100644 --- a/lib/rpc_backend +++ b/lib/rpc_backend @@ -139,12 +139,18 @@ function restart_rpc_backend() { if is_service_enabled rabbit; then # Start rabbitmq-server echo_summary "Starting RabbitMQ" - if is_fedora || is_suse; then - # service is not started by default - restart_service rabbitmq-server - fi - # change the rabbit password since the default is "guest" - sudo rabbitmqctl change_password guest $RABBIT_PASSWORD + # NOTE(bnemec): Retry initial rabbitmq configuration to deal with + # the fact that sometimes it fails to start properly. + # Reference: https://bugzilla.redhat.com/show_bug.cgi?id=1059028 + for i in `seq 10`; do + if is_fedora || is_suse; then + # service is not started by default + restart_service rabbitmq-server + fi + # change the rabbit password since the default is "guest" + sudo rabbitmqctl change_password guest $RABBIT_PASSWORD && break + [[ $i -eq "10" ]] && die $LINENO "Failed to set rabbitmq password" + done if is_service_enabled n-cell; then # Add partitioned access for the child cell if [ -z `sudo rabbitmqctl list_vhosts | grep child_cell` ]; then From f84eb5ba43ec0d548e59d982ec149a8feaa4d4d0 Mon Sep 17 00:00:00 2001 From: Don Dugger Date: Thu, 30 Jan 2014 09:59:30 -0700 Subject: [PATCH 0668/4704] Add support for Gantt Gantt is the new breakout of the scheduler code from the Nova source tree. These changes allow devstack to install/configure/startup gantt as the scheduler service for openstack. 
Change-Id: Ia2b6001f5ccf2469ee9fdee67564c9a915a13862 --- extras.d/70-gantt.sh | 31 ++++++++++++++ lib/gantt | 96 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 127 insertions(+) create mode 100644 extras.d/70-gantt.sh create mode 100644 lib/gantt diff --git a/extras.d/70-gantt.sh b/extras.d/70-gantt.sh new file mode 100644 index 0000000000..ac1efba748 --- /dev/null +++ b/extras.d/70-gantt.sh @@ -0,0 +1,31 @@ +# gantt.sh - Devstack extras script to install Gantt + +if is_service_enabled n-sch; then + disable_service gantt +fi + +if is_service_enabled gantt; then + if [[ "$1" == "source" ]]; then + # Initial source + source $TOP_DIR/lib/gantt + elif [[ "$1" == "stack" && "$2" == "install" ]]; then + echo_summary "Installing Gantt" + install_gantt + cleanup_gantt + elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then + echo_summary "Configuring Gantt" + configure_gantt + + elif [[ "$1" == "stack" && "$2" == "extra" ]]; then + # Initialize gantt + init_gantt + + # Start gantt + echo_summary "Starting Gantt" + start_gantt + fi + + if [[ "$1" == "unstack" ]]; then + stop_gantt + fi +fi diff --git a/lib/gantt b/lib/gantt new file mode 100644 index 0000000000..832d7590df --- /dev/null +++ b/lib/gantt @@ -0,0 +1,96 @@ +# lib/gantt +# Install and start **Gantt** scheduler service + +# Dependencies: +# +# - functions +# - DEST, DATA_DIR, STACK_USER must be defined + +# stack.sh +# --------- +# - install_gantt +# - configure_gantt +# - init_gantt +# - start_gantt +# - stop_gantt +# - cleanup_gantt + +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace + +# Defaults +# -------- + +# set up default directories +GANTT_DIR=$DEST/gantt +GANTT_STATE_PATH=${GANTT_STATE_PATH:=$DATA_DIR/gantt} +GANTT_REPO=${GANTT_REPO:-${GIT_BASE}/openstack/gantt.git} +GANTT_BRANCH=${GANTT_BRANCH:-master} + +GANTTCLIENT_DIR=$DEST/python-ganttclient +GANTTCLIENT_REPO=${GANTT_REPO:-${GIT_BASE}/openstack/python-ganttclient.git} 
+GANTTCLIENT_BRANCH=${GANTT_BRANCH:-master} + +# eventually we will have a separate gantt config +# file but for compatibility reasone stick with +# nova.conf for now +GANTT_CONF_DIR=${GANTT_CONF_DIR:-/etc/nova} +GANTT_CONF=$GANTT_CONF_DIR/nova.conf + +# Support entry points installation of console scripts +GANTT_BIN_DIR=$(get_python_exec_prefix) + + +# Functions +# --------- + +# cleanup_gantt() - Remove residual data files, anything left over from previous +# runs that a clean run would need to clean up +function cleanup_gantt() { + echo "Cleanup Gantt" +} + +# configure_gantt() - Set config files, create data dirs, etc +function configure_gantt() { + echo "Configure Gantt" +} + +# init_gantt() - Initialize database and volume group +function init_gantt() { + echo "Initialize Gantt" +} + +# install_gantt() - Collect source and prepare +function install_gantt() { + git_clone $GANTT_REPO $GANTT_DIR $GANTT_BRANCH + setup_develop $GANTT_DIR +} + +# install_ganttclient() - Collect source and prepare +function install_ganttclient() { + echo "Install Gantt Client" +# git_clone $GANTTCLIENT_REPO $GANTTCLIENT_DIR $GANTTCLIENT_BRANCH +# setup_develop $GANTTCLIENT_DIR +} + +# start_gantt() - Start running processes, including screen +function start_gantt() { + if is_service_enabled gantt; then + screen_it gantt "cd $GANTT_DIR && $GANTT_BIN_DIR/gantt-scheduler --config-file $GANTT_CONF" + fi +} + +# stop_gantt() - Stop running processes +function stop_gantt() { + echo "Stop Gantt" + screen_stop gantt +} + +# Restore xtrace +$XTRACE + +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: From 2dac885e6c48989d9a7bc89aca2b69503d2b3399 Mon Sep 17 00:00:00 2001 From: Eric Windisch Date: Fri, 31 Jan 2014 01:25:28 -0500 Subject: [PATCH 0669/4704] Pull docker images from global registry The global docker registry is where images are being built and uploaded. It's effectively docker's version of, say, 'pip'. 
The static tarballs are not only an extra maintenance burden as they're outside the standard build and publishing process, but are presently outside the scope of an open development / release process as well. While this process does cause some trouble with network-independence for CI purposes, the fetching is still done from install-docker.sh; Additionally, this driver is not currently tested via the community CI effort. Change-Id: I3ee6bfee9c273cd3aabe1e00a1d1a8856a466189 --- lib/nova_plugins/hypervisor-docker | 8 ++++---- tools/docker/install_docker.sh | 23 ++++++----------------- 2 files changed, 10 insertions(+), 21 deletions(-) diff --git a/lib/nova_plugins/hypervisor-docker b/lib/nova_plugins/hypervisor-docker index 0153953d6c..bb934b87d6 100644 --- a/lib/nova_plugins/hypervisor-docker +++ b/lib/nova_plugins/hypervisor-docker @@ -31,10 +31,10 @@ DOCKER_UNIX_SOCKET=/var/run/docker.sock DOCKER_PID_FILE=/var/run/docker.pid DOCKER_REGISTRY_PORT=${DOCKER_REGISTRY_PORT:-5042} -DOCKER_IMAGE=${DOCKER_IMAGE:-http://get.docker.io/images/openstack/docker-ut.tar.gz} -DOCKER_IMAGE_NAME=docker-busybox -DOCKER_REGISTRY_IMAGE=${DOCKER_REGISTRY_IMAGE:-http://get.docker.io/images/openstack/docker-registry.tar.gz} -DOCKER_REGISTRY_IMAGE_NAME=docker-registry +DOCKER_IMAGE=${DOCKER_IMAGE:-busybox:latest} +DOCKER_IMAGE_NAME=busybox +DOCKER_REGISTRY_IMAGE=${DOCKER_REGISTRY_IMAGE:-registry:latest} +DOCKER_REGISTRY_IMAGE_NAME=registry DOCKER_REPOSITORY_NAME=${SERVICE_HOST}:${DOCKER_REGISTRY_PORT}/${DOCKER_IMAGE_NAME} DOCKER_APT_REPO=${DOCKER_APT_REPO:-https://get.docker.io/ubuntu} diff --git a/tools/docker/install_docker.sh b/tools/docker/install_docker.sh index 375cfe958b..4fa23864fb 100755 --- a/tools/docker/install_docker.sh +++ b/tools/docker/install_docker.sh @@ -55,21 +55,10 @@ if ! 
timeout $SERVICE_TIMEOUT sh -c "$CONFIGURE_CMD"; then die $LINENO "docker did not start" fi +# Get guest container image +docker pull $DOCKER_IMAGE +docker tag $DOCKER_IMAGE $DOCKER_IMAGE_NAME -# Get Docker image -if [[ ! -r $FILES/docker-ut.tar.gz ]]; then - (cd $FILES; curl -OR $DOCKER_IMAGE) -fi -if [[ ! -r $FILES/docker-ut.tar.gz ]]; then - die $LINENO "Docker image unavailable" -fi -docker import - $DOCKER_IMAGE_NAME <$FILES/docker-ut.tar.gz - -# Get Docker registry image -if [[ ! -r $FILES/docker-registry.tar.gz ]]; then - (cd $FILES; curl -OR $DOCKER_REGISTRY_IMAGE) -fi -if [[ ! -r $FILES/docker-registry.tar.gz ]]; then - die $LINENO "Docker registry image unavailable" -fi -docker import - $DOCKER_REGISTRY_IMAGE_NAME <$FILES/docker-registry.tar.gz +# Get docker-registry image +docker pull $REGISTRY_IMAGE +docker tag $REGISTRY_IMAGE $REGISTRY_IMAGE_NAME From 19a3814b9a3afc24a77c5c301622661f388475d5 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 30 Jan 2014 15:49:53 +0100 Subject: [PATCH 0670/4704] glance: stop using deprecated notifier_strategy Change-Id: Ic796f0ad57db45bf053312ad10815461528030b3 --- lib/glance | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/lib/glance b/lib/glance index 2d41ea4653..07c4408efc 100644 --- a/lib/glance +++ b/lib/glance @@ -108,10 +108,8 @@ function configure_glance() { iniset $GLANCE_API_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME iniset $GLANCE_API_CONF keystone_authtoken admin_user glance iniset $GLANCE_API_CONF keystone_authtoken admin_password $SERVICE_PASSWORD - if is_service_enabled qpid; then - iniset $GLANCE_API_CONF DEFAULT notifier_strategy qpid - elif [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then - iniset $GLANCE_API_CONF DEFAULT notifier_strategy rabbit + if is_service_enabled qpid || [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then + iniset $GLANCE_API_CONF DEFAULT notification_driver messaging fi iniset_rpc_backend glance $GLANCE_API_CONF 
DEFAULT iniset $GLANCE_API_CONF keystone_authtoken signing_dir $GLANCE_AUTH_CACHE_DIR/api From 061c14da01bb25ff86e0bfdb5e1bed887cb63997 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 30 Jan 2014 15:51:37 +0100 Subject: [PATCH 0671/4704] ironic: remove notifier_strategy option This has never exited in Ironic, and it does not even uses notification. Change-Id: I4a3d386116561d9a22d650f123df1aae5ed9849e --- lib/ironic | 5 ----- 1 file changed, 5 deletions(-) diff --git a/lib/ironic b/lib/ironic index b8838f59fb..983add83d1 100644 --- a/lib/ironic +++ b/lib/ironic @@ -105,11 +105,6 @@ function configure_ironic_api() { iniset $IRONIC_CONF_FILE keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME iniset $IRONIC_CONF_FILE keystone_authtoken admin_user ironic iniset $IRONIC_CONF_FILE keystone_authtoken admin_password $SERVICE_PASSWORD - if is_service_enabled qpid; then - iniset $IRONIC_CONF_FILE DEFAULT notifier_strategy qpid - elif [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then - iniset $IRONIC_CONF_FILE DEFAULT notifier_strategy rabbit - fi iniset_rpc_backend ironic $IRONIC_CONF_FILE DEFAULT iniset $IRONIC_CONF_FILE keystone_authtoken signing_dir $IRONIC_AUTH_CACHE_DIR/api From 6114a518de8d2db560db193ed4bc26d6e1659ce7 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Fri, 31 Jan 2014 08:21:24 -0500 Subject: [PATCH 0672/4704] fix sar reporting in the gate the sar filter made an assumption of time display including an AM/PM... which isn't true in all environments. Hence the blank sysstat screen in the gate runs of late. This fixes that, and displays the first line which includes header version to make sure we are functioning. 
Change-Id: I537e0bf2127efaf337c4792bc23d938145c8990d --- tools/sar_filter.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/tools/sar_filter.py b/tools/sar_filter.py index ed8c19687c..24ef0e476c 100755 --- a/tools/sar_filter.py +++ b/tools/sar_filter.py @@ -25,10 +25,10 @@ def is_data_line(line): def parse_line(line): - m = re.search('(\d\d:\d\d:\d\d \w\w)(\s+((\S+)\s*)+)', line) + m = re.search('(\d\d:\d\d:\d\d( \w\w)?)(\s+((\S+)\s*)+)', line) if m: date = m.group(1) - data = m.group(2).rstrip() + data = m.group(3).rstrip() return date, data else: return None, None @@ -47,6 +47,10 @@ def parse_line(line): data_line = "" printed_header = False current_ts = None + +# print out the first sysstat line regardless +print process.stdout.readline() + while True: nextline = process.stdout.readline() if nextline == '' and process.poll() is not None: From 43d950843769135d32ce316cfb0f72697a879623 Mon Sep 17 00:00:00 2001 From: Russell Bryant Date: Thu, 30 Jan 2014 17:49:22 -0500 Subject: [PATCH 0673/4704] Install libguestfs for nova-compute on Ubuntu We were already installing this for n-cpu on rpm distros, but not Ubuntu. Install it so that nova-compute can use it for file injection, which is the preferred method over nbd. Set CONF.libvirt.inject_partition to -1. This enables using libguestfs to determine the proper partition to inject into. Don't bother trying to load the nbd kernel module anymore. It won't be used since we know always expect libguestfs to be installed. 
Change-Id: Ifa9d95bf759f1dad8685590a2df242d852dd2cb0 --- files/apts/n-cpu | 2 +- lib/nova | 6 ++++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/files/apts/n-cpu b/files/apts/n-cpu index 29e37603b7..b287107256 100644 --- a/files/apts/n-cpu +++ b/files/apts/n-cpu @@ -1,8 +1,8 @@ # Stuff for diablo volumes -nbd-client lvm2 open-iscsi open-iscsi-utils # Deprecated since quantal dist:precise genisoimage sysfsutils sg3-utils +python-guestfs diff --git a/lib/nova b/lib/nova index 9db19ed532..d5f7514be5 100644 --- a/lib/nova +++ b/lib/nova @@ -240,8 +240,10 @@ function configure_nova() { sudo sysctl -w net.ipv4.ip_forward=1 if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then - # Attempt to load modules: network block device - used to manage qcow images - sudo modprobe nbd || true + # When libguestfs is available for file injection, enable using + # libguestfs to inspect the image and figure out the proper + # partition to inject into. + iniset $NOVA_CONF libvirt inject_partition '-1' # Check for kvm (hardware based virtualization). If unable to initialize # kvm, we drop back to the slower emulation mode (qemu). Note: many systems From c4f47345a588b15d83ebc5584c8698843b568a40 Mon Sep 17 00:00:00 2001 From: Jeremy Stanley Date: Sat, 25 Jan 2014 01:10:31 +0000 Subject: [PATCH 0674/4704] Make MySQL query logging optional * lib/databases/mysql: Wrap query log configuration in a check for a ENABLE_QUERY_LOGGING variable. * stackrc: Add the DATABASE_QUERY_LOGGING variable defaulted to True. 
Change-Id: Iddf8538ad0a1e36e2c6944dc70315984026c8245 --- lib/databases/mysql | 33 +++++++++++++++++++-------------- stackrc | 3 +++ 2 files changed, 22 insertions(+), 14 deletions(-) diff --git a/lib/databases/mysql b/lib/databases/mysql index 0eb8fdd7a2..476b4b91b7 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -87,20 +87,25 @@ function configure_database_mysql { default-storage-engine = InnoDB" $MY_CONF fi - # Turn on slow query log - sudo sed -i '/log.slow.queries/d' $MY_CONF - sudo sed -i -e "/^\[mysqld\]/ a \ -log-slow-queries = /var/log/mysql/mysql-slow.log" $MY_CONF - - # Log all queries (any query taking longer than 0 seconds) - sudo sed -i '/long.query.time/d' $MY_CONF - sudo sed -i -e "/^\[mysqld\]/ a \ -long-query-time = 0" $MY_CONF - - # Log all non-indexed queries - sudo sed -i '/log.queries.not.using.indexes/d' $MY_CONF - sudo sed -i -e "/^\[mysqld\]/ a \ -log-queries-not-using-indexes" $MY_CONF + if [[ "$DATABASE_QUERY_LOGGING" == "True" ]]; then + echo_summary "Enabling MySQL query logging" + + # Turn on slow query log + sudo sed -i '/log.slow.queries/d' $MY_CONF + sudo sed -i -e "/^\[mysqld\]/ a \ + log-slow-queries = /var/log/mysql/mysql-slow.log" $MY_CONF + + # Log all queries (any query taking longer than 0 seconds) + sudo sed -i '/long.query.time/d' $MY_CONF + sudo sed -i -e "/^\[mysqld\]/ a \ + long-query-time = 0" $MY_CONF + + # Log all non-indexed queries + sudo sed -i '/log.queries.not.using.indexes/d' $MY_CONF + sudo sed -i -e "/^\[mysqld\]/ a \ + log-queries-not-using-indexes" $MY_CONF + + fi restart_service $MYSQL } diff --git a/stackrc b/stackrc index 49fb26b2c7..276ce33970 100644 --- a/stackrc +++ b/stackrc @@ -59,6 +59,9 @@ elif [[ -f $RC_DIR/.localrc.auto ]]; then source $RC_DIR/.localrc.auto fi +# This can be used to turn database query logging on and off +# (currently only implemented for MySQL backend) +DATABASE_QUERY_LOGGING=$(trueorfalse True $DATABASE_QUERY_LOGGING) # Repositories # ------------ From 
1272bc5e93f171c8d7193475547c43b9032b5c39 Mon Sep 17 00:00:00 2001 From: Nikhil Manchanda Date: Fri, 31 Jan 2014 15:04:05 -0800 Subject: [PATCH 0675/4704] Pipeline filter is 'authtoken' and not 'tokenauth' The pipeline fileter in the api-paste.ini for the keystone middleware was renamed to 'authtoken'. Trove install is not able to authenticate against keystone unless this is renamed Change-Id: I6f912d29c143b3acbc43da222cf8b4c3fafb2c8d --- lib/trove | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/lib/trove b/lib/trove index 1fd011a530..2000446b13 100644 --- a/lib/trove +++ b/lib/trove @@ -129,14 +129,14 @@ function configure_trove() { # Copy api-paste file over to the trove conf dir and configure it cp $TROVE_LOCAL_CONF_DIR/api-paste.ini $TROVE_CONF_DIR/api-paste.ini TROVE_API_PASTE_INI=$TROVE_CONF_DIR/api-paste.ini - iniset $TROVE_API_PASTE_INI filter:tokenauth auth_host $KEYSTONE_AUTH_HOST - iniset $TROVE_API_PASTE_INI filter:tokenauth auth_port $KEYSTONE_AUTH_PORT - iniset $TROVE_API_PASTE_INI filter:tokenauth auth_protocol $KEYSTONE_AUTH_PROTOCOL - iniset $TROVE_API_PASTE_INI filter:tokenauth cafile $KEYSTONE_SSL_CA - iniset $TROVE_API_PASTE_INI filter:tokenauth admin_tenant_name $SERVICE_TENANT_NAME - iniset $TROVE_API_PASTE_INI filter:tokenauth admin_user trove - iniset $TROVE_API_PASTE_INI filter:tokenauth admin_password $SERVICE_PASSWORD - iniset $TROVE_API_PASTE_INI filter:tokenauth signing_dir $TROVE_AUTH_CACHE_DIR + iniset $TROVE_API_PASTE_INI filter:authtoken auth_host $KEYSTONE_AUTH_HOST + iniset $TROVE_API_PASTE_INI filter:authtoken auth_port $KEYSTONE_AUTH_PORT + iniset $TROVE_API_PASTE_INI filter:authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL + iniset $TROVE_API_PASTE_INI filter:authtoken cafile $KEYSTONE_SSL_CA + iniset $TROVE_API_PASTE_INI filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME + iniset $TROVE_API_PASTE_INI filter:authtoken admin_user trove + iniset $TROVE_API_PASTE_INI filter:authtoken 
admin_password $SERVICE_PASSWORD + iniset $TROVE_API_PASTE_INI filter:authtoken signing_dir $TROVE_AUTH_CACHE_DIR # (Re)create trove conf files rm -f $TROVE_CONF_DIR/trove.conf From 7bc783b95b2e115f40a4db8823823573afe7a768 Mon Sep 17 00:00:00 2001 From: Nathan Kinder Date: Fri, 31 Jan 2014 16:54:10 -0800 Subject: [PATCH 0676/4704] LDAP root DN creation fails When keystone is configured to set up an LDAP server to use as it's identity backend, the creation of the root DN fails. The problem is that one of the mods in the modify operation that sets up the root DN is incorrect, which causes the entire modify operation to fail. The incorrect mod is attempting to configure some attribute indexes, but one of the attributes it specifies is undefined. This patch removes the undefined attribute from the template that is used to create the modify operation. Change-Id: I413587130c64ca4f5f467b2ea1c0ab12867999ce Closes-Bug: 1275158 --- files/ldap/manager.ldif.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/ldap/manager.ldif.in b/files/ldap/manager.ldif.in index de3b69de7c..2f1f1395ee 100644 --- a/files/ldap/manager.ldif.in +++ b/files/ldap/manager.ldif.in @@ -12,4 +12,4 @@ olcRootPW: ${SLAPPASS} replace: olcDbIndex olcDbIndex: objectClass eq olcDbIndex: default pres,eq -olcDbIndex: cn,sn,givenName,co +olcDbIndex: cn,sn,givenName From 6bf1f1fb332c93cb4b74cf6b6511d2f9818a501d Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Sat, 1 Feb 2014 17:05:18 -0500 Subject: [PATCH 0677/4704] use ext4 for guest default ephemeral this isn't upstream default because of compatibility questions with really old host on providers. However there is no reason not to do it in devstack. 
Change-Id: I6438c0efb297cfa5d3dbb5f00701b24f01c39d14 --- lib/nova_plugins/hypervisor-libvirt | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt index 6f90f4ac17..42d3af15cf 100644 --- a/lib/nova_plugins/hypervisor-libvirt +++ b/lib/nova_plugins/hypervisor-libvirt @@ -108,6 +108,7 @@ EOF" iniset $NOVA_CONF DEFAULT libvirt_type "$LIBVIRT_TYPE" iniset $NOVA_CONF DEFAULT libvirt_cpu_mode "none" iniset $NOVA_CONF DEFAULT use_usb_tablet "False" + iniset $NOVA_CONF DEFAULT default_ephemeral_format "ext4" iniset $NOVA_CONF DEFAULT compute_driver "libvirt.LibvirtDriver" LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.libvirt.firewall.IptablesFirewallDriver"} iniset $NOVA_CONF DEFAULT firewall_driver "$LIBVIRT_FIREWALL_DRIVER" From 2ed4ae70b820ad3cbd12f2b6c2452ff66005ebaa Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Sun, 2 Feb 2014 09:38:05 +0100 Subject: [PATCH 0678/4704] Have ceilometer to respect the keystone settings lib/ceilometer ignored the global settings related to keystone settings. It can cause issues for example when the keystone does not listen on 127.0.0.1 even in single node deployment. 
Change-Id: I6e4654daa2ec624ac11aaf7f49495fcfaa72071d --- lib/ceilometer | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/ceilometer b/lib/ceilometer index 30bf3aed50..75c00b6b07 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -137,7 +137,9 @@ function configure_ceilometer() { iniset $CEILOMETER_CONF DEFAULT os_password $SERVICE_PASSWORD iniset $CEILOMETER_CONF DEFAULT os_tenant_name $SERVICE_TENANT_NAME - iniset $CEILOMETER_CONF keystone_authtoken auth_protocol http + iniset $CEILOMETER_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST + iniset $CEILOMETER_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT + iniset $CEILOMETER_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL iniset $CEILOMETER_CONF keystone_authtoken admin_user ceilometer iniset $CEILOMETER_CONF keystone_authtoken admin_password $SERVICE_PASSWORD iniset $CEILOMETER_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME From 85a85f87f814446dd2364eea1b6d976d50500203 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Tue, 21 Jan 2014 11:13:55 +0100 Subject: [PATCH 0679/4704] Use service role with glance service glance just used to admin role for token validation, the service role is sufficient for this. glance also needs an user with enough permission to use swift, so creating a dedictated service user for swift usage when s-proxy is enabled. 
Change-Id: I6df3905e5db35ea3421468ca1ee6d8de3271f8d1 --- files/keystone_data.sh | 24 +++++++++++++++++++----- lib/glance | 2 +- 2 files changed, 20 insertions(+), 6 deletions(-) diff --git a/files/keystone_data.sh b/files/keystone_data.sh index d477c42906..9a34c7616f 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -2,12 +2,14 @@ # # Initial data for Keystone using python-keystoneclient # -# Tenant User Roles +# Tenant User Roles # ------------------------------------------------------------------ -# service glance admin -# service heat service # if enabled +# service glance service +# service glance-swift ResellerAdmin +# service heat service # if enabled +# service ceilometer admin # if enabled # Tempest Only: -# alt_demo alt_demo Member +# alt_demo alt_demo Member # # Variables set before calling this script: # SERVICE_TOKEN - aka admin_token in keystone.conf @@ -96,7 +98,19 @@ if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then keystone user-role-add \ --tenant $SERVICE_TENANT_NAME \ --user glance \ - --role admin + --role service + # required for swift access + if [[ "$ENABLED_SERVICES" =~ "s-proxy" ]]; then + keystone user-create \ + --name=glance-swift \ + --pass="$SERVICE_PASSWORD" \ + --tenant $SERVICE_TENANT_NAME \ + --email=glance-swift@example.com + keystone user-role-add \ + --tenant $SERVICE_TENANT_NAME \ + --user glance-swift \ + --role ResellerAdmin + fi if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then keystone service-create \ --name=glance \ diff --git a/lib/glance b/lib/glance index 2d41ea4653..00f499a0b9 100644 --- a/lib/glance +++ b/lib/glance @@ -124,7 +124,7 @@ function configure_glance() { if is_service_enabled s-proxy; then iniset $GLANCE_API_CONF DEFAULT default_store swift iniset $GLANCE_API_CONF DEFAULT swift_store_auth_address $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ - iniset $GLANCE_API_CONF DEFAULT swift_store_user $SERVICE_TENANT_NAME:glance + iniset $GLANCE_API_CONF DEFAULT 
swift_store_user $SERVICE_TENANT_NAME:glance-swift iniset $GLANCE_API_CONF DEFAULT swift_store_key $SERVICE_PASSWORD iniset $GLANCE_API_CONF DEFAULT swift_store_create_container_on_put True From 8664ca53f80849553043aba9663f7cb72a9cec42 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Sun, 2 Feb 2014 10:07:39 +0100 Subject: [PATCH 0680/4704] bash_completion for heat and ceilometer Installing bash completion for heat and ceilometer by using a similar way used with other services. Change-Id: I5094648272f2666f6bff181bfa3aeb35e863bd97 --- lib/ceilometer | 1 + lib/heat | 1 + 2 files changed, 2 insertions(+) diff --git a/lib/ceilometer b/lib/ceilometer index 30bf3aed50..6a72459d41 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -105,6 +105,7 @@ function cleanup_ceilometer() { # configure_ceilometerclient() - Set config files, create data dirs, etc function configure_ceilometerclient() { setup_develop $CEILOMETERCLIENT_DIR + sudo install -D -m 0644 -o $STACK_USER {$CEILOMETERCLIENT_DIR/tools/,/etc/bash_completion.d/}ceilometer.bash_completion } # configure_ceilometer() - Set config files, create data dirs, etc diff --git a/lib/heat b/lib/heat index 467619f3c6..f171cb450c 100644 --- a/lib/heat +++ b/lib/heat @@ -157,6 +157,7 @@ function create_heat_cache_dir() { function install_heatclient() { git_clone $HEATCLIENT_REPO $HEATCLIENT_DIR $HEATCLIENT_BRANCH setup_develop $HEATCLIENT_DIR + sudo install -D -m 0644 -o $STACK_USER {$HEATCLIENT_DIR/tools/,/etc/bash_completion.d/}heat.bash_completion } # install_heat() - Collect source and prepare From 0af8122834917b4e44ee0cfae22eb5f93472f1a6 Mon Sep 17 00:00:00 2001 From: Robert Collins Date: Sun, 2 Feb 2014 09:59:07 +1300 Subject: [PATCH 0681/4704] Disable file injection for libvirt driver Change-Id: I73289195d3bb455f4076fadd2eadd6036b04b722 --- lib/nova | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/lib/nova b/lib/nova index d5f7514be5..0db242a34a 100644 --- a/lib/nova +++ b/lib/nova @@ -240,10 
+240,9 @@ function configure_nova() { sudo sysctl -w net.ipv4.ip_forward=1 if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then - # When libguestfs is available for file injection, enable using - # libguestfs to inspect the image and figure out the proper - # partition to inject into. - iniset $NOVA_CONF libvirt inject_partition '-1' + # File injection is being disabled by default in the near future - + # disable it here for now to avoid surprises later. + iniset $NOVA_CONF libvirt inject_partition '-2' # Check for kvm (hardware based virtualization). If unable to initialize # kvm, we drop back to the slower emulation mode (qemu). Note: many systems From 0d4bd7e6104bee974a544422456d731eb664805c Mon Sep 17 00:00:00 2001 From: Anita Kuno Date: Sun, 2 Feb 2014 14:59:39 -0600 Subject: [PATCH 0682/4704] Silence commands to echo copyright notices This patch silences commands that echoed copyright notices to the devstack logs. The copyright notices are moved to the top of the file as comments. Change-Id: I8d474a366af2954c168ba8d07329392f56e8e75a --- exercises/neutron-adv-test.sh | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/exercises/neutron-adv-test.sh b/exercises/neutron-adv-test.sh index 1343f11553..a9199e62a6 100755 --- a/exercises/neutron-adv-test.sh +++ b/exercises/neutron-adv-test.sh @@ -1,6 +1,11 @@ #!/usr/bin/env bash # - +# Copyright 2012, Cisco Systems +# Copyright 2012, VMware, Inc. +# Copyright 2012, NTT MCL, Inc. +# +# Please direct any questions to dedutta@cisco.com, dwendlandt@vmware.com, nachi@nttmcl.com +# # **neutron-adv-test.sh** # Perform integration testing of Nova and other components with Neutron. @@ -406,14 +411,6 @@ usage() { main() { echo Description - echo - echo Copyright 2012, Cisco Systems - echo Copyright 2012, VMware, Inc. - echo Copyright 2012, NTT MCL, Inc. 
- echo - echo Please direct any questions to dedutta@cisco.com, dwendlandt@vmware.com, nachi@nttmcl.com - echo - if [ $# -eq 0 ] ; then # if no args are provided, run all tests From c643ebb26dac484e56aea7b5f30d97fe7711f6f3 Mon Sep 17 00:00:00 2001 From: Bob Ball Date: Sun, 2 Feb 2014 09:16:20 +0000 Subject: [PATCH 0683/4704] XenAPI: Fix new useage of trueorfalse * Ensure that Xen setup scripts will continue to function when unset variables are used in stackrc * Ensure that the generic functions are sourced in all places that xenrc (which sources stackrc) is sourced. Change-Id: I54eba20733c2e149621b74a1387f0bef14fca12e --- tools/xen/build_xva.sh | 10 ++++++++++ tools/xen/prepare_guest_template.sh | 10 ++++++++++ tools/xen/xenrc | 3 +++ 3 files changed, 23 insertions(+) diff --git a/tools/xen/build_xva.sh b/tools/xen/build_xva.sh index 958102b29c..fbbfd6fbe5 100755 --- a/tools/xen/build_xva.sh +++ b/tools/xen/build_xva.sh @@ -21,9 +21,19 @@ set -o xtrace # This directory TOP_DIR=$(cd $(dirname "$0") && pwd) +# Source lower level functions +. $TOP_DIR/../../functions + # Include onexit commands . $TOP_DIR/scripts/on_exit.sh +# xapi functions +. $TOP_DIR/functions + +# Determine what system we are running on. +# Might not be XenServer if we're using xenserver-core +GetDistro + # Source params - override xenrc params in your localrc to suite your taste source xenrc diff --git a/tools/xen/prepare_guest_template.sh b/tools/xen/prepare_guest_template.sh index 546ac99cd9..4fa70d377d 100755 --- a/tools/xen/prepare_guest_template.sh +++ b/tools/xen/prepare_guest_template.sh @@ -22,9 +22,19 @@ set -o xtrace # This directory TOP_DIR=$(cd $(dirname "$0") && pwd) +# Source lower level functions +. $TOP_DIR/../../functions + # Include onexit commands . $TOP_DIR/scripts/on_exit.sh +# xapi functions +. $TOP_DIR/functions + +# Determine what system we are running on. 
+# Might not be XenServer if we're using xenserver-core +GetDistro + # Source params - override xenrc params in your localrc to suite your taste source xenrc diff --git a/tools/xen/xenrc b/tools/xen/xenrc index cd282341cb..96f3734a1d 100644 --- a/tools/xen/xenrc +++ b/tools/xen/xenrc @@ -91,4 +91,7 @@ UBUNTU_INST_GATEWAY="" # Set the size to 0 to avoid creation of additional disk. XEN_XVDB_SIZE_GB=0 +restore_nounset=`set +o | grep nounset` +set +u source ../../stackrc +$restore_nounset From ca920576cb9c36b7d26a3ce523c9d9a25b3f5db8 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 3 Feb 2014 15:26:20 +0100 Subject: [PATCH 0684/4704] nova: use the correct notification driver Nova now uses oslo.messaging and not the Oslo RPC code anymore, therefore the new driver should be used instead. Change-Id: I3533975ad38ff99bee6cfaa5332843444650f61f --- lib/nova | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/nova b/lib/nova index d5f7514be5..722b994896 100644 --- a/lib/nova +++ b/lib/nova @@ -447,7 +447,7 @@ function create_nova_conf() { iniset $NOVA_CONF DEFAULT instance_usage_audit "True" iniset $NOVA_CONF DEFAULT instance_usage_audit_period "hour" iniset $NOVA_CONF DEFAULT notify_on_state_change "vm_and_task_state" - iniset $NOVA_CONF DEFAULT notification_driver "nova.openstack.common.notifier.rpc_notifier" + iniset $NOVA_CONF DEFAULT notification_driver "messaging" fi # Provide some transition from ``EXTRA_FLAGS`` to ``EXTRA_OPTS`` From a03607d03f18fbe842bb61a509a868e1447fc379 Mon Sep 17 00:00:00 2001 From: Ivar Lazzaro Date: Mon, 3 Feb 2014 06:28:14 -0800 Subject: [PATCH 0685/4704] Embrane Plugin Support Implements blueprint embrane-plugin-support This commit implements Embrane's Neutron plugin installation support in Devstack. 
This is an extension of the openvswitch installation module, which is used by the main plugin, and enables configuration by localrc Change-Id: Ia4824f8d2300bcdce170d226145bbce6088f1557 --- lib/neutron_plugins/embrane | 40 +++++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) create mode 100644 lib/neutron_plugins/embrane diff --git a/lib/neutron_plugins/embrane b/lib/neutron_plugins/embrane new file mode 100644 index 0000000000..4206a2053c --- /dev/null +++ b/lib/neutron_plugins/embrane @@ -0,0 +1,40 @@ +# Neutron Embrane plugin +# --------------------------- + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + +source $TOP_DIR/lib/neutron_plugins/openvswitch + +save_function() { + local ORIG_FUNC=$(declare -f $1) + local NEW_FUNC="$2${ORIG_FUNC#$1}" + eval "$NEW_FUNC" +} + +save_function neutron_plugin_configure_service _neutron_plugin_configure_service + +function neutron_plugin_configure_common() { + Q_PLUGIN_CONF_PATH=etc/neutron/plugins/embrane + Q_PLUGIN_CONF_FILENAME=heleos_conf.ini + Q_DB_NAME="ovs_neutron" + Q_PLUGIN_CLASS="neutron.plugins.embrane.plugins.embrane_ovs_plugin.EmbraneOvsPlugin" +} + +function neutron_plugin_configure_service() { + _neutron_plugin_configure_service + iniset /$Q_PLUGIN_CONF_FILE heleos esm_mgmt $HELEOS_ESM_MGMT + iniset /$Q_PLUGIN_CONF_FILE heleos admin_username $HELEOS_ADMIN_USERNAME + iniset /$Q_PLUGIN_CONF_FILE heleos admin_password $HELEOS_ADMIN_PASSWORD + iniset /$Q_PLUGIN_CONF_FILE heleos router_image $HELEOS_ROUTER_IMAGE + iniset /$Q_PLUGIN_CONF_FILE heleos mgmt_id $HELEOS_MGMT_ID + iniset /$Q_PLUGIN_CONF_FILE heleos inband_id $HELEOS_INBAND_ID + iniset /$Q_PLUGIN_CONF_FILE heleos oob_id $HELEOS_OOB_ID + iniset /$Q_PLUGIN_CONF_FILE heleos dummy_utif_id $HELEOS_DUMMY_UTIF_ID + iniset /$Q_PLUGIN_CONF_FILE heleos resource_pool_id $HELEOS_RESOURCE_POOL_ID + iniset /$Q_PLUGIN_CONF_FILE heleos async_requests $HELEOS_ASYNC_REQUESTS +} + +# Restore xtrace +$MY_XTRACE \ No newline at end of 
file From 0656e12d6819f6dee671dd6200b2d0895e716c2c Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Mon, 3 Feb 2014 08:49:30 +0900 Subject: [PATCH 0686/4704] add ability to ignore rules in bash8 Change-Id: Ia6472f4bb251bf3e9846e08e30b2f9ea30ea1c03 --- tools/bash8.py | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/tools/bash8.py b/tools/bash8.py index edf7da4645..2623358182 100755 --- a/tools/bash8.py +++ b/tools/bash8.py @@ -30,8 +30,18 @@ import re import sys - ERRORS = 0 +IGNORE = None + + +def register_ignores(ignores): + global IGNORE + if ignores: + IGNORE='^(' + '|'.join(ignores.split(',')) + ')' + + +def should_ignore(error): + return IGNORE and re.search(IGNORE, error) def print_error(error, line): @@ -97,11 +107,13 @@ def get_options(): description='A bash script style checker') parser.add_argument('files', metavar='file', nargs='+', help='files to scan for errors') + parser.add_argument('-i', '--ignore', help='Rules to ignore') return parser.parse_args() def main(): opts = get_options() + register_ignores(opts.ignore) check_files(opts.files) if ERRORS > 0: From 864902ed01f92a9f587ebf0b582357fe2a9ea086 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Mon, 3 Feb 2014 21:00:39 +0000 Subject: [PATCH 0687/4704] Use github for swift3. swift3 is not on OpenStack infra (yet) use the github url instead. 
Closes-Bug: #1275923 Change-Id: I0cc393f93b65dcf8642b3a35925eb9eba3c2e1eb --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index 8a0280ecfa..b138f42546 100644 --- a/stackrc +++ b/stackrc @@ -162,7 +162,7 @@ REQUIREMENTS_BRANCH=${REQUIREMENTS_BRANCH:-master} # storage service SWIFT_REPO=${SWIFT_REPO:-${GIT_BASE}/openstack/swift.git} SWIFT_BRANCH=${SWIFT_BRANCH:-master} -SWIFT3_REPO=${SWIFT3_REPO:-${GIT_BASE}/fujita/swift3.git} +SWIFT3_REPO=${SWIFT3_REPO:-http://github.com/fujita/swift3.git} SWIFT3_BRANCH=${SWIFT3_BRANCH:-master} # python swift client library From 891277fbbdf65427b43f194adaafbbf2a4ac4800 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Mon, 3 Feb 2014 21:07:03 +0000 Subject: [PATCH 0688/4704] s3_token has been moved to keystoneclient. Change-Id: I6ffe756d517d11f323bd0c5d3b877d9a9f739a3b --- lib/swift | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/swift b/lib/swift index 37b630c3fa..a182e5adfb 100644 --- a/lib/swift +++ b/lib/swift @@ -336,7 +336,7 @@ function configure_swift() { # NOTE(chmou): s3token middleware is not updated yet to use only # username and password. [filter:s3token] -paste.filter_factory = keystone.middleware.s3_token:filter_factory +paste.filter_factory = keystoneclient.middleware.s3_token:filter_factory auth_port = ${KEYSTONE_AUTH_PORT} auth_host = ${KEYSTONE_AUTH_HOST} auth_protocol = ${KEYSTONE_AUTH_PROTOCOL} From f36a9b2136b4ba56ac2989f7829c55b4eb1c08af Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Mon, 3 Feb 2014 23:44:47 +0100 Subject: [PATCH 0689/4704] No need to loop over with pkill -f I guess four times is better than one but if we need four times to kill swift processes there is something pretty bad with it. 
Change-Id: Id2ea2f4ca60feb9fddc7b3181063760d2044b421 --- lib/swift | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/lib/swift b/lib/swift index 54d6f1c2e6..28ca8a80df 100644 --- a/lib/swift +++ b/lib/swift @@ -657,10 +657,8 @@ function stop_swift() { if type -p swift-init >/dev/null; then swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true fi - for type in proxy object container account; do - # Dump all of the servers - pkill -f swift- - done + # Dump all of the servers + pkill -f swift- } # Restore xtrace From 8dad4bde886ed2a5bb28d8eb43cfa874ee81c790 Mon Sep 17 00:00:00 2001 From: Arnaud Legendre Date: Mon, 3 Feb 2014 17:57:39 -0800 Subject: [PATCH 0690/4704] upload_image.sh to support streamOptimized disks The current version of the script will use "preallocated" as the disk type of a stream optimized disk. This needs to be fixed by introspecting the createType of the vmdk file. Closes-Bug: #1275993 Change-Id: I98594acecf26dd1164870f43890254a19ef23fe9 --- functions | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/functions b/functions index 73d65ce15b..281b6767c5 100644 --- a/functions +++ b/functions @@ -1450,7 +1450,7 @@ function upload_image() { # vmdk disk type vmdk_create_type="$(head -25 $IMAGE | grep -a -F -m 1 'createType=' $IMAGE)" vmdk_create_type="${vmdk_create_type#*\"}" - vmdk_create_type="${vmdk_create_type%?}" + vmdk_create_type="${vmdk_create_type%\"*}" descriptor_data_pair_msg="Monolithic flat and VMFS disks "` `"should use a descriptor-data pair." 
@@ -1495,6 +1495,8 @@ function upload_image() { IMAGE_NAME="${flat_fname}" fi vmdk_disktype="preallocated" + elif [[ "$vmdk_create_type" = "streamOptimized" ]]; then + vmdk_disktype="streamOptimized" elif [[ -z "$vmdk_create_type" ]]; then # *-flat.vmdk provided: attempt to retrieve the descriptor (*.vmdk) # to retrieve appropriate metadata @@ -1533,10 +1535,8 @@ function upload_image() { vmdk_adapter_type="${vmdk_adapter_type%?}" fi fi - #TODO(alegendre): handle streamOptimized once supported by the VMware driver. vmdk_disktype="preallocated" else - #TODO(alegendre): handle streamOptimized once supported by the VMware driver. vmdk_disktype="preallocated" fi From d70ba82b14b0c47fd87a957e9f2ca5ddda69948b Mon Sep 17 00:00:00 2001 From: Robert Collins Date: Tue, 4 Feb 2014 14:33:27 +1300 Subject: [PATCH 0691/4704] Move file injection setting to the right place The nova code was wiping nova.conf after our iniset :(. Change-Id: Ib618da1bd21da09f8855ec4691bff79c4c3b3d9c --- lib/nova | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/lib/nova b/lib/nova index 0db242a34a..dbc5c3db44 100644 --- a/lib/nova +++ b/lib/nova @@ -240,10 +240,6 @@ function configure_nova() { sudo sysctl -w net.ipv4.ip_forward=1 if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then - # File injection is being disabled by default in the near future - - # disable it here for now to avoid surprises later. - iniset $NOVA_CONF libvirt inject_partition '-2' - # Check for kvm (hardware based virtualization). If unable to initialize # kvm, we drop back to the slower emulation mode (qemu). Note: many systems # come with hardware virtualization disabled in BIOS. 
@@ -499,6 +495,12 @@ function create_nova_conf() { iniset $NOVA_CONF DEFAULT ec2_dmz_host "$EC2_DMZ_HOST" iniset_rpc_backend nova $NOVA_CONF DEFAULT iniset $NOVA_CONF DEFAULT glance_api_servers "$GLANCE_HOSTPORT" + + if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then + # File injection is being disabled by default in the near future - + # disable it here for now to avoid surprises later. + iniset $NOVA_CONF libvirt inject_partition '-2' + fi } function init_nova_cells() { From b408dd2072462f47ae294b601039c12136034e5e Mon Sep 17 00:00:00 2001 From: Denis Makogon Date: Tue, 4 Feb 2014 12:58:59 +0200 Subject: [PATCH 0692/4704] Remove unneeded guest conf values Reasons: - guest service doesn't depend on "sql_connection" value any more; - "control_exchange" already set in trove-guestagent.conf.sample to "trove"; Change-Id: Ifbdb21ac4639d86cf7775634f5b31cfb9739b49f Closes-Bug: #1256046 --- lib/trove | 2 -- 1 file changed, 2 deletions(-) diff --git a/lib/trove b/lib/trove index 2000446b13..bb4549121d 100644 --- a/lib/trove +++ b/lib/trove @@ -148,8 +148,6 @@ function configure_trove() { iniset $TROVE_CONF_DIR/trove.conf DEFAULT add_addresses True iniset $TROVE_LOCAL_CONF_DIR/trove-guestagent.conf.sample DEFAULT rabbit_password $RABBIT_PASSWORD - iniset $TROVE_LOCAL_CONF_DIR/trove-guestagent.conf.sample DEFAULT sql_connection `database_connection_url trove` - iniset $TROVE_LOCAL_CONF_DIR/trove-guestagent.conf.sample DEFAULT control_exchange trove sed -i "s/localhost/$NETWORK_GATEWAY/g" $TROVE_LOCAL_CONF_DIR/trove-guestagent.conf.sample setup_trove_logging $TROVE_CONF_DIR/trove.conf From db1c3847752c84a9fc06186a3352f02b76c1aa7c Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Tue, 4 Feb 2014 20:58:00 +0000 Subject: [PATCH 0693/4704] Fix config group for cinder multi_backend This commit just updates the config group for the multi_backend option. 
Tempest change d5c9602b created a volume-feature-enabled group and moved this option there but devstack was never updated with the change. Closes-Bug: #1276326 Change-Id: Icf2e96783feec4edbd4d477f8492651cd9bb3f01 --- lib/tempest | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index 06183b107b..6fa35d19fd 100644 --- a/lib/tempest +++ b/lib/tempest @@ -323,7 +323,7 @@ function configure_tempest() { fi CINDER_MULTI_LVM_BACKEND=$(trueorfalse False $CINDER_MULTI_LVM_BACKEND) if [ $CINDER_MULTI_LVM_BACKEND == "True" ]; then - iniset $TEMPEST_CONFIG volume multi_backend_enabled "True" + iniset $TEMPEST_CONFIG volume-feature-enabled multi_backend "True" iniset $TEMPEST_CONFIG volume backend1_name "LVM_iSCSI" iniset $TEMPEST_CONFIG volume backend2_name "LVM_iSCSI_2" fi From 41e36d6bcd3ab04cd3955aef68162c3266dc958e Mon Sep 17 00:00:00 2001 From: armando-migliaccio Date: Tue, 4 Feb 2014 13:39:32 -0800 Subject: [PATCH 0694/4704] Replace NvpPluginV2 with NsxPlugin The king is dead, long live the king! 
Partial-implements blueprint: nicira-plugin-renaming Change-Id: I9b71479a8d4228d45a6591b169c489c0107fb04c --- lib/neutron_plugins/vmware_nsx | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lib/neutron_plugins/vmware_nsx b/lib/neutron_plugins/vmware_nsx index d506cb6f8d..74f98df577 100644 --- a/lib/neutron_plugins/vmware_nsx +++ b/lib/neutron_plugins/vmware_nsx @@ -41,8 +41,7 @@ function neutron_plugin_configure_common() { Q_PLUGIN_CONF_PATH=etc/neutron/plugins/vmware Q_PLUGIN_CONF_FILENAME=nsx.ini Q_DB_NAME="neutron_nsx" - # TODO(armando-migliaccio): rename this once the code rename is complete - Q_PLUGIN_CLASS="neutron.plugins.nicira.NeutronPlugin.NvpPluginV2" + Q_PLUGIN_CLASS="neutron.plugins.vmware.plugin.NsxPlugin" } function neutron_plugin_configure_debug_command() { From 1023ff7c3ac184da00b6306f361f285301849881 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 27 Jan 2014 14:56:44 -0600 Subject: [PATCH 0695/4704] Move ironic to plugin Also adds an is_ironic_enabled() function to prepare for an upcoming change in is_service_enabled(). 
Change-Id: I6e6e0e8b70221e231785ab27e9b5d4836933ac4c --- extras.d/50-ironic.sh | 33 +++++++++++++++++++++++++++++++++ lib/ironic | 7 +++++++ stack.sh | 21 --------------------- unstack.sh | 7 ------- 4 files changed, 40 insertions(+), 28 deletions(-) create mode 100644 extras.d/50-ironic.sh diff --git a/extras.d/50-ironic.sh b/extras.d/50-ironic.sh new file mode 100644 index 0000000000..f68a14680f --- /dev/null +++ b/extras.d/50-ironic.sh @@ -0,0 +1,33 @@ +# ironic.sh - Devstack extras script to install ironic + +if is_service_enabled ir-api ir-cond; then + if [[ "$1" == "source" ]]; then + # Initial source + source $TOP_DIR/lib/ironic + elif [[ "$1" == "stack" && "$2" == "install" ]]; then + echo_summary "Installing Ironic" + install_ironic + install_ironicclient + cleanup_ironic + elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then + echo_summary "Configuring Ironic" + configure_ironic + + if is_service_enabled key; then + create_ironic_accounts + fi + + elif [[ "$1" == "stack" && "$2" == "extra" ]]; then + # Initialize ironic + init_ironic + + # Start the ironic API and ironic taskmgr components + echo_summary "Starting Ironic" + start_ironic + fi + + if [[ "$1" == "unstack" ]]; then + stop_ironic + cleanup_ironic + fi +fi diff --git a/lib/ironic b/lib/ironic index afbc3e09e4..afb7c23d2c 100644 --- a/lib/ironic +++ b/lib/ironic @@ -46,6 +46,13 @@ IRONIC_HOSTPORT=${IRONIC_HOSTPORT:-$SERVICE_HOST:6385} # Functions # --------- +# Test if any Ironic services are enabled +# is_ironic_enabled +function is_ironic_enabled { + [[ ,${ENABLED_SERVICES} =~ ,"ir-" ]] && return 0 + return 1 +} + # install_ironic() - Collect source and prepare function install_ironic() { git_clone $IRONIC_REPO $IRONIC_DIR $IRONIC_BRANCH diff --git a/stack.sh b/stack.sh index 45d47c819c..a1cf595cf0 100755 --- a/stack.sh +++ b/stack.sh @@ -336,7 +336,6 @@ source $TOP_DIR/lib/heat source $TOP_DIR/lib/neutron source $TOP_DIR/lib/baremetal source $TOP_DIR/lib/ldap -source $TOP_DIR/lib/ironic 
# Extras Source # -------------- @@ -746,11 +745,6 @@ if is_service_enabled tls-proxy; then # don't be naive and add to existing line! fi -if is_service_enabled ir-api ir-cond; then - install_ironic - install_ironicclient - configure_ironic -fi # Extras Install # -------------- @@ -966,15 +960,6 @@ if is_service_enabled g-reg; then fi -# Ironic -# ------ - -if is_service_enabled ir-api ir-cond; then - echo_summary "Configuring Ironic" - init_ironic -fi - - # Neutron # ------- @@ -1101,12 +1086,6 @@ if is_service_enabled g-api g-reg; then start_glance fi -# Launch the Ironic services -if is_service_enabled ir-api ir-cond; then - echo_summary "Starting Ironic" - start_ironic -fi - # Create an access key and secret key for nova ec2 register image if is_service_enabled key && is_service_enabled swift3 && is_service_enabled nova; then NOVA_USER_ID=$(keystone user-list | grep ' nova ' | get_field 1) diff --git a/unstack.sh b/unstack.sh index 92d0642c38..ea9c27d99b 100755 --- a/unstack.sh +++ b/unstack.sh @@ -55,7 +55,6 @@ source $TOP_DIR/lib/heat source $TOP_DIR/lib/neutron source $TOP_DIR/lib/baremetal source $TOP_DIR/lib/ldap -source $TOP_DIR/lib/ironic # Extras Source # -------------- @@ -118,12 +117,6 @@ if is_service_enabled s-proxy; then cleanup_swift fi -# Ironic runs daemons -if is_service_enabled ir-api ir-cond; then - stop_ironic - cleanup_ironic -fi - # Apache has the WSGI processes if is_service_enabled horizon; then stop_horizon From 75dbd9b1a3d6fa7d72b95d72a3102d8fdc76fd34 Mon Sep 17 00:00:00 2001 From: "Walter A. 
Boring IV" Date: Tue, 4 Feb 2014 14:56:15 -0800 Subject: [PATCH 0696/4704] Added the import of lib/infra This fixes an error in the devstack/functions setup_develop call, which tries to cd to $REQUIREMENTS_DIR, which is created in lib/infra Change-Id: Ie65d2ba83547acc4ea36d1191e6e90dc21da1fa7 Closes-Bug: #1276365 --- driver_certs/cinder_driver_cert.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/driver_certs/cinder_driver_cert.sh b/driver_certs/cinder_driver_cert.sh index edcc6d4800..0221e3779c 100755 --- a/driver_certs/cinder_driver_cert.sh +++ b/driver_certs/cinder_driver_cert.sh @@ -24,6 +24,7 @@ TOP_DIR=$(cd $CERT_DIR/..; pwd) source $TOP_DIR/functions source $TOP_DIR/stackrc source $TOP_DIR/openrc +source $TOP_DIR/lib/infra source $TOP_DIR/lib/tempest source $TOP_DIR/lib/cinder From 16dd8b3ed94d5cd217d22a26c18dca52bfca115e Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Mon, 3 Feb 2014 09:10:54 +0900 Subject: [PATCH 0697/4704] introduce if/then & for/do rules we mostly have a consistent style on if/then & for/do in devstack, except when we don't. This attempts to build a set of rules to enforce this. Because there are times when lines are legitimately long, and there is a continuation, this starts off ignoring if and for loops with continuations. But for short versions, we should enforce this. Changes to make devstack pass are included. The fact that the cleanup patch was so small is pretty solid reason that this is actually the style we've all agreed to. Part of a git stash from hong kong that I finally cleaned up. 
Change-Id: I6376d7afd59cc5ebba9ed69e5ee784a3d5934a10 --- lib/baremetal | 3 +-- lib/heat | 3 +-- lib/neutron_plugins/bigswitch_floodlight | 6 ++--- lib/neutron_plugins/nec | 3 +-- lib/neutron_thirdparty/bigswitch_floodlight | 3 +-- stack.sh | 4 +-- tests/functions.sh | 12 +++------ tools/bash8.py | 29 +++++++++++++++++++++ tools/xen/install_os_domU.sh | 3 +-- tools/xen/scripts/install-os-vpx.sh | 9 +++---- tools/xen/scripts/on_exit.sh | 6 ++--- tools/xen/test_functions.sh | 6 ++--- 12 files changed, 49 insertions(+), 38 deletions(-) diff --git a/lib/baremetal b/lib/baremetal index a0df85e700..d8cd7e936c 100644 --- a/lib/baremetal +++ b/lib/baremetal @@ -431,8 +431,7 @@ function upload_baremetal_image() { function clear_baremetal_of_all_nodes() { list=$(nova baremetal-node-list | awk -F '| ' 'NR>3 {print $2}' ) - for node in $list - do + for node in $list; do nova baremetal-node-delete $node done } diff --git a/lib/heat b/lib/heat index f171cb450c..9f5dd8b588 100644 --- a/lib/heat +++ b/lib/heat @@ -186,8 +186,7 @@ function disk_image_create { local elements=$2 local arch=$3 local output=$TOP_DIR/files/$4 - if [[ -f "$output.qcow2" ]]; - then + if [[ -f "$output.qcow2" ]]; then echo "Image file already exists: $output_file" else ELEMENTS_PATH=$elements_path disk-image-create \ diff --git a/lib/neutron_plugins/bigswitch_floodlight b/lib/neutron_plugins/bigswitch_floodlight index 93ec497bb9..1e4aa00121 100644 --- a/lib/neutron_plugins/bigswitch_floodlight +++ b/lib/neutron_plugins/bigswitch_floodlight @@ -44,16 +44,14 @@ function neutron_plugin_configure_plugin_agent() { function neutron_plugin_configure_service() { iniset /$Q_PLUGIN_CONF_FILE restproxy servers $BS_FL_CONTROLLERS_PORT iniset /$Q_PLUGIN_CONF_FILE restproxy servertimeout $BS_FL_CONTROLLER_TIMEOUT - if [ "$BS_FL_VIF_DRIVER" = "ivs" ] - then + if [ "$BS_FL_VIF_DRIVER" = "ivs" ]; then iniset /$Q_PLUGIN_CONF_FILE nova vif_type ivs fi } function neutron_plugin_setup_interface_driver() { local conf_file=$1 - 
if [ "$BS_FL_VIF_DRIVER" = "ivs" ] - then + if [ "$BS_FL_VIF_DRIVER" = "ivs" ]; then iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.IVSInterfaceDriver else iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver diff --git a/lib/neutron_plugins/nec b/lib/neutron_plugins/nec index d8d8b7ce7e..1cb2fef533 100644 --- a/lib/neutron_plugins/nec +++ b/lib/neutron_plugins/nec @@ -106,8 +106,7 @@ function _neutron_setup_ovs_tunnels() { local id=0 GRE_LOCAL_IP=${GRE_LOCAL_IP:-$HOST_IP} if [ -n "$GRE_REMOTE_IPS" ]; then - for ip in ${GRE_REMOTE_IPS//:/ } - do + for ip in ${GRE_REMOTE_IPS//:/ }; do if [[ "$ip" == "$GRE_LOCAL_IP" ]]; then continue fi diff --git a/lib/neutron_thirdparty/bigswitch_floodlight b/lib/neutron_thirdparty/bigswitch_floodlight index 1fd4fd801a..24c10443b7 100644 --- a/lib/neutron_thirdparty/bigswitch_floodlight +++ b/lib/neutron_thirdparty/bigswitch_floodlight @@ -24,8 +24,7 @@ function init_bigswitch_floodlight() { sudo ovs-vsctl --no-wait br-set-external-id ${OVS_BRIDGE} bridge-id ${OVS_BRIDGE} ctrls= - for ctrl in `echo ${BS_FL_CONTROLLERS_PORT} | tr ',' ' '` - do + for ctrl in `echo ${BS_FL_CONTROLLERS_PORT} | tr ',' ' '`; do ctrl=${ctrl%:*} ctrls="${ctrls} tcp:${ctrl}:${BS_FL_OF_PORT}" done diff --git a/stack.sh b/stack.sh index 45d47c819c..15e14303cf 100755 --- a/stack.sh +++ b/stack.sh @@ -1124,8 +1124,8 @@ fi # Create a randomized default value for the keymgr's fixed_key if is_service_enabled nova; then FIXED_KEY="" - for i in $(seq 1 64); - do FIXED_KEY+=$(echo "obase=16; $(($RANDOM % 16))" | bc); + for i in $(seq 1 64); do + FIXED_KEY+=$(echo "obase=16; $(($RANDOM % 16))" | bc); done; iniset $NOVA_CONF keymgr fixed_key "$FIXED_KEY" fi diff --git a/tests/functions.sh b/tests/functions.sh index 95dafe1028..06a4134abf 100755 --- a/tests/functions.sh +++ b/tests/functions.sh @@ -49,8 +49,7 @@ function test_enable_service() { ENABLED_SERVICES="$start" enable_service $add - if [ 
"$ENABLED_SERVICES" = "$finish" ] - then + if [ "$ENABLED_SERVICES" = "$finish" ]; then echo "OK: $start + $add -> $ENABLED_SERVICES" else echo "changing $start to $finish with $add failed: $ENABLED_SERVICES" @@ -76,8 +75,7 @@ function test_disable_service() { ENABLED_SERVICES="$start" disable_service "$del" - if [ "$ENABLED_SERVICES" = "$finish" ] - then + if [ "$ENABLED_SERVICES" = "$finish" ]; then echo "OK: $start - $del -> $ENABLED_SERVICES" else echo "changing $start to $finish with $del failed: $ENABLED_SERVICES" @@ -102,8 +100,7 @@ echo "Testing disable_all_services()" ENABLED_SERVICES=a,b,c disable_all_services -if [[ -z "$ENABLED_SERVICES" ]] -then +if [[ -z "$ENABLED_SERVICES" ]]; then echo "OK" else echo "disabling all services FAILED: $ENABLED_SERVICES" @@ -118,8 +115,7 @@ function test_disable_negated_services() { ENABLED_SERVICES="$start" disable_negated_services - if [ "$ENABLED_SERVICES" = "$finish" ] - then + if [ "$ENABLED_SERVICES" = "$finish" ]; then echo "OK: $start + $add -> $ENABLED_SERVICES" else echo "changing $start to $finish failed: $ENABLED_SERVICES" diff --git a/tools/bash8.py b/tools/bash8.py index 2623358182..9fb51ecc9e 100755 --- a/tools/bash8.py +++ b/tools/bash8.py @@ -21,9 +21,19 @@ # Currently Supported checks # # Errors +# Basic white space errors, for consistent indenting # - E001: check that lines do not end with trailing whitespace # - E002: ensure that indents are only spaces, and not hard tabs # - E003: ensure all indents are a multiple of 4 spaces +# +# Structure errors +# +# A set of rules that help keep things consistent in control blocks. 
+# These are ignored on long lines that have a continuation, because +# unrolling that is kind of "interesting" +# +# - E010: *do* not on the same line as *for* +# - E011: *then* not on the same line as *if* import argparse import fileinput @@ -51,6 +61,23 @@ def print_error(error, line): print(" - %s: L%s" % (fileinput.filename(), fileinput.filelineno())) +def not_continuation(line): + return not re.search('\\\\$', line) + +def check_for_do(line): + if not_continuation(line): + if re.search('^\s*for ', line): + if not re.search(';\s*do(\b|$)', line): + print_error('E010: Do not on same line as for', line) + + +def check_if_then(line): + if not_continuation(line): + if re.search('^\s*if \[', line): + if not re.search(';\s*then(\b|$)', line): + print_error('E011: Then non on same line as if', line) + + def check_no_trailing_whitespace(line): if re.search('[ \t]+$', line): print_error('E001: Trailing Whitespace', line) @@ -100,6 +127,8 @@ def check_files(files): check_no_trailing_whitespace(logical_line) check_indents(logical_line) + check_for_do(logical_line) + check_if_then(logical_line) def get_options(): diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh index 41b184c6ac..d172c7ba1b 100755 --- a/tools/xen/install_os_domU.sh +++ b/tools/xen/install_os_domU.sh @@ -194,8 +194,7 @@ function wait_for_VM_to_halt() { while true do state=$(xe_min vm-list name-label="$GUEST_NAME" power-state=halted) - if [ -n "$state" ] - then + if [ -n "$state" ]; then break else echo -n "." 
diff --git a/tools/xen/scripts/install-os-vpx.sh b/tools/xen/scripts/install-os-vpx.sh index 7b0d891493..8412fdc3ca 100755 --- a/tools/xen/scripts/install-os-vpx.sh +++ b/tools/xen/scripts/install-os-vpx.sh @@ -63,8 +63,7 @@ get_params() ;; esac done - if [[ -z $BRIDGE ]] - then + if [[ -z $BRIDGE ]]; then BRIDGE=xenbr0 fi @@ -91,8 +90,7 @@ xe_min() find_network() { result=$(xe_min network-list bridge="$1") - if [ "$result" = "" ] - then + if [ "$result" = "" ]; then result=$(xe_min network-list name-label="$1") fi echo "$result" @@ -121,8 +119,7 @@ destroy_vifs() { local v="$1" IFS=, - for vif in $(xe_min vif-list vm-uuid="$v") - do + for vif in $(xe_min vif-list vm-uuid="$v"); do xe vif-destroy uuid="$vif" done unset IFS diff --git a/tools/xen/scripts/on_exit.sh b/tools/xen/scripts/on_exit.sh index a4db39c225..2441e3d84a 100755 --- a/tools/xen/scripts/on_exit.sh +++ b/tools/xen/scripts/on_exit.sh @@ -7,8 +7,7 @@ declare -a on_exit_hooks on_exit() { - for i in $(seq $((${#on_exit_hooks[*]} - 1)) -1 0) - do + for i in $(seq $((${#on_exit_hooks[*]} - 1)) -1 0); do eval "${on_exit_hooks[$i]}" done } @@ -17,8 +16,7 @@ add_on_exit() { local n=${#on_exit_hooks[*]} on_exit_hooks[$n]="$*" - if [[ $n -eq 0 ]] - then + if [[ $n -eq 0 ]]; then trap on_exit EXIT fi } diff --git a/tools/xen/test_functions.sh b/tools/xen/test_functions.sh index 373d996760..838f86a525 100755 --- a/tools/xen/test_functions.sh +++ b/tools/xen/test_functions.sh @@ -227,16 +227,14 @@ function test_get_local_sr_path { } [ "$1" = "run_tests" ] && { - for testname in $($0) - do + for testname in $($0); do echo "$testname" before_each_test ( set -eux $testname ) - if [ "$?" != "0" ] - then + if [ "$?" 
!= "0" ]; then echo "FAIL" exit 1 else From 86a8e9767912ae957cbbf6ea20a08106011a7728 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Tue, 4 Feb 2014 15:20:15 +0100 Subject: [PATCH 0698/4704] Add while/until to the for/do rule Like 'for/do' check that the while/until operator are on the same line with the do. Fixes some pep8 error along the way. Change-Id: I440afe60691263365bf35310bf4212d94f30c339 --- tools/bash8.py | 10 +++++++--- tools/create_userrc.sh | 3 +-- tools/xen/install_os_domU.sh | 3 +-- tools/xen/scripts/install-os-vpx.sh | 3 +-- 4 files changed, 10 insertions(+), 9 deletions(-) diff --git a/tools/bash8.py b/tools/bash8.py index 9fb51ecc9e..7552e0d642 100755 --- a/tools/bash8.py +++ b/tools/bash8.py @@ -47,7 +47,7 @@ def register_ignores(ignores): global IGNORE if ignores: - IGNORE='^(' + '|'.join(ignores.split(',')) + ')' + IGNORE = '^(' + '|'.join(ignores.split(',')) + ')' def should_ignore(error): @@ -64,11 +64,15 @@ def print_error(error, line): def not_continuation(line): return not re.search('\\\\$', line) + def check_for_do(line): if not_continuation(line): - if re.search('^\s*for ', line): + match = re.match('^\s*(for|while|until)\s', line) + if match: + operator = match.group(1).strip() if not re.search(';\s*do(\b|$)', line): - print_error('E010: Do not on same line as for', line) + print_error('E010: Do not on same line as %s' % operator, + line) def check_if_then(line): diff --git a/tools/create_userrc.sh b/tools/create_userrc.sh index 5f4c48660b..e2d855c4df 100755 --- a/tools/create_userrc.sh +++ b/tools/create_userrc.sh @@ -71,8 +71,7 @@ MODE="" ROLE=Member USER_NAME="" USER_PASS="" -while [ $# -gt 0 ] -do +while [ $# -gt 0 ]; do case "$1" in -h|--help) display_help; exit 0 ;; --os-username) export OS_USERNAME=$2; shift ;; diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh index d172c7ba1b..d0d81a2d7e 100755 --- a/tools/xen/install_os_domU.sh +++ b/tools/xen/install_os_domU.sh @@ -191,8 +191,7 @@ function 
wait_for_VM_to_halt() { domid=$(xe vm-list name-label="$GUEST_NAME" params=dom-id minimal=true) port=$(xenstore-read /local/domain/$domid/console/vnc-port) echo "vncviewer -via root@$mgmt_ip localhost:${port:2}" - while true - do + while true; do state=$(xe_min vm-list name-label="$GUEST_NAME" power-state=halted) if [ -n "$state" ]; then break diff --git a/tools/xen/scripts/install-os-vpx.sh b/tools/xen/scripts/install-os-vpx.sh index 8412fdc3ca..b9b65fdce2 100755 --- a/tools/xen/scripts/install-os-vpx.sh +++ b/tools/xen/scripts/install-os-vpx.sh @@ -42,8 +42,7 @@ EOF get_params() { - while getopts "hbn:r:l:t:" OPTION; - do + while getopts "hbn:r:l:t:" OPTION; do case $OPTION in h) usage exit 1 From d15c8a082464695a4e715bab093bf4d876bbc341 Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Tue, 4 Feb 2014 12:38:14 +0000 Subject: [PATCH 0699/4704] Move install responsibilities to domU As we are moving forward to test XenAPI in the gate, it is necessary to move dom0 related modifications to be performed from domU. For this purpose, a new user is created, and that user should be used to talk to dom0 from domU. This change creates that user, makes it possible for dom0 to log in to domU with that account, and configure that account to be able to talk down to dom0. Also move several steps to the nova xenserver plugin: - dom0 plugin installation - create kernels and images directory - install console rotate script - configure a cron to execute console rotate script Configuration changes: A new configuration option, DOMZERO_USER has been created, that specifies a user account that is configured to be able to do passwordless ssh to dom0. 
Change-Id: If9de0b297a67b7cdb5de78d8dd0e8b2ca578b601 --- lib/nova_plugins/hypervisor-xenserver | 28 ++++++++++++++ stackrc | 4 ++ tools/xen/functions | 8 ++++ tools/xen/install_os_domU.sh | 51 ++++++++++++++++---------- tools/xen/prepare_guest.sh | 53 +++++++++++++++++++++++++++ tools/xen/prepare_guest_template.sh | 2 +- 6 files changed, 126 insertions(+), 20 deletions(-) diff --git a/lib/nova_plugins/hypervisor-xenserver b/lib/nova_plugins/hypervisor-xenserver index f47994f187..9843261065 100644 --- a/lib/nova_plugins/hypervisor-xenserver +++ b/lib/nova_plugins/hypervisor-xenserver @@ -56,6 +56,34 @@ function configure_nova_hypervisor() { # Need to avoid crash due to new firewall support XEN_FIREWALL_DRIVER=${XEN_FIREWALL_DRIVER:-"nova.virt.firewall.IptablesFirewallDriver"} iniset $NOVA_CONF DEFAULT firewall_driver "$XEN_FIREWALL_DRIVER" + + local dom0_ip + dom0_ip=$(echo "$XENAPI_CONNECTION_URL" | cut -d "/" -f 3-) + + local ssh_dom0 + ssh_dom0="sudo -u $DOMZERO_USER ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null root@$dom0_ip" + + # install nova plugins to dom0 + tar -czf - -C $NOVA_DIR/plugins/xenserver/xenapi/etc/xapi.d/plugins/ ./ | + $ssh_dom0 'tar -xzf - -C /etc/xapi.d/plugins/ && chmod a+x /etc/xapi.d/plugins/*' + + # install console logrotate script + tar -czf - -C $NOVA_DIR/tools/xenserver/ rotate_xen_guest_logs.sh | + $ssh_dom0 'tar -xzf - -C /root/ && chmod +x /root/rotate_xen_guest_logs.sh && mkdir -p /var/log/xen/guest' + + # Create a cron job that will rotate guest logs + $ssh_dom0 crontab - << CRONTAB +* * * * * /root/rotate_xen_guest_logs.sh +CRONTAB + + # Create directories for kernels and images + { + echo "set -eux" + cat $TOP_DIR/tools/xen/functions + echo "create_directory_for_images" + echo "create_directory_for_kernels" + } | $ssh_dom0 + } # install_nova_hypervisor() - Install external components diff --git a/stackrc b/stackrc index e89d25e4ab..db5b1889af 100644 --- a/stackrc +++ b/stackrc @@ -245,6 +245,10 @@ case 
"$VIRT_DRIVER" in xenserver) # Xen config common to nova and neutron XENAPI_USER=${XENAPI_USER:-"root"} + # This user will be used for dom0 - domU communication + # should be able to log in to dom0 without a password + # will be used to install the plugins + DOMZERO_USER=${DOMZERO_USER:-"domzero"} ;; *) ;; diff --git a/tools/xen/functions b/tools/xen/functions index 97c56bc1af..ab0be84bd2 100644 --- a/tools/xen/functions +++ b/tools/xen/functions @@ -336,3 +336,11 @@ function max_vcpus() { xe vm-param-set uuid=$vm VCPUs-max=$cpu_count xe vm-param-set uuid=$vm VCPUs-at-startup=$cpu_count } + +function get_domid() { + local vm_name_label + + vm_name_label="$1" + + xe vm-list name-label="$vm_name_label" params=dom-id minimal=true +} diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh index 41b184c6ac..663f09c1b4 100755 --- a/tools/xen/install_os_domU.sh +++ b/tools/xen/install_os_domU.sh @@ -67,21 +67,6 @@ fi # Install plugins -## Nova plugins -NOVA_ZIPBALL_URL=${NOVA_ZIPBALL_URL:-$(zip_snapshot_location $NOVA_REPO $NOVA_BRANCH)} -EXTRACTED_NOVA=$(extract_remote_zipball "$NOVA_ZIPBALL_URL") -install_xapi_plugins_from "$EXTRACTED_NOVA" - -LOGROT_SCRIPT=$(find "$EXTRACTED_NOVA" -name "rotate_xen_guest_logs.sh" -print) -if [ -n "$LOGROT_SCRIPT" ]; then - mkdir -p "/var/log/xen/guest" - cp "$LOGROT_SCRIPT" /root/consolelogrotate - chmod +x /root/consolelogrotate - echo "* * * * * /root/consolelogrotate" | crontab -fi - -rm -rf "$EXTRACTED_NOVA" - ## Install the netwrap xapi plugin to support agent control of dom0 networking if [[ "$ENABLED_SERVICES" =~ "q-agt" && "$Q_PLUGIN" = "openvswitch" ]]; then NEUTRON_ZIPBALL_URL=${NEUTRON_ZIPBALL_URL:-$(zip_snapshot_location $NEUTRON_REPO $NEUTRON_BRANCH)} @@ -90,9 +75,6 @@ if [[ "$ENABLED_SERVICES" =~ "q-agt" && "$Q_PLUGIN" = "openvswitch" ]]; then rm -rf "$EXTRACTED_NEUTRON" fi -create_directory_for_kernels -create_directory_for_images - # # Configure Networking # @@ -188,7 +170,7 @@ function 
wait_for_VM_to_halt() { set +x echo "Waiting for the VM to halt. Progress in-VM can be checked with vncviewer:" mgmt_ip=$(echo $XENAPI_CONNECTION_URL | tr -d -c '1234567890.') - domid=$(xe vm-list name-label="$GUEST_NAME" params=dom-id minimal=true) + domid=$(get_domid "$GUEST_NAME") port=$(xenstore-read /local/domain/$domid/console/vnc-port) echo "vncviewer -via root@$mgmt_ip localhost:${port:2}" while true @@ -361,6 +343,37 @@ else fi fi +# Create an ssh-keypair, and set it up for dom0 user +rm -f /root/dom0key /root/dom0key.pub +ssh-keygen -f /root/dom0key -P "" -C "dom0" +DOMID=$(get_domid "$GUEST_NAME") + +xenstore-write /local/domain/$DOMID/authorized_keys/$DOMZERO_USER "$(cat /root/dom0key.pub)" +xenstore-chmod -u /local/domain/$DOMID/authorized_keys/$DOMZERO_USER r$DOMID + +function run_on_appliance() { + ssh \ + -i /root/dom0key \ + -o UserKnownHostsFile=/dev/null \ + -o StrictHostKeyChecking=no \ + -o BatchMode=yes \ + "$DOMZERO_USER@$OS_VM_MANAGEMENT_ADDRESS" "$@" +} + +# Wait until we can log in to the appliance +while ! 
run_on_appliance true; do + sleep 1 +done + +# Remove authenticated_keys updater cronjob +echo "" | run_on_appliance crontab - + +# Generate a passwordless ssh key for domzero user +echo "ssh-keygen -f /home/$DOMZERO_USER/.ssh/id_rsa -C $DOMZERO_USER@appliance -N \"\" -q" | run_on_appliance + +# Authenticate that user to dom0 +run_on_appliance cat /home/$DOMZERO_USER/.ssh/id_rsa.pub >> /root/.ssh/authorized_keys + # If we have copied our ssh credentials, use ssh to monitor while the installation runs WAIT_TILL_LAUNCH=${WAIT_TILL_LAUNCH:-1} COPYENV=${COPYENV:-1} diff --git a/tools/xen/prepare_guest.sh b/tools/xen/prepare_guest.sh index 05ac86cf99..094612624b 100755 --- a/tools/xen/prepare_guest.sh +++ b/tools/xen/prepare_guest.sh @@ -18,6 +18,57 @@ set -o xtrace GUEST_PASSWORD="$1" XS_TOOLS_PATH="$2" STACK_USER="$3" +DOMZERO_USER="$4" + + +function setup_domzero_user() { + local username + + username="$1" + + local key_updater_script + local sudoers_file + key_updater_script="/home/$username/update_authorized_keys.sh" + sudoers_file="/etc/sudoers.d/allow_$username" + + # Create user + adduser --disabled-password --quiet "$username" --gecos "$username" + + # Give passwordless sudo + cat > $sudoers_file << EOF + $username ALL = NOPASSWD: ALL +EOF + chmod 0440 $sudoers_file + + # A script to populate this user's authenticated_keys from xenstore + cat > $key_updater_script << EOF +#!/bin/bash +set -eux + +DOMID=\$(sudo xenstore-read domid) +sudo xenstore-exists /local/domain/\$DOMID/authorized_keys/$username +sudo xenstore-read /local/domain/\$DOMID/authorized_keys/$username > /home/$username/xenstore_value +cat /home/$username/xenstore_value > /home/$username/.ssh/authorized_keys +EOF + + # Give the key updater to the user + chown $username:$username $key_updater_script + chmod 0700 $key_updater_script + + # Setup the .ssh folder + mkdir -p /home/$username/.ssh + chown $username:$username /home/$username/.ssh + chmod 0700 /home/$username/.ssh + touch 
/home/$username/.ssh/authorized_keys + chown $username:$username /home/$username/.ssh/authorized_keys + chmod 0600 /home/$username/.ssh/authorized_keys + + # Setup the key updater as a cron job + crontab -u $username - << EOF +* * * * * $key_updater_script +EOF + +} # Install basics apt-get update @@ -48,6 +99,8 @@ useradd $STACK_USER -s /bin/bash -d /opt/stack -G libvirtd echo $STACK_USER:$GUEST_PASSWORD | chpasswd echo "$STACK_USER ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers +setup_domzero_user "$DOMZERO_USER" + # Add an udev rule, so that new block devices could be written by stack user cat > /etc/udev/rules.d/50-openstack-blockdev.rules << EOF KERNEL=="xvd[b-z]", GROUP="$STACK_USER", MODE="0660" diff --git a/tools/xen/prepare_guest_template.sh b/tools/xen/prepare_guest_template.sh index 546ac99cd9..a25535dc22 100755 --- a/tools/xen/prepare_guest_template.sh +++ b/tools/xen/prepare_guest_template.sh @@ -76,7 +76,7 @@ cp $STAGING_DIR/etc/rc.local $STAGING_DIR/etc/rc.local.preparebackup cat <$STAGING_DIR/etc/rc.local #!/bin/sh -e bash /opt/stack/prepare_guest.sh \\ - "$GUEST_PASSWORD" "$XS_TOOLS_PATH" "$STACK_USER" \\ + "$GUEST_PASSWORD" "$XS_TOOLS_PATH" "$STACK_USER" "$DOMZERO_USER" \\ > /opt/stack/prepare_guest.log 2>&1 EOF From a7a23addd3634d890a44ff3e44ebefe29a3f7910 Mon Sep 17 00:00:00 2001 From: Brant Knudson Date: Wed, 5 Feb 2014 15:19:27 -0600 Subject: [PATCH 0700/4704] Update orchestration-related service names in template catalog The orchestration-related service names were not consistent with the other AWS compatibility and native API names, so this change makes them consistent. 
Related-Bug: #1240138 Change-Id: I29a26bc6b0ddab0bff579a900e28da65df097a96 --- files/default_catalog.templates | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/files/default_catalog.templates b/files/default_catalog.templates index e64f68f033..ff00e38e09 100644 --- a/files/default_catalog.templates +++ b/files/default_catalog.templates @@ -50,12 +50,12 @@ catalog.RegionOne.image.name = Image Service catalog.RegionOne.cloudformation.publicURL = http://%SERVICE_HOST%:8000/v1 catalog.RegionOne.cloudformation.adminURL = http://%SERVICE_HOST%:8000/v1 catalog.RegionOne.cloudformation.internalURL = http://%SERVICE_HOST%:8000/v1 -catalog.RegionOne.cloudformation.name = Heat CloudFormation Service +catalog.RegionOne.cloudformation.name = CloudFormation service catalog.RegionOne.orchestration.publicURL = http://%SERVICE_HOST%:8004/v1/$(tenant_id)s catalog.RegionOne.orchestration.adminURL = http://%SERVICE_HOST%:8004/v1/$(tenant_id)s catalog.RegionOne.orchestration.internalURL = http://%SERVICE_HOST%:8004/v1/$(tenant_id)s -catalog.RegionOne.orchestration.name = Heat Service +catalog.RegionOne.orchestration.name = Orchestration Service catalog.RegionOne.metering.publicURL = http://%SERVICE_HOST%:8777/v1 catalog.RegionOne.metering.adminURL = http://%SERVICE_HOST%:8777/v1 From d5d4974cb72880799d7ec736237ca01eacb2f6da Mon Sep 17 00:00:00 2001 From: Thierry Carrez Date: Thu, 6 Feb 2014 16:00:08 +0100 Subject: [PATCH 0701/4704] Cleanup cinder-rootwrap support cinder_rootwrap support in devstack handled a number of now-abandoned use cases: - no $CINDER_DIR/etc/cinder/rootwrap.d (old-style rootwrap) - using oslo-rootwrap instead of cinder-rootwrap (abandoned experiment) This change removes unused code paths and aligns configure_cinder_rootwrap() with configure_nova_rootwrap(). 
Change-Id: I387808dae0e064cc9c894c74ab78e86124f08dd2 --- lib/cinder | 53 +++++++++++++++++++---------------------------------- 1 file changed, 19 insertions(+), 34 deletions(-) diff --git a/lib/cinder b/lib/cinder index 9f70b2a0c9..75e9c97e80 100644 --- a/lib/cinder +++ b/lib/cinder @@ -170,43 +170,28 @@ function cleanup_cinder() { function configure_cinder_rootwrap() { # Set the paths of certain binaries CINDER_ROOTWRAP=$(get_rootwrap_location cinder) - if [[ ! -x $CINDER_ROOTWRAP ]]; then - CINDER_ROOTWRAP=$(get_rootwrap_location oslo) - if [[ ! -x $CINDER_ROOTWRAP ]]; then - die $LINENO "No suitable rootwrap found." - fi - fi - # If Cinder ships the new rootwrap filters files, deploy them - # (owned by root) and add a parameter to $CINDER_ROOTWRAP - ROOTWRAP_CINDER_SUDOER_CMD="$CINDER_ROOTWRAP" - if [[ -d $CINDER_DIR/etc/cinder/rootwrap.d ]]; then - # Wipe any existing rootwrap.d files first - if [[ -d $CINDER_CONF_DIR/rootwrap.d ]]; then - sudo rm -rf $CINDER_CONF_DIR/rootwrap.d - fi - # Deploy filters to /etc/cinder/rootwrap.d - sudo mkdir -m 755 $CINDER_CONF_DIR/rootwrap.d - sudo cp $CINDER_DIR/etc/cinder/rootwrap.d/*.filters $CINDER_CONF_DIR/rootwrap.d - sudo chown -R root:root $CINDER_CONF_DIR/rootwrap.d - sudo chmod 644 $CINDER_CONF_DIR/rootwrap.d/* - # Set up rootwrap.conf, pointing to /etc/cinder/rootwrap.d - if [[ -f $CINDER_DIR/etc/cinder/rootwrap.conf ]]; then - sudo cp $CINDER_DIR/etc/cinder/rootwrap.conf $CINDER_CONF_DIR/ - else - # rootwrap.conf is no longer shipped in Cinder itself - echo "filters_path=" | sudo tee $CINDER_CONF_DIR/rootwrap.conf > /dev/null - fi - sudo sed -e "s:^filters_path=.*$:filters_path=$CINDER_CONF_DIR/rootwrap.d:" -i $CINDER_CONF_DIR/rootwrap.conf - sudo chown root:root $CINDER_CONF_DIR/rootwrap.conf - sudo chmod 0644 $CINDER_CONF_DIR/rootwrap.conf - # Specify rootwrap.conf as first parameter to rootwrap - CINDER_ROOTWRAP="$CINDER_ROOTWRAP $CINDER_CONF_DIR/rootwrap.conf" - ROOTWRAP_CINDER_SUDOER_CMD="$CINDER_ROOTWRAP *" + 
# Deploy new rootwrap filters files (owned by root). + # Wipe any existing rootwrap.d files first + if [[ -d $CINDER_CONF_DIR/rootwrap.d ]]; then + sudo rm -rf $CINDER_CONF_DIR/rootwrap.d fi - + # Deploy filters to /etc/cinder/rootwrap.d + sudo mkdir -m 755 $CINDER_CONF_DIR/rootwrap.d + sudo cp $CINDER_DIR/etc/cinder/rootwrap.d/*.filters $CINDER_CONF_DIR/rootwrap.d + sudo chown -R root:root $CINDER_CONF_DIR/rootwrap.d + sudo chmod 644 $CINDER_CONF_DIR/rootwrap.d/* + # Set up rootwrap.conf, pointing to /etc/cinder/rootwrap.d + sudo cp $CINDER_DIR/etc/cinder/rootwrap.conf $CINDER_CONF_DIR/ + sudo sed -e "s:^filters_path=.*$:filters_path=$CINDER_CONF_DIR/rootwrap.d:" -i $CINDER_CONF_DIR/rootwrap.conf + sudo chown root:root $CINDER_CONF_DIR/rootwrap.conf + sudo chmod 0644 $CINDER_CONF_DIR/rootwrap.conf + # Specify rootwrap.conf as first parameter to rootwrap + ROOTWRAP_CSUDOER_CMD="$CINDER_ROOTWRAP $CINDER_CONF_DIR/rootwrap.conf *" + + # Set up the rootwrap sudoers for cinder TEMPFILE=`mktemp` - echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_CINDER_SUDOER_CMD" >$TEMPFILE + echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_CSUDOER_CMD" >$TEMPFILE chmod 0440 $TEMPFILE sudo chown root:root $TEMPFILE sudo mv $TEMPFILE /etc/sudoers.d/cinder-rootwrap From 6c9430e5679c36ecdc827184cf160297458c4a3c Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Thu, 6 Feb 2014 17:06:00 +0000 Subject: [PATCH 0702/4704] Set uri_v3 in tempest config This properly sets the v3 uri for keystone in the tempest config. Previously tempest would just guess the v3 uri by replacing v2 with v3. However, moving forward this will no longer be the case so devstack should properly set this uri to enable tempest to use the keystone v3 api in addition to the v2. 
Change-Id: Ib02b2e9f24d8ca1f381186c48747ca0fbc45f3f1 --- lib/tempest | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/tempest b/lib/tempest index 06183b107b..1eea9b6bb4 100644 --- a/lib/tempest +++ b/lib/tempest @@ -251,6 +251,7 @@ function configure_tempest() { # Identity iniset $TEMPEST_CONFIG identity uri "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:5000/v2.0/" + iniset $TEMPEST_CONFIG identity uri_v3 "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:5000/v3/" iniset $TEMPEST_CONFIG identity password "$password" iniset $TEMPEST_CONFIG identity alt_username $ALT_USERNAME iniset $TEMPEST_CONFIG identity alt_password "$password" From e4fa72132228688d2fe74dd974fe04b0fe4c3d6b Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 15 Jan 2014 15:04:49 -0600 Subject: [PATCH 0703/4704] Begin is_service_enabled() cleanup This converts the special cases in the is_service_enabled() function to call individual functions declared by the projects. This allows projects that are not in the DevStack repo and called via the extras.d plugin to handle an equivalent service alias. 
* Ceilometer * Cinder * Glance * Neutron * Nova * Swift TODO: remove the tests from is_service_enabled() after a transition period Patch Set 2: Rebased Change-Id: Ic78be433f93a9dd5f46be548bdbd4c984e0da6e7 --- clean.sh | 2 +- exercises/boot_from_volume.sh | 8 +++----- exercises/euca.sh | 5 ----- exercises/floating_ips.sh | 8 +++----- exercises/volumes.sh | 8 +++----- functions | 10 ++++++++++ lib/ceilometer | 9 ++++++++- lib/cinder | 8 ++++++++ lib/glance | 7 +++++++ lib/neutron | 7 +++++++ lib/nova | 14 ++++++++++++++ lib/swift | 7 +++++++ lib/template | 8 ++++++++ stack.sh | 2 +- stackrc | 2 +- unstack.sh | 2 +- 16 files changed, 82 insertions(+), 25 deletions(-) diff --git a/clean.sh b/clean.sh index e16bdb7f36..09f08dc8c2 100755 --- a/clean.sh +++ b/clean.sh @@ -97,7 +97,7 @@ if is_service_enabled ldap; then fi # Do the hypervisor cleanup until this can be moved back into lib/nova -if [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then +if is_service_enabled nova && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then cleanup_nova_hypervisor fi diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh index ed8ba6310e..79120460b8 100755 --- a/exercises/boot_from_volume.sh +++ b/exercises/boot_from_volume.sh @@ -30,14 +30,12 @@ TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) # Import common functions source $TOP_DIR/functions +# Import project functions +source $TOP_DIR/lib/cinder + # Import configuration source $TOP_DIR/openrc -# Import neutron functions if needed -if is_service_enabled neutron; then - source $TOP_DIR/lib/neutron -fi - # Import exercise configuration source $TOP_DIR/exerciserc diff --git a/exercises/euca.sh b/exercises/euca.sh index 51b2644458..ad852a4f79 100755 --- a/exercises/euca.sh +++ b/exercises/euca.sh @@ -33,11 +33,6 @@ source $TOP_DIR/functions # Import EC2 configuration source $TOP_DIR/eucarc -# Import neutron functions if needed -if is_service_enabled neutron; then - source $TOP_DIR/lib/neutron -fi - # Import exercise 
configuration source $TOP_DIR/exerciserc diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh index 4ca90a5c35..b981aa8294 100755 --- a/exercises/floating_ips.sh +++ b/exercises/floating_ips.sh @@ -27,14 +27,12 @@ TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) # Import common functions source $TOP_DIR/functions +# Import project functions +source $TOP_DIR/lib/neutron + # Import configuration source $TOP_DIR/openrc -# Import neutron functions if needed -if is_service_enabled neutron; then - source $TOP_DIR/lib/neutron -fi - # Import exercise configuration source $TOP_DIR/exerciserc diff --git a/exercises/volumes.sh b/exercises/volumes.sh index 21b5d21c04..33e24589eb 100755 --- a/exercises/volumes.sh +++ b/exercises/volumes.sh @@ -27,14 +27,12 @@ TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) # Import common functions source $TOP_DIR/functions +# Import project functions +source $TOP_DIR/lib/cinder + # Import configuration source $TOP_DIR/openrc -# Import neutron functions if needed -if is_service_enabled neutron; then - source $TOP_DIR/lib/neutron -fi - # Import exercise configuration source $TOP_DIR/exerciserc diff --git a/functions b/functions index 281b6767c5..dc3278b56d 100644 --- a/functions +++ b/functions @@ -840,6 +840,16 @@ function is_service_enabled() { services=$@ for service in ${services}; do [[ ,${ENABLED_SERVICES}, =~ ,${service}, ]] && return 0 + + # Look for top-level 'enabled' function for this service + if type is_${service}_enabled >/dev/null 2>&1; then + # A function exists for this service, use it + is_${service}_enabled + return $? 
+ fi + + # TODO(dtroyer): Remove these legacy special-cases after the is_XXX_enabled() + # are implemented [[ ${service} == n-cell-* && ${ENABLED_SERVICES} =~ "n-cell" ]] && return 0 [[ ${service} == "nova" && ${ENABLED_SERVICES} =~ "n-" ]] && return 0 [[ ${service} == "cinder" && ${ENABLED_SERVICES} =~ "c-" ]] && return 0 diff --git a/lib/ceilometer b/lib/ceilometer index f9c76915d5..4ca77bb72b 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -59,7 +59,14 @@ TEMPEST_SERVICES+=,ceilometer # Functions # --------- -# + +# Test if any Ceilometer services are enabled +# is_ceilometer_enabled +function is_ceilometer_enabled { + [[ ,${ENABLED_SERVICES} =~ ,"ceilometer-" ]] && return 0 + return 1 +} + # create_ceilometer_accounts() - Set up common required ceilometer accounts create_ceilometer_accounts() { diff --git a/lib/cinder b/lib/cinder index 9f70b2a0c9..3ec0fd4f09 100644 --- a/lib/cinder +++ b/lib/cinder @@ -85,6 +85,14 @@ TEMPEST_SERVICES+=,cinder # Functions # --------- + +# Test if any Cinder services are enabled +# is_cinder_enabled +function is_cinder_enabled { + [[ ,${ENABLED_SERVICES} =~ ,"c-" ]] && return 0 + return 1 +} + # _clean_lvm_lv removes all cinder LVM volumes # # Usage: _clean_lvm_lv $VOLUME_GROUP $VOLUME_NAME_PREFIX diff --git a/lib/glance b/lib/glance index a5cb360743..1ebeeb3b2e 100644 --- a/lib/glance +++ b/lib/glance @@ -59,6 +59,13 @@ TEMPEST_SERVICES+=,glance # Functions # --------- +# Test if any Glance services are enabled +# is_glance_enabled +function is_glance_enabled { + [[ ,${ENABLED_SERVICES} =~ ,"g-" ]] && return 0 + return 1 +} + # cleanup_glance() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_glance() { diff --git a/lib/neutron b/lib/neutron index 81db2a74d1..5bd38bcf73 100644 --- a/lib/neutron +++ b/lib/neutron @@ -244,6 +244,13 @@ TEMPEST_SERVICES+=,neutron # Functions # --------- +# Test if any Neutron services are enabled +# is_neutron_enabled 
+function is_neutron_enabled { + [[ ,${ENABLED_SERVICES} =~ ,"q-" ]] && return 0 + return 1 +} + # configure_neutron() # Set common config for all neutron server and agents. function configure_neutron() { diff --git a/lib/nova b/lib/nova index dbc5c3db44..c6d99367c2 100644 --- a/lib/nova +++ b/lib/nova @@ -129,6 +129,20 @@ TEMPEST_SERVICES+=,nova # Functions # --------- +# Test if any Nova services are enabled +# is_nova_enabled +function is_nova_enabled { + [[ ,${ENABLED_SERVICES} =~ ,"n-" ]] && return 0 + return 1 +} + +# Test if any Nova Cell services are enabled +# is_nova_enabled +function is_n-cell_enabled { + [[ ,${ENABLED_SERVICES} =~ ,"n-cell-" ]] && return 0 + return 1 +} + # Helper to clean iptables rules function clean_iptables() { # Delete rules diff --git a/lib/swift b/lib/swift index 28ca8a80df..197c01b63c 100644 --- a/lib/swift +++ b/lib/swift @@ -118,6 +118,13 @@ TEMPEST_SERVICES+=,swift # Functions # --------- +# Test if any Swift services are enabled +# is_swift_enabled +function is_swift_enabled { + [[ ,${ENABLED_SERVICES} =~ ,"s-" ]] && return 0 + return 1 +} + # cleanup_swift() - Remove residual data files function cleanup_swift() { rm -f ${SWIFT_CONF_DIR}{*.builder,*.ring.gz,backups/*.builder,backups/*.ring.gz} diff --git a/lib/template b/lib/template index 629e110271..b8e7c4d86f 100644 --- a/lib/template +++ b/lib/template @@ -10,6 +10,7 @@ # ``stack.sh`` calls the entry points in this order: # +# - is_XXXX_enabled # - install_XXXX # - configure_XXXX # - init_XXXX @@ -35,6 +36,13 @@ XXX_CONF_DIR=/etc/XXXX # Entry Points # ------------ +# Test if any XXXX services are enabled +# is_XXXX_enabled +function is_XXXX_enabled { + [[ ,${ENABLED_SERVICES} =~ ,"XX-" ]] && return 0 + return 1 +} + # cleanup_XXXX() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_XXXX() { diff --git a/stack.sh b/stack.sh index 15e14303cf..d379d51c6e 100755 --- a/stack.sh +++ b/stack.sh @@ 
-1096,7 +1096,7 @@ if is_service_enabled s-proxy; then fi # Launch the Glance services -if is_service_enabled g-api g-reg; then +if is_service_enabled glance; then echo_summary "Starting Glance" start_glance fi diff --git a/stackrc b/stackrc index e89d25e4ab..2527b0ad84 100644 --- a/stackrc +++ b/stackrc @@ -35,7 +35,7 @@ fi # enable_service neutron # # Optional, to enable tempest configuration as part of devstack # enable_service tempest -ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,n-cond,cinder,c-sch,c-api,c-vol,n-sch,n-novnc,n-xvnc,n-cauth,horizon,rabbit,tempest,mysql +ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,n-cond,c-sch,c-api,c-vol,n-sch,n-novnc,n-xvnc,n-cauth,horizon,rabbit,tempest,mysql # Tell Tempest which services are available. The default is set here as # Tempest falls late in the configuration sequence. This differs from diff --git a/unstack.sh b/unstack.sh index 92d0642c38..c233f93e6b 100755 --- a/unstack.sh +++ b/unstack.sh @@ -104,7 +104,7 @@ if is_service_enabled nova; then stop_nova fi -if is_service_enabled g-api g-reg; then +if is_service_enabled glance; then stop_glance fi From dd710b4f12bb09abdc0dfa4a5f5c4aba81eba650 Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Fri, 7 Feb 2014 16:46:17 +0000 Subject: [PATCH 0704/4704] Make neutron tempest run with tenant isolation by default This commit removes the workaround that switched tempest tenant isolation to false if Neutron was enabled. Recent changes to both neutron and tempest should make this safe finally. 
Change-Id: I929fcc73a7ef9a10f01af422ff62f9d451d52ae3 --- lib/tempest | 5 ----- 1 file changed, 5 deletions(-) diff --git a/lib/tempest b/lib/tempest index 06183b107b..0fc0de26c8 100644 --- a/lib/tempest +++ b/lib/tempest @@ -266,11 +266,6 @@ function configure_tempest() { # Compute iniset $TEMPEST_CONFIG compute change_password_available False - # Note(nati) current tempest don't create network for each tenant - # so reuse same tenant for now - if is_service_enabled neutron; then - TEMPEST_ALLOW_TENANT_ISOLATION=${TEMPEST_ALLOW_TENANT_ISOLATION:-False} - fi iniset $TEMPEST_CONFIG compute allow_tenant_isolation ${TEMPEST_ALLOW_TENANT_ISOLATION:-True} iniset $TEMPEST_CONFIG compute ssh_user ${DEFAULT_INSTANCE_USER:-cirros} # DEPRECATED iniset $TEMPEST_CONFIG compute network_for_ssh $PRIVATE_NETWORK_NAME From 67db4a9bd5c0d0a119c244e8dbb1a0a1990944b8 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Fri, 7 Feb 2014 16:02:37 -0500 Subject: [PATCH 0705/4704] remove database init from horizon horizon default config no longer uses a session database, it uses signed cookies instead, so we can stop doing db initialization and cleanup (which based on errexit we weren't doing correctly anyway). Change-Id: Icae4318e2784486db2888cbf353e95ac9a5d7cba --- lib/horizon | 9 --------- 1 file changed, 9 deletions(-) diff --git a/lib/horizon b/lib/horizon index c64d8502ba..2f5795d1ca 100644 --- a/lib/horizon +++ b/lib/horizon @@ -81,9 +81,6 @@ function configure_horizon() { # init_horizon() - Initialize databases, etc. function init_horizon() { - # Remove stale session database. - rm -f $HORIZON_DIR/openstack_dashboard/local/dashboard_openstack.sqlite3 - # ``local_settings.py`` is used to override horizon default settings. 
local_settings=$HORIZON_DIR/openstack_dashboard/local/local_settings.py cp $HORIZON_SETTINGS $local_settings @@ -106,12 +103,6 @@ function init_horizon() { _horizon_config_set $local_settings OPENSTACK_NEUTRON_NETWORK enable_vpn True fi - # Initialize the horizon database (it stores sessions and notices shown to - # users). The user system is external (keystone). - cd $HORIZON_DIR - python manage.py syncdb --noinput - cd $TOP_DIR - # Create an empty directory that apache uses as docroot sudo mkdir -p $HORIZON_DIR/.blackhole From 5ed43bf82ac9eeb30ca543bcf695f9d45ddf77f5 Mon Sep 17 00:00:00 2001 From: Shane Wang Date: Fri, 7 Feb 2014 11:01:43 +0800 Subject: [PATCH 0706/4704] Fix misspellings in devstack Fix misspellings detected by: * pip install misspellings * git ls-files | grep -v locale | misspellings -f - Change-Id: I19726438d15cd27b813504aac530e7e53c4def12 Closes-Bug: #1257295 --- tools/xen/xenrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/xen/xenrc b/tools/xen/xenrc index cd282341cb..b355a10d4f 100644 --- a/tools/xen/xenrc +++ b/tools/xen/xenrc @@ -35,7 +35,7 @@ XEN_INT_BRIDGE_OR_NET_NAME="OpenStack VM Integration Network" GUEST_PASSWORD=${GUEST_PASSWORD:-secrete} # Extracted variables for OpenStack VM network device numbers. -# Make sure, they form a continous sequence starting from 0 +# Make sure they form a continuous sequence starting from 0 MGT_DEV_NR=0 VM_DEV_NR=1 PUB_DEV_NR=2 From d1cd0c66487cc00fa50c6638fb233e04b023d744 Mon Sep 17 00:00:00 2001 From: Daniel Kuffner Date: Sat, 8 Feb 2014 12:35:48 +0100 Subject: [PATCH 0707/4704] Docker install script fails to install docker registry The tools/docker/install_docker.sh script fails during the installation/setup of the docker registry. The problem is that the used environment variables are wrong. 
REGISTRY_IMAGE > DOCKER_REGISTRY_IMAGE REGISTRY_IMAGE_NAME > DOCKER_REGISTRY_IMAGE_NAME Change-Id: I16f051abe5c426f295c69d518b49c1b9a7b4cc94 --- tools/docker/install_docker.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/docker/install_docker.sh b/tools/docker/install_docker.sh index 4fa23864fb..b9e1b242dd 100755 --- a/tools/docker/install_docker.sh +++ b/tools/docker/install_docker.sh @@ -60,5 +60,5 @@ docker pull $DOCKER_IMAGE docker tag $DOCKER_IMAGE $DOCKER_IMAGE_NAME # Get docker-registry image -docker pull $REGISTRY_IMAGE -docker tag $REGISTRY_IMAGE $REGISTRY_IMAGE_NAME +docker pull $DOCKER_REGISTRY_IMAGE +docker tag $DOCKER_REGISTRY_IMAGE $DOCKER_REGISTRY_IMAGE_NAME From d73af8787280002321ab52a3262a2d0b5a8e54cd Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Thu, 6 Feb 2014 15:33:52 -0800 Subject: [PATCH 0708/4704] If n-api-meta is being run, remove from NOVA_ENABLED_APIS If running n-api-meta as a separate service we shouldn't run it inside of n-api. This patch is in support of Iddd44f7ee43b9287a788dea49eaa484316f8da04 Change-Id: I8a54cf13dc6083b78e89c9ea5413d9e4d8d4b37a Related-Bug: #1270845 --- lib/nova | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/nova b/lib/nova index dbc5c3db44..3ee28faaec 100644 --- a/lib/nova +++ b/lib/nova @@ -389,6 +389,10 @@ function create_nova_conf() { fi if is_service_enabled n-api; then + if is_service_enabled n-api-meta; then + # If running n-api-meta as a separate service + NOVA_ENABLED_APIS=$(echo $NOVA_ENABLED_APIS | sed "s/,metadata//") + fi iniset $NOVA_CONF DEFAULT enabled_apis "$NOVA_ENABLED_APIS" if is_service_enabled tls-proxy; then # Set the service port for a proxy to take the original From 9e032c2d374f80612c010775dd8d71389d5d09a3 Mon Sep 17 00:00:00 2001 From: Sahid Orentino Ferdjaoui Date: Mon, 10 Feb 2014 11:36:25 +0100 Subject: [PATCH 0709/4704] read_password needs to store in .localrc.auto if local.conf is used. 
when running stack.sh with no passwords in local.conf read_password() creates localrc and local.conf is ignored Change-Id: I25ad07569d2b42b190449591d5a01ade8022392c --- stack.sh | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 1a1460d2f3..303541d63e 100755 --- a/stack.sh +++ b/stack.sh @@ -362,7 +362,11 @@ function read_password { var=$1; msg=$2 pw=${!var} - localrc=$TOP_DIR/localrc + if [[ -f $RC_DIR/localrc ]]; then + localrc=$TOP_DIR/localrc + else + localrc=$TOP_DIR/.localrc.auto + fi # If the password is not defined yet, proceed to prompt user for a password. if [ ! $pw ]; then From 6b1cb10809ae4c2cc9a4b39e0298458f0ecd4853 Mon Sep 17 00:00:00 2001 From: Doug Hellmann Date: Mon, 10 Feb 2014 09:59:43 -0800 Subject: [PATCH 0710/4704] Add cliff, pycadf, stevedore, & taskflow from oslo Oslo has adopted 4 libraries that were previously on stackforge, so we can now install them from source. Change-Id: I6b6e20a7884b47ade466fc38641a5ac1a5f3e146 --- lib/oslo | 16 ++++++++++++++++ stackrc | 16 ++++++++++++++++ 2 files changed, 32 insertions(+) diff --git a/lib/oslo b/lib/oslo index f644ed76c3..b089842ae4 100644 --- a/lib/oslo +++ b/lib/oslo @@ -20,9 +20,13 @@ set +o xtrace # Defaults # -------- +CLIFF_DIR=$DEST/cliff OSLOCFG_DIR=$DEST/oslo.config OSLOMSG_DIR=$DEST/oslo.messaging OSLORWRAP_DIR=$DEST/oslo.rootwrap +PYCADF_DIR=$DEST/pycadf +STEVEDORE_DIR=$DEST/stevedore +TASKFLOW_DIR=$DEST/taskflow # Entry Points # ------------ @@ -33,6 +37,9 @@ function install_oslo() { # for a smoother transition of existing users. 
cleanup_oslo + git_clone $CLIFF_REPO $CLIFF_DIR $CLIFF_BRANCH + setup_develop $CLIFF_DIR + git_clone $OSLOCFG_REPO $OSLOCFG_DIR $OSLOCFG_BRANCH setup_develop $OSLOCFG_DIR @@ -41,6 +48,15 @@ function install_oslo() { git_clone $OSLORWRAP_REPO $OSLORWRAP_DIR $OSLORWRAP_BRANCH setup_develop $OSLORWRAP_DIR + + git_clone $PYCADF_REPO $PYCADF_DIR $PYCADF_BRANCH + setup_develop $PYCADF_DIR + + git_clone $STEVEDORE_REPO $STEVEDORE_DIR $STEVEDORE_BRANCH + setup_develop $STEVEDORE_DIR + + git_clone $TASKFLOW_REPO $TASKFLOW_DIR $TASKFLOW_BRANCH + setup_develop $TASKFLOW_DIR } # cleanup_oslo() - purge possibly old versions of oslo diff --git a/stackrc b/stackrc index 7eed60cb2c..729c2f5b40 100644 --- a/stackrc +++ b/stackrc @@ -140,6 +140,10 @@ NOVACLIENT_BRANCH=${NOVACLIENT_BRANCH:-master} OPENSTACKCLIENT_REPO=${OPENSTACKCLIENT_REPO:-${GIT_BASE}/openstack/python-openstackclient.git} OPENSTACKCLIENT_BRANCH=${OPENSTACKCLIENT_BRANCH:-master} +# cliff command line framework +CLIFF_REPO=${CLIFF_REPO:-${GIT_BASE}/openstack/cliff.git} +CLIFF_BRANCH=${CLIFF_BRANCH:-master} + # oslo.config OSLOCFG_REPO=${OSLOCFG_REPO:-${GIT_BASE}/openstack/oslo.config.git} OSLOCFG_BRANCH=${OSLOCFG_BRANCH:-master} @@ -152,6 +156,18 @@ OSLOMSG_BRANCH=${OSLOMSG_BRANCH:-master} OSLORWRAP_REPO=${OSLORWRAP_REPO:-${GIT_BASE}/openstack/oslo.rootwrap.git} OSLORWRAP_BRANCH=${OSLORWRAP_BRANCH:-master} +# pycadf auditing library +PYCADF_REPO=${PYCADF_REPO:-${GIT_BASE}/openstack/pycadf.git} +PYCADF_BRANCH=${PYCADF_BRANCH:-master} + +# stevedore plugin manager +STEVEDORE_REPO=${STEVEDORE_REPO:-${GIT_BASE}/openstack/stevedore.git} +STEVEDORE_BRANCH=${STEVEDORE_BRANCH:-master} + +# taskflow plugin manager +TASKFLOW_REPO=${TASKFLOW_REPO:-${GIT_BASE}/openstack/taskflow.git} +TASKFLOW_BRANCH=${TASKFLOW_BRANCH:-master} + # pbr drives the setuptools configs PBR_REPO=${PBR_REPO:-${GIT_BASE}/openstack-dev/pbr.git} PBR_BRANCH=${PBR_BRANCH:-master} From 9972ec23c43cea1be6ee5174e72c06e32f295212 Mon Sep 17 00:00:00 2001 From: 
Malini Kamalambal Date: Mon, 10 Feb 2014 11:22:39 -0500 Subject: [PATCH 0711/4704] Add marconi to enabled services This patch adds marconi to enabled services. This is needed to run the tempest experimental job for marconi. Change-Id: I28794c3acacc6daa9f698f8031b58d1ee13c3bad Implements: blueprint add-basic-marconi-tests --- lib/marconi | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/lib/marconi b/lib/marconi index 1eaebbdf16..d1ab5f3a5c 100644 --- a/lib/marconi +++ b/lib/marconi @@ -58,6 +58,13 @@ TEMPEST_SERVICES+=,marconi # Functions # --------- +# Test if any Marconi services are enabled +# is_marconi_enabled +function is_marconi_enabled { + [[ ,${ENABLED_SERVICES} =~ ,"marconi-" ]] && return 0 + return 1 +} + # cleanup_marconi() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_marconi() { From ae90f67e05a93e7b69cd019f6c50fa20405edb68 Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Mon, 10 Feb 2014 14:23:54 -0500 Subject: [PATCH 0712/4704] Stop catting tempest.config during tempest setup This commit removes the 'cat tempest.config' from lib/tempest. There is no reason to cat it as part of running devstack because the file is and can be interacted with after devstack finishes running. To prevent a loss of information in the gate this change should be coupled with devstack-gate change: Ifb36918cd0d686cb3865f5322cd62c209acaaf30 which copies the tempest.config file with the other test artifacts. 
Change-Id: Ia01cd53660b3490ea9faa9e9c746bafd9df12a9b --- lib/tempest | 3 --- 1 file changed, 3 deletions(-) diff --git a/lib/tempest b/lib/tempest index 76da17062c..c8eebfcf05 100644 --- a/lib/tempest +++ b/lib/tempest @@ -348,9 +348,6 @@ function configure_tempest() { fi done - echo "Created tempest configuration file:" - cat $TEMPEST_CONFIG - # Restore IFS IFS=$ifs #Restore errexit From bc76f748ebfc57f5af3e006f4092ae574b8febfe Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Mon, 10 Feb 2014 21:11:04 -0500 Subject: [PATCH 0713/4704] remove build_tempest we haven't actually used this script in about a year, in favor of the actual in tree lib/tempest. Change-Id: I9d78b395846ebe833a38ba50edae226040cd7f45 --- tools/build_tempest.sh | 53 ------------------------------------------ 1 file changed, 53 deletions(-) delete mode 100755 tools/build_tempest.sh diff --git a/tools/build_tempest.sh b/tools/build_tempest.sh deleted file mode 100755 index 6c527f5962..0000000000 --- a/tools/build_tempest.sh +++ /dev/null @@ -1,53 +0,0 @@ -#!/usr/bin/env bash -# -# **build_tempest.sh** - -# Checkout and prepare a Tempest repo: git://git.openstack.org/openstack/tempest.git - -function usage { - echo "$0 - Check out and prepare a Tempest repo" - echo "" - echo "Usage: $0" - exit 1 -} - -if [ "$1" = "-h" ]; then - usage -fi - -# Clean up any resources that may be in use -cleanup() { - set +o errexit - - # Kill ourselves to signal any calling process - trap 2; kill -2 $$ -} - -trap cleanup SIGHUP SIGINT SIGTERM SIGQUIT EXIT - -# Keep track of the current directory -TOOLS_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=$(cd $TOOLS_DIR/..; pwd) - -# Import common functions -. $TOP_DIR/functions - -# Abort if localrc is not set -if [ ! -e $TOP_DIR/localrc ]; then - echo "You must have a localrc with ALL necessary passwords and configuration defined before proceeding." - echo "See stack.sh for required passwords." 
- exit 1 -fi - -# Source params -source ./stackrc - -# Where Openstack code lives -DEST=${DEST:-/opt/stack} - -TEMPEST_DIR=$DEST/tempest - -# Install tests and prerequisites -git_clone $TEMPEST_REPO $TEMPEST_DIR $TEMPEST_BRANCH - -trap - SIGHUP SIGINT SIGTERM SIGQUIT EXIT From 0b9776d2f34197d1e920e1dc8506b8f8c31452ca Mon Sep 17 00:00:00 2001 From: Eric Windisch Date: Tue, 28 Jan 2014 11:20:53 -0500 Subject: [PATCH 0714/4704] Install glance images before starting Nova The docker driver for Nova needs a registry service to be running. It is being run inside a container using an image -- that image must be downloaded. The registry service must be started via nova_plugins/hypervisor-docker, but this is presently called before Glance's image download. The reordering is being done such that Glance may download the registry image, but prior to starting Nova such that "hypervisor-docker" may have an image downloaded and available to launch the registry. This change should cause no negative effects on other hypervisors. Change-Id: I7bccb42517e4c6187f2a90c64f39cda4577f89a3 blueprint: docker-glance-uploads --- stack.sh | 82 ++++++++++++++++++++++++++++---------------------------- 1 file changed, 41 insertions(+), 41 deletions(-) diff --git a/stack.sh b/stack.sh index 303541d63e..78cfbc58ce 100755 --- a/stack.sh +++ b/stack.sh @@ -1090,6 +1090,47 @@ if is_service_enabled g-api g-reg; then start_glance fi +# Install Images +# ============== + +# Upload an image to glance. +# +# The default image is cirros, a small testing image which lets you login as **root** +# cirros has a ``cloud-init`` analog supporting login via keypair and sending +# scripts as userdata. +# See https://help.ubuntu.com/community/CloudInit for more on cloud-init +# +# Override ``IMAGE_URLS`` with a comma-separated list of UEC images. 
+# * **precise**: http://uec-images.ubuntu.com/precise/current/precise-server-cloudimg-amd64.tar.gz + +if is_service_enabled g-reg; then + TOKEN=$(keystone token-get | grep ' id ' | get_field 2) + die_if_not_set $LINENO TOKEN "Keystone fail to get token" + + if is_baremetal; then + echo_summary "Creating and uploading baremetal images" + + # build and upload separate deploy kernel & ramdisk + upload_baremetal_deploy $TOKEN + + # upload images, separating out the kernel & ramdisk for PXE boot + for image_url in ${IMAGE_URLS//,/ }; do + upload_baremetal_image $image_url $TOKEN + done + else + echo_summary "Uploading images" + + # Option to upload legacy ami-tty, which works with xenserver + if [[ -n "$UPLOAD_LEGACY_TTY" ]]; then + IMAGE_URLS="${IMAGE_URLS:+${IMAGE_URLS},}https://github.com/downloads/citrix-openstack/warehouse/tty.tgz" + fi + + for image_url in ${IMAGE_URLS//,/ }; do + upload_image $image_url $TOKEN + done + fi +fi + # Create an access key and secret key for nova ec2 register image if is_service_enabled key && is_service_enabled swift3 && is_service_enabled nova; then NOVA_USER_ID=$(keystone user-list | grep ' nova ' | get_field 1) @@ -1195,47 +1236,6 @@ if is_service_enabled nova && is_service_enabled key; then fi -# Install Images -# ============== - -# Upload an image to glance. -# -# The default image is cirros, a small testing image which lets you login as **root** -# cirros has a ``cloud-init`` analog supporting login via keypair and sending -# scripts as userdata. -# See https://help.ubuntu.com/community/CloudInit for more on cloud-init -# -# Override ``IMAGE_URLS`` with a comma-separated list of UEC images. 
-# * **precise**: http://uec-images.ubuntu.com/precise/current/precise-server-cloudimg-amd64.tar.gz - -if is_service_enabled g-reg; then - TOKEN=$(keystone token-get | grep ' id ' | get_field 2) - die_if_not_set $LINENO TOKEN "Keystone fail to get token" - - if is_baremetal; then - echo_summary "Creating and uploading baremetal images" - - # build and upload separate deploy kernel & ramdisk - upload_baremetal_deploy $TOKEN - - # upload images, separating out the kernel & ramdisk for PXE boot - for image_url in ${IMAGE_URLS//,/ }; do - upload_baremetal_image $image_url $TOKEN - done - else - echo_summary "Uploading images" - - # Option to upload legacy ami-tty, which works with xenserver - if [[ -n "$UPLOAD_LEGACY_TTY" ]]; then - IMAGE_URLS="${IMAGE_URLS:+${IMAGE_URLS},}https://github.com/downloads/citrix-openstack/warehouse/tty.tgz" - fi - - for image_url in ${IMAGE_URLS//,/ }; do - upload_image $image_url $TOKEN - done - fi -fi - # If we are running nova with baremetal driver, there are a few # last-mile configuration bits to attend to, which must happen # after n-api and n-sch have started. From 97ce935a9244956fd977cd1eb62e7b429e5cb141 Mon Sep 17 00:00:00 2001 From: Eric Windisch Date: Fri, 31 Jan 2014 01:40:50 -0500 Subject: [PATCH 0715/4704] Update docker driver to use a CirrOS image For purposes of matching the VM image used in devstack across hypervisors, set the default container image for Docker to cirros. This uses the CirrOS image from stackbrew, the "standard library" for Docker. 
Change-Id: I9d767a4e06c5caa7b92ffea25e6a9aeda9bf282a --- lib/nova_plugins/hypervisor-docker | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/nova_plugins/hypervisor-docker b/lib/nova_plugins/hypervisor-docker index bb934b87d6..cdd9317761 100644 --- a/lib/nova_plugins/hypervisor-docker +++ b/lib/nova_plugins/hypervisor-docker @@ -31,8 +31,8 @@ DOCKER_UNIX_SOCKET=/var/run/docker.sock DOCKER_PID_FILE=/var/run/docker.pid DOCKER_REGISTRY_PORT=${DOCKER_REGISTRY_PORT:-5042} -DOCKER_IMAGE=${DOCKER_IMAGE:-busybox:latest} -DOCKER_IMAGE_NAME=busybox +DOCKER_IMAGE=${DOCKER_IMAGE:-cirros:latest} +DOCKER_IMAGE_NAME=cirros DOCKER_REGISTRY_IMAGE=${DOCKER_REGISTRY_IMAGE:-registry:latest} DOCKER_REGISTRY_IMAGE_NAME=registry DOCKER_REPOSITORY_NAME=${SERVICE_HOST}:${DOCKER_REGISTRY_PORT}/${DOCKER_IMAGE_NAME} From d0860cc26d78c3f1c70b332ecc793442a1c8048d Mon Sep 17 00:00:00 2001 From: john-griffith Date: Thu, 23 Jan 2014 11:31:10 -0700 Subject: [PATCH 0716/4704] Replace custom cinder driver configs The devstack/lib/cinder file has a number of third party driver config options hard-coded in it. Rather than add yet another if driver== statement here let's use plugin files and do something similar to what's already in place for nova_hypervisors and neutron plugins. This works the same way folks were implementing their drivers already, the key is to use a CINDER_DRIVER variable in your localrc file that matches the name of the lib/cinder_plugin file to use. The existing third party driver entries that were in lib/cinder have been migrated to cooresponding plugin files. 
Change-Id: I4ee51ea542d5aa63879afd5297311a9df727c57f --- lib/cinder | 44 ++++++------------------------- lib/cinder_plugins/XenAPINFS | 44 +++++++++++++++++++++++++++++++ lib/cinder_plugins/glusterfs | 50 ++++++++++++++++++++++++++++++++++++ lib/cinder_plugins/nfs | 42 ++++++++++++++++++++++++++++++ lib/cinder_plugins/sheepdog | 39 ++++++++++++++++++++++++++++ lib/cinder_plugins/solidfire | 48 ++++++++++++++++++++++++++++++++++ lib/cinder_plugins/vsphere | 42 ++++++++++++++++++++++++++++++ 7 files changed, 273 insertions(+), 36 deletions(-) create mode 100644 lib/cinder_plugins/XenAPINFS create mode 100644 lib/cinder_plugins/glusterfs create mode 100644 lib/cinder_plugins/nfs create mode 100644 lib/cinder_plugins/sheepdog create mode 100644 lib/cinder_plugins/solidfire create mode 100644 lib/cinder_plugins/vsphere diff --git a/lib/cinder b/lib/cinder index 9f70b2a0c9..51eb3c1262 100644 --- a/lib/cinder +++ b/lib/cinder @@ -27,6 +27,12 @@ set +o xtrace # set up default driver CINDER_DRIVER=${CINDER_DRIVER:-default} +CINDER_PLUGINS=$TOP_DIR/lib/cinder_plugins + +# grab plugin config if specified via cinder_driver +if [[ -r $CINDER_PLUGINS/$CINDER_DRIVER ]]; then + source $CINDER_PLUGINS/$CINDER_DRIVER +fi # set up default directories CINDER_DIR=$DEST/cinder @@ -300,42 +306,8 @@ function configure_cinder() { setup_colorized_logging $CINDER_CONF DEFAULT "project_id" "user_id" fi - if [ "$CINDER_DRIVER" == "XenAPINFS" ]; then - ( - set -u - iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.xenapi.sm.XenAPINFSDriver" - iniset $CINDER_CONF DEFAULT xenapi_connection_url "$CINDER_XENAPI_CONNECTION_URL" - iniset $CINDER_CONF DEFAULT xenapi_connection_username "$CINDER_XENAPI_CONNECTION_USERNAME" - iniset $CINDER_CONF DEFAULT xenapi_connection_password "$CINDER_XENAPI_CONNECTION_PASSWORD" - iniset $CINDER_CONF DEFAULT xenapi_nfs_server "$CINDER_XENAPI_NFS_SERVER" - iniset $CINDER_CONF DEFAULT xenapi_nfs_serverpath "$CINDER_XENAPI_NFS_SERVERPATH" - ) - elif [ 
"$CINDER_DRIVER" == "nfs" ]; then - iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.nfs.NfsDriver" - iniset $CINDER_CONF DEFAULT nfs_shares_config "$CINDER_CONF_DIR/nfs_shares.conf" - echo "$CINDER_NFS_SERVERPATH" | sudo tee "$CINDER_CONF_DIR/nfs_shares.conf" - sudo chmod 666 $CINDER_CONF_DIR/nfs_shares.conf - elif [ "$CINDER_DRIVER" == "sheepdog" ]; then - iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.sheepdog.SheepdogDriver" - elif [ "$CINDER_DRIVER" == "glusterfs" ]; then - # To use glusterfs, set the following in localrc: - # CINDER_DRIVER=glusterfs - # CINDER_GLUSTERFS_SHARES="127.0.0.1:/vol1;127.0.0.1:/vol2" - # Shares are : and separated by semicolons. - - iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.glusterfs.GlusterfsDriver" - iniset $CINDER_CONF DEFAULT glusterfs_shares_config "$CINDER_CONF_DIR/glusterfs_shares" - touch $CINDER_CONF_DIR/glusterfs_shares - if [ ! -z "$CINDER_GLUSTERFS_SHARES" ]; then - CINDER_GLUSTERFS_SHARES=$(echo $CINDER_GLUSTERFS_SHARES | tr ";" "\n") - echo "$CINDER_GLUSTERFS_SHARES" > $CINDER_CONF_DIR/glusterfs_shares - fi - elif [ "$CINDER_DRIVER" == "vsphere" ]; then - echo_summary "Using VMware vCenter driver" - iniset $CINDER_CONF DEFAULT vmware_host_ip "$VMWAREAPI_IP" - iniset $CINDER_CONF DEFAULT vmware_host_username "$VMWAREAPI_USER" - iniset $CINDER_CONF DEFAULT vmware_host_password "$VMWAREAPI_PASSWORD" - iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver" + if [[ -r $CINDER_PLUGINS/$CINDER_DRIVER ]]; then + configure_cinder_driver fi if [[ is_fedora && $DISTRO =~ (rhel6) ]]; then diff --git a/lib/cinder_plugins/XenAPINFS b/lib/cinder_plugins/XenAPINFS new file mode 100644 index 0000000000..72e1c1398c --- /dev/null +++ b/lib/cinder_plugins/XenAPINFS @@ -0,0 +1,44 @@ +# lib/cinder_plugins/XenAPINFS +# Configure the XenAPINFS driver + +# Enable with: +# +# CINDER_DRIVER=XenAPINFS + +# Dependencies: +# +# - ``functions`` file +# 
- ``cinder`` configurations + +# configure_cinder_driver - make configuration changes, including those to other services + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + +# Set up default directories + + +# Entry Points +# ------------ + +# configure_cinder_driver - Set config files, create data dirs, etc +function configure_cinder_driver() { + iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.xenapi.sm.XenAPINFSDriver" + iniset $CINDER_CONF DEFAULT xenapi_connection_url "$CINDER_XENAPI_CONNECTION_URL" + iniset $CINDER_CONF DEFAULT xenapi_connection_username "$CINDER_XENAPI_CONNECTION_USERNAME" + iniset $CINDER_CONF DEFAULT xenapi_connection_password "$CINDER_XENAPI_CONNECTION_PASSWORD" + iniset $CINDER_CONF DEFAULT xenapi_nfs_server "$CINDER_XENAPI_NFS_SERVER" + iniset $CINDER_CONF DEFAULT xenapi_nfs_serverpath "$CINDER_XENAPI_NFS_SERVERPATH" +} + +# Restore xtrace +$MY_XTRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/cinder_plugins/glusterfs b/lib/cinder_plugins/glusterfs new file mode 100644 index 0000000000..a0c5ae8d5e --- /dev/null +++ b/lib/cinder_plugins/glusterfs @@ -0,0 +1,50 @@ +# lib/cinder_plugins/glusterfs +# Configure the glusterfs driver + +# Enable with: +# +# CINDER_DRIVER=glusterfs + +# Dependencies: +# +# - ``functions`` file +# - ``cinder`` configurations + +# configure_cinder_driver - make configuration changes, including those to other services + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + +# Set up default directories + + +# Entry Points +# ------------ + +# configure_cinder_driver - Set config files, create data dirs, etc +function configure_cinder_driver() { + # To use glusterfs, set the following in localrc: + # CINDER_DRIVER=glusterfs + # CINDER_GLUSTERFS_SHARES="127.0.0.1:/vol1;127.0.0.1:/vol2" + # Shares are : and separated by semicolons. 
+ + iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.glusterfs.GlusterfsDriver" + iniset $CINDER_CONF DEFAULT glusterfs_shares_config "$CINDER_CONF_DIR/glusterfs_shares" + touch $CINDER_CONF_DIR/glusterfs_shares + if [ ! -z "$CINDER_GLUSTERFS_SHARES" ]; then + CINDER_GLUSTERFS_SHARES=$(echo $CINDER_GLUSTERFS_SHARES | tr ";" "\n") + echo "$CINDER_GLUSTERFS_SHARES" > $CINDER_CONF_DIR/glusterfs_shares + fi +} + +# Restore xtrace +$MY_XTRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/cinder_plugins/nfs b/lib/cinder_plugins/nfs new file mode 100644 index 0000000000..ea2c9ce665 --- /dev/null +++ b/lib/cinder_plugins/nfs @@ -0,0 +1,42 @@ +# lib/cinder_plugins/nfs +# Configure the nfs driver + +# Enable with: +# +# CINDER_DRIVER=nfs + +# Dependencies: +# +# - ``functions`` file +# - ``cinder`` configurations + +# configure_cinder_driver - make configuration changes, including those to other services + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + +# Set up default directories + + +# Entry Points +# ------------ + +# configure_cinder_driver - Set config files, create data dirs, etc +function configure_cinder_driver() { + iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.nfs.NfsDriver" + iniset $CINDER_CONF DEFAULT nfs_shares_config "$CINDER_CONF_DIR/nfs_shares.conf" + echo "$CINDER_NFS_SERVERPATH" | sudo tee "$CINDER_CONF_DIR/nfs_shares.conf" + sudo chmod 660 $CINDER_CONF_DIR/nfs_shares.conf +} + +# Restore xtrace +$MY_XTRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/cinder_plugins/sheepdog b/lib/cinder_plugins/sheepdog new file mode 100644 index 0000000000..4435932371 --- /dev/null +++ b/lib/cinder_plugins/sheepdog @@ -0,0 +1,39 @@ +# lib/cinder_plugins/sheepdog +# Configure the sheepdog driver + +# Enable with: +# +# CINDER_DRIVER=sheepdog + +# Dependencies: +# +# - ``functions`` file +# - ``cinder`` configurations + +# 
configure_cinder_driver - make configuration changes, including those to other services + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + +# Set up default directories + + +# Entry Points +# ------------ + +# configure_cinder_driver - Set config files, create data dirs, etc +function configure_cinder_driver() { + iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.sheepdog.SheepdogDriver" +} + +# Restore xtrace +$MY_XTRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/cinder_plugins/solidfire b/lib/cinder_plugins/solidfire new file mode 100644 index 0000000000..47c113e1a2 --- /dev/null +++ b/lib/cinder_plugins/solidfire @@ -0,0 +1,48 @@ +# lib/cinder_plugins/solidfire +# Configure the solidfire driver + +# Enable with: +# +# CINDER_DRIVER=solidfire + +# Dependencies: +# +# - ``functions`` file +# - ``cinder`` configurations + +# configure_cinder_driver - make configuration changes, including those to other services + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + +# Set up default directories + + +# Entry Points +# ------------ + +# configure_cinder_driver - Set config files, create data dirs, etc +function configure_cinder_driver() { + # To use solidfire, set the following in localrc: + # CINDER_DRIVER=solidfire + # SAN_IP= + # SAN_LOGIN= + # SAN_PASSWORD= + + iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.solidfire.SolidFireDriver" + iniset $CINDER_CONF DEFAULT san_ip $SAN_IP + iniset $CINDER_CONF DEFAULT san_login $SAN_LOGIN + iniset $CINDER_CONF DEFAULT san_password $SAN_PASSWORD +} + +# Restore xtrace +$MY_XTRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/cinder_plugins/vsphere b/lib/cinder_plugins/vsphere new file mode 100644 index 0000000000..c8cab6a8c1 --- /dev/null +++ b/lib/cinder_plugins/vsphere @@ -0,0 +1,42 @@ +# lib/cinder_plugins/vsphere +# Configure the vsphere 
driver + +# Enable with: +# +# CINDER_DRIVER=vsphere + +# Dependencies: +# +# - ``functions`` file +# - ``cinder`` configurations + +# configure_cinder_driver - make configuration changes, including those to other services + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + +# Set up default directories + + +# Entry Points +# ------------ + +# configure_cinder_driver - Set config files, create data dirs, etc +function configure_cinder_driver() { + iniset $CINDER_CONF DEFAULT vmware_host_ip "$VMWAREAPI_IP" + iniset $CINDER_CONF DEFAULT vmware_host_username "$VMWAREAPI_USER" + iniset $CINDER_CONF DEFAULT vmware_host_password "$VMWAREAPI_PASSWORD" + iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver" +} + +# Restore xtrace +$MY_XTRACE + +# Local variables: +# mode: shell-script +# End: From 062cdaf84c11fbbef71cab1db833c4aac3baadbf Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Mon, 10 Feb 2014 22:24:49 -0500 Subject: [PATCH 0717/4704] add dstat to see top process info pidstat data isn't exceptionally useful, it's lots of lines, and seems to be missing some of the most critical one. dstat is kind of like sysstat, except the formatting is much better. It also supports advanced features like the top CPU using process at every interval. 
put this behind the sysstat variable, as we'll probably want to replace sysstat & pidstat with this if it works Change-Id: I48dc22a0a7e63fe3abb527646cc70525998a7d85 --- files/apts/dstat | 1 + files/rpms-suse/dstat | 1 + files/rpms/dstat | 1 + stack.sh | 12 ++++++++++++ 4 files changed, 15 insertions(+) create mode 100644 files/apts/dstat create mode 100644 files/rpms-suse/dstat create mode 100644 files/rpms/dstat diff --git a/files/apts/dstat b/files/apts/dstat new file mode 100644 index 0000000000..2b643b8b1b --- /dev/null +++ b/files/apts/dstat @@ -0,0 +1 @@ +dstat diff --git a/files/rpms-suse/dstat b/files/rpms-suse/dstat new file mode 100644 index 0000000000..2b643b8b1b --- /dev/null +++ b/files/rpms-suse/dstat @@ -0,0 +1 @@ +dstat diff --git a/files/rpms/dstat b/files/rpms/dstat new file mode 100644 index 0000000000..8a8f8fe737 --- /dev/null +++ b/files/rpms/dstat @@ -0,0 +1 @@ +dstat \ No newline at end of file diff --git a/stack.sh b/stack.sh index 303541d63e..1dfd4ddbc6 100755 --- a/stack.sh +++ b/stack.sh @@ -298,6 +298,8 @@ SYSLOG_PORT=${SYSLOG_PORT:-516} SYSSTAT_FILE=${SYSSTAT_FILE:-"sysstat.dat"} SYSSTAT_INTERVAL=${SYSSTAT_INTERVAL:-"1"} +DSTAT_FILE=${DSTAT_FILE:-"dstat.txt"} + PIDSTAT_FILE=${PIDSTAT_FILE:-"pidstat.txt"} PIDSTAT_INTERVAL=${PIDSTAT_INTERVAL:-"5"} @@ -879,6 +881,16 @@ if is_service_enabled sysstat; then fi fi +if is_service_enabled dstat; then + # Per-process stats + DSTAT_OPTS="-tcndylp --top-cpu-adv" + if [[ -n ${SCREEN_LOGDIR} ]]; then + screen_it dstat "cd $TOP_DIR; dstat $DSTAT_OPTS | tee $SCREEN_LOGDIR/$DSTAT_FILE" + else + screen_it dstat "dstat $DSTAT_OPTS" + fi +fi + if is_service_enabled pidstat; then # Per-process stats PIDSTAT_OPTS="-l -p ALL -T ALL" From c86ec3568c7ed11ce38584b654b91594eb0d235a Mon Sep 17 00:00:00 2001 From: Yuriy Zveryanskyy Date: Wed, 12 Feb 2014 11:03:18 +0200 Subject: [PATCH 0718/4704] Fix hook script for Ironic cleanup_ironic should not be started on "unstack" phase, otherwise API service not restarted 
because auth cache directory removed. Change-Id: I3da86b9fb8c3ce1185aff05df0fa83cf259708f4 --- extras.d/50-ironic.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/extras.d/50-ironic.sh b/extras.d/50-ironic.sh index f68a14680f..9e61dc5d78 100644 --- a/extras.d/50-ironic.sh +++ b/extras.d/50-ironic.sh @@ -28,6 +28,9 @@ if is_service_enabled ir-api ir-cond; then if [[ "$1" == "unstack" ]]; then stop_ironic + fi + + if [[ "$1" == "clean" ]]; then cleanup_ironic fi fi From 2b69f23625f988d17574d746773e2932ca109427 Mon Sep 17 00:00:00 2001 From: tanlin Date: Wed, 12 Feb 2014 16:11:32 +0800 Subject: [PATCH 0719/4704] Rename Openstack to OpenStack Change-Id: I78ac040e926ef8040ee674b6fea3223a8ab4ae99 --- openrc | 2 +- tools/create_userrc.sh | 2 +- tools/jenkins/README.md | 2 +- tools/xen/README.md | 4 ++-- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/openrc b/openrc index 784b00e51b..fc066ad82c 100644 --- a/openrc +++ b/openrc @@ -67,7 +67,7 @@ GLANCE_HOST=${GLANCE_HOST:-$HOST_IP} # Identity API version export OS_IDENTITY_API_VERSION=${IDENTITY_API_VERSION:-2.0} -# Authenticating against an Openstack cloud using Keystone returns a **Token** +# Authenticating against an OpenStack cloud using Keystone returns a **Token** # and **Service Catalog**. The catalog contains the endpoints for all services # the user/tenant has access to - including nova, glance, keystone, swift, ... # We currently recommend using the 2.0 *identity api*. 
diff --git a/tools/create_userrc.sh b/tools/create_userrc.sh index e2d855c4df..1c9565145b 100755 --- a/tools/create_userrc.sh +++ b/tools/create_userrc.sh @@ -199,7 +199,7 @@ export EC2_URL="$EC2_URL" export S3_URL="$S3_URL" # OpenStack USER ID = $user_id export OS_USERNAME="$user_name" -# Openstack Tenant ID = $tenant_id +# OpenStack Tenant ID = $tenant_id export OS_TENANT_NAME="$tenant_name" export OS_AUTH_URL="$OS_AUTH_URL" export OS_CACERT="$OS_CACERT" diff --git a/tools/jenkins/README.md b/tools/jenkins/README.md index 371017db1a..3586da9c66 100644 --- a/tools/jenkins/README.md +++ b/tools/jenkins/README.md @@ -1,6 +1,6 @@ Getting Started With Jenkins and Devstack ========================================= -This little corner of devstack is to show how to get an Openstack jenkins +This little corner of devstack is to show how to get an OpenStack jenkins environment up and running quickly, using the rcb configuration methodology. diff --git a/tools/xen/README.md b/tools/xen/README.md index ee1abcc091..712782bc5f 100644 --- a/tools/xen/README.md +++ b/tools/xen/README.md @@ -1,11 +1,11 @@ # Getting Started With XenServer and Devstack The purpose of the code in this directory it to help developers bootstrap a -XenServer 6.2 (older versions may also work) + Openstack development +XenServer 6.2 (older versions may also work) + OpenStack development environment. This file gives some pointers on how to get started. Xenserver is a Type 1 hypervisor, so it is best installed on bare metal. The -Openstack services are configured to run within a virtual machine (called OS +OpenStack services are configured to run within a virtual machine (called OS domU) on the XenServer host. The VM uses the XAPI toolstack to communicate with the host over a network connection (see `MGT_BRIDGE_OR_NET_NAME`). 
From 1814e671d3af0231aa18a08d3406d54332f9b4ef Mon Sep 17 00:00:00 2001 From: Sahid Orentino Ferdjaoui Date: Tue, 11 Feb 2014 17:56:07 +0100 Subject: [PATCH 0720/4704] Fix bug "Invalid OpenStack Nova credentials." on the gate During the process, when create_userrc.sh tries to creates certificates and sourcable rc, it assumes that all users have the same password. Change-Id: Ifb57a43aad439ffe041e98465719a8a8eceae544 Closes-Bug: #1260723 --- lib/swift | 11 ++++++++--- tools/create_userrc.sh | 8 ++++++++ 2 files changed, 16 insertions(+), 3 deletions(-) diff --git a/lib/swift b/lib/swift index 0febb00f60..be25c81468 100644 --- a/lib/swift +++ b/lib/swift @@ -520,6 +520,11 @@ function create_swift_disk() { # swifttenanttest2 swiftusertest2 admin function create_swift_accounts() { + # Defines specific passwords used by tools/create_userrc.sh + SWIFTUSERTEST1_PASSWORD=testing + SWIFTUSERTEST2_PASSWORD=testing2 + SWIFTUSERTEST3_PASSWORD=testing3 + KEYSTONE_CATALOG_BACKEND=${KEYSTONE_CATALOG_BACKEND:-sql} SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") @@ -542,17 +547,17 @@ function create_swift_accounts() { SWIFT_TENANT_TEST1=$(keystone tenant-create --name=swifttenanttest1 | grep " id " | get_field 2) die_if_not_set $LINENO SWIFT_TENANT_TEST1 "Failure creating SWIFT_TENANT_TEST1" - SWIFT_USER_TEST1=$(keystone user-create --name=swiftusertest1 --pass=testing --email=test@example.com | grep " id " | get_field 2) + SWIFT_USER_TEST1=$(keystone user-create --name=swiftusertest1 --pass=$SWIFTUSERTEST1_PASSWORD --email=test@example.com | grep " id " | get_field 2) die_if_not_set $LINENO SWIFT_USER_TEST1 "Failure creating SWIFT_USER_TEST1" keystone user-role-add --user-id $SWIFT_USER_TEST1 --role-id $ADMIN_ROLE --tenant-id $SWIFT_TENANT_TEST1 - SWIFT_USER_TEST3=$(keystone user-create --name=swiftusertest3 --pass=testing3 --email=test3@example.com | grep " id " | get_field 2) + SWIFT_USER_TEST3=$(keystone user-create --name=swiftusertest3 
--pass=$SWIFTUSERTEST3_PASSWORD --email=test3@example.com | grep " id " | get_field 2) die_if_not_set $LINENO SWIFT_USER_TEST3 "Failure creating SWIFT_USER_TEST3" keystone user-role-add --user-id $SWIFT_USER_TEST3 --role-id $ANOTHER_ROLE --tenant-id $SWIFT_TENANT_TEST1 SWIFT_TENANT_TEST2=$(keystone tenant-create --name=swifttenanttest2 | grep " id " | get_field 2) die_if_not_set $LINENO SWIFT_TENANT_TEST2 "Failure creating SWIFT_TENANT_TEST2" - SWIFT_USER_TEST2=$(keystone user-create --name=swiftusertest2 --pass=testing2 --email=test2@example.com | grep " id " | get_field 2) + SWIFT_USER_TEST2=$(keystone user-create --name=swiftusertest2 --pass=$SWIFTUSERTEST2_PASSWORD --email=test2@example.com | grep " id " | get_field 2) die_if_not_set $LINENO SWIFT_USER_TEST2 "Failure creating SWIFT_USER_TEST2" keystone user-role-add --user-id $SWIFT_USER_TEST2 --role-id $ADMIN_ROLE --tenant-id $SWIFT_TENANT_TEST2 } diff --git a/tools/create_userrc.sh b/tools/create_userrc.sh index e2d855c4df..d9c93cc476 100755 --- a/tools/create_userrc.sh +++ b/tools/create_userrc.sh @@ -253,6 +253,14 @@ if [ $MODE != "create" ]; then if [ $MODE = one -a "$user_name" != "$USER_NAME" ]; then continue; fi + + # Checks for a specific password defined for an user. + # Example for an username johndoe: + # JOHNDOE_PASSWORD=1234 + eval SPECIFIC_UPASSWORD="\$${USER_NAME^^}_PASSWORD" + if [ -n "$SPECIFIC_UPASSWORD" ]; then + USER_PASS=$SPECIFIC_UPASSWORD + fi add_entry "$user_id" "$user_name" "$tenant_id" "$tenant_name" "$USER_PASS" done done From dff49a242eef817efa23d4e808aaa6a74ac82ed0 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Thu, 30 Jan 2014 15:37:40 -0600 Subject: [PATCH 0721/4704] Split functions Move shared and non-DevStack-specific functions to `functions-common`. This is a code move only with some updated comments. The functions are now sorted alphabetically within function groups, eg. all git-related functions are grouped together. The groups are listed at the top of the file. 
'functions' sources 'functions-common' so no additional changes are required for backward-compatability. All functions shared with Grenade have also been moved. functions-common was created from commit e0ed8ea038299952826b27a16753775472f108d8 Change-Id: I73bf7134fd6a60ec1ea44a5bfab08b0569b60ded --- functions | 1422 +-------------------------------------------- functions-common | 1433 ++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 1439 insertions(+), 1416 deletions(-) create mode 100644 functions-common diff --git a/functions b/functions index dc3278b56d..5eae7fe510 100644 --- a/functions +++ b/functions @@ -1,563 +1,21 @@ -# functions - Common functions used by DevStack components +# functions - DevStack-specific functions # # The following variables are assumed to be defined by certain functions: # # - ``ENABLED_SERVICES`` -# - ``ERROR_ON_CLONE`` # - ``FILES`` # - ``GLANCE_HOSTPORT`` -# - ``OFFLINE`` -# - ``PIP_DOWNLOAD_CACHE`` -# - ``PIP_USE_MIRRORS`` -# - ``RECLONE`` # - ``TRACK_DEPENDS`` -# - ``http_proxy``, ``https_proxy``, ``no_proxy`` +# Include the common functions +FUNC_DIR=$(cd $(dirname "${BASH_SOURCE:-$0}") && pwd) +source ${FUNC_DIR}/functions-common # Save trace setting XTRACE=$(set +o | grep xtrace) set +o xtrace -# Convert CIDR notation to a IPv4 netmask -# cidr2netmask cidr-bits -function cidr2netmask() { - local maskpat="255 255 255 255" - local maskdgt="254 252 248 240 224 192 128" - set -- ${maskpat:0:$(( ($1 / 8) * 4 ))}${maskdgt:$(( (7 - ($1 % 8)) * 4 )):3} - echo ${1-0}.${2-0}.${3-0}.${4-0} -} - - -# Return the network portion of the given IP address using netmask -# netmask is in the traditional dotted-quad format -# maskip ip-address netmask -function maskip() { - local ip=$1 - local mask=$2 - local l="${ip%.*}"; local r="${ip#*.}"; local n="${mask%.*}"; local m="${mask#*.}" - local subnet=$((${ip%%.*}&${mask%%.*})).$((${r%%.*}&${m%%.*})).$((${l##*.}&${n##*.})).$((${ip##*.}&${mask##*.})) - echo $subnet -} - - -# Exit 0 if 
address is in network or 1 if address is not in network -# ip-range is in CIDR notation: 1.2.3.4/20 -# address_in_net ip-address ip-range -function address_in_net() { - local ip=$1 - local range=$2 - local masklen=${range#*/} - local network=$(maskip ${range%/*} $(cidr2netmask $masklen)) - local subnet=$(maskip $ip $(cidr2netmask $masklen)) - [[ $network == $subnet ]] -} - - -# Wrapper for ``apt-get`` to set cache and proxy environment variables -# Uses globals ``OFFLINE``, ``*_proxy`` -# apt_get operation package [package ...] -function apt_get() { - [[ "$OFFLINE" = "True" || -z "$@" ]] && return - local sudo="sudo" - [[ "$(id -u)" = "0" ]] && sudo="env" - $sudo DEBIAN_FRONTEND=noninteractive \ - http_proxy=$http_proxy https_proxy=$https_proxy \ - no_proxy=$no_proxy \ - apt-get --option "Dpkg::Options::=--force-confold" --assume-yes "$@" -} - - -# Gracefully cp only if source file/dir exists -# cp_it source destination -function cp_it { - if [ -e $1 ] || [ -d $1 ]; then - cp -pRL $1 $2 - fi -} - - -# Prints backtrace info -# filename:lineno:function -function backtrace { - local level=$1 - local deep=$((${#BASH_SOURCE[@]} - 1)) - echo "[Call Trace]" - while [ $level -le $deep ]; do - echo "${BASH_SOURCE[$deep]}:${BASH_LINENO[$deep-1]}:${FUNCNAME[$deep-1]}" - deep=$((deep - 1)) - done -} - - -# Prints line number and "message" then exits -# die $LINENO "message" -function die() { - local exitcode=$? - set +o xtrace - local line=$1; shift - if [ $exitcode == 0 ]; then - exitcode=1 - fi - backtrace 2 - err $line "$*" - exit $exitcode -} - - -# Checks an environment variable is not set or has length 0 OR if the -# exit code is non-zero and prints "message" and exits -# NOTE: env-var is the variable name without a '$' -# die_if_not_set $LINENO env-var "message" -function die_if_not_set() { - local exitcode=$? - FXTRACE=$(set +o | grep xtrace) - set +o xtrace - local line=$1; shift - local evar=$1; shift - if ! 
is_set $evar || [ $exitcode != 0 ]; then - die $line "$*" - fi - $FXTRACE -} - - -# Prints line number and "message" in error format -# err $LINENO "message" -function err() { - local exitcode=$? - errXTRACE=$(set +o | grep xtrace) - set +o xtrace - local msg="[ERROR] ${BASH_SOURCE[2]}:$1 $2" - echo $msg 1>&2; - if [[ -n ${SCREEN_LOGDIR} ]]; then - echo $msg >> "${SCREEN_LOGDIR}/error.log" - fi - $errXTRACE - return $exitcode -} - - -# Checks an environment variable is not set or has length 0 OR if the -# exit code is non-zero and prints "message" -# NOTE: env-var is the variable name without a '$' -# err_if_not_set $LINENO env-var "message" -function err_if_not_set() { - local exitcode=$? - errinsXTRACE=$(set +o | grep xtrace) - set +o xtrace - local line=$1; shift - local evar=$1; shift - if ! is_set $evar || [ $exitcode != 0 ]; then - err $line "$*" - fi - $errinsXTRACE - return $exitcode -} - - -# Prints line number and "message" in warning format -# warn $LINENO "message" -function warn() { - local exitcode=$? - errXTRACE=$(set +o | grep xtrace) - set +o xtrace - local msg="[WARNING] ${BASH_SOURCE[2]}:$1 $2" - echo $msg 1>&2; - if [[ -n ${SCREEN_LOGDIR} ]]; then - echo $msg >> "${SCREEN_LOGDIR}/error.log" - fi - $errXTRACE - return $exitcode -} - - -# HTTP and HTTPS proxy servers are supported via the usual environment variables [1] -# ``http_proxy``, ``https_proxy`` and ``no_proxy``. 
They can be set in -# ``localrc`` or on the command line if necessary:: -# -# [1] http://www.w3.org/Daemon/User/Proxies/ProxyClients.html -# -# http_proxy=http://proxy.example.com:3128/ no_proxy=repo.example.net ./stack.sh - -function export_proxy_variables() { - if [[ -n "$http_proxy" ]]; then - export http_proxy=$http_proxy - fi - if [[ -n "$https_proxy" ]]; then - export https_proxy=$https_proxy - fi - if [[ -n "$no_proxy" ]]; then - export no_proxy=$no_proxy - fi -} - - -# Grab a numbered field from python prettytable output -# Fields are numbered starting with 1 -# Reverse syntax is supported: -1 is the last field, -2 is second to last, etc. -# get_field field-number -function get_field() { - while read data; do - if [ "$1" -lt 0 ]; then - field="(\$(NF$1))" - else - field="\$$(($1 + 1))" - fi - echo "$data" | awk -F'[ \t]*\\|[ \t]*' "{print $field}" - done -} - - -# Get the default value for HOST_IP -# get_default_host_ip fixed_range floating_range host_ip_iface host_ip -function get_default_host_ip() { - local fixed_range=$1 - local floating_range=$2 - local host_ip_iface=$3 - local host_ip=$4 - - # Find the interface used for the default route - host_ip_iface=${host_ip_iface:-$(ip route | sed -n '/^default/{ s/.*dev \(\w\+\)\s\+.*/\1/; p; }' | head -1)} - # Search for an IP unless an explicit is set by ``HOST_IP`` environment variable - if [ -z "$host_ip" -o "$host_ip" == "dhcp" ]; then - host_ip="" - host_ips=`LC_ALL=C ip -f inet addr show ${host_ip_iface} | awk '/inet/ {split($2,parts,"/"); print parts[1]}'` - for IP in $host_ips; do - # Attempt to filter out IP addresses that are part of the fixed and - # floating range. Note that this method only works if the ``netaddr`` - # python library is installed. If it is not installed, an error - # will be printed and the first IP from the interface will be used. - # If that is not correct set ``HOST_IP`` in ``localrc`` to the correct - # address. - if ! 
(address_in_net $IP $fixed_range || address_in_net $IP $floating_range); then - host_ip=$IP - break; - fi - done - fi - echo $host_ip -} - - -function _get_package_dir() { - local pkg_dir - if is_ubuntu; then - pkg_dir=$FILES/apts - elif is_fedora; then - pkg_dir=$FILES/rpms - elif is_suse; then - pkg_dir=$FILES/rpms-suse - else - exit_distro_not_supported "list of packages" - fi - echo "$pkg_dir" -} - - -# get_packages() collects a list of package names of any type from the -# prerequisite files in ``files/{apts|rpms}``. The list is intended -# to be passed to a package installer such as apt or yum. -# -# Only packages required for the services in 1st argument will be -# included. Two bits of metadata are recognized in the prerequisite files: -# -# - ``# NOPRIME`` defers installation to be performed later in `stack.sh` -# - ``# dist:DISTRO`` or ``dist:DISTRO1,DISTRO2`` limits the selection -# of the package to the distros listed. The distro names are case insensitive. -function get_packages() { - local services=$@ - local package_dir=$(_get_package_dir) - local file_to_parse - local service - - if [[ -z "$package_dir" ]]; then - echo "No package directory supplied" - return 1 - fi - if [[ -z "$DISTRO" ]]; then - GetDistro - fi - for service in ${services//,/ }; do - # Allow individual services to specify dependencies - if [[ -e ${package_dir}/${service} ]]; then - file_to_parse="${file_to_parse} $service" - fi - # NOTE(sdague) n-api needs glance for now because that's where - # glance client is - if [[ $service == n-api ]]; then - if [[ ! $file_to_parse =~ nova ]]; then - file_to_parse="${file_to_parse} nova" - fi - if [[ ! $file_to_parse =~ glance ]]; then - file_to_parse="${file_to_parse} glance" - fi - elif [[ $service == c-* ]]; then - if [[ ! $file_to_parse =~ cinder ]]; then - file_to_parse="${file_to_parse} cinder" - fi - elif [[ $service == ceilometer-* ]]; then - if [[ ! 
$file_to_parse =~ ceilometer ]]; then - file_to_parse="${file_to_parse} ceilometer" - fi - elif [[ $service == s-* ]]; then - if [[ ! $file_to_parse =~ swift ]]; then - file_to_parse="${file_to_parse} swift" - fi - elif [[ $service == n-* ]]; then - if [[ ! $file_to_parse =~ nova ]]; then - file_to_parse="${file_to_parse} nova" - fi - elif [[ $service == g-* ]]; then - if [[ ! $file_to_parse =~ glance ]]; then - file_to_parse="${file_to_parse} glance" - fi - elif [[ $service == key* ]]; then - if [[ ! $file_to_parse =~ keystone ]]; then - file_to_parse="${file_to_parse} keystone" - fi - elif [[ $service == q-* ]]; then - if [[ ! $file_to_parse =~ neutron ]]; then - file_to_parse="${file_to_parse} neutron" - fi - fi - done - - for file in ${file_to_parse}; do - local fname=${package_dir}/${file} - local OIFS line package distros distro - [[ -e $fname ]] || continue - - OIFS=$IFS - IFS=$'\n' - for line in $(<${fname}); do - if [[ $line =~ "NOPRIME" ]]; then - continue - fi - - # Assume we want this package - package=${line%#*} - inst_pkg=1 - - # Look for # dist:xxx in comment - if [[ $line =~ (.*)#.*dist:([^ ]*) ]]; then - # We are using BASH regexp matching feature. - package=${BASH_REMATCH[1]} - distros=${BASH_REMATCH[2]} - # In bash ${VAR,,} will lowecase VAR - # Look for a match in the distro list - if [[ ! ${distros,,} =~ ${DISTRO,,} ]]; then - # If no match then skip this package - inst_pkg=0 - fi - fi - - # Look for # testonly in comment - if [[ $line =~ (.*)#.*testonly.* ]]; then - package=${BASH_REMATCH[1]} - # Are we installing test packages? 
(test for the default value) - if [[ $INSTALL_TESTONLY_PACKAGES = "False" ]]; then - # If not installing test packages the skip this package - inst_pkg=0 - fi - fi - - if [[ $inst_pkg = 1 ]]; then - echo $package - fi - done - IFS=$OIFS - done -} - - -# Determine OS Vendor, Release and Update -# Tested with OS/X, Ubuntu, RedHat, CentOS, Fedora -# Returns results in global variables: -# os_VENDOR - vendor name -# os_RELEASE - release -# os_UPDATE - update -# os_PACKAGE - package type -# os_CODENAME - vendor's codename for release -# GetOSVersion -GetOSVersion() { - # Figure out which vendor we are - if [[ -x "`which sw_vers 2>/dev/null`" ]]; then - # OS/X - os_VENDOR=`sw_vers -productName` - os_RELEASE=`sw_vers -productVersion` - os_UPDATE=${os_RELEASE##*.} - os_RELEASE=${os_RELEASE%.*} - os_PACKAGE="" - if [[ "$os_RELEASE" =~ "10.7" ]]; then - os_CODENAME="lion" - elif [[ "$os_RELEASE" =~ "10.6" ]]; then - os_CODENAME="snow leopard" - elif [[ "$os_RELEASE" =~ "10.5" ]]; then - os_CODENAME="leopard" - elif [[ "$os_RELEASE" =~ "10.4" ]]; then - os_CODENAME="tiger" - elif [[ "$os_RELEASE" =~ "10.3" ]]; then - os_CODENAME="panther" - else - os_CODENAME="" - fi - elif [[ -x $(which lsb_release 2>/dev/null) ]]; then - os_VENDOR=$(lsb_release -i -s) - os_RELEASE=$(lsb_release -r -s) - os_UPDATE="" - os_PACKAGE="rpm" - if [[ "Debian,Ubuntu,LinuxMint" =~ $os_VENDOR ]]; then - os_PACKAGE="deb" - elif [[ "SUSE LINUX" =~ $os_VENDOR ]]; then - lsb_release -d -s | grep -q openSUSE - if [[ $? 
-eq 0 ]]; then - os_VENDOR="openSUSE" - fi - elif [[ $os_VENDOR == "openSUSE project" ]]; then - os_VENDOR="openSUSE" - elif [[ $os_VENDOR =~ Red.*Hat ]]; then - os_VENDOR="Red Hat" - fi - os_CODENAME=$(lsb_release -c -s) - elif [[ -r /etc/redhat-release ]]; then - # Red Hat Enterprise Linux Server release 5.5 (Tikanga) - # Red Hat Enterprise Linux Server release 7.0 Beta (Maipo) - # CentOS release 5.5 (Final) - # CentOS Linux release 6.0 (Final) - # Fedora release 16 (Verne) - # XenServer release 6.2.0-70446c (xenenterprise) - os_CODENAME="" - for r in "Red Hat" CentOS Fedora XenServer; do - os_VENDOR=$r - if [[ -n "`grep \"$r\" /etc/redhat-release`" ]]; then - ver=`sed -e 's/^.* \([0-9].*\) (\(.*\)).*$/\1\|\2/' /etc/redhat-release` - os_CODENAME=${ver#*|} - os_RELEASE=${ver%|*} - os_UPDATE=${os_RELEASE##*.} - os_RELEASE=${os_RELEASE%.*} - break - fi - os_VENDOR="" - done - os_PACKAGE="rpm" - elif [[ -r /etc/SuSE-release ]]; then - for r in openSUSE "SUSE Linux"; do - if [[ "$r" = "SUSE Linux" ]]; then - os_VENDOR="SUSE LINUX" - else - os_VENDOR=$r - fi - - if [[ -n "`grep \"$r\" /etc/SuSE-release`" ]]; then - os_CODENAME=`grep "CODENAME = " /etc/SuSE-release | sed 's:.* = ::g'` - os_RELEASE=`grep "VERSION = " /etc/SuSE-release | sed 's:.* = ::g'` - os_UPDATE=`grep "PATCHLEVEL = " /etc/SuSE-release | sed 's:.* = ::g'` - break - fi - os_VENDOR="" - done - os_PACKAGE="rpm" - # If lsb_release is not installed, we should be able to detect Debian OS - elif [[ -f /etc/debian_version ]] && [[ $(cat /proc/version) =~ "Debian" ]]; then - os_VENDOR="Debian" - os_PACKAGE="deb" - os_CODENAME=$(awk '/VERSION=/' /etc/os-release | sed 's/VERSION=//' | sed -r 's/\"|\(|\)//g' | awk '{print $2}') - os_RELEASE=$(awk '/VERSION_ID=/' /etc/os-release | sed 's/VERSION_ID=//' | sed 's/\"//g') - fi - export os_VENDOR os_RELEASE os_UPDATE os_PACKAGE os_CODENAME -} - - -# Translate the OS version values into common nomenclature -# Sets ``DISTRO`` from the ``os_*`` values -function 
GetDistro() { - GetOSVersion - if [[ "$os_VENDOR" =~ (Ubuntu) || "$os_VENDOR" =~ (Debian) ]]; then - # 'Everyone' refers to Ubuntu / Debian releases by the code name adjective - DISTRO=$os_CODENAME - elif [[ "$os_VENDOR" =~ (Fedora) ]]; then - # For Fedora, just use 'f' and the release - DISTRO="f$os_RELEASE" - elif [[ "$os_VENDOR" =~ (openSUSE) ]]; then - DISTRO="opensuse-$os_RELEASE" - elif [[ "$os_VENDOR" =~ (SUSE LINUX) ]]; then - # For SLE, also use the service pack - if [[ -z "$os_UPDATE" ]]; then - DISTRO="sle${os_RELEASE}" - else - DISTRO="sle${os_RELEASE}sp${os_UPDATE}" - fi - elif [[ "$os_VENDOR" =~ (Red Hat) || "$os_VENDOR" =~ (CentOS) ]]; then - # Drop the . release as we assume it's compatible - DISTRO="rhel${os_RELEASE::1}" - elif [[ "$os_VENDOR" =~ (XenServer) ]]; then - DISTRO="xs$os_RELEASE" - else - # Catch-all for now is Vendor + Release + Update - DISTRO="$os_VENDOR-$os_RELEASE.$os_UPDATE" - fi - export DISTRO -} - - -# Determine if current distribution is a Fedora-based distribution -# (Fedora, RHEL, CentOS, etc). -# is_fedora -function is_fedora { - if [[ -z "$os_VENDOR" ]]; then - GetOSVersion - fi - - [ "$os_VENDOR" = "Fedora" ] || [ "$os_VENDOR" = "Red Hat" ] || [ "$os_VENDOR" = "CentOS" ] -} - - -# Determine if current distribution is a SUSE-based distribution -# (openSUSE, SLE). -# is_suse -function is_suse { - if [[ -z "$os_VENDOR" ]]; then - GetOSVersion - fi - - [ "$os_VENDOR" = "openSUSE" ] || [ "$os_VENDOR" = "SUSE LINUX" ] -} - - -# Determine if current distribution is an Ubuntu-based distribution -# It will also detect non-Ubuntu but Debian-based distros -# is_ubuntu -function is_ubuntu { - if [[ -z "$os_PACKAGE" ]]; then - GetOSVersion - fi - [ "$os_PACKAGE" = "deb" ] -} - - -# Exit after outputting a message about the distribution not being supported. 
-# exit_distro_not_supported [optional-string-telling-what-is-missing] -function exit_distro_not_supported { - if [[ -z "$DISTRO" ]]; then - GetDistro - fi - - if [ $# -gt 0 ]; then - die $LINENO "Support for $DISTRO is incomplete: no support for $@" - else - die $LINENO "Support for $DISTRO is incomplete." - fi -} - -# Utility function for checking machine architecture -# is_arch arch-type -function is_arch { - ARCH_TYPE=$1 - - [[ "$(uname -m)" == "$ARCH_TYPE" ]] -} - # Checks if installed Apache is <= given version # $1 = x.y.z (version string of Apache) function check_apache_version { @@ -570,488 +28,6 @@ function check_apache_version { expr "$version" '>=' $1 > /dev/null } -# git clone only if directory doesn't exist already. Since ``DEST`` might not -# be owned by the installation user, we create the directory and change the -# ownership to the proper user. -# Set global RECLONE=yes to simulate a clone when dest-dir exists -# Set global ERROR_ON_CLONE=True to abort execution with an error if the git repo -# does not exist (default is False, meaning the repo will be cloned). -# Uses global ``OFFLINE`` -# git_clone remote dest-dir branch -function git_clone { - GIT_REMOTE=$1 - GIT_DEST=$2 - GIT_REF=$3 - RECLONE=$(trueorfalse False $RECLONE) - - if [[ "$OFFLINE" = "True" ]]; then - echo "Running in offline mode, clones already exist" - # print out the results so we know what change was used in the logs - cd $GIT_DEST - git show --oneline | head -1 - return - fi - - if echo $GIT_REF | egrep -q "^refs"; then - # If our branch name is a gerrit style refs/changes/... - if [[ ! -d $GIT_DEST ]]; then - [[ "$ERROR_ON_CLONE" = "True" ]] && \ - die $LINENO "Cloning not allowed in this configuration" - git clone $GIT_REMOTE $GIT_DEST - fi - cd $GIT_DEST - git fetch $GIT_REMOTE $GIT_REF && git checkout FETCH_HEAD - else - # do a full clone only if the directory doesn't exist - if [[ ! 
-d $GIT_DEST ]]; then - [[ "$ERROR_ON_CLONE" = "True" ]] && \ - die $LINENO "Cloning not allowed in this configuration" - git clone $GIT_REMOTE $GIT_DEST - cd $GIT_DEST - # This checkout syntax works for both branches and tags - git checkout $GIT_REF - elif [[ "$RECLONE" = "True" ]]; then - # if it does exist then simulate what clone does if asked to RECLONE - cd $GIT_DEST - # set the url to pull from and fetch - git remote set-url origin $GIT_REMOTE - git fetch origin - # remove the existing ignored files (like pyc) as they cause breakage - # (due to the py files having older timestamps than our pyc, so python - # thinks the pyc files are correct using them) - find $GIT_DEST -name '*.pyc' -delete - - # handle GIT_REF accordingly to type (tag, branch) - if [[ -n "`git show-ref refs/tags/$GIT_REF`" ]]; then - git_update_tag $GIT_REF - elif [[ -n "`git show-ref refs/heads/$GIT_REF`" ]]; then - git_update_branch $GIT_REF - elif [[ -n "`git show-ref refs/remotes/origin/$GIT_REF`" ]]; then - git_update_remote_branch $GIT_REF - else - die $LINENO "$GIT_REF is neither branch nor tag" - fi - - fi - fi - - # print out the results so we know what change was used in the logs - cd $GIT_DEST - git show --oneline | head -1 -} - - -# git update using reference as a branch. -# git_update_branch ref -function git_update_branch() { - - GIT_BRANCH=$1 - - git checkout -f origin/$GIT_BRANCH - # a local branch might not exist - git branch -D $GIT_BRANCH || true - git checkout -b $GIT_BRANCH -} - - -# git update using reference as a branch. -# git_update_remote_branch ref -function git_update_remote_branch() { - - GIT_BRANCH=$1 - - git checkout -b $GIT_BRANCH -t origin/$GIT_BRANCH -} - - -# git update using reference as a tag. 
Be careful editing source at that repo -# as working copy will be in a detached mode -# git_update_tag ref -function git_update_tag() { - - GIT_TAG=$1 - - git tag -d $GIT_TAG - # fetching given tag only - git fetch origin tag $GIT_TAG - git checkout -f $GIT_TAG -} - - -# Comment an option in an INI file -# inicomment config-file section option -function inicomment() { - local file=$1 - local section=$2 - local option=$3 - sed -i -e "/^\[$section\]/,/^\[.*\]/ s|^\($option[ \t]*=.*$\)|#\1|" "$file" -} - - -# Uncomment an option in an INI file -# iniuncomment config-file section option -function iniuncomment() { - local file=$1 - local section=$2 - local option=$3 - sed -i -e "/^\[$section\]/,/^\[.*\]/ s|[^ \t]*#[ \t]*\($option[ \t]*=.*$\)|\1|" "$file" -} - - -# Get an option from an INI file -# iniget config-file section option -function iniget() { - local file=$1 - local section=$2 - local option=$3 - local line - line=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file") - echo ${line#*=} -} - - -# Determinate is the given option present in the INI file -# ini_has_option config-file section option -function ini_has_option() { - local file=$1 - local section=$2 - local option=$3 - local line - line=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file") - [ -n "$line" ] -} - - -# Set an option in an INI file -# iniset config-file section option value -function iniset() { - local file=$1 - local section=$2 - local option=$3 - local value=$4 - - [[ -z $section || -z $option ]] && return - - if ! grep -q "^\[$section\]" "$file" 2>/dev/null; then - # Add section at the end - echo -e "\n[$section]" >>"$file" - fi - if ! 
ini_has_option "$file" "$section" "$option"; then - # Add it - sed -i -e "/^\[$section\]/ a\\ -$option = $value -" "$file" - else - local sep=$(echo -ne "\x01") - # Replace it - sed -i -e '/^\['${section}'\]/,/^\[.*\]/ s'${sep}'^\('${option}'[ \t]*=[ \t]*\).*$'${sep}'\1'"${value}"${sep} "$file" - fi -} - - -# Get a multiple line option from an INI file -# iniget_multiline config-file section option -function iniget_multiline() { - local file=$1 - local section=$2 - local option=$3 - local values - values=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { s/^$option[ \t]*=[ \t]*//gp; }" "$file") - echo ${values} -} - - -# Set a multiple line option in an INI file -# iniset_multiline config-file section option value1 value2 valu3 ... -function iniset_multiline() { - local file=$1 - local section=$2 - local option=$3 - shift 3 - local values - for v in $@; do - # The later sed command inserts each new value in the line next to - # the section identifier, which causes the values to be inserted in - # the reverse order. Do a reverse here to keep the original order. - values="$v ${values}" - done - if ! grep -q "^\[$section\]" "$file"; then - # Add section at the end - echo -e "\n[$section]" >>"$file" - else - # Remove old values - sed -i -e "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ d; }" "$file" - fi - # Add new ones - for v in $values; do - sed -i -e "/^\[$section\]/ a\\ -$option = $v -" "$file" - done -} - - -# Append a new option in an ini file without replacing the old value -# iniadd config-file section option value1 value2 value3 ... -function iniadd() { - local file=$1 - local section=$2 - local option=$3 - shift 3 - local values="$(iniget_multiline $file $section $option) $@" - iniset_multiline $file $section $option $values -} - -# Find out if a process exists by partial name. -# is_running name -function is_running() { - local name=$1 - ps auxw | grep -v grep | grep ${name} > /dev/null - RC=$? 
- # some times I really hate bash reverse binary logic - return $RC -} - - -# is_service_enabled() checks if the service(s) specified as arguments are -# enabled by the user in ``ENABLED_SERVICES``. -# -# Multiple services specified as arguments are ``OR``'ed together; the test -# is a short-circuit boolean, i.e it returns on the first match. -# -# There are special cases for some 'catch-all' services:: -# **nova** returns true if any service enabled start with **n-** -# **cinder** returns true if any service enabled start with **c-** -# **ceilometer** returns true if any service enabled start with **ceilometer** -# **glance** returns true if any service enabled start with **g-** -# **neutron** returns true if any service enabled start with **q-** -# **swift** returns true if any service enabled start with **s-** -# **trove** returns true if any service enabled start with **tr-** -# For backward compatibility if we have **swift** in ENABLED_SERVICES all the -# **s-** services will be enabled. This will be deprecated in the future. -# -# Cells within nova is enabled if **n-cell** is in ``ENABLED_SERVICES``. -# We also need to make sure to treat **n-cell-region** and **n-cell-child** -# as enabled in this case. -# -# Uses global ``ENABLED_SERVICES`` -# is_service_enabled service [service ...] -function is_service_enabled() { - services=$@ - for service in ${services}; do - [[ ,${ENABLED_SERVICES}, =~ ,${service}, ]] && return 0 - - # Look for top-level 'enabled' function for this service - if type is_${service}_enabled >/dev/null 2>&1; then - # A function exists for this service, use it - is_${service}_enabled - return $? 
- fi - - # TODO(dtroyer): Remove these legacy special-cases after the is_XXX_enabled() - # are implemented - [[ ${service} == n-cell-* && ${ENABLED_SERVICES} =~ "n-cell" ]] && return 0 - [[ ${service} == "nova" && ${ENABLED_SERVICES} =~ "n-" ]] && return 0 - [[ ${service} == "cinder" && ${ENABLED_SERVICES} =~ "c-" ]] && return 0 - [[ ${service} == "ceilometer" && ${ENABLED_SERVICES} =~ "ceilometer-" ]] && return 0 - [[ ${service} == "glance" && ${ENABLED_SERVICES} =~ "g-" ]] && return 0 - [[ ${service} == "ironic" && ${ENABLED_SERVICES} =~ "ir-" ]] && return 0 - [[ ${service} == "neutron" && ${ENABLED_SERVICES} =~ "q-" ]] && return 0 - [[ ${service} == "trove" && ${ENABLED_SERVICES} =~ "tr-" ]] && return 0 - [[ ${service} == "swift" && ${ENABLED_SERVICES} =~ "s-" ]] && return 0 - [[ ${service} == s-* && ${ENABLED_SERVICES} =~ "swift" ]] && return 0 - done - return 1 -} - - -# remove extra commas from the input string (i.e. ``ENABLED_SERVICES``) -# _cleanup_service_list service-list -function _cleanup_service_list () { - echo "$1" | sed -e ' - s/,,/,/g; - s/^,//; - s/,$// - ' -} - - -# enable_service() adds the services passed as argument to the -# ``ENABLED_SERVICES`` list, if they are not already present. -# -# For example: -# enable_service qpid -# -# This function does not know about the special cases -# for nova, glance, and neutron built into is_service_enabled(). -# Uses global ``ENABLED_SERVICES`` -# enable_service service [service ...] -function enable_service() { - local tmpsvcs="${ENABLED_SERVICES}" - for service in $@; do - if ! is_service_enabled $service; then - tmpsvcs+=",$service" - fi - done - ENABLED_SERVICES=$(_cleanup_service_list "$tmpsvcs") - disable_negated_services -} - - -# disable_service() removes the services passed as argument to the -# ``ENABLED_SERVICES`` list, if they are present. 
-# -# For example: -# disable_service rabbit -# -# This function does not know about the special cases -# for nova, glance, and neutron built into is_service_enabled(). -# Uses global ``ENABLED_SERVICES`` -# disable_service service [service ...] -function disable_service() { - local tmpsvcs=",${ENABLED_SERVICES}," - local service - for service in $@; do - if is_service_enabled $service; then - tmpsvcs=${tmpsvcs//,$service,/,} - fi - done - ENABLED_SERVICES=$(_cleanup_service_list "$tmpsvcs") -} - - -# disable_all_services() removes all current services -# from ``ENABLED_SERVICES`` to reset the configuration -# before a minimal installation -# Uses global ``ENABLED_SERVICES`` -# disable_all_services -function disable_all_services() { - ENABLED_SERVICES="" -} - - -# Remove all services starting with '-'. For example, to install all default -# services except rabbit (rabbit) set in ``localrc``: -# ENABLED_SERVICES+=",-rabbit" -# Uses global ``ENABLED_SERVICES`` -# disable_negated_services -function disable_negated_services() { - local tmpsvcs="${ENABLED_SERVICES}" - local service - for service in ${tmpsvcs//,/ }; do - if [[ ${service} == -* ]]; then - tmpsvcs=$(echo ${tmpsvcs}|sed -r "s/(,)?(-)?${service#-}(,)?/,/g") - fi - done - ENABLED_SERVICES=$(_cleanup_service_list "$tmpsvcs") -} - - -# Distro-agnostic package installer -# install_package package [package ...] -function install_package() { - if is_ubuntu; then - [[ "$NO_UPDATE_REPOS" = "True" ]] || apt_get update - NO_UPDATE_REPOS=True - - apt_get install "$@" - elif is_fedora; then - yum_install "$@" - elif is_suse; then - zypper_install "$@" - else - exit_distro_not_supported "installing packages" - fi -} - - -# Distro-agnostic package uninstaller -# uninstall_package package [package ...] 
-function uninstall_package() { - if is_ubuntu; then - apt_get purge "$@" - elif is_fedora; then - sudo yum remove -y "$@" - elif is_suse; then - sudo zypper rm "$@" - else - exit_distro_not_supported "uninstalling packages" - fi -} - - -# Distro-agnostic function to tell if a package is installed -# is_package_installed package [package ...] -function is_package_installed() { - if [[ -z "$@" ]]; then - return 1 - fi - - if [[ -z "$os_PACKAGE" ]]; then - GetOSVersion - fi - - if [[ "$os_PACKAGE" = "deb" ]]; then - dpkg -s "$@" > /dev/null 2> /dev/null - elif [[ "$os_PACKAGE" = "rpm" ]]; then - rpm --quiet -q "$@" - else - exit_distro_not_supported "finding if a package is installed" - fi -} - - -# Test if the named environment variable is set and not zero length -# is_set env-var -function is_set() { - local var=\$"$1" - eval "[ -n \"$var\" ]" # For ex.: sh -c "[ -n \"$var\" ]" would be better, but several exercises depends on this -} - - -# Wrapper for ``pip install`` to set cache and proxy environment variables -# Uses globals ``OFFLINE``, ``PIP_DOWNLOAD_CACHE``, ``PIP_USE_MIRRORS``, -# ``TRACK_DEPENDS``, ``*_proxy`` -# pip_install package [package ...] -function pip_install { - [[ "$OFFLINE" = "True" || -z "$@" ]] && return - if [[ -z "$os_PACKAGE" ]]; then - GetOSVersion - fi - if [[ $TRACK_DEPENDS = True ]]; then - source $DEST/.venv/bin/activate - CMD_PIP=$DEST/.venv/bin/pip - SUDO_PIP="env" - else - SUDO_PIP="sudo" - CMD_PIP=$(get_pip_command) - fi - - # Mirror option not needed anymore because pypi has CDN available, - # but it's useful in certain circumstances - PIP_USE_MIRRORS=${PIP_USE_MIRRORS:-False} - if [[ "$PIP_USE_MIRRORS" != "False" ]]; then - PIP_MIRROR_OPT="--use-mirrors" - fi - - # pip < 1.4 has a bug where it will use an already existing build - # directory unconditionally. Say an earlier component installs - # foo v1.1; pip will have built foo's source in - # /tmp/$USER-pip-build. 
Even if a later component specifies foo < - # 1.1, the existing extracted build will be used and cause - # confusing errors. By creating unique build directories we avoid - # this problem. See https://github.com/pypa/pip/issues/709 - local pip_build_tmp=$(mktemp --tmpdir -d pip-build.XXXXX) - - $SUDO_PIP PIP_DOWNLOAD_CACHE=${PIP_DOWNLOAD_CACHE:-/var/cache/pip} \ - HTTP_PROXY=$http_proxy \ - HTTPS_PROXY=$https_proxy \ - NO_PROXY=$no_proxy \ - $CMD_PIP install --build=${pip_build_tmp} \ - $PIP_MIRROR_OPT $@ \ - && $SUDO_PIP rm -rf ${pip_build_tmp} -} - # Cleanup anything from /tmp on unstack # clean_tmp @@ -1062,243 +38,6 @@ function cleanup_tmp { sudo rm -rf ${tmp_dir}/pip-build.* } -# Service wrapper to restart services -# restart_service service-name -function restart_service() { - if is_ubuntu; then - sudo /usr/sbin/service $1 restart - else - sudo /sbin/service $1 restart - fi -} - - -# _run_process() is designed to be backgrounded by run_process() to simulate a -# fork. It includes the dirty work of closing extra filehandles and preparing log -# files to produce the same logs as screen_it(). The log filename is derived -# from the service name and global-and-now-misnamed SCREEN_LOGDIR -# _run_process service "command-line" -function _run_process() { - local service=$1 - local command="$2" - - # Undo logging redirections and close the extra descriptors - exec 1>&3 - exec 2>&3 - exec 3>&- - exec 6>&- - - if [[ -n ${SCREEN_LOGDIR} ]]; then - exec 1>&${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log 2>&1 - ln -sf ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${1}.log - - # TODO(dtroyer): Hack to get stdout from the Python interpreter for the logs. - export PYTHONUNBUFFERED=1 - fi - - exec /bin/bash -c "$command" - die "$service exec failure: $command" -} - - -# run_process() launches a child process that closes all file descriptors and -# then exec's the passed in command. 
This is meant to duplicate the semantics -# of screen_it() without screen. PIDs are written to -# $SERVICE_DIR/$SCREEN_NAME/$service.pid -# run_process service "command-line" -function run_process() { - local service=$1 - local command="$2" - - # Spawn the child process - _run_process "$service" "$command" & - echo $! -} - - -# Helper to launch a service in a named screen -# screen_it service "command-line" -function screen_it { - SCREEN_NAME=${SCREEN_NAME:-stack} - SERVICE_DIR=${SERVICE_DIR:-${DEST}/status} - USE_SCREEN=$(trueorfalse True $USE_SCREEN) - - if is_service_enabled $1; then - # Append the service to the screen rc file - screen_rc "$1" "$2" - - if [[ "$USE_SCREEN" = "True" ]]; then - screen -S $SCREEN_NAME -X screen -t $1 - - if [[ -n ${SCREEN_LOGDIR} ]]; then - screen -S $SCREEN_NAME -p $1 -X logfile ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log - screen -S $SCREEN_NAME -p $1 -X log on - ln -sf ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${1}.log - fi - - # sleep to allow bash to be ready to be send the command - we are - # creating a new window in screen and then sends characters, so if - # bash isn't running by the time we send the command, nothing happens - sleep 1.5 - - NL=`echo -ne '\015'` - # This fun command does the following: - # - the passed server command is backgrounded - # - the pid of the background process is saved in the usual place - # - the server process is brought back to the foreground - # - if the server process exits prematurely the fg command errors - # and a message is written to stdout and the service failure file - # The pid saved can be used in screen_stop() as a process group - # id to kill off all child processes - screen -S $SCREEN_NAME -p $1 -X stuff "$2 & echo \$! 
>$SERVICE_DIR/$SCREEN_NAME/$1.pid; fg || echo \"$1 failed to start\" | tee \"$SERVICE_DIR/$SCREEN_NAME/$1.failure\"$NL" - else - # Spawn directly without screen - run_process "$1" "$2" >$SERVICE_DIR/$SCREEN_NAME/$1.pid - fi - fi -} - - -# Stop a service in screen -# If a PID is available use it, kill the whole process group via TERM -# If screen is being used kill the screen window; this will catch processes -# that did not leave a PID behind -# screen_stop service -function screen_stop() { - SCREEN_NAME=${SCREEN_NAME:-stack} - SERVICE_DIR=${SERVICE_DIR:-${DEST}/status} - USE_SCREEN=$(trueorfalse True $USE_SCREEN) - - if is_service_enabled $1; then - # Kill via pid if we have one available - if [[ -r $SERVICE_DIR/$SCREEN_NAME/$1.pid ]]; then - pkill -TERM -P -$(cat $SERVICE_DIR/$SCREEN_NAME/$1.pid) - rm $SERVICE_DIR/$SCREEN_NAME/$1.pid - fi - if [[ "$USE_SCREEN" = "True" ]]; then - # Clean up the screen window - screen -S $SCREEN_NAME -p $1 -X kill - fi - fi -} - - -# Screen rc file builder -# screen_rc service "command-line" -function screen_rc { - SCREEN_NAME=${SCREEN_NAME:-stack} - SCREENRC=$TOP_DIR/$SCREEN_NAME-screenrc - if [[ ! -e $SCREENRC ]]; then - # Name the screen session - echo "sessionname $SCREEN_NAME" > $SCREENRC - # Set a reasonable statusbar - echo "hardstatus alwayslastline '$SCREEN_HARDSTATUS'" >> $SCREENRC - # Some distributions override PROMPT_COMMAND for the screen terminal type - turn that off - echo "setenv PROMPT_COMMAND /bin/true" >> $SCREENRC - echo "screen -t shell bash" >> $SCREENRC - fi - # If this service doesn't already exist in the screenrc file - if ! 
grep $1 $SCREENRC 2>&1 > /dev/null; then - NL=`echo -ne '\015'` - echo "screen -t $1 bash" >> $SCREENRC - echo "stuff \"$2$NL\"" >> $SCREENRC - - if [[ -n ${SCREEN_LOGDIR} ]]; then - echo "logfile ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log" >>$SCREENRC - echo "log on" >>$SCREENRC - fi - fi -} - - -# Helper to remove the ``*.failure`` files under ``$SERVICE_DIR/$SCREEN_NAME``. -# This is used for ``service_check`` when all the ``screen_it`` are called finished -# init_service_check -function init_service_check() { - SCREEN_NAME=${SCREEN_NAME:-stack} - SERVICE_DIR=${SERVICE_DIR:-${DEST}/status} - - if [[ ! -d "$SERVICE_DIR/$SCREEN_NAME" ]]; then - mkdir -p "$SERVICE_DIR/$SCREEN_NAME" - fi - - rm -f "$SERVICE_DIR/$SCREEN_NAME"/*.failure -} - - -# Helper to get the status of each running service -# service_check -function service_check() { - local service - local failures - SCREEN_NAME=${SCREEN_NAME:-stack} - SERVICE_DIR=${SERVICE_DIR:-${DEST}/status} - - - if [[ ! -d "$SERVICE_DIR/$SCREEN_NAME" ]]; then - echo "No service status directory found" - return - fi - - # Check if there is any falure flag file under $SERVICE_DIR/$SCREEN_NAME - failures=`ls "$SERVICE_DIR/$SCREEN_NAME"/*.failure 2>/dev/null` - - for service in $failures; do - service=`basename $service` - service=${service%.failure} - echo "Error: Service $service is not running" - done - - if [ -n "$failures" ]; then - echo "More details about the above errors can be found with screen, with ./rejoin-stack.sh" - fi -} - -# Returns true if the directory is on a filesystem mounted via NFS. -function is_nfs_directory() { - local mount_type=`stat -f -L -c %T $1` - test "$mount_type" == "nfs" -} - -# Only run the command if the target file (the last arg) is not on an -# NFS filesystem. -function _safe_permission_operation() { - local args=( $@ ) - local last - local sudo_cmd - local dir_to_check - - let last="${#args[*]} - 1" - - dir_to_check=${args[$last]} - if [ ! 
-d "$dir_to_check" ]; then - dir_to_check=`dirname "$dir_to_check"` - fi - - if is_nfs_directory "$dir_to_check" ; then - return 0 - fi - - if [[ $TRACK_DEPENDS = True ]]; then - sudo_cmd="env" - else - sudo_cmd="sudo" - fi - - $sudo_cmd $@ -} - -# Only change ownership of a file or directory if it is not on an NFS -# filesystem. -function safe_chown() { - _safe_permission_operation chown $@ -} - -# Only change permissions of a file or directory if it is not on an -# NFS filesystem. -function safe_chmod() { - _safe_permission_operation chmod $@ -} # ``pip install -e`` the package, which processes the dependencies # using pip before running `setup.py develop` @@ -1340,6 +79,7 @@ function setup_develop() { fi } + # ``pip install -e`` the package, which processes the dependencies # using pip before running `setup.py develop` # Uses globals ``STACK_USER`` @@ -1353,43 +93,6 @@ function setup_develop_no_requirements_update() { } -# Service wrapper to start services -# start_service service-name -function start_service() { - if is_ubuntu; then - sudo /usr/sbin/service $1 start - else - sudo /sbin/service $1 start - fi -} - - -# Service wrapper to stop services -# stop_service service-name -function stop_service() { - if is_ubuntu; then - sudo /usr/sbin/service $1 stop - else - sudo /sbin/service $1 stop - fi -} - - -# Normalize config values to True or False -# Accepts as False: 0 no No NO false False FALSE -# Accepts as True: 1 yes Yes YES true True TRUE -# VAR=$(trueorfalse default-value test-value) -function trueorfalse() { - local default=$1 - local testval=$2 - - [[ -z "$testval" ]] && { echo "$default"; return; } - [[ "0 no No NO false False FALSE" =~ "$testval" ]] && { echo "False"; return; } - [[ "1 yes Yes YES true True TRUE" =~ "$testval" ]] && { echo "True"; return; } - echo "$default" -} - - # Retrieve an image from a URL and upload into Glance. 
# Uses the following variables: # @@ -1685,23 +388,6 @@ function use_database { } -# Toggle enable/disable_service for services that must run exclusive of each other -# $1 The name of a variable containing a space-separated list of services -# $2 The name of a variable in which to store the enabled service's name -# $3 The name of the service to enable -function use_exclusive_service { - local options=${!1} - local selection=$3 - out=$2 - [ -z $selection ] || [[ ! "$options" =~ "$selection" ]] && return 1 - for opt in $options;do - [[ "$opt" = "$selection" ]] && enable_service $opt || disable_service $opt - done - eval "$out=$selection" - return 0 -} - - # Wait for an HTTP server to start answering requests # wait_for_service timeout url function wait_for_service() { @@ -1711,30 +397,6 @@ function wait_for_service() { } -# Wrapper for ``yum`` to set proxy environment variables -# Uses globals ``OFFLINE``, ``*_proxy`` -# yum_install package [package ...] -function yum_install() { - [[ "$OFFLINE" = "True" ]] && return - local sudo="sudo" - [[ "$(id -u)" = "0" ]] && sudo="env" - $sudo http_proxy=$http_proxy https_proxy=$https_proxy \ - no_proxy=$no_proxy \ - yum install -y "$@" -} - - -# zypper wrapper to set arguments correctly -# zypper_install package [package ...] -function zypper_install() { - [[ "$OFFLINE" = "True" ]] && return - local sudo="sudo" - [[ "$(id -u)" = "0" ]] && sudo="env" - $sudo http_proxy=$http_proxy https_proxy=$https_proxy \ - zypper --non-interactive install --auto-agree-with-licenses "$@" -} - - # ping check # Uses globals ``ENABLED_SERVICES`` # ping_check from-net ip boot-timeout expected @@ -1809,36 +471,6 @@ function _ssh_check_novanet() { } -# Add a user to a group. -# add_user_to_group user group -function add_user_to_group() { - local user=$1 - local group=$2 - - if [[ -z "$os_VENDOR" ]]; then - GetOSVersion - fi - - # SLE11 and openSUSE 12.2 don't have the usual usermod - if ! 
is_suse || [[ "$os_VENDOR" = "openSUSE" && "$os_RELEASE" != "12.2" ]]; then - sudo usermod -a -G "$group" "$user" - else - sudo usermod -A "$group" "$user" - fi -} - - -# Get the path to the direcotry where python executables are installed. -# get_python_exec_prefix -function get_python_exec_prefix() { - if is_fedora || is_suse; then - echo "/usr/bin" - else - echo "/usr/local/bin" - fi -} - - # Get the location of the $module-rootwrap executables, where module is cinder # or nova. # get_rootwrap_location module @@ -1849,17 +481,6 @@ function get_rootwrap_location() { } -# Get the path to the pip command. -# get_pip_command -function get_pip_command() { - which pip || which pip-python - - if [ $? -ne 0 ]; then - die $LINENO "Unable to find pip; cannot continue" - fi -} - - # Path permissions sanity check # check_path_perm_sanity path function check_path_perm_sanity() { @@ -1944,37 +565,6 @@ vercmp_numbers() { } -# ``policy_add policy_file policy_name policy_permissions`` -# -# Add a policy to a policy.json file -# Do nothing if the policy already exists - -function policy_add() { - local policy_file=$1 - local policy_name=$2 - local policy_perm=$3 - - if grep -q ${policy_name} ${policy_file}; then - echo "Policy ${policy_name} already exists in ${policy_file}" - return - fi - - # Add a terminating comma to policy lines without one - # Remove the closing '}' and all lines following to the end-of-file - local tmpfile=$(mktemp) - uniq ${policy_file} | sed -e ' - s/]$/],/ - /^[}]/,$d - ' > ${tmpfile} - - # Append policy and closing brace - echo " \"${policy_name}\": ${policy_perm}" >>${tmpfile} - echo "}" >>${tmpfile} - - mv ${tmpfile} ${policy_file} -} - - # This function sets log formatting options for colorizing log # output to stdout. It is meant to be called by lib modules. 
# The last two parameters are optional and can be used to specify @@ -1994,10 +584,10 @@ function setup_colorized_logging() { iniset $conf_file $conf_section logging_exception_prefix "%(color)s%(asctime)s.%(msecs)03d TRACE %(name)s %(instance)s" } + # Restore xtrace $XTRACE - # Local variables: # mode: shell-script # End: diff --git a/functions-common b/functions-common new file mode 100644 index 0000000000..0cecb0b9fb --- /dev/null +++ b/functions-common @@ -0,0 +1,1433 @@ +# functions-common - Common functions used by DevStack components +# +# The canonical copy of this file is maintained in the DevStack repo. +# All modifications should be made there and then sync'ed to other repos +# as required. +# +# This file is sorted alphabetically within the function groups. +# +# - Config Functions +# - Control Functions +# - Distro Functions +# - Git Functions +# - OpenStack Functions +# - Package Functions +# - Process Functions +# - Python Functions +# - Service Functions +# +# The following variables are assumed to be defined by certain functions: +# +# - ``ENABLED_SERVICES`` +# - ``ERROR_ON_CLONE`` +# - ``FILES`` +# - ``OFFLINE`` +# - ``PIP_DOWNLOAD_CACHE`` +# - ``PIP_USE_MIRRORS`` +# - ``RECLONE`` +# - ``TRACK_DEPENDS`` +# - ``http_proxy``, ``https_proxy``, ``no_proxy`` + +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +# Config Functions +# ================ + +# Append a new option in an ini file without replacing the old value +# iniadd config-file section option value1 value2 value3 ... 
+function iniadd() { + local file=$1 + local section=$2 + local option=$3 + shift 3 + local values="$(iniget_multiline $file $section $option) $@" + iniset_multiline $file $section $option $values +} + +# Comment an option in an INI file +# inicomment config-file section option +function inicomment() { + local file=$1 + local section=$2 + local option=$3 + sed -i -e "/^\[$section\]/,/^\[.*\]/ s|^\($option[ \t]*=.*$\)|#\1|" "$file" +} + +# Get an option from an INI file +# iniget config-file section option +function iniget() { + local file=$1 + local section=$2 + local option=$3 + local line + line=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file") + echo ${line#*=} +} + +# Get a multiple line option from an INI file +# iniget_multiline config-file section option +function iniget_multiline() { + local file=$1 + local section=$2 + local option=$3 + local values + values=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { s/^$option[ \t]*=[ \t]*//gp; }" "$file") + echo ${values} +} + +# Determinate is the given option present in the INI file +# ini_has_option config-file section option +function ini_has_option() { + local file=$1 + local section=$2 + local option=$3 + local line + line=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file") + [ -n "$line" ] +} + +# Set an option in an INI file +# iniset config-file section option value +function iniset() { + local file=$1 + local section=$2 + local option=$3 + local value=$4 + + [[ -z $section || -z $option ]] && return + + if ! grep -q "^\[$section\]" "$file" 2>/dev/null; then + # Add section at the end + echo -e "\n[$section]" >>"$file" + fi + if ! 
ini_has_option "$file" "$section" "$option"; then + # Add it + sed -i -e "/^\[$section\]/ a\\ +$option = $value +" "$file" + else + local sep=$(echo -ne "\x01") + # Replace it + sed -i -e '/^\['${section}'\]/,/^\[.*\]/ s'${sep}'^\('${option}'[ \t]*=[ \t]*\).*$'${sep}'\1'"${value}"${sep} "$file" + fi +} + +# Set a multiple line option in an INI file +# iniset_multiline config-file section option value1 value2 valu3 ... +function iniset_multiline() { + local file=$1 + local section=$2 + local option=$3 + shift 3 + local values + for v in $@; do + # The later sed command inserts each new value in the line next to + # the section identifier, which causes the values to be inserted in + # the reverse order. Do a reverse here to keep the original order. + values="$v ${values}" + done + if ! grep -q "^\[$section\]" "$file"; then + # Add section at the end + echo -e "\n[$section]" >>"$file" + else + # Remove old values + sed -i -e "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ d; }" "$file" + fi + # Add new ones + for v in $values; do + sed -i -e "/^\[$section\]/ a\\ +$option = $v +" "$file" + done +} + +# Uncomment an option in an INI file +# iniuncomment config-file section option +function iniuncomment() { + local file=$1 + local section=$2 + local option=$3 + sed -i -e "/^\[$section\]/,/^\[.*\]/ s|[^ \t]*#[ \t]*\($option[ \t]*=.*$\)|\1|" "$file" +} + +# Normalize config values to True or False +# Accepts as False: 0 no No NO false False FALSE +# Accepts as True: 1 yes Yes YES true True TRUE +# VAR=$(trueorfalse default-value test-value) +function trueorfalse() { + local default=$1 + local testval=$2 + + [[ -z "$testval" ]] && { echo "$default"; return; } + [[ "0 no No NO false False FALSE" =~ "$testval" ]] && { echo "False"; return; } + [[ "1 yes Yes YES true True TRUE" =~ "$testval" ]] && { echo "True"; return; } + echo "$default" +} + + +# Control Functions +# ================= + +# Prints backtrace info +# filename:lineno:function +# backtrace level +function 
backtrace { + local level=$1 + local deep=$((${#BASH_SOURCE[@]} - 1)) + echo "[Call Trace]" + while [ $level -le $deep ]; do + echo "${BASH_SOURCE[$deep]}:${BASH_LINENO[$deep-1]}:${FUNCNAME[$deep-1]}" + deep=$((deep - 1)) + done +} + +# Prints line number and "message" then exits +# die $LINENO "message" +function die() { + local exitcode=$? + set +o xtrace + local line=$1; shift + if [ $exitcode == 0 ]; then + exitcode=1 + fi + backtrace 2 + err $line "$*" + exit $exitcode +} + +# Checks an environment variable is not set or has length 0 OR if the +# exit code is non-zero and prints "message" and exits +# NOTE: env-var is the variable name without a '$' +# die_if_not_set $LINENO env-var "message" +function die_if_not_set() { + local exitcode=$? + FXTRACE=$(set +o | grep xtrace) + set +o xtrace + local line=$1; shift + local evar=$1; shift + if ! is_set $evar || [ $exitcode != 0 ]; then + die $line "$*" + fi + $FXTRACE +} + +# Prints line number and "message" in error format +# err $LINENO "message" +function err() { + local exitcode=$? + errXTRACE=$(set +o | grep xtrace) + set +o xtrace + local msg="[ERROR] ${BASH_SOURCE[2]}:$1 $2" + echo $msg 1>&2; + if [[ -n ${SCREEN_LOGDIR} ]]; then + echo $msg >> "${SCREEN_LOGDIR}/error.log" + fi + $errXTRACE + return $exitcode +} + +# Checks an environment variable is not set or has length 0 OR if the +# exit code is non-zero and prints "message" +# NOTE: env-var is the variable name without a '$' +# err_if_not_set $LINENO env-var "message" +function err_if_not_set() { + local exitcode=$? + errinsXTRACE=$(set +o | grep xtrace) + set +o xtrace + local line=$1; shift + local evar=$1; shift + if ! is_set $evar || [ $exitcode != 0 ]; then + err $line "$*" + fi + $errinsXTRACE + return $exitcode +} + +# Exit after outputting a message about the distribution not being supported. 
+# exit_distro_not_supported [optional-string-telling-what-is-missing] +function exit_distro_not_supported { + if [[ -z "$DISTRO" ]]; then + GetDistro + fi + + if [ $# -gt 0 ]; then + die $LINENO "Support for $DISTRO is incomplete: no support for $@" + else + die $LINENO "Support for $DISTRO is incomplete." + fi +} + +# Test if the named environment variable is set and not zero length +# is_set env-var +function is_set() { + local var=\$"$1" + eval "[ -n \"$var\" ]" # For ex.: sh -c "[ -n \"$var\" ]" would be better, but several exercises depends on this +} + +# Prints line number and "message" in warning format +# warn $LINENO "message" +function warn() { + local exitcode=$? + errXTRACE=$(set +o | grep xtrace) + set +o xtrace + local msg="[WARNING] ${BASH_SOURCE[2]}:$1 $2" + echo $msg 1>&2; + if [[ -n ${SCREEN_LOGDIR} ]]; then + echo $msg >> "${SCREEN_LOGDIR}/error.log" + fi + $errXTRACE + return $exitcode +} + + +# Distro Functions +# ================ + +# Determine OS Vendor, Release and Update +# Tested with OS/X, Ubuntu, RedHat, CentOS, Fedora +# Returns results in global variables: +# os_VENDOR - vendor name +# os_RELEASE - release +# os_UPDATE - update +# os_PACKAGE - package type +# os_CODENAME - vendor's codename for release +# GetOSVersion +GetOSVersion() { + # Figure out which vendor we are + if [[ -x "`which sw_vers 2>/dev/null`" ]]; then + # OS/X + os_VENDOR=`sw_vers -productName` + os_RELEASE=`sw_vers -productVersion` + os_UPDATE=${os_RELEASE##*.} + os_RELEASE=${os_RELEASE%.*} + os_PACKAGE="" + if [[ "$os_RELEASE" =~ "10.7" ]]; then + os_CODENAME="lion" + elif [[ "$os_RELEASE" =~ "10.6" ]]; then + os_CODENAME="snow leopard" + elif [[ "$os_RELEASE" =~ "10.5" ]]; then + os_CODENAME="leopard" + elif [[ "$os_RELEASE" =~ "10.4" ]]; then + os_CODENAME="tiger" + elif [[ "$os_RELEASE" =~ "10.3" ]]; then + os_CODENAME="panther" + else + os_CODENAME="" + fi + elif [[ -x $(which lsb_release 2>/dev/null) ]]; then + os_VENDOR=$(lsb_release -i -s) + 
os_RELEASE=$(lsb_release -r -s) + os_UPDATE="" + os_PACKAGE="rpm" + if [[ "Debian,Ubuntu,LinuxMint" =~ $os_VENDOR ]]; then + os_PACKAGE="deb" + elif [[ "SUSE LINUX" =~ $os_VENDOR ]]; then + lsb_release -d -s | grep -q openSUSE + if [[ $? -eq 0 ]]; then + os_VENDOR="openSUSE" + fi + elif [[ $os_VENDOR == "openSUSE project" ]]; then + os_VENDOR="openSUSE" + elif [[ $os_VENDOR =~ Red.*Hat ]]; then + os_VENDOR="Red Hat" + fi + os_CODENAME=$(lsb_release -c -s) + elif [[ -r /etc/redhat-release ]]; then + # Red Hat Enterprise Linux Server release 5.5 (Tikanga) + # Red Hat Enterprise Linux Server release 7.0 Beta (Maipo) + # CentOS release 5.5 (Final) + # CentOS Linux release 6.0 (Final) + # Fedora release 16 (Verne) + # XenServer release 6.2.0-70446c (xenenterprise) + os_CODENAME="" + for r in "Red Hat" CentOS Fedora XenServer; do + os_VENDOR=$r + if [[ -n "`grep \"$r\" /etc/redhat-release`" ]]; then + ver=`sed -e 's/^.* \([0-9].*\) (\(.*\)).*$/\1\|\2/' /etc/redhat-release` + os_CODENAME=${ver#*|} + os_RELEASE=${ver%|*} + os_UPDATE=${os_RELEASE##*.} + os_RELEASE=${os_RELEASE%.*} + break + fi + os_VENDOR="" + done + os_PACKAGE="rpm" + elif [[ -r /etc/SuSE-release ]]; then + for r in openSUSE "SUSE Linux"; do + if [[ "$r" = "SUSE Linux" ]]; then + os_VENDOR="SUSE LINUX" + else + os_VENDOR=$r + fi + + if [[ -n "`grep \"$r\" /etc/SuSE-release`" ]]; then + os_CODENAME=`grep "CODENAME = " /etc/SuSE-release | sed 's:.* = ::g'` + os_RELEASE=`grep "VERSION = " /etc/SuSE-release | sed 's:.* = ::g'` + os_UPDATE=`grep "PATCHLEVEL = " /etc/SuSE-release | sed 's:.* = ::g'` + break + fi + os_VENDOR="" + done + os_PACKAGE="rpm" + # If lsb_release is not installed, we should be able to detect Debian OS + elif [[ -f /etc/debian_version ]] && [[ $(cat /proc/version) =~ "Debian" ]]; then + os_VENDOR="Debian" + os_PACKAGE="deb" + os_CODENAME=$(awk '/VERSION=/' /etc/os-release | sed 's/VERSION=//' | sed -r 's/\"|\(|\)//g' | awk '{print $2}') + os_RELEASE=$(awk '/VERSION_ID=/' /etc/os-release | 
sed 's/VERSION_ID=//' | sed 's/\"//g') + fi + export os_VENDOR os_RELEASE os_UPDATE os_PACKAGE os_CODENAME +} + +# Translate the OS version values into common nomenclature +# Sets global ``DISTRO`` from the ``os_*`` values +function GetDistro() { + GetOSVersion + if [[ "$os_VENDOR" =~ (Ubuntu) || "$os_VENDOR" =~ (Debian) ]]; then + # 'Everyone' refers to Ubuntu / Debian releases by the code name adjective + DISTRO=$os_CODENAME + elif [[ "$os_VENDOR" =~ (Fedora) ]]; then + # For Fedora, just use 'f' and the release + DISTRO="f$os_RELEASE" + elif [[ "$os_VENDOR" =~ (openSUSE) ]]; then + DISTRO="opensuse-$os_RELEASE" + elif [[ "$os_VENDOR" =~ (SUSE LINUX) ]]; then + # For SLE, also use the service pack + if [[ -z "$os_UPDATE" ]]; then + DISTRO="sle${os_RELEASE}" + else + DISTRO="sle${os_RELEASE}sp${os_UPDATE}" + fi + elif [[ "$os_VENDOR" =~ (Red Hat) || "$os_VENDOR" =~ (CentOS) ]]; then + # Drop the . release as we assume it's compatible + DISTRO="rhel${os_RELEASE::1}" + elif [[ "$os_VENDOR" =~ (XenServer) ]]; then + DISTRO="xs$os_RELEASE" + else + # Catch-all for now is Vendor + Release + Update + DISTRO="$os_VENDOR-$os_RELEASE.$os_UPDATE" + fi + export DISTRO +} + +# Utility function for checking machine architecture +# is_arch arch-type +function is_arch { + ARCH_TYPE=$1 + + [[ "$(uname -m)" == "$ARCH_TYPE" ]] +} + +# Determine if current distribution is a Fedora-based distribution +# (Fedora, RHEL, CentOS, etc). +# is_fedora +function is_fedora { + if [[ -z "$os_VENDOR" ]]; then + GetOSVersion + fi + + [ "$os_VENDOR" = "Fedora" ] || [ "$os_VENDOR" = "Red Hat" ] || [ "$os_VENDOR" = "CentOS" ] +} + + +# Determine if current distribution is a SUSE-based distribution +# (openSUSE, SLE). 
+# is_suse +function is_suse { + if [[ -z "$os_VENDOR" ]]; then + GetOSVersion + fi + + [ "$os_VENDOR" = "openSUSE" ] || [ "$os_VENDOR" = "SUSE LINUX" ] +} + + +# Determine if current distribution is an Ubuntu-based distribution +# It will also detect non-Ubuntu but Debian-based distros +# is_ubuntu +function is_ubuntu { + if [[ -z "$os_PACKAGE" ]]; then + GetOSVersion + fi + [ "$os_PACKAGE" = "deb" ] +} + + +# Git Functions +# ============= + +# git clone only if directory doesn't exist already. Since ``DEST`` might not +# be owned by the installation user, we create the directory and change the +# ownership to the proper user. +# Set global RECLONE=yes to simulate a clone when dest-dir exists +# Set global ERROR_ON_CLONE=True to abort execution with an error if the git repo +# does not exist (default is False, meaning the repo will be cloned). +# Uses global ``OFFLINE`` +# git_clone remote dest-dir branch +function git_clone { + GIT_REMOTE=$1 + GIT_DEST=$2 + GIT_REF=$3 + RECLONE=$(trueorfalse False $RECLONE) + + if [[ "$OFFLINE" = "True" ]]; then + echo "Running in offline mode, clones already exist" + # print out the results so we know what change was used in the logs + cd $GIT_DEST + git show --oneline | head -1 + return + fi + + if echo $GIT_REF | egrep -q "^refs"; then + # If our branch name is a gerrit style refs/changes/... + if [[ ! -d $GIT_DEST ]]; then + [[ "$ERROR_ON_CLONE" = "True" ]] && \ + die $LINENO "Cloning not allowed in this configuration" + git clone $GIT_REMOTE $GIT_DEST + fi + cd $GIT_DEST + git fetch $GIT_REMOTE $GIT_REF && git checkout FETCH_HEAD + else + # do a full clone only if the directory doesn't exist + if [[ ! 
-d $GIT_DEST ]]; then + [[ "$ERROR_ON_CLONE" = "True" ]] && \ + die $LINENO "Cloning not allowed in this configuration" + git clone $GIT_REMOTE $GIT_DEST + cd $GIT_DEST + # This checkout syntax works for both branches and tags + git checkout $GIT_REF + elif [[ "$RECLONE" = "True" ]]; then + # if it does exist then simulate what clone does if asked to RECLONE + cd $GIT_DEST + # set the url to pull from and fetch + git remote set-url origin $GIT_REMOTE + git fetch origin + # remove the existing ignored files (like pyc) as they cause breakage + # (due to the py files having older timestamps than our pyc, so python + # thinks the pyc files are correct using them) + find $GIT_DEST -name '*.pyc' -delete + + # handle GIT_REF accordingly to type (tag, branch) + if [[ -n "`git show-ref refs/tags/$GIT_REF`" ]]; then + git_update_tag $GIT_REF + elif [[ -n "`git show-ref refs/heads/$GIT_REF`" ]]; then + git_update_branch $GIT_REF + elif [[ -n "`git show-ref refs/remotes/origin/$GIT_REF`" ]]; then + git_update_remote_branch $GIT_REF + else + die $LINENO "$GIT_REF is neither branch nor tag" + fi + + fi + fi + + # print out the results so we know what change was used in the logs + cd $GIT_DEST + git show --oneline | head -1 +} + +# git update using reference as a branch. +# git_update_branch ref +function git_update_branch() { + + GIT_BRANCH=$1 + + git checkout -f origin/$GIT_BRANCH + # a local branch might not exist + git branch -D $GIT_BRANCH || true + git checkout -b $GIT_BRANCH +} + +# git update using reference as a branch. +# git_update_remote_branch ref +function git_update_remote_branch() { + + GIT_BRANCH=$1 + + git checkout -b $GIT_BRANCH -t origin/$GIT_BRANCH +} + +# git update using reference as a tag. 
Be careful editing source at that repo +# as working copy will be in a detached mode +# git_update_tag ref +function git_update_tag() { + + GIT_TAG=$1 + + git tag -d $GIT_TAG + # fetching given tag only + git fetch origin tag $GIT_TAG + git checkout -f $GIT_TAG +} + + +# OpenStack Functions +# =================== + +# Get the default value for HOST_IP +# get_default_host_ip fixed_range floating_range host_ip_iface host_ip +function get_default_host_ip() { + local fixed_range=$1 + local floating_range=$2 + local host_ip_iface=$3 + local host_ip=$4 + + # Find the interface used for the default route + host_ip_iface=${host_ip_iface:-$(ip route | sed -n '/^default/{ s/.*dev \(\w\+\)\s\+.*/\1/; p; }' | head -1)} + # Search for an IP unless an explicit is set by ``HOST_IP`` environment variable + if [ -z "$host_ip" -o "$host_ip" == "dhcp" ]; then + host_ip="" + host_ips=`LC_ALL=C ip -f inet addr show ${host_ip_iface} | awk '/inet/ {split($2,parts,"/"); print parts[1]}'` + for IP in $host_ips; do + # Attempt to filter out IP addresses that are part of the fixed and + # floating range. Note that this method only works if the ``netaddr`` + # python library is installed. If it is not installed, an error + # will be printed and the first IP from the interface will be used. + # If that is not correct set ``HOST_IP`` in ``localrc`` to the correct + # address. + if ! (address_in_net $IP $fixed_range || address_in_net $IP $floating_range); then + host_ip=$IP + break; + fi + done + fi + echo $host_ip +} + +# Grab a numbered field from python prettytable output +# Fields are numbered starting with 1 +# Reverse syntax is supported: -1 is the last field, -2 is second to last, etc. 
+# get_field field-number +function get_field() { + while read data; do + if [ "$1" -lt 0 ]; then + field="(\$(NF$1))" + else + field="\$$(($1 + 1))" + fi + echo "$data" | awk -F'[ \t]*\\|[ \t]*' "{print $field}" + done +} + +# Add a policy to a policy.json file +# Do nothing if the policy already exists +# ``policy_add policy_file policy_name policy_permissions`` +function policy_add() { + local policy_file=$1 + local policy_name=$2 + local policy_perm=$3 + + if grep -q ${policy_name} ${policy_file}; then + echo "Policy ${policy_name} already exists in ${policy_file}" + return + fi + + # Add a terminating comma to policy lines without one + # Remove the closing '}' and all lines following to the end-of-file + local tmpfile=$(mktemp) + uniq ${policy_file} | sed -e ' + s/]$/],/ + /^[}]/,$d + ' > ${tmpfile} + + # Append policy and closing brace + echo " \"${policy_name}\": ${policy_perm}" >>${tmpfile} + echo "}" >>${tmpfile} + + mv ${tmpfile} ${policy_file} +} + + +# Package Functions +# ================= + +# _get_package_dir +function _get_package_dir() { + local pkg_dir + if is_ubuntu; then + pkg_dir=$FILES/apts + elif is_fedora; then + pkg_dir=$FILES/rpms + elif is_suse; then + pkg_dir=$FILES/rpms-suse + else + exit_distro_not_supported "list of packages" + fi + echo "$pkg_dir" +} + +# Wrapper for ``apt-get`` to set cache and proxy environment variables +# Uses globals ``OFFLINE``, ``*_proxy`` +# apt_get operation package [package ...] +function apt_get() { + [[ "$OFFLINE" = "True" || -z "$@" ]] && return + local sudo="sudo" + [[ "$(id -u)" = "0" ]] && sudo="env" + $sudo DEBIAN_FRONTEND=noninteractive \ + http_proxy=$http_proxy https_proxy=$https_proxy \ + no_proxy=$no_proxy \ + apt-get --option "Dpkg::Options::=--force-confold" --assume-yes "$@" +} + +# get_packages() collects a list of package names of any type from the +# prerequisite files in ``files/{apts|rpms}``. The list is intended +# to be passed to a package installer such as apt or yum. 
+# +# Only packages required for the services in 1st argument will be +# included. Two bits of metadata are recognized in the prerequisite files: +# +# - ``# NOPRIME`` defers installation to be performed later in `stack.sh` +# - ``# dist:DISTRO`` or ``dist:DISTRO1,DISTRO2`` limits the selection +# of the package to the distros listed. The distro names are case insensitive. +function get_packages() { + local services=$@ + local package_dir=$(_get_package_dir) + local file_to_parse + local service + + if [[ -z "$package_dir" ]]; then + echo "No package directory supplied" + return 1 + fi + if [[ -z "$DISTRO" ]]; then + GetDistro + fi + for service in ${services//,/ }; do + # Allow individual services to specify dependencies + if [[ -e ${package_dir}/${service} ]]; then + file_to_parse="${file_to_parse} $service" + fi + # NOTE(sdague) n-api needs glance for now because that's where + # glance client is + if [[ $service == n-api ]]; then + if [[ ! $file_to_parse =~ nova ]]; then + file_to_parse="${file_to_parse} nova" + fi + if [[ ! $file_to_parse =~ glance ]]; then + file_to_parse="${file_to_parse} glance" + fi + elif [[ $service == c-* ]]; then + if [[ ! $file_to_parse =~ cinder ]]; then + file_to_parse="${file_to_parse} cinder" + fi + elif [[ $service == ceilometer-* ]]; then + if [[ ! $file_to_parse =~ ceilometer ]]; then + file_to_parse="${file_to_parse} ceilometer" + fi + elif [[ $service == s-* ]]; then + if [[ ! $file_to_parse =~ swift ]]; then + file_to_parse="${file_to_parse} swift" + fi + elif [[ $service == n-* ]]; then + if [[ ! $file_to_parse =~ nova ]]; then + file_to_parse="${file_to_parse} nova" + fi + elif [[ $service == g-* ]]; then + if [[ ! $file_to_parse =~ glance ]]; then + file_to_parse="${file_to_parse} glance" + fi + elif [[ $service == key* ]]; then + if [[ ! $file_to_parse =~ keystone ]]; then + file_to_parse="${file_to_parse} keystone" + fi + elif [[ $service == q-* ]]; then + if [[ ! 
$file_to_parse =~ neutron ]]; then + file_to_parse="${file_to_parse} neutron" + fi + fi + done + + for file in ${file_to_parse}; do + local fname=${package_dir}/${file} + local OIFS line package distros distro + [[ -e $fname ]] || continue + + OIFS=$IFS + IFS=$'\n' + for line in $(<${fname}); do + if [[ $line =~ "NOPRIME" ]]; then + continue + fi + + # Assume we want this package + package=${line%#*} + inst_pkg=1 + + # Look for # dist:xxx in comment + if [[ $line =~ (.*)#.*dist:([^ ]*) ]]; then + # We are using BASH regexp matching feature. + package=${BASH_REMATCH[1]} + distros=${BASH_REMATCH[2]} + # In bash ${VAR,,} will lowecase VAR + # Look for a match in the distro list + if [[ ! ${distros,,} =~ ${DISTRO,,} ]]; then + # If no match then skip this package + inst_pkg=0 + fi + fi + + # Look for # testonly in comment + if [[ $line =~ (.*)#.*testonly.* ]]; then + package=${BASH_REMATCH[1]} + # Are we installing test packages? (test for the default value) + if [[ $INSTALL_TESTONLY_PACKAGES = "False" ]]; then + # If not installing test packages the skip this package + inst_pkg=0 + fi + fi + + if [[ $inst_pkg = 1 ]]; then + echo $package + fi + done + IFS=$OIFS + done +} + +# Distro-agnostic package installer +# install_package package [package ...] +function install_package() { + if is_ubuntu; then + [[ "$NO_UPDATE_REPOS" = "True" ]] || apt_get update + NO_UPDATE_REPOS=True + + apt_get install "$@" + elif is_fedora; then + yum_install "$@" + elif is_suse; then + zypper_install "$@" + else + exit_distro_not_supported "installing packages" + fi +} + +# Distro-agnostic function to tell if a package is installed +# is_package_installed package [package ...] 
+function is_package_installed() { + if [[ -z "$@" ]]; then + return 1 + fi + + if [[ -z "$os_PACKAGE" ]]; then + GetOSVersion + fi + + if [[ "$os_PACKAGE" = "deb" ]]; then + dpkg -s "$@" > /dev/null 2> /dev/null + elif [[ "$os_PACKAGE" = "rpm" ]]; then + rpm --quiet -q "$@" + else + exit_distro_not_supported "finding if a package is installed" + fi +} + +# Distro-agnostic package uninstaller +# uninstall_package package [package ...] +function uninstall_package() { + if is_ubuntu; then + apt_get purge "$@" + elif is_fedora; then + sudo yum remove -y "$@" + elif is_suse; then + sudo zypper rm "$@" + else + exit_distro_not_supported "uninstalling packages" + fi +} + +# Wrapper for ``yum`` to set proxy environment variables +# Uses globals ``OFFLINE``, ``*_proxy`` +# yum_install package [package ...] +function yum_install() { + [[ "$OFFLINE" = "True" ]] && return + local sudo="sudo" + [[ "$(id -u)" = "0" ]] && sudo="env" + $sudo http_proxy=$http_proxy https_proxy=$https_proxy \ + no_proxy=$no_proxy \ + yum install -y "$@" +} + +# zypper wrapper to set arguments correctly +# zypper_install package [package ...] +function zypper_install() { + [[ "$OFFLINE" = "True" ]] && return + local sudo="sudo" + [[ "$(id -u)" = "0" ]] && sudo="env" + $sudo http_proxy=$http_proxy https_proxy=$https_proxy \ + zypper --non-interactive install --auto-agree-with-licenses "$@" +} + + +# Process Functions +# ================= + +# _run_process() is designed to be backgrounded by run_process() to simulate a +# fork. It includes the dirty work of closing extra filehandles and preparing log +# files to produce the same logs as screen_it(). 
The log filename is derived +# from the service name and global-and-now-misnamed SCREEN_LOGDIR +# _run_process service "command-line" +function _run_process() { + local service=$1 + local command="$2" + + # Undo logging redirections and close the extra descriptors + exec 1>&3 + exec 2>&3 + exec 3>&- + exec 6>&- + + if [[ -n ${SCREEN_LOGDIR} ]]; then + exec 1>&${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log 2>&1 + ln -sf ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${1}.log + + # TODO(dtroyer): Hack to get stdout from the Python interpreter for the logs. + export PYTHONUNBUFFERED=1 + fi + + exec /bin/bash -c "$command" + die "$service exec failure: $command" +} + +# Helper to remove the ``*.failure`` files under ``$SERVICE_DIR/$SCREEN_NAME``. +# This is used for ``service_check`` when all the ``screen_it`` are called finished +# init_service_check +function init_service_check() { + SCREEN_NAME=${SCREEN_NAME:-stack} + SERVICE_DIR=${SERVICE_DIR:-${DEST}/status} + + if [[ ! -d "$SERVICE_DIR/$SCREEN_NAME" ]]; then + mkdir -p "$SERVICE_DIR/$SCREEN_NAME" + fi + + rm -f "$SERVICE_DIR/$SCREEN_NAME"/*.failure +} + +# Find out if a process exists by partial name. +# is_running name +function is_running() { + local name=$1 + ps auxw | grep -v grep | grep ${name} > /dev/null + RC=$? + # some times I really hate bash reverse binary logic + return $RC +} + +# run_process() launches a child process that closes all file descriptors and +# then exec's the passed in command. This is meant to duplicate the semantics +# of screen_it() without screen. PIDs are written to +# $SERVICE_DIR/$SCREEN_NAME/$service.pid +# run_process service "command-line" +function run_process() { + local service=$1 + local command="$2" + + # Spawn the child process + _run_process "$service" "$command" & + echo $! 
+} + +# Helper to launch a service in a named screen +# screen_it service "command-line" +function screen_it { + SCREEN_NAME=${SCREEN_NAME:-stack} + SERVICE_DIR=${SERVICE_DIR:-${DEST}/status} + USE_SCREEN=$(trueorfalse True $USE_SCREEN) + + if is_service_enabled $1; then + # Append the service to the screen rc file + screen_rc "$1" "$2" + + if [[ "$USE_SCREEN" = "True" ]]; then + screen -S $SCREEN_NAME -X screen -t $1 + + if [[ -n ${SCREEN_LOGDIR} ]]; then + screen -S $SCREEN_NAME -p $1 -X logfile ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log + screen -S $SCREEN_NAME -p $1 -X log on + ln -sf ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${1}.log + fi + + # sleep to allow bash to be ready to be send the command - we are + # creating a new window in screen and then sends characters, so if + # bash isn't running by the time we send the command, nothing happens + sleep 1.5 + + NL=`echo -ne '\015'` + # This fun command does the following: + # - the passed server command is backgrounded + # - the pid of the background process is saved in the usual place + # - the server process is brought back to the foreground + # - if the server process exits prematurely the fg command errors + # and a message is written to stdout and the service failure file + # The pid saved can be used in screen_stop() as a process group + # id to kill off all child processes + screen -S $SCREEN_NAME -p $1 -X stuff "$2 & echo \$! >$SERVICE_DIR/$SCREEN_NAME/$1.pid; fg || echo \"$1 failed to start\" | tee \"$SERVICE_DIR/$SCREEN_NAME/$1.failure\"$NL" + else + # Spawn directly without screen + run_process "$1" "$2" >$SERVICE_DIR/$SCREEN_NAME/$1.pid + fi + fi +} + +# Screen rc file builder +# screen_rc service "command-line" +function screen_rc { + SCREEN_NAME=${SCREEN_NAME:-stack} + SCREENRC=$TOP_DIR/$SCREEN_NAME-screenrc + if [[ ! 
-e $SCREENRC ]]; then + # Name the screen session + echo "sessionname $SCREEN_NAME" > $SCREENRC + # Set a reasonable statusbar + echo "hardstatus alwayslastline '$SCREEN_HARDSTATUS'" >> $SCREENRC + # Some distributions override PROMPT_COMMAND for the screen terminal type - turn that off + echo "setenv PROMPT_COMMAND /bin/true" >> $SCREENRC + echo "screen -t shell bash" >> $SCREENRC + fi + # If this service doesn't already exist in the screenrc file + if ! grep $1 $SCREENRC 2>&1 > /dev/null; then + NL=`echo -ne '\015'` + echo "screen -t $1 bash" >> $SCREENRC + echo "stuff \"$2$NL\"" >> $SCREENRC + + if [[ -n ${SCREEN_LOGDIR} ]]; then + echo "logfile ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log" >>$SCREENRC + echo "log on" >>$SCREENRC + fi + fi +} + +# Stop a service in screen +# If a PID is available use it, kill the whole process group via TERM +# If screen is being used kill the screen window; this will catch processes +# that did not leave a PID behind +# screen_stop service +function screen_stop() { + SCREEN_NAME=${SCREEN_NAME:-stack} + SERVICE_DIR=${SERVICE_DIR:-${DEST}/status} + USE_SCREEN=$(trueorfalse True $USE_SCREEN) + + if is_service_enabled $1; then + # Kill via pid if we have one available + if [[ -r $SERVICE_DIR/$SCREEN_NAME/$1.pid ]]; then + pkill -TERM -P -$(cat $SERVICE_DIR/$SCREEN_NAME/$1.pid) + rm $SERVICE_DIR/$SCREEN_NAME/$1.pid + fi + if [[ "$USE_SCREEN" = "True" ]]; then + # Clean up the screen window + screen -S $SCREEN_NAME -p $1 -X kill + fi + fi +} + +# Helper to get the status of each running service +# service_check +function service_check() { + local service + local failures + SCREEN_NAME=${SCREEN_NAME:-stack} + SERVICE_DIR=${SERVICE_DIR:-${DEST}/status} + + + if [[ ! 
-d "$SERVICE_DIR/$SCREEN_NAME" ]]; then + echo "No service status directory found" + return + fi + + # Check if there is any falure flag file under $SERVICE_DIR/$SCREEN_NAME + failures=`ls "$SERVICE_DIR/$SCREEN_NAME"/*.failure 2>/dev/null` + + for service in $failures; do + service=`basename $service` + service=${service%.failure} + echo "Error: Service $service is not running" + done + + if [ -n "$failures" ]; then + echo "More details about the above errors can be found with screen, with ./rejoin-stack.sh" + fi +} + + +# Python Functions +# ================ + +# Get the path to the pip command. +# get_pip_command +function get_pip_command() { + which pip || which pip-python + + if [ $? -ne 0 ]; then + die $LINENO "Unable to find pip; cannot continue" + fi +} + +# Get the path to the direcotry where python executables are installed. +# get_python_exec_prefix +function get_python_exec_prefix() { + if is_fedora || is_suse; then + echo "/usr/bin" + else + echo "/usr/local/bin" + fi +} + +# Wrapper for ``pip install`` to set cache and proxy environment variables +# Uses globals ``OFFLINE``, ``PIP_DOWNLOAD_CACHE``, ``PIP_USE_MIRRORS``, +# ``TRACK_DEPENDS``, ``*_proxy`` +# pip_install package [package ...] +function pip_install { + [[ "$OFFLINE" = "True" || -z "$@" ]] && return + if [[ -z "$os_PACKAGE" ]]; then + GetOSVersion + fi + if [[ $TRACK_DEPENDS = True ]]; then + source $DEST/.venv/bin/activate + CMD_PIP=$DEST/.venv/bin/pip + SUDO_PIP="env" + else + SUDO_PIP="sudo" + CMD_PIP=$(get_pip_command) + fi + + # Mirror option not needed anymore because pypi has CDN available, + # but it's useful in certain circumstances + PIP_USE_MIRRORS=${PIP_USE_MIRRORS:-False} + if [[ "$PIP_USE_MIRRORS" != "False" ]]; then + PIP_MIRROR_OPT="--use-mirrors" + fi + + # pip < 1.4 has a bug where it will use an already existing build + # directory unconditionally. Say an earlier component installs + # foo v1.1; pip will have built foo's source in + # /tmp/$USER-pip-build. 
Even if a later component specifies foo < + # 1.1, the existing extracted build will be used and cause + # confusing errors. By creating unique build directories we avoid + # this problem. See https://github.com/pypa/pip/issues/709 + local pip_build_tmp=$(mktemp --tmpdir -d pip-build.XXXXX) + + $SUDO_PIP PIP_DOWNLOAD_CACHE=${PIP_DOWNLOAD_CACHE:-/var/cache/pip} \ + HTTP_PROXY=$http_proxy \ + HTTPS_PROXY=$https_proxy \ + NO_PROXY=$no_proxy \ + $CMD_PIP install --build=${pip_build_tmp} \ + $PIP_MIRROR_OPT $@ \ + && $SUDO_PIP rm -rf ${pip_build_tmp} +} + + +# Service Functions +# ================= + +# remove extra commas from the input string (i.e. ``ENABLED_SERVICES``) +# _cleanup_service_list service-list +function _cleanup_service_list () { + echo "$1" | sed -e ' + s/,,/,/g; + s/^,//; + s/,$// + ' +} + +# disable_all_services() removes all current services +# from ``ENABLED_SERVICES`` to reset the configuration +# before a minimal installation +# Uses global ``ENABLED_SERVICES`` +# disable_all_services +function disable_all_services() { + ENABLED_SERVICES="" +} + +# Remove all services starting with '-'. For example, to install all default +# services except rabbit (rabbit) set in ``localrc``: +# ENABLED_SERVICES+=",-rabbit" +# Uses global ``ENABLED_SERVICES`` +# disable_negated_services +function disable_negated_services() { + local tmpsvcs="${ENABLED_SERVICES}" + local service + for service in ${tmpsvcs//,/ }; do + if [[ ${service} == -* ]]; then + tmpsvcs=$(echo ${tmpsvcs}|sed -r "s/(,)?(-)?${service#-}(,)?/,/g") + fi + done + ENABLED_SERVICES=$(_cleanup_service_list "$tmpsvcs") +} + +# disable_service() removes the services passed as argument to the +# ``ENABLED_SERVICES`` list, if they are present. +# +# For example: +# disable_service rabbit +# +# This function does not know about the special cases +# for nova, glance, and neutron built into is_service_enabled(). +# Uses global ``ENABLED_SERVICES`` +# disable_service service [service ...] 
+function disable_service() { + local tmpsvcs=",${ENABLED_SERVICES}," + local service + for service in $@; do + if is_service_enabled $service; then + tmpsvcs=${tmpsvcs//,$service,/,} + fi + done + ENABLED_SERVICES=$(_cleanup_service_list "$tmpsvcs") +} + +# enable_service() adds the services passed as argument to the +# ``ENABLED_SERVICES`` list, if they are not already present. +# +# For example: +# enable_service qpid +# +# This function does not know about the special cases +# for nova, glance, and neutron built into is_service_enabled(). +# Uses global ``ENABLED_SERVICES`` +# enable_service service [service ...] +function enable_service() { + local tmpsvcs="${ENABLED_SERVICES}" + for service in $@; do + if ! is_service_enabled $service; then + tmpsvcs+=",$service" + fi + done + ENABLED_SERVICES=$(_cleanup_service_list "$tmpsvcs") + disable_negated_services +} + +# is_service_enabled() checks if the service(s) specified as arguments are +# enabled by the user in ``ENABLED_SERVICES``. +# +# Multiple services specified as arguments are ``OR``'ed together; the test +# is a short-circuit boolean, i.e it returns on the first match. +# +# There are special cases for some 'catch-all' services:: +# **nova** returns true if any service enabled start with **n-** +# **cinder** returns true if any service enabled start with **c-** +# **ceilometer** returns true if any service enabled start with **ceilometer** +# **glance** returns true if any service enabled start with **g-** +# **neutron** returns true if any service enabled start with **q-** +# **swift** returns true if any service enabled start with **s-** +# **trove** returns true if any service enabled start with **tr-** +# For backward compatibility if we have **swift** in ENABLED_SERVICES all the +# **s-** services will be enabled. This will be deprecated in the future. +# +# Cells within nova is enabled if **n-cell** is in ``ENABLED_SERVICES``. 
+# We also need to make sure to treat **n-cell-region** and **n-cell-child** +# as enabled in this case. +# +# Uses global ``ENABLED_SERVICES`` +# is_service_enabled service [service ...] +function is_service_enabled() { + services=$@ + for service in ${services}; do + [[ ,${ENABLED_SERVICES}, =~ ,${service}, ]] && return 0 + + # Look for top-level 'enabled' function for this service + if type is_${service}_enabled >/dev/null 2>&1; then + # A function exists for this service, use it + is_${service}_enabled + return $? + fi + + # TODO(dtroyer): Remove these legacy special-cases after the is_XXX_enabled() + # are implemented + + [[ ${service} == n-cell-* && ${ENABLED_SERVICES} =~ "n-cell" ]] && return 0 + [[ ${service} == "nova" && ${ENABLED_SERVICES} =~ "n-" ]] && return 0 + [[ ${service} == "cinder" && ${ENABLED_SERVICES} =~ "c-" ]] && return 0 + [[ ${service} == "ceilometer" && ${ENABLED_SERVICES} =~ "ceilometer-" ]] && return 0 + [[ ${service} == "glance" && ${ENABLED_SERVICES} =~ "g-" ]] && return 0 + [[ ${service} == "ironic" && ${ENABLED_SERVICES} =~ "ir-" ]] && return 0 + [[ ${service} == "neutron" && ${ENABLED_SERVICES} =~ "q-" ]] && return 0 + [[ ${service} == "trove" && ${ENABLED_SERVICES} =~ "tr-" ]] && return 0 + [[ ${service} == "swift" && ${ENABLED_SERVICES} =~ "s-" ]] && return 0 + [[ ${service} == s-* && ${ENABLED_SERVICES} =~ "swift" ]] && return 0 + done + return 1 +} + +# Toggle enable/disable_service for services that must run exclusive of each other +# $1 The name of a variable containing a space-separated list of services +# $2 The name of a variable in which to store the enabled service's name +# $3 The name of the service to enable +function use_exclusive_service { + local options=${!1} + local selection=$3 + out=$2 + [ -z $selection ] || [[ ! 
"$options" =~ "$selection" ]] && return 1 + for opt in $options;do + [[ "$opt" = "$selection" ]] && enable_service $opt || disable_service $opt + done + eval "$out=$selection" + return 0 +} + + +# System Function +# =============== + +# Only run the command if the target file (the last arg) is not on an +# NFS filesystem. +function _safe_permission_operation() { + local args=( $@ ) + local last + local sudo_cmd + local dir_to_check + + let last="${#args[*]} - 1" + + dir_to_check=${args[$last]} + if [ ! -d "$dir_to_check" ]; then + dir_to_check=`dirname "$dir_to_check"` + fi + + if is_nfs_directory "$dir_to_check" ; then + return 0 + fi + + if [[ $TRACK_DEPENDS = True ]]; then + sudo_cmd="env" + else + sudo_cmd="sudo" + fi + + $sudo_cmd $@ +} + +# Exit 0 if address is in network or 1 if address is not in network +# ip-range is in CIDR notation: 1.2.3.4/20 +# address_in_net ip-address ip-range +function address_in_net() { + local ip=$1 + local range=$2 + local masklen=${range#*/} + local network=$(maskip ${range%/*} $(cidr2netmask $masklen)) + local subnet=$(maskip $ip $(cidr2netmask $masklen)) + [[ $network == $subnet ]] +} + +# Add a user to a group. +# add_user_to_group user group +function add_user_to_group() { + local user=$1 + local group=$2 + + if [[ -z "$os_VENDOR" ]]; then + GetOSVersion + fi + + # SLE11 and openSUSE 12.2 don't have the usual usermod + if ! 
is_suse || [[ "$os_VENDOR" = "openSUSE" && "$os_RELEASE" != "12.2" ]]; then + sudo usermod -a -G "$group" "$user" + else + sudo usermod -A "$group" "$user" + fi +} + +# Convert CIDR notation to a IPv4 netmask +# cidr2netmask cidr-bits +function cidr2netmask() { + local maskpat="255 255 255 255" + local maskdgt="254 252 248 240 224 192 128" + set -- ${maskpat:0:$(( ($1 / 8) * 4 ))}${maskdgt:$(( (7 - ($1 % 8)) * 4 )):3} + echo ${1-0}.${2-0}.${3-0}.${4-0} +} + +# Gracefully cp only if source file/dir exists +# cp_it source destination +function cp_it { + if [ -e $1 ] || [ -d $1 ]; then + cp -pRL $1 $2 + fi +} + +# HTTP and HTTPS proxy servers are supported via the usual environment variables [1] +# ``http_proxy``, ``https_proxy`` and ``no_proxy``. They can be set in +# ``localrc`` or on the command line if necessary:: +# +# [1] http://www.w3.org/Daemon/User/Proxies/ProxyClients.html +# +# http_proxy=http://proxy.example.com:3128/ no_proxy=repo.example.net ./stack.sh + +function export_proxy_variables() { + if [[ -n "$http_proxy" ]]; then + export http_proxy=$http_proxy + fi + if [[ -n "$https_proxy" ]]; then + export https_proxy=$https_proxy + fi + if [[ -n "$no_proxy" ]]; then + export no_proxy=$no_proxy + fi +} + +# Returns true if the directory is on a filesystem mounted via NFS. 
+function is_nfs_directory() { + local mount_type=`stat -f -L -c %T $1` + test "$mount_type" == "nfs" +} + +# Return the network portion of the given IP address using netmask +# netmask is in the traditional dotted-quad format +# maskip ip-address netmask +function maskip() { + local ip=$1 + local mask=$2 + local l="${ip%.*}"; local r="${ip#*.}"; local n="${mask%.*}"; local m="${mask#*.}" + local subnet=$((${ip%%.*}&${mask%%.*})).$((${r%%.*}&${m%%.*})).$((${l##*.}&${n##*.})).$((${ip##*.}&${mask##*.})) + echo $subnet +} + +# Service wrapper to restart services +# restart_service service-name +function restart_service() { + if is_ubuntu; then + sudo /usr/sbin/service $1 restart + else + sudo /sbin/service $1 restart + fi +} + +# Only change permissions of a file or directory if it is not on an +# NFS filesystem. +function safe_chmod() { + _safe_permission_operation chmod $@ +} + +# Only change ownership of a file or directory if it is not on an NFS +# filesystem. +function safe_chown() { + _safe_permission_operation chown $@ +} + +# Service wrapper to start services +# start_service service-name +function start_service() { + if is_ubuntu; then + sudo /usr/sbin/service $1 start + else + sudo /sbin/service $1 start + fi +} + +# Service wrapper to stop services +# stop_service service-name +function stop_service() { + if is_ubuntu; then + sudo /usr/sbin/service $1 stop + else + sudo /sbin/service $1 stop + fi +} + + +# Restore xtrace +$XTRACE + +# Local variables: +# mode: shell-script +# End: From abc7b1d765665b66a027fe93c841b62e537c7843 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 12 Feb 2014 12:09:22 -0600 Subject: [PATCH 0722/4704] Backport Grenade updates Backport changes made in Grenade's copy of functions since the last sync: * d0654b9,i 4c7726e - get_release_name_from_branch() * 7907766 - edits to install_package() Change-Id: I0714c0b1072f1360c3c08fe24225e65e2a550fad --- functions-common | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 
deletion(-) diff --git a/functions-common b/functions-common index 0cecb0b9fb..d92e39cd91 100644 --- a/functions-common +++ b/functions-common @@ -460,6 +460,17 @@ function is_ubuntu { # Git Functions # ============= +# Returns openstack release name for a given branch name +# ``get_release_name_from_branch branch-name`` +function get_release_name_from_branch(){ + local branch=$1 + if [[ $branch =~ "stable/" ]]; then + echo ${branch#*/} + else + echo "master" + fi +} + # git clone only if directory doesn't exist already. Since ``DEST`` might not # be owned by the installation user, we create the directory and change the # ownership to the proper user. @@ -792,7 +803,9 @@ function get_packages() { # install_package package [package ...] function install_package() { if is_ubuntu; then - [[ "$NO_UPDATE_REPOS" = "True" ]] || apt_get update + # if there are transient errors pulling the updates, that's fine. It may + # be secondary repositories that we don't really care about. + [[ "$NO_UPDATE_REPOS" = "True" ]] || apt_get update || /bin/true NO_UPDATE_REPOS=True apt_get install "$@" From 3f918a4541a49cc0d50d2931f8670e6e0074280e Mon Sep 17 00:00:00 2001 From: Eric Windisch Date: Tue, 11 Feb 2014 11:47:47 -0700 Subject: [PATCH 0723/4704] Set DEFAULT_IMAGE_NAME for Docker hypervisor This allows the tempest configuration to set the right image for booting docker containers with Nova. Since glance uploads are not yet integrated in devstack, IMAGE_URLS remains empty. 
Change-Id: I5df153cd1d5e1411bb3c11816122ce280148e129 --- lib/nova_plugins/hypervisor-docker | 2 +- stackrc | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/nova_plugins/hypervisor-docker b/lib/nova_plugins/hypervisor-docker index cdd9317761..b5df19db02 100644 --- a/lib/nova_plugins/hypervisor-docker +++ b/lib/nova_plugins/hypervisor-docker @@ -32,7 +32,7 @@ DOCKER_PID_FILE=/var/run/docker.pid DOCKER_REGISTRY_PORT=${DOCKER_REGISTRY_PORT:-5042} DOCKER_IMAGE=${DOCKER_IMAGE:-cirros:latest} -DOCKER_IMAGE_NAME=cirros +DOCKER_IMAGE_NAME=$DEFAULT_IMAGE_NAME DOCKER_REGISTRY_IMAGE=${DOCKER_REGISTRY_IMAGE:-registry:latest} DOCKER_REGISTRY_IMAGE_NAME=registry DOCKER_REPOSITORY_NAME=${SERVICE_HOST}:${DOCKER_REGISTRY_PORT}/${DOCKER_IMAGE_NAME} diff --git a/stackrc b/stackrc index 7eed60cb2c..d754f3b074 100644 --- a/stackrc +++ b/stackrc @@ -280,6 +280,9 @@ case "$VIRT_DRIVER" in openvz) DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ubuntu-12.04-x86_64} IMAGE_URLS=${IMAGE_URLS:-"http://download.openvz.org/template/precreated/ubuntu-12.04-x86_64.tar.gz"};; + docker) + DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros} + IMAGE_URLS=${IMAGE_URLS:-};; libvirt) case "$LIBVIRT_TYPE" in lxc) # the cirros root disk in the uec tarball is empty, so it will not work for lxc From 1d50d78560910779d28db85591fbb67e1617ff34 Mon Sep 17 00:00:00 2001 From: Malini Kamalambal Date: Wed, 12 Feb 2014 18:23:36 -0500 Subject: [PATCH 0724/4704] Fix service name for marconi This patch updates TEMPEST_SERVICES, to have the same name as devstack marconi service. 
Change-Id: Ibc9b4a66fccd3d95ddd1717bf549476bd843204a Implements: blueprint add-basic-marconi-tests --- lib/marconi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/marconi b/lib/marconi index d1ab5f3a5c..0aaff1bd58 100644 --- a/lib/marconi +++ b/lib/marconi @@ -52,7 +52,7 @@ MARCONICLIENT_REPO=${MARCONICLIENT_REPO:-${GIT_BASE}/openstack/python-marconicli MARCONICLIENT_BRANCH=${MARCONICLIENT_BRANCH:-master} # Tell Tempest this project is present -TEMPEST_SERVICES+=,marconi +TEMPEST_SERVICES+=,marconi-server # Functions From a42541a9fb00e21b278a06d4034528976cbf8336 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Thu, 13 Feb 2014 09:39:15 -0500 Subject: [PATCH 0725/4704] add heat to the default devstack service list heat has been integrated for a while, we should turn it on out of the box. Also refactor the service list to make it simpler to understand what's enabled. Change-Id: I9738f39ce196d5c7f75b0a5b164222ea165fb340 --- stackrc | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/stackrc b/stackrc index 9166a171e1..165196c545 100644 --- a/stackrc +++ b/stackrc @@ -35,7 +35,18 @@ fi # enable_service neutron # # Optional, to enable tempest configuration as part of devstack # enable_service tempest -ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,n-cond,c-sch,c-api,c-vol,n-sch,n-novnc,n-xvnc,n-cauth,horizon,rabbit,tempest,mysql + +# core compute (glance / keystone / nova (+ nova-network)) +ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,n-cond,n-sch,n-novnc,n-xvnc,n-cauth +# cinder +ENABLED_SERVICES+=,c-sch,c-api,c-vol +# heat +ENABLED_SERVICES+=,h-eng,h-api,h-api-cfn,h-api-cw +# dashboard +ENABLED_SERVICES+=,horizon +# additional services +ENABLED_SERVICES+=,rabbit,tempest,mysql + # Tell Tempest which services are available. The default is set here as # Tempest falls late in the configuration sequence. 
This differs from From 1bcd2800271d6a72237084ad7f36f84072eecd18 Mon Sep 17 00:00:00 2001 From: Steven Hardy Date: Thu, 13 Feb 2014 15:14:41 +0000 Subject: [PATCH 0726/4704] Don't warn about heat modifying flavors Since de0898a Heat no longer modifies flavors, so the comment and output related to modified flavors is no longer needed. Change-Id: I1007d2ab3387f28b8d7487f450cab4592f2824aa --- stack.sh | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/stack.sh b/stack.sh index e45707b781..1dc4b74ab3 100755 --- a/stack.sh +++ b/stack.sh @@ -1181,7 +1181,7 @@ fi # Configure and launch heat engine, api and metadata if is_service_enabled heat; then - # Initialize heat, including replacing nova flavors + # Initialize heat echo_summary "Configuring Heat" init_heat echo_summary "Starting Heat" @@ -1350,11 +1350,6 @@ if is_service_enabled horizon; then echo "Horizon is now available at http://$SERVICE_HOST/" fi -# Warn that the default flavors have been changed by Heat -if is_service_enabled heat; then - echo "Heat has replaced the default flavors. View by running: nova flavor-list" -fi - # If Keystone is present you can point ``nova`` cli to this server if is_service_enabled key; then echo "Keystone is serving at $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/" From 4074e298112ba179ba743982c6904c8bd70030b2 Mon Sep 17 00:00:00 2001 From: Akihiro Motoki Date: Fri, 14 Feb 2014 00:54:58 +0900 Subject: [PATCH 0727/4704] Use lowercase section name in Neutron ML2 security group config All other security group configs in Neutron are lower-case and it should be consistent. 
Change-Id: I683333c1e186446a69172446cca6d9b952673ed4 Closes-Bug: #1279862 --- lib/neutron_plugins/ml2 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2 index ab4e3474a6..4ceabe765d 100644 --- a/lib/neutron_plugins/ml2 +++ b/lib/neutron_plugins/ml2 @@ -93,9 +93,9 @@ function neutron_plugin_configure_service() { # instead use its own config variable to indicate whether security # groups is enabled, and that will need to be set here instead. if [[ "$Q_USE_SECGROUP" == "True" ]]; then - iniset /$Q_PLUGIN_CONF_FILE SECURITYGROUP firewall_driver neutron.agent.not.a.real.FirewallDriver + iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.not.a.real.FirewallDriver else - iniset /$Q_PLUGIN_CONF_FILE SECURITYGROUP firewall_driver neutron.agent.firewall.NoopFirewallDriver + iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.firewall.NoopFirewallDriver fi # Since we enable the tunnel TypeDrivers, also enable a local_ip From 22dece0d969b476cf187fe7359fa38d96189cdc1 Mon Sep 17 00:00:00 2001 From: John Eckersberg Date: Thu, 13 Feb 2014 16:21:24 -0500 Subject: [PATCH 0728/4704] Add Fedora support to install_docker.sh On Fedora, just install the docker-io package as supplied in the Fedora repository. 
Change-Id: Iea74878d3e1c434863c188ea2253817384e56bf4 --- tools/docker/install_docker.sh | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/tools/docker/install_docker.sh b/tools/docker/install_docker.sh index b9e1b242dd..27c8c8210b 100755 --- a/tools/docker/install_docker.sh +++ b/tools/docker/install_docker.sh @@ -30,15 +30,19 @@ SERVICE_TIMEOUT=${SERVICE_TIMEOUT:-60} # Install Docker Service # ====================== -# Stop the auto-repo updates and do it when required here -NO_UPDATE_REPOS=True - -# Set up home repo -curl https://get.docker.io/gpg | sudo apt-key add - -install_package python-software-properties && \ - sudo sh -c "echo deb $DOCKER_APT_REPO docker main > /etc/apt/sources.list.d/docker.list" -apt_get update -install_package --force-yes lxc-docker socat +if is_fedora; then + install_package docker-io socat +else + # Stop the auto-repo updates and do it when required here + NO_UPDATE_REPOS=True + + # Set up home repo + curl https://get.docker.io/gpg | sudo apt-key add - + install_package python-software-properties && \ + sudo sh -c "echo deb $DOCKER_APT_REPO docker main > /etc/apt/sources.list.d/docker.list" + apt_get update + install_package --force-yes lxc-docker socat +fi # Start the daemon - restart just in case the package ever auto-starts... restart_service docker From d6997d317685353482a0aa7a18408c1313583460 Mon Sep 17 00:00:00 2001 From: John Griffith Date: Thu, 13 Feb 2014 22:56:29 +0000 Subject: [PATCH 0729/4704] Fix tee statement for catching tempest output The use of exec and tee doesn't seem to be quite right, and was unreliable in terms of catching the exit status of the tempest test as well as not catching the output when things went wrong. This changes the way we do the redirect and the tee to something that should be more robust and seems to work reliably in testing. 
Change-Id: Ieb9d725839fb8e3f9e2e63a2b7b2e9c7c86713a2 --- driver_certs/cinder_driver_cert.sh | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/driver_certs/cinder_driver_cert.sh b/driver_certs/cinder_driver_cert.sh index edcc6d4800..8380deea42 100755 --- a/driver_certs/cinder_driver_cert.sh +++ b/driver_certs/cinder_driver_cert.sh @@ -89,9 +89,8 @@ start_cinder sleep 5 # run tempest api/volume/test_* -log_message "Run the actual tempest volume tests (./tools/pretty_tox.sh api.volume_*)...", True -exec 2> >(tee -a $TEMPFILE) -`./tools/pretty_tox.sh api.volume` +log_message "Run the actual tempest volume tests (./tools/pretty_tox.sh api.volume)...", True +./tools/pretty_tox.sh api.volume 2>&1 | tee -a $TEMPFILE if [[ $? = 0 ]]; then log_message "CONGRATULATIONS!!! Device driver PASSED!", True log_message "Submit output: ($TEMPFILE)" From 0b3aacc707ab8b3593285e02dc172b3c96730efc Mon Sep 17 00:00:00 2001 From: Malini Kamalambal Date: Thu, 13 Feb 2014 18:18:51 -0500 Subject: [PATCH 0730/4704] Fix MARCONI_USER This patch fixes the MARCONI_USER in create_marconi_accounts(). 
Change-Id: I9618530fa20ee84d25646107c7450017ada908df --- lib/marconi | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/marconi b/lib/marconi index 0aaff1bd58..e400419d57 100644 --- a/lib/marconi +++ b/lib/marconi @@ -154,10 +154,12 @@ function create_marconi_accounts() { MARCONI_USER=$(get_id keystone user-create --name=marconi \ --pass="$SERVICE_PASSWORD" \ --tenant-id $SERVICE_TENANT \ - --email=marconi@example.com) + --email=marconi@example.com \ + | grep " id " | get_field 2) keystone user-role-add --tenant-id $SERVICE_TENANT \ --user-id $MARCONI_USER \ --role-id $ADMIN_ROLE + if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then MARCONI_SERVICE=$(keystone service-create \ --name=marconi \ From 16d3ad057dc0b982c801fcfa9d5497c1daeb34cd Mon Sep 17 00:00:00 2001 From: Brant Knudson Date: Thu, 13 Feb 2014 18:59:50 -0600 Subject: [PATCH 0731/4704] Use database connection for keystone The keystone configuration used the 'connection' option in the 'sql' section of the keystone.conf file. This option is deprecated in favor of 'connection' in the 'database' section. The keystone setup code is changed to use the option in the new section rather than the deprecated one. 
Change-Id: I62fd2f50ded3b8848e9e5225e88c80ed8fed3bff --- lib/keystone | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/keystone b/lib/keystone index 4f7f68b57f..5e2e88d33f 100644 --- a/lib/keystone +++ b/lib/keystone @@ -201,7 +201,7 @@ function configure_keystone() { iniset $KEYSTONE_CONF token provider keystone.token.providers.uuid.Provider fi - iniset $KEYSTONE_CONF sql connection `database_connection_url keystone` + iniset $KEYSTONE_CONF database connection `database_connection_url keystone` iniset $KEYSTONE_CONF ec2 driver "keystone.contrib.ec2.backends.sql.Ec2" if [[ "$KEYSTONE_TOKEN_BACKEND" = "sql" ]]; then From 41d96d7b4837c6bafc2622954a3c6c1fdcc13a82 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Tue, 11 Feb 2014 09:08:35 -0600 Subject: [PATCH 0732/4704] Marconi fixes for precise Fix a couple of problems found on Ubuntu: * $MARCONI_DIR/etc/marconi/policy.json doesn't exist; removed the copy to /etc/marconi * added a seting of nssize in /etc/mongodb.conf for Ubuntu * restart the correct serice name on Ubuntu Change-Id: I9bd2ab1aa4fb94ff96559e069e5b62138c358fb5 --- lib/marconi | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/lib/marconi b/lib/marconi index 1eaebbdf16..3d5ef82471 100644 --- a/lib/marconi +++ b/lib/marconi @@ -82,10 +82,6 @@ function configure_marconi() { iniset $MARCONI_CONF DEFAULT verbose True iniset $MARCONI_CONF 'drivers:transport:wsgi' bind '0.0.0.0' - # Install the policy file for the API server - cp $MARCONI_DIR/etc/marconi/policy.json $MARCONI_CONF_DIR - iniset $MARCONI_CONF DEFAULT policy_file $MARCONI_CONF_DIR/policy.json - iniset $MARCONI_CONF keystone_authtoken auth_protocol http iniset $MARCONI_CONF keystone_authtoken admin_user marconi iniset $MARCONI_CONF keystone_authtoken admin_password $SERVICE_PASSWORD @@ -102,9 +98,16 @@ function configure_marconi() { function configure_mongodb() { # Set nssize to 2GB. 
This increases the number of namespaces supported # # per database. - sudo sed -i '/--nssize/!s/OPTIONS=\"/OPTIONS=\"--nssize 2047 /' /etc/sysconfig/mongod - - restart_service mongod + if is_ubuntu; then + sudo sed -i -e " + s|[^ \t]*#[ \t]*\(nssize[ \t]*=.*\$\)|\1| + s|^\(nssize[ \t]*=[ \t]*\).*\$|\1 2047| + " /etc/mongodb.conf + restart_service mongodb + elif is_fedora; then + sudo sed -i '/--nssize/!s/OPTIONS=\"/OPTIONS=\"--nssize 2047 /' /etc/sysconfig/mongod + restart_service mongod + fi } # init_marconi() - Initialize etc. From 1e4e3acaadc1397a7d69a83e8fe9a54dd879983a Mon Sep 17 00:00:00 2001 From: Malini Kamalambal Date: Fri, 14 Feb 2014 11:29:26 -0500 Subject: [PATCH 0733/4704] Revert the tempest service name to marconi This patch is to rollback the change introduced by https://review.openstack.org/#/c/73100/. 73100 is no longer needed because of the recent https://review.openstack.org/#/c/69497/. Using 'marconi' as the service name will keep us aligned with the naming convention used by other projects. Change-Id: I5da6d2aaeb5c9dc29a1cbc70c8425449807eb34c --- lib/marconi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/marconi b/lib/marconi index 0aaff1bd58..d1ab5f3a5c 100644 --- a/lib/marconi +++ b/lib/marconi @@ -52,7 +52,7 @@ MARCONICLIENT_REPO=${MARCONICLIENT_REPO:-${GIT_BASE}/openstack/python-marconicli MARCONICLIENT_BRANCH=${MARCONICLIENT_BRANCH:-master} # Tell Tempest this project is present -TEMPEST_SERVICES+=,marconi-server +TEMPEST_SERVICES+=,marconi # Functions From 5705db691386809e288758a0314dfa60d9b36da7 Mon Sep 17 00:00:00 2001 From: Russell Bryant Date: Sat, 1 Feb 2014 20:06:42 -0500 Subject: [PATCH 0734/4704] Optionally enable file injection There is a patch up for nova right now that disables file injection by default. This is a corresponding devstack change that only sets file injection options if it is enabled in the devstack config. This is good to keep around so that we can easily turn it on for testing. 
The nova change is id Icff1304fc816acc843f8962727aef8bbbc7bbaa3. Change-Id: I5015f2c351b1d680c205d7f9a5204febca490b91 --- lib/nova | 6 ------ lib/nova_plugins/hypervisor-libvirt | 16 ++++++++++++++++ 2 files changed, 16 insertions(+), 6 deletions(-) diff --git a/lib/nova b/lib/nova index eaaaa6210c..d90aea7108 100644 --- a/lib/nova +++ b/lib/nova @@ -513,12 +513,6 @@ function create_nova_conf() { iniset $NOVA_CONF DEFAULT ec2_dmz_host "$EC2_DMZ_HOST" iniset_rpc_backend nova $NOVA_CONF DEFAULT iniset $NOVA_CONF DEFAULT glance_api_servers "$GLANCE_HOSTPORT" - - if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then - # File injection is being disabled by default in the near future - - # disable it here for now to avoid surprises later. - iniset $NOVA_CONF libvirt inject_partition '-2' - fi } function init_nova_cells() { diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt index 42d3af15cf..415244ffae 100644 --- a/lib/nova_plugins/hypervisor-libvirt +++ b/lib/nova_plugins/hypervisor-libvirt @@ -23,6 +23,9 @@ set +o xtrace # Defaults # -------- +# File injection is disabled by default in Nova. This will turn it back on. +ENABLE_FILE_INJECTION=${ENABLE_FILE_INJECTION:-False} + # Entry Points # ------------ @@ -116,6 +119,19 @@ EOF" if is_arch "ppc64"; then iniset $NOVA_CONF DEFAULT vnc_enabled "false" fi + + ENABLE_FILE_INJECTION=$(trueorfalse False $ENABLE_FILE_INJECTION) + if [[ "$ENABLE_FILE_INJECTION" = "True" ]] ; then + # When libguestfs is available for file injection, enable using + # libguestfs to inspect the image and figure out the proper + # partition to inject into. + iniset $NOVA_CONF libvirt inject_partition '-1' + iniset $NOVA_CONF libvirt inject_key 'true' + else + # File injection is being disabled by default in the near future - + # disable it here for now to avoid surprises later. 
+ iniset $NOVA_CONF libvirt inject_partition '-2' + fi } # install_nova_hypervisor() - Install external components From 19685428e3d3e51ff88aa5254f7c27d476053798 Mon Sep 17 00:00:00 2001 From: Steve Martinelli Date: Fri, 24 Jan 2014 13:02:26 -0600 Subject: [PATCH 0735/4704] Change most keystoneclient commands to openstacklient in libs migrated most keystoneclient commands from the following libs: ceilometer cinder ironic keystone marconi neutron nova savanna swift trove Also need to set and unset openstackclient specific environment variables from stack.sh Change-Id: I725f30bc08e1df5a4c5770576c19ad1ddaeb843a --- lib/ceilometer | 36 +++++++++++++-------------- lib/cinder | 39 ++++++++++++++--------------- lib/ironic | 30 +++++++++++----------- lib/keystone | 67 ++++++++++++++++++++++++++++---------------------- lib/marconi | 32 +++++++++++++----------- lib/neutron | 32 ++++++++++++------------ lib/nova | 38 ++++++++++++++-------------- lib/savanna | 32 ++++++++++++------------ lib/swift | 50 +++++++++++++++++++++++-------------- lib/trove | 29 +++++++++++----------- stack.sh | 4 +++ 11 files changed, 209 insertions(+), 180 deletions(-) diff --git a/lib/ceilometer b/lib/ceilometer index 4ca77bb72b..6c87d03b13 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -71,33 +71,33 @@ function is_ceilometer_enabled { create_ceilometer_accounts() { - SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") - ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }") + SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") + ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }") # Ceilometer if [[ "$ENABLED_SERVICES" =~ "ceilometer-api" ]]; then - CEILOMETER_USER=$(keystone user-create \ - --name=ceilometer \ - --pass="$SERVICE_PASSWORD" \ - --tenant_id $SERVICE_TENANT \ - --email=ceilometer@example.com \ + CEILOMETER_USER=$(openstack user create \ + ceilometer \ + --password "$SERVICE_PASSWORD" \ + 
--project $SERVICE_TENANT \ + --email ceilometer@example.com \ | grep " id " | get_field 2) - keystone user-role-add \ - --tenant-id $SERVICE_TENANT \ - --user-id $CEILOMETER_USER \ - --role-id $ADMIN_ROLE + openstack role add \ + $ADMIN_ROLE \ + --project $SERVICE_TENANT \ + --user $CEILOMETER_USER if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - CEILOMETER_SERVICE=$(keystone service-create \ - --name=ceilometer \ + CEILOMETER_SERVICE=$(openstack service create \ + ceilometer \ --type=metering \ --description="OpenStack Telemetry Service" \ | grep " id " | get_field 2) - keystone endpoint-create \ + openstack endpoint create \ + $CEILOMETER_SERVICE \ --region RegionOne \ - --service_id $CEILOMETER_SERVICE \ - --publicurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT" \ - --adminurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT" \ - --internalurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT" + --publicurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT/" \ + --adminurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT/" \ + --internalurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT/" fi fi } diff --git a/lib/cinder b/lib/cinder index d5e78bb39c..c8c90c098d 100644 --- a/lib/cinder +++ b/lib/cinder @@ -330,45 +330,44 @@ function configure_cinder() { # Migrated from keystone_data.sh create_cinder_accounts() { - SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") - ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }") + SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") + ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }") # Cinder if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then - CINDER_USER=$(keystone user-create \ - --name=cinder \ - --pass="$SERVICE_PASSWORD" \ 
- --tenant-id $SERVICE_TENANT \ - --email=cinder@example.com \ + CINDER_USER=$(openstack user create \ + cinder \ + --password "$SERVICE_PASSWORD" \ + --project $SERVICE_TENANT \ + --email cinder@example.com \ | grep " id " | get_field 2) - keystone user-role-add \ - --tenant-id $SERVICE_TENANT \ - --user-id $CINDER_USER \ - --role-id $ADMIN_ROLE + openstack role add \ + $ADMIN_ROLE \ + --project $SERVICE_TENANT \ + --user $CINDER_USER if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - CINDER_SERVICE=$(keystone service-create \ - --name=cinder \ + CINDER_SERVICE=$(openstack service create \ + cinder \ --type=volume \ --description="Cinder Volume Service" \ | grep " id " | get_field 2) - keystone endpoint-create \ + openstack endpoint create \ + $CINDER_SERVICE \ --region RegionOne \ - --service_id $CINDER_SERVICE \ --publicurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s" \ --adminurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s" \ --internalurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s" - CINDER_V2_SERVICE=$(keystone service-create \ - --name=cinderv2 \ + CINDER_V2_SERVICE=$(openstack service create \ + cinderv2 \ --type=volumev2 \ --description="Cinder Volume Service V2" \ | grep " id " | get_field 2) - keystone endpoint-create \ + openstack endpoint create \ + $CINDER_V2_SERVICE \ --region RegionOne \ - --service_id $CINDER_V2_SERVICE \ --publicurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v2/\$(tenant_id)s" \ --adminurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v2/\$(tenant_id)s" \ --internalurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v2/\$(tenant_id)s" - fi fi } diff --git a/lib/ironic b/lib/ironic index 3c0e3cbaf7..607b13125a 100644 --- a/lib/ironic +++ b/lib/ironic @@ -145,30 +145,30 @@ function create_ironic_cache_dir() { # service ironic 
admin # if enabled create_ironic_accounts() { - SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") - ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }") + SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") + ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }") # Ironic if [[ "$ENABLED_SERVICES" =~ "ir-api" ]]; then - IRONIC_USER=$(keystone user-create \ - --name=ironic \ - --pass="$SERVICE_PASSWORD" \ - --tenant-id $SERVICE_TENANT \ - --email=ironic@example.com \ + IRONIC_USER=$(openstack user create \ + ironic \ + --password "$SERVICE_PASSWORD" \ + --project $SERVICE_TENANT \ + --email ironic@example.com \ | grep " id " | get_field 2) - keystone user-role-add \ - --tenant-id $SERVICE_TENANT \ - --user_id $IRONIC_USER \ - --role_id $ADMIN_ROLE + openstack role add \ + $ADMIN_ROLE \ + --project $SERVICE_TENANT \ + --user $IRONIC_USER if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - IRONIC_SERVICE=$(keystone service-create \ - --name=ironic \ + IRONIC_SERVICE=$(openstack service create \ + ironic \ --type=baremetal \ --description="Ironic baremetal provisioning service" \ | grep " id " | get_field 2) - keystone endpoint-create \ + openstack endpoint create \ + $IRONIC_SERVICE \ --region RegionOne \ - --service_id $IRONIC_SERVICE \ --publicurl "$IRONIC_SERVICE_PROTOCOL://$IRONIC_HOSTPORT" \ --adminurl "$IRONIC_SERVICE_PROTOCOL://$IRONIC_HOSTPORT" \ --internalurl "$IRONIC_SERVICE_PROTOCOL://$IRONIC_HOSTPORT" diff --git a/lib/keystone b/lib/keystone index 4f7f68b57f..bf0dcbb1bb 100644 --- a/lib/keystone +++ b/lib/keystone @@ -275,60 +275,69 @@ function configure_keystone() { create_keystone_accounts() { # admin - ADMIN_TENANT=$(keystone tenant-create \ - --name admin \ + ADMIN_TENANT=$(openstack project create \ + admin \ | grep " id " | get_field 2) - ADMIN_USER=$(keystone user-create \ - --name admin \ - --pass "$ADMIN_PASSWORD" \ + ADMIN_USER=$(openstack user create 
\ + admin \ + --project "$ADMIN_TENANT" \ --email admin@example.com \ + --password "$ADMIN_PASSWORD" \ | grep " id " | get_field 2) - ADMIN_ROLE=$(keystone role-create \ - --name admin \ + ADMIN_ROLE=$(openstack role create \ + admin \ | grep " id " | get_field 2) - keystone user-role-add \ - --user-id $ADMIN_USER \ - --role-id $ADMIN_ROLE \ - --tenant-id $ADMIN_TENANT + openstack role add \ + $ADMIN_ROLE \ + --project $ADMIN_TENANT \ + --user $ADMIN_USER # service - SERVICE_TENANT=$(keystone tenant-create \ - --name $SERVICE_TENANT_NAME \ + SERVICE_TENANT=$(openstack project create \ + $SERVICE_TENANT_NAME \ | grep " id " | get_field 2) # The Member role is used by Horizon and Swift so we need to keep it: - MEMBER_ROLE=$(keystone role-create --name=Member | grep " id " | get_field 2) + MEMBER_ROLE=$(openstack role create \ + Member \ + | grep " id " | get_field 2) # ANOTHER_ROLE demonstrates that an arbitrary role may be created and used # TODO(sleepsonthefloor): show how this can be used for rbac in the future! 
- ANOTHER_ROLE=$(keystone role-create --name=anotherrole | grep " id " | get_field 2) + ANOTHER_ROLE=$(openstack role create \ + anotherrole \ + | grep " id " | get_field 2) # invisible tenant - admin can't see this one - INVIS_TENANT=$(keystone tenant-create --name=invisible_to_admin | grep " id " | get_field 2) + INVIS_TENANT=$(openstack project create \ + invisible_to_admin \ + | grep " id " | get_field 2) # demo - DEMO_TENANT=$(keystone tenant-create \ - --name=demo \ + DEMO_TENANT=$(openstack project create \ + demo \ | grep " id " | get_field 2) - DEMO_USER=$(keystone user-create \ - --name demo \ - --pass "$ADMIN_PASSWORD" \ + DEMO_USER=$(openstack user create \ + demo \ + --project $DEMO_TENANT \ --email demo@example.com \ + --password "$ADMIN_PASSWORD" \ | grep " id " | get_field 2) - keystone user-role-add --user-id $DEMO_USER --role-id $MEMBER_ROLE --tenant-id $DEMO_TENANT - keystone user-role-add --user-id $ADMIN_USER --role-id $ADMIN_ROLE --tenant-id $DEMO_TENANT - keystone user-role-add --user-id $DEMO_USER --role-id $ANOTHER_ROLE --tenant-id $DEMO_TENANT - keystone user-role-add --user-id $DEMO_USER --role-id $MEMBER_ROLE --tenant-id $INVIS_TENANT + + openstack role add --project $DEMO_TENANT --user $DEMO_USER $MEMBER_ROLE + openstack role add --project $DEMO_TENANT --user $ADMIN_USER $ADMIN_ROLE + openstack role add --project $DEMO_TENANT --user $DEMO_USER $ANOTHER_ROLE + openstack role add --project $INVIS_TENANT --user $DEMO_USER $MEMBER_ROLE # Keystone if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - KEYSTONE_SERVICE=$(keystone service-create \ - --name keystone \ + KEYSTONE_SERVICE=$(openstack service create \ + keystone \ --type identity \ --description "Keystone Identity Service" \ | grep " id " | get_field 2) - keystone endpoint-create \ + openstack endpoint create \ + $KEYSTONE_SERVICE \ --region RegionOne \ - --service_id $KEYSTONE_SERVICE \ --publicurl 
"$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v$IDENTITY_API_VERSION" \ --adminurl "$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v$IDENTITY_API_VERSION" \ --internalurl "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v$IDENTITY_API_VERSION" diff --git a/lib/marconi b/lib/marconi index 7c8fd14255..88312cb1bd 100644 --- a/lib/marconi +++ b/lib/marconi @@ -151,27 +151,29 @@ function stop_marconi() { } function create_marconi_accounts() { - SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") - ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }") - - MARCONI_USER=$(get_id keystone user-create --name=marconi \ - --pass="$SERVICE_PASSWORD" \ - --tenant-id $SERVICE_TENANT \ - --email=marconi@example.com \ - | grep " id " | get_field 2) - keystone user-role-add --tenant-id $SERVICE_TENANT \ - --user-id $MARCONI_USER \ - --role-id $ADMIN_ROLE + SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") + ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }") + + MARCONI_USER=$(openstack user create \ + marconi \ + --password "$SERVICE_PASSWORD" \ + --project $SERVICE_TENANT \ + --email marconi@example.com \ + | grep " id " | get_field 2) + openstack role add \ + $ADMIN_ROLE \ + --project $SERVICE_TENANT \ + --user $MARCONI_USER if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - MARCONI_SERVICE=$(keystone service-create \ - --name=marconi \ + MARCONI_SERVICE=$(openstack service create \ + marconi \ --type=queuing \ --description="Marconi Service" \ | grep " id " | get_field 2) - keystone endpoint-create \ + openstack endpoint create \ + $MARCONI_SERVICE \ --region RegionOne \ - --service_id $MARCONI_SERVICE \ --publicurl "http://$SERVICE_HOST:8888" \ --adminurl "http://$SERVICE_HOST:8888" \ --internalurl "http://$SERVICE_HOST:8888" diff --git a/lib/neutron b/lib/neutron index 5bd38bcf73..df276c71d5 100644 --- 
a/lib/neutron +++ b/lib/neutron @@ -332,29 +332,29 @@ function create_neutron_cache_dir() { # Migrated from keystone_data.sh function create_neutron_accounts() { - SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") - ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }") + SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") + ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }") if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then - NEUTRON_USER=$(keystone user-create \ - --name=neutron \ - --pass="$SERVICE_PASSWORD" \ - --tenant-id $SERVICE_TENANT \ - --email=neutron@example.com \ + NEUTRON_USER=$(openstack user create \ + neutron \ + --password "$SERVICE_PASSWORD" \ + --project $SERVICE_TENANT \ + --email neutron@example.com \ | grep " id " | get_field 2) - keystone user-role-add \ - --tenant-id $SERVICE_TENANT \ - --user-id $NEUTRON_USER \ - --role-id $ADMIN_ROLE + openstack role add \ + $ADMIN_ROLE \ + --project $SERVICE_TENANT \ + --user $NEUTRON_USER if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - NEUTRON_SERVICE=$(keystone service-create \ - --name=neutron \ + NEUTRON_SERVICE=$(openstack service create \ + neutron \ --type=network \ --description="Neutron Service" \ | grep " id " | get_field 2) - keystone endpoint-create \ + openstack endpoint create \ + $NEUTRON_SERVICE \ --region RegionOne \ - --service_id $NEUTRON_SERVICE \ --publicurl "http://$SERVICE_HOST:9696/" \ --adminurl "http://$SERVICE_HOST:9696/" \ --internalurl "http://$SERVICE_HOST:9696/" @@ -363,7 +363,7 @@ function create_neutron_accounts() { } function create_neutron_initial_network() { - TENANT_ID=$(keystone tenant-list | grep " demo " | get_field 1) + TENANT_ID=$(openstack project list | grep " demo " | get_field 1) die_if_not_set $LINENO TENANT_ID "Failure retrieving TENANT_ID for demo" # Create a small network diff --git a/lib/nova b/lib/nova index d90aea7108..fefeda1236 100644 --- a/lib/nova +++ 
b/lib/nova @@ -324,41 +324,41 @@ function configure_nova() { # Migrated from keystone_data.sh create_nova_accounts() { - SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") - ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }") + SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") + ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }") # Nova if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then - NOVA_USER=$(keystone user-create \ - --name=nova \ - --pass="$SERVICE_PASSWORD" \ - --tenant-id $SERVICE_TENANT \ - --email=nova@example.com \ + NOVA_USER=$(openstack user create \ + nova \ + --password "$SERVICE_PASSWORD" \ + --project $SERVICE_TENANT \ + --email nova@example.com \ | grep " id " | get_field 2) - keystone user-role-add \ - --tenant-id $SERVICE_TENANT \ - --user-id $NOVA_USER \ - --role-id $ADMIN_ROLE + openstack role add \ + $ADMIN_ROLE \ + --project $SERVICE_TENANT \ + --user $NOVA_USER if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - NOVA_SERVICE=$(keystone service-create \ - --name=nova \ + NOVA_SERVICE=$(openstack service create \ + nova \ --type=compute \ --description="Nova Compute Service" \ | grep " id " | get_field 2) - keystone endpoint-create \ + openstack endpoint create \ + $NOVA_SERVICE \ --region RegionOne \ - --service_id $NOVA_SERVICE \ --publicurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s" \ --adminurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s" \ --internalurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s" - NOVA_V3_SERVICE=$(keystone service-create \ - --name=novav3 \ + NOVA_V3_SERVICE=$(openstack service create \ + novav3 \ --type=computev3 \ --description="Nova Compute Service V3" \ | grep " id " | get_field 2) - keystone endpoint-create \ + openstack endpoint create \ + $NOVA_V3_SERVICE \ --region RegionOne \ - --service_id 
$NOVA_V3_SERVICE \ --publicurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v3" \ --adminurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v3" \ --internalurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v3" diff --git a/lib/savanna b/lib/savanna index 6f42311971..43c5e386fe 100644 --- a/lib/savanna +++ b/lib/savanna @@ -54,29 +54,29 @@ TEMPEST_SERVICES+=,savanna # service savanna admin function create_savanna_accounts() { - SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") - ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }") - - SAVANNA_USER=$(keystone user-create \ - --name=savanna \ - --pass="$SERVICE_PASSWORD" \ - --tenant-id $SERVICE_TENANT \ - --email=savanna@example.com \ + SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") + ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }") + + SAVANNA_USER=$(openstack user create \ + savanna \ + --password "$SERVICE_PASSWORD" \ + --project $SERVICE_TENANT \ + --email savanna@example.com \ | grep " id " | get_field 2) - keystone user-role-add \ - --tenant-id $SERVICE_TENANT \ - --user-id $SAVANNA_USER \ - --role-id $ADMIN_ROLE + openstack role add \ + $ADMIN_ROLE \ + --project $SERVICE_TENANT \ + --user $SAVANNA_USER if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - SAVANNA_SERVICE=$(keystone service-create \ - --name=savanna \ + SAVANNA_SERVICE=$(openstack service create \ + savanna \ --type=data_processing \ --description="Savanna Data Processing" \ | grep " id " | get_field 2) - keystone endpoint-create \ + openstack endpoint create \ + $SAVANNA_SERVICE \ --region RegionOne \ - --service_id $SAVANNA_SERVICE \ --publicurl "$SAVANNA_SERVICE_PROTOCOL://$SAVANNA_SERVICE_HOST:$SAVANNA_SERVICE_PORT/v1.1/\$(tenant_id)s" \ --adminurl "$SAVANNA_SERVICE_PROTOCOL://$SAVANNA_SERVICE_HOST:$SAVANNA_SERVICE_PORT/v1.1/\$(tenant_id)s" \ --internalurl 
"$SAVANNA_SERVICE_PROTOCOL://$SAVANNA_SERVICE_HOST:$SAVANNA_SERVICE_PORT/v1.1/\$(tenant_id)s" diff --git a/lib/swift b/lib/swift index be25c81468..df586abe8b 100644 --- a/lib/swift +++ b/lib/swift @@ -527,39 +527,53 @@ function create_swift_accounts() { KEYSTONE_CATALOG_BACKEND=${KEYSTONE_CATALOG_BACKEND:-sql} - SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") - ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }") - - SWIFT_USER=$(keystone user-create --name=swift --pass="$SERVICE_PASSWORD" \ - --tenant-id $SERVICE_TENANT --email=swift@example.com | grep " id " | get_field 2) - keystone user-role-add --tenant-id $SERVICE_TENANT --user-id $SWIFT_USER --role-id $ADMIN_ROLE + SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") + ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }") + + SWIFT_USER=$(openstack user create \ + swift \ + --password "$SERVICE_PASSWORD" \ + --project $SERVICE_TENANT \ + --email=swift@example.com \ + | grep " id " | get_field 2) + openstack role add \ + $ADMIN_ROLE \ + --project $SERVICE_TENANT \ + --user $SWIFT_USER if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - SWIFT_SERVICE=$(keystone service-create --name=swift --type="object-store" \ - --description="Swift Service" | grep " id " | get_field 2) - keystone endpoint-create \ + SWIFT_SERVICE=$(openstack service create \ + swift \ + --type="object-store" \ + --description="Swift Service" \ + | grep " id " | get_field 2) + openstack endpoint create \ + $SWIFT_SERVICE \ --region RegionOne \ - --service_id $SWIFT_SERVICE \ --publicurl "http://$SERVICE_HOST:8080/v1/AUTH_\$(tenant_id)s" \ --adminurl "http://$SERVICE_HOST:8080" \ --internalurl "http://$SERVICE_HOST:8080/v1/AUTH_\$(tenant_id)s" fi - SWIFT_TENANT_TEST1=$(keystone tenant-create --name=swifttenanttest1 | grep " id " | get_field 2) + SWIFT_TENANT_TEST1=$(openstack project create swifttenanttest1 | grep " id " | get_field 2) 
die_if_not_set $LINENO SWIFT_TENANT_TEST1 "Failure creating SWIFT_TENANT_TEST1" - SWIFT_USER_TEST1=$(keystone user-create --name=swiftusertest1 --pass=$SWIFTUSERTEST1_PASSWORD --email=test@example.com | grep " id " | get_field 2) + SWIFT_USER_TEST1=$(openstack user create swiftusertest1 --password=$SWIFTUSERTEST1_PASSWORD \ + --project "$SWIFT_TENANT_TEST1" --email=test@example.com | grep " id " | get_field 2) die_if_not_set $LINENO SWIFT_USER_TEST1 "Failure creating SWIFT_USER_TEST1" - keystone user-role-add --user-id $SWIFT_USER_TEST1 --role-id $ADMIN_ROLE --tenant-id $SWIFT_TENANT_TEST1 + openstack role add --user $SWIFT_USER_TEST1 --project $SWIFT_TENANT_TEST1 $ADMIN_ROLE - SWIFT_USER_TEST3=$(keystone user-create --name=swiftusertest3 --pass=$SWIFTUSERTEST3_PASSWORD --email=test3@example.com | grep " id " | get_field 2) + SWIFT_USER_TEST3=$(openstack user create swiftusertest3 --password=$SWIFTUSERTEST3_PASSWORD \ + --project "$SWIFT_TENANT_TEST1" --email=test3@example.com | grep " id " | get_field 2) die_if_not_set $LINENO SWIFT_USER_TEST3 "Failure creating SWIFT_USER_TEST3" - keystone user-role-add --user-id $SWIFT_USER_TEST3 --role-id $ANOTHER_ROLE --tenant-id $SWIFT_TENANT_TEST1 + openstack role add --user $SWIFT_USER_TEST3 --project $SWIFT_TENANT_TEST1 $ANOTHER_ROLE - SWIFT_TENANT_TEST2=$(keystone tenant-create --name=swifttenanttest2 | grep " id " | get_field 2) + SWIFT_TENANT_TEST2=$(openstack project create swifttenanttest2 | grep " id " | get_field 2) die_if_not_set $LINENO SWIFT_TENANT_TEST2 "Failure creating SWIFT_TENANT_TEST2" - SWIFT_USER_TEST2=$(keystone user-create --name=swiftusertest2 --pass=$SWIFTUSERTEST2_PASSWORD --email=test2@example.com | grep " id " | get_field 2) + + SWIFT_USER_TEST2=$(openstack user create swiftusertest2 --password=$SWIFTUSERTEST2_PASSWORD \ + --project "$SWIFT_TENANT_TEST2" --email=test2@example.com | grep " id " | get_field 2) die_if_not_set $LINENO SWIFT_USER_TEST2 "Failure creating SWIFT_USER_TEST2" - keystone 
user-role-add --user-id $SWIFT_USER_TEST2 --role-id $ADMIN_ROLE --tenant-id $SWIFT_TENANT_TEST2 + openstack role add --user $SWIFT_USER_TEST2 --project $SWIFT_TENANT_TEST2 $ADMIN_ROLE } # init_swift() - Initialize rings diff --git a/lib/trove b/lib/trove index bb4549121d..5e1bbd548d 100644 --- a/lib/trove +++ b/lib/trove @@ -71,28 +71,29 @@ function setup_trove_logging() { create_trove_accounts() { # Trove - SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") - SERVICE_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }") + SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") + SERVICE_ROLE=$(openstack role list | awk "/ admin / { print \$2 }") if [[ "$ENABLED_SERVICES" =~ "trove" ]]; then - TROVE_USER=$(keystone user-create \ - --name=trove \ - --pass="$SERVICE_PASSWORD" \ - --tenant-id $SERVICE_TENANT \ - --email=trove@example.com \ + TROVE_USER=$(openstack user create \ + trove \ + --password "$SERVICE_PASSWORD" \ + --project $SERVICE_TENANT \ + --email trove@example.com \ | grep " id " | get_field 2) - keystone user-role-add --tenant-id $SERVICE_TENANT \ - --user-id $TROVE_USER \ - --role-id $SERVICE_ROLE + openstack role add \ + $SERVICE_ROLE \ + --project $SERVICE_TENANT \ + --user $TROVE_USER if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - TROVE_SERVICE=$(keystone service-create \ - --name=trove \ + TROVE_SERVICE=$(openstack service create + trove \ --type=database \ --description="Trove Service" \ | grep " id " | get_field 2) - keystone endpoint-create \ + openstack endpoint create \ + $TROVE_SERVICE \ --region RegionOne \ - --service_id $TROVE_SERVICE \ --publicurl "http://$SERVICE_HOST:8779/v1.0/\$(tenant_id)s" \ --adminurl "http://$SERVICE_HOST:8779/v1.0/\$(tenant_id)s" \ --internalurl "http://$SERVICE_HOST:8779/v1.0/\$(tenant_id)s" diff --git a/stack.sh b/stack.sh index c153132485..e5d87cca11 100755 --- a/stack.sh +++ b/stack.sh @@ -925,6 +925,9 @@ if is_service_enabled key; 
then # Do the keystone-specific bits from keystone_data.sh export OS_SERVICE_TOKEN=$SERVICE_TOKEN export OS_SERVICE_ENDPOINT=$SERVICE_ENDPOINT + # Add temporarily to make openstackclient work + export OS_TOKEN=$SERVICE_TOKEN + export OS_URL=$SERVICE_ENDPOINT create_keystone_accounts create_nova_accounts create_cinder_accounts @@ -947,6 +950,7 @@ if is_service_enabled key; then bash -x $FILES/keystone_data.sh # Set up auth creds now that keystone is bootstrapped + unset OS_TOKEN OS_URL export OS_AUTH_URL=$SERVICE_ENDPOINT export OS_TENANT_NAME=admin export OS_USERNAME=admin From 33d1f86a4931de76fba555a9a3f5e5fa3fd7c171 Mon Sep 17 00:00:00 2001 From: Steven Hardy Date: Thu, 13 Feb 2014 15:00:33 +0000 Subject: [PATCH 0736/4704] Add support for creating heat stack domain The Heat instance-users blueprint requires an additional domain where heat creates projects and users related to stack resources so add support for creating this domain when configured to install Heat. Note a workaround is currently required to make the openstack command work with the v3 keystone API. 
Change-Id: I36157372d85b577952b55481ca5cc42146011a54 --- lib/heat | 20 ++++++++++++++++++++ stack.sh | 4 ++++ 2 files changed, 24 insertions(+) diff --git a/lib/heat b/lib/heat index 9f5dd8b588..efb01ef3b8 100644 --- a/lib/heat +++ b/lib/heat @@ -110,6 +110,15 @@ function configure_heat() { iniset $HEAT_CONF ec2authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0 iniset $HEAT_CONF ec2authtoken keystone_ec2_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ec2tokens + # stack user domain + # Note we have to pass token/endpoint here because the current endpoint and + # version negotiation in OSC means just --os-identity-api-version=3 won't work + KS_ENDPOINT_V3="$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v3" + D_ID=$(openstack --os-token $OS_SERVICE_TOKEN --os-url=$KS_ENDPOINT_V3 \ + --os-identity-api-version=3 domain show heat \ + | grep ' id ' | get_field 2) + iniset $HEAT_CONF stack_user_domain ${D_ID} + # paste_deploy [[ "$HEAT_STANDALONE" = "True" ]] && iniset $HEAT_CONF paste_deploy flavor standalone @@ -196,6 +205,17 @@ function disk_image_create { upload_image "http://localhost/$output.qcow2" $TOKEN } +# create_heat_accounts() - Set up common required heat accounts +# Note this is in addition to what is in files/keystone_data.sh +function create_heat_accounts() { + # Note we have to pass token/endpoint here because the current endpoint and + # version negotiation in OSC means just --os-identity-api-version=3 won't work + KS_ENDPOINT_V3="$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v3" + openstack --os-token $OS_SERVICE_TOKEN --os-url=$KS_ENDPOINT_V3 \ + --os-identity-api-version=3 domain create heat \ + --description "Owns users and projects created by heat" +} + # Restore xtrace $XTRACE diff --git a/stack.sh b/stack.sh index c153132485..824982e4c6 100755 --- a/stack.sh +++ b/stack.sh @@ -938,6 +938,10 @@ if 
is_service_enabled key; then create_swift_accounts fi + if is_service_enabled heat; then + create_heat_accounts + fi + # ``keystone_data.sh`` creates services, admin and demo users, and roles. ADMIN_PASSWORD=$ADMIN_PASSWORD SERVICE_TENANT_NAME=$SERVICE_TENANT_NAME SERVICE_PASSWORD=$SERVICE_PASSWORD \ SERVICE_TOKEN=$SERVICE_TOKEN SERVICE_ENDPOINT=$SERVICE_ENDPOINT SERVICE_HOST=$SERVICE_HOST \ From 351173624c4a3e24aa479c6ce5f557732bff40e7 Mon Sep 17 00:00:00 2001 From: Sergey Lukjanov Date: Mon, 17 Feb 2014 18:38:07 +0400 Subject: [PATCH 0737/4704] Improve savanna-dashboard installation * split configurations setting to one-per-line; * don't set SAVANNA_URL in horizon configs - we're now using endpoits keystone to find corresponding edpoint. Change-Id: I9497a511656a2f70e923b651c66c5ef2917a0939 --- lib/savanna-dashboard | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/lib/savanna-dashboard b/lib/savanna-dashboard index 7713a78637..691b23f6e8 100644 --- a/lib/savanna-dashboard +++ b/lib/savanna-dashboard @@ -37,8 +37,9 @@ SAVANNA_PYTHONCLIENT_DIR=$DEST/python-savannaclient function configure_savanna_dashboard() { - echo -e "SAVANNA_URL = \"http://$SERVICE_HOST:8386/v1.1\"\nAUTO_ASSIGNMENT_ENABLED = False" >> $HORIZON_DIR/openstack_dashboard/local/local_settings.py - echo -e "HORIZON_CONFIG['dashboards'] += ('savanna',)\nINSTALLED_APPS += ('savannadashboard',)" >> $HORIZON_DIR/openstack_dashboard/settings.py + echo -e "AUTO_ASSIGNMENT_ENABLED = False" >> $HORIZON_DIR/openstack_dashboard/local/local_settings.py + echo -e "HORIZON_CONFIG['dashboards'] += ('savanna',)" >> $HORIZON_DIR/openstack_dashboard/settings.py + echo -e "INSTALLED_APPS += ('savannadashboard',)" >> $HORIZON_DIR/openstack_dashboard/settings.py if is_service_enabled neutron; then echo -e "SAVANNA_USE_NEUTRON = True" >> $HORIZON_DIR/openstack_dashboard/local/local_settings.py From d8864feae93f898f043febf0b4734f0b61c602d4 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 17 Feb 
2014 11:00:42 -0600 Subject: [PATCH 0738/4704] Fix shocco errors Clean up comments to fix errors seen while processing with shocco Change-Id: I0e97ad27613313f03e47c107051ea93b115d4744 --- driver_certs/cinder_driver_cert.sh | 1 + functions | 7 ++++++- lib/apache | 4 ++-- lib/marconi | 3 ++- lib/stackforge | 5 +++-- tools/create_userrc.sh | 4 +--- tools/fixup_stuff.sh | 3 ++- 7 files changed, 17 insertions(+), 10 deletions(-) diff --git a/driver_certs/cinder_driver_cert.sh b/driver_certs/cinder_driver_cert.sh index 99b2c8e899..e45b7f8736 100755 --- a/driver_certs/cinder_driver_cert.sh +++ b/driver_certs/cinder_driver_cert.sh @@ -16,6 +16,7 @@ # It also assumes default install location (/opt/stack/xxx) # to aid in debug, you should also verify that you've added # an output directory for screen logs: +# # SCREEN_LOGDIR=/opt/stack/screen-logs CERT_DIR=$(cd $(dirname "$0") && pwd) diff --git a/functions b/functions index 5eae7fe510..6979c6c155 100644 --- a/functions +++ b/functions @@ -2,10 +2,15 @@ # # The following variables are assumed to be defined by certain functions: # +# - ``DATABASE_BACKENDS`` # - ``ENABLED_SERVICES`` # - ``FILES`` # - ``GLANCE_HOSTPORT`` +# - ``REQUIREMENTS_DIR`` +# - ``STACK_USER`` # - ``TRACK_DEPENDS`` +# - ``UNDO_REQUIREMENTS`` +# # Include the common functions FUNC_DIR=$(cd $(dirname "${BASH_SOURCE:-$0}") && pwd) @@ -45,7 +50,7 @@ function cleanup_tmp { # Updates the dependencies in project_dir from the # openstack/requirements global list before installing anything. 
# -# Uses globals ``TRACK_DEPENDS``, ``REQUIREMENTS_DIR`` +# Uses globals ``TRACK_DEPENDS``, ``REQUIREMENTS_DIR``, ``UNDO_REQUIREMENTS`` # setup_develop directory function setup_develop() { local project_dir=$1 diff --git a/lib/apache b/lib/apache index 8ae78b2181..0e5712f56b 100644 --- a/lib/apache +++ b/lib/apache @@ -4,8 +4,8 @@ # Dependencies: # # - ``functions`` file -# -``STACK_USER`` must be defined - +# - ``STACK_USER`` must be defined +# # lib/apache exports the following functions: # # - is_apache_enabled_service diff --git a/lib/marconi b/lib/marconi index 88312cb1bd..cc33aebd2b 100644 --- a/lib/marconi +++ b/lib/marconi @@ -2,7 +2,8 @@ # Install and start **Marconi** service # To enable a minimal set of Marconi services, add the following to localrc: -# enable_service marconi-server +# +# enable_service marconi-server # # Dependencies: # - functions diff --git a/lib/stackforge b/lib/stackforge index 718b818ff6..5fa4570b74 100644 --- a/lib/stackforge +++ b/lib/stackforge @@ -6,8 +6,9 @@ # This is appropriate for python libraries that release to pypi and are # expected to be used beyond OpenStack like, but are requirements # for core services in global-requirements. -# * wsme -# * pecan +# +# * wsme +# * pecan # # This is not appropriate for stackforge projects which are early stage # OpenStack tools diff --git a/tools/create_userrc.sh b/tools/create_userrc.sh index d9c93cc476..c4eb8d4581 100755 --- a/tools/create_userrc.sh +++ b/tools/create_userrc.sh @@ -54,9 +54,7 @@ $0 -P -C mytenant -u myuser -p mypass EOF } -if ! options=$(getopt -o hPAp:u:r:C: -l os-username:,os-password:,os-tenant-name:,os-tenant-id:,os-auth-url:,target-dir:,skip-tenant:,os-cacert:,help,debug -- "$@") -then - #parse error +if ! 
options=$(getopt -o hPAp:u:r:C: -l os-username:,os-password:,os-tenant-name:,os-tenant-id:,os-auth-url:,target-dir:,skip-tenant:,os-cacert:,help,debug -- "$@"); then display_help exit 1 fi diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index a28e10ef2d..47b0cd10cd 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -70,7 +70,8 @@ if [[ -d $dir ]]; then fi # Ubuntu 12.04 -# ----- +# ------------ + # We can regularly get kernel crashes on the 12.04 default kernel, so attempt # to install a new kernel if [[ ${DISTRO} =~ (precise) ]]; then From b72235611d9659a49caf87b2cc89f05fce27a3e0 Mon Sep 17 00:00:00 2001 From: Daniel Salinas Date: Sun, 16 Feb 2014 18:57:20 -0600 Subject: [PATCH 0739/4704] Fixed missing backslash in lib/trove This is breaking the installation of trove with devstack Change-Id: I8b59d96072da47b8be5000eda835258654230b0f Closes-Bug: 1280915 --- lib/trove | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/trove b/lib/trove index 5e1bbd548d..6834149c64 100644 --- a/lib/trove +++ b/lib/trove @@ -86,7 +86,7 @@ create_trove_accounts() { --project $SERVICE_TENANT \ --user $TROVE_USER if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - TROVE_SERVICE=$(openstack service create + TROVE_SERVICE=$(openstack service create \ trove \ --type=database \ --description="Trove Service" \ From 18d5c833d47e41c8c8dcd73f35268d6e2b43df5b Mon Sep 17 00:00:00 2001 From: Ryu Ishimoto Date: Wed, 19 Feb 2014 00:33:46 +0900 Subject: [PATCH 0740/4704] Remove provider router configuration To be compatible with the Icehouse release of MidoNet, the provider router configuration is removed from devstack since it is no longer necessary to configure it. 
Change-Id: I4be2d9bbf2c82fd375702cbb1d60c3277086134f Implements: blueprint remove-provider-router-config-for-midonet --- lib/neutron_plugins/midonet | 11 ++++++----- lib/neutron_thirdparty/midonet | 19 ++----------------- 2 files changed, 8 insertions(+), 22 deletions(-) diff --git a/lib/neutron_plugins/midonet b/lib/neutron_plugins/midonet index f95fcb75b9..dd3b2baeca 100644 --- a/lib/neutron_plugins/midonet +++ b/lib/neutron_plugins/midonet @@ -1,6 +1,10 @@ # Neutron MidoNet plugin # ---------------------- +MIDONET_DIR=${MIDONET_DIR:-$DEST/midonet} +MIDONET_API_PORT=${MIDONET_API_PORT:-8080} +MIDONET_API_URL=${MIDONET_API_URL:-http://localhost:$MIDONET_API_PORT/midonet-api} + # Save trace setting MY_XTRACE=$(set +o | grep xtrace) set +o xtrace @@ -47,8 +51,8 @@ function neutron_plugin_configure_plugin_agent() { } function neutron_plugin_configure_service() { - if [[ "$MIDONET_API_URI" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE MIDONET midonet_uri $MIDONET_API_URI + if [[ "$MIDONET_API_URL" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE MIDONET midonet_uri $MIDONET_API_URL fi if [[ "$MIDONET_USERNAME" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE MIDONET username $MIDONET_USERNAME @@ -59,9 +63,6 @@ function neutron_plugin_configure_service() { if [[ "$MIDONET_PROJECT_ID" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE MIDONET project_id $MIDONET_PROJECT_ID fi - if [[ "$MIDONET_PROVIDER_ROUTER_ID" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE MIDONET provider_router_id $MIDONET_PROVIDER_ROUTER_ID - fi Q_L3_ENABLED=True Q_L3_ROUTER_PER_TENANT=True diff --git a/lib/neutron_thirdparty/midonet b/lib/neutron_thirdparty/midonet index e672528a2d..98be4254fc 100644 --- a/lib/neutron_thirdparty/midonet +++ b/lib/neutron_thirdparty/midonet @@ -10,20 +10,12 @@ # MidoNet devstack destination dir MIDONET_DIR=${MIDONET_DIR:-$DEST/midonet} -MIDONET_API_PORT=${MIDONET_API_PORT:-8080} -MIDONET_API_URL=${MIDONET_API_URL:-http://localhost:$MIDONET_API_PORT/midonet-api} # MidoNet client repo 
MIDONET_CLIENT_REPO=${MIDONET_CLIENT_REPO:-https://github.com/midokura/python-midonetclient.git} MIDONET_CLIENT_BRANCH=${MIDONET_CLIENT_BRANCH:-master} MIDONET_CLIENT_DIR=${MIDONET_CLIENT_DIR:-$MIDONET_DIR/python-midonetclient} -# MidoNet OpenStack repo -MIDONET_OS_REPO=${MIDONET_OS_REPO:-https://github.com/midokura/midonet-openstack.git} -MIDONET_OS_BRANCH=${MIDONET_OS_BRANCH:-master} -MIDONET_OS_DIR=${MIDONET_OS_DIR:-$MIDONET_DIR/midonet-openstack} -MIDONET_SETUP_SCRIPT=${MIDONET_SETUP_SCRIPT:-$MIDONET_OS_DIR/bin/setup_midonet_topology.py} - # Save trace setting MY_XTRACE=$(set +o | grep xtrace) set +o xtrace @@ -33,19 +25,12 @@ function configure_midonet() { } function init_midonet() { - - # Initialize DB. Evaluate the output of setup_midonet_topology.py to set - # env variables for provider router ID. - eval `python $MIDONET_SETUP_SCRIPT $MIDONET_API_URL admin $ADMIN_PASSWORD admin provider_devices` - die_if_not_set $LINENO provider_router_id "Error running midonet setup script, provider_router_id was not set." - - iniset /$Q_PLUGIN_CONF_FILE MIDONET provider_router_id $provider_router_id + : } function install_midonet() { git_clone $MIDONET_CLIENT_REPO $MIDONET_CLIENT_DIR $MIDONET_CLIENT_BRANCH - git_clone $MIDONET_OS_REPO $MIDONET_OS_DIR $MIDONET_OS_BRANCH - export PYTHONPATH=$MIDONET_CLIENT_DIR/src:$MIDONET_OS_DIR/src:$PYTHONPATH + export PYTHONPATH=$MIDONET_CLIENT_DIR/src:$PYTHONPATH } function start_midonet() { From 2dcc77422348e55b6f7028679647cfbdf872f6a2 Mon Sep 17 00:00:00 2001 From: Malini Kamalambal Date: Tue, 18 Feb 2014 13:45:18 -0500 Subject: [PATCH 0741/4704] Add retry to connect to mongo db This patch adds retries to connect to the mongodb, after a restart. 
Change-Id: I16e37614736c247fa0b737db2b868c052c2aa33a --- lib/marconi | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/marconi b/lib/marconi index 88312cb1bd..b6ce57a295 100644 --- a/lib/marconi +++ b/lib/marconi @@ -68,7 +68,9 @@ function is_marconi_enabled { # cleanup_marconi() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_marconi() { - mongo marconi --eval "db.dropDatabase();" + if ! timeout $SERVICE_TIMEOUT sh -c "while ! mongo marconi --eval 'db.dropDatabase();'; do sleep 1; done"; then + die $LINENO "Mongo DB did not start" + fi } # configure_marconiclient() - Set config files, create data dirs, etc From de2057290a368e339cb66a8a61d483c90f964089 Mon Sep 17 00:00:00 2001 From: Sergey Lukjanov Date: Wed, 19 Feb 2014 14:00:42 +0400 Subject: [PATCH 0742/4704] Improve savanna keystone auth configuration We're doing to use common keystone configuration approach - section keystone_authtoken with config opts from the python-keystoneclient auth_token middleware. 
Change-Id: Ibbe0c76ee3b00045f5cb5134bd7661e9cef6ccdd --- extras.d/70-savanna.sh | 5 +++++ lib/savanna | 29 +++++++++++++++++++++++++++++ 2 files changed, 34 insertions(+) diff --git a/extras.d/70-savanna.sh b/extras.d/70-savanna.sh index 6bbe113fa7..edc1376deb 100644 --- a/extras.d/70-savanna.sh +++ b/extras.d/70-savanna.sh @@ -8,6 +8,7 @@ if is_service_enabled savanna; then elif [[ "$1" == "stack" && "$2" == "install" ]]; then echo_summary "Installing Savanna" install_savanna + cleanup_savanna if is_service_enabled horizon; then install_savanna_dashboard fi @@ -29,4 +30,8 @@ if is_service_enabled savanna; then cleanup_savanna_dashboard fi fi + + if [[ "$1" == "clean" ]]; then + cleanup_savanna + fi fi diff --git a/lib/savanna b/lib/savanna index 43c5e386fe..954f0e711e 100644 --- a/lib/savanna +++ b/lib/savanna @@ -10,6 +10,7 @@ # configure_savanna # start_savanna # stop_savanna +# cleanup_savanna # Save trace setting XTRACE=$(set +o | grep xtrace) @@ -33,6 +34,8 @@ SAVANNA_SERVICE_HOST=${SAVANNA_SERVICE_HOST:-$SERVICE_HOST} SAVANNA_SERVICE_PORT=${SAVANNA_SERVICE_PORT:-8386} SAVANNA_SERVICE_PROTOCOL=${SAVANNA_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} +SAVANNA_AUTH_CACHE_DIR=${SAVANNA_AUTH_CACHE_DIR:-/var/cache/savanna} + # Support entry points installation of console scripts if [[ -d $SAVANNA_DIR/bin ]]; then SAVANNA_BIN_DIR=$SAVANNA_DIR/bin @@ -83,6 +86,14 @@ function create_savanna_accounts() { fi } +# cleanup_savanna() - Remove residual data files, anything left over from +# previous runs that would need to clean up. +function cleanup_savanna() { + + # Cleanup auth cache dir + sudo rm -rf $SAVANNA_AUTH_CACHE_DIR +} + # configure_savanna() - Set config files, create data dirs, etc function configure_savanna() { @@ -94,9 +105,27 @@ function configure_savanna() { # Copy over savanna configuration file and configure common parameters. 
cp $SAVANNA_DIR/etc/savanna/savanna.conf.sample $SAVANNA_CONF_FILE + # Create auth cache dir + sudo mkdir -p $SAVANNA_AUTH_CACHE_DIR + sudo chown $STACK_USER $SAVANNA_AUTH_CACHE_DIR + rm -rf $SAVANNA_AUTH_CACHE_DIR/* + + # Set obsolete keystone auth configs for backward compatibility + iniset $SAVANNA_CONF_FILE DEFAULT os_auth_host $KEYSTONE_SERVICE_HOST + iniset $SAVANNA_CONF_FILE DEFAULT os_auth_port $KEYSTONE_SERVICE_PORT + iniset $SAVANNA_CONF_FILE DEFAULT os_auth_protocol $KEYSTONE_SERVICE_PROTOCOL iniset $SAVANNA_CONF_FILE DEFAULT os_admin_password $SERVICE_PASSWORD iniset $SAVANNA_CONF_FILE DEFAULT os_admin_username savanna iniset $SAVANNA_CONF_FILE DEFAULT os_admin_tenant_name $SERVICE_TENANT_NAME + + # Set actual keystone auth configs + iniset $SAVANNA_CONF_FILE keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/ + iniset $SAVANNA_CONF_FILE keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME + iniset $SAVANNA_CONF_FILE keystone_authtoken admin_user savanna + iniset $SAVANNA_CONF_FILE keystone_authtoken admin_password $SERVICE_PASSWORD + iniset $SAVANNA_CONF_FILE keystone_authtoken signing_dir $SAVANNA_AUTH_CACHE_DIR + iniset $SAVANNA_CONF_FILE keystone_authtoken cafile $KEYSTONE_SSL_CA + iniset $SAVANNA_CONF_FILE DEFAULT debug $SAVANNA_DEBUG iniset $SAVANNA_CONF_FILE database connection `database_connection_url savanna` From 27f29440d1b6f5343e02b8beff04c21882139ce7 Mon Sep 17 00:00:00 2001 From: Brett Campbell Date: Wed, 19 Feb 2014 18:23:16 -0800 Subject: [PATCH 0743/4704] Set umask Ensure we have a known-good umask. Otherwise files such as /etc/polkit-1/rules.d/50-libvirt-$STACK_USER.rules may not be readable by non-root users afterwards. Also reworded some comments to be more clear. 
Change-Id: I7653d4eee062cf32df22aa158da6269b1aa9a558 Closes-Bug: #1265195 --- stack.sh | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/stack.sh b/stack.sh index 4a55225685..a5d66cc8e8 100755 --- a/stack.sh +++ b/stack.sh @@ -5,11 +5,12 @@ # **Glance**, **Heat**, **Horizon**, **Keystone**, **Nova**, **Neutron**, # and **Swift** -# This script allows you to specify configuration options of what git -# repositories to use, enabled services, network configuration and various -# passwords. If you are crafty you can run the script on multiple nodes using -# shared settings for common resources (mysql, rabbitmq) and build a multi-node -# developer install. +# This script's options can be changed by setting appropriate environment +# variables. You can configure things like which git repositories to use, +# services to enable, OS images to use, etc. Default values are located in the +# ``stackrc`` file. If you are crafty you can run the script on multiple nodes +# using shared settings for common resources (eg., mysql or rabbitmq) and build +# a multi-node developer install. # To keep this script simple we assume you are running on a recent **Ubuntu** # (12.04 Precise or newer) or **Fedora** (F18 or newer) machine. (It may work @@ -30,6 +31,9 @@ unset LANGUAGE LC_ALL=C export LC_ALL +# Make sure umask is sane +umask 022 + # Keep track of the devstack directory TOP_DIR=$(cd $(dirname "$0") && pwd) From f6368d3eaccc33d5afdbc53a34bf6e37b6e11eb8 Mon Sep 17 00:00:00 2001 From: Masayuki Igawa Date: Thu, 20 Feb 2014 13:31:26 +0900 Subject: [PATCH 0744/4704] Fix comments about System Functions This commit fixes comments about "System Functions". 
* Add a missing comment about System Functions in the header * Fix singular to plural like others Change-Id: I3feb94cd11a6683ca80093574d60fdf7420e3af2 --- functions-common | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/functions-common b/functions-common index d92e39cd91..eb9b4ac8bb 100644 --- a/functions-common +++ b/functions-common @@ -15,6 +15,7 @@ # - Process Functions # - Python Functions # - Service Functions +# - System Functions # # The following variables are assumed to be defined by certain functions: # @@ -1280,8 +1281,8 @@ function use_exclusive_service { } -# System Function -# =============== +# System Functions +# ================ # Only run the command if the target file (the last arg) is not on an # NFS filesystem. From 1958c1eb5e3521a70a3cf4185a177da7d17d83e9 Mon Sep 17 00:00:00 2001 From: Masayuki Igawa Date: Thu, 20 Feb 2014 14:32:15 +0900 Subject: [PATCH 0745/4704] Remove unnecessary comment out lines This commit removes some comment-outed codes. If we want to use them, we can get them from the git repository. Change-Id: Ie438c43d332d0631750f0ad458653fc40e23faad --- clean.sh | 9 --------- tools/info.sh | 2 -- tools/xen/build_domU_multi.sh | 6 ------ 3 files changed, 17 deletions(-) diff --git a/clean.sh b/clean.sh index 09f08dc8c2..b2a9405c88 100755 --- a/clean.sh +++ b/clean.sh @@ -101,11 +101,6 @@ if is_service_enabled nova && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; th cleanup_nova_hypervisor fi -#if mount | grep $DATA_DIR/swift/drives; then -# sudo umount $DATA_DIR/swift/drives/sdb1 -#fi - - # Clean out /etc sudo rm -rf /etc/keystone /etc/glance /etc/nova /etc/cinder /etc/swift @@ -123,9 +118,5 @@ if [[ -n "$SCREEN_LOGDIR" ]] && [[ -d "$SCREEN_LOGDIR" ]]; then sudo rm -rf $SCREEN_LOGDIR fi -# Clean up networking... -# should this be in nova? 
-# FIXED_IP_ADDR in br100 - # Clean up files rm -f $TOP_DIR/.stackenv diff --git a/tools/info.sh b/tools/info.sh index 3ab7966ab4..1e521b9c4b 100755 --- a/tools/info.sh +++ b/tools/info.sh @@ -122,13 +122,11 @@ while read line; do ver=${BASH_REMATCH[2]} else # Unhandled format in freeze file - #echo "unknown: $p" continue fi echo "pip|${p}|${ver}" else # No match in freeze file - #echo "unknown: $p" continue fi done <$FREEZE_FILE diff --git a/tools/xen/build_domU_multi.sh b/tools/xen/build_domU_multi.sh index 0285f42e42..0eb2077414 100755 --- a/tools/xen/build_domU_multi.sh +++ b/tools/xen/build_domU_multi.sh @@ -25,11 +25,5 @@ function build_xva { # because rabbit won't launch with an ip addr hostname :( build_xva HEADNODE $HEAD_PUB_IP $HEAD_MGT_IP 1 "ENABLED_SERVICES=g-api,g-reg,key,n-api,n-sch,n-vnc,horizon,mysql,rabbit" -# Wait till the head node is up -#while ! curl -L http://$HEAD_PUB_IP | grep -q username; do -# echo "Waiting for head node ($HEAD_PUB_IP) to start..." -# sleep 5 -#done - # Build the HA compute host build_xva COMPUTENODE $COMPUTE_PUB_IP $COMPUTE_MGT_IP 0 "ENABLED_SERVICES=n-cpu,n-net,n-api" From 3d60f4dd531388cd01a3aa689053dfc22acbd16c Mon Sep 17 00:00:00 2001 From: Giulio Fidente Date: Thu, 20 Feb 2014 16:43:49 +0100 Subject: [PATCH 0746/4704] Disable tempest backup tests if c-bak unavailable This will update the tempest config to not run the cinder backup tests when the c-bak service is not enabled. Change-Id: I0b6486f1222afa7ae9bd9d13c7d3648d2b870710 --- lib/tempest | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/tempest b/lib/tempest index c8eebfcf05..596750b32f 100644 --- a/lib/tempest +++ b/lib/tempest @@ -314,8 +314,8 @@ function configure_tempest() { iniset $TEMPEST_CONFIG scenario large_ops_number ${TEMPEST_LARGE_OPS_NUMBER:-0} # Volume - if is_service_enabled c-bak; then - iniset $TEMPEST_CONFIG volume volume_backup_enabled "True" + if ! 
is_service_enabled c-bak; then + iniset $TEMPEST_CONFIG volume-feature-enabled backup False fi CINDER_MULTI_LVM_BACKEND=$(trueorfalse False $CINDER_MULTI_LVM_BACKEND) if [ $CINDER_MULTI_LVM_BACKEND == "True" ]; then From 2d65059e725ad27d1e9bdddbea9982d1d8027c01 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Thu, 20 Feb 2014 15:49:13 +0100 Subject: [PATCH 0747/4704] Add RHEL7 beta support RHEL7 still in beta status, so it will require the FORCE option, until the GA release. The main notable difference from another RHEL family members, it does not have the mysql alias for the mariadb. Change-Id: Ic90bb6c3dd9447fc80453c3dc1adb22cdfc6226f --- files/rpms/cinder | 2 +- files/rpms/glance | 4 ++-- files/rpms/neutron | 4 ++-- files/rpms/nova | 8 ++++---- files/rpms/swift | 2 +- lib/databases/mysql | 18 +++++++++++++++--- 6 files changed, 25 insertions(+), 13 deletions(-) diff --git a/files/rpms/cinder b/files/rpms/cinder index 623c13e676..199ae10b79 100644 --- a/files/rpms/cinder +++ b/files/rpms/cinder @@ -4,4 +4,4 @@ qemu-img python-devel postgresql-devel iscsi-initiator-utils -python-lxml #dist:f18,f19,f20 +python-lxml #dist:f18,f19,f20,rhel7 diff --git a/files/rpms/glance b/files/rpms/glance index fffd9c85b4..785ce25df5 100644 --- a/files/rpms/glance +++ b/files/rpms/glance @@ -9,8 +9,8 @@ python-argparse python-devel python-eventlet python-greenlet -python-lxml #dist:f18,f19,f20 -python-paste-deploy #dist:f18,f19,f20 +python-lxml #dist:f18,f19,f20,rhel7 +python-paste-deploy #dist:f18,f19,f20,rhel7 python-routes python-sqlalchemy python-wsgiref diff --git a/files/rpms/neutron b/files/rpms/neutron index 67bf52350a..42d7f68d37 100644 --- a/files/rpms/neutron +++ b/files/rpms/neutron @@ -11,8 +11,8 @@ python-greenlet python-iso8601 python-kombu #rhel6 gets via pip -python-paste # dist:f18,f19,f20 -python-paste-deploy # dist:f18,f19,f20 +python-paste # dist:f18,f19,f20,rhel7 +python-paste-deploy # dist:f18,f19,f20,rhel7 python-qpid python-routes python-sqlalchemy diff 
--git a/files/rpms/nova b/files/rpms/nova index ac70ac5d6f..a607d925e1 100644 --- a/files/rpms/nova +++ b/files/rpms/nova @@ -28,11 +28,11 @@ python-kombu python-lockfile python-migrate python-mox -python-paramiko # dist:f18,f19,f20 -# ^ on RHEL, brings in python-crypto which conflicts with version from +python-paramiko # dist:f18,f19,f20,rhel7 +# ^ on RHEL6, brings in python-crypto which conflicts with version from # pip we need -python-paste # dist:f18,f19,f20 -python-paste-deploy # dist:f18,f19,f20 +python-paste # dist:f18,f19,f20,rhel7 +python-paste-deploy # dist:f18,f19,f20,rhel7 python-qpid python-routes python-sqlalchemy diff --git a/files/rpms/swift b/files/rpms/swift index 32432bca9b..72253f7752 100644 --- a/files/rpms/swift +++ b/files/rpms/swift @@ -9,7 +9,7 @@ python-eventlet python-greenlet python-netifaces python-nose -python-paste-deploy # dist:f18,f19,f20 +python-paste-deploy # dist:f18,f19,f20,rhel7 python-simplejson python-webob pyxattr diff --git a/lib/databases/mysql b/lib/databases/mysql index 476b4b91b7..31e7163033 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -25,7 +25,11 @@ function cleanup_database_mysql { sudo rm -rf /var/lib/mysql return elif is_fedora; then - MYSQL=mysqld + if [[ $DISTRO =~ (rhel7) ]]; then + MYSQL=mariadb + else + MYSQL=mysqld + fi elif is_suse; then MYSQL=mysql else @@ -48,8 +52,12 @@ function configure_database_mysql { MY_CONF=/etc/mysql/my.cnf MYSQL=mysql elif is_fedora; then + if [[ $DISTRO =~ (rhel7) ]]; then + MYSQL=mariadb + else + MYSQL=mysqld + fi MY_CONF=/etc/my.cnf - MYSQL=mysqld elif is_suse; then MY_CONF=/etc/my.cnf MYSQL=mysql @@ -135,7 +143,11 @@ EOF fi # Install mysql-server if is_ubuntu || is_fedora; then - install_package mysql-server + if [[ $DISTRO =~ (rhel7) ]]; then + install_package mariadb-server + else + install_package mysql-server + fi elif is_suse; then if ! 
is_package_installed mariadb; then install_package mysql-community-server From 09bb9e67923c1de4d4479000eb329b139732c57b Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Fri, 21 Feb 2014 14:33:29 +1100 Subject: [PATCH 0748/4704] Add more files to run_tests.sh bash8 check Add functions-common, stackrc, openrc, exerciserc, eucarc to bash8 checks Change-Id: Ic14b348c871bf98bf35c7e866e715bb75bdccf97 --- run_tests.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/run_tests.sh b/run_tests.sh index 9d9d18661e..b4f26c5709 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -20,7 +20,7 @@ if [[ -n $@ ]]; then else LIBS=`find lib -type f | grep -v \.md` SCRIPTS=`find . -type f -name \*\.sh` - EXTRA="functions" + EXTRA="functions functions-common stackrc openrc exerciserc eucarc" FILES="$SCRIPTS $LIBS $EXTRA" fi From f8e86bb3129c6aa5cb9c70ceb2a55f01b2dd1bf0 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Fri, 21 Feb 2014 15:16:31 +1100 Subject: [PATCH 0749/4704] Un-nest generate_swift_config I think this got accidentally nested during some code refactorizing? 
Change-Id: Ie486cf3395b6acf3a10eb32e116d39ca56134b9f --- lib/swift | 79 ++++++++++++++++++++++++++++--------------------------- 1 file changed, 40 insertions(+), 39 deletions(-) diff --git a/lib/swift b/lib/swift index df586abe8b..6c33af5082 100644 --- a/lib/swift +++ b/lib/swift @@ -231,6 +231,46 @@ function _config_swift_apache_wsgi() { done } +# This function generates an object/container/account configuration +# emulating 4 nodes on different ports +function generate_swift_config() { + local swift_node_config=$1 + local node_id=$2 + local bind_port=$3 + local server_type=$4 + + log_facility=$[ node_id - 1 ] + node_path=${SWIFT_DATA_DIR}/${node_number} + + iniuncomment ${swift_node_config} DEFAULT user + iniset ${swift_node_config} DEFAULT user ${STACK_USER} + + iniuncomment ${swift_node_config} DEFAULT bind_port + iniset ${swift_node_config} DEFAULT bind_port ${bind_port} + + iniuncomment ${swift_node_config} DEFAULT swift_dir + iniset ${swift_node_config} DEFAULT swift_dir ${SWIFT_CONF_DIR} + + iniuncomment ${swift_node_config} DEFAULT devices + iniset ${swift_node_config} DEFAULT devices ${node_path} + + iniuncomment ${swift_node_config} DEFAULT log_facility + iniset ${swift_node_config} DEFAULT log_facility LOG_LOCAL${log_facility} + + iniuncomment ${swift_node_config} DEFAULT workers + iniset ${swift_node_config} DEFAULT workers 1 + + iniuncomment ${swift_node_config} DEFAULT disable_fallocate + iniset ${swift_node_config} DEFAULT disable_fallocate true + + iniuncomment ${swift_node_config} DEFAULT mount_check + iniset ${swift_node_config} DEFAULT mount_check false + + iniuncomment ${swift_node_config} ${server_type}-replicator vm_test_mode + iniset ${swift_node_config} ${server_type}-replicator vm_test_mode yes +} + + # configure_swift() - Set config files, create data dirs and loop image function configure_swift() { local swift_pipeline="${SWIFT_EXTRAS_MIDDLEWARE_NO_AUTH}" @@ -364,45 +404,6 @@ EOF cp ${SWIFT_DIR}/etc/swift.conf-sample 
${SWIFT_CONF_DIR}/swift.conf iniset ${SWIFT_CONF_DIR}/swift.conf swift-hash swift_hash_path_suffix ${SWIFT_HASH} - # This function generates an object/container/account configuration - # emulating 4 nodes on different ports - function generate_swift_config() { - local swift_node_config=$1 - local node_id=$2 - local bind_port=$3 - local server_type=$4 - - log_facility=$[ node_id - 1 ] - node_path=${SWIFT_DATA_DIR}/${node_number} - - iniuncomment ${swift_node_config} DEFAULT user - iniset ${swift_node_config} DEFAULT user ${STACK_USER} - - iniuncomment ${swift_node_config} DEFAULT bind_port - iniset ${swift_node_config} DEFAULT bind_port ${bind_port} - - iniuncomment ${swift_node_config} DEFAULT swift_dir - iniset ${swift_node_config} DEFAULT swift_dir ${SWIFT_CONF_DIR} - - iniuncomment ${swift_node_config} DEFAULT devices - iniset ${swift_node_config} DEFAULT devices ${node_path} - - iniuncomment ${swift_node_config} DEFAULT log_facility - iniset ${swift_node_config} DEFAULT log_facility LOG_LOCAL${log_facility} - - iniuncomment ${swift_node_config} DEFAULT workers - iniset ${swift_node_config} DEFAULT workers 1 - - iniuncomment ${swift_node_config} DEFAULT disable_fallocate - iniset ${swift_node_config} DEFAULT disable_fallocate true - - iniuncomment ${swift_node_config} DEFAULT mount_check - iniset ${swift_node_config} DEFAULT mount_check false - - iniuncomment ${swift_node_config} ${server_type}-replicator vm_test_mode - iniset ${swift_node_config} ${server_type}-replicator vm_test_mode yes - } - for node_number in ${SWIFT_REPLICAS_SEQ}; do swift_node_config=${SWIFT_CONF_DIR}/object-server/${node_number}.conf cp ${SWIFT_DIR}/etc/object-server.conf-sample ${swift_node_config} From 8e1a1ffdfbf59e01688fd2e6e007ab72d49263ed Mon Sep 17 00:00:00 2001 From: Steven Hardy Date: Fri, 21 Feb 2014 14:45:48 +0000 Subject: [PATCH 0750/4704] Set stack_user_domain config correctly The recently merged patch which creates a domain for heat fails to correctly set the domain ID in 
heat.conf, so move the setting of the config option to immediately after we create the domain. Also add the missing DEFAULT section identifier in the iniset, and use OS_TOKEN instead of OS_SERVICE token, because the stack.sh comment says this is exported for the openstackclient workaround. Change-Id: I912f774f1215d68cbcfe44229b371f318d92966a Closes-Bug: #1283075 --- lib/heat | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) diff --git a/lib/heat b/lib/heat index efb01ef3b8..af10fa6f1d 100644 --- a/lib/heat +++ b/lib/heat @@ -110,15 +110,6 @@ function configure_heat() { iniset $HEAT_CONF ec2authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0 iniset $HEAT_CONF ec2authtoken keystone_ec2_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ec2tokens - # stack user domain - # Note we have to pass token/endpoint here because the current endpoint and - # version negotiation in OSC means just --os-identity-api-version=3 won't work - KS_ENDPOINT_V3="$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v3" - D_ID=$(openstack --os-token $OS_SERVICE_TOKEN --os-url=$KS_ENDPOINT_V3 \ - --os-identity-api-version=3 domain show heat \ - | grep ' id ' | get_field 2) - iniset $HEAT_CONF stack_user_domain ${D_ID} - # paste_deploy [[ "$HEAT_STANDALONE" = "True" ]] && iniset $HEAT_CONF paste_deploy flavor standalone @@ -211,9 +202,11 @@ function create_heat_accounts() { # Note we have to pass token/endpoint here because the current endpoint and # version negotiation in OSC means just --os-identity-api-version=3 won't work KS_ENDPOINT_V3="$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v3" - openstack --os-token $OS_SERVICE_TOKEN --os-url=$KS_ENDPOINT_V3 \ + D_ID=$(openstack --os-token $OS_TOKEN --os-url=$KS_ENDPOINT_V3 \ --os-identity-api-version=3 domain create heat \ - --description "Owns users and projects created by heat" + --description 
"Owns users and projects created by heat" \ + | grep ' id ' | get_field 2) + iniset $HEAT_CONF DEFAULT stack_user_domain ${D_ID} } # Restore xtrace From f2ca87a8d8ded80384b2cafb46ef2ca4cf19a986 Mon Sep 17 00:00:00 2001 From: Rabi Mishra Date: Fri, 21 Feb 2014 20:08:28 +0530 Subject: [PATCH 0751/4704] Implements fix to run lbaas service on fedora with devstack changes 'user_group = nobody' in 'haproxy' section of lbaas_agent.ini Change-Id: I801fec5a11d8abd97cb6f5cdff35fabb9eaf9000 Closes-Bug: 1283064 --- lib/neutron_plugins/services/loadbalancer | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/neutron_plugins/services/loadbalancer b/lib/neutron_plugins/services/loadbalancer index 5d7a94e5d8..3714142a83 100644 --- a/lib/neutron_plugins/services/loadbalancer +++ b/lib/neutron_plugins/services/loadbalancer @@ -38,6 +38,7 @@ function neutron_agent_lbaas_configure_agent() { if is_fedora; then iniset $LBAAS_AGENT_CONF_FILENAME DEFAULT user_group "nobody" + iniset $LBAAS_AGENT_CONF_FILENAME haproxy user_group "nobody" fi } From 67df3b2fc2b2e7b1cfb0418e59f96db7561277be Mon Sep 17 00:00:00 2001 From: Malini Kamalambal Date: Thu, 20 Feb 2014 14:48:59 -0500 Subject: [PATCH 0752/4704] Bind Marconi to SERVICE_HOST & add health check This patch, 1. Binds Marconi to SERVICE_HOST, to be consistent with other services. 2. Adds a health check to verify if marconi started correctly. 
Change-Id: I1d48d0e610369cc97d479a5cd47b2bd11656da3f --- lib/marconi | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/lib/marconi b/lib/marconi index b6ce57a295..ee7bf0ec7b 100644 --- a/lib/marconi +++ b/lib/marconi @@ -51,6 +51,11 @@ MARCONI_BRANCH=${MARCONI_BRANCH:-master} MARCONICLIENT_REPO=${MARCONICLIENT_REPO:-${GIT_BASE}/openstack/python-marconiclient.git} MARCONICLIENT_BRANCH=${MARCONICLIENT_BRANCH:-master} +# Set Marconi Connection Info +MARCONI_SERVICE_HOST=${MARCONI_SERVICE_HOST:-$SERVICE_HOST} +MARCONI_SERVICE_PORT=${MARCONI_SERVICE_PORT:-8888} +MARCONI_SERVICE_PROTOCOL=${MARCONI_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} + # Tell Tempest this project is present TEMPEST_SERVICES+=,marconi @@ -89,7 +94,7 @@ function configure_marconi() { sudo chown $USER $MARCONI_API_LOG_DIR iniset $MARCONI_CONF DEFAULT verbose True - iniset $MARCONI_CONF 'drivers:transport:wsgi' bind '0.0.0.0' + iniset $MARCONI_CONF 'drivers:transport:wsgi' bind $MARCONI_SERVICE_HOST iniset $MARCONI_CONF keystone_authtoken auth_protocol http iniset $MARCONI_CONF keystone_authtoken admin_user marconi @@ -142,6 +147,10 @@ function install_marconiclient() { # start_marconi() - Start running processes, including screen function start_marconi() { screen_it marconi-server "marconi-server --config-file $MARCONI_CONF" + echo "Waiting for Marconi to start..." + if ! timeout $SERVICE_TIMEOUT sh -c "while ! 
wget --no-proxy -q -O- $MARCONI_SERVICE_PROTOCOL://$MARCONI_SERVICE_HOST:$MARCONI_SERVICE_PORT/v1/health; do sleep 1; done"; then + die $LINENO "Marconi did not start" + fi } # stop_marconi() - Stop running processes @@ -176,9 +185,9 @@ function create_marconi_accounts() { openstack endpoint create \ $MARCONI_SERVICE \ --region RegionOne \ - --publicurl "http://$SERVICE_HOST:8888" \ - --adminurl "http://$SERVICE_HOST:8888" \ - --internalurl "http://$SERVICE_HOST:8888" + --publicurl "$MARCONI_SERVICE_PROTOCOL://$MARCONI_SERVICE_HOST:$MARCONI_SERVICE_PORT" \ + --adminurl "$MARCONI_SERVICE_PROTOCOL://$MARCONI_SERVICE_HOST:$MARCONI_SERVICE_PORT" \ + --internalurl "$MARCONI_SERVICE_PROTOCOL://$MARCONI_SERVICE_HOST:$MARCONI_SERVICE_PORT" fi } From f5aa05c0ab1e1ae0c9f56d5eaf9164adcd4cd7b9 Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Fri, 21 Feb 2014 22:03:59 -0500 Subject: [PATCH 0753/4704] Add support for oslo.vmware Change-Id: I2162a339b1869c27850afcda6be3c4e11de94e0e --- lib/oslo | 4 ++++ stackrc | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/lib/oslo b/lib/oslo index b089842ae4..516ce1c3a9 100644 --- a/lib/oslo +++ b/lib/oslo @@ -24,6 +24,7 @@ CLIFF_DIR=$DEST/cliff OSLOCFG_DIR=$DEST/oslo.config OSLOMSG_DIR=$DEST/oslo.messaging OSLORWRAP_DIR=$DEST/oslo.rootwrap +OSLOVMWARE_DIR=$DEST/oslo.vmware PYCADF_DIR=$DEST/pycadf STEVEDORE_DIR=$DEST/stevedore TASKFLOW_DIR=$DEST/taskflow @@ -49,6 +50,9 @@ function install_oslo() { git_clone $OSLORWRAP_REPO $OSLORWRAP_DIR $OSLORWRAP_BRANCH setup_develop $OSLORWRAP_DIR + git_clone $OSLOVMWARE_REPO $OSLOVMWARE_DIR $OSLOVMWARE_BRANCH + setup_develop $OSLOVMWARE_DIR + git_clone $PYCADF_REPO $PYCADF_DIR $PYCADF_BRANCH setup_develop $PYCADF_DIR diff --git a/stackrc b/stackrc index 0b081c4014..91f5751966 100644 --- a/stackrc +++ b/stackrc @@ -167,6 +167,10 @@ OSLOMSG_BRANCH=${OSLOMSG_BRANCH:-master} OSLORWRAP_REPO=${OSLORWRAP_REPO:-${GIT_BASE}/openstack/oslo.rootwrap.git} OSLORWRAP_BRANCH=${OSLORWRAP_BRANCH:-master} +# 
oslo.vmware +OSLOVMWARE_REPO=${OSLOVMWARE_REPO:-${GIT_BASE}/openstack/oslo.vmware.git} +OSLOVMWARE_BRANCH=${OSLOVMWARE_BRANCH:-master} + # pycadf auditing library PYCADF_REPO=${PYCADF_REPO:-${GIT_BASE}/openstack/pycadf.git} PYCADF_BRANCH=${PYCADF_BRANCH:-master} From d53ad0b07d3e7bdd2668c2d3f1815d95d4b8f532 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Thu, 20 Feb 2014 13:55:13 +1100 Subject: [PATCH 0754/4704] Add GIT_TIMEOUT variable to watch git operations During my CI testing of each devstack change I can often see git get itself stuck and hang indefinitely. I'm not sure if it's transient network issues, or issues at the remote end (seen with both github.com and git.openstack.org) but it hits fairly frequently. Retrying the command usually gets it going again. Searching for "git hanging" and similar shows its not entirely uncommon... This adds a watchdog timeout for remote git operations based on a new environment variable GIT_TIMEOUT. It will retry 3 times before giving up. The wrapper is applied to the main remote git calls. Change-Id: I5b0114ca26b7ac2f25993264f761cba9ec8c09e1 --- functions-common | 41 ++++++++++++++++++++++++++++++++++++----- stackrc | 11 +++++++++++ 2 files changed, 47 insertions(+), 5 deletions(-) diff --git a/functions-common b/functions-common index d92e39cd91..9cd5acd47b 100644 --- a/functions-common +++ b/functions-common @@ -498,16 +498,16 @@ function git_clone { if [[ ! -d $GIT_DEST ]]; then [[ "$ERROR_ON_CLONE" = "True" ]] && \ die $LINENO "Cloning not allowed in this configuration" - git clone $GIT_REMOTE $GIT_DEST + git_timed clone $GIT_REMOTE $GIT_DEST fi cd $GIT_DEST - git fetch $GIT_REMOTE $GIT_REF && git checkout FETCH_HEAD + git_timed fetch $GIT_REMOTE $GIT_REF && git checkout FETCH_HEAD else # do a full clone only if the directory doesn't exist if [[ ! 
-d $GIT_DEST ]]; then [[ "$ERROR_ON_CLONE" = "True" ]] && \ die $LINENO "Cloning not allowed in this configuration" - git clone $GIT_REMOTE $GIT_DEST + git_timed clone $GIT_REMOTE $GIT_DEST cd $GIT_DEST # This checkout syntax works for both branches and tags git checkout $GIT_REF @@ -516,7 +516,7 @@ function git_clone { cd $GIT_DEST # set the url to pull from and fetch git remote set-url origin $GIT_REMOTE - git fetch origin + git_timed fetch origin # remove the existing ignored files (like pyc) as they cause breakage # (due to the py files having older timestamps than our pyc, so python # thinks the pyc files are correct using them) @@ -541,6 +541,37 @@ function git_clone { git show --oneline | head -1 } +# git can sometimes get itself infinitely stuck with transient network +# errors or other issues with the remote end. This wraps git in a +# timeout/retry loop and is intended to watch over non-local git +# processes that might hang. GIT_TIMEOUT, if set, is passed directly +# to timeout(1); otherwise the default value of 0 maintains the status +# quo of waiting forever. +# usage: git_timed +function git_timed() { + local count=0 + local timeout=0 + + if [[ -n "${GIT_TIMEOUT}" ]]; then + timeout=${GIT_TIMEOUT} + fi + + until timeout -s SIGINT ${timeout} git "$@"; do + # 124 is timeout(1)'s special return code when it reached the + # timeout; otherwise assume fatal failure + if [[ $? -ne 124 ]]; then + die $LINENO "git call failed: [git $@]" + fi + + count=$(($count + 1)) + warn "timeout ${count} for git call: [git $@]" + if [ $count -eq 3 ]; then + die $LINENO "Maximum of 3 git retries reached" + fi + sleep 5 + done +} + # git update using reference as a branch. 
# git_update_branch ref function git_update_branch() { @@ -571,7 +602,7 @@ function git_update_tag() { git tag -d $GIT_TAG # fetching given tag only - git fetch origin tag $GIT_TAG + git_timed fetch origin tag $GIT_TAG git checkout -f $GIT_TAG } diff --git a/stackrc b/stackrc index 56fa40269c..8cec09eb28 100644 --- a/stackrc +++ b/stackrc @@ -69,6 +69,17 @@ fi # (currently only implemented for MySQL backend) DATABASE_QUERY_LOGGING=$(trueorfalse True $DATABASE_QUERY_LOGGING) +# Set a timeout for git operations. If git is still running when the +# timeout expires, the command will be retried up to 3 times. This is +# in the format for timeout(1); +# +# DURATION is a floating point number with an optional suffix: 's' +# for seconds (the default), 'm' for minutes, 'h' for hours or 'd' +# for days. +# +# Zero disables timeouts +GIT_TIMEOUT=${GIT_TIMEOUT:-0} + # Repositories # ------------ From b93ee25b64de5d587c2e0889a9ce689c92aaa0f9 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Sun, 23 Feb 2014 20:41:07 -0500 Subject: [PATCH 0755/4704] make bash8 take a -v flag this ensures that we actually know we are processing all the files we believe we are. Change-Id: I8e99b5f9dc987c946586475f374f7040ca63a478 --- run_tests.sh | 2 +- tools/bash8.py | 7 +++++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/run_tests.sh b/run_tests.sh index b4f26c5709..a0bfbee0c0 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -26,4 +26,4 @@ fi echo "Running bash8..." 
-./tools/bash8.py $FILES +./tools/bash8.py -v $FILES diff --git a/tools/bash8.py b/tools/bash8.py index 7552e0d642..ca0abd964a 100755 --- a/tools/bash8.py +++ b/tools/bash8.py @@ -110,11 +110,13 @@ def end_of_multiline(line, token): return False -def check_files(files): +def check_files(files, verbose): in_multiline = False logical_line = "" token = False for line in fileinput.input(files): + if verbose and fileinput.isfirstline(): + print "Running bash8 on %s" % fileinput.filename() # NOTE(sdague): multiline processing of heredocs is interesting if not in_multiline: logical_line = line @@ -141,13 +143,14 @@ def get_options(): parser.add_argument('files', metavar='file', nargs='+', help='files to scan for errors') parser.add_argument('-i', '--ignore', help='Rules to ignore') + parser.add_argument('-v', '--verbose', action='store_true', default=False) return parser.parse_args() def main(): opts = get_options() register_ignores(opts.ignore) - check_files(opts.files) + check_files(opts.files, opts.verbose) if ERRORS > 0: print("%d bash8 error(s) found" % ERRORS) From 010959de403660e13eca54c6ef306ef5df24b436 Mon Sep 17 00:00:00 2001 From: Jim Rollenhagen Date: Tue, 18 Feb 2014 13:17:58 -0600 Subject: [PATCH 0756/4704] Perform safety checks in create-stack-user.sh This adds some safety checks to the stack user creation script. This includes: - Using set -o errexit to exit early on errors - Make sure STACK_USER is set before doing anything with it Change-Id: If027daddd03e32c5ba3c2ebb05ad5b27d2868b0a --- tools/create-stack-user.sh | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tools/create-stack-user.sh b/tools/create-stack-user.sh index 50f6592a3a..9c29ecd901 100755 --- a/tools/create-stack-user.sh +++ b/tools/create-stack-user.sh @@ -15,6 +15,7 @@ # and it was time for this nonsense to stop. Run this script as root to create # the user and configure sudo. +set -o errexit # Keep track of the devstack directory TOP_DIR=$(cd $(dirname "$0")/.. 
&& pwd) @@ -27,12 +28,14 @@ source $TOP_DIR/functions # and ``DISTRO`` GetDistro -# Needed to get ``ENABLED_SERVICES`` +# Needed to get ``ENABLED_SERVICES`` and ``STACK_USER`` source $TOP_DIR/stackrc # Give the non-root user the ability to run as **root** via ``sudo`` is_package_installed sudo || install_package sudo +[[ -z "$STACK_USER" ]] && die "STACK_USER is not set. Exiting." + if ! getent group $STACK_USER >/dev/null; then echo "Creating a group called $STACK_USER" groupadd $STACK_USER From e9648276a6396a630d0eca812e36fc82ec4b2a0c Mon Sep 17 00:00:00 2001 From: Sahid Orentino Ferdjaoui Date: Sun, 23 Feb 2014 18:55:51 +0100 Subject: [PATCH 0757/4704] Removes the dependence with aptitude Removes the dependence with aptitude by replacing the call of: aptitude purge -y ~npackage by apt_get purge -y package* Change-Id: I08875ffad9dc6293047827666f02453a355b16ea Closes-Bug: 1281410 --- lib/databases/mysql | 2 +- lib/databases/postgresql | 2 +- lib/rpc_backend | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/databases/mysql b/lib/databases/mysql index 476b4b91b7..3c002f7c43 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -21,7 +21,7 @@ function cleanup_database_mysql { if is_ubuntu; then # Get ruthless with mysql stop_service $MYSQL - sudo aptitude purge -y ~nmysql-server + apt_get purge -y mysql* sudo rm -rf /var/lib/mysql return elif is_fedora; then diff --git a/lib/databases/postgresql b/lib/databases/postgresql index c459feb9e0..96a5947a60 100644 --- a/lib/databases/postgresql +++ b/lib/databases/postgresql @@ -21,7 +21,7 @@ function cleanup_database_postgresql { stop_service postgresql if is_ubuntu; then # Get ruthless with mysql - sudo aptitude purge -y ~npostgresql + apt_get purge -y postgresql* return elif is_fedora; then uninstall_package postgresql-server diff --git a/lib/rpc_backend b/lib/rpc_backend index 3651bc0d20..34f576f5b8 100644 --- a/lib/rpc_backend +++ b/lib/rpc_backend @@ -67,7 +67,7 @@ function 
cleanup_rpc_backend { sudo killall epmd || sudo killall -9 epmd if is_ubuntu; then # And the Erlang runtime too - sudo aptitude purge -y ~nerlang + apt_get purge -y erlang* fi elif is_service_enabled qpid; then if is_fedora; then From 2e2b28b531e392ac59fdfa948bc79a0c74b2f332 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 19 Feb 2014 09:02:02 -0500 Subject: [PATCH 0758/4704] reset prereqs status on clean.sh when running a clean, we should really reset the prereq status as well, as this should start us back from zeroish. Change-Id: I5fae151ab13bcf7fb82feb1e91eed19e0215dc59 --- clean.sh | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/clean.sh b/clean.sh index 09f08dc8c2..465b304a17 100755 --- a/clean.sh +++ b/clean.sh @@ -128,4 +128,10 @@ fi # FIXED_IP_ADDR in br100 # Clean up files -rm -f $TOP_DIR/.stackenv + +FILES_TO_CLEAN=".localrc.auto docs-files docs/ shocco/ stack-screenrc test*.conf* test.ini*" +FILES_TO_CLEAN+=".stackenv .prereqs" + +for file in FILES_TO_CLEAN; do + rm -f $TOP_DIR/$file +done From f1eb0475d9320875f1a6c4a9c398e9388350d206 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 11 Feb 2014 17:28:56 -0500 Subject: [PATCH 0759/4704] don't check for service enabled service enabled is something that's not required for sysstat and friends, because instead we actually can do this with screen_it. Change-Id: I4aa5787101cb0def46690f38a7f82effbb85f502 --- stack.sh | 52 +++++++++++++++++++++++----------------------------- 1 file changed, 23 insertions(+), 29 deletions(-) diff --git a/stack.sh b/stack.sh index 4a55225685..ce19b8fc5c 100755 --- a/stack.sh +++ b/stack.sh @@ -863,42 +863,36 @@ fi init_service_check -# Sysstat +# Sysstat and friends # ------- # If enabled, systat has to start early to track OpenStack service startup. 
-if is_service_enabled sysstat; then - # what we want to measure - # -u : cpu statitics - # -q : load - # -b : io load rates - # -w : process creation and context switch rates - SYSSTAT_OPTS="-u -q -b -w" - if [[ -n ${SCREEN_LOGDIR} ]]; then - screen_it sysstat "cd $TOP_DIR; ./tools/sar_filter.py $SYSSTAT_OPTS -o $SCREEN_LOGDIR/$SYSSTAT_FILE $SYSSTAT_INTERVAL" - else - screen_it sysstat "./tools/sar_filter.py $SYSSTAT_OPTS $SYSSTAT_INTERVAL" - fi +# what we want to measure +# -u : cpu statitics +# -q : load +# -b : io load rates +# -w : process creation and context switch rates +SYSSTAT_OPTS="-u -q -b -w" +if [[ -n ${SCREEN_LOGDIR} ]]; then + screen_it sysstat "cd $TOP_DIR; ./tools/sar_filter.py $SYSSTAT_OPTS -o $SCREEN_LOGDIR/$SYSSTAT_FILE $SYSSTAT_INTERVAL" +else + screen_it sysstat "./tools/sar_filter.py $SYSSTAT_OPTS $SYSSTAT_INTERVAL" fi -if is_service_enabled dstat; then - # Per-process stats - DSTAT_OPTS="-tcndylp --top-cpu-adv" - if [[ -n ${SCREEN_LOGDIR} ]]; then - screen_it dstat "cd $TOP_DIR; dstat $DSTAT_OPTS | tee $SCREEN_LOGDIR/$DSTAT_FILE" - else - screen_it dstat "dstat $DSTAT_OPTS" - fi +# A better kind of sysstat, with the top process per time slice +DSTAT_OPTS="-tcndylp --top-cpu-adv" +if [[ -n ${SCREEN_LOGDIR} ]]; then + screen_it dstat "cd $TOP_DIR; dstat $DSTAT_OPTS | tee $SCREEN_LOGDIR/$DSTAT_FILE" +else + screen_it dstat "dstat $DSTAT_OPTS" fi -if is_service_enabled pidstat; then - # Per-process stats - PIDSTAT_OPTS="-l -p ALL -T ALL" - if [[ -n ${SCREEN_LOGDIR} ]]; then - screen_it pidstat "cd $TOP_DIR; pidstat $PIDSTAT_OPTS $PIDSTAT_INTERVAL > $SCREEN_LOGDIR/$PIDSTAT_FILE" - else - screen_it pidstat "pidstat $PIDSTAT_OPTS $PIDSTAT_INTERVAL" - fi +# Per-process stats +PIDSTAT_OPTS="-l -p ALL -T ALL" +if [[ -n ${SCREEN_LOGDIR} ]]; then + screen_it pidstat "cd $TOP_DIR; pidstat $PIDSTAT_OPTS $PIDSTAT_INTERVAL > $SCREEN_LOGDIR/$PIDSTAT_FILE" +else + screen_it pidstat "pidstat $PIDSTAT_OPTS $PIDSTAT_INTERVAL" fi From 
af616d93411a9a446ce0d2e72ea4fb7d281cd940 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 17 Feb 2014 12:57:55 -0600 Subject: [PATCH 0760/4704] Move setup_develop() to common It's in the wrong place for current Grenade Change-Id: Ia670198332af5945a56d708cd83d9239df0c2287 --- functions | 54 ------------------------------------------------ functions-common | 52 ++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 52 insertions(+), 54 deletions(-) diff --git a/functions b/functions index 6979c6c155..3101111c63 100644 --- a/functions +++ b/functions @@ -44,60 +44,6 @@ function cleanup_tmp { } -# ``pip install -e`` the package, which processes the dependencies -# using pip before running `setup.py develop` -# -# Updates the dependencies in project_dir from the -# openstack/requirements global list before installing anything. -# -# Uses globals ``TRACK_DEPENDS``, ``REQUIREMENTS_DIR``, ``UNDO_REQUIREMENTS`` -# setup_develop directory -function setup_develop() { - local project_dir=$1 - - echo "cd $REQUIREMENTS_DIR; $SUDO_CMD python update.py $project_dir" - - # Don't update repo if local changes exist - # Don't use buggy "git diff --quiet" - (cd $project_dir && git diff --exit-code >/dev/null) - local update_requirements=$? - - if [ $update_requirements -eq 0 ]; then - (cd $REQUIREMENTS_DIR; \ - $SUDO_CMD python update.py $project_dir) - fi - - setup_develop_no_requirements_update $project_dir - - # We've just gone and possibly modified the user's source tree in an - # automated way, which is considered bad form if it's a development - # tree because we've screwed up their next git checkin. So undo it. - # - # However... there are some circumstances, like running in the gate - # where we really really want the overridden version to stick. 
So provide - # a variable that tells us whether or not we should UNDO the requirements - # changes (this will be set to False in the OpenStack ci gate) - if [ $UNDO_REQUIREMENTS = "True" ]; then - if [ $update_requirements -eq 0 ]; then - (cd $project_dir && git reset --hard) - fi - fi -} - - -# ``pip install -e`` the package, which processes the dependencies -# using pip before running `setup.py develop` -# Uses globals ``STACK_USER`` -# setup_develop_no_requirements_update directory -function setup_develop_no_requirements_update() { - local project_dir=$1 - - pip_install -e $project_dir - # ensure that further actions can do things like setup.py sdist - safe_chown -R $STACK_USER $1/*.egg-info -} - - # Retrieve an image from a URL and upload into Glance. # Uses the following variables: # diff --git a/functions-common b/functions-common index d92e39cd91..d6f71b4825 100644 --- a/functions-common +++ b/functions-common @@ -1130,6 +1130,58 @@ function pip_install { && $SUDO_PIP rm -rf ${pip_build_tmp} } +# ``pip install -e`` the package, which processes the dependencies +# using pip before running `setup.py develop` +# +# Updates the dependencies in project_dir from the +# openstack/requirements global list before installing anything. +# +# Uses globals ``TRACK_DEPENDS``, ``REQUIREMENTS_DIR``, ``UNDO_REQUIREMENTS`` +# setup_develop directory +function setup_develop() { + local project_dir=$1 + + echo "cd $REQUIREMENTS_DIR; $SUDO_CMD python update.py $project_dir" + + # Don't update repo if local changes exist + # Don't use buggy "git diff --quiet" + (cd $project_dir && git diff --exit-code >/dev/null) + local update_requirements=$? 
+ + if [ $update_requirements -eq 0 ]; then + (cd $REQUIREMENTS_DIR; \ + $SUDO_CMD python update.py $project_dir) + fi + + setup_develop_no_requirements_update $project_dir + + # We've just gone and possibly modified the user's source tree in an + # automated way, which is considered bad form if it's a development + # tree because we've screwed up their next git checkin. So undo it. + # + # However... there are some circumstances, like running in the gate + # where we really really want the overridden version to stick. So provide + # a variable that tells us whether or not we should UNDO the requirements + # changes (this will be set to False in the OpenStack ci gate) + if [ $UNDO_REQUIREMENTS = "True" ]; then + if [ $update_requirements -eq 0 ]; then + (cd $project_dir && git reset --hard) + fi + fi +} + +# ``pip install -e`` the package, which processes the dependencies +# using pip before running `setup.py develop` +# Uses globals ``STACK_USER`` +# setup_develop_no_requirements_update directory +function setup_develop_no_requirements_update() { + local project_dir=$1 + + pip_install -e $project_dir + # ensure that further actions can do things like setup.py sdist + safe_chown -R $STACK_USER $1/*.egg-info +} + # Service Functions # ================= From 71ef61ac8727137da01b3ca970a70b3adc81fd51 Mon Sep 17 00:00:00 2001 From: armando-migliaccio Date: Wed, 19 Feb 2014 22:19:24 -0800 Subject: [PATCH 0761/4704] Add variable to configure the run of IPv6 Tests Related Tempest change: https://review.openstack.org/#/c/74933/ Closes-bug: 1282387 Change-Id: If9e9c5319c484dc4c00ed3bdcefc132410719b87 --- lib/tempest | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/lib/tempest b/lib/tempest index 596750b32f..d2227feed9 100644 --- a/lib/tempest +++ b/lib/tempest @@ -63,6 +63,9 @@ TEMPEST_VOLUME_DRIVER=${TEMPEST_VOLUME_DRIVER:-default} TEMPEST_VOLUME_VENDOR=${TEMPEST_VOLUME_VENDOR:-"Open Source"} TEMPEST_STORAGE_PROTOCOL=${TEMPEST_STORAGE_PROTOCOL:-iSCSI} +# 
Neutron/Network variables +IPV6_ENABLED=$(trueorfalse True $IPV6_ENABLED) + # Functions # --------- @@ -285,11 +288,13 @@ function configure_tempest() { # Compute admin iniset $TEMPEST_CONFIG "compute-admin" password "$password" # DEPRECATED + # Network iniset $TEMPEST_CONFIG network api_version 2.0 iniset $TEMPEST_CONFIG network tenant_networks_reachable "$tenant_networks_reachable" iniset $TEMPEST_CONFIG network public_network_id "$public_network_id" iniset $TEMPEST_CONFIG network public_router_id "$public_router_id" iniset $TEMPEST_CONFIG network default_network "$FIXED_RANGE" + iniset $TEMPEST_CONFIG network ipv6_enabled "$IPV6_ENABLED" # boto iniset $TEMPEST_CONFIG boto ec2_url "http://$SERVICE_HOST:8773/services/Cloud" From 041fa712472d887550a540dd50ade546f847c6b4 Mon Sep 17 00:00:00 2001 From: David Kranz Date: Mon, 24 Feb 2014 13:30:59 -0500 Subject: [PATCH 0762/4704] Make admin_bind_host configurable The use case is running devstack inside an OpenStack vm and running tempest from some other machine. To make the catalog export urls that can be accessed from off the devstack machine, you need to set KEYSTONE_SERVICE_HOST to an external IP. But devstack uses that address in its setup of keystone in addition to exporting in the catalog. Because OpenStack has an issue where a vm cannot access itself through its own floating ip, devstack fails. There is no way to have this use case by providing an ip address. The workaround is to use the hostname of the devstack machine. That worked until recently when a change was made to set admin_bind_host to the value of KEYSTONE_SERVICE_HOST. The result is that port 35357 is only opened locally. This change allows the devstack user to restore the original behavior allowing this use case. 
Change-Id: I97b938b305b7dd878397e7e64462650064e59cd2 Closes-Bug: #1283803 --- lib/keystone | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/keystone b/lib/keystone index cebb4d3522..44ac94d802 100644 --- a/lib/keystone +++ b/lib/keystone @@ -70,6 +70,8 @@ KEYSTONE_SERVICE_PORT=${KEYSTONE_SERVICE_PORT:-5000} KEYSTONE_SERVICE_PORT_INT=${KEYSTONE_SERVICE_PORT_INT:-5001} KEYSTONE_SERVICE_PROTOCOL=${KEYSTONE_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} +# Bind hosts +KEYSTONE_ADMIN_BIND_HOST=${KEYSTONE_ADMIN_BIND_HOST:-$KEYSTONE_SERVICE_HOST} # Set the tenant for service accounts in Keystone SERVICE_TENANT_NAME=${SERVICE_TENANT_NAME:-service} @@ -178,7 +180,7 @@ function configure_keystone() { # Set the URL advertised in the ``versions`` structure returned by the '/' route iniset $KEYSTONE_CONF DEFAULT public_endpoint "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:%(public_port)s/" iniset $KEYSTONE_CONF DEFAULT admin_endpoint "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:%(admin_port)s/" - iniset $KEYSTONE_CONF DEFAULT admin_bind_host "$KEYSTONE_SERVICE_HOST" + iniset $KEYSTONE_CONF DEFAULT admin_bind_host "$KEYSTONE_ADMIN_BIND_HOST" # Register SSL certificates if provided if is_ssl_enabled_service key; then From 80313b24404105fb68d1488d48e00574129ccd69 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Sun, 23 Feb 2014 09:55:01 -0500 Subject: [PATCH 0763/4704] match devstack-gate format support millisecond resolution and the | separator for ts vs. content. everything else in openstack is running at millisecond resolution, and some times it's actually useful to see that when debugging gate failures. 
Change-Id: I2227ab0b4965cd1a24b579bdf2ba8c1f9a432f70 --- stack.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stack.sh b/stack.sh index c153132485..eaccc76537 100755 --- a/stack.sh +++ b/stack.sh @@ -530,9 +530,9 @@ if [[ -n "$LOGFILE" ]]; then # Redirect stdout/stderr to tee to write the log file exec 1> >( awk ' { - cmd ="date +\"%Y-%m-%d %H:%M:%S \"" + cmd ="date +\"%Y-%m-%d %H:%M:%S.%3N | \"" cmd | getline now - close("date +\"%Y-%m-%d %H:%M:%S \"") + close("date +\"%Y-%m-%d %H:%M:%S.%3N | \"") sub(/^/, now) print fflush() From 4f1fee6eae300a3384900df06ebc857e95854eb0 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Mon, 24 Feb 2014 14:24:13 +1100 Subject: [PATCH 0764/4704] Fix missing libffi-devel for python-glanceclient python-glanceclient is failing to install on rhel6 with a dependency chain from pip as cryptography>=0.2.1 (from pyOpenSSL>=0.11->python-glanceclient==0.12.0.56.gb8a850c) cryptography requires libffi-devel to build. I'm not sure what changed, but remove it from "testonly" so it is always installed. However, RHEL6 includes this in the optional repo, so we enable this repo in the fixup script. 
Change-Id: I9da0e91b75f41578861ee9685b8c7e91dd12dae7 --- files/apts/glance | 2 +- files/rpms/glance | 2 +- tools/fixup_stuff.sh | 4 ++++ 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/files/apts/glance b/files/apts/glance index 22787bc5a2..6dc878e4de 100644 --- a/files/apts/glance +++ b/files/apts/glance @@ -1,5 +1,5 @@ gcc -libffi-dev # testonly +libffi-dev libmysqlclient-dev # testonly libpq-dev # testonly libssl-dev # testonly diff --git a/files/rpms/glance b/files/rpms/glance index 785ce25df5..25c5d3902b 100644 --- a/files/rpms/glance +++ b/files/rpms/glance @@ -1,5 +1,5 @@ gcc -libffi-devel # testonly +libffi-devel libxml2-devel # testonly libxslt-devel # testonly mysql-devel # testonly diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index 47b0cd10cd..048024a325 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -98,6 +98,10 @@ if [[ $DISTRO =~ (rhel6) ]]; then sudo setenforce 0 fi + # make sure we have the "optional" repo enabled; it provides some + # packages like libffi-devel for example + sudo yum-config-manager --enable rhel-6-server-optional-rpms + # If the ``dbus`` package was installed by DevStack dependencies the # uuid may not be generated because the service was never started (PR#598200), # causing Nova to stop later on complaining that ``/var/lib/dbus/machine-id`` From 5f90fc06f5cd3138de112eddf1b04fe1db56d226 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Mon, 24 Feb 2014 15:40:42 +1100 Subject: [PATCH 0765/4704] Fix permissions for tempest.conf The current script uses 'sudo' to copy tempest.conf.sample and thus the .conf file gets owned by root. It then makes the permissions 644, meaning that when the 'stack' user does the iniset() calls, it doesn't have permisson on the .conf file. Since the dir has been chowned to the stack user, it seems safe to just copy the sample file in without sudo. In addition, I moved the $TEMPEST_CONFIG_DIR creation closer to the copy to make it clearer what's going on. 
Seems to be related to dc4dc7f03335e26ea3d86b6184f0475cc5f3d51b Fixes bug: #1284378 Change-Id: I103b4e90cbcfa693c9cef319f4135868a1b83de3 --- lib/tempest | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/lib/tempest b/lib/tempest index 596750b32f..83ce5d2e2a 100644 --- a/lib/tempest +++ b/lib/tempest @@ -87,11 +87,6 @@ function configure_tempest() { local boto_instance_type="m1.tiny" local ssh_connect_method="fixed" - if [[ ! -d $TEMPEST_CONFIG_DIR ]]; then - sudo mkdir -p $TEMPEST_CONFIG_DIR - fi - sudo chown $STACK_USER $TEMPEST_CONFIG_DIR - # TODO(afazekas): # sudo python setup.py deploy @@ -142,8 +137,12 @@ function configure_tempest() { # Create tempest.conf from tempest.conf.sample # copy every time, because the image UUIDS are going to change - sudo cp $TEMPEST_DIR/etc/tempest.conf.sample $TEMPEST_CONFIG - sudo chmod 644 $TEMPEST_CONFIG + if [[ ! -d $TEMPEST_CONFIG_DIR ]]; then + sudo mkdir -p $TEMPEST_CONFIG_DIR + fi + sudo chown $STACK_USER $TEMPEST_CONFIG_DIR + cp $TEMPEST_DIR/etc/tempest.conf.sample $TEMPEST_CONFIG + chmod 644 $TEMPEST_CONFIG password=${ADMIN_PASSWORD:-secrete} From 6857dbb2b4fb40a2ed3362ba46f7b130a85b2de1 Mon Sep 17 00:00:00 2001 From: Sergey Lukjanov Date: Tue, 25 Feb 2014 11:02:44 +0400 Subject: [PATCH 0766/4704] Savanna: use heat infra engine if available In Savanna we have two provisioning engines: * "savanna" that directly work with nova/neutron/glance/cinder/etc and we'd like to deprecate it early in Juno release cycle, but it's still useful due to the fact that it could work ok w/o Heat; * "heat" engine uses Heat for provisioning of cluster resources, it's currently under active development and we're ready to make it default for OpenStack installations with enabled Heat. 
Change-Id: I937337b3921e9e51768a118fb4b6bd95962622bd --- lib/savanna | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/lib/savanna b/lib/savanna index 954f0e711e..9feff236bc 100644 --- a/lib/savanna +++ b/lib/savanna @@ -135,6 +135,12 @@ function configure_savanna() { iniset $SAVANNA_CONF_FILE DEFAULT use_floating_ips true fi + if is_service_enabled heat; then + iniset $SAVANNA_CONF_FILE DEFAULT infrastructure_engine heat + else + iniset $SAVANNA_CONF_FILE DEFAULT infrastructure_engine savanna + fi + iniset $SAVANNA_CONF_FILE DEFAULT use_syslog $SYSLOG recreate_database savanna utf8 From 45917cc4d941a530d75a84fa4dff738fe87f928b Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Mon, 24 Feb 2014 16:09:14 -0500 Subject: [PATCH 0767/4704] xtrace less we are xtrace happy, however that's just generating bulk in log files that are mostly ignorable. For the basically bullet proof functions we should not xtrace. Change-Id: Iab4e6d270c1546e0db2a06395cefcdf7f7929c3c --- functions-common | 85 ++++++++++++++++++++++++++++++++++++++++-------- stack.sh | 1 + 2 files changed, 72 insertions(+), 14 deletions(-) diff --git a/functions-common b/functions-common index d92e39cd91..79003fcfaf 100644 --- a/functions-common +++ b/functions-common @@ -39,59 +39,76 @@ set +o xtrace # Append a new option in an ini file without replacing the old value # iniadd config-file section option value1 value2 value3 ... 
function iniadd() { + local xtrace=$(set +o | grep xtrace) + set +o xtrace local file=$1 local section=$2 local option=$3 shift 3 local values="$(iniget_multiline $file $section $option) $@" iniset_multiline $file $section $option $values + $xtrace } # Comment an option in an INI file # inicomment config-file section option function inicomment() { + local xtrace=$(set +o | grep xtrace) + set +o xtrace local file=$1 local section=$2 local option=$3 sed -i -e "/^\[$section\]/,/^\[.*\]/ s|^\($option[ \t]*=.*$\)|#\1|" "$file" + $xtrace } # Get an option from an INI file # iniget config-file section option function iniget() { + local xtrace=$(set +o | grep xtrace) + set +o xtrace local file=$1 local section=$2 local option=$3 local line line=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file") echo ${line#*=} + $xtrace } # Get a multiple line option from an INI file # iniget_multiline config-file section option function iniget_multiline() { + local xtrace=$(set +o | grep xtrace) + set +o xtrace local file=$1 local section=$2 local option=$3 local values values=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { s/^$option[ \t]*=[ \t]*//gp; }" "$file") echo ${values} + $xtrace } # Determinate is the given option present in the INI file # ini_has_option config-file section option function ini_has_option() { + local xtrace=$(set +o | grep xtrace) + set +o xtrace local file=$1 local section=$2 local option=$3 local line line=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file") + $xtrace [ -n "$line" ] } # Set an option in an INI file # iniset config-file section option value function iniset() { + local xtrace=$(set +o | grep xtrace) + set +o xtrace local file=$1 local section=$2 local option=$3 @@ -113,11 +130,14 @@ $option = $value # Replace it sed -i -e '/^\['${section}'\]/,/^\[.*\]/ s'${sep}'^\('${option}'[ \t]*=[ \t]*\).*$'${sep}'\1'"${value}"${sep} "$file" fi + $xtrace } # Set a multiple line option in an INI file # iniset_multiline config-file 
section option value1 value2 valu3 ... function iniset_multiline() { + local xtrace=$(set +o | grep xtrace) + set +o xtrace local file=$1 local section=$2 local option=$3 @@ -142,15 +162,19 @@ function iniset_multiline() { $option = $v " "$file" done + $xtrace } # Uncomment an option in an INI file # iniuncomment config-file section option function iniuncomment() { + local xtrace=$(set +o | grep xtrace) + set +o xtrace local file=$1 local section=$2 local option=$3 sed -i -e "/^\[$section\]/,/^\[.*\]/ s|[^ \t]*#[ \t]*\($option[ \t]*=.*$\)|\1|" "$file" + $xtrace } # Normalize config values to True or False @@ -158,6 +182,8 @@ function iniuncomment() { # Accepts as True: 1 yes Yes YES true True TRUE # VAR=$(trueorfalse default-value test-value) function trueorfalse() { + local xtrace=$(set +o | grep xtrace) + set +o xtrace local default=$1 local testval=$2 @@ -165,6 +191,7 @@ function trueorfalse() { [[ "0 no No NO false False FALSE" =~ "$testval" ]] && { echo "False"; return; } [[ "1 yes Yes YES true True TRUE" =~ "$testval" ]] && { echo "True"; return; } echo "$default" + $xtrace } @@ -675,9 +702,14 @@ function _get_package_dir() { # Uses globals ``OFFLINE``, ``*_proxy`` # apt_get operation package [package ...] function apt_get() { + local xtrace=$(set +o | grep xtrace) + set +o xtrace + [[ "$OFFLINE" = "True" || -z "$@" ]] && return local sudo="sudo" [[ "$(id -u)" = "0" ]] && sudo="env" + + $xtrace $sudo DEBIAN_FRONTEND=noninteractive \ http_proxy=$http_proxy https_proxy=$https_proxy \ no_proxy=$no_proxy \ @@ -695,6 +727,8 @@ function apt_get() { # - ``# dist:DISTRO`` or ``dist:DISTRO1,DISTRO2`` limits the selection # of the package to the distros listed. The distro names are case insensitive. 
function get_packages() { + local xtrace=$(set +o | grep xtrace) + set +o xtrace local services=$@ local package_dir=$(_get_package_dir) local file_to_parse @@ -706,6 +740,7 @@ function get_packages() { fi if [[ -z "$DISTRO" ]]; then GetDistro + echo "Found Distro $DISTRO" fi for service in ${services//,/ }; do # Allow individual services to specify dependencies @@ -797,23 +832,30 @@ function get_packages() { done IFS=$OIFS done + $xtrace } # Distro-agnostic package installer # install_package package [package ...] function install_package() { + local xtrace=$(set +o | grep xtrace) + set +o xtrace if is_ubuntu; then # if there are transient errors pulling the updates, that's fine. It may # be secondary repositories that we don't really care about. [[ "$NO_UPDATE_REPOS" = "True" ]] || apt_get update || /bin/true NO_UPDATE_REPOS=True + $xtrace apt_get install "$@" elif is_fedora; then + $xtrace yum_install "$@" elif is_suse; then + $xtrace zypper_install "$@" else + $xtrace exit_distro_not_supported "installing packages" fi } @@ -1092,7 +1134,13 @@ function get_python_exec_prefix() { # ``TRACK_DEPENDS``, ``*_proxy`` # pip_install package [package ...] function pip_install { - [[ "$OFFLINE" = "True" || -z "$@" ]] && return + local xtrace=$(set +o | grep xtrace) + set +o xtrace + if [[ "$OFFLINE" = "True" || -z "$@" ]]; then + $xtrace + return + fi + if [[ -z "$os_PACKAGE" ]]; then GetOSVersion fi @@ -1121,6 +1169,7 @@ function pip_install { # this problem. See https://github.com/pypa/pip/issues/709 local pip_build_tmp=$(mktemp --tmpdir -d pip-build.XXXXX) + $xtrace $SUDO_PIP PIP_DOWNLOAD_CACHE=${PIP_DOWNLOAD_CACHE:-/var/cache/pip} \ HTTP_PROXY=$http_proxy \ HTTPS_PROXY=$https_proxy \ @@ -1235,32 +1284,36 @@ function enable_service() { # Uses global ``ENABLED_SERVICES`` # is_service_enabled service [service ...] 
function is_service_enabled() { + local xtrace=$(set +o | grep xtrace) + set +o xtrace + local enabled=1 services=$@ for service in ${services}; do - [[ ,${ENABLED_SERVICES}, =~ ,${service}, ]] && return 0 + [[ ,${ENABLED_SERVICES}, =~ ,${service}, ]] && enabled=0 # Look for top-level 'enabled' function for this service if type is_${service}_enabled >/dev/null 2>&1; then # A function exists for this service, use it is_${service}_enabled - return $? + enabled=$? fi # TODO(dtroyer): Remove these legacy special-cases after the is_XXX_enabled() # are implemented - [[ ${service} == n-cell-* && ${ENABLED_SERVICES} =~ "n-cell" ]] && return 0 - [[ ${service} == "nova" && ${ENABLED_SERVICES} =~ "n-" ]] && return 0 - [[ ${service} == "cinder" && ${ENABLED_SERVICES} =~ "c-" ]] && return 0 - [[ ${service} == "ceilometer" && ${ENABLED_SERVICES} =~ "ceilometer-" ]] && return 0 - [[ ${service} == "glance" && ${ENABLED_SERVICES} =~ "g-" ]] && return 0 - [[ ${service} == "ironic" && ${ENABLED_SERVICES} =~ "ir-" ]] && return 0 - [[ ${service} == "neutron" && ${ENABLED_SERVICES} =~ "q-" ]] && return 0 - [[ ${service} == "trove" && ${ENABLED_SERVICES} =~ "tr-" ]] && return 0 - [[ ${service} == "swift" && ${ENABLED_SERVICES} =~ "s-" ]] && return 0 - [[ ${service} == s-* && ${ENABLED_SERVICES} =~ "swift" ]] && return 0 + [[ ${service} == n-cell-* && ${ENABLED_SERVICES} =~ "n-cell" ]] && enabled=0 + [[ ${service} == "nova" && ${ENABLED_SERVICES} =~ "n-" ]] && enabled=0 + [[ ${service} == "cinder" && ${ENABLED_SERVICES} =~ "c-" ]] && enabled=0 + [[ ${service} == "ceilometer" && ${ENABLED_SERVICES} =~ "ceilometer-" ]] && enabled=0 + [[ ${service} == "glance" && ${ENABLED_SERVICES} =~ "g-" ]] && enabled=0 + [[ ${service} == "ironic" && ${ENABLED_SERVICES} =~ "ir-" ]] && enabled=0 + [[ ${service} == "neutron" && ${ENABLED_SERVICES} =~ "q-" ]] && enabled=0 + [[ ${service} == "trove" && ${ENABLED_SERVICES} =~ "tr-" ]] && enabled=0 + [[ ${service} == "swift" && ${ENABLED_SERVICES} =~ "s-" ]] && 
enabled=0 + [[ ${service} == s-* && ${ENABLED_SERVICES} =~ "swift" ]] && enabled=0 done - return 1 + $xtrace + return $enabled } # Toggle enable/disable_service for services that must run exclusive of each other @@ -1286,6 +1339,8 @@ function use_exclusive_service { # Only run the command if the target file (the last arg) is not on an # NFS filesystem. function _safe_permission_operation() { + local xtrace=$(set +o | grep xtrace) + set +o xtrace local args=( $@ ) local last local sudo_cmd @@ -1299,6 +1354,7 @@ function _safe_permission_operation() { fi if is_nfs_directory "$dir_to_check" ; then + $xtrace return 0 fi @@ -1308,6 +1364,7 @@ function _safe_permission_operation() { sudo_cmd="sudo" fi + $xtrace $sudo_cmd $@ } diff --git a/stack.sh b/stack.sh index ce19b8fc5c..0fdac3394a 100755 --- a/stack.sh +++ b/stack.sh @@ -529,6 +529,7 @@ if [[ -n "$LOGFILE" ]]; then if [[ "$VERBOSE" == "True" ]]; then # Redirect stdout/stderr to tee to write the log file exec 1> >( awk ' + /((set \+o$)|xtrace)/ { next } { cmd ="date +\"%Y-%m-%d %H:%M:%S \"" cmd | getline now From dd029da5b9b5600b8f6893247645db4fb0b95efe Mon Sep 17 00:00:00 2001 From: Steven Hardy Date: Mon, 24 Feb 2014 18:09:10 +0000 Subject: [PATCH 0768/4704] Create stack_domain_admin user Create an additional service user for Heat, which is a domain admin for the stack_user_domain - this is necessary since the normal service user cannot manage the projects/users in the stack_user_domain when keystone is configured to use the v3cloudsample policy (such as in gate integration tests). 
Change-Id: If59c11a74145b9bd02f78a7e0882afe1b0a72e40 --- lib/heat | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/lib/heat b/lib/heat index af10fa6f1d..1b6dc86989 100644 --- a/lib/heat +++ b/lib/heat @@ -207,6 +207,16 @@ function create_heat_accounts() { --description "Owns users and projects created by heat" \ | grep ' id ' | get_field 2) iniset $HEAT_CONF DEFAULT stack_user_domain ${D_ID} + + openstack --os-token $OS_TOKEN --os-url=$KS_ENDPOINT_V3 \ + --os-identity-api-version=3 user create --password $SERVICE_PASSWORD \ + --domain $D_ID heat_domain_admin \ + --description "Manages users and projects created by heat" + openstack --os-token $OS_TOKEN --os-url=$KS_ENDPOINT_V3 \ + --os-identity-api-version=3 role add \ + --user ${U_ID} --domain ${D_ID} admin + iniset $HEAT_CONF DEFAULT stack_domain_admin heat_domain_admin + iniset $HEAT_CONF DEFAULT stack_domain_admin_password $SERVICE_PASSWORD } # Restore xtrace From 78096b5073c70ef2c1f0626c802e095cd288c097 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 25 Feb 2014 10:23:04 -0500 Subject: [PATCH 0769/4704] remove sysstat & pidstat dstat is far cleaner for getting results out of the environment, and covers the bulk of our use cases for sysstat and pidstat with a much better ui. devstack is allowed to be opinionated, so become opinionated here. 
Change-Id: I21ec96339dcd704098512fdafd896738f352962d --- files/apts/sysstat | 1 - files/rpms-suse/sysstat | 1 - files/rpms/sysstat | 1 - stack.sh | 33 +--------------- tools/sar_filter.py | 86 ----------------------------------------- 5 files changed, 2 insertions(+), 120 deletions(-) delete mode 100644 files/apts/sysstat delete mode 100644 files/rpms-suse/sysstat delete mode 100644 files/rpms/sysstat delete mode 100755 tools/sar_filter.py diff --git a/files/apts/sysstat b/files/apts/sysstat deleted file mode 100644 index ea0c342d91..0000000000 --- a/files/apts/sysstat +++ /dev/null @@ -1 +0,0 @@ -sysstat diff --git a/files/rpms-suse/sysstat b/files/rpms-suse/sysstat deleted file mode 100644 index ea0c342d91..0000000000 --- a/files/rpms-suse/sysstat +++ /dev/null @@ -1 +0,0 @@ -sysstat diff --git a/files/rpms/sysstat b/files/rpms/sysstat deleted file mode 100644 index ea0c342d91..0000000000 --- a/files/rpms/sysstat +++ /dev/null @@ -1 +0,0 @@ -sysstat diff --git a/stack.sh b/stack.sh index 9f08e0f017..1d281587b1 100755 --- a/stack.sh +++ b/stack.sh @@ -294,15 +294,9 @@ SYSLOG=`trueorfalse False $SYSLOG` SYSLOG_HOST=${SYSLOG_HOST:-$HOST_IP} SYSLOG_PORT=${SYSLOG_PORT:-516} -# Enable sysstat logging -SYSSTAT_FILE=${SYSSTAT_FILE:-"sysstat.dat"} -SYSSTAT_INTERVAL=${SYSSTAT_INTERVAL:-"1"} - +# for DSTAT logging DSTAT_FILE=${DSTAT_FILE:-"dstat.txt"} -PIDSTAT_FILE=${PIDSTAT_FILE:-"pidstat.txt"} -PIDSTAT_INTERVAL=${PIDSTAT_INTERVAL:-"5"} - # Use color for logging output (only available if syslog is not used) LOG_COLOR=`trueorfalse True $LOG_COLOR` @@ -862,23 +856,9 @@ fi # Initialize the directory for service status check init_service_check - -# Sysstat and friends +# Dstat # ------- -# If enabled, systat has to start early to track OpenStack service startup. 
-# what we want to measure -# -u : cpu statitics -# -q : load -# -b : io load rates -# -w : process creation and context switch rates -SYSSTAT_OPTS="-u -q -b -w" -if [[ -n ${SCREEN_LOGDIR} ]]; then - screen_it sysstat "cd $TOP_DIR; ./tools/sar_filter.py $SYSSTAT_OPTS -o $SCREEN_LOGDIR/$SYSSTAT_FILE $SYSSTAT_INTERVAL" -else - screen_it sysstat "./tools/sar_filter.py $SYSSTAT_OPTS $SYSSTAT_INTERVAL" -fi - # A better kind of sysstat, with the top process per time slice DSTAT_OPTS="-tcndylp --top-cpu-adv" if [[ -n ${SCREEN_LOGDIR} ]]; then @@ -887,15 +867,6 @@ else screen_it dstat "dstat $DSTAT_OPTS" fi -# Per-process stats -PIDSTAT_OPTS="-l -p ALL -T ALL" -if [[ -n ${SCREEN_LOGDIR} ]]; then - screen_it pidstat "cd $TOP_DIR; pidstat $PIDSTAT_OPTS $PIDSTAT_INTERVAL > $SCREEN_LOGDIR/$PIDSTAT_FILE" -else - screen_it pidstat "pidstat $PIDSTAT_OPTS $PIDSTAT_INTERVAL" -fi - - # Start Services # ============== diff --git a/tools/sar_filter.py b/tools/sar_filter.py deleted file mode 100755 index 24ef0e476c..0000000000 --- a/tools/sar_filter.py +++ /dev/null @@ -1,86 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2014 Samsung Electronics Corp. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import re -import subprocess -import sys - - -def is_data_line(line): - timestamp, data = parse_line(line) - return re.search('\d\.d', data) - - -def parse_line(line): - m = re.search('(\d\d:\d\d:\d\d( \w\w)?)(\s+((\S+)\s*)+)', line) - if m: - date = m.group(1) - data = m.group(3).rstrip() - return date, data - else: - return None, None - - -process = subprocess.Popen( - "sar %s" % " ".join(sys.argv[1:]), - shell=True, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) - -# Poll process for new output until finished - -start_time = "" -header = "" -data_line = "" -printed_header = False -current_ts = None - -# print out the first sysstat line regardless -print process.stdout.readline() - -while True: - nextline = process.stdout.readline() - if nextline == '' and process.poll() is not None: - break - - date, data = parse_line(nextline) - # stop until we get to the first set of real lines - if not date: - continue - - # now we eat the header lines, and only print out the header - # if we've never seen them before - if not start_time: - start_time = date - header += "%s %s" % (date, data) - elif date == start_time: - header += " %s" % data - elif not printed_header: - printed_header = True - print header - - # now we know this is a data line, printing out if the timestamp - # has changed, and stacking up otherwise. - nextline = process.stdout.readline() - date, data = parse_line(nextline) - if date != current_ts: - current_ts = date - print data_line - data_line = "%s %s" % (date, data) - else: - data_line += " %s" % data - - sys.stdout.flush() From b8e250232ec55b946d2fd7e4237f12632408bdcc Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Fri, 21 Feb 2014 16:14:29 +1100 Subject: [PATCH 0770/4704] Add end-of-file checks to bash8 Add two end-of-file checks to bash8. Firstly, alert if heredoc hasn't finished. Some heredocs were done like: --- sudo bash -c "cat < foo ... 
EOF" --- (A better way to do this is "cat </etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla + cat </etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla + cat < $rules_dir/50-libvirt-$STACK_USER.rules + cat < Date: Wed, 26 Feb 2014 11:16:09 +1100 Subject: [PATCH 0771/4704] Run yum repolist commands as root Otherwise you get yum errors like [1] when you run stack.sh as !root. The solution is to run yum commands as root so it can access the right certs [1] https://access.redhat.com/site/solutions/312413 Change-Id: I54b0df13508c50aba67e23da11953c536933917a --- stack.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stack.sh b/stack.sh index 9cdf2648f1..b3c507b600 100755 --- a/stack.sh +++ b/stack.sh @@ -181,7 +181,7 @@ if [[ is_fedora && $DISTRO =~ (rhel6) ]]; then # Installing Open vSwitch on RHEL6 requires enabling the RDO repo. RHEL6_RDO_REPO_RPM=${RHEL6_RDO_REPO_RPM:-"http://rdo.fedorapeople.org/openstack-havana/rdo-release-havana.rpm"} RHEL6_RDO_REPO_ID=${RHEL6_RDO_REPO_ID:-"openstack-havana"} - if ! yum repolist enabled $RHEL6_RDO_REPO_ID | grep -q $RHEL6_RDO_REPO_ID; then + if ! sudo yum repolist enabled $RHEL6_RDO_REPO_ID | grep -q $RHEL6_RDO_REPO_ID; then echo "RDO repo not detected; installing" yum_install $RHEL6_RDO_REPO_RPM || \ die $LINENO "Error installing RDO repo, cannot continue" @@ -189,7 +189,7 @@ if [[ is_fedora && $DISTRO =~ (rhel6) ]]; then # RHEL6 requires EPEL for many Open Stack dependencies RHEL6_EPEL_RPM=${RHEL6_EPEL_RPM:-"http://dl.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm"} - if ! yum repolist enabled epel | grep -q 'epel'; then + if ! 
sudo yum repolist enabled epel | grep -q 'epel'; then echo "EPEL not detected; installing" yum_install ${RHEL6_EPEL_RPM} || \ die $LINENO "Error installing EPEL repo, cannot continue" From 3e37326a3566ac38ea7ccf053fc183b7a8fccc08 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Wed, 26 Feb 2014 13:29:31 +1100 Subject: [PATCH 0772/4704] Move enablement of rhel6-optional repo earlier Change 4f1fee6eae300a3384900df06ebc857e95854eb0 added the RHEL6 optional repo in fixup_stuff.sh, but it turns out that doesn't get run until after the package prerequisites phase. Move this into stack.sh with the RDO repo setup. Change-Id: Iae0df85fa94c6c1b6f497dd29fda90d03b903a41 --- stack.sh | 4 ++++ tools/fixup_stuff.sh | 4 ---- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/stack.sh b/stack.sh index 9cdf2648f1..217afbc2e3 100755 --- a/stack.sh +++ b/stack.sh @@ -194,6 +194,10 @@ if [[ is_fedora && $DISTRO =~ (rhel6) ]]; then yum_install ${RHEL6_EPEL_RPM} || \ die $LINENO "Error installing EPEL repo, cannot continue" fi + + # ... 
and also optional to be enabled + sudo yum-config-manager --enable rhel-6-server-optional-rpms + fi diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index 048024a325..47b0cd10cd 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -98,10 +98,6 @@ if [[ $DISTRO =~ (rhel6) ]]; then sudo setenforce 0 fi - # make sure we have the "optional" repo enabled; it provides some - # packages like libffi-devel for example - sudo yum-config-manager --enable rhel-6-server-optional-rpms - # If the ``dbus`` package was installed by DevStack dependencies the # uuid may not be generated because the service was never started (PR#598200), # causing Nova to stop later on complaining that ``/var/lib/dbus/machine-id`` From 201850120bec762347b80b22b5c60df43a262c6e Mon Sep 17 00:00:00 2001 From: Eric Windisch Date: Mon, 3 Feb 2014 12:14:08 -0500 Subject: [PATCH 0773/4704] Make python-guestfs NOPRIME & install for libvirt The libguestfs dependency tree includes a number of packages that we may not want or work everywhere, such as fuse. 
Now python-(lib)guestfs will install from lib/nova_plugins/hypervisor-libvirt Change-Id: I6c3a614010ee8d65813eec66a56680def622514c --- files/apts/n-cpu | 2 +- files/rpms/n-cpu | 2 +- lib/nova_plugins/hypervisor-libvirt | 2 ++ 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/files/apts/n-cpu b/files/apts/n-cpu index b287107256..a82304dfe2 100644 --- a/files/apts/n-cpu +++ b/files/apts/n-cpu @@ -5,4 +5,4 @@ open-iscsi-utils # Deprecated since quantal dist:precise genisoimage sysfsutils sg3-utils -python-guestfs +python-guestfs # NOPRIME diff --git a/files/rpms/n-cpu b/files/rpms/n-cpu index e4fdaf4eda..32b1546c39 100644 --- a/files/rpms/n-cpu +++ b/files/rpms/n-cpu @@ -4,4 +4,4 @@ lvm2 genisoimage sysfsutils sg3_utils -python-libguestfs +python-libguestfs # NOPRIME diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt index 415244ffae..7f0880494e 100644 --- a/lib/nova_plugins/hypervisor-libvirt +++ b/lib/nova_plugins/hypervisor-libvirt @@ -140,10 +140,12 @@ function install_nova_hypervisor() { install_package kvm install_package libvirt-bin install_package python-libvirt + install_package python-guestfs elif is_fedora || is_suse; then install_package kvm install_package libvirt install_package libvirt-python + install_package python-libguestfs fi # Install and configure **LXC** if specified. LXC is another approach to From 06ba5193bebe27b2d7ead2d31ed9171885c6a5d8 Mon Sep 17 00:00:00 2001 From: Jamie Lennox Date: Wed, 26 Feb 2014 13:46:56 +1000 Subject: [PATCH 0774/4704] Insecure check if keystone is up If we start keystone with an SSL endpoint then the curl check to see if it is running will fail because it cannot create a secure connection. This check can be done insecurely as all we care about is that the service has started. 
Change-Id: I826753d4d46e9956f443110029346bc70282951a --- lib/keystone | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/keystone b/lib/keystone index cebb4d3522..73af1d356d 100644 --- a/lib/keystone +++ b/lib/keystone @@ -424,7 +424,7 @@ function start_keystone() { fi echo "Waiting for keystone to start..." - if ! timeout $SERVICE_TIMEOUT sh -c "while ! curl --noproxy '*' -s $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$service_port/v$IDENTITY_API_VERSION/ >/dev/null; do sleep 1; done"; then + if ! timeout $SERVICE_TIMEOUT sh -c "while ! curl --noproxy '*' -k -s $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$service_port/v$IDENTITY_API_VERSION/ >/dev/null; do sleep 1; done"; then die $LINENO "keystone did not start" fi From 3b57829ece7aa231770b640afd6da961dae2fc1c Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Wed, 26 Feb 2014 14:52:02 +1100 Subject: [PATCH 0775/4704] Don't use --skip-redirect for cinder restart on rhel6 RHEL6 doesn't support this flag so the restart fails. Not exactly sure why it is required, seems unchagned from the initial commit 67787e6b4c6f31388cbee6d83b67371b31c443d4 (found running stack.sh with -e per [1]) [1] https://review.openstack.org/#/c/71996/ Change-Id: Ib34c3663409d7b96b932286cb5a6974e940075d3 --- lib/cinder | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/lib/cinder b/lib/cinder index c8c90c098d..e8f30b683c 100644 --- a/lib/cinder +++ b/lib/cinder @@ -496,8 +496,12 @@ function start_cinder() { sudo stop tgt || true sudo start tgt elif is_fedora; then - # bypass redirection to systemctl during restart - sudo /sbin/service --skip-redirect tgtd restart + if [[ $DISTRO =~ (rhel6) ]]; then + sudo /sbin/service tgtd restart + else + # bypass redirection to systemctl during restart + sudo /sbin/service --skip-redirect tgtd restart + fi elif is_suse; then restart_service tgtd else From 1755f689e807cd73b7bb2c67ac0531afbc8c6448 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 26 Feb 2014 13:08:00 -0600 
Subject: [PATCH 0776/4704] Fix heat role create error https://review.openstack.org/#/c/76036/ changed the user creat commands, missed the argument to --user Change-Id: Iaf10ef80a2fb0227dd66a314e7ec253dfb4dc4fe --- lib/heat | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/heat b/lib/heat index 1b6dc86989..972c35ce72 100644 --- a/lib/heat +++ b/lib/heat @@ -214,7 +214,7 @@ function create_heat_accounts() { --description "Manages users and projects created by heat" openstack --os-token $OS_TOKEN --os-url=$KS_ENDPOINT_V3 \ --os-identity-api-version=3 role add \ - --user ${U_ID} --domain ${D_ID} admin + --user heat_domain_admin --domain ${D_ID} admin iniset $HEAT_CONF DEFAULT stack_domain_admin heat_domain_admin iniset $HEAT_CONF DEFAULT stack_domain_admin_password $SERVICE_PASSWORD } From a25a6f6d80cb844f13540fecf616b289c42e3ebe Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 24 Feb 2014 16:03:41 -0600 Subject: [PATCH 0777/4704] Unbuffer log output * Force-flush log output so we don't lose log output in certain error cases. 
* Slow down exit paths: add sleep to die(), wait until last moment to kill child processes (including the awk log output filter) Change-Id: I1620fd33b89b237d9c2bb6206f3de2c81719f676 --- functions-common | 2 ++ stack.sh | 26 +++++++++++++++----------- 2 files changed, 17 insertions(+), 11 deletions(-) diff --git a/functions-common b/functions-common index 79003fcfaf..4bc3bbaac5 100644 --- a/functions-common +++ b/functions-common @@ -222,6 +222,8 @@ function die() { fi backtrace 2 err $line "$*" + # Give buffers a second to flush + sleep 1 exit $exitcode } diff --git a/stack.sh b/stack.sh index 22a418f306..c95199769f 100755 --- a/stack.sh +++ b/stack.sh @@ -522,7 +522,7 @@ if [[ -n "$LOGFILE" ]]; then exec 3>&1 if [[ "$VERBOSE" == "True" ]]; then # Redirect stdout/stderr to tee to write the log file - exec 1> >( awk ' + exec 1> >( awk -v logfile=${LOGFILE} ' /((set \+o$)|xtrace)/ { next } { cmd ="date +\"%Y-%m-%d %H:%M:%S.%3N | \"" @@ -530,8 +530,9 @@ if [[ -n "$LOGFILE" ]]; then close("date +\"%Y-%m-%d %H:%M:%S.%3N | \"") sub(/^/, now) print - fflush() - }' | tee "${LOGFILE}" ) 2>&1 + print > logfile + fflush("") + }' ) 2>&1 # Set up a second fd for output exec 6> >( tee "${SUMFILE}" ) else @@ -579,21 +580,24 @@ fi # ----------------------- # Kill background processes on exit -trap clean EXIT -clean() { +trap exit_trap EXIT +function exit_trap { local r=$? - kill >/dev/null 2>&1 $(jobs -p) + echo "exit_trap called, cleaning up child processes" + kill 2>&1 $(jobs -p) exit $r } - # Exit on any errors so that errors don't compound -trap failed ERR -failed() { +trap err_trap ERR +function err_trap { local r=$? 
- kill >/dev/null 2>&1 $(jobs -p) set +o xtrace - [ -n "$LOGFILE" ] && echo "${0##*/} failed: full log in $LOGFILE" + if [[ -n "$LOGFILE" ]]; then + echo "${0##*/} failed: full log in $LOGFILE" + else + echo "${0##*/} failed" + fi exit $r } From 09bd7c8fd5a662ef697eb61638efbe862a4875a6 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Mon, 3 Feb 2014 08:35:26 +0900 Subject: [PATCH 0778/4704] enable -o errexit devstack should run under -o errexit to ensure that we fail early when something has gone wrong, otherwise determination of the root failure location is often quite challenging. this clears all the normal use cases for devstack, there could be tests which now die early, which we're going to have to check for later. Change-Id: Ibd828c4f4fd95a60d3918d3d7ae90e10649479ab --- functions-common | 3 ++- stack.sh | 3 +++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/functions-common b/functions-common index 4bc3bbaac5..3e29e8c7de 100644 --- a/functions-common +++ b/functions-common @@ -1094,7 +1094,8 @@ function service_check() { fi # Check if there is any falure flag file under $SERVICE_DIR/$SCREEN_NAME - failures=`ls "$SERVICE_DIR/$SCREEN_NAME"/*.failure 2>/dev/null` + # make this -o errexit safe + failures=`ls "$SERVICE_DIR/$SCREEN_NAME"/*.failure 2>/dev/null || /bin/true` for service in $failures; do service=`basename $service` diff --git a/stack.sh b/stack.sh index c95199769f..d43a73a889 100755 --- a/stack.sh +++ b/stack.sh @@ -601,6 +601,9 @@ function err_trap { exit $r } + +set -o errexit + # Print the commands being run so that we can see the command that triggers # an error. It is also useful for following along as the install occurs. 
set -o xtrace From a42650fb7e4d3fc8853f04d84109199fa1d9f5e4 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Thu, 27 Feb 2014 13:08:30 +0100 Subject: [PATCH 0779/4704] Fix libvirt polkit settings After the https://review.openstack.org/#/c/75314 merged the /etc/polkit-1/rules.d/50-libvirt-stack.rules files contains subject.user == '"stack"' instead of subject.user == 'stack'. Change-Id: I09f252b2d0e53f012facb9f7eaa21c1e1bdf492b --- lib/nova_plugins/hypervisor-libvirt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt index a550600363..dc999edfe9 100644 --- a/lib/nova_plugins/hypervisor-libvirt +++ b/lib/nova_plugins/hypervisor-libvirt @@ -87,7 +87,7 @@ EOF cat < Date: Thu, 27 Feb 2014 11:13:36 -0600 Subject: [PATCH 0780/4704] Fix exit_trap() error if no child processes Bug-Id: 1285776 Change-Id: Iad7a9f2c03cc39159beda55345f232cefed10520 --- stack.sh | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/stack.sh b/stack.sh index 4333fb2c7e..f4342dd206 100755 --- a/stack.sh +++ b/stack.sh @@ -587,8 +587,11 @@ fi trap exit_trap EXIT function exit_trap { local r=$? - echo "exit_trap called, cleaning up child processes" - kill 2>&1 $(jobs -p) + jobs=$(jobs -p) + if [[ -n $jobs ]]; then + echo "exit_trap: cleaning up child processes" + kill 2>&1 $jobs + fi exit $r } From 83b6c99b503dced1e92761e1de8ceaf23a396453 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Thu, 27 Feb 2014 12:41:28 -0600 Subject: [PATCH 0781/4704] Handle non-zero exit code from git diff The check for a changed repo in setup_develop() 'git diff --exit-code' returns a status of 1 when the repo has changes; trap that so errexit does not abort the script. 
Bug-Id: 1285780 Change-Id: Ic97e68348f46245b271567893b447fcedbd7bd6e --- functions-common | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/functions-common b/functions-common index 8e6b2b1895..0d85068a2f 100644 --- a/functions-common +++ b/functions-common @@ -1223,14 +1223,12 @@ function pip_install { function setup_develop() { local project_dir=$1 - echo "cd $REQUIREMENTS_DIR; $SUDO_CMD python update.py $project_dir" - # Don't update repo if local changes exist # Don't use buggy "git diff --quiet" - (cd $project_dir && git diff --exit-code >/dev/null) - local update_requirements=$? + # ``errexit`` requires us to trap the exit code when the repo is changed + local update_requirements=$(cd $project_dir && git diff --exit-code >/dev/null || echo "changed") - if [ $update_requirements -eq 0 ]; then + if [[ $update_requirements = "changed" ]]; then (cd $REQUIREMENTS_DIR; \ $SUDO_CMD python update.py $project_dir) fi @@ -1246,7 +1244,7 @@ function setup_develop() { # a variable that tells us whether or not we should UNDO the requirements # changes (this will be set to False in the OpenStack ci gate) if [ $UNDO_REQUIREMENTS = "True" ]; then - if [ $update_requirements -eq 0 ]; then + if [[ $update_requirements = "changed" ]]; then (cd $project_dir && git reset --hard) fi fi From 657ce7fa213b680904c07f09029467d8a195761d Mon Sep 17 00:00:00 2001 From: Devananda van der Veen Date: Thu, 27 Feb 2014 10:50:38 -0800 Subject: [PATCH 0782/4704] Stop trying to create the 'ironic' user twice After 09bd7c8fd5a6 landed, a conflict between lib/ironic and extras.d/50-ironic.sh was exposed, breaking Ironic's check and gate tests. This resolves that conflict by only creating the 'ironic' user once. 
Change-Id: Ic41517f0977c84a82f92f58565aaee6b5cc7eb3e --- lib/ironic | 3 --- 1 file changed, 3 deletions(-) diff --git a/lib/ironic b/lib/ironic index 607b13125a..f4454749dc 100644 --- a/lib/ironic +++ b/lib/ironic @@ -186,9 +186,6 @@ function init_ironic() { $IRONIC_BIN_DIR/ironic-dbsync create_ironic_cache_dir - - # Create keystone artifacts for Ironic. - create_ironic_accounts } # start_ironic() - Start running processes, including screen From aee18c749b0e3a1a3a6907a33db76ae83b8d41d9 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Fri, 21 Feb 2014 15:35:08 +1100 Subject: [PATCH 0783/4704] Enforce function declaration format in bash8 Check that function calls look like ^function foo {$ in bash8, and fix all existing failures of that check. Add a note to HACKING.rst Change-Id: Ic19eecb39e0b20273d1bcd551a42fe400d54e938 --- HACKING.rst | 2 + driver_certs/cinder_driver_cert.sh | 2 +- exercises/aggregates.sh | 2 +- exercises/client-args.sh | 2 +- exercises/client-env.sh | 2 +- exercises/neutron-adv-test.sh | 6 +- functions | 22 ++-- functions-common | 122 ++++++++++---------- lib/apache | 14 +-- lib/baremetal | 22 ++-- lib/ceilometer | 18 +-- lib/cinder | 28 ++--- lib/cinder_plugins/XenAPINFS | 2 +- lib/cinder_plugins/glusterfs | 2 +- lib/cinder_plugins/nfs | 2 +- lib/cinder_plugins/sheepdog | 2 +- lib/cinder_plugins/solidfire | 2 +- lib/cinder_plugins/vsphere | 2 +- lib/config | 8 +- lib/gantt | 14 +-- lib/glance | 16 +-- lib/heat | 18 +-- lib/horizon | 14 +-- lib/infra | 4 +- lib/ironic | 26 ++--- lib/keystone | 22 ++-- lib/ldap | 14 +-- lib/marconi | 20 ++-- lib/neutron | 85 +++++++------- lib/neutron_plugins/bigswitch_floodlight | 22 ++-- lib/neutron_plugins/brocade | 22 ++-- lib/neutron_plugins/cisco | 42 +++---- lib/neutron_plugins/embrane | 6 +- lib/neutron_plugins/linuxbridge | 6 +- lib/neutron_plugins/linuxbridge_agent | 18 +-- lib/neutron_plugins/midonet | 24 ++-- lib/neutron_plugins/ml2 | 8 +- lib/neutron_plugins/nec | 26 ++--- 
lib/neutron_plugins/openvswitch | 6 +- lib/neutron_plugins/openvswitch_agent | 16 +-- lib/neutron_plugins/ovs_base | 16 +-- lib/neutron_plugins/plumgrid | 16 +-- lib/neutron_plugins/ryu | 22 ++-- lib/neutron_plugins/services/firewall | 6 +- lib/neutron_plugins/services/loadbalancer | 8 +- lib/neutron_plugins/services/metering | 6 +- lib/neutron_plugins/services/vpn | 6 +- lib/neutron_plugins/vmware_nsx | 26 ++--- lib/neutron_thirdparty/bigswitch_floodlight | 12 +- lib/neutron_thirdparty/midonet | 12 +- lib/neutron_thirdparty/ryu | 12 +- lib/neutron_thirdparty/trema | 16 +-- lib/neutron_thirdparty/vmware_nsx | 12 +- lib/nova | 34 +++--- lib/nova_plugins/hypervisor-baremetal | 10 +- lib/nova_plugins/hypervisor-docker | 10 +- lib/nova_plugins/hypervisor-fake | 10 +- lib/nova_plugins/hypervisor-libvirt | 10 +- lib/nova_plugins/hypervisor-openvz | 10 +- lib/nova_plugins/hypervisor-vsphere | 10 +- lib/nova_plugins/hypervisor-xenserver | 10 +- lib/oslo | 4 +- lib/rpc_backend | 10 +- lib/savanna | 12 +- lib/savanna-dashboard | 8 +- lib/stackforge | 4 +- lib/swift | 24 ++-- lib/tempest | 6 +- lib/template | 12 +- lib/tls | 20 ++-- lib/trove | 20 ++-- stack.sh | 6 +- tests/functions.sh | 6 +- tests/test_config.sh | 6 +- tools/bash8.py | 16 +++ tools/build_pxe_env.sh | 2 +- tools/build_ramdisk.sh | 4 +- tools/build_uec_ramdisk.sh | 2 +- tools/build_usb_boot.sh | 2 +- tools/copy_dev_environment_to_uec.sh | 2 +- tools/create_userrc.sh | 11 +- tools/fixup_stuff.sh | 2 +- tools/get_uec_image.sh | 4 +- tools/info.sh | 2 +- tools/install_openvpn.sh | 8 +- tools/install_pip.sh | 6 +- tools/jenkins/build_configuration.sh | 2 +- tools/jenkins/configurations/kvm.sh | 2 +- tools/jenkins/configurations/xs.sh | 2 +- tools/jenkins/run_test.sh | 2 +- tools/warm_apts_for_uec.sh | 2 +- tools/xen/build_xva.sh | 4 +- tools/xen/install_os_domU.sh | 6 +- tools/xen/prepare_guest.sh | 2 +- 94 files changed, 601 insertions(+), 585 deletions(-) diff --git a/HACKING.rst b/HACKING.rst index 
103b579621..5c15537915 100644 --- a/HACKING.rst +++ b/HACKING.rst @@ -275,3 +275,5 @@ Variables and Functions - local variables should be lower case, global variables should be upper case - function names should_have_underscores, NotCamelCase. +- functions should be declared as per the regex ^function foo {$ + with code starting on the next line diff --git a/driver_certs/cinder_driver_cert.sh b/driver_certs/cinder_driver_cert.sh index e45b7f8736..d2c636f89d 100755 --- a/driver_certs/cinder_driver_cert.sh +++ b/driver_certs/cinder_driver_cert.sh @@ -32,7 +32,7 @@ source $TOP_DIR/lib/cinder TEMPFILE=`mktemp` RECLONE=True -function log_message() { +function log_message { MESSAGE=$1 STEP_HEADER=$2 if [[ "$STEP_HEADER" = "True" ]]; then diff --git a/exercises/aggregates.sh b/exercises/aggregates.sh index d223301f35..01d548d1f2 100755 --- a/exercises/aggregates.sh +++ b/exercises/aggregates.sh @@ -57,7 +57,7 @@ AGGREGATE_NAME=test_aggregate_$RANDOM AGGREGATE2_NAME=test_aggregate_$RANDOM AGGREGATE_A_ZONE=nova -exit_if_aggregate_present() { +function exit_if_aggregate_present { aggregate_name=$1 if [ $(nova aggregate-list | grep -c " $aggregate_name ") == 0 ]; then diff --git a/exercises/client-args.sh b/exercises/client-args.sh index e79774f98c..b360f1e86a 100755 --- a/exercises/client-args.sh +++ b/exercises/client-args.sh @@ -154,7 +154,7 @@ set +o xtrace # Results # ======= -function report() { +function report { if [[ -n "$2" ]]; then echo "$1: $2" fi diff --git a/exercises/client-env.sh b/exercises/client-env.sh index 6c6fe12282..d955e4d1e1 100755 --- a/exercises/client-env.sh +++ b/exercises/client-env.sh @@ -165,7 +165,7 @@ set +o xtrace # Results # ======= -function report() { +function report { if [[ -n "$2" ]]; then echo "$1: $2" fi diff --git a/exercises/neutron-adv-test.sh b/exercises/neutron-adv-test.sh index a9199e62a6..0a24fe9df7 100755 --- a/exercises/neutron-adv-test.sh +++ b/exercises/neutron-adv-test.sh @@ -20,7 +20,7 @@ echo 
"*********************************************************************" set -o errtrace trap failed ERR -failed() { +function failed { local r=$? set +o errtrace set +o xtrace @@ -395,7 +395,7 @@ function test_functions { # Usage and main # -------------- -usage() { +function usage { echo "$0: [-h]" echo " -h, --help Display help message" echo " -t, --tenant Create tenants" @@ -408,7 +408,7 @@ usage() { echo " -T, --test Test functions" } -main() { +function main { echo Description diff --git a/functions b/functions index 3101111c63..43639c79fb 100644 --- a/functions +++ b/functions @@ -51,7 +51,7 @@ function cleanup_tmp { # - ``GLANCE_HOSTPORT`` # # upload_image image-url glance-token -function upload_image() { +function upload_image { local image_url=$1 local token=$2 @@ -341,7 +341,7 @@ function use_database { # Wait for an HTTP server to start answering requests # wait_for_service timeout url -function wait_for_service() { +function wait_for_service { local timeout=$1 local url=$2 timeout $timeout sh -c "while ! 
curl --noproxy '*' -s $url >/dev/null; do sleep 1; done" @@ -351,7 +351,7 @@ function wait_for_service() { # ping check # Uses globals ``ENABLED_SERVICES`` # ping_check from-net ip boot-timeout expected -function ping_check() { +function ping_check { if is_service_enabled neutron; then _ping_check_neutron "$1" $2 $3 $4 return @@ -361,7 +361,7 @@ function ping_check() { # ping check for nova # Uses globals ``MULTI_HOST``, ``PRIVATE_NETWORK`` -function _ping_check_novanet() { +function _ping_check_novanet { local from_net=$1 local ip=$2 local boot_timeout=$3 @@ -386,7 +386,7 @@ function _ping_check_novanet() { } # Get ip of instance -function get_instance_ip(){ +function get_instance_ip { local vm_id=$1 local network_name=$2 local nova_result="$(nova show $vm_id)" @@ -401,7 +401,7 @@ function get_instance_ip(){ # ssh check # ssh_check net-name key-file floating-ip default-user active-timeout -function ssh_check() { +function ssh_check { if is_service_enabled neutron; then _ssh_check_neutron "$1" $2 $3 $4 $5 return @@ -409,7 +409,7 @@ function ssh_check() { _ssh_check_novanet "$1" $2 $3 $4 $5 } -function _ssh_check_novanet() { +function _ssh_check_novanet { local NET_NAME=$1 local KEY_FILE=$2 local FLOATING_IP=$3 @@ -425,7 +425,7 @@ function _ssh_check_novanet() { # Get the location of the $module-rootwrap executables, where module is cinder # or nova. # get_rootwrap_location module -function get_rootwrap_location() { +function get_rootwrap_location { local module=$1 echo "$(get_python_exec_prefix)/$module-rootwrap" @@ -434,7 +434,7 @@ function get_rootwrap_location() { # Path permissions sanity check # check_path_perm_sanity path -function check_path_perm_sanity() { +function check_path_perm_sanity { # Ensure no element of the path has 0700 permissions, which is very # likely to cause issues for daemons. 
Inspired by default 0700 # homedir permissions on RHEL and common practice of making DEST in @@ -505,7 +505,7 @@ function _vercmp_r { # The above will return "0", as the versions are equal. # # vercmp_numbers ver1 ver2 -vercmp_numbers() { +function vercmp_numbers { typeset v1=$1 v2=$2 sep typeset -a ver1 ver2 @@ -523,7 +523,7 @@ vercmp_numbers() { # Defaults are respectively 'project_name' and 'user_name' # # setup_colorized_logging something.conf SOMESECTION -function setup_colorized_logging() { +function setup_colorized_logging { local conf_file=$1 local conf_section=$2 local project_var=${3:-"project_name"} diff --git a/functions-common b/functions-common index 2248fbb610..eba4985e40 100644 --- a/functions-common +++ b/functions-common @@ -38,7 +38,7 @@ set +o xtrace # Append a new option in an ini file without replacing the old value # iniadd config-file section option value1 value2 value3 ... -function iniadd() { +function iniadd { local xtrace=$(set +o | grep xtrace) set +o xtrace local file=$1 @@ -52,7 +52,7 @@ function iniadd() { # Comment an option in an INI file # inicomment config-file section option -function inicomment() { +function inicomment { local xtrace=$(set +o | grep xtrace) set +o xtrace local file=$1 @@ -64,7 +64,7 @@ function inicomment() { # Get an option from an INI file # iniget config-file section option -function iniget() { +function iniget { local xtrace=$(set +o | grep xtrace) set +o xtrace local file=$1 @@ -78,7 +78,7 @@ function iniget() { # Get a multiple line option from an INI file # iniget_multiline config-file section option -function iniget_multiline() { +function iniget_multiline { local xtrace=$(set +o | grep xtrace) set +o xtrace local file=$1 @@ -92,7 +92,7 @@ function iniget_multiline() { # Determinate is the given option present in the INI file # ini_has_option config-file section option -function ini_has_option() { +function ini_has_option { local xtrace=$(set +o | grep xtrace) set +o xtrace local file=$1 @@ -106,7 
+106,7 @@ function ini_has_option() { # Set an option in an INI file # iniset config-file section option value -function iniset() { +function iniset { local xtrace=$(set +o | grep xtrace) set +o xtrace local file=$1 @@ -135,7 +135,7 @@ $option = $value # Set a multiple line option in an INI file # iniset_multiline config-file section option value1 value2 valu3 ... -function iniset_multiline() { +function iniset_multiline { local xtrace=$(set +o | grep xtrace) set +o xtrace local file=$1 @@ -167,7 +167,7 @@ $option = $v # Uncomment an option in an INI file # iniuncomment config-file section option -function iniuncomment() { +function iniuncomment { local xtrace=$(set +o | grep xtrace) set +o xtrace local file=$1 @@ -181,7 +181,7 @@ function iniuncomment() { # Accepts as False: 0 no No NO false False FALSE # Accepts as True: 1 yes Yes YES true True TRUE # VAR=$(trueorfalse default-value test-value) -function trueorfalse() { +function trueorfalse { local xtrace=$(set +o | grep xtrace) set +o xtrace local default=$1 @@ -213,7 +213,7 @@ function backtrace { # Prints line number and "message" then exits # die $LINENO "message" -function die() { +function die { local exitcode=$? set +o xtrace local line=$1; shift @@ -231,7 +231,7 @@ function die() { # exit code is non-zero and prints "message" and exits # NOTE: env-var is the variable name without a '$' # die_if_not_set $LINENO env-var "message" -function die_if_not_set() { +function die_if_not_set { local exitcode=$? FXTRACE=$(set +o | grep xtrace) set +o xtrace @@ -245,7 +245,7 @@ function die_if_not_set() { # Prints line number and "message" in error format # err $LINENO "message" -function err() { +function err { local exitcode=$? 
errXTRACE=$(set +o | grep xtrace) set +o xtrace @@ -262,7 +262,7 @@ function err() { # exit code is non-zero and prints "message" # NOTE: env-var is the variable name without a '$' # err_if_not_set $LINENO env-var "message" -function err_if_not_set() { +function err_if_not_set { local exitcode=$? errinsXTRACE=$(set +o | grep xtrace) set +o xtrace @@ -291,14 +291,14 @@ function exit_distro_not_supported { # Test if the named environment variable is set and not zero length # is_set env-var -function is_set() { +function is_set { local var=\$"$1" eval "[ -n \"$var\" ]" # For ex.: sh -c "[ -n \"$var\" ]" would be better, but several exercises depends on this } # Prints line number and "message" in warning format # warn $LINENO "message" -function warn() { +function warn { local exitcode=$? errXTRACE=$(set +o | grep xtrace) set +o xtrace @@ -324,7 +324,7 @@ function warn() { # os_PACKAGE - package type # os_CODENAME - vendor's codename for release # GetOSVersion -GetOSVersion() { +function GetOSVersion { # Figure out which vendor we are if [[ -x "`which sw_vers 2>/dev/null`" ]]; then # OS/X @@ -414,7 +414,7 @@ GetOSVersion() { # Translate the OS version values into common nomenclature # Sets global ``DISTRO`` from the ``os_*`` values -function GetDistro() { +function GetDistro { GetOSVersion if [[ "$os_VENDOR" =~ (Ubuntu) || "$os_VENDOR" =~ (Debian) ]]; then # 'Everyone' refers to Ubuntu / Debian releases by the code name adjective @@ -491,7 +491,7 @@ function is_ubuntu { # Returns openstack release name for a given branch name # ``get_release_name_from_branch branch-name`` -function get_release_name_from_branch(){ +function get_release_name_from_branch { local branch=$1 if [[ $branch =~ "stable/" ]]; then echo ${branch#*/} @@ -577,7 +577,7 @@ function git_clone { # to timeout(1); otherwise the default value of 0 maintains the status # quo of waiting forever. 
# usage: git_timed -function git_timed() { +function git_timed { local count=0 local timeout=0 @@ -603,7 +603,7 @@ function git_timed() { # git update using reference as a branch. # git_update_branch ref -function git_update_branch() { +function git_update_branch { GIT_BRANCH=$1 @@ -615,7 +615,7 @@ function git_update_branch() { # git update using reference as a branch. # git_update_remote_branch ref -function git_update_remote_branch() { +function git_update_remote_branch { GIT_BRANCH=$1 @@ -625,7 +625,7 @@ function git_update_remote_branch() { # git update using reference as a tag. Be careful editing source at that repo # as working copy will be in a detached mode # git_update_tag ref -function git_update_tag() { +function git_update_tag { GIT_TAG=$1 @@ -641,7 +641,7 @@ function git_update_tag() { # Get the default value for HOST_IP # get_default_host_ip fixed_range floating_range host_ip_iface host_ip -function get_default_host_ip() { +function get_default_host_ip { local fixed_range=$1 local floating_range=$2 local host_ip_iface=$3 @@ -673,7 +673,7 @@ function get_default_host_ip() { # Fields are numbered starting with 1 # Reverse syntax is supported: -1 is the last field, -2 is second to last, etc. 
# get_field field-number -function get_field() { +function get_field { while read data; do if [ "$1" -lt 0 ]; then field="(\$(NF$1))" @@ -687,7 +687,7 @@ function get_field() { # Add a policy to a policy.json file # Do nothing if the policy already exists # ``policy_add policy_file policy_name policy_permissions`` -function policy_add() { +function policy_add { local policy_file=$1 local policy_name=$2 local policy_perm=$3 @@ -717,7 +717,7 @@ function policy_add() { # ================= # _get_package_dir -function _get_package_dir() { +function _get_package_dir { local pkg_dir if is_ubuntu; then pkg_dir=$FILES/apts @@ -734,7 +734,7 @@ function _get_package_dir() { # Wrapper for ``apt-get`` to set cache and proxy environment variables # Uses globals ``OFFLINE``, ``*_proxy`` # apt_get operation package [package ...] -function apt_get() { +function apt_get { local xtrace=$(set +o | grep xtrace) set +o xtrace @@ -759,7 +759,7 @@ function apt_get() { # - ``# NOPRIME`` defers installation to be performed later in `stack.sh` # - ``# dist:DISTRO`` or ``dist:DISTRO1,DISTRO2`` limits the selection # of the package to the distros listed. The distro names are case insensitive. -function get_packages() { +function get_packages { local xtrace=$(set +o | grep xtrace) set +o xtrace local services=$@ @@ -870,7 +870,7 @@ function get_packages() { # Distro-agnostic package installer # install_package package [package ...] -function install_package() { +function install_package { local xtrace=$(set +o | grep xtrace) set +o xtrace if is_ubuntu; then @@ -895,7 +895,7 @@ function install_package() { # Distro-agnostic function to tell if a package is installed # is_package_installed package [package ...] -function is_package_installed() { +function is_package_installed { if [[ -z "$@" ]]; then return 1 fi @@ -915,7 +915,7 @@ function is_package_installed() { # Distro-agnostic package uninstaller # uninstall_package package [package ...] 
-function uninstall_package() { +function uninstall_package { if is_ubuntu; then apt_get purge "$@" elif is_fedora; then @@ -930,7 +930,7 @@ function uninstall_package() { # Wrapper for ``yum`` to set proxy environment variables # Uses globals ``OFFLINE``, ``*_proxy`` # yum_install package [package ...] -function yum_install() { +function yum_install { [[ "$OFFLINE" = "True" ]] && return local sudo="sudo" [[ "$(id -u)" = "0" ]] && sudo="env" @@ -941,7 +941,7 @@ function yum_install() { # zypper wrapper to set arguments correctly # zypper_install package [package ...] -function zypper_install() { +function zypper_install { [[ "$OFFLINE" = "True" ]] && return local sudo="sudo" [[ "$(id -u)" = "0" ]] && sudo="env" @@ -958,7 +958,7 @@ function zypper_install() { # files to produce the same logs as screen_it(). The log filename is derived # from the service name and global-and-now-misnamed SCREEN_LOGDIR # _run_process service "command-line" -function _run_process() { +function _run_process { local service=$1 local command="$2" @@ -983,7 +983,7 @@ function _run_process() { # Helper to remove the ``*.failure`` files under ``$SERVICE_DIR/$SCREEN_NAME``. # This is used for ``service_check`` when all the ``screen_it`` are called finished # init_service_check -function init_service_check() { +function init_service_check { SCREEN_NAME=${SCREEN_NAME:-stack} SERVICE_DIR=${SERVICE_DIR:-${DEST}/status} @@ -996,7 +996,7 @@ function init_service_check() { # Find out if a process exists by partial name. # is_running name -function is_running() { +function is_running { local name=$1 ps auxw | grep -v grep | grep ${name} > /dev/null RC=$? @@ -1009,7 +1009,7 @@ function is_running() { # of screen_it() without screen. 
PIDs are written to # $SERVICE_DIR/$SCREEN_NAME/$service.pid # run_process service "command-line" -function run_process() { +function run_process { local service=$1 local command="$2" @@ -1092,7 +1092,7 @@ function screen_rc { # If screen is being used kill the screen window; this will catch processes # that did not leave a PID behind # screen_stop service -function screen_stop() { +function screen_stop { SCREEN_NAME=${SCREEN_NAME:-stack} SERVICE_DIR=${SERVICE_DIR:-${DEST}/status} USE_SCREEN=$(trueorfalse True $USE_SCREEN) @@ -1112,7 +1112,7 @@ function screen_stop() { # Helper to get the status of each running service # service_check -function service_check() { +function service_check { local service local failures SCREEN_NAME=${SCREEN_NAME:-stack} @@ -1145,7 +1145,7 @@ function service_check() { # Get the path to the pip command. # get_pip_command -function get_pip_command() { +function get_pip_command { which pip || which pip-python if [ $? -ne 0 ]; then @@ -1155,7 +1155,7 @@ function get_pip_command() { # Get the path to the direcotry where python executables are installed. # get_python_exec_prefix -function get_python_exec_prefix() { +function get_python_exec_prefix { if is_fedora || is_suse; then echo "/usr/bin" else @@ -1221,7 +1221,7 @@ function pip_install { # # Uses globals ``TRACK_DEPENDS``, ``REQUIREMENTS_DIR``, ``UNDO_REQUIREMENTS`` # setup_develop directory -function setup_develop() { +function setup_develop { local project_dir=$1 echo "cd $REQUIREMENTS_DIR; $SUDO_CMD python update.py $project_dir" @@ -1257,7 +1257,7 @@ function setup_develop() { # using pip before running `setup.py develop` # Uses globals ``STACK_USER`` # setup_develop_no_requirements_update directory -function setup_develop_no_requirements_update() { +function setup_develop_no_requirements_update { local project_dir=$1 pip_install -e $project_dir @@ -1271,7 +1271,7 @@ function setup_develop_no_requirements_update() { # remove extra commas from the input string (i.e. 
``ENABLED_SERVICES``) # _cleanup_service_list service-list -function _cleanup_service_list () { +function _cleanup_service_list { echo "$1" | sed -e ' s/,,/,/g; s/^,//; @@ -1284,7 +1284,7 @@ function _cleanup_service_list () { # before a minimal installation # Uses global ``ENABLED_SERVICES`` # disable_all_services -function disable_all_services() { +function disable_all_services { ENABLED_SERVICES="" } @@ -1293,7 +1293,7 @@ function disable_all_services() { # ENABLED_SERVICES+=",-rabbit" # Uses global ``ENABLED_SERVICES`` # disable_negated_services -function disable_negated_services() { +function disable_negated_services { local tmpsvcs="${ENABLED_SERVICES}" local service for service in ${tmpsvcs//,/ }; do @@ -1314,7 +1314,7 @@ function disable_negated_services() { # for nova, glance, and neutron built into is_service_enabled(). # Uses global ``ENABLED_SERVICES`` # disable_service service [service ...] -function disable_service() { +function disable_service { local tmpsvcs=",${ENABLED_SERVICES}," local service for service in $@; do @@ -1335,7 +1335,7 @@ function disable_service() { # for nova, glance, and neutron built into is_service_enabled(). # Uses global ``ENABLED_SERVICES`` # enable_service service [service ...] -function enable_service() { +function enable_service { local tmpsvcs="${ENABLED_SERVICES}" for service in $@; do if ! is_service_enabled $service; then @@ -1369,7 +1369,7 @@ function enable_service() { # # Uses global ``ENABLED_SERVICES`` # is_service_enabled service [service ...] -function is_service_enabled() { +function is_service_enabled { local xtrace=$(set +o | grep xtrace) set +o xtrace local enabled=1 @@ -1424,7 +1424,7 @@ function use_exclusive_service { # Only run the command if the target file (the last arg) is not on an # NFS filesystem. 
-function _safe_permission_operation() { +function _safe_permission_operation { local xtrace=$(set +o | grep xtrace) set +o xtrace local args=( $@ ) @@ -1457,7 +1457,7 @@ function _safe_permission_operation() { # Exit 0 if address is in network or 1 if address is not in network # ip-range is in CIDR notation: 1.2.3.4/20 # address_in_net ip-address ip-range -function address_in_net() { +function address_in_net { local ip=$1 local range=$2 local masklen=${range#*/} @@ -1468,7 +1468,7 @@ function address_in_net() { # Add a user to a group. # add_user_to_group user group -function add_user_to_group() { +function add_user_to_group { local user=$1 local group=$2 @@ -1486,7 +1486,7 @@ function add_user_to_group() { # Convert CIDR notation to a IPv4 netmask # cidr2netmask cidr-bits -function cidr2netmask() { +function cidr2netmask { local maskpat="255 255 255 255" local maskdgt="254 252 248 240 224 192 128" set -- ${maskpat:0:$(( ($1 / 8) * 4 ))}${maskdgt:$(( (7 - ($1 % 8)) * 4 )):3} @@ -1509,7 +1509,7 @@ function cp_it { # # http_proxy=http://proxy.example.com:3128/ no_proxy=repo.example.net ./stack.sh -function export_proxy_variables() { +function export_proxy_variables { if [[ -n "$http_proxy" ]]; then export http_proxy=$http_proxy fi @@ -1522,7 +1522,7 @@ function export_proxy_variables() { } # Returns true if the directory is on a filesystem mounted via NFS. 
-function is_nfs_directory() { +function is_nfs_directory { local mount_type=`stat -f -L -c %T $1` test "$mount_type" == "nfs" } @@ -1530,7 +1530,7 @@ function is_nfs_directory() { # Return the network portion of the given IP address using netmask # netmask is in the traditional dotted-quad format # maskip ip-address netmask -function maskip() { +function maskip { local ip=$1 local mask=$2 local l="${ip%.*}"; local r="${ip#*.}"; local n="${mask%.*}"; local m="${mask#*.}" @@ -1540,7 +1540,7 @@ function maskip() { # Service wrapper to restart services # restart_service service-name -function restart_service() { +function restart_service { if is_ubuntu; then sudo /usr/sbin/service $1 restart else @@ -1550,19 +1550,19 @@ function restart_service() { # Only change permissions of a file or directory if it is not on an # NFS filesystem. -function safe_chmod() { +function safe_chmod { _safe_permission_operation chmod $@ } # Only change ownership of a file or directory if it is not on an NFS # filesystem. -function safe_chown() { +function safe_chown { _safe_permission_operation chown $@ } # Service wrapper to start services # start_service service-name -function start_service() { +function start_service { if is_ubuntu; then sudo /usr/sbin/service $1 start else @@ -1572,7 +1572,7 @@ function start_service() { # Service wrapper to stop services # stop_service service-name -function stop_service() { +function stop_service { if is_ubuntu; then sudo /usr/sbin/service $1 stop else diff --git a/lib/apache b/lib/apache index 0e5712f56b..2d5e39a65d 100644 --- a/lib/apache +++ b/lib/apache @@ -50,7 +50,7 @@ fi # # Uses global ``APACHE_ENABLED_SERVICES`` # APACHE_ENABLED_SERVICES service [service ...] 
-function is_apache_enabled_service() { +function is_apache_enabled_service { services=$@ for service in ${services}; do [[ ,${APACHE_ENABLED_SERVICES}, =~ ,${service}, ]] && return 0 @@ -59,7 +59,7 @@ function is_apache_enabled_service() { } # install_apache_wsgi() - Install Apache server and wsgi module -function install_apache_wsgi() { +function install_apache_wsgi { # Apache installation, because we mark it NOPRIME if is_ubuntu; then # Install apache2, which is NOPRIME'd @@ -79,7 +79,7 @@ function install_apache_wsgi() { } # enable_apache_site() - Enable a particular apache site -function enable_apache_site() { +function enable_apache_site { local site=$@ if is_ubuntu; then sudo a2ensite ${site} @@ -90,7 +90,7 @@ function enable_apache_site() { } # disable_apache_site() - Disable a particular apache site -function disable_apache_site() { +function disable_apache_site { local site=$@ if is_ubuntu; then sudo a2dissite ${site} @@ -100,12 +100,12 @@ function disable_apache_site() { } # start_apache_server() - Start running apache server -function start_apache_server() { +function start_apache_server { start_service $APACHE_NAME } # stop_apache_server() - Stop running apache server -function stop_apache_server() { +function stop_apache_server { if [ -n "$APACHE_NAME" ]; then stop_service $APACHE_NAME else @@ -114,7 +114,7 @@ function stop_apache_server() { } # restart_apache_server -function restart_apache_server() { +function restart_apache_server { restart_service $APACHE_NAME } diff --git a/lib/baremetal b/lib/baremetal index d8cd7e936c..473de0dd39 100644 --- a/lib/baremetal +++ b/lib/baremetal @@ -166,7 +166,7 @@ BM_SHELL_IN_A_BOX=${BM_SHELL_IN_A_BOX:-http://shellinabox.googlecode.com/files/s # Check if baremetal is properly enabled # Returns false if VIRT_DRIVER is not baremetal, or if ENABLED_SERVICES # does not contain "baremetal" -function is_baremetal() { +function is_baremetal { if [[ "$ENABLED_SERVICES" =~ 'baremetal' && "$VIRT_DRIVER" = 'baremetal' ]]; 
then return 0 fi @@ -175,7 +175,7 @@ function is_baremetal() { # Install diskimage-builder and shell-in-a-box # so that we can build the deployment kernel & ramdisk -function prepare_baremetal_toolchain() { +function prepare_baremetal_toolchain { git_clone $BM_IMAGE_BUILD_REPO $BM_IMAGE_BUILD_DIR $BM_IMAGE_BUILD_BRANCH git_clone $BM_POSEUR_REPO $BM_POSEUR_DIR $BM_POSEUR_BRANCH @@ -197,7 +197,7 @@ function prepare_baremetal_toolchain() { } # set up virtualized environment for devstack-gate testing -function create_fake_baremetal_env() { +function create_fake_baremetal_env { local bm_poseur="$BM_POSEUR_DIR/bm_poseur" # TODO(deva): add support for >1 VM sudo $bm_poseur $BM_POSEUR_EXTRA_OPTS create-bridge @@ -211,14 +211,14 @@ function create_fake_baremetal_env() { BM_SECOND_MAC='12:34:56:78:90:12' } -function cleanup_fake_baremetal_env() { +function cleanup_fake_baremetal_env { local bm_poseur="$BM_POSEUR_DIR/bm_poseur" sudo $bm_poseur $BM_POSEUR_EXTRA_OPTS destroy-vm sudo $bm_poseur $BM_POSEUR_EXTRA_OPTS destroy-bridge } # prepare various directories needed by baremetal hypervisor -function configure_baremetal_nova_dirs() { +function configure_baremetal_nova_dirs { # ensure /tftpboot is prepared sudo mkdir -p /tftpboot sudo mkdir -p /tftpboot/pxelinux.cfg @@ -249,7 +249,7 @@ function configure_baremetal_nova_dirs() { # build deploy kernel+ramdisk, then upload them to glance # this function sets BM_DEPLOY_KERNEL_ID and BM_DEPLOY_RAMDISK_ID -function upload_baremetal_deploy() { +function upload_baremetal_deploy { token=$1 if [ "$BM_BUILD_DEPLOY_RAMDISK" = "True" ]; then @@ -281,7 +281,7 @@ function upload_baremetal_deploy() { # create a basic baremetal flavor, associated with deploy kernel & ramdisk # # Usage: create_baremetal_flavor -function create_baremetal_flavor() { +function create_baremetal_flavor { aki=$1 ari=$2 nova flavor-create $BM_FLAVOR_NAME $BM_FLAVOR_ID \ @@ -298,7 +298,7 @@ function create_baremetal_flavor() { # Sets KERNEL_ID and RAMDISK_ID # # Usage: 
extract_and_upload_k_and_r_from_image $token $file -function extract_and_upload_k_and_r_from_image() { +function extract_and_upload_k_and_r_from_image { token=$1 file=$2 image_name=$(basename "$file" ".qcow2") @@ -339,7 +339,7 @@ function extract_and_upload_k_and_r_from_image() { # Takes the same parameters, but has some peculiarities which made it # easier to create a separate method, rather than complicate the logic # of the existing function. -function upload_baremetal_image() { +function upload_baremetal_image { local image_url=$1 local token=$2 @@ -429,7 +429,7 @@ function upload_baremetal_image() { DEFAULT_IMAGE_NAME="${IMAGE_NAME%.img}" } -function clear_baremetal_of_all_nodes() { +function clear_baremetal_of_all_nodes { list=$(nova baremetal-node-list | awk -F '| ' 'NR>3 {print $2}' ) for node in $list; do nova baremetal-node-delete $node @@ -440,7 +440,7 @@ function clear_baremetal_of_all_nodes() { # Defaults to using BM_FIRST_MAC and BM_SECOND_MAC if parameters not specified # # Usage: add_baremetal_node -function add_baremetal_node() { +function add_baremetal_node { mac_1=${1:-$BM_FIRST_MAC} mac_2=${2:-$BM_SECOND_MAC} diff --git a/lib/ceilometer b/lib/ceilometer index 6c87d03b13..d20d628247 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -105,18 +105,18 @@ create_ceilometer_accounts() { # cleanup_ceilometer() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up -function cleanup_ceilometer() { +function cleanup_ceilometer { mongo ceilometer --eval "db.dropDatabase();" } # configure_ceilometerclient() - Set config files, create data dirs, etc -function configure_ceilometerclient() { +function configure_ceilometerclient { setup_develop $CEILOMETERCLIENT_DIR sudo install -D -m 0644 -o $STACK_USER {$CEILOMETERCLIENT_DIR/tools/,/etc/bash_completion.d/}ceilometer.bash_completion } # configure_ceilometer() - Set config files, create data dirs, etc -function configure_ceilometer() { +function 
configure_ceilometer { setup_develop $CEILOMETER_DIR [ ! -d $CEILOMETER_CONF_DIR ] && sudo mkdir -m 755 -p $CEILOMETER_CONF_DIR @@ -162,7 +162,7 @@ function configure_ceilometer() { fi } -function configure_mongodb() { +function configure_mongodb { if is_fedora; then # install mongodb client install_package mongodb @@ -174,7 +174,7 @@ function configure_mongodb() { } # init_ceilometer() - Initialize etc. -function init_ceilometer() { +function init_ceilometer { # Create cache dir sudo mkdir -p $CEILOMETER_AUTH_CACHE_DIR sudo chown $STACK_USER $CEILOMETER_AUTH_CACHE_DIR @@ -187,17 +187,17 @@ function init_ceilometer() { } # install_ceilometer() - Collect source and prepare -function install_ceilometer() { +function install_ceilometer { git_clone $CEILOMETER_REPO $CEILOMETER_DIR $CEILOMETER_BRANCH } # install_ceilometerclient() - Collect source and prepare -function install_ceilometerclient() { +function install_ceilometerclient { git_clone $CEILOMETERCLIENT_REPO $CEILOMETERCLIENT_DIR $CEILOMETERCLIENT_BRANCH } # start_ceilometer() - Start running processes, including screen -function start_ceilometer() { +function start_ceilometer { if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then screen_it ceilometer-acompute "cd ; sg $LIBVIRT_GROUP \"ceilometer-agent-compute --config-file $CEILOMETER_CONF\"" fi @@ -216,7 +216,7 @@ function start_ceilometer() { } # stop_ceilometer() - Stop running processes -function stop_ceilometer() { +function stop_ceilometer { # Kill the ceilometer screen windows for serv in ceilometer-acompute ceilometer-acentral ceilometer-anotification ceilometer-collector ceilometer-api ceilometer-alarm-notifier ceilometer-alarm-evaluator; do screen_stop $serv diff --git a/lib/cinder b/lib/cinder index e8f30b683c..d003f5dc7b 100644 --- a/lib/cinder +++ b/lib/cinder @@ -102,7 +102,7 @@ function is_cinder_enabled { # _clean_lvm_lv removes all cinder LVM volumes # # Usage: _clean_lvm_lv $VOLUME_GROUP $VOLUME_NAME_PREFIX -function _clean_lvm_lv() { +function 
_clean_lvm_lv { local vg=$1 local lv_prefix=$2 @@ -119,7 +119,7 @@ function _clean_lvm_lv() { # volume group used by cinder # # Usage: _clean_lvm_backing_file() $VOLUME_GROUP -function _clean_lvm_backing_file() { +function _clean_lvm_backing_file { local vg=$1 # if there is no logical volume left, it's safe to attempt a cleanup @@ -136,7 +136,7 @@ function _clean_lvm_backing_file() { # cleanup_cinder() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up -function cleanup_cinder() { +function cleanup_cinder { # ensure the volume group is cleared up because fails might # leave dead volumes in the group TARGETS=$(sudo tgtadm --op show --mode target) @@ -181,7 +181,7 @@ function cleanup_cinder() { } # configure_cinder_rootwrap() - configure Cinder's rootwrap -function configure_cinder_rootwrap() { +function configure_cinder_rootwrap { # Set the paths of certain binaries CINDER_ROOTWRAP=$(get_rootwrap_location cinder) @@ -212,7 +212,7 @@ function configure_cinder_rootwrap() { } # configure_cinder() - Set config files, create data dirs, etc -function configure_cinder() { +function configure_cinder { if [[ ! 
-d $CINDER_CONF_DIR ]]; then sudo mkdir -p $CINDER_CONF_DIR fi @@ -328,7 +328,7 @@ function configure_cinder() { # service cinder admin # if enabled # Migrated from keystone_data.sh -create_cinder_accounts() { +function create_cinder_accounts { SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }") @@ -373,14 +373,14 @@ create_cinder_accounts() { } # create_cinder_cache_dir() - Part of the init_cinder() process -function create_cinder_cache_dir() { +function create_cinder_cache_dir { # Create cache dir sudo mkdir -p $CINDER_AUTH_CACHE_DIR sudo chown $STACK_USER $CINDER_AUTH_CACHE_DIR rm -f $CINDER_AUTH_CACHE_DIR/* } -create_cinder_volume_group() { +function create_cinder_volume_group { # According to the ``CINDER_MULTI_LVM_BACKEND`` value, configure one or two default volumes # group called ``stack-volumes`` (and ``stack-volumes2``) for the volume # service if it (they) does (do) not yet exist. If you don't wish to use a @@ -428,7 +428,7 @@ create_cinder_volume_group() { } # init_cinder() - Initialize database and volume group -function init_cinder() { +function init_cinder { # Force nova volumes off NOVA_ENABLED_APIS=$(echo $NOVA_ENABLED_APIS | sed "s/osapi_volume,//") @@ -464,20 +464,20 @@ function init_cinder() { } # install_cinder() - Collect source and prepare -function install_cinder() { +function install_cinder { git_clone $CINDER_REPO $CINDER_DIR $CINDER_BRANCH setup_develop $CINDER_DIR } # install_cinderclient() - Collect source and prepare -function install_cinderclient() { +function install_cinderclient { git_clone $CINDERCLIENT_REPO $CINDERCLIENT_DIR $CINDERCLIENT_BRANCH setup_develop $CINDERCLIENT_DIR sudo install -D -m 0644 -o $STACK_USER {$CINDERCLIENT_DIR/tools/,/etc/bash_completion.d/}cinder.bash_completion } # apply config.d approach for cinder volumes directory -function _configure_tgt_for_config_d() { +function _configure_tgt_for_config_d { if [[ ! 
-d /etc/tgt/stack.d/ ]]; then sudo ln -sf $CINDER_STATE_PATH/volumes /etc/tgt/stack.d echo "include /etc/tgt/stack.d/*" | sudo tee -a /etc/tgt/targets.conf @@ -485,7 +485,7 @@ function _configure_tgt_for_config_d() { } # start_cinder() - Start running processes, including screen -function start_cinder() { +function start_cinder { if is_service_enabled c-vol; then # Delete any old stack.conf sudo rm -f /etc/tgt/conf.d/stack.conf @@ -529,7 +529,7 @@ function start_cinder() { } # stop_cinder() - Stop running processes -function stop_cinder() { +function stop_cinder { # Kill the cinder screen windows for serv in c-api c-bak c-sch c-vol; do screen_stop $serv diff --git a/lib/cinder_plugins/XenAPINFS b/lib/cinder_plugins/XenAPINFS index 72e1c1398c..fa10715bdf 100644 --- a/lib/cinder_plugins/XenAPINFS +++ b/lib/cinder_plugins/XenAPINFS @@ -27,7 +27,7 @@ set +o xtrace # ------------ # configure_cinder_driver - Set config files, create data dirs, etc -function configure_cinder_driver() { +function configure_cinder_driver { iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.xenapi.sm.XenAPINFSDriver" iniset $CINDER_CONF DEFAULT xenapi_connection_url "$CINDER_XENAPI_CONNECTION_URL" iniset $CINDER_CONF DEFAULT xenapi_connection_username "$CINDER_XENAPI_CONNECTION_USERNAME" diff --git a/lib/cinder_plugins/glusterfs b/lib/cinder_plugins/glusterfs index a0c5ae8d5e..b4196e4738 100644 --- a/lib/cinder_plugins/glusterfs +++ b/lib/cinder_plugins/glusterfs @@ -27,7 +27,7 @@ set +o xtrace # ------------ # configure_cinder_driver - Set config files, create data dirs, etc -function configure_cinder_driver() { +function configure_cinder_driver { # To use glusterfs, set the following in localrc: # CINDER_DRIVER=glusterfs # CINDER_GLUSTERFS_SHARES="127.0.0.1:/vol1;127.0.0.1:/vol2" diff --git a/lib/cinder_plugins/nfs b/lib/cinder_plugins/nfs index ea2c9ce665..2d9d875245 100644 --- a/lib/cinder_plugins/nfs +++ b/lib/cinder_plugins/nfs @@ -27,7 +27,7 @@ set +o xtrace # 
------------ # configure_cinder_driver - Set config files, create data dirs, etc -function configure_cinder_driver() { +function configure_cinder_driver { iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.nfs.NfsDriver" iniset $CINDER_CONF DEFAULT nfs_shares_config "$CINDER_CONF_DIR/nfs_shares.conf" echo "$CINDER_NFS_SERVERPATH" | sudo tee "$CINDER_CONF_DIR/nfs_shares.conf" diff --git a/lib/cinder_plugins/sheepdog b/lib/cinder_plugins/sheepdog index 4435932371..30c60c6efe 100644 --- a/lib/cinder_plugins/sheepdog +++ b/lib/cinder_plugins/sheepdog @@ -27,7 +27,7 @@ set +o xtrace # ------------ # configure_cinder_driver - Set config files, create data dirs, etc -function configure_cinder_driver() { +function configure_cinder_driver { iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.sheepdog.SheepdogDriver" } diff --git a/lib/cinder_plugins/solidfire b/lib/cinder_plugins/solidfire index 47c113e1a2..2c970b5adf 100644 --- a/lib/cinder_plugins/solidfire +++ b/lib/cinder_plugins/solidfire @@ -27,7 +27,7 @@ set +o xtrace # ------------ # configure_cinder_driver - Set config files, create data dirs, etc -function configure_cinder_driver() { +function configure_cinder_driver { # To use solidfire, set the following in localrc: # CINDER_DRIVER=solidfire # SAN_IP= diff --git a/lib/cinder_plugins/vsphere b/lib/cinder_plugins/vsphere index c8cab6a8c1..436b060377 100644 --- a/lib/cinder_plugins/vsphere +++ b/lib/cinder_plugins/vsphere @@ -27,7 +27,7 @@ set +o xtrace # ------------ # configure_cinder_driver - Set config files, create data dirs, etc -function configure_cinder_driver() { +function configure_cinder_driver { iniset $CINDER_CONF DEFAULT vmware_host_ip "$VMWAREAPI_IP" iniset $CINDER_CONF DEFAULT vmware_host_username "$VMWAREAPI_USER" iniset $CINDER_CONF DEFAULT vmware_host_password "$VMWAREAPI_PASSWORD" diff --git a/lib/config b/lib/config index 1678aec3fc..552aeb0ad1 100644 --- a/lib/config +++ b/lib/config @@ -25,7 +25,7 @@ 
CONFIG_AWK_CMD=${CONFIG_AWK_CMD:-awk} # Get the section for the specific group and config file # get_meta_section infile group configfile -function get_meta_section() { +function get_meta_section { local file=$1 local matchgroup=$2 local configfile=$3 @@ -57,7 +57,7 @@ function get_meta_section() { # Get a list of config files for a specific group # get_meta_section_files infile group -function get_meta_section_files() { +function get_meta_section_files { local file=$1 local matchgroup=$2 @@ -77,7 +77,7 @@ function get_meta_section_files() { # Merge the contents of a meta-config file into its destination config file # If configfile does not exist it will be created. # merge_config_file infile group configfile -function merge_config_file() { +function merge_config_file { local file=$1 local matchgroup=$2 local configfile=$3 @@ -106,7 +106,7 @@ function merge_config_file() { # Merge all of the files specified by group # merge_config_group infile group [group ...] -function merge_config_group() { +function merge_config_group { local localfile=$1; shift local matchgroups=$@ diff --git a/lib/gantt b/lib/gantt index 832d7590df..8db2ca1406 100644 --- a/lib/gantt +++ b/lib/gantt @@ -47,42 +47,42 @@ GANTT_BIN_DIR=$(get_python_exec_prefix) # cleanup_gantt() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up -function cleanup_gantt() { +function cleanup_gantt { echo "Cleanup Gantt" } # configure_gantt() - Set config files, create data dirs, etc -function configure_gantt() { +function configure_gantt { echo "Configure Gantt" } # init_gantt() - Initialize database and volume group -function init_gantt() { +function init_gantt { echo "Initialize Gantt" } # install_gantt() - Collect source and prepare -function install_gantt() { +function install_gantt { git_clone $GANTT_REPO $GANTT_DIR $GANTT_BRANCH setup_develop $GANTT_DIR } # install_ganttclient() - Collect source and prepare -function install_ganttclient() { +function 
install_ganttclient { echo "Install Gantt Client" # git_clone $GANTTCLIENT_REPO $GANTTCLIENT_DIR $GANTTCLIENT_BRANCH # setup_develop $GANTTCLIENT_DIR } # start_gantt() - Start running processes, including screen -function start_gantt() { +function start_gantt { if is_service_enabled gantt; then screen_it gantt "cd $GANTT_DIR && $GANTT_BIN_DIR/gantt-scheduler --config-file $GANTT_CONF" fi } # stop_gantt() - Stop running processes -function stop_gantt() { +function stop_gantt { echo "Stop Gantt" screen_stop gantt } diff --git a/lib/glance b/lib/glance index 1ebeeb3b2e..8a4c21b3f2 100644 --- a/lib/glance +++ b/lib/glance @@ -68,14 +68,14 @@ function is_glance_enabled { # cleanup_glance() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up -function cleanup_glance() { +function cleanup_glance { # kill instances (nova) # delete image files (glance) sudo rm -rf $GLANCE_CACHE_DIR $GLANCE_IMAGE_DIR $GLANCE_AUTH_CACHE_DIR } # configure_glance() - Set config files, create data dirs, etc -function configure_glance() { +function configure_glance { if [[ ! -d $GLANCE_CONF_DIR ]]; then sudo mkdir -p $GLANCE_CONF_DIR fi @@ -160,7 +160,7 @@ function configure_glance() { } # create_glance_cache_dir() - Part of the init_glance() process -function create_glance_cache_dir() { +function create_glance_cache_dir { # Create cache dir sudo mkdir -p $GLANCE_AUTH_CACHE_DIR/api sudo chown $STACK_USER $GLANCE_AUTH_CACHE_DIR/api @@ -171,7 +171,7 @@ function create_glance_cache_dir() { } # init_glance() - Initialize databases, etc. 
-function init_glance() { +function init_glance { # Delete existing images rm -rf $GLANCE_IMAGE_DIR mkdir -p $GLANCE_IMAGE_DIR @@ -190,19 +190,19 @@ function init_glance() { } # install_glanceclient() - Collect source and prepare -function install_glanceclient() { +function install_glanceclient { git_clone $GLANCECLIENT_REPO $GLANCECLIENT_DIR $GLANCECLIENT_BRANCH setup_develop $GLANCECLIENT_DIR } # install_glance() - Collect source and prepare -function install_glance() { +function install_glance { git_clone $GLANCE_REPO $GLANCE_DIR $GLANCE_BRANCH setup_develop $GLANCE_DIR } # start_glance() - Start running processes, including screen -function start_glance() { +function start_glance { screen_it g-reg "cd $GLANCE_DIR; $GLANCE_BIN_DIR/glance-registry --config-file=$GLANCE_CONF_DIR/glance-registry.conf" screen_it g-api "cd $GLANCE_DIR; $GLANCE_BIN_DIR/glance-api --config-file=$GLANCE_CONF_DIR/glance-api.conf" echo "Waiting for g-api ($GLANCE_HOSTPORT) to start..." @@ -212,7 +212,7 @@ function start_glance() { } # stop_glance() - Stop running processes -function stop_glance() { +function stop_glance { # Kill the Glance screen windows screen_stop g-api screen_stop g-reg diff --git a/lib/heat b/lib/heat index 972c35ce72..d0c0302016 100644 --- a/lib/heat +++ b/lib/heat @@ -47,14 +47,14 @@ TEMPEST_SERVICES+=,heat # cleanup_heat() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up -function cleanup_heat() { +function cleanup_heat { sudo rm -rf $HEAT_AUTH_CACHE_DIR sudo rm -rf $HEAT_ENV_DIR sudo rm -rf $HEAT_TEMPLATES_DIR } # configure_heat() - Set config files, create data dirs, etc -function configure_heat() { +function configure_heat { setup_develop $HEAT_DIR if [[ ! 
-d $HEAT_CONF_DIR ]]; then @@ -137,7 +137,7 @@ function configure_heat() { } # init_heat() - Initialize database -function init_heat() { +function init_heat { # (re)create heat database recreate_database heat utf8 @@ -147,26 +147,26 @@ function init_heat() { } # create_heat_cache_dir() - Part of the init_heat() process -function create_heat_cache_dir() { +function create_heat_cache_dir { # Create cache dirs sudo mkdir -p $HEAT_AUTH_CACHE_DIR sudo chown $STACK_USER $HEAT_AUTH_CACHE_DIR } # install_heatclient() - Collect source and prepare -function install_heatclient() { +function install_heatclient { git_clone $HEATCLIENT_REPO $HEATCLIENT_DIR $HEATCLIENT_BRANCH setup_develop $HEATCLIENT_DIR sudo install -D -m 0644 -o $STACK_USER {$HEATCLIENT_DIR/tools/,/etc/bash_completion.d/}heat.bash_completion } # install_heat() - Collect source and prepare -function install_heat() { +function install_heat { git_clone $HEAT_REPO $HEAT_DIR $HEAT_BRANCH } # start_heat() - Start running processes, including screen -function start_heat() { +function start_heat { screen_it h-eng "cd $HEAT_DIR; bin/heat-engine --config-file=$HEAT_CONF" screen_it h-api "cd $HEAT_DIR; bin/heat-api --config-file=$HEAT_CONF" screen_it h-api-cfn "cd $HEAT_DIR; bin/heat-api-cfn --config-file=$HEAT_CONF" @@ -174,7 +174,7 @@ function start_heat() { } # stop_heat() - Stop running processes -function stop_heat() { +function stop_heat { # Kill the screen windows for serv in h-eng h-api h-api-cfn h-api-cw; do screen_stop $serv @@ -198,7 +198,7 @@ function disk_image_create { # create_heat_accounts() - Set up common required heat accounts # Note this is in addition to what is in files/keystone_data.sh -function create_heat_accounts() { +function create_heat_accounts { # Note we have to pass token/endpoint here because the current endpoint and # version negotiation in OSC means just --os-identity-api-version=3 won't work KS_ENDPOINT_V3="$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v3" 
diff --git a/lib/horizon b/lib/horizon index 2f5795d1ca..27c2d26a01 100644 --- a/lib/horizon +++ b/lib/horizon @@ -39,7 +39,7 @@ TEMPEST_SERVICES+=,horizon # --------- # utility method of setting python option -function _horizon_config_set() { +function _horizon_config_set { local file=$1 local section=$2 local option=$3 @@ -64,7 +64,7 @@ function _horizon_config_set() { # cleanup_horizon() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up -function cleanup_horizon() { +function cleanup_horizon { if [[ is_fedora && $DISTRO =~ (rhel6) ]]; then # If ``/usr/bin/node`` points into ``$DEST`` # we installed it via ``install_nodejs`` @@ -75,12 +75,12 @@ function cleanup_horizon() { } # configure_horizon() - Set config files, create data dirs, etc -function configure_horizon() { +function configure_horizon { setup_develop $HORIZON_DIR } # init_horizon() - Initialize databases, etc. -function init_horizon() { +function init_horizon { # ``local_settings.py`` is used to override horizon default settings. 
local_settings=$HORIZON_DIR/openstack_dashboard/local/local_settings.py cp $HORIZON_SETTINGS $local_settings @@ -143,7 +143,7 @@ function init_horizon() { } # install_horizon() - Collect source and prepare -function install_horizon() { +function install_horizon { # Apache installation, because we mark it NOPRIME install_apache_wsgi @@ -151,13 +151,13 @@ function install_horizon() { } # start_horizon() - Start running processes, including screen -function start_horizon() { +function start_horizon { restart_apache_server screen_it horizon "cd $HORIZON_DIR && sudo tail -f /var/log/$APACHE_NAME/horizon_error.log" } # stop_horizon() - Stop running processes (non-screen) -function stop_horizon() { +function stop_horizon { stop_apache_server } diff --git a/lib/infra b/lib/infra index 0dcf0ad980..7f70ff2787 100644 --- a/lib/infra +++ b/lib/infra @@ -27,7 +27,7 @@ REQUIREMENTS_DIR=$DEST/requirements # ------------ # unfubar_setuptools() - Unbreak the giant mess that is the current state of setuptools -function unfubar_setuptools() { +function unfubar_setuptools { # this is a giant game of who's on first, but it does consistently work # there is hope that upstream python packaging fixes this in the future echo_summary "Unbreaking setuptools" @@ -40,7 +40,7 @@ function unfubar_setuptools() { # install_infra() - Collect source and prepare -function install_infra() { +function install_infra { # bring down global requirements git_clone $REQUIREMENTS_REPO $REQUIREMENTS_DIR $REQUIREMENTS_BRANCH diff --git a/lib/ironic b/lib/ironic index 607b13125a..177188dd06 100644 --- a/lib/ironic +++ b/lib/ironic @@ -57,25 +57,25 @@ function is_ironic_enabled { } # install_ironic() - Collect source and prepare -function install_ironic() { +function install_ironic { git_clone $IRONIC_REPO $IRONIC_DIR $IRONIC_BRANCH setup_develop $IRONIC_DIR } # install_ironicclient() - Collect sources and prepare -function install_ironicclient() { +function install_ironicclient { git_clone $IRONICCLIENT_REPO 
$IRONICCLIENT_DIR $IRONICCLIENT_BRANCH setup_develop $IRONICCLIENT_DIR } # cleanup_ironic() - Remove residual data files, anything left over from previous # runs that would need to clean up. -function cleanup_ironic() { +function cleanup_ironic { sudo rm -rf $IRONIC_AUTH_CACHE_DIR } # configure_ironic() - Set config files, create data dirs, etc -function configure_ironic() { +function configure_ironic { if [[ ! -d $IRONIC_CONF_DIR ]]; then sudo mkdir -p $IRONIC_CONF_DIR fi @@ -101,7 +101,7 @@ function configure_ironic() { # configure_ironic_api() - Is used by configure_ironic(). Performs # API specific configuration. -function configure_ironic_api() { +function configure_ironic_api { iniset $IRONIC_CONF_FILE DEFAULT auth_strategy keystone iniset $IRONIC_CONF_FILE DEFAULT policy_file $IRONIC_POLICY_JSON iniset $IRONIC_CONF_FILE keystone_authtoken auth_host $KEYSTONE_AUTH_HOST @@ -120,7 +120,7 @@ function configure_ironic_api() { # configure_ironic_conductor() - Is used by configure_ironic(). # Sets conductor specific settings. 
-function configure_ironic_conductor() { +function configure_ironic_conductor { cp $IRONIC_DIR/etc/ironic/rootwrap.conf $IRONIC_ROOTWRAP_CONF cp -r $IRONIC_DIR/etc/ironic/rootwrap.d $IRONIC_CONF_DIR @@ -128,7 +128,7 @@ function configure_ironic_conductor() { } # create_ironic_cache_dir() - Part of the init_ironic() process -function create_ironic_cache_dir() { +function create_ironic_cache_dir { # Create cache dir sudo mkdir -p $IRONIC_AUTH_CACHE_DIR/api sudo chown $STACK_USER $IRONIC_AUTH_CACHE_DIR/api @@ -143,7 +143,7 @@ function create_ironic_cache_dir() { # Tenant User Roles # ------------------------------------------------------------------ # service ironic admin # if enabled -create_ironic_accounts() { +function create_ironic_accounts { SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }") @@ -178,7 +178,7 @@ create_ironic_accounts() { # init_ironic() - Initialize databases, etc. -function init_ironic() { +function init_ironic { # (Re)create ironic database recreate_database ironic utf8 @@ -192,7 +192,7 @@ function init_ironic() { } # start_ironic() - Start running processes, including screen -function start_ironic() { +function start_ironic { # Start Ironic API server, if enabled. if is_service_enabled ir-api; then start_ironic_api @@ -206,7 +206,7 @@ function start_ironic() { # start_ironic_api() - Used by start_ironic(). # Starts Ironic API server. -function start_ironic_api() { +function start_ironic_api { screen_it ir-api "cd $IRONIC_DIR; $IRONIC_BIN_DIR/ironic-api --config-file=$IRONIC_CONF_FILE" echo "Waiting for ir-api ($IRONIC_HOSTPORT) to start..." if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget --no-proxy -q -O- http://$IRONIC_HOSTPORT; do sleep 1; done"; then @@ -216,13 +216,13 @@ function start_ironic_api() { # start_ironic_conductor() - Used by start_ironic(). # Starts Ironic conductor. 
-function start_ironic_conductor() { +function start_ironic_conductor { screen_it ir-cond "cd $IRONIC_DIR; $IRONIC_BIN_DIR/ironic-conductor --config-file=$IRONIC_CONF_FILE" # TODO(romcheg): Find a way to check whether the conductor has started. } # stop_ironic() - Stop running processes -function stop_ironic() { +function stop_ironic { # Kill the Ironic screen windows screen -S $SCREEN_NAME -p ir-api -X kill screen -S $SCREEN_NAME -p ir-cond -X kill diff --git a/lib/keystone b/lib/keystone index 73af1d356d..0548c24e87 100644 --- a/lib/keystone +++ b/lib/keystone @@ -90,7 +90,7 @@ fi # --------- # cleanup_keystone() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up -function cleanup_keystone() { +function cleanup_keystone { # kill instances (nova) # delete image files (glance) # This function intentionally left blank @@ -98,14 +98,14 @@ function cleanup_keystone() { } # _cleanup_keystone_apache_wsgi() - Remove wsgi files, disable and remove apache vhost file -function _cleanup_keystone_apache_wsgi() { +function _cleanup_keystone_apache_wsgi { sudo rm -f $KEYSTONE_WSGI_DIR/*.wsgi disable_apache_site keystone sudo rm -f /etc/$APACHE_NAME/$APACHE_CONF_DIR/keystone } # _config_keystone_apache_wsgi() - Set WSGI config files of Keystone -function _config_keystone_apache_wsgi() { +function _config_keystone_apache_wsgi { sudo mkdir -p $KEYSTONE_WSGI_DIR # copy proxy vhost and wsgi file @@ -125,7 +125,7 @@ function _config_keystone_apache_wsgi() { } # configure_keystone() - Set config files, create data dirs, etc -function configure_keystone() { +function configure_keystone { if [[ ! 
-d $KEYSTONE_CONF_DIR ]]; then sudo mkdir -p $KEYSTONE_CONF_DIR fi @@ -272,7 +272,7 @@ function configure_keystone() { # invisible_to_admin demo Member # Migrated from keystone_data.sh -create_keystone_accounts() { +function create_keystone_accounts { # admin ADMIN_TENANT=$(openstack project create \ @@ -346,14 +346,14 @@ create_keystone_accounts() { # Configure the API version for the OpenStack projects. # configure_API_version conf_file version -function configure_API_version() { +function configure_API_version { local conf_file=$1 local api_version=$2 iniset $conf_file keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v$api_version } # init_keystone() - Initialize databases, etc. -function init_keystone() { +function init_keystone { if is_service_enabled ldap; then init_ldap fi @@ -377,14 +377,14 @@ function init_keystone() { } # install_keystoneclient() - Collect source and prepare -function install_keystoneclient() { +function install_keystoneclient { git_clone $KEYSTONECLIENT_REPO $KEYSTONECLIENT_DIR $KEYSTONECLIENT_BRANCH setup_develop $KEYSTONECLIENT_DIR sudo install -D -m 0644 -o $STACK_USER {$KEYSTONECLIENT_DIR/tools/,/etc/bash_completion.d/}keystone.bash_completion } # install_keystone() - Collect source and prepare -function install_keystone() { +function install_keystone { # only install ldap if the service has been enabled if is_service_enabled ldap; then install_ldap @@ -408,7 +408,7 @@ function install_keystone() { } # start_keystone() - Start running processes, including screen -function start_keystone() { +function start_keystone { # Get right service port for testing local service_port=$KEYSTONE_SERVICE_PORT if is_service_enabled tls-proxy; then @@ -436,7 +436,7 @@ function start_keystone() { } # stop_keystone() - Stop running processes -function stop_keystone() { +function stop_keystone { # Kill the Keystone screen window screen_stop key } diff --git a/lib/ldap b/lib/ldap index 
e4bd41624d..51d02519af 100644 --- a/lib/ldap +++ b/lib/ldap @@ -49,7 +49,7 @@ fi # Perform common variable substitutions on the data files # _ldap_varsubst file -function _ldap_varsubst() { +function _ldap_varsubst { local infile=$1 sed -e " s|\${LDAP_OLCDB_NUMBER}|$LDAP_OLCDB_NUMBER| @@ -62,7 +62,7 @@ function _ldap_varsubst() { } # clean_ldap() - Remove ldap server -function cleanup_ldap() { +function cleanup_ldap { uninstall_package $(get_packages ldap) if is_ubuntu; then uninstall_package slapd ldap-utils libslp1 @@ -76,7 +76,7 @@ function cleanup_ldap() { # init_ldap # init_ldap() - Initialize databases, etc. -function init_ldap() { +function init_ldap { local keystone_ldif TMP_LDAP_DIR=$(mktemp -d -t ldap.$$.XXXXXXXXXX) @@ -106,7 +106,7 @@ function init_ldap() { # install_ldap # install_ldap() - Collect source and prepare -function install_ldap() { +function install_ldap { echo "Installing LDAP inside function" echo "os_VENDOR is $os_VENDOR" @@ -143,17 +143,17 @@ function install_ldap() { } # start_ldap() - Start LDAP -function start_ldap() { +function start_ldap { sudo service $LDAP_SERVICE_NAME restart } # stop_ldap() - Stop LDAP -function stop_ldap() { +function stop_ldap { sudo service $LDAP_SERVICE_NAME stop } # clear_ldap_state() - Clear LDAP State -function clear_ldap_state() { +function clear_ldap_state { ldapdelete -x -w $LDAP_PASSWORD -D "$LDAP_MANAGER_DN" -H $LDAP_URL -r "$LDAP_BASE_DN" } diff --git a/lib/marconi b/lib/marconi index 1c8be49291..8cfc55c1dd 100644 --- a/lib/marconi +++ b/lib/marconi @@ -73,19 +73,19 @@ function is_marconi_enabled { # cleanup_marconi() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up -function cleanup_marconi() { +function cleanup_marconi { if ! timeout $SERVICE_TIMEOUT sh -c "while ! 
mongo marconi --eval 'db.dropDatabase();'; do sleep 1; done"; then die $LINENO "Mongo DB did not start" fi } # configure_marconiclient() - Set config files, create data dirs, etc -function configure_marconiclient() { +function configure_marconiclient { setup_develop $MARCONICLIENT_DIR } # configure_marconi() - Set config files, create data dirs, etc -function configure_marconi() { +function configure_marconi { setup_develop $MARCONI_DIR [ ! -d $MARCONI_CONF_DIR ] && sudo mkdir -m 755 -p $MARCONI_CONF_DIR @@ -110,7 +110,7 @@ function configure_marconi() { fi } -function configure_mongodb() { +function configure_mongodb { # Set nssize to 2GB. This increases the number of namespaces supported # # per database. if is_ubuntu; then @@ -126,7 +126,7 @@ function configure_mongodb() { } # init_marconi() - Initialize etc. -function init_marconi() { +function init_marconi { # Create cache dir sudo mkdir -p $MARCONI_AUTH_CACHE_DIR sudo chown $STACK_USER $MARCONI_AUTH_CACHE_DIR @@ -134,19 +134,19 @@ function init_marconi() { } # install_marconi() - Collect source and prepare -function install_marconi() { +function install_marconi { git_clone $MARCONI_REPO $MARCONI_DIR $MARCONI_BRANCH setup_develop $MARCONI_DIR } # install_marconiclient() - Collect source and prepare -function install_marconiclient() { +function install_marconiclient { git_clone $MARCONICLIENT_REPO $MARCONICLIENT_DIR $MARCONICLIENT_BRANCH setup_develop $MARCONICLIENT_DIR } # start_marconi() - Start running processes, including screen -function start_marconi() { +function start_marconi { screen_it marconi-server "marconi-server --config-file $MARCONI_CONF" echo "Waiting for Marconi to start..." if ! timeout $SERVICE_TIMEOUT sh -c "while ! 
wget --no-proxy -q -O- $MARCONI_SERVICE_PROTOCOL://$MARCONI_SERVICE_HOST:$MARCONI_SERVICE_PORT/v1/health; do sleep 1; done"; then @@ -155,14 +155,14 @@ function start_marconi() { } # stop_marconi() - Stop running processes -function stop_marconi() { +function stop_marconi { # Kill the marconi screen windows for serv in marconi-server; do screen -S $SCREEN_NAME -p $serv -X kill done } -function create_marconi_accounts() { +function create_marconi_accounts { SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }") diff --git a/lib/neutron b/lib/neutron index df276c71d5..35575c0379 100644 --- a/lib/neutron +++ b/lib/neutron @@ -253,7 +253,7 @@ function is_neutron_enabled { # configure_neutron() # Set common config for all neutron server and agents. -function configure_neutron() { +function configure_neutron { _configure_neutron_common iniset_rpc_backend neutron $NEUTRON_CONF DEFAULT @@ -289,7 +289,7 @@ function configure_neutron() { _configure_neutron_debug_command } -function create_nova_conf_neutron() { +function create_nova_conf_neutron { iniset $NOVA_CONF DEFAULT network_api_class "nova.network.neutronv2.api.API" iniset $NOVA_CONF DEFAULT neutron_admin_username "$Q_ADMIN_USERNAME" iniset $NOVA_CONF DEFAULT neutron_admin_password "$SERVICE_PASSWORD" @@ -316,7 +316,7 @@ function create_nova_conf_neutron() { } # create_neutron_cache_dir() - Part of the _neutron_setup_keystone() process -function create_neutron_cache_dir() { +function create_neutron_cache_dir { # Create cache dir sudo mkdir -p $NEUTRON_AUTH_CACHE_DIR sudo chown $STACK_USER $NEUTRON_AUTH_CACHE_DIR @@ -330,7 +330,7 @@ function create_neutron_cache_dir() { # service neutron admin # if enabled # Migrated from keystone_data.sh -function create_neutron_accounts() { +function create_neutron_accounts { SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") ADMIN_ROLE=$(openstack 
role list | awk "/ admin / { print \$2 }") @@ -362,7 +362,7 @@ function create_neutron_accounts() { fi } -function create_neutron_initial_network() { +function create_neutron_initial_network { TENANT_ID=$(openstack project list | grep " demo " | get_field 1) die_if_not_set $LINENO TENANT_ID "Failure retrieving TENANT_ID for demo" @@ -429,27 +429,27 @@ function create_neutron_initial_network() { } # init_neutron() - Initialize databases, etc. -function init_neutron() { +function init_neutron { recreate_database $Q_DB_NAME utf8 # Run Neutron db migrations $NEUTRON_BIN_DIR/neutron-db-manage --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE upgrade head } # install_neutron() - Collect source and prepare -function install_neutron() { +function install_neutron { git_clone $NEUTRON_REPO $NEUTRON_DIR $NEUTRON_BRANCH setup_develop $NEUTRON_DIR } # install_neutronclient() - Collect source and prepare -function install_neutronclient() { +function install_neutronclient { git_clone $NEUTRONCLIENT_REPO $NEUTRONCLIENT_DIR $NEUTRONCLIENT_BRANCH setup_develop $NEUTRONCLIENT_DIR sudo install -D -m 0644 -o $STACK_USER {$NEUTRONCLIENT_DIR/tools/,/etc/bash_completion.d/}neutron.bash_completion } # install_neutron_agent_packages() - Collect source and prepare -function install_neutron_agent_packages() { +function install_neutron_agent_packages { # install packages that are specific to plugin agent(s) if is_service_enabled q-agt q-dhcp q-l3; then neutron_plugin_install_agent_packages @@ -461,7 +461,7 @@ function install_neutron_agent_packages() { } # Start running processes, including screen -function start_neutron_service_and_check() { +function start_neutron_service_and_check { # build config-file options local cfg_file local CFG_FILE_OPTIONS="--config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE" @@ -477,7 +477,7 @@ function start_neutron_service_and_check() { } # Start running processes, including screen -function start_neutron_agents() { +function 
start_neutron_agents { # Start up the neutron agents if enabled screen_it q-agt "cd $NEUTRON_DIR && python $AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE" screen_it q-dhcp "cd $NEUTRON_DIR && python $AGENT_DHCP_BINARY --config-file $NEUTRON_CONF --config-file=$Q_DHCP_CONF_FILE" @@ -510,7 +510,7 @@ function start_neutron_agents() { } # stop_neutron() - Stop running processes (non-screen) -function stop_neutron() { +function stop_neutron { if is_service_enabled q-dhcp; then pid=$(ps aux | awk '/[d]nsmasq.+interface=(tap|ns-)/ { print $2 }') [ ! -z "$pid" ] && sudo kill -9 $pid @@ -535,7 +535,7 @@ function stop_neutron() { # cleanup_neutron() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up -function cleanup_neutron() { +function cleanup_neutron { if is_neutron_ovs_base_plugin; then neutron_ovs_base_cleanup fi @@ -549,7 +549,7 @@ function cleanup_neutron() { # _configure_neutron_common() # Set common config for all neutron server and agents. # This MUST be called before other ``_configure_neutron_*`` functions. -function _configure_neutron_common() { +function _configure_neutron_common { # Put config files in ``NEUTRON_CONF_DIR`` for everyone to find if [[ ! 
-d $NEUTRON_CONF_DIR ]]; then sudo mkdir -p $NEUTRON_CONF_DIR @@ -611,7 +611,7 @@ function _configure_neutron_common() { _neutron_setup_rootwrap } -function _configure_neutron_debug_command() { +function _configure_neutron_debug_command { if [[ "$Q_USE_DEBUG_COMMAND" != "True" ]]; then return fi @@ -628,7 +628,7 @@ function _configure_neutron_debug_command() { neutron_plugin_configure_debug_command } -function _configure_neutron_dhcp_agent() { +function _configure_neutron_dhcp_agent { AGENT_DHCP_BINARY="$NEUTRON_BIN_DIR/neutron-dhcp-agent" Q_DHCP_CONF_FILE=$NEUTRON_CONF_DIR/dhcp_agent.ini @@ -652,7 +652,7 @@ function _configure_neutron_dhcp_agent() { neutron_plugin_configure_dhcp_agent } -function _configure_neutron_l3_agent() { +function _configure_neutron_l3_agent { Q_L3_ENABLED=True # for l3-agent, only use per tenant router if we have namespaces Q_L3_ROUTER_PER_TENANT=$Q_USE_NAMESPACE @@ -676,7 +676,7 @@ function _configure_neutron_l3_agent() { neutron_plugin_configure_l3_agent } -function _configure_neutron_metadata_agent() { +function _configure_neutron_metadata_agent { AGENT_META_BINARY="$NEUTRON_BIN_DIR/neutron-metadata-agent" Q_META_CONF_FILE=$NEUTRON_CONF_DIR/metadata_agent.ini @@ -691,30 +691,29 @@ function _configure_neutron_metadata_agent() { } -function _configure_neutron_lbaas() { +function _configure_neutron_lbaas { neutron_agent_lbaas_configure_common neutron_agent_lbaas_configure_agent } -function _configure_neutron_metering() { +function _configure_neutron_metering { neutron_agent_metering_configure_common neutron_agent_metering_configure_agent } -function _configure_neutron_fwaas() { +function _configure_neutron_fwaas { neutron_fwaas_configure_common neutron_fwaas_configure_driver } -function _configure_neutron_vpn() -{ +function _configure_neutron_vpn { neutron_vpn_install_agent_packages neutron_vpn_configure_common } # _configure_neutron_plugin_agent() - Set config files for neutron plugin agent # It is called when q-agt is enabled. 
-function _configure_neutron_plugin_agent() { +function _configure_neutron_plugin_agent { # Specify the default root helper prior to agent configuration to # ensure that an agent's configuration can override the default iniset /$Q_PLUGIN_CONF_FILE agent root_helper "$Q_RR_COMMAND" @@ -727,7 +726,7 @@ function _configure_neutron_plugin_agent() { # _configure_neutron_service() - Set config files for neutron service # It is called when q-svc is enabled. -function _configure_neutron_service() { +function _configure_neutron_service { Q_API_PASTE_FILE=$NEUTRON_CONF_DIR/api-paste.ini Q_POLICY_FILE=$NEUTRON_CONF_DIR/policy.json @@ -765,7 +764,7 @@ function _configure_neutron_service() { #------------------ # _neutron_service_plugin_class_add() - add service plugin class -function _neutron_service_plugin_class_add() { +function _neutron_service_plugin_class_add { local service_plugin_class=$1 if [[ $Q_SERVICE_PLUGIN_CLASSES == '' ]]; then Q_SERVICE_PLUGIN_CLASSES=$service_plugin_class @@ -775,7 +774,7 @@ function _neutron_service_plugin_class_add() { } # _neutron_setup_rootwrap() - configure Neutron's rootwrap -function _neutron_setup_rootwrap() { +function _neutron_setup_rootwrap { if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then return fi @@ -815,7 +814,7 @@ function _neutron_setup_rootwrap() { } # Configures keystone integration for neutron service and agents -function _neutron_setup_keystone() { +function _neutron_setup_keystone { local conf_file=$1 local section=$2 local use_auth_url=$3 @@ -842,7 +841,7 @@ function _neutron_setup_keystone() { fi } -function _neutron_setup_interface_driver() { +function _neutron_setup_interface_driver { # ovs_use_veth needs to be set before the plugin configuration # occurs to allow plugins to override the setting. 
@@ -854,14 +853,14 @@ function _neutron_setup_interface_driver() { # Functions for Neutron Exercises #-------------------------------- -function delete_probe() { +function delete_probe { local from_net="$1" net_id=`_get_net_id $from_net` probe_id=`neutron-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-list -c id -c network_id | grep $net_id | awk '{print $2}'` neutron-debug --os-tenant-name admin --os-username admin probe-delete $probe_id } -function setup_neutron_debug() { +function setup_neutron_debug { if [[ "$Q_USE_DEBUG_COMMAND" == "True" ]]; then public_net_id=`_get_net_id $PUBLIC_NETWORK_NAME` neutron-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-create --device-owner compute $public_net_id @@ -870,23 +869,23 @@ function setup_neutron_debug() { fi } -function teardown_neutron_debug() { +function teardown_neutron_debug { delete_probe $PUBLIC_NETWORK_NAME delete_probe $PRIVATE_NETWORK_NAME } -function _get_net_id() { +function _get_net_id { neutron --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD net-list | grep $1 | awk '{print $2}' } -function _get_probe_cmd_prefix() { +function _get_probe_cmd_prefix { local from_net="$1" net_id=`_get_net_id $from_net` probe_id=`neutron-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-list -c id -c network_id | grep $net_id | awk '{print $2}' | head -n 1` echo "$Q_RR_COMMAND ip netns exec qprobe-$probe_id" } -function _ping_check_neutron() { +function _ping_check_neutron { local from_net=$1 local ip=$2 local timeout_sec=$3 @@ -908,7 +907,7 @@ function _ping_check_neutron() { } # ssh check -function _ssh_check_neutron() { +function _ssh_check_neutron { local from_net=$1 local key_file=$2 local ip=$3 @@ -934,39 +933,39 @@ for f in $TOP_DIR/lib/neutron_thirdparty/*; do fi done -function _neutron_third_party_do() { +function _neutron_third_party_do { for third_party in 
${NEUTRON_THIRD_PARTIES//,/ }; do ${1}_${third_party} done } # configure_neutron_third_party() - Set config files, create data dirs, etc -function configure_neutron_third_party() { +function configure_neutron_third_party { _neutron_third_party_do configure } # init_neutron_third_party() - Initialize databases, etc. -function init_neutron_third_party() { +function init_neutron_third_party { _neutron_third_party_do init } # install_neutron_third_party() - Collect source and prepare -function install_neutron_third_party() { +function install_neutron_third_party { _neutron_third_party_do install } # start_neutron_third_party() - Start running processes, including screen -function start_neutron_third_party() { +function start_neutron_third_party { _neutron_third_party_do start } # stop_neutron_third_party - Stop running processes (non-screen) -function stop_neutron_third_party() { +function stop_neutron_third_party { _neutron_third_party_do stop } # check_neutron_third_party_integration() - Check that third party integration is sane -function check_neutron_third_party_integration() { +function check_neutron_third_party_integration { _neutron_third_party_do check } diff --git a/lib/neutron_plugins/bigswitch_floodlight b/lib/neutron_plugins/bigswitch_floodlight index 1e4aa00121..4cb0da84ea 100644 --- a/lib/neutron_plugins/bigswitch_floodlight +++ b/lib/neutron_plugins/bigswitch_floodlight @@ -8,15 +8,15 @@ set +o xtrace source $TOP_DIR/lib/neutron_plugins/ovs_base source $TOP_DIR/lib/neutron_thirdparty/bigswitch_floodlight # for third party service specific configuration values -function neutron_plugin_create_nova_conf() { +function neutron_plugin_create_nova_conf { : } -function neutron_plugin_install_agent_packages() { +function neutron_plugin_install_agent_packages { _neutron_ovs_base_install_agent_packages } -function neutron_plugin_configure_common() { +function neutron_plugin_configure_common { Q_PLUGIN_CONF_PATH=etc/neutron/plugins/bigswitch 
Q_PLUGIN_CONF_FILENAME=restproxy.ini Q_DB_NAME="restproxy_neutron" @@ -25,23 +25,23 @@ function neutron_plugin_configure_common() { BS_FL_CONTROLLER_TIMEOUT=${BS_FL_CONTROLLER_TIMEOUT:-10} } -function neutron_plugin_configure_debug_command() { +function neutron_plugin_configure_debug_command { _neutron_ovs_base_configure_debug_command } -function neutron_plugin_configure_dhcp_agent() { +function neutron_plugin_configure_dhcp_agent { : } -function neutron_plugin_configure_l3_agent() { +function neutron_plugin_configure_l3_agent { _neutron_ovs_base_configure_l3_agent } -function neutron_plugin_configure_plugin_agent() { +function neutron_plugin_configure_plugin_agent { : } -function neutron_plugin_configure_service() { +function neutron_plugin_configure_service { iniset /$Q_PLUGIN_CONF_FILE restproxy servers $BS_FL_CONTROLLERS_PORT iniset /$Q_PLUGIN_CONF_FILE restproxy servertimeout $BS_FL_CONTROLLER_TIMEOUT if [ "$BS_FL_VIF_DRIVER" = "ivs" ]; then @@ -49,7 +49,7 @@ function neutron_plugin_configure_service() { fi } -function neutron_plugin_setup_interface_driver() { +function neutron_plugin_setup_interface_driver { local conf_file=$1 if [ "$BS_FL_VIF_DRIVER" = "ivs" ]; then iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.IVSInterfaceDriver @@ -59,12 +59,12 @@ function neutron_plugin_setup_interface_driver() { } -function has_neutron_plugin_security_group() { +function has_neutron_plugin_security_group { # 1 means False here return 1 } -function neutron_plugin_check_adv_test_requirements() { +function neutron_plugin_check_adv_test_requirements { is_service_enabled q-agt && is_service_enabled q-dhcp && return 0 } diff --git a/lib/neutron_plugins/brocade b/lib/neutron_plugins/brocade index 8e18d04984..4443fa7823 100644 --- a/lib/neutron_plugins/brocade +++ b/lib/neutron_plugins/brocade @@ -5,53 +5,53 @@ BRCD_XTRACE=$(set +o | grep xtrace) set +o xtrace -function is_neutron_ovs_base_plugin() { +function is_neutron_ovs_base_plugin { return 1 } 
-function neutron_plugin_create_nova_conf() { +function neutron_plugin_create_nova_conf { NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"} } -function neutron_plugin_install_agent_packages() { +function neutron_plugin_install_agent_packages { install_package bridge-utils } -function neutron_plugin_configure_common() { +function neutron_plugin_configure_common { Q_PLUGIN_CONF_PATH=etc/neutron/plugins/brocade Q_PLUGIN_CONF_FILENAME=brocade.ini Q_DB_NAME="brcd_neutron" Q_PLUGIN_CLASS="neutron.plugins.brocade.NeutronPlugin.BrocadePluginV2" } -function neutron_plugin_configure_debug_command() { +function neutron_plugin_configure_debug_command { iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT external_network_bridge } -function neutron_plugin_configure_dhcp_agent() { +function neutron_plugin_configure_dhcp_agent { iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_agent_manager neutron.agent.dhcp_agent.DhcpAgentWithStateReport } -function neutron_plugin_configure_l3_agent() { +function neutron_plugin_configure_l3_agent { iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge iniset $Q_L3_CONF_FILE DEFAULT l3_agent_manager neutron.agent.l3_agent.L3NATAgentWithStateReport } -function neutron_plugin_configure_plugin_agent() { +function neutron_plugin_configure_plugin_agent { AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-linuxbridge-agent" } -function neutron_plugin_setup_interface_driver() { +function neutron_plugin_setup_interface_driver { local conf_file=$1 iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.BridgeInterfaceDriver } -function has_neutron_plugin_security_group() { +function has_neutron_plugin_security_group { # 0 means True here return 0 } -function neutron_plugin_check_adv_test_requirements() { +function neutron_plugin_check_adv_test_requirements { is_service_enabled q-agt && is_service_enabled q-dhcp && return 0 } diff --git a/lib/neutron_plugins/cisco b/lib/neutron_plugins/cisco index 8948be6de4..7728eb177f 100644 --- 
a/lib/neutron_plugins/cisco +++ b/lib/neutron_plugins/cisco @@ -27,12 +27,12 @@ NCCLIENT_REPO=${NCCLIENT_REPO:-${GIT_BASE}/CiscoSystems/ncclient.git} NCCLIENT_BRANCH=${NCCLIENT_BRANCH:-master} # This routine put a prefix on an existing function name -function _prefix_function() { +function _prefix_function { declare -F $1 > /dev/null || die "$1 doesn't exist" eval "$(echo "${2}_${1}()"; declare -f ${1} | tail -n +2)" } -function _has_ovs_subplugin() { +function _has_ovs_subplugin { local subplugin for subplugin in ${Q_CISCO_PLUGIN_SUBPLUGINS[@]}; do if [[ "$subplugin" == "openvswitch" ]]; then @@ -42,7 +42,7 @@ function _has_ovs_subplugin() { return 1 } -function _has_nexus_subplugin() { +function _has_nexus_subplugin { local subplugin for subplugin in ${Q_CISCO_PLUGIN_SUBPLUGINS[@]}; do if [[ "$subplugin" == "nexus" ]]; then @@ -52,7 +52,7 @@ function _has_nexus_subplugin() { return 1 } -function _has_n1kv_subplugin() { +function _has_n1kv_subplugin { local subplugin for subplugin in ${Q_CISCO_PLUGIN_SUBPLUGINS[@]}; do if [[ "$subplugin" == "n1kv" ]]; then @@ -64,7 +64,7 @@ function _has_n1kv_subplugin() { # This routine populates the cisco config file with the information for # a particular nexus switch -function _config_switch() { +function _config_switch { local cisco_cfg_file=$1 local switch_ip=$2 local username=$3 @@ -99,7 +99,7 @@ _prefix_function neutron_plugin_setup_interface_driver ovs _prefix_function has_neutron_plugin_security_group ovs # Check the version of the installed ncclient package -function check_ncclient_version() { +function check_ncclient_version { python << EOF version = '$NCCLIENT_VERSION' import sys @@ -115,13 +115,13 @@ EOF } # Install the ncclient package -function install_ncclient() { +function install_ncclient { git_clone $NCCLIENT_REPO $NCCLIENT_DIR $NCCLIENT_BRANCH (cd $NCCLIENT_DIR; sudo python setup.py install) } # Check if the required version of ncclient has been installed -function is_ncclient_installed() { +function 
is_ncclient_installed { # Check if the Cisco ncclient repository exists if [[ -d $NCCLIENT_DIR ]]; then remotes=$(cd $NCCLIENT_DIR; git remote -v | grep fetch | awk '{ print $2}') @@ -144,7 +144,7 @@ function is_ncclient_installed() { return 0 } -function has_neutron_plugin_security_group() { +function has_neutron_plugin_security_group { if _has_ovs_subplugin; then ovs_has_neutron_plugin_security_group else @@ -152,14 +152,14 @@ function has_neutron_plugin_security_group() { fi } -function is_neutron_ovs_base_plugin() { +function is_neutron_ovs_base_plugin { # Cisco uses OVS if openvswitch subplugin is deployed _has_ovs_subplugin return } # populate required nova configuration parameters -function neutron_plugin_create_nova_conf() { +function neutron_plugin_create_nova_conf { if _has_ovs_subplugin; then ovs_neutron_plugin_create_nova_conf else @@ -167,13 +167,13 @@ function neutron_plugin_create_nova_conf() { fi } -function neutron_plugin_install_agent_packages() { +function neutron_plugin_install_agent_packages { # Cisco plugin uses openvswitch to operate in one of its configurations ovs_neutron_plugin_install_agent_packages } # Configure common parameters -function neutron_plugin_configure_common() { +function neutron_plugin_configure_common { # setup default subplugins if [ ! 
-v Q_CISCO_PLUGIN_SUBPLUGINS ]; then declare -ga Q_CISCO_PLUGIN_SUBPLUGINS @@ -191,23 +191,23 @@ function neutron_plugin_configure_common() { Q_DB_NAME=cisco_neutron } -function neutron_plugin_configure_debug_command() { +function neutron_plugin_configure_debug_command { if _has_ovs_subplugin; then ovs_neutron_plugin_configure_debug_command fi } -function neutron_plugin_configure_dhcp_agent() { +function neutron_plugin_configure_dhcp_agent { iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_agent_manager neutron.agent.dhcp_agent.DhcpAgentWithStateReport } -function neutron_plugin_configure_l3_agent() { +function neutron_plugin_configure_l3_agent { if _has_ovs_subplugin; then ovs_neutron_plugin_configure_l3_agent fi } -function _configure_nexus_subplugin() { +function _configure_nexus_subplugin { local cisco_cfg_file=$1 # Install a known compatible ncclient from the Cisco repository if necessary @@ -252,7 +252,7 @@ function _configure_nexus_subplugin() { } # Configure n1kv plugin -function _configure_n1kv_subplugin() { +function _configure_n1kv_subplugin { local cisco_cfg_file=$1 # populate the cisco plugin cfg file with the VSM information @@ -270,13 +270,13 @@ function _configure_n1kv_subplugin() { _neutron_ovs_base_setup_bridge $OVS_BRIDGE } -function neutron_plugin_configure_plugin_agent() { +function neutron_plugin_configure_plugin_agent { if _has_ovs_subplugin; then ovs_neutron_plugin_configure_plugin_agent fi } -function neutron_plugin_configure_service() { +function neutron_plugin_configure_service { local subplugin local cisco_cfg_file @@ -318,7 +318,7 @@ function neutron_plugin_configure_service() { fi } -function neutron_plugin_setup_interface_driver() { +function neutron_plugin_setup_interface_driver { local conf_file=$1 iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver } diff --git a/lib/neutron_plugins/embrane b/lib/neutron_plugins/embrane index 325e9397e6..62f9737e51 100644 --- a/lib/neutron_plugins/embrane +++ 
b/lib/neutron_plugins/embrane @@ -7,7 +7,7 @@ set +o xtrace source $TOP_DIR/lib/neutron_plugins/openvswitch -save_function() { +function save_function { local ORIG_FUNC=$(declare -f $1) local NEW_FUNC="$2${ORIG_FUNC#$1}" eval "$NEW_FUNC" @@ -15,14 +15,14 @@ save_function() { save_function neutron_plugin_configure_service _neutron_plugin_configure_service -function neutron_plugin_configure_common() { +function neutron_plugin_configure_common { Q_PLUGIN_CONF_PATH=etc/neutron/plugins/embrane Q_PLUGIN_CONF_FILENAME=heleos_conf.ini Q_DB_NAME="ovs_neutron" Q_PLUGIN_CLASS="neutron.plugins.embrane.plugins.embrane_ovs_plugin.EmbraneOvsPlugin" } -function neutron_plugin_configure_service() { +function neutron_plugin_configure_service { _neutron_plugin_configure_service iniset /$Q_PLUGIN_CONF_FILE heleos esm_mgmt $HELEOS_ESM_MGMT iniset /$Q_PLUGIN_CONF_FILE heleos admin_username $HELEOS_ADMIN_USERNAME diff --git a/lib/neutron_plugins/linuxbridge b/lib/neutron_plugins/linuxbridge index 37bc748c37..362fd5b39e 100644 --- a/lib/neutron_plugins/linuxbridge +++ b/lib/neutron_plugins/linuxbridge @@ -7,14 +7,14 @@ set +o xtrace source $TOP_DIR/lib/neutron_plugins/linuxbridge_agent -function neutron_plugin_configure_common() { +function neutron_plugin_configure_common { Q_PLUGIN_CONF_PATH=etc/neutron/plugins/linuxbridge Q_PLUGIN_CONF_FILENAME=linuxbridge_conf.ini Q_DB_NAME="neutron_linux_bridge" Q_PLUGIN_CLASS="neutron.plugins.linuxbridge.lb_neutron_plugin.LinuxBridgePluginV2" } -function neutron_plugin_configure_service() { +function neutron_plugin_configure_service { if [[ "$ENABLE_TENANT_VLANS" = "True" ]]; then iniset /$Q_PLUGIN_CONF_FILE vlans tenant_network_type vlan else @@ -47,7 +47,7 @@ function neutron_plugin_configure_service() { done } -function has_neutron_plugin_security_group() { +function has_neutron_plugin_security_group { # 0 means True here return 0 } diff --git a/lib/neutron_plugins/linuxbridge_agent b/lib/neutron_plugins/linuxbridge_agent index 
85e8c085be..74799e477c 100644 --- a/lib/neutron_plugins/linuxbridge_agent +++ b/lib/neutron_plugins/linuxbridge_agent @@ -5,33 +5,33 @@ PLUGIN_XTRACE=$(set +o | grep xtrace) set +o xtrace -function is_neutron_ovs_base_plugin() { +function is_neutron_ovs_base_plugin { # linuxbridge doesn't use OVS return 1 } -function neutron_plugin_create_nova_conf() { +function neutron_plugin_create_nova_conf { : } -function neutron_plugin_install_agent_packages() { +function neutron_plugin_install_agent_packages { install_package bridge-utils } -function neutron_plugin_configure_debug_command() { +function neutron_plugin_configure_debug_command { iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT external_network_bridge } -function neutron_plugin_configure_dhcp_agent() { +function neutron_plugin_configure_dhcp_agent { iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_agent_manager neutron.agent.dhcp_agent.DhcpAgentWithStateReport } -function neutron_plugin_configure_l3_agent() { +function neutron_plugin_configure_l3_agent { iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge iniset $Q_L3_CONF_FILE DEFAULT l3_agent_manager neutron.agent.l3_agent.L3NATAgentWithStateReport } -function neutron_plugin_configure_plugin_agent() { +function neutron_plugin_configure_plugin_agent { # Setup physical network interface mappings. Override # ``LB_VLAN_RANGES`` and ``LB_INTERFACE_MAPPINGS`` in ``localrc`` for more # complex physical network configurations. 
@@ -63,12 +63,12 @@ function neutron_plugin_configure_plugin_agent() { done } -function neutron_plugin_setup_interface_driver() { +function neutron_plugin_setup_interface_driver { local conf_file=$1 iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.BridgeInterfaceDriver } -function neutron_plugin_check_adv_test_requirements() { +function neutron_plugin_check_adv_test_requirements { is_service_enabled q-agt && is_service_enabled q-dhcp && return 0 } diff --git a/lib/neutron_plugins/midonet b/lib/neutron_plugins/midonet index dd3b2baeca..742e3b2f0f 100644 --- a/lib/neutron_plugins/midonet +++ b/lib/neutron_plugins/midonet @@ -9,32 +9,32 @@ MIDONET_API_URL=${MIDONET_API_URL:-http://localhost:$MIDONET_API_PORT/midonet-ap MY_XTRACE=$(set +o | grep xtrace) set +o xtrace -function is_neutron_ovs_base_plugin() { +function is_neutron_ovs_base_plugin { # MidoNet does not use l3-agent # 0 means True here return 1 } -function neutron_plugin_create_nova_conf() { +function neutron_plugin_create_nova_conf { NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"} } -function neutron_plugin_install_agent_packages() { +function neutron_plugin_install_agent_packages { : } -function neutron_plugin_configure_common() { +function neutron_plugin_configure_common { Q_PLUGIN_CONF_PATH=etc/neutron/plugins/midonet Q_PLUGIN_CONF_FILENAME=midonet.ini Q_DB_NAME="neutron_midonet" Q_PLUGIN_CLASS="neutron.plugins.midonet.plugin.MidonetPluginV2" } -function neutron_plugin_configure_debug_command() { +function neutron_plugin_configure_debug_command { : } -function neutron_plugin_configure_dhcp_agent() { +function neutron_plugin_configure_dhcp_agent { DHCP_DRIVER=${DHCP_DRIVER:-"neutron.plugins.midonet.agent.midonet_driver.DhcpNoOpDriver"} neutron_plugin_setup_interface_driver $Q_DHCP_CONF_FILE iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_driver $DHCP_DRIVER @@ -42,15 +42,15 @@ function neutron_plugin_configure_dhcp_agent() { iniset $Q_DHCP_CONF_FILE 
DEFAULT enable_isolated_metadata True } -function neutron_plugin_configure_l3_agent() { +function neutron_plugin_configure_l3_agent { die $LINENO "q-l3 must not be executed with MidoNet plugin!" } -function neutron_plugin_configure_plugin_agent() { +function neutron_plugin_configure_plugin_agent { die $LINENO "q-agt must not be executed with MidoNet plugin!" } -function neutron_plugin_configure_service() { +function neutron_plugin_configure_service { if [[ "$MIDONET_API_URL" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE MIDONET midonet_uri $MIDONET_API_URL fi @@ -68,17 +68,17 @@ function neutron_plugin_configure_service() { Q_L3_ROUTER_PER_TENANT=True } -function neutron_plugin_setup_interface_driver() { +function neutron_plugin_setup_interface_driver { local conf_file=$1 iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.MidonetInterfaceDriver } -function has_neutron_plugin_security_group() { +function has_neutron_plugin_security_group { # 0 means True here return 0 } -function neutron_plugin_check_adv_test_requirements() { +function neutron_plugin_check_adv_test_requirements { # 0 means True here return 1 } diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2 index 4ceabe765d..e985dcb4a5 100644 --- a/lib/neutron_plugins/ml2 +++ b/lib/neutron_plugins/ml2 @@ -33,7 +33,7 @@ Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS=${Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS:-} # L3 Plugin to load for ML2 ML2_L3_PLUGIN=${ML2_L3_PLUGIN:-neutron.services.l3_router.l3_router_plugin.L3RouterPlugin} -function populate_ml2_config() { +function populate_ml2_config { CONF=$1 SECTION=$2 OPTS=$3 @@ -47,7 +47,7 @@ function populate_ml2_config() { done } -function neutron_plugin_configure_common() { +function neutron_plugin_configure_common { Q_PLUGIN_CONF_PATH=etc/neutron/plugins/ml2 Q_PLUGIN_CONF_FILENAME=ml2_conf.ini Q_DB_NAME="neutron_ml2" @@ -57,7 +57,7 @@ function neutron_plugin_configure_common() { _neutron_service_plugin_class_add $ML2_L3_PLUGIN } -function 
neutron_plugin_configure_service() { +function neutron_plugin_configure_service { if [[ "$Q_ML2_TENANT_NETWORK_TYPE" != "" ]]; then Q_SRV_EXTRA_OPTS+=(tenant_network_types=$Q_ML2_TENANT_NETWORK_TYPE) elif [[ "$ENABLE_TENANT_TUNNELS" = "True" ]]; then @@ -114,7 +114,7 @@ function neutron_plugin_configure_service() { populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_type_vlan $Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS } -function has_neutron_plugin_security_group() { +function has_neutron_plugin_security_group { return 0 } diff --git a/lib/neutron_plugins/nec b/lib/neutron_plugins/nec index 1cb2fef533..6d4bfca244 100644 --- a/lib/neutron_plugins/nec +++ b/lib/neutron_plugins/nec @@ -22,11 +22,11 @@ OFC_RETRY_INTERVAL=${OFC_RETRY_INTERVAL:-1} source $TOP_DIR/lib/neutron_plugins/ovs_base -function neutron_plugin_create_nova_conf() { +function neutron_plugin_create_nova_conf { _neutron_ovs_base_configure_nova_vif_driver } -function neutron_plugin_install_agent_packages() { +function neutron_plugin_install_agent_packages { # SKIP_OVS_INSTALL is useful when we want to use Open vSwitch whose # version is different from the version provided by the distribution. 
if [[ "$SKIP_OVS_INSTALL" = "True" ]]; then @@ -36,26 +36,26 @@ function neutron_plugin_install_agent_packages() { _neutron_ovs_base_install_agent_packages } -function neutron_plugin_configure_common() { +function neutron_plugin_configure_common { Q_PLUGIN_CONF_PATH=etc/neutron/plugins/nec Q_PLUGIN_CONF_FILENAME=nec.ini Q_DB_NAME="neutron_nec" Q_PLUGIN_CLASS="neutron.plugins.nec.nec_plugin.NECPluginV2" } -function neutron_plugin_configure_debug_command() { +function neutron_plugin_configure_debug_command { _neutron_ovs_base_configure_debug_command } -function neutron_plugin_configure_dhcp_agent() { +function neutron_plugin_configure_dhcp_agent { : } -function neutron_plugin_configure_l3_agent() { +function neutron_plugin_configure_l3_agent { _neutron_ovs_base_configure_l3_agent } -function _quantum_plugin_setup_bridge() { +function _quantum_plugin_setup_bridge { if [[ "$SKIP_OVS_BRIDGE_SETUP" = "True" ]]; then return fi @@ -72,7 +72,7 @@ function _quantum_plugin_setup_bridge() { _neutron_setup_ovs_tunnels $OVS_BRIDGE } -function neutron_plugin_configure_plugin_agent() { +function neutron_plugin_configure_plugin_agent { _quantum_plugin_setup_bridge AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-nec-agent" @@ -80,7 +80,7 @@ function neutron_plugin_configure_plugin_agent() { _neutron_ovs_base_configure_firewall_driver } -function neutron_plugin_configure_service() { +function neutron_plugin_configure_service { iniset $NEUTRON_CONF DEFAULT api_extensions_path neutron/plugins/nec/extensions/ iniset /$Q_PLUGIN_CONF_FILE ofc host $OFC_API_HOST iniset /$Q_PLUGIN_CONF_FILE ofc port $OFC_API_PORT @@ -91,7 +91,7 @@ function neutron_plugin_configure_service() { _neutron_ovs_base_configure_firewall_driver } -function neutron_plugin_setup_interface_driver() { +function neutron_plugin_setup_interface_driver { local conf_file=$1 iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver iniset $conf_file DEFAULT ovs_use_veth True @@ -101,7 +101,7 @@ 
function neutron_plugin_setup_interface_driver() { # --------------------------- # Setup OVS tunnel manually -function _neutron_setup_ovs_tunnels() { +function _neutron_setup_ovs_tunnels { local bridge=$1 local id=0 GRE_LOCAL_IP=${GRE_LOCAL_IP:-$HOST_IP} @@ -117,12 +117,12 @@ function _neutron_setup_ovs_tunnels() { fi } -function has_neutron_plugin_security_group() { +function has_neutron_plugin_security_group { # 0 means True here return 0 } -function neutron_plugin_check_adv_test_requirements() { +function neutron_plugin_check_adv_test_requirements { is_service_enabled q-agt && is_service_enabled q-dhcp && return 0 } diff --git a/lib/neutron_plugins/openvswitch b/lib/neutron_plugins/openvswitch index f99eb383d8..bdbc5a9367 100644 --- a/lib/neutron_plugins/openvswitch +++ b/lib/neutron_plugins/openvswitch @@ -7,14 +7,14 @@ set +o xtrace source $TOP_DIR/lib/neutron_plugins/openvswitch_agent -function neutron_plugin_configure_common() { +function neutron_plugin_configure_common { Q_PLUGIN_CONF_PATH=etc/neutron/plugins/openvswitch Q_PLUGIN_CONF_FILENAME=ovs_neutron_plugin.ini Q_DB_NAME="ovs_neutron" Q_PLUGIN_CLASS="neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2" } -function neutron_plugin_configure_service() { +function neutron_plugin_configure_service { if [[ "$ENABLE_TENANT_TUNNELS" = "True" ]]; then iniset /$Q_PLUGIN_CONF_FILE ovs tenant_network_type gre iniset /$Q_PLUGIN_CONF_FILE ovs tunnel_id_ranges $TENANT_TUNNEL_RANGES @@ -52,7 +52,7 @@ function neutron_plugin_configure_service() { done } -function has_neutron_plugin_security_group() { +function has_neutron_plugin_security_group { return 0 } diff --git a/lib/neutron_plugins/openvswitch_agent b/lib/neutron_plugins/openvswitch_agent index 46c2a5c6e2..3a2bdc316a 100644 --- a/lib/neutron_plugins/openvswitch_agent +++ b/lib/neutron_plugins/openvswitch_agent @@ -7,7 +7,7 @@ set +o xtrace source $TOP_DIR/lib/neutron_plugins/ovs_base -function neutron_plugin_create_nova_conf() { +function 
neutron_plugin_create_nova_conf { _neutron_ovs_base_configure_nova_vif_driver if [ "$VIRT_DRIVER" = 'xenserver' ]; then iniset $NOVA_CONF DEFAULT xenapi_vif_driver nova.virt.xenapi.vif.XenAPIOpenVswitchDriver @@ -17,24 +17,24 @@ function neutron_plugin_create_nova_conf() { fi } -function neutron_plugin_install_agent_packages() { +function neutron_plugin_install_agent_packages { _neutron_ovs_base_install_agent_packages } -function neutron_plugin_configure_debug_command() { +function neutron_plugin_configure_debug_command { _neutron_ovs_base_configure_debug_command } -function neutron_plugin_configure_dhcp_agent() { +function neutron_plugin_configure_dhcp_agent { iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_agent_manager neutron.agent.dhcp_agent.DhcpAgentWithStateReport } -function neutron_plugin_configure_l3_agent() { +function neutron_plugin_configure_l3_agent { _neutron_ovs_base_configure_l3_agent iniset $Q_L3_CONF_FILE DEFAULT l3_agent_manager neutron.agent.l3_agent.L3NATAgentWithStateReport } -function neutron_plugin_configure_plugin_agent() { +function neutron_plugin_configure_plugin_agent { # Setup integration bridge _neutron_ovs_base_setup_bridge $OVS_BRIDGE _neutron_ovs_base_configure_firewall_driver @@ -118,12 +118,12 @@ function neutron_plugin_configure_plugin_agent() { done } -function neutron_plugin_setup_interface_driver() { +function neutron_plugin_setup_interface_driver { local conf_file=$1 iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver } -function neutron_plugin_check_adv_test_requirements() { +function neutron_plugin_check_adv_test_requirements { is_service_enabled q-agt && is_service_enabled q-dhcp && return 0 } diff --git a/lib/neutron_plugins/ovs_base b/lib/neutron_plugins/ovs_base index 89db29d07f..0a2ba58fbb 100644 --- a/lib/neutron_plugins/ovs_base +++ b/lib/neutron_plugins/ovs_base @@ -8,19 +8,19 @@ set +o xtrace OVS_BRIDGE=${OVS_BRIDGE:-br-int} PUBLIC_BRIDGE=${PUBLIC_BRIDGE:-br-ex} -function 
is_neutron_ovs_base_plugin() { +function is_neutron_ovs_base_plugin { # Yes, we use OVS. return 0 } -function _neutron_ovs_base_setup_bridge() { +function _neutron_ovs_base_setup_bridge { local bridge=$1 neutron-ovs-cleanup sudo ovs-vsctl --no-wait -- --may-exist add-br $bridge sudo ovs-vsctl --no-wait br-set-external-id $bridge bridge-id $bridge } -function neutron_ovs_base_cleanup() { +function neutron_ovs_base_cleanup { # remove all OVS ports that look like Neutron created ports for port in $(sudo ovs-vsctl list port | grep -o -e tap[0-9a-f\-]* -e q[rg]-[0-9a-f\-]*); do sudo ovs-vsctl del-port ${port} @@ -32,7 +32,7 @@ function neutron_ovs_base_cleanup() { done } -function _neutron_ovs_base_install_agent_packages() { +function _neutron_ovs_base_install_agent_packages { local kernel_version # Install deps # FIXME add to ``files/apts/neutron``, but don't install if not needed! @@ -50,11 +50,11 @@ function _neutron_ovs_base_install_agent_packages() { fi } -function _neutron_ovs_base_configure_debug_command() { +function _neutron_ovs_base_configure_debug_command { iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE } -function _neutron_ovs_base_configure_firewall_driver() { +function _neutron_ovs_base_configure_firewall_driver { if [[ "$Q_USE_SECGROUP" == "True" ]]; then iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver else @@ -62,7 +62,7 @@ function _neutron_ovs_base_configure_firewall_driver() { fi } -function _neutron_ovs_base_configure_l3_agent() { +function _neutron_ovs_base_configure_l3_agent { iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE neutron-ovs-cleanup @@ -72,7 +72,7 @@ function _neutron_ovs_base_configure_l3_agent() { sudo ip addr flush dev $PUBLIC_BRIDGE } -function _neutron_ovs_base_configure_nova_vif_driver() { +function _neutron_ovs_base_configure_nova_vif_driver { : } diff --git a/lib/neutron_plugins/plumgrid 
b/lib/neutron_plugins/plumgrid index bccd301011..19f94cb78c 100644 --- a/lib/neutron_plugins/plumgrid +++ b/lib/neutron_plugins/plumgrid @@ -6,15 +6,15 @@ MY_XTRACE=$(set +o | grep xtrace) set +o xtrace -function neutron_plugin_create_nova_conf() { +function neutron_plugin_create_nova_conf { : } -function neutron_plugin_setup_interface_driver() { +function neutron_plugin_setup_interface_driver { : } -function neutron_plugin_configure_common() { +function neutron_plugin_configure_common { Q_PLUGIN_CONF_PATH=etc/neutron/plugins/plumgrid Q_PLUGIN_CONF_FILENAME=plumgrid.ini Q_DB_NAME="plumgrid_neutron" @@ -26,7 +26,7 @@ function neutron_plugin_configure_common() { PLUMGRID_TIMEOUT=${PLUMGRID_TIMEOUT:-70} } -function neutron_plugin_configure_service() { +function neutron_plugin_configure_service { iniset /$Q_PLUGIN_CONF_FILE PLUMgridDirector director_server $PLUMGRID_DIRECTOR_IP iniset /$Q_PLUGIN_CONF_FILE PLUMgridDirector director_server_port $PLUMGRID_DIRECTOR_PORT iniset /$Q_PLUGIN_CONF_FILE PLUMgridDirector username $PLUMGRID_ADMIN @@ -34,21 +34,21 @@ function neutron_plugin_configure_service() { iniset /$Q_PLUGIN_CONF_FILE PLUMgridDirector servertimeout $PLUMGRID_TIMEOUT } -function neutron_plugin_configure_debug_command() { +function neutron_plugin_configure_debug_command { : } -function is_neutron_ovs_base_plugin() { +function is_neutron_ovs_base_plugin { # False return 1 } -function has_neutron_plugin_security_group() { +function has_neutron_plugin_security_group { # False return 1 } -function neutron_plugin_check_adv_test_requirements() { +function neutron_plugin_check_adv_test_requirements { is_service_enabled q-agt && is_service_enabled q-dhcp && return 0 } # Restore xtrace diff --git a/lib/neutron_plugins/ryu b/lib/neutron_plugins/ryu index 334c227cdb..9ae36d38fa 100644 --- a/lib/neutron_plugins/ryu +++ b/lib/neutron_plugins/ryu @@ -8,12 +8,12 @@ set +o xtrace source $TOP_DIR/lib/neutron_plugins/ovs_base source $TOP_DIR/lib/neutron_thirdparty/ryu # for 
configuration value -function neutron_plugin_create_nova_conf() { +function neutron_plugin_create_nova_conf { _neutron_ovs_base_configure_nova_vif_driver iniset $NOVA_CONF DEFAULT libvirt_ovs_integration_bridge "$OVS_BRIDGE" } -function neutron_plugin_install_agent_packages() { +function neutron_plugin_install_agent_packages { _neutron_ovs_base_install_agent_packages # neutron_ryu_agent requires ryu module @@ -22,28 +22,28 @@ function neutron_plugin_install_agent_packages() { configure_ryu } -function neutron_plugin_configure_common() { +function neutron_plugin_configure_common { Q_PLUGIN_CONF_PATH=etc/neutron/plugins/ryu Q_PLUGIN_CONF_FILENAME=ryu.ini Q_DB_NAME="ovs_neutron" Q_PLUGIN_CLASS="neutron.plugins.ryu.ryu_neutron_plugin.RyuNeutronPluginV2" } -function neutron_plugin_configure_debug_command() { +function neutron_plugin_configure_debug_command { _neutron_ovs_base_configure_debug_command iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT ryu_api_host $RYU_API_HOST:$RYU_API_PORT } -function neutron_plugin_configure_dhcp_agent() { +function neutron_plugin_configure_dhcp_agent { iniset $Q_DHCP_CONF_FILE DEFAULT ryu_api_host $RYU_API_HOST:$RYU_API_PORT } -function neutron_plugin_configure_l3_agent() { +function neutron_plugin_configure_l3_agent { iniset $Q_L3_CONF_FILE DEFAULT ryu_api_host $RYU_API_HOST:$RYU_API_PORT _neutron_ovs_base_configure_l3_agent } -function neutron_plugin_configure_plugin_agent() { +function neutron_plugin_configure_plugin_agent { # Set up integration bridge _neutron_ovs_base_setup_bridge $OVS_BRIDGE if [ -n "$RYU_INTERNAL_INTERFACE" ]; then @@ -55,24 +55,24 @@ function neutron_plugin_configure_plugin_agent() { _neutron_ovs_base_configure_firewall_driver } -function neutron_plugin_configure_service() { +function neutron_plugin_configure_service { iniset /$Q_PLUGIN_CONF_FILE ovs openflow_rest_api $RYU_API_HOST:$RYU_API_PORT _neutron_ovs_base_configure_firewall_driver } -function neutron_plugin_setup_interface_driver() { +function 
neutron_plugin_setup_interface_driver { local conf_file=$1 iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver iniset $conf_file DEFAULT ovs_use_veth True } -function has_neutron_plugin_security_group() { +function has_neutron_plugin_security_group { # 0 means True here return 0 } -function neutron_plugin_check_adv_test_requirements() { +function neutron_plugin_check_adv_test_requirements { is_service_enabled q-agt && is_service_enabled q-dhcp && return 0 } diff --git a/lib/neutron_plugins/services/firewall b/lib/neutron_plugins/services/firewall index 8273e54e6c..ab6c32426a 100644 --- a/lib/neutron_plugins/services/firewall +++ b/lib/neutron_plugins/services/firewall @@ -7,11 +7,11 @@ set +o xtrace FWAAS_PLUGIN=neutron.services.firewall.fwaas_plugin.FirewallPlugin -function neutron_fwaas_configure_common() { +function neutron_fwaas_configure_common { _neutron_service_plugin_class_add $FWAAS_PLUGIN } -function neutron_fwaas_configure_driver() { +function neutron_fwaas_configure_driver { FWAAS_DRIVER_CONF_FILENAME=/etc/neutron/fwaas_driver.ini cp $NEUTRON_DIR/etc/fwaas_driver.ini $FWAAS_DRIVER_CONF_FILENAME @@ -19,7 +19,7 @@ function neutron_fwaas_configure_driver() { iniset_multiline $FWAAS_DRIVER_CONF_FILENAME fwaas driver "neutron.services.firewall.drivers.linux.iptables_fwaas.IptablesFwaasDriver" } -function neutron_fwaas_stop() { +function neutron_fwaas_stop { : } diff --git a/lib/neutron_plugins/services/loadbalancer b/lib/neutron_plugins/services/loadbalancer index 5d7a94e5d8..744826e49d 100644 --- a/lib/neutron_plugins/services/loadbalancer +++ b/lib/neutron_plugins/services/loadbalancer @@ -9,7 +9,7 @@ set +o xtrace AGENT_LBAAS_BINARY="$NEUTRON_BIN_DIR/neutron-lbaas-agent" LBAAS_PLUGIN=neutron.services.loadbalancer.plugin.LoadBalancerPlugin -function neutron_agent_lbaas_install_agent_packages() { +function neutron_agent_lbaas_install_agent_packages { if is_ubuntu || is_fedora; then install_package haproxy elif 
is_suse; then @@ -18,11 +18,11 @@ function neutron_agent_lbaas_install_agent_packages() { fi } -function neutron_agent_lbaas_configure_common() { +function neutron_agent_lbaas_configure_common { _neutron_service_plugin_class_add $LBAAS_PLUGIN } -function neutron_agent_lbaas_configure_agent() { +function neutron_agent_lbaas_configure_agent { LBAAS_AGENT_CONF_PATH=/etc/neutron/services/loadbalancer/haproxy mkdir -p $LBAAS_AGENT_CONF_PATH @@ -41,7 +41,7 @@ function neutron_agent_lbaas_configure_agent() { fi } -function neutron_lbaas_stop() { +function neutron_lbaas_stop { pids=$(ps aux | awk '/haproxy/ { print $2 }') [ ! -z "$pids" ] && sudo kill $pids } diff --git a/lib/neutron_plugins/services/metering b/lib/neutron_plugins/services/metering index 37952bbabd..0e5f75b27b 100644 --- a/lib/neutron_plugins/services/metering +++ b/lib/neutron_plugins/services/metering @@ -9,11 +9,11 @@ set +o xtrace AGENT_METERING_BINARY="$NEUTRON_BIN_DIR/neutron-metering-agent" METERING_PLUGIN="neutron.services.metering.metering_plugin.MeteringPlugin" -function neutron_agent_metering_configure_common() { +function neutron_agent_metering_configure_common { _neutron_service_plugin_class_add $METERING_PLUGIN } -function neutron_agent_metering_configure_agent() { +function neutron_agent_metering_configure_agent { METERING_AGENT_CONF_PATH=/etc/neutron/services/metering mkdir -p $METERING_AGENT_CONF_PATH @@ -22,7 +22,7 @@ function neutron_agent_metering_configure_agent() { cp $NEUTRON_DIR/etc/metering_agent.ini $METERING_AGENT_CONF_FILENAME } -function neutron_metering_stop() { +function neutron_metering_stop { : } diff --git a/lib/neutron_plugins/services/vpn b/lib/neutron_plugins/services/vpn index 02370e7f85..e56d3613c2 100644 --- a/lib/neutron_plugins/services/vpn +++ b/lib/neutron_plugins/services/vpn @@ -10,15 +10,15 @@ AGENT_VPN_BINARY="$NEUTRON_BIN_DIR/neutron-vpn-agent" VPN_PLUGIN="neutron.services.vpn.plugin.VPNDriverPlugin" IPSEC_PACKAGE=${IPSEC_PACKAGE:-"openswan"} -function 
neutron_vpn_install_agent_packages() { +function neutron_vpn_install_agent_packages { install_package $IPSEC_PACKAGE } -function neutron_vpn_configure_common() { +function neutron_vpn_configure_common { _neutron_service_plugin_class_add $VPN_PLUGIN } -function neutron_vpn_stop() { +function neutron_vpn_stop { local ipsec_data_dir=$DATA_DIR/neutron/ipsec local pids if [ -d $ipsec_data_dir ]; then diff --git a/lib/neutron_plugins/vmware_nsx b/lib/neutron_plugins/vmware_nsx index d506cb6f8d..0930422e4e 100644 --- a/lib/neutron_plugins/vmware_nsx +++ b/lib/neutron_plugins/vmware_nsx @@ -7,7 +7,7 @@ set +o xtrace source $TOP_DIR/lib/neutron_plugins/ovs_base -function setup_integration_bridge() { +function setup_integration_bridge { _neutron_ovs_base_setup_bridge $OVS_BRIDGE # Set manager to NSX controller (1st of list) if [[ "$NSX_CONTROLLERS" != "" ]]; then @@ -20,24 +20,24 @@ function setup_integration_bridge() { sudo ovs-vsctl set-manager ssl:$OVS_MGR_IP } -function is_neutron_ovs_base_plugin() { +function is_neutron_ovs_base_plugin { # NSX uses OVS, but not the l3-agent return 0 } -function neutron_plugin_create_nova_conf() { +function neutron_plugin_create_nova_conf { # if n-cpu is enabled, then setup integration bridge if is_service_enabled n-cpu; then setup_integration_bridge fi } -function neutron_plugin_install_agent_packages() { +function neutron_plugin_install_agent_packages { # VMware NSX Plugin does not run q-agt, but it currently needs dhcp and metadata agents _neutron_ovs_base_install_agent_packages } -function neutron_plugin_configure_common() { +function neutron_plugin_configure_common { Q_PLUGIN_CONF_PATH=etc/neutron/plugins/vmware Q_PLUGIN_CONF_FILENAME=nsx.ini Q_DB_NAME="neutron_nsx" @@ -45,29 +45,29 @@ function neutron_plugin_configure_common() { Q_PLUGIN_CLASS="neutron.plugins.nicira.NeutronPlugin.NvpPluginV2" } -function neutron_plugin_configure_debug_command() { +function neutron_plugin_configure_debug_command { sudo ovs-vsctl --no-wait -- 
--may-exist add-br $PUBLIC_BRIDGE iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT external_network_bridge "$PUBLIC_BRIDGE" } -function neutron_plugin_configure_dhcp_agent() { +function neutron_plugin_configure_dhcp_agent { setup_integration_bridge iniset $Q_DHCP_CONF_FILE DEFAULT enable_isolated_metadata True iniset $Q_DHCP_CONF_FILE DEFAULT enable_metadata_network True iniset $Q_DHCP_CONF_FILE DEFAULT ovs_use_veth True } -function neutron_plugin_configure_l3_agent() { +function neutron_plugin_configure_l3_agent { # VMware NSX plugin does not run L3 agent die $LINENO "q-l3 should must not be executed with VMware NSX plugin!" } -function neutron_plugin_configure_plugin_agent() { +function neutron_plugin_configure_plugin_agent { # VMware NSX plugin does not run L2 agent die $LINENO "q-agt must not be executed with VMware NSX plugin!" } -function neutron_plugin_configure_service() { +function neutron_plugin_configure_service { if [[ "$MAX_LP_PER_BRIDGED_LS" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE nsx max_lp_per_bridged_ls $MAX_LP_PER_BRIDGED_LS fi @@ -132,17 +132,17 @@ function neutron_plugin_configure_service() { fi } -function neutron_plugin_setup_interface_driver() { +function neutron_plugin_setup_interface_driver { local conf_file=$1 iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver } -function has_neutron_plugin_security_group() { +function has_neutron_plugin_security_group { # 0 means True here return 0 } -function neutron_plugin_check_adv_test_requirements() { +function neutron_plugin_check_adv_test_requirements { is_service_enabled q-dhcp && return 0 } diff --git a/lib/neutron_thirdparty/bigswitch_floodlight b/lib/neutron_thirdparty/bigswitch_floodlight index 24c10443b7..f03de56295 100644 --- a/lib/neutron_thirdparty/bigswitch_floodlight +++ b/lib/neutron_thirdparty/bigswitch_floodlight @@ -8,11 +8,11 @@ set +o xtrace BS_FL_CONTROLLERS_PORT=${BS_FL_CONTROLLERS_PORT:-localhost:80} BS_FL_OF_PORT=${BS_FL_OF_PORT:-6633} 
-function configure_bigswitch_floodlight() { +function configure_bigswitch_floodlight { : } -function init_bigswitch_floodlight() { +function init_bigswitch_floodlight { install_neutron_agent_packages echo -n "Installing OVS managed by the openflow controllers:" @@ -32,19 +32,19 @@ function init_bigswitch_floodlight() { sudo ovs-vsctl --no-wait set-controller ${OVS_BRIDGE} ${ctrls} } -function install_bigswitch_floodlight() { +function install_bigswitch_floodlight { : } -function start_bigswitch_floodlight() { +function start_bigswitch_floodlight { : } -function stop_bigswitch_floodlight() { +function stop_bigswitch_floodlight { : } -function check_bigswitch_floodlight() { +function check_bigswitch_floodlight { : } diff --git a/lib/neutron_thirdparty/midonet b/lib/neutron_thirdparty/midonet index 98be4254fc..ad417bbc29 100644 --- a/lib/neutron_thirdparty/midonet +++ b/lib/neutron_thirdparty/midonet @@ -20,28 +20,28 @@ MIDONET_CLIENT_DIR=${MIDONET_CLIENT_DIR:-$MIDONET_DIR/python-midonetclient} MY_XTRACE=$(set +o | grep xtrace) set +o xtrace -function configure_midonet() { +function configure_midonet { : } -function init_midonet() { +function init_midonet { : } -function install_midonet() { +function install_midonet { git_clone $MIDONET_CLIENT_REPO $MIDONET_CLIENT_DIR $MIDONET_CLIENT_BRANCH export PYTHONPATH=$MIDONET_CLIENT_DIR/src:$PYTHONPATH } -function start_midonet() { +function start_midonet { : } -function stop_midonet() { +function stop_midonet { : } -function check_midonet() { +function check_midonet { : } diff --git a/lib/neutron_thirdparty/ryu b/lib/neutron_thirdparty/ryu index 5edf273361..424a90041e 100644 --- a/lib/neutron_thirdparty/ryu +++ b/lib/neutron_thirdparty/ryu @@ -21,14 +21,14 @@ RYU_APPS=${RYU_APPS:-ryu.app.simple_isolation,ryu.app.rest} # configure_ryu can be called multiple times as neutron_pluing/ryu may call # this function for neutron-ryu-agent _RYU_CONFIGURED=${_RYU_CONFIGURED:-False} -function configure_ryu() { +function configure_ryu { 
if [[ "$_RYU_CONFIGURED" == "False" ]]; then setup_develop $RYU_DIR _RYU_CONFIGURED=True fi } -function init_ryu() { +function init_ryu { RYU_CONF_DIR=/etc/ryu if [[ ! -d $RYU_CONF_DIR ]]; then sudo mkdir -p $RYU_CONF_DIR @@ -60,22 +60,22 @@ neutron_controller_addr=tcp:$RYU_OFP_HOST:$RYU_OFP_PORT # Make this function idempotent and avoid cloning same repo many times # with RECLONE=yes _RYU_INSTALLED=${_RYU_INSTALLED:-False} -function install_ryu() { +function install_ryu { if [[ "$_RYU_INSTALLED" == "False" ]]; then git_clone $RYU_REPO $RYU_DIR $RYU_BRANCH _RYU_INSTALLED=True fi } -function start_ryu() { +function start_ryu { screen_it ryu "cd $RYU_DIR && $RYU_DIR/bin/ryu-manager --config-file $RYU_CONF" } -function stop_ryu() { +function stop_ryu { : } -function check_ryu() { +function check_ryu { : } diff --git a/lib/neutron_thirdparty/trema b/lib/neutron_thirdparty/trema index 2b125646dc..d465ac753e 100644 --- a/lib/neutron_thirdparty/trema +++ b/lib/neutron_thirdparty/trema @@ -31,7 +31,7 @@ TREMA_SS_CONFIG=$TREMA_SS_ETC_DIR/sliceable.conf TREMA_SS_APACHE_CONFIG=/etc/apache2/sites-available/sliceable_switch.conf # configure_trema - Set config files, create data dirs, etc -function configure_trema() { +function configure_trema { # prepare dir for d in $TREMA_SS_ETC_DIR $TREMA_SS_DB_DIR $TREMA_SS_SCRIPT_DIR; do sudo mkdir -p $d @@ -41,7 +41,7 @@ function configure_trema() { } # init_trema - Initialize databases, etc. 
-function init_trema() { +function init_trema { local _pwd=$(pwd) # Initialize databases for Sliceable Switch @@ -70,7 +70,7 @@ function init_trema() { $TREMA_SS_CONFIG } -function gem_install() { +function gem_install { [[ "$OFFLINE" = "True" ]] && return [ -n "$RUBYGEMS_CMD" ] || get_gem_command @@ -79,7 +79,7 @@ function gem_install() { sudo $RUBYGEMS_CMD install $pkg } -function get_gem_command() { +function get_gem_command { # Trema requires ruby 1.8, so gem1.8 is checked first RUBYGEMS_CMD=$(which gem1.8 || which gem) if [ -z "$RUBYGEMS_CMD" ]; then @@ -87,7 +87,7 @@ function get_gem_command() { fi } -function install_trema() { +function install_trema { # Trema gem_install trema # Sliceable Switch @@ -97,7 +97,7 @@ function install_trema() { make -C $TREMA_DIR/apps/sliceable_switch } -function start_trema() { +function start_trema { # APACHE_NAME is defined in init_horizon (in lib/horizon) restart_service $APACHE_NAME @@ -105,11 +105,11 @@ function start_trema() { trema run -d -c $TREMA_SS_CONFIG } -function stop_trema() { +function stop_trema { sudo TREMA_TMP=$TREMA_TMP_DIR trema killall } -function check_trema() { +function check_trema { : } diff --git a/lib/neutron_thirdparty/vmware_nsx b/lib/neutron_thirdparty/vmware_nsx index 4eb177a458..3fecc62560 100644 --- a/lib/neutron_thirdparty/vmware_nsx +++ b/lib/neutron_thirdparty/vmware_nsx @@ -22,11 +22,11 @@ NSX_GATEWAY_NETWORK_INTERFACE=${NSX_GATEWAY_NETWORK_INTERFACE:-eth2} # is invoked by unstack.sh FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.0/24} -function configure_vmware_nsx() { +function configure_vmware_nsx { : } -function init_vmware_nsx() { +function init_vmware_nsx { if ! is_set NSX_GATEWAY_NETWORK_CIDR; then NSX_GATEWAY_NETWORK_CIDR=$PUBLIC_NETWORK_GATEWAY/${FLOATING_RANGE#*/} echo "The IP address to set on br-ex was not specified. 
" @@ -52,15 +52,15 @@ function init_vmware_nsx() { sudo ip addr add dev $PUBLIC_BRIDGE $NSX_GATEWAY_NETWORK_CIDR } -function install_vmware_nsx() { +function install_vmware_nsx { : } -function start_vmware_nsx() { +function start_vmware_nsx { : } -function stop_vmware_nsx() { +function stop_vmware_nsx { if ! is_set NSX_GATEWAY_NETWORK_CIDR; then NSX_GATEWAY_NETWORK_CIDR=$PUBLIC_NETWORK_GATEWAY/${FLOATING_RANGE#*/} echo "The IP address expected on br-ex was not specified. " @@ -78,7 +78,7 @@ function stop_vmware_nsx() { done } -function check_vmware_nsx() { +function check_vmware_nsx { neutron-check-nsx-config $NEUTRON_CONF_DIR/plugins/vmware/nsx.ini } diff --git a/lib/nova b/lib/nova index fefeda1236..90b1ba4fde 100644 --- a/lib/nova +++ b/lib/nova @@ -144,7 +144,7 @@ function is_n-cell_enabled { } # Helper to clean iptables rules -function clean_iptables() { +function clean_iptables { # Delete rules sudo iptables -S -v | sed "s/-c [0-9]* [0-9]* //g" | grep "nova" | grep "\-A" | sed "s/-A/-D/g" | awk '{print "sudo iptables",$0}' | bash # Delete nat rules @@ -157,7 +157,7 @@ function clean_iptables() { # cleanup_nova() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up -function cleanup_nova() { +function cleanup_nova { if is_service_enabled n-cpu; then # Clean iptables from previous runs clean_iptables @@ -191,7 +191,7 @@ function cleanup_nova() { } # configure_nova_rootwrap() - configure Nova's rootwrap -function configure_nova_rootwrap() { +function configure_nova_rootwrap { # Deploy new rootwrap filters files (owned by root). # Wipe any existing rootwrap.d files first if [[ -d $NOVA_CONF_DIR/rootwrap.d ]]; then @@ -219,7 +219,7 @@ function configure_nova_rootwrap() { } # configure_nova() - Set config files, create data dirs, etc -function configure_nova() { +function configure_nova { # Put config files in ``/etc/nova`` for everyone to find if [[ ! 
-d $NOVA_CONF_DIR ]]; then sudo mkdir -p $NOVA_CONF_DIR @@ -367,7 +367,7 @@ create_nova_accounts() { } # create_nova_conf() - Create a new nova.conf file -function create_nova_conf() { +function create_nova_conf { # Remove legacy ``nova.conf`` rm -f $NOVA_DIR/bin/nova.conf @@ -515,7 +515,7 @@ function create_nova_conf() { iniset $NOVA_CONF DEFAULT glance_api_servers "$GLANCE_HOSTPORT" } -function init_nova_cells() { +function init_nova_cells { if is_service_enabled n-cell; then cp $NOVA_CONF $NOVA_CELLS_CONF iniset $NOVA_CELLS_CONF DEFAULT sql_connection `database_connection_url $NOVA_CELLS_DB` @@ -542,14 +542,14 @@ function init_nova_cells() { } # create_nova_cache_dir() - Part of the init_nova() process -function create_nova_cache_dir() { +function create_nova_cache_dir { # Create cache dir sudo mkdir -p $NOVA_AUTH_CACHE_DIR sudo chown $STACK_USER $NOVA_AUTH_CACHE_DIR rm -f $NOVA_AUTH_CACHE_DIR/* } -function create_nova_conf_nova_network() { +function create_nova_conf_nova_network { iniset $NOVA_CONF DEFAULT network_manager "nova.network.manager.$NETWORK_MANAGER" iniset $NOVA_CONF DEFAULT public_interface "$PUBLIC_INTERFACE" iniset $NOVA_CONF DEFAULT vlan_interface "$VLAN_INTERFACE" @@ -560,14 +560,14 @@ function create_nova_conf_nova_network() { } # create_nova_keys_dir() - Part of the init_nova() process -function create_nova_keys_dir() { +function create_nova_keys_dir { # Create keys dir sudo mkdir -p ${NOVA_STATE_PATH}/keys sudo chown -R $STACK_USER ${NOVA_STATE_PATH} } # init_nova() - Initialize databases, etc. -function init_nova() { +function init_nova { # All nova components talk to a central database. # Only do this step once on the API node for an entire cluster. 
if is_service_enabled $DATABASE_BACKENDS && is_service_enabled n-api; then @@ -596,14 +596,14 @@ function init_nova() { } # install_novaclient() - Collect source and prepare -function install_novaclient() { +function install_novaclient { git_clone $NOVACLIENT_REPO $NOVACLIENT_DIR $NOVACLIENT_BRANCH setup_develop $NOVACLIENT_DIR sudo install -D -m 0644 -o $STACK_USER {$NOVACLIENT_DIR/tools/,/etc/bash_completion.d/}nova.bash_completion } # install_nova() - Collect source and prepare -function install_nova() { +function install_nova { if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then install_nova_hypervisor fi @@ -638,7 +638,7 @@ function install_nova() { } # start_nova_api() - Start the API process ahead of other things -function start_nova_api() { +function start_nova_api { # Get right service port for testing local service_port=$NOVA_SERVICE_PORT if is_service_enabled tls-proxy; then @@ -658,7 +658,7 @@ function start_nova_api() { } # start_nova_compute() - Start the compute process -function start_nova_compute() { +function start_nova_compute { if is_service_enabled n-cell; then local compute_cell_conf=$NOVA_CELLS_CONF else @@ -693,7 +693,7 @@ function start_nova_compute() { } # start_nova() - Start running processes, including screen -function start_nova_rest() { +function start_nova_rest { local api_cell_conf=$NOVA_CONF if is_service_enabled n-cell; then local compute_cell_conf=$NOVA_CELLS_CONF @@ -722,13 +722,13 @@ function start_nova_rest() { screen_it n-obj "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-objectstore --config-file $api_cell_conf" } -function start_nova() { +function start_nova { start_nova_compute start_nova_rest } # stop_nova() - Stop running processes (non-screen) -function stop_nova() { +function stop_nova { # Kill the nova screen windows # Some services are listed here twice since more than one instance # of a service may be running in certain configs. 
diff --git a/lib/nova_plugins/hypervisor-baremetal b/lib/nova_plugins/hypervisor-baremetal index 660c977bde..2da1097027 100644 --- a/lib/nova_plugins/hypervisor-baremetal +++ b/lib/nova_plugins/hypervisor-baremetal @@ -33,13 +33,13 @@ STUB_NETWORK=${STUB_NETWORK:-False} # ------------ # clean_nova_hypervisor - Clean up an installation -function cleanup_nova_hypervisor() { +function cleanup_nova_hypervisor { # This function intentionally left blank : } # configure_nova_hypervisor - Set config files, create data dirs, etc -function configure_nova_hypervisor() { +function configure_nova_hypervisor { configure_baremetal_nova_dirs iniset $NOVA_CONF baremetal sql_connection `database_connection_url nova_bm` @@ -67,19 +67,19 @@ function configure_nova_hypervisor() { } # install_nova_hypervisor() - Install external components -function install_nova_hypervisor() { +function install_nova_hypervisor { # This function intentionally left blank : } # start_nova_hypervisor - Start any required external services -function start_nova_hypervisor() { +function start_nova_hypervisor { # This function intentionally left blank : } # stop_nova_hypervisor - Stop any external services -function stop_nova_hypervisor() { +function stop_nova_hypervisor { # This function intentionally left blank : } diff --git a/lib/nova_plugins/hypervisor-docker b/lib/nova_plugins/hypervisor-docker index b5df19db02..f8dc6afa19 100644 --- a/lib/nova_plugins/hypervisor-docker +++ b/lib/nova_plugins/hypervisor-docker @@ -44,7 +44,7 @@ DOCKER_APT_REPO=${DOCKER_APT_REPO:-https://get.docker.io/ubuntu} # ------------ # clean_nova_hypervisor - Clean up an installation -function cleanup_nova_hypervisor() { +function cleanup_nova_hypervisor { stop_service docker # Clean out work area @@ -52,13 +52,13 @@ function cleanup_nova_hypervisor() { } # configure_nova_hypervisor - Set config files, create data dirs, etc -function configure_nova_hypervisor() { +function configure_nova_hypervisor { iniset $NOVA_CONF DEFAULT 
compute_driver docker.DockerDriver iniset $GLANCE_API_CONF DEFAULT container_formats ami,ari,aki,bare,ovf,docker } # install_nova_hypervisor() - Install external components -function install_nova_hypervisor() { +function install_nova_hypervisor { # So far this is Ubuntu only if ! is_ubuntu; then die $LINENO "Docker is only supported on Ubuntu at this time" @@ -77,7 +77,7 @@ function install_nova_hypervisor() { } # start_nova_hypervisor - Start any required external services -function start_nova_hypervisor() { +function start_nova_hypervisor { local docker_pid read docker_pid <$DOCKER_PID_FILE if [[ -z $docker_pid ]] || ! ps -p $docker_pid | grep [d]ocker; then @@ -111,7 +111,7 @@ function start_nova_hypervisor() { } # stop_nova_hypervisor - Stop any external services -function stop_nova_hypervisor() { +function stop_nova_hypervisor { # Stop the docker registry container docker kill $(docker ps | grep docker-registry | cut -d' ' -f1) } diff --git a/lib/nova_plugins/hypervisor-fake b/lib/nova_plugins/hypervisor-fake index fe0d1900ee..e7a833f806 100644 --- a/lib/nova_plugins/hypervisor-fake +++ b/lib/nova_plugins/hypervisor-fake @@ -27,13 +27,13 @@ set +o xtrace # ------------ # clean_nova_hypervisor - Clean up an installation -function cleanup_nova_hypervisor() { +function cleanup_nova_hypervisor { # This function intentionally left blank : } # configure_nova_hypervisor - Set config files, create data dirs, etc -function configure_nova_hypervisor() { +function configure_nova_hypervisor { iniset $NOVA_CONF DEFAULT compute_driver "nova.virt.fake.FakeDriver" # Disable arbitrary limits iniset $NOVA_CONF DEFAULT quota_instances -1 @@ -51,19 +51,19 @@ function configure_nova_hypervisor() { } # install_nova_hypervisor() - Install external components -function install_nova_hypervisor() { +function install_nova_hypervisor { # This function intentionally left blank : } # start_nova_hypervisor - Start any required external services -function start_nova_hypervisor() { +function 
start_nova_hypervisor { # This function intentionally left blank : } # stop_nova_hypervisor - Stop any external services -function stop_nova_hypervisor() { +function stop_nova_hypervisor { # This function intentionally left blank : } diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt index a550600363..b39c57c74a 100644 --- a/lib/nova_plugins/hypervisor-libvirt +++ b/lib/nova_plugins/hypervisor-libvirt @@ -31,13 +31,13 @@ ENABLE_FILE_INJECTION=${ENABLE_FILE_INJECTION:-False} # ------------ # clean_nova_hypervisor - Clean up an installation -function cleanup_nova_hypervisor() { +function cleanup_nova_hypervisor { # This function intentionally left blank : } # configure_nova_hypervisor - Set config files, create data dirs, etc -function configure_nova_hypervisor() { +function configure_nova_hypervisor { if is_service_enabled neutron && is_neutron_ovs_base_plugin && ! sudo grep -q '^cgroup_device_acl' $QEMU_CONF; then # Add /dev/net/tun to cgroup_device_acls, needed for type=ethernet interfaces cat </dev/null; then echo "Found old oslo.config... removing to ensure consistency" diff --git a/lib/rpc_backend b/lib/rpc_backend index 34f576f5b8..a0424b1dee 100644 --- a/lib/rpc_backend +++ b/lib/rpc_backend @@ -25,7 +25,7 @@ set +o xtrace # Make sure we only have one rpc backend enabled. # Also check the specified rpc backend is available on your platform. -function check_rpc_backend() { +function check_rpc_backend { local rpc_needed=1 # We rely on the fact that filenames in lib/* match the service names # that can be passed as arguments to is_service_enabled. 
@@ -91,7 +91,7 @@ function cleanup_rpc_backend { } # install rpc backend -function install_rpc_backend() { +function install_rpc_backend { if is_service_enabled rabbit; then # Install rabbitmq-server # the temp file is necessary due to LP: #878600 @@ -135,7 +135,7 @@ function install_rpc_backend() { } # restart the rpc backend -function restart_rpc_backend() { +function restart_rpc_backend { if is_service_enabled rabbit; then # Start rabbitmq-server echo_summary "Starting RabbitMQ" @@ -165,7 +165,7 @@ function restart_rpc_backend() { } # iniset cofiguration -function iniset_rpc_backend() { +function iniset_rpc_backend { local package=$1 local file=$2 local section=$3 @@ -193,7 +193,7 @@ function iniset_rpc_backend() { # Check if qpid can be used on the current distro. # qpid_is_supported -function qpid_is_supported() { +function qpid_is_supported { if [[ -z "$DISTRO" ]]; then GetDistro fi diff --git a/lib/savanna b/lib/savanna index 954f0e711e..d7152b1e6f 100644 --- a/lib/savanna +++ b/lib/savanna @@ -55,7 +55,7 @@ TEMPEST_SERVICES+=,savanna # Tenant User Roles # ------------------------------ # service savanna admin -function create_savanna_accounts() { +function create_savanna_accounts { SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }") @@ -88,14 +88,14 @@ function create_savanna_accounts() { # cleanup_savanna() - Remove residual data files, anything left over from # previous runs that would need to clean up. -function cleanup_savanna() { +function cleanup_savanna { # Cleanup auth cache dir sudo rm -rf $SAVANNA_AUTH_CACHE_DIR } # configure_savanna() - Set config files, create data dirs, etc -function configure_savanna() { +function configure_savanna { if [[ ! 
-d $SAVANNA_CONF_DIR ]]; then sudo mkdir -p $SAVANNA_CONF_DIR @@ -142,18 +142,18 @@ function configure_savanna() { } # install_savanna() - Collect source and prepare -function install_savanna() { +function install_savanna { git_clone $SAVANNA_REPO $SAVANNA_DIR $SAVANNA_BRANCH setup_develop $SAVANNA_DIR } # start_savanna() - Start running processes, including screen -function start_savanna() { +function start_savanna { screen_it savanna "cd $SAVANNA_DIR && $SAVANNA_BIN_DIR/savanna-api --config-file $SAVANNA_CONF_FILE" } # stop_savanna() - Stop running processes -function stop_savanna() { +function stop_savanna { # Kill the Savanna screen windows screen -S $SCREEN_NAME -p savanna -X kill } diff --git a/lib/savanna-dashboard b/lib/savanna-dashboard index 691b23f6e8..6fe15a3c81 100644 --- a/lib/savanna-dashboard +++ b/lib/savanna-dashboard @@ -35,7 +35,7 @@ SAVANNA_PYTHONCLIENT_DIR=$DEST/python-savannaclient # Functions # --------- -function configure_savanna_dashboard() { +function configure_savanna_dashboard { echo -e "AUTO_ASSIGNMENT_ENABLED = False" >> $HORIZON_DIR/openstack_dashboard/local/local_settings.py echo -e "HORIZON_CONFIG['dashboards'] += ('savanna',)" >> $HORIZON_DIR/openstack_dashboard/settings.py @@ -47,19 +47,19 @@ function configure_savanna_dashboard() { } # install_savanna_dashboard() - Collect source and prepare -function install_savanna_dashboard() { +function install_savanna_dashboard { install_python_savannaclient git_clone $SAVANNA_DASHBOARD_REPO $SAVANNA_DASHBOARD_DIR $SAVANNA_DASHBOARD_BRANCH setup_develop $SAVANNA_DASHBOARD_DIR } -function install_python_savannaclient() { +function install_python_savannaclient { git_clone $SAVANNA_PYTHONCLIENT_REPO $SAVANNA_PYTHONCLIENT_DIR $SAVANNA_PYTHONCLIENT_BRANCH setup_develop $SAVANNA_PYTHONCLIENT_DIR } # Cleanup file settings.py from Savanna -function cleanup_savanna_dashboard() { +function cleanup_savanna_dashboard { sed -i '/savanna/d' $HORIZON_DIR/openstack_dashboard/settings.py } diff --git 
a/lib/stackforge b/lib/stackforge index 5fa4570b74..dca08cc2c2 100644 --- a/lib/stackforge +++ b/lib/stackforge @@ -34,7 +34,7 @@ PECAN_DIR=$DEST/pecan # ------------ # install_stackforge() - Collect source and prepare -function install_stackforge() { +function install_stackforge { # TODO(sdague): remove this once we get to Icehouse, this just makes # for a smoother transition of existing users. cleanup_stackforge @@ -47,7 +47,7 @@ function install_stackforge() { } # cleanup_stackforge() - purge possibly old versions of stackforge libraries -function cleanup_stackforge() { +function cleanup_stackforge { # this means we've got an old version installed, lets get rid of it # otherwise python hates itself for lib in wsme pecan; do diff --git a/lib/swift b/lib/swift index 6c33af5082..59c1e54d8a 100644 --- a/lib/swift +++ b/lib/swift @@ -126,7 +126,7 @@ function is_swift_enabled { } # cleanup_swift() - Remove residual data files -function cleanup_swift() { +function cleanup_swift { rm -f ${SWIFT_CONF_DIR}{*.builder,*.ring.gz,backups/*.builder,backups/*.ring.gz} if egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then sudo umount ${SWIFT_DATA_DIR}/drives/sdb1 @@ -141,7 +141,7 @@ function cleanup_swift() { } # _cleanup_swift_apache_wsgi() - Remove wsgi files, disable and remove apache vhost file -function _cleanup_swift_apache_wsgi() { +function _cleanup_swift_apache_wsgi { sudo rm -f $SWIFT_APACHE_WSGI_DIR/*.wsgi disable_apache_site proxy-server for node_number in ${SWIFT_REPLICAS_SEQ}; do @@ -154,7 +154,7 @@ function _cleanup_swift_apache_wsgi() { } # _config_swift_apache_wsgi() - Set WSGI config files of Swift -function _config_swift_apache_wsgi() { +function _config_swift_apache_wsgi { sudo mkdir -p ${SWIFT_APACHE_WSGI_DIR} local apache_vhost_dir=/etc/${APACHE_NAME}/$APACHE_CONF_DIR local proxy_port=${SWIFT_DEFAULT_BIND_PORT:-8080} @@ -233,7 +233,7 @@ function _config_swift_apache_wsgi() { # This function generates an object/container/account configuration # 
emulating 4 nodes on different ports -function generate_swift_config() { +function generate_swift_config { local swift_node_config=$1 local node_id=$2 local bind_port=$3 @@ -272,7 +272,7 @@ function generate_swift_config() { # configure_swift() - Set config files, create data dirs and loop image -function configure_swift() { +function configure_swift { local swift_pipeline="${SWIFT_EXTRAS_MIDDLEWARE_NO_AUTH}" local node_number local swift_node_config @@ -460,7 +460,7 @@ EOF } # create_swift_disk - Create Swift backing disk -function create_swift_disk() { +function create_swift_disk { local node_number # First do a bit of setup by creating the directories and @@ -520,7 +520,7 @@ function create_swift_disk() { # swifttenanttest1 swiftusertest3 anotherrole # swifttenanttest2 swiftusertest2 admin -function create_swift_accounts() { +function create_swift_accounts { # Defines specific passwords used by tools/create_userrc.sh SWIFTUSERTEST1_PASSWORD=testing SWIFTUSERTEST2_PASSWORD=testing2 @@ -578,7 +578,7 @@ function create_swift_accounts() { } # init_swift() - Initialize rings -function init_swift() { +function init_swift { local node_number # Make sure to kill all swift processes first swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true @@ -612,7 +612,7 @@ function init_swift() { rm -f $SWIFT_AUTH_CACHE_DIR/* } -function install_swift() { +function install_swift { git_clone $SWIFT_REPO $SWIFT_DIR $SWIFT_BRANCH setup_develop $SWIFT_DIR if is_apache_enabled_service swift; then @@ -620,13 +620,13 @@ function install_swift() { fi } -function install_swiftclient() { +function install_swiftclient { git_clone $SWIFTCLIENT_REPO $SWIFTCLIENT_DIR $SWIFTCLIENT_BRANCH setup_develop $SWIFTCLIENT_DIR } # start_swift() - Start running processes, including screen -function start_swift() { +function start_swift { # (re)start rsyslog restart_service rsyslog # (re)start memcached to make sure we have a clean memcache. 
@@ -674,7 +674,7 @@ function start_swift() { } # stop_swift() - Stop running processes (non-screen) -function stop_swift() { +function stop_swift { if is_apache_enabled_service swift; then swift-init --run-dir=${SWIFT_DATA_DIR}/run rest stop && return 0 diff --git a/lib/tempest b/lib/tempest index 410c80c46d..16f8744d85 100644 --- a/lib/tempest +++ b/lib/tempest @@ -70,7 +70,7 @@ IPV6_ENABLED=$(trueorfalse True $IPV6_ENABLED) # --------- # configure_tempest() - Set config files, create data dirs, etc -function configure_tempest() { +function configure_tempest { setup_develop $TEMPEST_DIR local image_lines local images @@ -359,12 +359,12 @@ function configure_tempest() { } # install_tempest() - Collect source and prepare -function install_tempest() { +function install_tempest { git_clone $TEMPEST_REPO $TEMPEST_DIR $TEMPEST_BRANCH } # init_tempest() - Initialize ec2 images -function init_tempest() { +function init_tempest { local base_image_name=cirros-0.3.1-x86_64 # /opt/stack/devstack/files/images/cirros-0.3.1-x86_64-uec local image_dir="$FILES/images/${base_image_name}-uec" diff --git a/lib/template b/lib/template index b8e7c4d86f..efe5826f15 100644 --- a/lib/template +++ b/lib/template @@ -45,7 +45,7 @@ function is_XXXX_enabled { # cleanup_XXXX() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up -function cleanup_XXXX() { +function cleanup_XXXX { # kill instances (nova) # delete image files (glance) # This function intentionally left blank @@ -53,7 +53,7 @@ function cleanup_XXXX() { } # configure_XXXX() - Set config files, create data dirs, etc -function configure_XXXX() { +function configure_XXXX { # sudo python setup.py deploy # iniset $XXXX_CONF ... # This function intentionally left blank @@ -61,26 +61,26 @@ function configure_XXXX() { } # init_XXXX() - Initialize databases, etc. 
-function init_XXXX() { +function init_XXXX { # clean up from previous (possibly aborted) runs # create required data files : } # install_XXXX() - Collect source and prepare -function install_XXXX() { +function install_XXXX { # git clone xxx : } # start_XXXX() - Start running processes, including screen -function start_XXXX() { +function start_XXXX { # screen_it XXXX "cd $XXXX_DIR && $XXXX_DIR/bin/XXXX-bin" : } # stop_XXXX() - Stop running processes (non-screen) -function stop_XXXX() { +function stop_XXXX { # FIXME(dtroyer): stop only our screen screen window? : } diff --git a/lib/tls b/lib/tls index 6134fa1bad..072059d599 100644 --- a/lib/tls +++ b/lib/tls @@ -61,7 +61,7 @@ STUD_CIPHERS='TLSv1+HIGH:!DES:!aNULL:!eNULL:@STRENGTH' OPENSSL=${OPENSSL:-/usr/bin/openssl} # Do primary CA configuration -function configure_CA() { +function configure_CA { # build common config file # Verify ``TLS_IP`` is good @@ -73,7 +73,7 @@ function configure_CA() { # Creates a new CA directory structure # create_CA_base ca-dir -function create_CA_base() { +function create_CA_base { local ca_dir=$1 if [[ -d $ca_dir ]]; then @@ -92,7 +92,7 @@ function create_CA_base() { # Create a new CA configuration file # create_CA_config ca-dir common-name -function create_CA_config() { +function create_CA_config { local ca_dir=$1 local common_name=$2 @@ -145,7 +145,7 @@ keyUsage = cRLSign, keyCertSign # Create a new signing configuration file # create_signing_config ca-dir -function create_signing_config() { +function create_signing_config { local ca_dir=$1 echo " @@ -225,7 +225,7 @@ function init_cert { # make_cert creates and signs a new certificate with the given commonName and CA # make_cert ca-dir cert-name "common-name" ["alt-name" ...] 
-function make_cert() { +function make_cert { local ca_dir=$1 local cert_name=$2 local common_name=$3 @@ -261,7 +261,7 @@ function make_cert() { # Make an intermediate CA to sign everything else # make_int_CA ca-dir signing-ca-dir -function make_int_CA() { +function make_int_CA { local ca_dir=$1 local signing_ca_dir=$2 @@ -291,7 +291,7 @@ function make_int_CA() { # Make a root CA to sign other CAs # make_root_CA ca-dir -function make_root_CA() { +function make_root_CA { local ca_dir=$1 # Create the root CA @@ -319,7 +319,7 @@ function make_root_CA() { # is a short-circuit boolean, i.e it returns on the first match. # # Uses global ``SSL_ENABLED_SERVICES`` -function is_ssl_enabled_service() { +function is_ssl_enabled_service { services=$@ for service in ${services}; do [[ ,${SSL_ENABLED_SERVICES}, =~ ,${service}, ]] && return 0 @@ -337,7 +337,7 @@ function is_ssl_enabled_service() { # example for keystone this would be KEYSTONE_SSL_CERT, KEYSTONE_SSL_KEY and # KEYSTONE_SSL_CA. If it does not find these certificates the program will # quit. 
-function ensure_certificates() { +function ensure_certificates { local service=$1 local cert_var="${service}_SSL_CERT" @@ -362,7 +362,7 @@ function ensure_certificates() { # Starts the TLS proxy for the given IP/ports # start_tls_proxy front-host front-port back-host back-port -function start_tls_proxy() { +function start_tls_proxy { local f_host=$1 local f_port=$2 local b_host=$3 diff --git a/lib/trove b/lib/trove index 6834149c64..75b990f91e 100644 --- a/lib/trove +++ b/lib/trove @@ -53,7 +53,7 @@ function is_trove_enabled { } # setup_trove_logging() - Adds logging configuration to conf files -function setup_trove_logging() { +function setup_trove_logging { local CONF=$1 iniset $CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL iniset $CONF DEFAULT use_syslog $SYSLOG @@ -69,7 +69,7 @@ function setup_trove_logging() { # ------------------------------------------------------------------ # service trove admin # if enabled -create_trove_accounts() { +function create_trove_accounts { # Trove SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") SERVICE_ROLE=$(openstack role list | awk "/ admin / { print \$2 }") @@ -106,19 +106,19 @@ create_trove_accounts() { # cleanup_trove() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up -function cleanup_trove() { +function cleanup_trove { #Clean up dirs rm -fr $TROVE_AUTH_CACHE_DIR/* rm -fr $TROVE_CONF_DIR/* } # configure_troveclient() - Set config files, create data dirs, etc -function configure_troveclient() { +function configure_troveclient { setup_develop $TROVECLIENT_DIR } # configure_trove() - Set config files, create data dirs, etc -function configure_trove() { +function configure_trove { setup_develop $TROVE_DIR # Create the trove conf dir and cache dirs if they don't exist @@ -182,17 +182,17 @@ function configure_trove() { } # install_troveclient() - Collect source and prepare -function install_troveclient() { +function 
install_troveclient { git_clone $TROVECLIENT_REPO $TROVECLIENT_DIR $TROVECLIENT_BRANCH } # install_trove() - Collect source and prepare -function install_trove() { +function install_trove { git_clone $TROVE_REPO $TROVE_DIR $TROVE_BRANCH } # init_trove() - Initializes Trove Database as a Service -function init_trove() { +function init_trove { #(Re)Create trove db recreate_database trove utf8 @@ -201,14 +201,14 @@ function init_trove() { } # start_trove() - Start running processes, including screen -function start_trove() { +function start_trove { screen_it tr-api "cd $TROVE_DIR; $TROVE_BIN_DIR/trove-api --config-file=$TROVE_CONF_DIR/trove.conf --debug 2>&1" screen_it tr-tmgr "cd $TROVE_DIR; $TROVE_BIN_DIR/trove-taskmanager --config-file=$TROVE_CONF_DIR/trove-taskmanager.conf --debug 2>&1" screen_it tr-cond "cd $TROVE_DIR; $TROVE_BIN_DIR/trove-conductor --config-file=$TROVE_CONF_DIR/trove-conductor.conf --debug 2>&1" } # stop_trove() - Stop running processes -function stop_trove() { +function stop_trove { # Kill the trove screen windows for serv in tr-api tr-tmgr tr-cond; do screen_stop $serv diff --git a/stack.sh b/stack.sh index ac89e52515..a70267984c 100755 --- a/stack.sh +++ b/stack.sh @@ -464,7 +464,7 @@ fi # ----------------- # Draw a spinner so the user knows something is happening -function spinner() { +function spinner { local delay=0.75 local spinstr='/-\|' printf "..." >&3 @@ -479,7 +479,7 @@ function spinner() { # Echo text to the log file, summary log file and stdout # echo_summary "something to say" -function echo_summary() { +function echo_summary { if [[ -t 3 && "$VERBOSE" != "True" ]]; then kill >/dev/null 2>&1 $LAST_SPINNER_PID if [ ! 
-z "$LAST_SPINNER_PID" ]; then @@ -495,7 +495,7 @@ function echo_summary() { # Echo text only to stdout, no log files # echo_nolog "something not for the logs" -function echo_nolog() { +function echo_nolog { echo $@ >&3 } diff --git a/tests/functions.sh b/tests/functions.sh index 06a4134abf..874d02230d 100755 --- a/tests/functions.sh +++ b/tests/functions.sh @@ -42,7 +42,7 @@ fi echo "Testing enable_service()" -function test_enable_service() { +function test_enable_service { local start="$1" local add="$2" local finish="$3" @@ -68,7 +68,7 @@ test_enable_service 'a,b,c' c 'a,b,c' test_enable_service 'a,b,-c' c 'a,b' test_enable_service 'a,b,c' -c 'a,b' -function test_disable_service() { +function test_disable_service { local start="$1" local del="$2" local finish="$3" @@ -109,7 +109,7 @@ fi echo "Testing disable_negated_services()" -function test_disable_negated_services() { +function test_disable_negated_services { local start="$1" local finish="$2" diff --git a/tests/test_config.sh b/tests/test_config.sh index 39603c9dbe..5700f8df29 100755 --- a/tests/test_config.sh +++ b/tests/test_config.sh @@ -12,7 +12,7 @@ source $TOP/lib/config # check_result() tests and reports the result values # check_result "actual" "expected" -function check_result() { +function check_result { local actual=$1 local expected=$2 if [[ "$actual" == "$expected" ]]; then @@ -26,7 +26,7 @@ TEST_1C_ADD="[eee] type=new multi = foo2" -function create_test1c() { +function create_test1c { cat >test1c.conf <test2a.conf <\w+)", line) @@ -169,6 +184,7 @@ def check_files(files, verbose): check_indents(logical_line) check_for_do(logical_line) check_if_then(logical_line) + check_function_decl(logical_line) prev_line = logical_line prev_lineno = fileinput.filelineno() diff --git a/tools/build_pxe_env.sh b/tools/build_pxe_env.sh index e6f98b4b75..50d91d063c 100755 --- a/tools/build_pxe_env.sh +++ b/tools/build_pxe_env.sh @@ -17,7 +17,7 @@ PXEDIR=${PXEDIR:-/opt/ramstack/pxe} PROGDIR=`dirname $0` # Clean up 
any resources that may be in use -cleanup() { +function cleanup { set +o errexit # Mop up temporary files diff --git a/tools/build_ramdisk.sh b/tools/build_ramdisk.sh index 737255578a..50ba8ef2ca 100755 --- a/tools/build_ramdisk.sh +++ b/tools/build_ramdisk.sh @@ -14,7 +14,7 @@ if [ ! "$#" -eq "1" ]; then fi # Clean up any resources that may be in use -cleanup() { +function cleanup { set +o errexit # Mop up temporary files @@ -87,7 +87,7 @@ fi # Finds and returns full device path for the next available NBD device. # Exits script if error connecting or none free. # map_nbd image -function map_nbd() { +function map_nbd { for i in `seq 0 15`; do if [ ! -e /sys/block/nbd$i/pid ]; then NBD=/dev/nbd$i diff --git a/tools/build_uec_ramdisk.sh b/tools/build_uec_ramdisk.sh index 3ab5dafdcb..5f3acc5684 100755 --- a/tools/build_uec_ramdisk.sh +++ b/tools/build_uec_ramdisk.sh @@ -20,7 +20,7 @@ if ! egrep -q "oneiric" /etc/lsb-release; then fi # Clean up resources that may be in use -cleanup() { +function cleanup { set +o errexit if [ -n "$MNT_DIR" ]; then diff --git a/tools/build_usb_boot.sh b/tools/build_usb_boot.sh index 8566229833..c97e0a143d 100755 --- a/tools/build_usb_boot.sh +++ b/tools/build_usb_boot.sh @@ -13,7 +13,7 @@ DEST_DIR=${1:-/tmp/syslinux-boot} PXEDIR=${PXEDIR:-/opt/ramstack/pxe} # Clean up any resources that may be in use -cleanup() { +function cleanup { set +o errexit # Mop up temporary files diff --git a/tools/copy_dev_environment_to_uec.sh b/tools/copy_dev_environment_to_uec.sh index 3fd4423f86..94a4926668 100755 --- a/tools/copy_dev_environment_to_uec.sh +++ b/tools/copy_dev_environment_to_uec.sh @@ -22,7 +22,7 @@ cd $TOP_DIR source ./stackrc # Echo usage -usage() { +function usage { echo "Add stack user and keys" echo "" echo "Usage: $0 [full path to raw uec base image]" diff --git a/tools/create_userrc.sh b/tools/create_userrc.sh index cd5a1c9643..47da3341b8 100755 --- a/tools/create_userrc.sh +++ b/tools/create_userrc.sh @@ -11,8 +11,7 @@ set -o xtrace 
ACCOUNT_DIR=./accrc -display_help() -{ +function display_help { cat < @@ -151,7 +150,7 @@ if ! nova x509-get-root-cert "$EUCALYPTUS_CERT"; then fi -function add_entry(){ +function add_entry { local user_id=$1 local user_name=$2 local tenant_id=$3 @@ -213,7 +212,7 @@ EOF } #admin users expected -function create_or_get_tenant(){ +function create_or_get_tenant { local tenant_name=$1 local tenant_id=`keystone tenant-list | awk '/\|[[:space:]]*'"$tenant_name"'[[:space:]]*\|.*\|/ {print $2}'` if [ -n "$tenant_id" ]; then @@ -223,7 +222,7 @@ function create_or_get_tenant(){ fi } -function create_or_get_role(){ +function create_or_get_role { local role_name=$1 local role_id=`keystone role-list| awk '/\|[[:space:]]*'"$role_name"'[[:space:]]*\|/ {print $2}'` if [ -n "$role_id" ]; then @@ -234,7 +233,7 @@ function create_or_get_role(){ } # Provides empty string when the user does not exists -function get_user_id(){ +function get_user_id { local user_name=$1 keystone user-list | awk '/^\|[^|]*\|[[:space:]]*'"$user_name"'[[:space:]]*\|.*\|/ {print $2}' } diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index 47b0cd10cd..7833278a12 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -40,7 +40,7 @@ FILES=$TOP_DIR/files # --------------- # get_package_path python-package # in import notation -function get_package_path() { +function get_package_path { local package=$1 echo $(python -c "import os; import $package; print(os.path.split(os.path.realpath($package.__file__))[0])") } diff --git a/tools/get_uec_image.sh b/tools/get_uec_image.sh index da13f4b875..225742c041 100755 --- a/tools/get_uec_image.sh +++ b/tools/get_uec_image.sh @@ -18,7 +18,7 @@ TOP_DIR=$(cd $TOOLS_DIR/..; pwd) set -o errexit set -o xtrace -usage() { +function usage { echo "Usage: $0 - Download and prepare Ubuntu UEC images" echo "" echo "$0 [-r rootsize] release imagefile [kernel]" @@ -31,7 +31,7 @@ usage() { } # Clean up any resources that may be in use -cleanup() { +function cleanup { set +o 
errexit # Mop up temporary files diff --git a/tools/info.sh b/tools/info.sh index 1e521b9c4b..a8f9544073 100755 --- a/tools/info.sh +++ b/tools/info.sh @@ -61,7 +61,7 @@ fi # ----- # git_report -function git_report() { +function git_report { local dir=$1 local proj ref branch head if [[ -d $dir/.git ]]; then diff --git a/tools/install_openvpn.sh b/tools/install_openvpn.sh index 2f52aa14d0..9a4f0369d5 100755 --- a/tools/install_openvpn.sh +++ b/tools/install_openvpn.sh @@ -22,7 +22,7 @@ if [ -e vpnrc ]; then fi # Do some IP manipulation -function cidr2netmask() { +function cidr2netmask { set -- $(( 5 - ($1 / 8) )) 255 255 255 255 $(( (255 << (8 - ($1 % 8))) & 255 )) 0 0 0 if [[ $1 -gt 1 ]]; then shift $1 @@ -50,7 +50,7 @@ VPN_CLIENT_DHCP="${VPN_CLIENT_DHCP:-net.1 net.254}" VPN_DIR=/etc/openvpn CA_DIR=$VPN_DIR/easy-rsa -usage() { +function usage { echo "$0 - OpenVPN install and certificate generation" echo "" echo "$0 --client name" @@ -102,7 +102,7 @@ if [ ! -r $CA_DIR/keys/dh1024.pem ]; then openvpn --genkey --secret $CA_DIR/keys/ta.key ## Build a TLS key fi -do_server() { +function do_server { NAME=$1 # Generate server certificate $CA_DIR/pkitool --server $NAME @@ -162,7 +162,7 @@ EOF /etc/init.d/openvpn restart } -do_client() { +function do_client { NAME=$1 # Generate a client certificate $CA_DIR/pkitool $NAME diff --git a/tools/install_pip.sh b/tools/install_pip.sh index d714d33530..9fa161e043 100755 --- a/tools/install_pip.sh +++ b/tools/install_pip.sh @@ -50,7 +50,7 @@ PIP_TAR_URL=https://pypi.python.org/packages/source/p/pip/pip-$INSTALL_PIP_VERSI GetDistro echo "Distro: $DISTRO" -function get_versions() { +function get_versions { PIP=$(which pip 2>/dev/null || which pip-python 2>/dev/null || true) if [[ -n $PIP ]]; then PIP_VERSION=$($PIP --version | awk '{ print $2}') @@ -61,7 +61,7 @@ function get_versions() { } -function install_get_pip() { +function install_get_pip { if [[ ! 
-r $FILES/get-pip.py ]]; then (cd $FILES; \ curl -O $PIP_GET_PIP_URL; \ @@ -70,7 +70,7 @@ function install_get_pip() { sudo -E python $FILES/get-pip.py } -function install_pip_tarball() { +function install_pip_tarball { (cd $FILES; \ curl -O $PIP_TAR_URL; \ tar xvfz pip-$INSTALL_PIP_VERSION.tar.gz 1>/dev/null; \ diff --git a/tools/jenkins/build_configuration.sh b/tools/jenkins/build_configuration.sh index e295ef2017..64ee159651 100755 --- a/tools/jenkins/build_configuration.sh +++ b/tools/jenkins/build_configuration.sh @@ -5,7 +5,7 @@ CONFIGURATION=$2 ADAPTER=$3 RC=$4 -function usage() { +function usage { echo "Usage: $0 - Build a configuration" echo "" echo "$0 [EXECUTOR_NUMBER] [CONFIGURATION] [ADAPTER] [RC (optional)]" diff --git a/tools/jenkins/configurations/kvm.sh b/tools/jenkins/configurations/kvm.sh index d9a160ad76..6927fd7c29 100755 --- a/tools/jenkins/configurations/kvm.sh +++ b/tools/jenkins/configurations/kvm.sh @@ -9,7 +9,7 @@ CONFIGURATION=$2 ADAPTER=$3 RC=$4 -function usage() { +function usage { echo "Usage: $0 - Build a test configuration" echo "" echo "$0 [EXECUTOR_NUMBER] [CONFIGURATION] [ADAPTER] [RC (optional)]" diff --git a/tools/jenkins/configurations/xs.sh b/tools/jenkins/configurations/xs.sh index 864f949114..7b671e9df4 100755 --- a/tools/jenkins/configurations/xs.sh +++ b/tools/jenkins/configurations/xs.sh @@ -8,7 +8,7 @@ CONFIGURATION=$2 ADAPTER=$3 RC=$4 -function usage() { +function usage { echo "Usage: $0 - Build a test configuration" echo "" echo "$0 [EXECUTOR_NUMBER] [CONFIGURATION] [ADAPTER] [RC (optional)]" diff --git a/tools/jenkins/run_test.sh b/tools/jenkins/run_test.sh index 464956375e..d2b82843b4 100755 --- a/tools/jenkins/run_test.sh +++ b/tools/jenkins/run_test.sh @@ -4,7 +4,7 @@ EXECUTOR_NUMBER=$1 ADAPTER=$2 RC=$3 -function usage() { +function usage { echo "Usage: $0 - Run a test" echo "" echo "$0 [EXECUTOR_NUMBER] [ADAPTER] [RC (optional)]" diff --git a/tools/warm_apts_for_uec.sh b/tools/warm_apts_for_uec.sh index 
3c15f52ee3..c57fc2e59c 100755 --- a/tools/warm_apts_for_uec.sh +++ b/tools/warm_apts_for_uec.sh @@ -16,7 +16,7 @@ TOP_DIR=`cd $TOOLS_DIR/..; pwd` cd $TOP_DIR # Echo usage -usage() { +function usage { echo "Cache OpenStack dependencies on a uec image to speed up performance." echo "" echo "Usage: $0 [full path to raw uec base image]" diff --git a/tools/xen/build_xva.sh b/tools/xen/build_xva.sh index fbbfd6fbe5..cc3cbe18d1 100755 --- a/tools/xen/build_xva.sh +++ b/tools/xen/build_xva.sh @@ -42,7 +42,7 @@ source xenrc # GUEST_NAME="$1" -function _print_interface_config() { +function _print_interface_config { local device_nr local ip_address local netmask @@ -68,7 +68,7 @@ function _print_interface_config() { echo " post-up ethtool -K $device tx off" } -function print_interfaces_config() { +function print_interfaces_config { echo "auto lo" echo "iface lo inet loopback" diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh index 7b59bae6b8..a4b3e06e88 100755 --- a/tools/xen/install_os_domU.sh +++ b/tools/xen/install_os_domU.sh @@ -166,7 +166,7 @@ TNAME="jeos_template_for_devstack" SNAME_TEMPLATE="jeos_snapshot_for_devstack" SNAME_FIRST_BOOT="before_first_boot" -function wait_for_VM_to_halt() { +function wait_for_VM_to_halt { set +x echo "Waiting for the VM to halt. 
Progress in-VM can be checked with vncviewer:" mgmt_ip=$(echo $XENAPI_CONNECTION_URL | tr -d -c '1234567890.') @@ -318,7 +318,7 @@ xe vm-snapshot vm="$GUEST_NAME" new-name-label="$SNAME_FIRST_BOOT" # xe vm-start vm="$GUEST_NAME" -function ssh_no_check() { +function ssh_no_check { ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no "$@" } @@ -349,7 +349,7 @@ DOMID=$(get_domid "$GUEST_NAME") xenstore-write /local/domain/$DOMID/authorized_keys/$DOMZERO_USER "$(cat /root/dom0key.pub)" xenstore-chmod -u /local/domain/$DOMID/authorized_keys/$DOMZERO_USER r$DOMID -function run_on_appliance() { +function run_on_appliance { ssh \ -i /root/dom0key \ -o UserKnownHostsFile=/dev/null \ diff --git a/tools/xen/prepare_guest.sh b/tools/xen/prepare_guest.sh index 094612624b..440774ec5b 100755 --- a/tools/xen/prepare_guest.sh +++ b/tools/xen/prepare_guest.sh @@ -21,7 +21,7 @@ STACK_USER="$3" DOMZERO_USER="$4" -function setup_domzero_user() { +function setup_domzero_user { local username username="$1" From e2907b4838230940a8ff1735feffd80acf13bdab Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 26 Feb 2014 17:35:37 -0600 Subject: [PATCH 0784/4704] Fix Neutron enabled check * Remove the check for neutron enabled on a block of variable settings, there is no conflict and serves no purpose. * Also floating_ips.sh and volume.sh needed to properly source lib/neutron for do ping_check() to work properly. The current error in check-devstack-dsvm-neutron is not related to this fix. 
Change-Id: I1c458aaa787ffb98c945aefc3afa80c6861a405f --- exercises/floating_ips.sh | 6 +- exercises/volumes.sh | 7 +- lib/neutron | 154 +++++++++++++++++++------------------- 3 files changed, 84 insertions(+), 83 deletions(-) diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh index b981aa8294..8dc44effbc 100755 --- a/exercises/floating_ips.sh +++ b/exercises/floating_ips.sh @@ -27,12 +27,12 @@ TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) # Import common functions source $TOP_DIR/functions -# Import project functions -source $TOP_DIR/lib/neutron - # Import configuration source $TOP_DIR/openrc +# Import project functions +source $TOP_DIR/lib/neutron + # Import exercise configuration source $TOP_DIR/exerciserc diff --git a/exercises/volumes.sh b/exercises/volumes.sh index 33e24589eb..83d25c779c 100755 --- a/exercises/volumes.sh +++ b/exercises/volumes.sh @@ -27,12 +27,13 @@ TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) # Import common functions source $TOP_DIR/functions -# Import project functions -source $TOP_DIR/lib/cinder - # Import configuration source $TOP_DIR/openrc +# Import project functions +source $TOP_DIR/lib/cinder +source $TOP_DIR/lib/neutron + # Import exercise configuration source $TOP_DIR/exerciserc diff --git a/lib/neutron b/lib/neutron index df276c71d5..be123adcd5 100644 --- a/lib/neutron +++ b/lib/neutron @@ -59,10 +59,6 @@ # LinuxBridge plugin, please see the top level README file under the # Neutron section. 
-# Save trace setting -XTRACE=$(set +o | grep xtrace) -set +o xtrace - # Neutron Network Configuration # ----------------------------- @@ -127,82 +123,81 @@ Q_L3_ROUTER_PER_TENANT=${Q_L3_ROUTER_PER_TENANT:-False} # See _configure_neutron_common() for details about setting it up declare -a Q_PLUGIN_EXTRA_CONF_FILES -if is_service_enabled neutron; then - Q_RR_CONF_FILE=$NEUTRON_CONF_DIR/rootwrap.conf - if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then - Q_RR_COMMAND="sudo" - else - NEUTRON_ROOTWRAP=$(get_rootwrap_location neutron) - Q_RR_COMMAND="sudo $NEUTRON_ROOTWRAP $Q_RR_CONF_FILE" - fi - - # Provider Network Configurations - # -------------------------------- - - # The following variables control the Neutron openvswitch and - # linuxbridge plugins' allocation of tenant networks and - # availability of provider networks. If these are not configured - # in ``localrc``, tenant networks will be local to the host (with no - # remote connectivity), and no physical resources will be - # available for the allocation of provider networks. - - # To use GRE tunnels for tenant networks, set to True in - # ``localrc``. GRE tunnels are only supported by the openvswitch - # plugin, and currently only on Ubuntu. - ENABLE_TENANT_TUNNELS=${ENABLE_TENANT_TUNNELS:-False} - - # If using GRE tunnels for tenant networks, specify the range of - # tunnel IDs from which tenant networks are allocated. Can be - # overriden in ``localrc`` in necesssary. - TENANT_TUNNEL_RANGES=${TENANT_TUNNEL_RANGE:-1:1000} - - # To use VLANs for tenant networks, set to True in localrc. VLANs - # are supported by the openvswitch and linuxbridge plugins, each - # requiring additional configuration described below. - ENABLE_TENANT_VLANS=${ENABLE_TENANT_VLANS:-False} - - # If using VLANs for tenant networks, set in ``localrc`` to specify - # the range of VLAN VIDs from which tenant networks are - # allocated. 
An external network switch must be configured to - # trunk these VLANs between hosts for multi-host connectivity. - # - # Example: ``TENANT_VLAN_RANGE=1000:1999`` - TENANT_VLAN_RANGE=${TENANT_VLAN_RANGE:-} - - # If using VLANs for tenant networks, or if using flat or VLAN - # provider networks, set in ``localrc`` to the name of the physical - # network, and also configure ``OVS_PHYSICAL_BRIDGE`` for the - # openvswitch agent or ``LB_PHYSICAL_INTERFACE`` for the linuxbridge - # agent, as described below. - # - # Example: ``PHYSICAL_NETWORK=default`` - PHYSICAL_NETWORK=${PHYSICAL_NETWORK:-} - - # With the openvswitch plugin, if using VLANs for tenant networks, - # or if using flat or VLAN provider networks, set in ``localrc`` to - # the name of the OVS bridge to use for the physical network. The - # bridge will be created if it does not already exist, but a - # physical interface must be manually added to the bridge as a - # port for external connectivity. - # - # Example: ``OVS_PHYSICAL_BRIDGE=br-eth1`` - OVS_PHYSICAL_BRIDGE=${OVS_PHYSICAL_BRIDGE:-} - - # With the linuxbridge plugin, if using VLANs for tenant networks, - # or if using flat or VLAN provider networks, set in ``localrc`` to - # the name of the network interface to use for the physical - # network. - # - # Example: ``LB_PHYSICAL_INTERFACE=eth1`` - LB_PHYSICAL_INTERFACE=${LB_PHYSICAL_INTERFACE:-} - # With the openvswitch plugin, set to True in ``localrc`` to enable - # provider GRE tunnels when ``ENABLE_TENANT_TUNNELS`` is False. 
- # - # Example: ``OVS_ENABLE_TUNNELING=True`` - OVS_ENABLE_TUNNELING=${OVS_ENABLE_TUNNELING:-$ENABLE_TENANT_TUNNELS} +Q_RR_CONF_FILE=$NEUTRON_CONF_DIR/rootwrap.conf +if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then + Q_RR_COMMAND="sudo" +else + NEUTRON_ROOTWRAP=$(get_rootwrap_location neutron) + Q_RR_COMMAND="sudo $NEUTRON_ROOTWRAP $Q_RR_CONF_FILE" fi +# Provider Network Configurations +# -------------------------------- + +# The following variables control the Neutron openvswitch and +# linuxbridge plugins' allocation of tenant networks and +# availability of provider networks. If these are not configured +# in ``localrc``, tenant networks will be local to the host (with no +# remote connectivity), and no physical resources will be +# available for the allocation of provider networks. + +# To use GRE tunnels for tenant networks, set to True in +# ``localrc``. GRE tunnels are only supported by the openvswitch +# plugin, and currently only on Ubuntu. +ENABLE_TENANT_TUNNELS=${ENABLE_TENANT_TUNNELS:-False} + +# If using GRE tunnels for tenant networks, specify the range of +# tunnel IDs from which tenant networks are allocated. Can be +# overriden in ``localrc`` in necesssary. +TENANT_TUNNEL_RANGES=${TENANT_TUNNEL_RANGE:-1:1000} + +# To use VLANs for tenant networks, set to True in localrc. VLANs +# are supported by the openvswitch and linuxbridge plugins, each +# requiring additional configuration described below. +ENABLE_TENANT_VLANS=${ENABLE_TENANT_VLANS:-False} + +# If using VLANs for tenant networks, set in ``localrc`` to specify +# the range of VLAN VIDs from which tenant networks are +# allocated. An external network switch must be configured to +# trunk these VLANs between hosts for multi-host connectivity. 
+# +# Example: ``TENANT_VLAN_RANGE=1000:1999`` +TENANT_VLAN_RANGE=${TENANT_VLAN_RANGE:-} + +# If using VLANs for tenant networks, or if using flat or VLAN +# provider networks, set in ``localrc`` to the name of the physical +# network, and also configure ``OVS_PHYSICAL_BRIDGE`` for the +# openvswitch agent or ``LB_PHYSICAL_INTERFACE`` for the linuxbridge +# agent, as described below. +# +# Example: ``PHYSICAL_NETWORK=default`` +PHYSICAL_NETWORK=${PHYSICAL_NETWORK:-} + +# With the openvswitch plugin, if using VLANs for tenant networks, +# or if using flat or VLAN provider networks, set in ``localrc`` to +# the name of the OVS bridge to use for the physical network. The +# bridge will be created if it does not already exist, but a +# physical interface must be manually added to the bridge as a +# port for external connectivity. +# +# Example: ``OVS_PHYSICAL_BRIDGE=br-eth1`` +OVS_PHYSICAL_BRIDGE=${OVS_PHYSICAL_BRIDGE:-} + +# With the linuxbridge plugin, if using VLANs for tenant networks, +# or if using flat or VLAN provider networks, set in ``localrc`` to +# the name of the network interface to use for the physical +# network. +# +# Example: ``LB_PHYSICAL_INTERFACE=eth1`` +LB_PHYSICAL_INTERFACE=${LB_PHYSICAL_INTERFACE:-} + +# With the openvswitch plugin, set to True in ``localrc`` to enable +# provider GRE tunnels when ``ENABLE_TENANT_TUNNELS`` is False. +# +# Example: ``OVS_ENABLE_TUNNELING=True`` +OVS_ENABLE_TUNNELING=${OVS_ENABLE_TUNNELING:-$ENABLE_TENANT_TUNNELS} + # Neutron plugin specific functions # --------------------------------- @@ -241,6 +236,11 @@ fi TEMPEST_SERVICES+=,neutron +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace + + # Functions # --------- From 1237922b655d8ab1690b88c718d7002415ce1201 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Thu, 27 Feb 2014 17:16:46 -0500 Subject: [PATCH 0785/4704] make service_check fatal if we fail service check, we should do so in a fatal way, because something is not right. 
This will be very useful in grenade. Change-Id: I18811b0d8e6d06f364685c366cdc8f5dda3b8f7e --- functions-common | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/functions-common b/functions-common index 2248fbb610..ab7bc2139b 100644 --- a/functions-common +++ b/functions-common @@ -1135,7 +1135,7 @@ function service_check() { done if [ -n "$failures" ]; then - echo "More details about the above errors can be found with screen, with ./rejoin-stack.sh" + die $LINENO "More details about the above errors can be found with screen, with ./rejoin-stack.sh" fi } From c03f975150bf97b5aef42daa77fc419a9e241123 Mon Sep 17 00:00:00 2001 From: sukhdev Date: Thu, 27 Feb 2014 14:17:44 -0800 Subject: [PATCH 0786/4704] devstack (stack.sh) fails when extra config files are specified Latest merge of https://review.openstack.org/#/c/71996/ exposes an issue in stack.sh which did not surface before. Please see the details of the issue in the bug description. Closes bug: 1285884 Change-Id: Ie231c9835497c2a418a61d339dfd5df1aab9e3d7 --- lib/neutron | 2 -- 1 file changed, 2 deletions(-) diff --git a/lib/neutron b/lib/neutron index df276c71d5..07b21af336 100644 --- a/lib/neutron +++ b/lib/neutron @@ -586,11 +586,9 @@ function _configure_neutron_common() { # If additional config files exist, copy them over to neutron configuration # directory if [[ $Q_PLUGIN_EXTRA_CONF_PATH != '' ]]; then - mkdir -p /$Q_PLUGIN_EXTRA_CONF_PATH local f for (( f=0; $f < ${#Q_PLUGIN_EXTRA_CONF_FILES[@]}; f+=1 )); do Q_PLUGIN_EXTRA_CONF_FILES[$f]=$Q_PLUGIN_EXTRA_CONF_PATH/${Q_PLUGIN_EXTRA_CONF_FILES[$f]} - cp $NEUTRON_DIR/${Q_PLUGIN_EXTRA_CONF_FILES[$f]} /${Q_PLUGIN_EXTRA_CONF_FILES[$f]} done fi From 531aeb7900fd7f24794efb8f9da5fce65dc80f4b Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Fri, 28 Feb 2014 11:24:29 +1100 Subject: [PATCH 0787/4704] Preinstall yum-utils; move sudo check before install Some cloud images don't have yum-utils installed, so the call to yum-config-manager fails. 
Pre-install it (I still think it's easier than fiddling config files). Also, these repo setup steps are using sudo, but the root/sudo checks happen after this. Move them up before we start trying to do repo/package installs. Change-Id: I875e1f0663c9badc00278b2cc1a3b04ca3dde9fc --- stack.sh | 91 ++++++++++++++++++++++++++++---------------------------- 1 file changed, 46 insertions(+), 45 deletions(-) diff --git a/stack.sh b/stack.sh index ac89e52515..669209c865 100755 --- a/stack.sh +++ b/stack.sh @@ -161,9 +161,42 @@ fi # Set up logging level VERBOSE=$(trueorfalse True $VERBOSE) +# root Access +# ----------- + +# OpenStack is designed to be run as a non-root user; Horizon will fail to run +# as **root** since Apache will not serve content from **root** user). +# ``stack.sh`` must not be run as **root**. It aborts and suggests one course of +# action to create a suitable user account. + +if [[ $EUID -eq 0 ]]; then + echo "You are running this script as root." + echo "Cut it out." + echo "Really." 
+ echo "If you need an account to run DevStack, do this (as root, heh) to create $STACK_USER:" + echo "$TOP_DIR/tools/create-stack-user.sh" + exit 1 +fi + +# We're not **root**, make sure ``sudo`` is available +is_package_installed sudo || install_package sudo + +# UEC images ``/etc/sudoers`` does not have a ``#includedir``, add one +sudo grep -q "^#includedir.*/etc/sudoers.d" /etc/sudoers || + echo "#includedir /etc/sudoers.d" | sudo tee -a /etc/sudoers + +# Set up devstack sudoers +TEMPFILE=`mktemp` +echo "$STACK_USER ALL=(root) NOPASSWD:ALL" >$TEMPFILE +# Some binaries might be under /sbin or /usr/sbin, so make sure sudo will +# see them by forcing PATH +echo "Defaults:$STACK_USER secure_path=/sbin:/usr/sbin:/usr/bin:/bin:/usr/local/sbin:/usr/local/bin" >> $TEMPFILE +chmod 0440 $TEMPFILE +sudo chown root:root $TEMPFILE +sudo mv $TEMPFILE /etc/sudoers.d/50_stack_sh # Additional repos -# ================ +# ---------------- # Some distros need to add repos beyond the defaults provided by the vendor # to pick up required packages. @@ -196,45 +229,13 @@ if [[ is_fedora && $DISTRO =~ (rhel6) ]]; then fi # ... and also optional to be enabled + is_package_installed yum-utils || install_package yum-utils sudo yum-config-manager --enable rhel-6-server-optional-rpms fi - -# root Access -# ----------- - -# OpenStack is designed to be run as a non-root user; Horizon will fail to run -# as **root** since Apache will not serve content from **root** user). -# ``stack.sh`` must not be run as **root**. It aborts and suggests one course of -# action to create a suitable user account. - -if [[ $EUID -eq 0 ]]; then - echo "You are running this script as root." - echo "Cut it out." - echo "Really." 
- echo "If you need an account to run DevStack, do this (as root, heh) to create $STACK_USER:" - echo "$TOP_DIR/tools/create-stack-user.sh" - exit 1 -fi - -# We're not **root**, make sure ``sudo`` is available -is_package_installed sudo || install_package sudo - -# UEC images ``/etc/sudoers`` does not have a ``#includedir``, add one -sudo grep -q "^#includedir.*/etc/sudoers.d" /etc/sudoers || - echo "#includedir /etc/sudoers.d" | sudo tee -a /etc/sudoers - -# Set up devstack sudoers -TEMPFILE=`mktemp` -echo "$STACK_USER ALL=(root) NOPASSWD:ALL" >$TEMPFILE -# Some binaries might be under /sbin or /usr/sbin, so make sure sudo will -# see them by forcing PATH -echo "Defaults:$STACK_USER secure_path=/sbin:/usr/sbin:/usr/bin:/bin:/usr/local/sbin:/usr/local/bin" >> $TEMPFILE -chmod 0440 $TEMPFILE -sudo chown root:root $TEMPFILE -sudo mv $TEMPFILE /etc/sudoers.d/50_stack_sh - +# Filesystem setup +# ---------------- # Create the destination directory and ensure it is writable by the user # and read/executable by everybody for daemons (e.g. apache run for horizon) @@ -252,6 +253,15 @@ if [ -z "`grep ^127.0.0.1 /etc/hosts | grep $LOCAL_HOSTNAME`" ]; then sudo sed -i "s/\(^127.0.0.1.*\)/\1 $LOCAL_HOSTNAME/" /etc/hosts fi +# Destination path for service data +DATA_DIR=${DATA_DIR:-${DEST}/data} +sudo mkdir -p $DATA_DIR +safe_chown -R $STACK_USER $DATA_DIR + + +# Common Configuration +# -------------------- + # Set ``OFFLINE`` to ``True`` to configure ``stack.sh`` to run cleanly without # Internet access. ``stack.sh`` must have been previously run with Internet # access to install prerequisites and fetch repositories. 
@@ -265,15 +275,6 @@ ERROR_ON_CLONE=`trueorfalse False $ERROR_ON_CLONE` # Whether to enable the debug log level in OpenStack services ENABLE_DEBUG_LOG_LEVEL=`trueorfalse True $ENABLE_DEBUG_LOG_LEVEL` -# Destination path for service data -DATA_DIR=${DATA_DIR:-${DEST}/data} -sudo mkdir -p $DATA_DIR -safe_chown -R $STACK_USER $DATA_DIR - - -# Common Configuration -# ==================== - # Set fixed and floating range here so we can make sure not to use addresses # from either range when attempting to guess the IP to use for the host. # Note that setting FIXED_RANGE may be necessary when running DevStack From cb415697f37d3df2965f71b19c909a4c50f32eed Mon Sep 17 00:00:00 2001 From: Shashank Hegde Date: Thu, 27 Feb 2014 16:46:43 -0800 Subject: [PATCH 0788/4704] clean.sh removes all the files clean.sh was incorrectly looping over the list of files to remove. Because of this the files were not being removed. Change-Id: Ie0559e1d396a4d35df6a12dfbceefa7eb261bac5 Closes-Bug:1285924 --- clean.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clean.sh b/clean.sh index e121e4f703..3707d8411e 100755 --- a/clean.sh +++ b/clean.sh @@ -123,6 +123,6 @@ fi FILES_TO_CLEAN=".localrc.auto docs-files docs/ shocco/ stack-screenrc test*.conf* test.ini*" FILES_TO_CLEAN+=".stackenv .prereqs" -for file in FILES_TO_CLEAN; do +for file in $FILES_TO_CLEAN; do rm -f $TOP_DIR/$file done From d20f632a70565003ab8c72b2598201be79f4d782 Mon Sep 17 00:00:00 2001 From: Masayuki Igawa Date: Fri, 28 Feb 2014 09:22:37 +0900 Subject: [PATCH 0789/4704] Move some comments of variables to right place setup_develop*() in functions has been moved to functions-common. But some comments about the variables are still left. This commit moves it to the right place. 
Change-Id: Ic360454f1ee72f51c9979d0468dee0913e9b32e4 --- functions | 4 ---- functions-common | 3 +++ 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/functions b/functions index 3101111c63..407a9e708c 100644 --- a/functions +++ b/functions @@ -6,10 +6,6 @@ # - ``ENABLED_SERVICES`` # - ``FILES`` # - ``GLANCE_HOSTPORT`` -# - ``REQUIREMENTS_DIR`` -# - ``STACK_USER`` -# - ``TRACK_DEPENDS`` -# - ``UNDO_REQUIREMENTS`` # # Include the common functions diff --git a/functions-common b/functions-common index c93dd855b3..a485cae9d9 100644 --- a/functions-common +++ b/functions-common @@ -26,7 +26,10 @@ # - ``PIP_DOWNLOAD_CACHE`` # - ``PIP_USE_MIRRORS`` # - ``RECLONE`` +# - ``REQUIREMENTS_DIR`` +# - ``STACK_USER`` # - ``TRACK_DEPENDS`` +# - ``UNDO_REQUIREMENTS`` # - ``http_proxy``, ``https_proxy``, ``no_proxy`` # Save trace setting From 9bbecb7fc45538bc83d7db5e33a55505a691b44d Mon Sep 17 00:00:00 2001 From: Brian Haley Date: Fri, 28 Feb 2014 11:19:28 -0500 Subject: [PATCH 0790/4704] Source lib/neutron in boot_from_volume.sh Without lib/neutron, boot_from_volume.sh generates the following error: + _ping_check_neutron private 10.11.12.5 30 /devstack/functions: line 356: _ping_check_neutron: command not found Change-Id: Ib72c3f24d614570d69bf5dda35cbaf5847b1d1b9 --- exercises/boot_from_volume.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh index 79120460b8..f679669eea 100755 --- a/exercises/boot_from_volume.sh +++ b/exercises/boot_from_volume.sh @@ -32,6 +32,7 @@ source $TOP_DIR/functions # Import project functions source $TOP_DIR/lib/cinder +source $TOP_DIR/lib/neutron # Import configuration source $TOP_DIR/openrc From 0e57b967e558fa843277d0119e50f0cb807929a2 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Fri, 28 Feb 2014 09:09:52 +0100 Subject: [PATCH 0791/4704] Devstack install can fail on missing xinetd.d/rsync config Assuming if the system does not have the xinetd.d/rsync, the dedicated 
service is the prefered way. Change-Id: Ic42651c5c3fb5bf0099786ca81a7bd06ace896a8 --- lib/swift | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/lib/swift b/lib/swift index 59c1e54d8a..5d4d4ef506 100644 --- a/lib/swift +++ b/lib/swift @@ -301,7 +301,7 @@ function configure_swift { # rsyncd.conf just prepared for 4 nodes if is_ubuntu; then sudo sed -i '/^RSYNC_ENABLE=false/ { s/false/true/ }' /etc/default/rsync - else + elif [ -e /etc/xinetd.d/rsync ]; then sudo sed -i '/disable *= *yes/ { s/yes/no/ }' /etc/xinetd.d/rsync fi @@ -635,8 +635,10 @@ function start_swift { # Start rsync if is_ubuntu; then sudo /etc/init.d/rsync restart || : + elif [ -e /etc/xinetd.d/rsync ]; then + start_service xinetd else - sudo systemctl start xinetd.service + start_service rsyncd fi if is_apache_enabled_service swift; then From 2e978dd6286a33af72796dc97cd81ed5fa2255de Mon Sep 17 00:00:00 2001 From: Malini Kamalambal Date: Fri, 28 Feb 2014 14:06:59 -0500 Subject: [PATCH 0792/4704] Add use_syslog to Marconi config This patch adds use_syslog option to the marconi config file. This is needed to allow marconi to run, when USE_SCREEN is set to False in devstack. Change-Id: I547697ec2745975e235a4e58cde81132ac37b70d --- lib/marconi | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/marconi b/lib/marconi index 8cfc55c1dd..29ae386d9f 100644 --- a/lib/marconi +++ b/lib/marconi @@ -95,6 +95,7 @@ function configure_marconi { sudo chown $USER $MARCONI_API_LOG_DIR iniset $MARCONI_CONF DEFAULT verbose True + iniset $MARCONI_CONF DEFAULT use_syslog $SYSLOG iniset $MARCONI_CONF 'drivers:transport:wsgi' bind $MARCONI_SERVICE_HOST iniset $MARCONI_CONF keystone_authtoken auth_protocol http From e994f5708d124ae71211876e9456499ac25646a3 Mon Sep 17 00:00:00 2001 From: Eric Windisch Date: Fri, 28 Feb 2014 15:13:37 -0500 Subject: [PATCH 0793/4704] Remove bm_poseur, unmaintained and obsolete The bm_poseur git repository link has been broken for over 11 months. 
The virtualized/fake baremetal environment is not working and has not worked in a long time. Now, on the tail of enabling 'enable -o errexit', this functionality now has a hard break. Change-Id: I3cbd8db58c422bc5273d2433278aaa5e449ecfd9 Closes-Bug: 1285954 --- lib/baremetal | 44 ++++---------------------------------------- stack.sh | 3 --- stackrc | 6 ------ unstack.sh | 5 ----- 4 files changed, 4 insertions(+), 54 deletions(-) diff --git a/lib/baremetal b/lib/baremetal index 473de0dd39..1d02e1e417 100644 --- a/lib/baremetal +++ b/lib/baremetal @@ -77,14 +77,6 @@ BM_POWER_MANAGER=${BM_POWER_MANAGER:-nova.virt.baremetal.fake.FakePowerManager} # These should be customized to your environment and hardware # ----------------------------------------------------------- -# whether to create a fake environment, eg. for devstack-gate -BM_USE_FAKE_ENV=`trueorfalse False $BM_USE_FAKE_ENV` - -# Extra options to pass to bm_poseur -# change the bridge name or IP: --bridge br99 --bridge-ip 192.0.2.1 -# change the virtualization type: --engine qemu -BM_POSEUR_EXTRA_OPTS=${BM_POSEUR_EXTRA_OPTS:-} - # To provide PXE, configure nova-network's dnsmasq rather than run the one # dedicated to baremetal. 
When enable this, make sure these conditions are # fulfilled: @@ -97,15 +89,10 @@ BM_POSEUR_EXTRA_OPTS=${BM_POSEUR_EXTRA_OPTS:-} BM_DNSMASQ_FROM_NOVA_NETWORK=`trueorfalse False $BM_DNSMASQ_FROM_NOVA_NETWORK` # BM_DNSMASQ_IFACE should match FLAT_NETWORK_BRIDGE -if [ "$BM_USE_FAKE_ENV" ]; then - BM_DNSMASQ_IFACE=${BM_DNSMASQ_IFACE:-br99} - BM_DNSMASQ_RANGE=${BM_DNSMASQ_RANGE:-192.0.2.32,192.0.2.48} -else - BM_DNSMASQ_IFACE=${BM_DNSMASQ_IFACE:-eth0} - # if testing on a physical network, - # BM_DNSMASQ_RANGE must be changed to suit your network - BM_DNSMASQ_RANGE=${BM_DNSMASQ_RANGE:-} -fi +BM_DNSMASQ_IFACE=${BM_DNSMASQ_IFACE:-eth0} +# if testing on a physical network, +# BM_DNSMASQ_RANGE must be changed to suit your network +BM_DNSMASQ_RANGE=${BM_DNSMASQ_RANGE:-} # BM_DNSMASQ_DNS provide dns server to bootstrap clients BM_DNSMASQ_DNS=${BM_DNSMASQ_DNS:-} @@ -143,7 +130,6 @@ BM_FLAVOR_ARCH=${BM_FLAVOR_ARCH:-$BM_CPU_ARCH} # Below this, we set some path and filenames. # Defaults are probably sufficient. BM_IMAGE_BUILD_DIR=${BM_IMAGE_BUILD_DIR:-$DEST/diskimage-builder} -BM_POSEUR_DIR=${BM_POSEUR_DIR:-$DEST/bm_poseur} # Use DIB to create deploy ramdisk and kernel. BM_BUILD_DEPLOY_RAMDISK=`trueorfalse True $BM_BUILD_DEPLOY_RAMDISK` @@ -177,7 +163,6 @@ function is_baremetal { # so that we can build the deployment kernel & ramdisk function prepare_baremetal_toolchain { git_clone $BM_IMAGE_BUILD_REPO $BM_IMAGE_BUILD_DIR $BM_IMAGE_BUILD_BRANCH - git_clone $BM_POSEUR_REPO $BM_POSEUR_DIR $BM_POSEUR_BRANCH local shellinabox_basename=$(basename $BM_SHELL_IN_A_BOX) if [[ ! 
-e $DEST/$shellinabox_basename ]]; then @@ -196,27 +181,6 @@ function prepare_baremetal_toolchain { fi } -# set up virtualized environment for devstack-gate testing -function create_fake_baremetal_env { - local bm_poseur="$BM_POSEUR_DIR/bm_poseur" - # TODO(deva): add support for >1 VM - sudo $bm_poseur $BM_POSEUR_EXTRA_OPTS create-bridge - sudo $bm_poseur $BM_POSEUR_EXTRA_OPTS create-vm - BM_FIRST_MAC=$(sudo $bm_poseur get-macs) - - # NOTE: there is currently a limitation in baremetal driver - # that requires second MAC even if it is not used. - # Passing a fake value allows this to work. - # TODO(deva): remove this after driver issue is fixed. - BM_SECOND_MAC='12:34:56:78:90:12' -} - -function cleanup_fake_baremetal_env { - local bm_poseur="$BM_POSEUR_DIR/bm_poseur" - sudo $bm_poseur $BM_POSEUR_EXTRA_OPTS destroy-vm - sudo $bm_poseur $BM_POSEUR_EXTRA_OPTS destroy-bridge -} - # prepare various directories needed by baremetal hypervisor function configure_baremetal_nova_dirs { # ensure /tftpboot is prepared diff --git a/stack.sh b/stack.sh index 0ec0e0dc93..5152b2a430 100755 --- a/stack.sh +++ b/stack.sh @@ -1052,9 +1052,6 @@ if is_service_enabled nova && is_baremetal; then echo_summary "Preparing for nova baremetal" prepare_baremetal_toolchain configure_baremetal_nova_dirs - if [[ "$BM_USE_FAKE_ENV" = "True" ]]; then - create_fake_baremetal_env - fi fi diff --git a/stackrc b/stackrc index f235cccb15..6bb6f37195 100644 --- a/stackrc +++ b/stackrc @@ -229,12 +229,6 @@ TEMPEST_BRANCH=${TEMPEST_BRANCH:-master} BM_IMAGE_BUILD_REPO=${BM_IMAGE_BUILD_REPO:-${GIT_BASE}/openstack/diskimage-builder.git} BM_IMAGE_BUILD_BRANCH=${BM_IMAGE_BUILD_BRANCH:-master} -# bm_poseur -# Used to simulate a hardware environment for baremetal -# Only used if BM_USE_FAKE_ENV is set -BM_POSEUR_REPO=${BM_POSEUR_REPO:-${GIT_BASE}/tripleo/bm_poseur.git} -BM_POSEUR_BRANCH=${BM_POSEUR_BRANCH:-master} - # a websockets/html5 or flash powered VNC console for vm instances 
NOVNC_REPO=${NOVNC_REPO:-https://github.com/kanaka/noVNC.git} NOVNC_BRANCH=${NOVNC_BRANCH:-master} diff --git a/unstack.sh b/unstack.sh index 6351fe0549..a5e7b879f9 100755 --- a/unstack.sh +++ b/unstack.sh @@ -127,11 +127,6 @@ if is_service_enabled tls-proxy; then killall stud fi -# baremetal might have created a fake environment -if is_service_enabled baremetal && [[ "$BM_USE_FAKE_ENV" = "True" ]]; then - cleanup_fake_baremetal_env -fi - SCSI_PERSIST_DIR=$CINDER_STATE_PATH/volumes/* # Get the iSCSI volumes From 8f084c6b855a747467274facb1218837e0f53c88 Mon Sep 17 00:00:00 2001 From: Nicolas Simonds Date: Fri, 28 Feb 2014 17:01:41 -0800 Subject: [PATCH 0794/4704] use "rabbit_hosts" config option instead of "rabbit_host" This allows for easy client configuration against clustered RabbitMQ setups. Does not break existing configs. Change-Id: I2b180f8860a727e35d7b465253689e5e8c44eb98 Closes-Bug: 1286411 --- lib/rpc_backend | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/rpc_backend b/lib/rpc_backend index a0424b1dee..e922daa078 100644 --- a/lib/rpc_backend +++ b/lib/rpc_backend @@ -186,7 +186,7 @@ function iniset_rpc_backend { fi elif is_service_enabled rabbit || { [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; }; then iniset $file $section rpc_backend ${package}.openstack.common.rpc.impl_kombu - iniset $file $section rabbit_host $RABBIT_HOST + iniset $file $section rabbit_hosts $RABBIT_HOST iniset $file $section rabbit_password $RABBIT_PASSWORD fi } From 12cb2299e8e4d933c7181ef1a9b97478214d2200 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Fri, 28 Feb 2014 19:53:50 -0500 Subject: [PATCH 0795/4704] nova changes for multinode working under -o errexit There was a stray inicomment on paste outside of a nova-api block. This fails under -o errexit because the paste.ini doesn't exist. Move this to inside the correct block. 
Change-Id: Iffbdae6716a1c2a8f650b68edd4faf436434eab1 --- lib/nova | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lib/nova b/lib/nova index 90b1ba4fde..583a5923ce 100644 --- a/lib/nova +++ b/lib/nova @@ -245,10 +245,9 @@ function configure_nova { inicomment $NOVA_API_PASTE_INI filter:authtoken cafile inicomment $NOVA_API_PASTE_INI filter:authtoken admin_user inicomment $NOVA_API_PASTE_INI filter:authtoken admin_password + inicomment $NOVA_API_PASTE_INI filter:authtoken signing_dir fi - inicomment $NOVA_API_PASTE_INI filter:authtoken signing_dir - if is_service_enabled n-cpu; then # Force IP forwarding on, just on case sudo sysctl -w net.ipv4.ip_forward=1 From 7083b8224dab423392e21b069a1a6ef54cd14a8f Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Fri, 28 Feb 2014 20:16:20 -0500 Subject: [PATCH 0796/4704] make ceilometer work if you don't enable ceilometer-api when doing ceilometer in a multihost devstack, you don't want ceilometer-api running on the computes. Under -o errexit this became fatal. Change-Id: Ie43c8724ba467b810f5a3b075dea45d66dde8648 --- lib/ceilometer | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/lib/ceilometer b/lib/ceilometer index d20d628247..0be4184a37 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -206,9 +206,12 @@ function start_ceilometer { screen_it ceilometer-collector "cd ; ceilometer-collector --config-file $CEILOMETER_CONF" screen_it ceilometer-api "cd ; ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF" - echo "Waiting for ceilometer-api to start..." - if ! timeout $SERVICE_TIMEOUT sh -c "while ! curl --noproxy '*' -s http://localhost:8777/v2/ >/dev/null; do sleep 1; done"; then - die $LINENO "ceilometer-api did not start" + # only die on API if it was actually intended to be turned on + if service_enabled ceilometer-api; then + echo "Waiting for ceilometer-api to start..." + if ! timeout $SERVICE_TIMEOUT sh -c "while ! 
curl --noproxy '*' -s http://localhost:8777/v2/ >/dev/null; do sleep 1; done"; then + die $LINENO "ceilometer-api did not start" + fi fi screen_it ceilometer-alarm-notifier "cd ; ceilometer-alarm-notifier --config-file $CEILOMETER_CONF" From c921a95f63b00c549763c9968a103d44df590032 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Fri, 28 Feb 2014 21:09:33 -0500 Subject: [PATCH 0797/4704] only do a dbsync if on the database node ceilometer should only try to reset the database if it's actually on a node where there is a database. Change-Id: Ibcfec0556829bff0938e3769c19d34ae6c02b738 --- lib/ceilometer | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/lib/ceilometer b/lib/ceilometer index 0be4184a37..2e6e7c5a76 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -180,9 +180,11 @@ function init_ceilometer { sudo chown $STACK_USER $CEILOMETER_AUTH_CACHE_DIR rm -f $CEILOMETER_AUTH_CACHE_DIR/* - if [ "$CEILOMETER_BACKEND" = 'mysql' ] || [ "$CEILOMETER_BACKEND" = 'postgresql' ] ; then - recreate_database ceilometer utf8 - $CEILOMETER_BIN_DIR/ceilometer-dbsync + if is_service_enabled mysql postgresql; then + if [ "$CEILOMETER_BACKEND" = 'mysql' ] || [ "$CEILOMETER_BACKEND" = 'postgresql' ] ; then + recreate_database ceilometer utf8 + $CEILOMETER_BIN_DIR/ceilometer-dbsync + fi fi } From a8880cc22c540e88c43da4e49fa6c976361484e4 Mon Sep 17 00:00:00 2001 From: Eric Windisch Date: Sat, 1 Mar 2014 00:24:51 -0500 Subject: [PATCH 0798/4704] Use glance image-show to check for uploaded Docker images The behavior of the code being replaced was failing with '-o errexit' should that, as in the common case, the image has not been uploaded into Glance. While we could workaround this using a '|| :', the existing code also happened to overwrite the DOCKER_IMAGE global which is used elsewhere. It seemed prudent to either change this variable name or remove it altogether. 
Finally, using 'glance image-show' is more deterministic than grepping the output of 'glance image-list'. Change-Id: I23188155966ae9db64259b4a9d25a0d98c63c912 Closes-Bug: 1286443 --- lib/nova_plugins/hypervisor-docker | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lib/nova_plugins/hypervisor-docker b/lib/nova_plugins/hypervisor-docker index f8dc6afa19..cdbc4d172d 100644 --- a/lib/nova_plugins/hypervisor-docker +++ b/lib/nova_plugins/hypervisor-docker @@ -104,8 +104,7 @@ function start_nova_hypervisor { fi # Make sure we copied the image in Glance - DOCKER_IMAGE=$(glance image-list | egrep " $DOCKER_IMAGE_NAME ") - if ! is_set DOCKER_IMAGE ; then + if ! (glance image-show "$DOCKER_IMAGE"); then docker push $DOCKER_REPOSITORY_NAME fi } From 5a110d4e684d5cf936621608003f6b30eb75c2b1 Mon Sep 17 00:00:00 2001 From: fumihiko kakuma Date: Wed, 29 Jan 2014 14:42:06 +0900 Subject: [PATCH 0799/4704] Add configurations for the OpenFlow Agent mechanism driver This patch supports configurations for an environment of the OpenFlow Agent mechanism driver Set the following variables in a localrc to be ran this mechanism driver. 
Q_ML2_PLUGIN_MECHANISM_DRIVERS=ofagent Q_AGENT=ofagent Implements: blueprint ryu-ml2-driver Change-Id: I774da9a26f241487dfa4ec124b12f528704d860b --- lib/neutron_plugins/ofagent_agent | 94 +++++++++++++++++++++++++++++++ 1 file changed, 94 insertions(+) create mode 100644 lib/neutron_plugins/ofagent_agent diff --git a/lib/neutron_plugins/ofagent_agent b/lib/neutron_plugins/ofagent_agent new file mode 100644 index 0000000000..724df41d4c --- /dev/null +++ b/lib/neutron_plugins/ofagent_agent @@ -0,0 +1,94 @@ +# OpenFlow Agent plugin +# ---------------------- + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + +source $TOP_DIR/lib/neutron_plugins/ovs_base +source $TOP_DIR/lib/neutron_thirdparty/ryu # for RYU_DIR, install_ryu, etc + +function neutron_plugin_create_nova_conf { + _neutron_ovs_base_configure_nova_vif_driver +} + +function neutron_plugin_install_agent_packages { + _neutron_ovs_base_install_agent_packages + + # This agent uses ryu to talk with switches + install_package $(get_packages "ryu") + install_ryu + configure_ryu +} + +function neutron_plugin_configure_debug_command { + _neutron_ovs_base_configure_debug_command +} + +function neutron_plugin_configure_dhcp_agent { + iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_agent_manager neutron.agent.dhcp_agent.DhcpAgentWithStateReport +} + +function neutron_plugin_configure_l3_agent { + _neutron_ovs_base_configure_l3_agent + iniset $Q_L3_CONF_FILE DEFAULT l3_agent_manager neutron.agent.l3_agent.L3NATAgentWithStateReport +} + +function neutron_plugin_configure_plugin_agent { + # Set up integration bridge + _neutron_ovs_base_setup_bridge $OVS_BRIDGE + _neutron_ovs_base_configure_firewall_driver + + # Check a supported openflow version + OF_VERSION=`ovs-ofctl --version | grep "OpenFlow versions" | awk '{print $3}' | cut -d':' -f2` + if [ `vercmp_numbers "$OF_VERSION" "0x3"` -lt "0" ]; then + die $LINENO "This agent requires OpenFlow 1.3+ capable switch." 
+ fi + + # Enable tunnel networks if selected + if [[ "$OVS_ENABLE_TUNNELING" = "True" ]]; then + # Verify tunnels are supported + # REVISIT - also check kernel module support for GRE and patch ports + OVS_VERSION=`ovs-vsctl --version | head -n 1 | grep -E -o "[0-9]+\.[0-9]+"` + if [ `vercmp_numbers "$OVS_VERSION" "1.4"` -lt "0" ]; then + die $LINENO "You are running OVS version $OVS_VERSION. OVS 1.4+ is required for tunneling between multiple hosts." + fi + iniset /$Q_PLUGIN_CONF_FILE ovs enable_tunneling True + iniset /$Q_PLUGIN_CONF_FILE ovs local_ip $HOST_IP + fi + + # Setup physical network bridge mappings. Override + # ``OVS_VLAN_RANGES`` and ``OVS_BRIDGE_MAPPINGS`` in ``localrc`` for more + # complex physical network configurations. + if [[ "$OVS_BRIDGE_MAPPINGS" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]] && [[ "$OVS_PHYSICAL_BRIDGE" != "" ]]; then + OVS_BRIDGE_MAPPINGS=$PHYSICAL_NETWORK:$OVS_PHYSICAL_BRIDGE + + # Configure bridge manually with physical interface as port for multi-node + sudo ovs-vsctl --no-wait -- --may-exist add-br $OVS_PHYSICAL_BRIDGE + fi + if [[ "$OVS_BRIDGE_MAPPINGS" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE ovs bridge_mappings $OVS_BRIDGE_MAPPINGS + fi + AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-ofagent-agent" + + # Define extra "AGENT" configuration options when q-agt is configured by defining + # defining the array ``Q_AGENT_EXTRA_AGENT_OPTS``. 
+ # For Example: ``Q_AGENT_EXTRA_AGENT_OPTS=(foo=true bar=2)`` + for I in "${Q_AGENT_EXTRA_AGENT_OPTS[@]}"; do + # Replace the first '=' with ' ' for iniset syntax + iniset /$Q_PLUGIN_CONF_FILE agent ${I/=/ } + done +} + +function neutron_plugin_setup_interface_driver { + local conf_file=$1 + iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver + iniset $conf_file DEFAULT ovs_use_veth True +} + +function neutron_plugin_check_adv_test_requirements { + is_service_enabled q-agt && is_service_enabled q-dhcp && return 0 +} + +# Restore xtrace +$MY_XTRACE From 46c688c1ae2bdb0fc923635392a602efa3fd38c2 Mon Sep 17 00:00:00 2001 From: Masayuki Igawa Date: Mon, 24 Feb 2014 18:42:37 +0900 Subject: [PATCH 0800/4704] Fix pep8 errors This commit fixes pep8 errors. Change-Id: Ia1f1d61081a86b8a58251918392171cbc60f5ab8 --- tools/jenkins/jenkins_home/print_summary.py | 17 ++++++++++++-- tools/uec/meta.py | 25 ++++++++++++++++----- 2 files changed, 34 insertions(+), 8 deletions(-) diff --git a/tools/jenkins/jenkins_home/print_summary.py b/tools/jenkins/jenkins_home/print_summary.py index ea943e1caf..ee3790fcda 100755 --- a/tools/jenkins/jenkins_home/print_summary.py +++ b/tools/jenkins/jenkins_home/print_summary.py @@ -1,7 +1,20 @@ #!/usr/bin/python -import urllib + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ import json import sys +import urllib def print_usage(): @@ -42,4 +55,4 @@ def fetch_blob(url): 'logUrl': log_url, 'healthReport': config['healthReport']}) -print json.dumps(results) +print(json.dumps(results)) diff --git a/tools/uec/meta.py b/tools/uec/meta.py index 5b845d81a6..1d994a60d6 100644 --- a/tools/uec/meta.py +++ b/tools/uec/meta.py @@ -1,10 +1,23 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import BaseHTTPServer +import SimpleHTTPServer import sys -from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler -from SimpleHTTPServer import SimpleHTTPRequestHandler -def main(host, port, HandlerClass = SimpleHTTPRequestHandler, - ServerClass = HTTPServer, protocol="HTTP/1.0"): - """simple http server that listens on a give address:port""" + +def main(host, port, HandlerClass=SimpleHTTPServer.SimpleHTTPRequestHandler, + ServerClass=BaseHTTPServer.HTTPServer, protocol="HTTP/1.0"): + """simple http server that listens on a give address:port.""" server_address = (host, port) @@ -12,7 +25,7 @@ def main(host, port, HandlerClass = SimpleHTTPRequestHandler, httpd = ServerClass(server_address, HandlerClass) sa = httpd.socket.getsockname() - print "Serving HTTP on", sa[0], "port", sa[1], "..." 
+ print("Serving HTTP on", sa[0], "port", sa[1], "...") httpd.serve_forever() if __name__ == '__main__': From 9b3602ccf64f1d690a0a3d4adff987a5a12594b1 Mon Sep 17 00:00:00 2001 From: Brian Haley Date: Fri, 28 Feb 2014 13:52:29 -0500 Subject: [PATCH 0801/4704] Fix intermittent error in exercises/floating_ips.sh Every once in a while I see this error running floating_ips.sh: /devstack/exercises/floating_ips.sh:184:ping_check /devstack/functions:356:_ping_check_neutron /devstack/lib/neutron:904:die [ERROR] /devstack/lib/neutron:904 [Fail] Could ping server I think the problem is that it immediately tries to ping right after the icmp rule is deleted. Add a timeout and check so we at least wait one second. Change-Id: I753ec257fa12f6d2ddff1a5b1909e32d8995e173 --- exercises/floating_ips.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh index 8dc44effbc..8b7b96197e 100755 --- a/exercises/floating_ips.sh +++ b/exercises/floating_ips.sh @@ -178,6 +178,10 @@ fi nova secgroup-delete-rule $SECGROUP icmp -1 -1 0.0.0.0/0 || \ die $LINENO "Failure deleting security group rule from $SECGROUP" +if ! 
timeout $ASSOCIATE_TIMEOUT sh -c "while nova secgroup-list-rules $SECGROUP | grep -q icmp; do sleep 1; done"; then + die $LINENO "Security group rule not deleted from $SECGROUP" +fi + # FIXME (anthony): make xs support security groups if [ "$VIRT_DRIVER" != "xenserver" -a "$VIRT_DRIVER" != "openvz" ]; then # Test we can aren't able to ping our floating ip within ASSOCIATE_TIMEOUT seconds From 729236ca1a38804b3c31ec39ef65592e0108f863 Mon Sep 17 00:00:00 2001 From: Mohammad Banikazemi Date: Wed, 5 Feb 2014 14:45:04 -0500 Subject: [PATCH 0802/4704] Adds support for IBM SDN-VE Neutron plugin This provides the support for the monolithic plugin for IBM SDN-VE that is being added to Neutron here: https://review.openstack.org/#/c/66453/ Implements: blueprint ibm-sdnve-plugin-support Depends-On: I92619a95bca2ae0c37e7fdd39da30119b43d1ad6 DocImpact Change-Id: I0958457355036fdab93156cd7fb4afd1a458918b --- lib/neutron_plugins/ibm | 133 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 133 insertions(+) create mode 100644 lib/neutron_plugins/ibm diff --git a/lib/neutron_plugins/ibm b/lib/neutron_plugins/ibm new file mode 100644 index 0000000000..22c8578e64 --- /dev/null +++ b/lib/neutron_plugins/ibm @@ -0,0 +1,133 @@ +# Neutron IBM SDN-VE plugin +# --------------------------- + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + +source $TOP_DIR/lib/neutron_plugins/ovs_base + +function neutron_plugin_install_agent_packages { + _neutron_ovs_base_install_agent_packages +} + +function _neutron_interface_setup { + # Setup one interface on the integration bridge if needed + # The plugin agent to be used if more than one interface is used + local bridge=$1 + local interface=$2 + sudo ovs-vsctl --no-wait -- --may-exist add-port $bridge $interface +} + +function neutron_setup_integration_bridge { + # Setup integration bridge if needed + if [[ "$SDNVE_INTEGRATION_BRIDGE" != "" ]]; then + neutron_ovs_base_cleanup + _neutron_ovs_base_setup_bridge 
$SDNVE_INTEGRATION_BRIDGE + if [[ "$SDNVE_INTERFACE_MAPPINGS" != "" ]]; then + interfaces=(${SDNVE_INTERFACE_MAPPINGS//[,:]/ }) + _neutron_interface_setup $SDNVE_INTEGRATION_BRIDGE ${interfaces[1]} + fi + fi + + # Set controller to SDNVE controller (1st of list) if exists + if [[ "$SDNVE_CONTROLLER_IPS" != "" ]]; then + # Get the first controller + controllers=(${SDNVE_CONTROLLER_IPS//[\[,\]]/ }) + SDNVE_IP=${controllers[0]} + sudo ovs-vsctl set-controller $SDNVE_INTEGRATION_BRIDGE tcp:$SDNVE_IP + fi +} + +function neutron_plugin_create_nova_conf { + NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"} + # if n-cpu is enabled, then setup integration bridge + if is_service_enabled n-cpu; then + neutron_setup_integration_bridge + fi +} + +function is_neutron_ovs_base_plugin { + if [[ "$SDNVE_INTEGRATION_BRIDGE" != "" ]]; then + # Yes, we use OVS. + return 0 + else + # No, we do not use OVS. + return 1 + fi +} + +function neutron_plugin_configure_common { + Q_PLUGIN_CONF_PATH=etc/neutron/plugins/ibm + Q_PLUGIN_CONF_FILENAME=sdnve_neutron_plugin.ini + Q_DB_NAME="sdnve_neutron" + Q_PLUGIN_CLASS="neutron.plugins.ibm.sdnve_neutron_plugin.SdnvePluginV2" +} + +function neutron_plugin_configure_service { + # Define extra "SDNVE" configuration options when q-svc is configured + + iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.firewall.NoopFirewallDriver + + if [[ "$SDNVE_CONTROLLER_IPS" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE sdnve controller_ips $SDNVE_CONTROLLER_IPS + fi + + if [[ "$SDNVE_INTEGRATION_BRIDGE" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE sdnve integration_bridge $SDNVE_INTEGRATION_BRIDGE + fi + + if [[ "$SDNVE_RESET_BRIDGE" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE sdnve reset_bridge $SDNVE_RESET_BRIDGE + fi + + if [[ "$SDNVE_OUT_OF_BAND" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE sdnve out_of_band $SDNVE_OUT_OF_BAND + fi + + if [[ "$SDNVE_INTERFACE_MAPPINGS" != "" ]]; then + iniset 
/$Q_PLUGIN_CONF_FILE sdnve interface_mappings $SDNVE_INTERFACE_MAPPINGS + fi + + if [[ "$SDNVE_FAKE_CONTROLLER" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE sdnve use_fake_controller $SDNVE_FAKE_CONTROLLER + fi + + + iniset $NEUTRON_CONF DEFAULT notification_driver neutron.openstack.common.notifier.no_op_notifier + +} + +function neutron_plugin_configure_plugin_agent { + AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-ibm-agent" +} + +function neutron_plugin_configure_debug_command { + : +} + +function neutron_plugin_setup_interface_driver { + return 0 +} + +function has_neutron_plugin_security_group { + # Does not support Security Groups + return 1 +} + +function neutron_ovs_base_cleanup { + if [[ "$SDNVE_RESET_BRIDGE" != False ]]; then + # remove all OVS ports that look like Neutron created ports + for port in $(sudo ovs-vsctl list port | grep -o -e tap[0-9a-f\-]* -e q[rg]-[0-9a-f\-]*); do + sudo ovs-vsctl del-port ${port} + done + + # remove integration bridge created by Neutron + for bridge in $(sudo ovs-vsctl list-br | grep -o -e ${SDNVE_INTEGRATION_BRIDGE}); do + sudo ovs-vsctl del-br ${bridge} + done + fi +} + +# Restore xtrace +$MY_XTRACE From 91baef3e26994c64249453dd0b1d8998eda10eca Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Fri, 28 Feb 2014 11:11:45 -0600 Subject: [PATCH 0803/4704] Clarify deprecation of EXTRA_xxx_OPTS The various EXTRA_xxx_OPTS variables will be removed in the Juno development cycle, change the README to reflect the new way for the Neutron variables. Change-Id: Ic84da4a9b5a83e66cf0b57d643a87691f15517f0 --- README.md | 50 ++++++++++++++++++++++++++++++++++---------------- stack.sh | 48 +++++++++++++++++++++++++++++++++++++++++++++--- 2 files changed, 79 insertions(+), 19 deletions(-) diff --git a/README.md b/README.md index 9914b1ed69..9304240f70 100644 --- a/README.md +++ b/README.md @@ -163,7 +163,7 @@ services are started in background and managed by `swift-init` tool. 
Basic Setup In order to enable Neutron a single node setup, you'll need the -following settings in your `localrc` section: +following settings in your `local.conf`: disable_service n-net enable_service q-svc @@ -172,7 +172,6 @@ following settings in your `localrc` section: enable_service q-l3 enable_service q-meta enable_service q-metering - enable_service neutron # Optional, to enable tempest configuration as part of DevStack enable_service tempest @@ -180,24 +179,44 @@ Then run `stack.sh` as normal. DevStack supports setting specific Neutron configuration flags to the service, Open vSwitch plugin and LinuxBridge plugin configuration files. -To make use of this feature, the following variables are defined and can -be configured in your `localrc` section: +To make use of this feature, the settings can be added to ``local.conf``. +The old ``Q_XXX_EXTRA_XXX_OPTS`` variables are deprecated and will be removed +in the near future. The ``local.conf`` headers for the replacements are: - Variable Name Config File Section Modified - ------------------------------------------------------------------------------------- - Q_SRV_EXTRA_OPTS Plugin `OVS` (for Open Vswitch) or `LINUX_BRIDGE` (for LinuxBridge) - Q_AGENT_EXTRA_AGENT_OPTS Plugin AGENT - Q_AGENT_EXTRA_SRV_OPTS Plugin `OVS` (for Open Vswitch) or `LINUX_BRIDGE` (for LinuxBridge) - Q_SRV_EXTRA_DEFAULT_OPTS Service DEFAULT +* ``Q_SRV_EXTRA_OPTS``: + + [[post-config|/$Q_PLUGIN_CONF_FILE]] + [linuxbridge] # or [ovs] + +* ``Q_AGENT_EXTRA_AGENT_OPTS``: + + [[post-config|/$Q_PLUGIN_CONF_FILE]] + [agent] + +* ``Q_AGENT_EXTRA_SRV_OPTS``: -An example of using the variables in your `localrc` section is below: + [[post-config|/$Q_PLUGIN_CONF_FILE]] + [linuxbridge] # or [ovs] + +* ``Q_SRV_EXTRA_DEFAULT_OPTS``: + + [[post-config|$NEUTRON_CONF]] + [DEFAULT] - Q_AGENT_EXTRA_AGENT_OPTS=(tunnel_type=vxlan vxlan_udp_port=8472) - Q_SRV_EXTRA_OPTS=(tenant_network_type=vxlan) +Example extra config in `local.conf`: + + 
[[post-config|/$Q_PLUGIN_CONF_FILE]] + [agent] + tunnel_type=vxlan + vxlan_udp_port=8472 + + [[post-config|$NEUTRON_CONF]] + [DEFAULT] + tenant_network_type=vxlan DevStack also supports configuring the Neutron ML2 plugin. The ML2 plugin -can run with the OVS, LinuxBridge, or Hyper-V agents on compute hosts. A -simple way to configure the ml2 plugin is shown below: +can run with the OVS, LinuxBridge, or Hyper-V agents on compute hosts. This +is a simple way to configure the ml2 plugin: # VLAN configuration Q_PLUGIN=ml2 @@ -223,7 +242,6 @@ To change this, set the `Q_AGENT` variable to the agent you want to run Q_ML2_PLUGIN_GRE_TYPE_OPTIONS GRE TypeDriver options. Defaults to none. Q_ML2_PLUGIN_VXLAN_TYPE_OPTIONS VXLAN TypeDriver options. Defaults to none. Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS VLAN TypeDriver options. Defaults to none. - Q_AGENT_EXTRA_AGENT_OPTS Extra configuration options to pass to the OVS or LinuxBridge Agent. # Heat diff --git a/stack.sh b/stack.sh index ccd567e0bc..988fda5ff1 100755 --- a/stack.sh +++ b/stack.sh @@ -1359,12 +1359,14 @@ if [[ -n "$DEPRECATED_TEXT" ]]; then echo_summary "WARNING: $DEPRECATED_TEXT" fi +# TODO(dtroyer): Remove EXTRA_OPTS after stable/icehouse branch is cut # Specific warning for deprecated configs if [[ -n "$EXTRA_OPTS" ]]; then echo "" echo_summary "WARNING: EXTRA_OPTS is used" echo "You are using EXTRA_OPTS to pass configuration into nova.conf." echo "Please convert that configuration in localrc to a nova.conf section in local.conf:" + echo "EXTRA_OPTS will be removed early in the Juno development cycle" echo " [[post-config|\$NOVA_CONF]] [DEFAULT] @@ -1375,11 +1377,13 @@ if [[ -n "$EXTRA_OPTS" ]]; then done fi +# TODO(dtroyer): Remove EXTRA_BAREMETAL_OPTS after stable/icehouse branch is cut if [[ -n "$EXTRA_BAREMETAL_OPTS" ]]; then echo "" - echo_summary "WARNING: EXTRA_OPTS is used" - echo "You are using EXTRA_OPTS to pass configuration into nova.conf." 
+ echo_summary "WARNING: EXTRA_BAREMETAL_OPTS is used" + echo "You are using EXTRA_BAREMETAL_OPTS to pass configuration into nova.conf." echo "Please convert that configuration in localrc to a nova.conf section in local.conf:" + echo "EXTRA_BAREMETAL_OPTS will be removed early in the Juno development cycle" echo " [[post-config|\$NOVA_CONF]] [baremetal] @@ -1390,13 +1394,49 @@ if [[ -n "$EXTRA_BAREMETAL_OPTS" ]]; then done fi +# TODO(dtroyer): Remove Q_AGENT_EXTRA_AGENT_OPTS after stable/juno branch is cut +if [[ -n "$Q_AGENT_EXTRA_AGENT_OPTS" ]]; then + echo "" + echo_summary "WARNING: Q_AGENT_EXTRA_AGENT_OPTS is used" + echo "You are using Q_AGENT_EXTRA_AGENT_OPTS to pass configuration into $NEUTRON_CONF." + echo "Please convert that configuration in localrc to a $NEUTRON_CONF section in local.conf:" + echo "Q_AGENT_EXTRA_AGENT_OPTS will be removed early in the 'K' development cycle" + echo " +[[post-config|/\$Q_PLUGIN_CONF_FILE]] +[DEFAULT] +" + for I in "${Q_AGENT_EXTRA_AGENT_OPTS[@]}"; do + # Replace the first '=' with ' ' for iniset syntax + echo ${I} + done +fi + +# TODO(dtroyer): Remove Q_AGENT_EXTRA_SRV_OPTS after stable/juno branch is cut +if [[ -n "$Q_AGENT_EXTRA_SRV_OPTS" ]]; then + echo "" + echo_summary "WARNING: Q_AGENT_EXTRA_SRV_OPTS is used" + echo "You are using Q_AGENT_EXTRA_SRV_OPTS to pass configuration into $NEUTRON_CONF." 
+ echo "Please convert that configuration in localrc to a $NEUTRON_CONF section in local.conf:"
+ echo "Q_AGENT_EXTRA_SRV_OPTS will be removed early in the 'K' development cycle"
+ echo "
+[[post-config|/\$Q_PLUGIN_CONF_FILE]]
+[DEFAULT]
+"
+ for I in "${Q_AGENT_EXTRA_SRV_OPTS[@]}"; do
+ # Replace the first '=' with ' ' for iniset syntax
+ echo ${I}
+ done
+fi
+
+# TODO(dtroyer): Remove Q_DHCP_EXTRA_DEFAULT_OPTS after stable/icehouse branch is cut
 if [[ -n "$Q_DHCP_EXTRA_DEFAULT_OPTS" ]]; then
 echo ""
 echo_summary "WARNING: Q_DHCP_EXTRA_DEFAULT_OPTS is used"
 echo "You are using Q_DHCP_EXTRA_DEFAULT_OPTS to pass configuration into $Q_DHCP_CONF_FILE."
 echo "Please convert that configuration in localrc to a $Q_DHCP_CONF_FILE section in local.conf:"
+ echo "Q_DHCP_EXTRA_DEFAULT_OPTS will be removed early in the Juno development cycle"
 echo "
-[[post-config|\$Q_DHCP_CONF_FILE]]
+[[post-config|/\$Q_DHCP_CONF_FILE]]
 [DEFAULT]
 "
 for I in "${Q_DHCP_EXTRA_DEFAULT_OPTS[@]}"; do
@@ -1405,11 +1445,13 @@ if [[ -n "$Q_DHCP_EXTRA_DEFAULT_OPTS" ]]; then
 done
 fi
 
+# TODO(dtroyer): Remove Q_SRV_EXTRA_DEFAULT_OPTS after stable/icehouse branch is cut
 if [[ -n "$Q_SRV_EXTRA_DEFAULT_OPTS" ]]; then
 echo ""
 echo_summary "WARNING: Q_SRV_EXTRA_DEFAULT_OPTS is used"
 echo "You are using Q_SRV_EXTRA_DEFAULT_OPTS to pass configuration into $NEUTRON_CONF."
 echo "Please convert that configuration in localrc to a $NEUTRON_CONF section in local.conf:"
+ echo "Q_SRV_EXTRA_DEFAULT_OPTS will be removed early in the Juno development cycle"
 echo "
 [[post-config|\$NEUTRON_CONF]]
 [DEFAULT]
From 57d478d87438912e1a33d4a2d00d4a300148e2fc Mon Sep 17 00:00:00 2001
From: Steven Hardy
Date: Fri, 28 Feb 2014 16:37:43 +0000
Subject: [PATCH 0804/4704] Move heat keystone setup into lib/heat

Move the heat setup which currently happens in files/keystone_data.sh
to lib/heat, where we have create_heat_accounts.
Move the user, role, service and endpoint creation as that is consistent with what other services, e.g lib/nova are doing. Change-Id: Iaa2c822cad581d6b2b4f22f8863daf81e25f8485 --- files/keystone_data.sh | 35 ---------------------------------- lib/heat | 43 +++++++++++++++++++++++++++++++++++++++++- stack.sh | 3 +-- 3 files changed, 43 insertions(+), 38 deletions(-) diff --git a/files/keystone_data.sh b/files/keystone_data.sh index 9a34c7616f..fc1e8136a4 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -53,41 +53,6 @@ if [[ "$ENABLED_SERVICES" =~ "n-api" ]] && [[ "$ENABLED_SERVICES" =~ "s-proxy" | --role ResellerAdmin fi -# Heat -if [[ "$ENABLED_SERVICES" =~ "heat" ]]; then - keystone user-create --name=heat \ - --pass="$SERVICE_PASSWORD" \ - --tenant $SERVICE_TENANT_NAME \ - --email=heat@example.com - keystone user-role-add --tenant $SERVICE_TENANT_NAME \ - --user heat \ - --role service - # heat_stack_user role is for users created by Heat - keystone role-create --name heat_stack_user - if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - keystone service-create \ - --name=heat-cfn \ - --type=cloudformation \ - --description="Heat CloudFormation Service" - keystone endpoint-create \ - --region RegionOne \ - --service heat-cfn \ - --publicurl "http://$SERVICE_HOST:$HEAT_API_CFN_PORT/v1" \ - --adminurl "http://$SERVICE_HOST:$HEAT_API_CFN_PORT/v1" \ - --internalurl "http://$SERVICE_HOST:$HEAT_API_CFN_PORT/v1" - keystone service-create \ - --name=heat \ - --type=orchestration \ - --description="Heat Service" - keystone endpoint-create \ - --region RegionOne \ - --service heat \ - --publicurl "http://$SERVICE_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" \ - --adminurl "http://$SERVICE_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" \ - --internalurl "http://$SERVICE_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" - fi -fi - # Glance if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then keystone user-create \ diff --git a/lib/heat b/lib/heat index d0c0302016..42d1057cbd 100644 --- 
a/lib/heat
+++ b/lib/heat
@@ -197,8 +197,49 @@ function disk_image_create {
 }
 
 # create_heat_accounts() - Set up common required heat accounts
-# Note this is in addition to what is in files/keystone_data.sh
 function create_heat_accounts {
+ # migrated from files/keystone_data.sh
+ SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
+ ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }")
+
+ HEAT_USER=$(openstack user create \
+ heat \
+ --password "$SERVICE_PASSWORD" \
+ --project $SERVICE_TENANT \
+ --email heat@example.com \
+ | grep " id " | get_field 2)
+ openstack role add \
+ $ADMIN_ROLE \
+ --project $SERVICE_TENANT \
+ --user $HEAT_USER
+ if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
+ HEAT_SERVICE=$(openstack service create \
+ heat \
+ --type=orchestration \
+ --description="Heat Orchestration Service" \
+ | grep " id " | get_field 2)
+ openstack endpoint create \
+ $HEAT_SERVICE \
+ --region RegionOne \
+ --publicurl "$SERVICE_PROTOCOL://$HEAT_API_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" \
+ --adminurl "$SERVICE_PROTOCOL://$HEAT_API_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" \
+ --internalurl "$SERVICE_PROTOCOL://$HEAT_API_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s"
+ HEAT_CFN_SERVICE=$(openstack service create \
+ heat-cfn \
+ --type=cloudformation \
+ --description="Heat CloudFormation Service" \
+ | grep " id " | get_field 2)
+ openstack endpoint create \
+ $HEAT_CFN_SERVICE \
+ --region RegionOne \
+ --publicurl "$SERVICE_PROTOCOL://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1" \
+ --adminurl "$SERVICE_PROTOCOL://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1" \
+ --internalurl "$SERVICE_PROTOCOL://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1"
+ fi
+
+ # heat_stack_user role is for users created by Heat
+ openstack role create heat_stack_user
+
 # Note we have to pass token/endpoint here because the current endpoint and
 # version negotiation in OSC means just --os-identity-api-version=3 won't work
KS_ENDPOINT_V3="$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v3" diff --git a/stack.sh b/stack.sh index ccd567e0bc..ec8de2d2dd 100755 --- a/stack.sh +++ b/stack.sh @@ -934,8 +934,7 @@ if is_service_enabled key; then ADMIN_PASSWORD=$ADMIN_PASSWORD SERVICE_TENANT_NAME=$SERVICE_TENANT_NAME SERVICE_PASSWORD=$SERVICE_PASSWORD \ SERVICE_TOKEN=$SERVICE_TOKEN SERVICE_ENDPOINT=$SERVICE_ENDPOINT SERVICE_HOST=$SERVICE_HOST \ S3_SERVICE_PORT=$S3_SERVICE_PORT KEYSTONE_CATALOG_BACKEND=$KEYSTONE_CATALOG_BACKEND \ - DEVSTACK_DIR=$TOP_DIR ENABLED_SERVICES=$ENABLED_SERVICES HEAT_API_CFN_PORT=$HEAT_API_CFN_PORT \ - HEAT_API_PORT=$HEAT_API_PORT \ + DEVSTACK_DIR=$TOP_DIR ENABLED_SERVICES=$ENABLED_SERVICES \ bash -x $FILES/keystone_data.sh # Set up auth creds now that keystone is bootstrapped From 2ca3bf18dd756621f012ebb7ffb338f2fa38d6f2 Mon Sep 17 00:00:00 2001 From: Steven Hardy Date: Mon, 3 Mar 2014 18:07:33 +0000 Subject: [PATCH 0805/4704] Add heat_stack_owner role for heat trusts usage Heat supports deferred operations via keystone trusts, and we'd like to make that the default. To do this, we require a new role, which is the default role specified in heat.conf trusts_delegated_roles, heat_stack_owner. Add the role to the admin/demo users so they can create heat stacks when we make deferred_auth_method=trusts the default. 
Change-Id: Idfc70ee89428c23f5965e643486ff2ad9566471c Related-Bug: #1286157 --- lib/heat | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/lib/heat b/lib/heat index 42d1057cbd..2d9d863f0c 100644 --- a/lib/heat +++ b/lib/heat @@ -240,6 +240,19 @@ function create_heat_accounts { # heat_stack_user role is for users created by Heat openstack role create heat_stack_user + # heat_stack_owner role is given to users who create Heat stacks, + # it's the default role used by heat to delegate to the heat service + # user (for performing deferred operations via trusts), see heat.conf + HEAT_OWNER_ROLE=$(openstack role create \ + heat_stack_owner \ + | grep " id " | get_field 2) + + # Give the role to the demo and admin users so they can create stacks + # in either of the projects created by devstack + openstack role add $HEAT_OWNER_ROLE --project demo --user demo + openstack role add $HEAT_OWNER_ROLE --project demo --user admin + openstack role add $HEAT_OWNER_ROLE --project admin --user admin + # Note we have to pass token/endpoint here because the current endpoint and # version negotiation in OSC means just --os-identity-api-version=3 won't work KS_ENDPOINT_V3="$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v3" From de3b82037d863b55cc245c343a8697b5cf4b1904 Mon Sep 17 00:00:00 2001 From: Shweta P Date: Mon, 3 Mar 2014 13:38:37 -0500 Subject: [PATCH 0806/4704] NCCLIENT_REPO is using the wrong url NCCLIENT_REPO value in lib/neutron_plugins/cisco is pointing to a repo that does not exist. This fix corrects the url. 
Closes-Bug #1286302 Change-Id: I42db0b3f7a4bbf5d1d053e3da8b4fbb67d47de94 --- lib/neutron_plugins/cisco | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron_plugins/cisco b/lib/neutron_plugins/cisco index 7728eb177f..a1b089e1a3 100644 --- a/lib/neutron_plugins/cisco +++ b/lib/neutron_plugins/cisco @@ -23,7 +23,7 @@ Q_CISCO_PLUGIN_VLAN_RANGES=${Q_CISCO_PLUGIN_VLAN_RANGES:-vlan:1:4094} # Specify ncclient package information NCCLIENT_DIR=$DEST/ncclient NCCLIENT_VERSION=${NCCLIENT_VERSION:-0.3.1} -NCCLIENT_REPO=${NCCLIENT_REPO:-${GIT_BASE}/CiscoSystems/ncclient.git} +NCCLIENT_REPO=${NCCLIENT_REPO:-git://github.com/CiscoSystems/ncclient.git} NCCLIENT_BRANCH=${NCCLIENT_BRANCH:-master} # This routine put a prefix on an existing function name From 753afeba7464464a3fd050eb2085e51580f9b5a7 Mon Sep 17 00:00:00 2001 From: Kevin Benton Date: Thu, 13 Feb 2014 17:17:30 -0800 Subject: [PATCH 0807/4704] Use neutron security groups in BigSwitch plugin Configures the Big Switch third-party plugin to use neutron security groups instead of nova security groups. 
Change-Id: I6bc3046ff0e70b8288a7c3f3d6f975376adc081a Implements: blueprint bigswitch-neutron-security --- lib/neutron_plugins/bigswitch_floodlight | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/lib/neutron_plugins/bigswitch_floodlight b/lib/neutron_plugins/bigswitch_floodlight index 4cb0da84ea..b1b77d7606 100644 --- a/lib/neutron_plugins/bigswitch_floodlight +++ b/lib/neutron_plugins/bigswitch_floodlight @@ -38,7 +38,12 @@ function neutron_plugin_configure_l3_agent { } function neutron_plugin_configure_plugin_agent { - : + # Set up integration bridge + _neutron_ovs_base_setup_bridge $OVS_BRIDGE + iniset /$Q_PLUGIN_CONF_FILE restproxyagent integration_bridge $OVS_BRIDGE + AGENT_BINARY="$NEUTRON_DIR/neutron/plugins/bigswitch/agent/restproxy_agent.py" + + _neutron_ovs_base_configure_firewall_driver } function neutron_plugin_configure_service { @@ -61,7 +66,7 @@ function neutron_plugin_setup_interface_driver { function has_neutron_plugin_security_group { # 1 means False here - return 1 + return 0 } function neutron_plugin_check_adv_test_requirements { From 8829acaf141ade6d5ac61ec3d0b15d80e3a09752 Mon Sep 17 00:00:00 2001 From: zhang-jinnan Date: Mon, 3 Mar 2014 10:55:33 +0800 Subject: [PATCH 0808/4704] Remove blank space after print Keep code clean and pleasure:) Change-Id: Ie0c0781eaeb57b32a9a6185a59353fc4b911afd6 --- tools/jenkins/jenkins_home/print_summary.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/jenkins/jenkins_home/print_summary.py b/tools/jenkins/jenkins_home/print_summary.py index ea943e1caf..6310b1889f 100755 --- a/tools/jenkins/jenkins_home/print_summary.py +++ b/tools/jenkins/jenkins_home/print_summary.py @@ -5,8 +5,8 @@ def print_usage(): - print ("Usage: %s [jenkins_url (eg. http://50.56.12.202:8080/)]" - % sys.argv[0]) + print("Usage: %s [jenkins_url (eg. 
http://50.56.12.202:8080/)]" + % sys.argv[0]) sys.exit() From ccf60f75a2a5a0f10412b4f806ac7a123068909b Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Mon, 3 Mar 2014 22:48:31 -0500 Subject: [PATCH 0809/4704] Put tempest ipv6 option in the correct group This commit updates the location for the ipv6 option to be in the proper group. This depends on tempest change I35769cf4d18363fad56ed5150b4d01d8a5ad17e7 Change-Id: Ief5ea00649c8954282245e30c63c45557a28ea9f --- lib/tempest | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index 16f8744d85..1639ae60b4 100644 --- a/lib/tempest +++ b/lib/tempest @@ -293,7 +293,7 @@ function configure_tempest { iniset $TEMPEST_CONFIG network public_network_id "$public_network_id" iniset $TEMPEST_CONFIG network public_router_id "$public_router_id" iniset $TEMPEST_CONFIG network default_network "$FIXED_RANGE" - iniset $TEMPEST_CONFIG network ipv6_enabled "$IPV6_ENABLED" + iniset $TEMPEST_CONFIG network-feature-enabled ipv6 "$IPV6_ENABLED" # boto iniset $TEMPEST_CONFIG boto ec2_url "http://$SERVICE_HOST:8773/services/Cloud" From 314af0a7a97b31ff2a803a77e1a92f5b67857f18 Mon Sep 17 00:00:00 2001 From: Sreeram Yerrapragada Date: Mon, 3 Mar 2014 21:34:45 -0800 Subject: [PATCH 0810/4704] Fix upload function for vmdk files Fix all grep statements failing under -o errexit. Change-Id: I0591a2ba7351d598eb5b29d68a83ce6290600938 --- functions | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/functions b/functions index a844b1c1af..ab8319b0ce 100644 --- a/functions +++ b/functions @@ -55,7 +55,7 @@ function upload_image { mkdir -p $FILES/images IMAGE_FNAME=`basename "$image_url"` if [[ $image_url != file* ]]; then - # Downloads the image (uec ami+aki style), then extracts it. + # Downloads the image (uec ami+akistyle), then extracts it. if [[ ! -f $FILES/$IMAGE_FNAME || "$(stat -c "%s" $FILES/$IMAGE_FNAME)" = "0" ]]; then wget -c $image_url -O $FILES/$IMAGE_FNAME if [[ $? 
-ne 0 ]]; then @@ -103,12 +103,12 @@ function upload_image { vmdk_net_adapter="" # vmdk adapter type - vmdk_adapter_type="$(head -25 $IMAGE | grep -a -F -m 1 'ddb.adapterType =' $IMAGE)" + vmdk_adapter_type="$(head -25 $IMAGE | { grep -a -F -m 1 'ddb.adapterType =' $IMAGE || true; })" vmdk_adapter_type="${vmdk_adapter_type#*\"}" vmdk_adapter_type="${vmdk_adapter_type%?}" # vmdk disk type - vmdk_create_type="$(head -25 $IMAGE | grep -a -F -m 1 'createType=' $IMAGE)" + vmdk_create_type="$(head -25 $IMAGE | { grep -a -F -m 1 'createType=' $IMAGE || true; })" vmdk_create_type="${vmdk_create_type#*\"}" vmdk_create_type="${vmdk_create_type%\"*}" @@ -119,7 +119,7 @@ function upload_image { elif [[ "$vmdk_create_type" = "monolithicFlat" || \ "$vmdk_create_type" = "vmfs" ]]; then # Attempt to retrieve the *-flat.vmdk - flat_fname="$(head -25 $IMAGE | grep -G 'RW\|RDONLY [0-9]+ FLAT\|VMFS' $IMAGE)" + flat_fname="$(head -25 $IMAGE | { grep -G 'RW\|RDONLY [0-9]+ FLAT\|VMFS' $IMAGE || true; })" flat_fname="${flat_fname#*\"}" flat_fname="${flat_fname%?}" if [[ -z "$flat_name" ]]; then @@ -190,7 +190,7 @@ function upload_image { fi if $descriptor_found; then vmdk_adapter_type="$(head -25 $descriptor_url |"` - `"grep -a -F -m 1 'ddb.adapterType =' $descriptor_url)" + `" { grep -a -F -m 1 'ddb.adapterType =' $descriptor_url || true; })" vmdk_adapter_type="${vmdk_adapter_type#*\"}" vmdk_adapter_type="${vmdk_adapter_type%?}" fi @@ -203,7 +203,7 @@ function upload_image { # NOTE: For backwards compatibility reasons, colons may be used in place # of semi-colons for property delimiters but they are not permitted # characters in NTFS filesystems. 
- property_string=`echo "$IMAGE_NAME" | grep -oP '(?<=-)(?!.*-).*[:;].*[:;].*$'` + property_string=`echo "$IMAGE_NAME" | { grep -oP '(?<=-)(?!.*-).*[:;].*[:;].*$' || true; }` IFS=':;' read -a props <<< "$property_string" vmdk_disktype="${props[0]:-$vmdk_disktype}" vmdk_adapter_type="${props[1]:-$vmdk_adapter_type}" From a439faa85b89b0d2c73085743426fd8741293cb6 Mon Sep 17 00:00:00 2001 From: YAMAMOTO Takashi Date: Mon, 24 Feb 2014 20:32:19 +0900 Subject: [PATCH 0811/4704] Update required packages for ryu Sync with the recent reality. Change-Id: I4c37d09e511f3763d2267267815387bd5c825e0e Closes-Bug: 1287541 --- files/apts/ryu | 4 +--- files/rpms-suse/ryu | 4 +--- files/rpms/ryu | 4 +--- 3 files changed, 3 insertions(+), 9 deletions(-) diff --git a/files/apts/ryu b/files/apts/ryu index e8ed926c1e..9b850807e6 100644 --- a/files/apts/ryu +++ b/files/apts/ryu @@ -1,4 +1,2 @@ -python-gevent -python-gflags -python-netifaces +python-eventlet python-sphinx diff --git a/files/rpms-suse/ryu b/files/rpms-suse/ryu index 3797b6cb44..6b426fb163 100644 --- a/files/rpms-suse/ryu +++ b/files/rpms-suse/ryu @@ -1,4 +1,2 @@ python-Sphinx -python-gevent -python-netifaces -python-python-gflags +python-eventlet diff --git a/files/rpms/ryu b/files/rpms/ryu index e8ed926c1e..9b850807e6 100644 --- a/files/rpms/ryu +++ b/files/rpms/ryu @@ -1,4 +1,2 @@ -python-gevent -python-gflags -python-netifaces +python-eventlet python-sphinx From 0e598c3c81fc3d652415095101a095de69ec8a6d Mon Sep 17 00:00:00 2001 From: YAMAMOTO Takashi Date: Mon, 24 Feb 2014 22:02:08 +0900 Subject: [PATCH 0812/4704] Stop running setup_devel for Ryu It doesn't work here for various reasons. - Ryu's setup.py is incompatible with global requirements - This code is called before install_infra. Ryu is not a part of OpenStack anyway. 
Closes-Bug: 1287569 Change-Id: I01a942411f7d06bdf8f1fec5d1a0bc319560f329 --- lib/neutron_thirdparty/ryu | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/lib/neutron_thirdparty/ryu b/lib/neutron_thirdparty/ryu index 424a90041e..b2c1b613fe 100644 --- a/lib/neutron_thirdparty/ryu +++ b/lib/neutron_thirdparty/ryu @@ -18,14 +18,8 @@ RYU_OFP_PORT=${RYU_OFP_PORT:-6633} # Ryu Applications RYU_APPS=${RYU_APPS:-ryu.app.simple_isolation,ryu.app.rest} -# configure_ryu can be called multiple times as neutron_pluing/ryu may call -# this function for neutron-ryu-agent -_RYU_CONFIGURED=${_RYU_CONFIGURED:-False} function configure_ryu { - if [[ "$_RYU_CONFIGURED" == "False" ]]; then - setup_develop $RYU_DIR - _RYU_CONFIGURED=True - fi + : } function init_ryu { @@ -63,6 +57,7 @@ _RYU_INSTALLED=${_RYU_INSTALLED:-False} function install_ryu { if [[ "$_RYU_INSTALLED" == "False" ]]; then git_clone $RYU_REPO $RYU_DIR $RYU_BRANCH + export PYTHONPATH=$RYU_DIR:$PYTHONPATH _RYU_INSTALLED=True fi } From d5b52ca7557ec1aef71f21c71110455a6aea2505 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 4 Mar 2014 09:23:07 -0500 Subject: [PATCH 0813/4704] fix tgt to use 'service' instead of upstart calls the comments in here were largely about oneric, which we don't support any more. service is installed in a precise environment, and will support debian and the upcoming transition to systemd better, so use that instead. 
Change-Id: If15493549a8c93a7387df9b3bba31443aed46995 --- lib/cinder | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/lib/cinder b/lib/cinder index d003f5dc7b..dd2956a5b4 100644 --- a/lib/cinder +++ b/lib/cinder @@ -491,10 +491,7 @@ function start_cinder { sudo rm -f /etc/tgt/conf.d/stack.conf _configure_tgt_for_config_d if is_ubuntu; then - # tgt in oneiric doesn't restart properly if tgtd isn't running - # do it in two steps - sudo stop tgt || true - sudo start tgt + sudo service tgt restart elif is_fedora; then if [[ $DISTRO =~ (rhel6) ]]; then sudo /sbin/service tgtd restart From a67cb1af4df6b5c758c319e0590a3188d951e68d Mon Sep 17 00:00:00 2001 From: Alexander Gordeev Date: Tue, 4 Mar 2014 18:38:33 +0400 Subject: [PATCH 0814/4704] Fix typo in ironic configure function IRONIC_CONF should be replaced by IRONIC_CONF_FILE Change-Id: Ie43e376f42f14c46d21df7dbb19db923521f438b --- lib/ironic | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ironic b/lib/ironic index 4e5edc90cf..b346de1e69 100644 --- a/lib/ironic +++ b/lib/ironic @@ -124,7 +124,7 @@ function configure_ironic_conductor { cp $IRONIC_DIR/etc/ironic/rootwrap.conf $IRONIC_ROOTWRAP_CONF cp -r $IRONIC_DIR/etc/ironic/rootwrap.d $IRONIC_CONF_DIR - iniset $IRONIC_CONF DEFAULT rootwrap_config $IRONIC_ROOTWRAP_CONF + iniset $IRONIC_CONF_FILE DEFAULT rootwrap_config $IRONIC_ROOTWRAP_CONF } # create_ironic_cache_dir() - Part of the init_ironic() process From 3d2bdf50bc0110c718de39606c8b803696a31285 Mon Sep 17 00:00:00 2001 From: Eric Windisch Date: Sat, 1 Mar 2014 00:17:32 -0500 Subject: [PATCH 0815/4704] Use cat instead of read Date: Tue, 4 Mar 2014 15:02:04 -0500 Subject: [PATCH 0816/4704] fix typo in lib/ceilometer this should be is_service_enabled and not service_enabled. Not sure why it passes in the gate, but it fails in stackforge jobs. 
Change-Id: I876f72cd98ff9c8e4ea28832bc9ac6bbdc3b865d --- lib/ceilometer | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ceilometer b/lib/ceilometer index 2e6e7c5a76..04c1a34b8b 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -209,7 +209,7 @@ function start_ceilometer { screen_it ceilometer-api "cd ; ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF" # only die on API if it was actually intended to be turned on - if service_enabled ceilometer-api; then + if is_service_enabled ceilometer-api; then echo "Waiting for ceilometer-api to start..." if ! timeout $SERVICE_TIMEOUT sh -c "while ! curl --noproxy '*' -s http://localhost:8777/v2/ >/dev/null; do sleep 1; done"; then die $LINENO "ceilometer-api did not start" From e2aa91b237e7e23f70847cba60a54a40560a5a3c Mon Sep 17 00:00:00 2001 From: Malini Kamalambal Date: Tue, 4 Mar 2014 04:40:19 -0500 Subject: [PATCH 0817/4704] Enable marconi-server to run when USE_SCREEN=false This patch, 1. adds log_file option to marconi.conf 2. redirects the output from marconi-server, in the same precedent set by another project. 
Change-Id: Ib273a03625d5a4edf8bb3ed7d522d2b087975acd --- lib/marconi | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/lib/marconi b/lib/marconi index 29ae386d9f..a96137fc04 100644 --- a/lib/marconi +++ b/lib/marconi @@ -34,7 +34,8 @@ MARCONI_DIR=$DEST/marconi MARCONICLIENT_DIR=$DEST/python-marconiclient MARCONI_CONF_DIR=/etc/marconi MARCONI_CONF=$MARCONI_CONF_DIR/marconi.conf -MARCONI_API_LOG_DIR=/var/log/marconi-api +MARCONI_API_LOG_DIR=/var/log/marconi +MARCONI_API_LOG_FILE=$MARCONI_API_LOG_DIR/queues.log MARCONI_AUTH_CACHE_DIR=${MARCONI_AUTH_CACHE_DIR:-/var/cache/marconi} # Support potential entry-points console scripts @@ -96,6 +97,7 @@ function configure_marconi { iniset $MARCONI_CONF DEFAULT verbose True iniset $MARCONI_CONF DEFAULT use_syslog $SYSLOG + iniset $MARCONI_CONF DEFAULT log_file $MARCONI_API_LOG_FILE iniset $MARCONI_CONF 'drivers:transport:wsgi' bind $MARCONI_SERVICE_HOST iniset $MARCONI_CONF keystone_authtoken auth_protocol http @@ -148,7 +150,7 @@ function install_marconiclient { # start_marconi() - Start running processes, including screen function start_marconi { - screen_it marconi-server "marconi-server --config-file $MARCONI_CONF" + screen_it marconi-server "marconi-server --config-file $MARCONI_CONF 2>&1" echo "Waiting for Marconi to start..." if ! timeout $SERVICE_TIMEOUT sh -c "while ! 
wget --no-proxy -q -O- $MARCONI_SERVICE_PROTOCOL://$MARCONI_SERVICE_HOST:$MARCONI_SERVICE_PORT/v1/health; do sleep 1; done"; then die $LINENO "Marconi did not start" From ae1728917373986b68d2b0abe2e7052fb78e5903 Mon Sep 17 00:00:00 2001 From: ronak Date: Tue, 4 Mar 2014 15:48:22 -0800 Subject: [PATCH 0818/4704] Supporting Nuage Networks' Plugin through devstack Nuage networks' plugin specific configuration setting file for devstack Change-Id: I936f87b8fbc6f90130514b2fc0d111eab861da7c Implements: blueprint nuage-networks-plugin --- lib/neutron_plugins/nuage | 69 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 69 insertions(+) create mode 100644 lib/neutron_plugins/nuage diff --git a/lib/neutron_plugins/nuage b/lib/neutron_plugins/nuage new file mode 100644 index 0000000000..3649f39bfd --- /dev/null +++ b/lib/neutron_plugins/nuage @@ -0,0 +1,69 @@ +# Nuage Neutron Plugin +# ---------------------- + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + +function neutron_plugin_create_nova_conf { + NOVA_OVS_BRIDGE=${NOVA_OVS_BRIDGE:-"br-int"} + iniset $NOVA_CONF DEFAULT neutron_ovs_bridge $NOVA_OVS_BRIDGE + NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"} + LIBVIRT_FIREWALL_DRIVER=nova.virt.firewall.NoopFirewallDriver + iniset $NOVA_CONF DEFAULT firewall_driver $LIBVIRT_FIREWALL_DRIVER +} + +function neutron_plugin_install_agent_packages { + : +} + +function neutron_plugin_configure_common { + Q_PLUGIN_CONF_PATH=etc/neutron/plugins/nuage + Q_PLUGIN_CONF_FILENAME=nuage_plugin.ini + Q_DB_NAME="nuage_neutron" + Q_PLUGIN_CLASS="neutron.plugins.nuage.plugin.NuagePlugin" + Q_PLUGIN_EXTENSIONS_PATH=neutron/plugins/nuage/extensions + #Nuage specific Neutron defaults. 
Actual value must be set and sourced + NUAGE_CNA_SERVERS=${NUAGE_CNA_SERVERS:-'localhost:8443'} + NUAGE_CNA_SERVER_AUTH=${NUAGE_CNA_SERVER_AUTH:-'username:password'} + NUAGE_CNA_ORGANIZATION=${NUAGE_CNA_ORGANIZATION:-'org'} + NUAGE_CNA_SERVER_SSL=${NUAGE_CNA_SERVER_SSL:-'True'} + NUAGE_CNA_BASE_URI=${NUAGE_CNA_BASE_URI:-'/'} + NUAGE_CNA_AUTH_RESOURCE=${NUAGE_CNA_AUTH_RESOURCE:-'/'} + NUAGE_CNA_DEF_NETPART_NAME=${NUAGE_CNA_DEF_NETPART_NAME:-''} +} + +function neutron_plugin_configure_debug_command { + : +} + +function neutron_plugin_configure_dhcp_agent { + : +} + +function neutron_plugin_configure_l3_agent { + : +} + +function neutron_plugin_configure_plugin_agent { + : +} + +function neutron_plugin_configure_service { + iniset $NEUTRON_CONF DEFAULT api_extensions_path neutron/plugins/nuage/extensions/ + iniset /$Q_PLUGIN_CONF_FILE restproxy base_uri $NUAGE_CNA_BASE_URI + iniset /$Q_PLUGIN_CONF_FILE restproxy serverssl $NUAGE_CNA_SERVER_SSL + iniset /$Q_PLUGIN_CONF_FILE restproxy serverauth $NUAGE_CNA_SERVER_AUTH + iniset /$Q_PLUGIN_CONF_FILE restproxy organization $NUAGE_CNA_ORGANIZATION + iniset /$Q_PLUGIN_CONF_FILE restproxy server $NUAGE_CNA_SERVERS + iniset /$Q_PLUGIN_CONF_FILE restproxy auth_resource $NUAGE_CNA_AUTH_RESOURCE + iniset /$Q_PLUGIN_CONF_FILE restproxy default_net_partition_name $NUAGE_CNA_DEF_NETPART_NAME +} + +function has_neutron_plugin_security_group { + # 1 means False here + return 1 +} + +# Restore xtrace +$MY_XTRACE From 8068455a023063b615fc66ee038211a9ae300a81 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 5 Mar 2014 11:50:23 -0600 Subject: [PATCH 0819/4704] Close all logging file descriptors This has lingered for a long time, finally do something about it... 
Change-Id: Ib90408187698d5d4c23ffb0e527011446efc3c7e --- stack.sh | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/stack.sh b/stack.sh index ab1e8fe94d..32dac0f443 100755 --- a/stack.sh +++ b/stack.sh @@ -1419,3 +1419,9 @@ fi # Indicate how long this took to run (bash maintained variable ``SECONDS``) echo_summary "stack.sh completed in $SECONDS seconds." + +# Restore/close logging file descriptors +exec 1>&3 +exec 2>&3 +exec 3>&- +exec 6>&- From 961328fc4622b16135d6d580429dc3e5db01ded5 Mon Sep 17 00:00:00 2001 From: Flavio Percoco Date: Wed, 5 Mar 2014 18:45:56 +0100 Subject: [PATCH 0820/4704] Fix marconi's storage setting for MongoDB The storage driver should be set to mongodb and the driver's uri to the mongodb:// uri. Change-Id: I6193a5d78f6cd7283b4e3b1831978883b9e99b06 --- lib/marconi | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/marconi b/lib/marconi index 29ae386d9f..8f4f3c6bbc 100644 --- a/lib/marconi +++ b/lib/marconi @@ -105,7 +105,8 @@ function configure_marconi { iniset $MARCONI_CONF keystone_authtoken signing_dir $MARCONI_AUTH_CACHE_DIR if [[ "$MARCONI_BACKEND" = 'mongodb' ]]; then - iniset $MARCONI_CONF database connection mongodb://localhost:27017/marconi + iniset $MARCONI_CONF drivers storage mongodb + iniset $MARCONI_CONF 'drivers:storage:mongodb' uri mongodb://localhost:27017/marconi configure_mongodb cleanup_marconi fi From 5fc5b7e231710c2d67522d1bcabdc448dadd0f94 Mon Sep 17 00:00:00 2001 From: Flavio Percoco Date: Wed, 5 Mar 2014 18:49:02 +0100 Subject: [PATCH 0821/4704] Add support for sqlalchemy to Marconi This patch adds a way to setup a marconi instance using sqlalchemy. 
Change-Id: Ia694b76286835ca2ca935814370aa43544fe84fa --- lib/marconi | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/marconi b/lib/marconi index 8f4f3c6bbc..1e0cc7df08 100644 --- a/lib/marconi +++ b/lib/marconi @@ -104,7 +104,10 @@ function configure_marconi { iniset $MARCONI_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME iniset $MARCONI_CONF keystone_authtoken signing_dir $MARCONI_AUTH_CACHE_DIR - if [[ "$MARCONI_BACKEND" = 'mongodb' ]]; then + if [ "$MARCONI_BACKEND" = 'mysql' ] || [ "$MARCONI_BACKEND" = 'postgresql' ] ; then + iniset $MARCONI_CONF drivers storage sqlalchemy + iniset $MARCONI_CONF 'drivers:storage:sqlalchemy' uri `database_connection_url marconi` + else iniset $MARCONI_CONF drivers storage mongodb iniset $MARCONI_CONF 'drivers:storage:mongodb' uri mongodb://localhost:27017/marconi configure_mongodb From d46d9dd8de00d07eee9170365b1a025f0fc01ed9 Mon Sep 17 00:00:00 2001 From: Andrea Frittoli Date: Wed, 5 Mar 2014 13:38:19 +0000 Subject: [PATCH 0822/4704] Inject all account details in tempest.conf The tempest configuration function did not inject all account details in tempest.conf. The only reason why it worked, was because tempest uses default config values which are valid for the current devstack setup. To remove this dependency, two patches are needed: - this one in devstack, to inject all values - https://review.openstack.org/#/c/77602/ in tempest, to change default values to None Partially fixes bug 1287191 Change-Id: I01507b142703a1ff66707464b9a743e9d0ca3e01 --- lib/tempest | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/lib/tempest b/lib/tempest index 16f8744d85..8455aae170 100644 --- a/lib/tempest +++ b/lib/tempest @@ -149,8 +149,12 @@ function configure_tempest { password=${ADMIN_PASSWORD:-secrete} - # See files/keystone_data.sh where alt_demo user - # and tenant are set up... 
+ # See files/keystone_data.sh and stack.sh where admin, demo and alt_demo + # user and tenant are set up... + ADMIN_USERNAME=${ADMIN_USERNAME:-admin} + ADMIN_TENANT_NAME=${ADMIN_TENANT_NAME:-admin} + TEMPEST_USERNAME=${TEMPEST_USERNAME:-demo} + TEMPEST_TENANT_NAME=${TEMPEST_TENANT_NAME:-demo} ALT_USERNAME=${ALT_USERNAME:-alt_demo} ALT_TENANT_NAME=${ALT_TENANT_NAME:-alt_demo} @@ -254,11 +258,15 @@ function configure_tempest { # Identity iniset $TEMPEST_CONFIG identity uri "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:5000/v2.0/" iniset $TEMPEST_CONFIG identity uri_v3 "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:5000/v3/" + iniset $TEMPEST_CONFIG identity username $TEMPEST_USERNAME iniset $TEMPEST_CONFIG identity password "$password" + iniset $TEMPEST_CONFIG identity tenant_name $TEMPEST_TENANT_NAME iniset $TEMPEST_CONFIG identity alt_username $ALT_USERNAME iniset $TEMPEST_CONFIG identity alt_password "$password" iniset $TEMPEST_CONFIG identity alt_tenant_name $ALT_TENANT_NAME + iniset $TEMPEST_CONFIG identity admin_username $ADMIN_USERNAME iniset $TEMPEST_CONFIG identity admin_password "$password" + iniset $TEMPEST_CONFIG identity admin_tenant_name $ADMIN_TENANT_NAME # Image # for the gate we want to be able to override this variable so we aren't @@ -285,7 +293,9 @@ function configure_tempest { iniset $TEMPEST_CONFIG compute ssh_connect_method $ssh_connect_method # Compute admin + iniset $TEMPEST_CONFIG "compute-admin" username $USERNAME iniset $TEMPEST_CONFIG "compute-admin" password "$password" # DEPRECATED + iniset $TEMPEST_CONFIG "compute-admin" tenant_name $TENANT_NAME # Network iniset $TEMPEST_CONFIG network api_version 2.0 From 99b622a936c0b6f5b6283f3bcdca3bd7d0628e29 Mon Sep 17 00:00:00 2001 From: Ryan Hsu Date: Wed, 5 Mar 2014 15:35:49 -0800 Subject: [PATCH 0823/4704] Refactor vmdk upload code A syntax error is hit when trying to upload a flat vmdk file that is accompanied by a descriptor file. 
The code block that handles this has some unneeded characters that cause the error. Also, an else-block has been removed so that we can remove an extra indent. Change-Id: Iaf5c914e09da6831eeeec141228b39554a1e2216 Closes-bug: #1288471 --- functions | 51 +++++++++++++++++++++++++-------------------------- 1 file changed, 25 insertions(+), 26 deletions(-) diff --git a/functions b/functions index ab8319b0ce..1d30922916 100644 --- a/functions +++ b/functions @@ -163,38 +163,37 @@ function upload_image { if [[ ${IMAGE_NAME: -5} != "-flat" ]]; then warn $LINENO "Expected filename suffix: '-flat'."` `" Filename provided: ${IMAGE_NAME}" - else - descriptor_fname="${IMAGE_NAME:0:${#IMAGE_NAME} - 5}.vmdk" - path_len=`expr ${#image_url} - ${#IMAGE_FNAME}` - flat_path="${image_url:0:$path_len}" - descriptor_url=$flat_path$descriptor_fname - warn $LINENO "$descriptor_data_pair_msg"` - `" Attempt to retrieve the descriptor *.vmdk: $descriptor_url" - if [[ $flat_path != file* ]]; then - if [[ ! -f $FILES/$descriptor_fname || \ - "$(stat -c "%s" $FILES/$descriptor_fname)" = "0" ]]; then - wget -c $descriptor_url -O $FILES/$descriptor_fname - if [[ $? -ne 0 ]]; then - warn $LINENO "Descriptor not found $descriptor_url" - descriptor_found=false - fi - fi - descriptor_url="$FILES/$descriptor_fname" - else - descriptor_url=$(echo $descriptor_url | sed "s/^file:\/\///g") - if [[ ! -f $descriptor_url || \ - "$(stat -c "%s" $descriptor_url)" == "0" ]]; then + fi + + descriptor_fname="${IMAGE_NAME:0:${#IMAGE_NAME} - 5}.vmdk" + path_len=`expr ${#image_url} - ${#IMAGE_FNAME}` + flat_path="${image_url:0:$path_len}" + descriptor_url=$flat_path$descriptor_fname + warn $LINENO "$descriptor_data_pair_msg"` + `" Attempt to retrieve the descriptor *.vmdk: $descriptor_url" + if [[ $flat_path != file* ]]; then + if [[ ! -f $FILES/$descriptor_fname || \ + "$(stat -c "%s" $FILES/$descriptor_fname)" = "0" ]]; then + wget -c $descriptor_url -O $FILES/$descriptor_fname + if [[ $? 
-ne 0 ]]; then warn $LINENO "Descriptor not found $descriptor_url" descriptor_found=false fi fi - if $descriptor_found; then - vmdk_adapter_type="$(head -25 $descriptor_url |"` - `" { grep -a -F -m 1 'ddb.adapterType =' $descriptor_url || true; })" - vmdk_adapter_type="${vmdk_adapter_type#*\"}" - vmdk_adapter_type="${vmdk_adapter_type%?}" + descriptor_url="$FILES/$descriptor_fname" + else + descriptor_url=$(echo $descriptor_url | sed "s/^file:\/\///g") + if [[ ! -f $descriptor_url || \ + "$(stat -c "%s" $descriptor_url)" == "0" ]]; then + warn $LINENO "Descriptor not found $descriptor_url" + descriptor_found=false fi fi + if $descriptor_found; then + vmdk_adapter_type="$(head -25 $descriptor_url | { grep -a -F -m 1 'ddb.adapterType =' $descriptor_url || true; })" + vmdk_adapter_type="${vmdk_adapter_type#*\"}" + vmdk_adapter_type="${vmdk_adapter_type%?}" + fi vmdk_disktype="preallocated" else vmdk_disktype="preallocated" From 581f0ee48510d8eead8a95888ad9b56d89009a76 Mon Sep 17 00:00:00 2001 From: Ralf Haferkamp Date: Thu, 20 Feb 2014 16:28:15 +0100 Subject: [PATCH 0824/4704] Add a few missing package for SUSE Additionally rearranged the package list to be alphabetically sorrted Change-Id: I52cea97da60437250d0b7cf86a71e4a05d765568 --- files/rpms-suse/baremetal | 1 + files/rpms-suse/general | 11 ++++++----- 2 files changed, 7 insertions(+), 5 deletions(-) create mode 100644 files/rpms-suse/baremetal diff --git a/files/rpms-suse/baremetal b/files/rpms-suse/baremetal new file mode 100644 index 0000000000..61f73eeae3 --- /dev/null +++ b/files/rpms-suse/baremetal @@ -0,0 +1 @@ +dnsmasq diff --git a/files/rpms-suse/general b/files/rpms-suse/general index 704947ea53..6d994eaf7a 100644 --- a/files/rpms-suse/general +++ b/files/rpms-suse/general @@ -1,15 +1,20 @@ +bc bridge-utils ca-certificates-mozilla curl euca2ools +findutils-locate # useful when debugging git-core iputils +libopenssl-devel # to rebuild pyOpenSSL if needed +lsof # useful when debugging +make openssh 
openssl psmisc -python-setuptools # instead of python-distribute; dist:sle11sp2 python-cmd2 # dist:opensuse-12.3 python-pylint +python-setuptools # instead of python-distribute; dist:sle11sp2 python-unittest2 screen tar @@ -17,7 +22,3 @@ tcpdump unzip vim-enhanced wget -bc - -findutils-locate # useful when debugging -lsof # useful when debugging From 4d8af4aa05a76219b634d02485ae637a404b399f Mon Sep 17 00:00:00 2001 From: Alexander Gordeev Date: Thu, 6 Mar 2014 15:07:53 +0400 Subject: [PATCH 0825/4704] Add n-obj to stop_nova Add missing nova-object service to nova services list Change-Id: Ib26204b69356ad030ba3d03f095993370fbb2676 --- lib/nova | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/nova b/lib/nova index 583a5923ce..2d8715ba48 100644 --- a/lib/nova +++ b/lib/nova @@ -731,7 +731,7 @@ function stop_nova { # Kill the nova screen windows # Some services are listed here twice since more than one instance # of a service may be running in certain configs. - for serv in n-api n-cpu n-crt n-net n-sch n-novnc n-xvnc n-cauth n-spice n-cond n-cell n-cell n-api-meta; do + for serv in n-api n-cpu n-crt n-net n-sch n-novnc n-xvnc n-cauth n-spice n-cond n-cell n-cell n-api-meta n-obj; do screen_stop $serv done if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then From 423d7901a4cd6bc95188e023625b4e21251fad28 Mon Sep 17 00:00:00 2001 From: Nadya Privalova Date: Thu, 6 Mar 2014 15:14:59 +0400 Subject: [PATCH 0826/4704] Add an ability to configure debug-level for ceilometer Change-Id: Ibe9dd2391202a5af291d2eed1559bae60370f9a8 --- lib/ceilometer | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/ceilometer b/lib/ceilometer index 04c1a34b8b..b0899e2f24 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -129,6 +129,7 @@ function configure_ceilometer { iniset $CEILOMETER_CONF DEFAULT notification_topics 'notifications' iniset $CEILOMETER_CONF DEFAULT verbose True + iniset $CEILOMETER_CONF DEFAULT debug 
"$ENABLE_DEBUG_LOG_LEVEL" # Install the policy file for the API server cp $CEILOMETER_DIR/etc/ceilometer/policy.json $CEILOMETER_CONF_DIR From d44517dfcfacb5aa9e1952847a1505fd3a92580b Mon Sep 17 00:00:00 2001 From: Kyle Mestery Date: Tue, 28 Jan 2014 20:29:18 +0000 Subject: [PATCH 0827/4704] Add support for configuring OVS to work with OpenDaylight This adds support for running OpenDaylight as an OpenStack Neutron plugin under devstack. This entails downloading the latest version of OpenDaylight, configuring it, and running it as a service under devstack. This code also includes pieces which configure Open vSwitch on each devstack node to point at OpenDaylight as their OpenFlow and OVSDB control interface. This is required for compute hosts, which will not be running any Neutron software on them at all. This post-devstack configuration is handled in the extras directory because of the fact there is no Neutron code running on the compute hosts themselves. Closes-bug: #1273917 Change-Id: I696e7c7fe63c835f90c56105775def305a702877 --- extras.d/80-opendaylight.sh | 67 ++++++++++++++ files/apts/opendaylight | 2 + files/rpms-suse/opendaylight | 4 + files/rpms/opendaylight | 1 + lib/opendaylight | 167 +++++++++++++++++++++++++++++++++++ 5 files changed, 241 insertions(+) create mode 100644 extras.d/80-opendaylight.sh create mode 100644 files/apts/opendaylight create mode 100644 files/rpms-suse/opendaylight create mode 100644 files/rpms/opendaylight create mode 100644 lib/opendaylight diff --git a/extras.d/80-opendaylight.sh b/extras.d/80-opendaylight.sh new file mode 100644 index 0000000000..cc5c8dec1a --- /dev/null +++ b/extras.d/80-opendaylight.sh @@ -0,0 +1,67 @@ +# opendaylight.sh - DevStack extras script + +# Need this first to get the is_***_enabled for ODL +source $TOP_DIR/lib/opendaylight + +if is_service_enabled odl-server; then + if [[ "$1" == "source" ]]; then + # no-op + : + elif [[ "$1" == "stack" && "$2" == "install" ]]; then + install_opendaylight + 
configure_opendaylight + init_opendaylight + elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then + # This has to start before Neutron + start_opendaylight + elif [[ "$1" == "stack" && "$2" == "post-extra" ]]; then + # no-op + : + fi + + if [[ "$1" == "unstack" ]]; then + stop_opendaylight + cleanup_opendaylight + fi + + if [[ "$1" == "clean" ]]; then + # no-op + : + fi +fi + +if is_service_enabled odl-compute; then + if [[ "$1" == "source" ]]; then + # no-op + : + elif [[ "$1" == "stack" && "$2" == "install" ]]; then + install_opendaylight-compute + elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then + create_nova_conf_neutron + elif [[ "$1" == "stack" && "$2" == "extra" ]]; then + echo_summary "Initializing OpenDaylight" + ODL_LOCAL_IP=${ODL_LOCAL_IP:-$HOST_IP} + ODL_MGR_PORT=${ODL_MGR_PORT:-6640} + read ovstbl <<< $(sudo ovs-vsctl get Open_vSwitch . _uuid) + sudo ovs-vsctl set-manager tcp:$ODL_MGR_IP:$ODL_MGR_PORT + sudo ovs-vsctl set Open_vSwitch $ovstbl other_config={"local_ip"="$ODL_LOCAL_IP"} + elif [[ "$1" == "stack" && "$2" == "post-extra" ]]; then + # no-op + : + fi + + if [[ "$1" == "unstack" ]]; then + sudo ovs-vsctl del-manager + BRIDGES=$(sudo ovs-vsctl list-br) + for bridge in $BRIDGES ; do + sudo ovs-vsctl del-controller $bridge + done + + stop_opendaylight-compute + fi + + if [[ "$1" == "clean" ]]; then + # no-op + : + fi +fi diff --git a/files/apts/opendaylight b/files/apts/opendaylight new file mode 100644 index 0000000000..ec3cc9daf8 --- /dev/null +++ b/files/apts/opendaylight @@ -0,0 +1,2 @@ +openvswitch-datapath-dkms # NOPRIME +openvswitch-switch # NOPRIME diff --git a/files/rpms-suse/opendaylight b/files/rpms-suse/opendaylight new file mode 100644 index 0000000000..d6c7146331 --- /dev/null +++ b/files/rpms-suse/opendaylight @@ -0,0 +1,4 @@ +openvswitch # NOPRIME +openvswitch-controller # NOPRIME +openvswitch-switch # NOPRIME + diff --git a/files/rpms/opendaylight b/files/rpms/opendaylight new file mode 100644 index 
0000000000..98aaaf48f7 --- /dev/null +++ b/files/rpms/opendaylight @@ -0,0 +1 @@ +openvswitch # NOPRIME diff --git a/lib/opendaylight b/lib/opendaylight new file mode 100644 index 0000000000..ca81c20e55 --- /dev/null +++ b/lib/opendaylight @@ -0,0 +1,167 @@ +# lib/opendaylight +# Functions to control the configuration and operation of the opendaylight service + +# Dependencies: +# +# - ``functions`` file +# # ``DEST`` must be defined +# # ``STACK_USER`` must be defined + +# ``stack.sh`` calls the entry points in this order: +# +# - is_opendaylight_enabled +# - is_opendaylight-compute_enabled +# - install_opendaylight +# - install_opendaylight-compute +# - configure_opendaylight +# - init_opendaylight +# - start_opendaylight +# - stop_opendaylight-compute +# - stop_opendaylight +# - cleanup_opendaylight + +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +# For OVS_BRIDGE and PUBLIC_BRIDGE +source $TOP_DIR/lib/neutron_plugins/ovs_base + +# Defaults +# -------- + +# The IP address of ODL. Set this in local.conf. +# ODL_MGR_IP= +ODL_MGR_IP=${ODL_MGR_IP:-$SERVICE_HOST} + +# +ODL_DIR=$DEST/opendaylight + +# The OpenDaylight Package, currently using 'Hydrogen' release +ODL_PKG=${ODL_PKG:-distributions-virtualization-0.1.1-osgipackage.zip} + +# The OpenDaylight URL +ODL_URL=${ODL_URL:-https://nexus.opendaylight.org/content/repositories/opendaylight.release/org/opendaylight/integration/distributions-virtualization/0.1.1} + +# Default arguments for OpenDaylight. This is typically used to set +# Java memory options. 
+# ODL_ARGS=Xmx1024m -XX:MaxPermSize=512m +ODL_ARGS=${ODL_ARGS:-"-XX:MaxPermSize=384m"} + +# How long to pause after ODL starts to let it complete booting +ODL_BOOT_WAIT=${ODL_BOOT_WAIT:-60} + +# Set up default directories + + +# Entry Points +# ------------ + +# Test if OpenDaylight is enabled +# is_opendaylight_enabled +function is_opendaylight_enabled { + [[ ,${ENABLED_SERVICES} =~ ,"odl-" ]] && return 0 + return 1 +} + +# cleanup_opendaylight() - Remove residual data files, anything left over from previous +# runs that a clean run would need to clean up +function cleanup_opendaylight { + : +} + +# configure_opendaylight() - Set config files, create data dirs, etc +function configure_opendaylight { + # Remove simple forwarder + rm -f $ODL_DIR/opendaylight/plugins/org.opendaylight.controller.samples.simpleforwarding* + + # Configure OpenFlow 1.3 + echo "ovsdb.of.version=1.3" >> $ODL_DIR/opendaylight/configuration/config.ini +} + +# init_opendaylight() - Initialize databases, etc. +function init_opendaylight { + # clean up from previous (possibly aborted) runs + # create required data files + : +} + +# install_opendaylight() - Collect source and prepare +function install_opendaylight { + local _pwd=$(pwd) + + if is_ubuntu; then + install_package maven openjdk-7-jre openjdk-7-jdk + else + yum_install maven java-1.7.0-openjdk + fi + + # Download OpenDaylight + mkdir -p $ODL_DIR + cd $ODL_DIR + wget -N $ODL_URL/$ODL_PKG + unzip -u $ODL_PKG +} + +# install_opendaylight-compute - Make sure OVS is install +function install_opendaylight-compute { + local kernel_version + # Install deps + # FIXME add to ``files/apts/neutron``, but don't install if not needed! 
+ if is_ubuntu; then + kernel_version=`cat /proc/version | cut -d " " -f3` + install_package make fakeroot dkms openvswitch-switch openvswitch-datapath-dkms linux-headers-$kernel_version + elif is_fedora; then + install_package openvswitch + # Ensure that the service is started + restart_service openvswitch + elif is_suse; then + install_package openvswitch + restart_service openvswitch-switch + restart_service openvswitch-controller + fi +} + +# start_opendaylight() - Start running processes, including screen +function start_opendaylight { + if is_ubuntu; then + JHOME=/usr/lib/jvm/java-1.7.0-openjdk-amd64 + else + JHOME=/usr/lib/jvm/java-1.7.0-openjdk + fi + + # The flags to ODL have the following meaning: + # -of13: runs ODL using OpenFlow 1.3 protocol support. + # -virt ovsdb: Runs ODL in "virtualization" mode with OVSDB support + screen_it odl-server "cd $ODL_DIR/opendaylight && JAVE_HOME=$JHOME ./run.sh $ODL_ARGS -of13 -virt ovsdb" + + # Sleep a bit to let OpenDaylight finish starting up + sleep $ODL_BOOT_WAIT +} + +# stop_opendaylight() - Stop running processes (non-screen) +function stop_opendaylight { + screen_stop odl-server +} + +# stop_opendaylight-compute() - Remove OVS bridges +function stop_opendaylight-compute { + # remove all OVS ports that look like Neutron created ports + for port in $(sudo ovs-vsctl list port | grep -o -e tap[0-9a-f\-]* -e q[rg]-[0-9a-f\-]*); do + sudo ovs-vsctl del-port ${port} + done + + # remove all OVS bridges created by Neutron + for bridge in $(sudo ovs-vsctl list-br | grep -o -e ${OVS_BRIDGE} -e ${PUBLIC_BRIDGE}); do + sudo ovs-vsctl del-br ${bridge} + done +} + +# Restore xtrace +$XTRACE + +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: From a99b869d3c14b33d0cf59877f3ae60686763f8ae Mon Sep 17 00:00:00 2001 From: Sergey Skripnick Date: Wed, 5 Mar 2014 14:47:58 +0200 Subject: [PATCH 0828/4704] Do not restart libvirt if n-cpu is disabled If this service is disabled in localrc,
libvirt is not installed at all, and should not be restarted. Change-Id: Iaf482d4a82a26546c25249b3e32c7e629d862a1b Closes: bug 1288236 --- lib/nova | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/nova b/lib/nova index 583a5923ce..2f6d04db78 100644 --- a/lib/nova +++ b/lib/nova @@ -308,7 +308,7 @@ function configure_nova { # Rebuild the config file from scratch create_nova_conf - if [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then + if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then # Configure hypervisor plugin configure_nova_hypervisor fi From b44a8ef14f4e177aef0528db2b7721030f76b290 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Thu, 6 Mar 2014 11:25:04 -0600 Subject: [PATCH 0829/4704] Fix errexit in lib/ldap clear_ldap_state() deletes an object from the DIT that doesn't exist on the first run, this is OK but fails with errexit enabled. Change-Id: I3b881eedc891caa6b2dfd5913e43f3babcfa7d47 --- lib/ldap | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ldap b/lib/ldap index 51d02519af..efe2f096d7 100644 --- a/lib/ldap +++ b/lib/ldap @@ -154,7 +154,7 @@ function stop_ldap { # clear_ldap_state() - Clear LDAP State function clear_ldap_state { - ldapdelete -x -w $LDAP_PASSWORD -D "$LDAP_MANAGER_DN" -H $LDAP_URL -r "$LDAP_BASE_DN" + ldapdelete -x -w $LDAP_PASSWORD -D "$LDAP_MANAGER_DN" -H $LDAP_URL -r "$LDAP_BASE_DN" || : } # Restore xtrace From 1eae3e155a25faa8e0bb6ddba77e580c774fd265 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Thu, 6 Mar 2014 11:49:22 -0600 Subject: [PATCH 0830/4704] Make stop_swift() more robust for Grenade stop_swift() wasn't calling screen_stop() so the pid files and screen sessions were not being cleaned up. DevStack doesn't really care but Grenade does for the 'base' copy of DevStack. This should be backported to stable/havana for this reason.
Change-Id: Ib5afb321cef2b7ad74e69a3fd0d1dad469f78b11 --- lib/swift | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/lib/swift b/lib/swift index 5d4d4ef506..b8bc1b66e7 100644 --- a/lib/swift +++ b/lib/swift @@ -687,6 +687,11 @@ function stop_swift { swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true fi # Dump all of the servers + # Maintain the iteration as screen_stop() has some desirable side-effects + for type in proxy object container account; do + screen_stop s-${type} + done + # Blast out any stragglers pkill -f swift- } From f5d2a5ceb4030aa0868b11ef84b5055b70693702 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Thu, 6 Mar 2014 13:45:42 -0500 Subject: [PATCH 0831/4704] test for adding crazy branches as found by dansmith's clever hack, if devstack lands a crazy branch name in stackrc, we'd break the devstack gate. While it's doubtful anyone would do this, add a basic sanity check. Change-Id: Ib3b1881ed4fd520a1828ed073a7c8353e6f0a839 --- run_tests.sh | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/run_tests.sh b/run_tests.sh index a0bfbee0c0..685b2037f0 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -27,3 +27,16 @@ fi echo "Running bash8..." ./tools/bash8.py -v $FILES + + +# Test that no one is trying to land crazy refs as branches + +echo "Ensuring we don't have crazy refs" + +REFS=`grep BRANCH stackrc | grep -v -- '-master'` +rc=$? +if [[ $rc -eq 0 ]]; then + echo "Branch defaults must be master. Found:" + echo $REFS + exit 1 +fi From 07f1d0ef3d638d2289a45a17546e976907e004ee Mon Sep 17 00:00:00 2001 From: Andrea Frittoli Date: Thu, 6 Mar 2014 23:23:01 +0000 Subject: [PATCH 0832/4704] Iniset keystone auth version Introduces support for auth_version config flag in lib/tempest. The variable is named TEMPEST_AUTH_VERSION, and it can be set via localrc, so that the devstack-vm-gate-wrap may control it. The aim is to setup a keystone v3 based experimental check job in tempest experimental pipeline.
Partially implements bp multi-keystone-api-version-tests Change-Id: Ia6832d87308c6c7109e6ae0dbd8dff61134718ee --- lib/tempest | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/tempest b/lib/tempest index 8455aae170..b90988d1d9 100644 --- a/lib/tempest +++ b/lib/tempest @@ -267,6 +267,7 @@ function configure_tempest { iniset $TEMPEST_CONFIG identity admin_username $ADMIN_USERNAME iniset $TEMPEST_CONFIG identity admin_password "$password" iniset $TEMPEST_CONFIG identity admin_tenant_name $ADMIN_TENANT_NAME + iniset $TEMPEST_CONFIG identity auth_version ${TEMPEST_AUTH_VERSION:-v2} # Image # for the gate we want to be able to override this variable so we aren't From e530ba30a6965c016934819be5b1cfcaa6879b75 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Fri, 7 Mar 2014 05:58:18 -0500 Subject: [PATCH 0833/4704] make compute-admin correct we lost the admin tenant at some point in the last couple of days which disabled 500 tempest tests. Bring this back. Change-Id: I5cab2074777cab99982ae8fc4a83663e9d128284 --- lib/tempest | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/tempest b/lib/tempest index 8455aae170..b3736da963 100644 --- a/lib/tempest +++ b/lib/tempest @@ -293,9 +293,9 @@ function configure_tempest { iniset $TEMPEST_CONFIG compute ssh_connect_method $ssh_connect_method # Compute admin - iniset $TEMPEST_CONFIG "compute-admin" username $USERNAME - iniset $TEMPEST_CONFIG "compute-admin" password "$password" # DEPRECATED - iniset $TEMPEST_CONFIG "compute-admin" tenant_name $TENANT_NAME + iniset $TEMPEST_CONFIG "compute-admin" username $ADMIN_USERNAME + iniset $TEMPEST_CONFIG "compute-admin" password "$password" + iniset $TEMPEST_CONFIG "compute-admin" tenant_name $ADMIN_TENANT_NAME # Network iniset $TEMPEST_CONFIG network api_version 2.0 From bb1e07859cce688e3beed2c573e9073a72f778fb Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Thu, 6 Mar 2014 09:40:27 -0800 Subject: [PATCH 0834/4704] Don't install vim or locate by default Devstack 
doesn't need vim or locate, if someone wants to use them, they can just install them afterwards. Change-Id: I00f27c20c86d89465e4aefc67ed645a309c09a03 --- files/apts/general | 2 -- files/rpms-suse/general | 2 -- tools/xen/prepare_guest.sh | 2 +- 3 files changed, 1 insertion(+), 5 deletions(-) diff --git a/files/apts/general b/files/apts/general index 32d31f0642..995c0c6f88 100644 --- a/files/apts/general +++ b/files/apts/general @@ -9,8 +9,6 @@ git lsof # useful when debugging openssh-server openssl -vim-nox -locate # useful when debugging python-virtualenv python-unittest2 iputils-ping diff --git a/files/rpms-suse/general b/files/rpms-suse/general index 6d994eaf7a..ff27a3aac7 100644 --- a/files/rpms-suse/general +++ b/files/rpms-suse/general @@ -3,7 +3,6 @@ bridge-utils ca-certificates-mozilla curl euca2ools -findutils-locate # useful when debugging git-core iputils libopenssl-devel # to rebuild pyOpenSSL if needed @@ -20,5 +19,4 @@ screen tar tcpdump unzip -vim-enhanced wget diff --git a/tools/xen/prepare_guest.sh b/tools/xen/prepare_guest.sh index 440774ec5b..2b5e418a6a 100755 --- a/tools/xen/prepare_guest.sh +++ b/tools/xen/prepare_guest.sh @@ -73,7 +73,7 @@ EOF # Install basics apt-get update apt-get install -y cracklib-runtime curl wget ssh openssh-server tcpdump ethtool -apt-get install -y curl wget ssh openssh-server python-pip git vim-nox sudo python-netaddr +apt-get install -y curl wget ssh openssh-server python-pip git sudo python-netaddr pip install xenapi # Install XenServer guest utilities From b27f16d71660f75fcd82a035cdaf2b2eddec99ce Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Fri, 28 Feb 2014 14:29:02 +1100 Subject: [PATCH 0835/4704] Detect missing packages with yum yum -y doesn't report an error when packages are missing (see [1] for upstream discussion). Thus we run the output of yum through a small awk script looking for missing packages output. 
The one change required for RHEL is that python-wsgiref is included in the distro python, so doesn't need a separate package. [1] https://bugzilla.redhat.com/show_bug.cgi?id=965567 Change-Id: I9908ff4edbf2b0d961d25837a08a34e1417bbb02 --- files/rpms/glance | 2 +- functions-common | 17 ++++++++++++++++- 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/files/rpms/glance b/files/rpms/glance index 25c5d3902b..534097a92f 100644 --- a/files/rpms/glance +++ b/files/rpms/glance @@ -13,6 +13,6 @@ python-lxml #dist:f18,f19,f20,rhel7 python-paste-deploy #dist:f18,f19,f20,rhel7 python-routes python-sqlalchemy -python-wsgiref +python-wsgiref #dist:f18,f19,f20 pyxattr zlib-devel # testonly diff --git a/functions-common b/functions-common index 0db3ff3e7c..ed3d8832fd 100644 --- a/functions-common +++ b/functions-common @@ -938,9 +938,24 @@ function yum_install { [[ "$OFFLINE" = "True" ]] && return local sudo="sudo" [[ "$(id -u)" = "0" ]] && sudo="env" + + # The manual check for missing packages is because yum -y assumes + # missing packages are OK. See + # https://bugzilla.redhat.com/show_bug.cgi?id=965567 $sudo http_proxy=$http_proxy https_proxy=$https_proxy \ no_proxy=$no_proxy \ - yum install -y "$@" + yum install -y "$@" 2>&1 | \ + awk ' + BEGIN { fail=0 } + /No package/ { fail=1 } + { print } + END { exit fail }' || \ + die $LINENO "Missing packages detected" + + # also ensure we catch a yum failure + if [[ ${PIPESTATUS[0]} != 0 ]]; then + die $LINENO "Yum install failure" + fi } # zypper wrapper to set arguments correctly From f19ccb63593e4c3e6c1c2a7d4f2552c30ca1ee62 Mon Sep 17 00:00:00 2001 From: Samuel Merritt Date: Sat, 8 Mar 2014 07:54:05 -0800 Subject: [PATCH 0836/4704] Take tempurl out of Swift pipeline additions Swift commit 165dd44 added tempurl to the sample config, so now it appears twice in the default devstack-installed configuration. 
This commit removes tempurl from $SWIFT_EXTRAS_MIDDLEWARE so that it only appears once in the generated proxy pipeline. Change-Id: I4204b2a444312ab87c17f5fb296a43818a4528a6 --- lib/swift | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/swift b/lib/swift index b8bc1b66e7..b65544046a 100644 --- a/lib/swift +++ b/lib/swift @@ -67,8 +67,8 @@ fi SWIFT_LOOPBACK_DISK_SIZE=${SWIFT_LOOPBACK_DISK_SIZE:-$SWIFT_LOOPBACK_DISK_SIZE_DEFAULT} # Set ``SWIFT_EXTRAS_MIDDLEWARE`` to extras middlewares. -# Default is ``staticweb, tempurl, formpost`` -SWIFT_EXTRAS_MIDDLEWARE=${SWIFT_EXTRAS_MIDDLEWARE:-tempurl formpost staticweb} +# Default is ``staticweb, formpost`` +SWIFT_EXTRAS_MIDDLEWARE=${SWIFT_EXTRAS_MIDDLEWARE:-formpost staticweb} # Set ``SWIFT_EXTRAS_MIDDLEWARE_LAST`` to extras middlewares that need to be at # the end of the pipeline. From 11b36c9b0a0a04ff3a53ae95c6de94fdd457f5e7 Mon Sep 17 00:00:00 2001 From: Roey Chen Date: Mon, 10 Mar 2014 11:25:50 +0200 Subject: [PATCH 0837/4704] Fixed unconditioned source phase in OpenDaylight extras Should source ``lib/opendaylight`` in ``extras.d/80-opendaylight.sh`` only when appropriate services are enabled. 
Fix for bug/1290033 Change-Id: Ifa470e1e132029f3c5bf255f27c4e96373b339a8 Signed-off-by: Roey Chen --- extras.d/80-opendaylight.sh | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/extras.d/80-opendaylight.sh b/extras.d/80-opendaylight.sh index cc5c8dec1a..57b43288e0 100644 --- a/extras.d/80-opendaylight.sh +++ b/extras.d/80-opendaylight.sh @@ -1,7 +1,9 @@ # opendaylight.sh - DevStack extras script -# Need this first to get the is_***_enabled for ODL -source $TOP_DIR/lib/opendaylight +if is_service_enabled odl-server odl-compute; then + # Initial source + [[ "$1" == "source" ]] && source $TOP_DIR/lib/opendaylight +fi if is_service_enabled odl-server; then if [[ "$1" == "source" ]]; then From d9259ea466e54349fa87e7f76b7dfd061b19423c Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Mon, 10 Mar 2014 08:39:15 -0400 Subject: [PATCH 0838/4704] remove distros that are out of support by their upstream raring EOL was - 27 Jan 2014 f18 EOL was - 14 Jan 2014 opensuse 12.2 was - 15 Jan 2014 if their upstream isn't going to support them, we shouldn't be in devstack. this additionally leaves us in an interesting situation that there is no longer *any* opensuse version listed as supported. if the opensuse community doesn't step up here we should probably look at removing it. Change-Id: Ibb883930b430477dfd3b5126c5db04f95a50d3a7 --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 148ce04e28..e76a55c534 100755 --- a/stack.sh +++ b/stack.sh @@ -142,7 +142,7 @@ disable_negated_services # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -if [[ ! ${DISTRO} =~ (precise|raring|saucy|trusty|7.0|wheezy|sid|testing|jessie|f18|f19|f20|opensuse-12.2|rhel6) ]]; then +if [[ ! 
${DISTRO} =~ (precise|saucy|trusty|7.0|wheezy|sid|testing|jessie|f19|f20|rhel6) ]]; then echo "WARNING: this script has not been tested on $DISTRO" if [[ "$FORCE" != "yes" ]]; then die $LINENO "If you wish to run this script anyway run with FORCE=yes" From 353c4f1240d974e9ce93ba1f00a4bc7fe2c5856e Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Mon, 10 Mar 2014 08:44:18 -0400 Subject: [PATCH 0839/4704] remove additional f18 references f18 has been EOL for 6 weeks now, time to purge it from devstack Change-Id: I5aac2c63b2f4cd8b01ae685b1acf4c188637558b --- files/rpms/cinder | 2 +- files/rpms/glance | 4 ++-- files/rpms/horizon | 4 ++-- files/rpms/keystone | 8 ++++---- files/rpms/neutron | 4 ++-- files/rpms/nova | 6 +++--- files/rpms/swift | 2 +- 7 files changed, 15 insertions(+), 15 deletions(-) diff --git a/files/rpms/cinder b/files/rpms/cinder index 199ae10b79..423d57cd98 100644 --- a/files/rpms/cinder +++ b/files/rpms/cinder @@ -4,4 +4,4 @@ qemu-img python-devel postgresql-devel iscsi-initiator-utils -python-lxml #dist:f18,f19,f20,rhel7 +python-lxml #dist:f19,f20,rhel7 diff --git a/files/rpms/glance b/files/rpms/glance index 25c5d3902b..c886ecee10 100644 --- a/files/rpms/glance +++ b/files/rpms/glance @@ -9,8 +9,8 @@ python-argparse python-devel python-eventlet python-greenlet -python-lxml #dist:f18,f19,f20,rhel7 -python-paste-deploy #dist:f18,f19,f20,rhel7 +python-lxml #dist:f19,f20,rhel7 +python-paste-deploy #dist:f19,f20,rhel7 python-routes python-sqlalchemy python-wsgiref diff --git a/files/rpms/horizon b/files/rpms/horizon index 59503cc9aa..2dd24e0763 100644 --- a/files/rpms/horizon +++ b/files/rpms/horizon @@ -16,8 +16,8 @@ python-kombu python-migrate python-mox python-nose -python-paste #dist:f18,f19,f20 -python-paste-deploy #dist:f18,f19,f20 +python-paste #dist:f19,f20 +python-paste-deploy #dist:f19,f20 python-routes python-sphinx python-sqlalchemy diff --git a/files/rpms/keystone b/files/rpms/keystone index 99e8524628..7182091b31 100644 --- 
a/files/rpms/keystone +++ b/files/rpms/keystone @@ -1,9 +1,9 @@ python-greenlet libxslt-devel # dist:f20 -python-lxml #dist:f18,f19,f20 -python-paste #dist:f18,f19,f20 -python-paste-deploy #dist:f18,f19,f20 -python-paste-script #dist:f18,f19,f20 +python-lxml #dist:f19,f20 +python-paste #dist:f19,f20 +python-paste-deploy #dist:f19,f20 +python-paste-script #dist:f19,f20 python-routes python-sqlalchemy python-webob diff --git a/files/rpms/neutron b/files/rpms/neutron index 42d7f68d37..06ea0ea35d 100644 --- a/files/rpms/neutron +++ b/files/rpms/neutron @@ -11,8 +11,8 @@ python-greenlet python-iso8601 python-kombu #rhel6 gets via pip -python-paste # dist:f18,f19,f20,rhel7 -python-paste-deploy # dist:f18,f19,f20,rhel7 +python-paste # dist:f19,f20,rhel7 +python-paste-deploy # dist:f19,f20,rhel7 python-qpid python-routes python-sqlalchemy diff --git a/files/rpms/nova b/files/rpms/nova index a607d925e1..45d6e0bfb1 100644 --- a/files/rpms/nova +++ b/files/rpms/nova @@ -28,11 +28,11 @@ python-kombu python-lockfile python-migrate python-mox -python-paramiko # dist:f18,f19,f20,rhel7 +python-paramiko # dist:f19,f20,rhel7 # ^ on RHEL6, brings in python-crypto which conflicts with version from # pip we need -python-paste # dist:f18,f19,f20,rhel7 -python-paste-deploy # dist:f18,f19,f20,rhel7 +python-paste # dist:f19,f20,rhel7 +python-paste-deploy # dist:f19,f20,rhel7 python-qpid python-routes python-sqlalchemy diff --git a/files/rpms/swift b/files/rpms/swift index 72253f7752..bf29ea29b7 100644 --- a/files/rpms/swift +++ b/files/rpms/swift @@ -9,7 +9,7 @@ python-eventlet python-greenlet python-netifaces python-nose -python-paste-deploy # dist:f18,f19,f20,rhel7 +python-paste-deploy # dist:f19,f20,rhel7 python-simplejson python-webob pyxattr From 13349080b11383697f7c5312c357cc6c336ff9ba Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Mon, 10 Mar 2014 11:27:23 -0400 Subject: [PATCH 0840/4704] put libvirt debug in the right place libvirt debug setting was happening in a place where we 
weren't actually resetting the daemon. Move it to into the hypervisor plugin where we do. Change-Id: Ia79b0ef50f6b8fb007a20ce5cb4e510a5e4600a5 --- lib/nova | 11 ----------- lib/nova_plugins/hypervisor-libvirt | 10 ++++++++++ 2 files changed, 10 insertions(+), 11 deletions(-) diff --git a/lib/nova b/lib/nova index 583a5923ce..f5e0d11281 100644 --- a/lib/nova +++ b/lib/nova @@ -665,17 +665,6 @@ function start_nova_compute { fi if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then - # Enable client side traces for libvirt - local log_filters="1:libvirt 1:qemu 1:conf 1:security 3:event 3:json 3:file 1:util" - local log_outputs="1:file:/var/log/libvirt/libvirtd.log" - # Enable server side traces for libvirtd - if ! grep -q "log_filters=\"$log_filters\"" /etc/libvirt/libvirtd.conf; then - echo "log_filters=\"$log_filters\"" | sudo tee -a /etc/libvirt/libvirtd.conf - fi - if ! grep -q "log_outputs=\"$log_outputs\"" /etc/libvirt/libvirtd.conf; then - echo "log_outputs=\"$log_outputs\"" | sudo tee -a /etc/libvirt/libvirtd.conf - fi - # The group **$LIBVIRT_GROUP** is added to the current user in this script. # Use 'sg' to execute nova-compute as a member of the **$LIBVIRT_GROUP** group. screen_it n-cpu "cd $NOVA_DIR && sg $LIBVIRT_GROUP '$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf'" diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt index bbf65546f7..26880e5850 100644 --- a/lib/nova_plugins/hypervisor-libvirt +++ b/lib/nova_plugins/hypervisor-libvirt @@ -103,6 +103,16 @@ EOF fi add_user_to_group $STACK_USER $LIBVIRT_GROUP + # Enable server side traces for libvirtd + local log_filters="1:libvirt 1:qemu 1:conf 1:security 3:event 3:json 3:file 1:util" + local log_outputs="1:file:/var/log/libvirt/libvirtd.log" + if ! grep -q "log_filters=\"$log_filters\"" /etc/libvirt/libvirtd.conf; then + echo "log_filters=\"$log_filters\"" | sudo tee -a /etc/libvirt/libvirtd.conf + fi + if ! 
grep -q "log_outputs=\"$log_outputs\"" /etc/libvirt/libvirtd.conf; then + echo "log_outputs=\"$log_outputs\"" | sudo tee -a /etc/libvirt/libvirtd.conf + fi + # libvirt detects various settings on startup, as we potentially changed # the system configuration (modules, filesystems), we need to restart # libvirt to detect those changes. From 2983474e37d6c97c482e154a1f0d1f60a709915b Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Sun, 9 Mar 2014 18:36:42 +0100 Subject: [PATCH 0841/4704] Use the $SERVICE_HOST in backup_swift_url The $SERVICE_HOST is used to specify the swift proxy endpoint, the c-bak should use the same endpoint. Change-Id: Ia815f514839b0d1ec3fb9bb40992637c4f123e06 --- lib/cinder | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/cinder b/lib/cinder index dd2956a5b4..dadbe40a3b 100644 --- a/lib/cinder +++ b/lib/cinder @@ -274,6 +274,10 @@ function configure_cinder { iniset $CINDER_CONF DEFAULT lock_path $CINDER_STATE_PATH iniset $CINDER_CONF DEFAULT periodic_interval $CINDER_PERIODIC_INTERVAL + if is_service_enabled swift; then + iniset $CINDER_CONF DEFAULT backup_swift_url "http://$SERVICE_HOST:8080/v1/AUTH_" + fi + if is_service_enabled ceilometer; then iniset $CINDER_CONF DEFAULT notification_driver "cinder.openstack.common.notifier.rpc_notifier" fi From cea32b1f86631761e170413124dbf80972234a8c Mon Sep 17 00:00:00 2001 From: Aaron Rosen Date: Tue, 4 Mar 2014 16:20:14 -0800 Subject: [PATCH 0842/4704] Configuration needed for neutron nova callback Change-Id: I07cb476f5e87e967cd6fbbfc82881e8a147453b4 --- lib/neutron | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/lib/neutron b/lib/neutron index bb591abb0b..84e827761a 100644 --- a/lib/neutron +++ b/lib/neutron @@ -110,6 +110,10 @@ Q_USE_DEBUG_COMMAND=${Q_USE_DEBUG_COMMAND:-False} Q_ROUTER_NAME=${Q_ROUTER_NAME:-router1} # nova vif driver that all plugins should use NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"} 
+Q_NOTIFY_NOVA_PORT_STATUS_CHANGE=${Q_NOTIFY_NOVA_PORT_STATUS_CHANGE:-True} +Q_NOTIFY_NOVA_ON_PORT_DATA_CHANGES=${Q_NOTIFY_NOVA_PORT_CHANGE:-True} +VIF_PLUGGING_IS_FATAL=${VIF_PLUGGING_IS_FATAL:-True} +VIF_PLUGGING_TIMEOUT=${VIF_PLUGGING_TIMEOUT:-300} # The next two variables are configured by plugin # e.g. _configure_neutron_l3_agent or lib/neutron_plugins/* @@ -313,6 +317,9 @@ function create_nova_conf_neutron { if is_service_enabled q-meta; then iniset $NOVA_CONF DEFAULT service_neutron_metadata_proxy "True" fi + + iniset $NOVA_CONF DEFAULT vif_plugging_is_fatal "$VIF_PLUGGING_IS_FATAL" + iniset $NOVA_CONF DEFAULT vif_plugging_timeout "$VIF_PLUGGING_TIMEOUT" } # create_neutron_cache_dir() - Part of the _neutron_setup_keystone() process @@ -754,6 +761,16 @@ function _configure_neutron_service { iniset $NEUTRON_CONF DEFAULT ${I/=/ } done + # Configuration for neutron notifations to nova. + iniset $NEUTRON_CONF DEFAULT notify_nova_port_status_change $Q_NOTIFY_NOVA_PORT_STATUS_CHANGE + iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_data_changes $Q_NOTIFY_NOVA_ON_PORT_DATA_CHANGES + iniset $NEUTRON_CONF DEFAULT nova_url "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2" + iniset $NEUTRON_CONF DEFAULT nova_admin_username nova $NOVA_USER + iniset $NEUTRON_CONF DEFAULT nova_admin_password $SERVICE_PASSWORD + ADMIN_TENANT_ID=$(openstack project list | awk "/ service / { print \$2 }") + iniset $NEUTRON_CONF DEFAULT nova_admin_tenant_id $ADMIN_TENANT_ID + iniset $NEUTRON_CONF DEFAULT nova_admin_auth_url "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v2.0" + # Configure plugin neutron_plugin_configure_service } From 42a59c2bfae69eca5520748d6b45803a387fdb88 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 3 Mar 2014 14:31:29 -0600 Subject: [PATCH 0843/4704] Complete moving Keystone setup out of keystone_data.sh * Move remaining role creation to create_keystone_accounts() * Move glance creation to create_glance_accounts() * 
Move nova/ec2/s3 creation to create_nova_accounts() * Move ceilometer creation to create_ceilometer_accounts() * Move tempest creation to create_tempest_accounts() * Convert moved code to use OpenStackClient for setup * files/keystone_data.sh is removed Note that the SERVICE_TENANT and ADMIN_ROLE lookups in the other service implementations are not necessary with OSC, all operations can be done using names rather than requiring IDs. Change-Id: I4283ca0036ae39fd44ed2eed834b69d78e4f8257 --- extras.d/80-tempest.sh | 2 +- files/keystone_data.sh | 146 ----------------------------------------- lib/ceilometer | 12 ++++ lib/glance | 43 ++++++++++++ lib/keystone | 19 ++++-- lib/nova | 47 ++++++++++++- lib/tempest | 24 +++++++ stack.sh | 21 ++---- 8 files changed, 146 insertions(+), 168 deletions(-) delete mode 100755 files/keystone_data.sh diff --git a/extras.d/80-tempest.sh b/extras.d/80-tempest.sh index 0186e36aee..74f4c60d10 100644 --- a/extras.d/80-tempest.sh +++ b/extras.d/80-tempest.sh @@ -9,7 +9,7 @@ if is_service_enabled tempest; then install_tempest elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then # Tempest config must come after layer 2 services are running - : + create_tempest_accounts elif [[ "$1" == "stack" && "$2" == "extra" ]]; then echo_summary "Initializing Tempest" configure_tempest diff --git a/files/keystone_data.sh b/files/keystone_data.sh deleted file mode 100755 index fc1e8136a4..0000000000 --- a/files/keystone_data.sh +++ /dev/null @@ -1,146 +0,0 @@ -#!/bin/bash -# -# Initial data for Keystone using python-keystoneclient -# -# Tenant User Roles -# ------------------------------------------------------------------ -# service glance service -# service glance-swift ResellerAdmin -# service heat service # if enabled -# service ceilometer admin # if enabled -# Tempest Only: -# alt_demo alt_demo Member -# -# Variables set before calling this script: -# SERVICE_TOKEN - aka admin_token in keystone.conf -# SERVICE_ENDPOINT - local Keystone admin 
endpoint -# SERVICE_TENANT_NAME - name of tenant containing service accounts -# SERVICE_HOST - host used for endpoint creation -# ENABLED_SERVICES - stack.sh's list of services to start -# DEVSTACK_DIR - Top-level DevStack directory -# KEYSTONE_CATALOG_BACKEND - used to determine service catalog creation - -# Defaults -# -------- - -ADMIN_PASSWORD=${ADMIN_PASSWORD:-secrete} -SERVICE_PASSWORD=${SERVICE_PASSWORD:-$ADMIN_PASSWORD} -export SERVICE_TOKEN=$SERVICE_TOKEN -export SERVICE_ENDPOINT=$SERVICE_ENDPOINT -SERVICE_TENANT_NAME=${SERVICE_TENANT_NAME:-service} - -# Roles -# ----- - -# The ResellerAdmin role is used by Nova and Ceilometer so we need to keep it. -# The admin role in swift allows a user to act as an admin for their tenant, -# but ResellerAdmin is needed for a user to act as any tenant. The name of this -# role is also configurable in swift-proxy.conf -keystone role-create --name=ResellerAdmin -# Service role, so service users do not have to be admins -keystone role-create --name=service - - -# Services -# -------- - -if [[ "$ENABLED_SERVICES" =~ "n-api" ]] && [[ "$ENABLED_SERVICES" =~ "s-proxy" || "$ENABLED_SERVICES" =~ "swift" ]]; then - # Nova needs ResellerAdmin role to download images when accessing - # swift through the s3 api. 
- keystone user-role-add \ - --tenant $SERVICE_TENANT_NAME \ - --user nova \ - --role ResellerAdmin -fi - -# Glance -if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then - keystone user-create \ - --name=glance \ - --pass="$SERVICE_PASSWORD" \ - --tenant $SERVICE_TENANT_NAME \ - --email=glance@example.com - keystone user-role-add \ - --tenant $SERVICE_TENANT_NAME \ - --user glance \ - --role service - # required for swift access - if [[ "$ENABLED_SERVICES" =~ "s-proxy" ]]; then - keystone user-create \ - --name=glance-swift \ - --pass="$SERVICE_PASSWORD" \ - --tenant $SERVICE_TENANT_NAME \ - --email=glance-swift@example.com - keystone user-role-add \ - --tenant $SERVICE_TENANT_NAME \ - --user glance-swift \ - --role ResellerAdmin - fi - if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - keystone service-create \ - --name=glance \ - --type=image \ - --description="Glance Image Service" - keystone endpoint-create \ - --region RegionOne \ - --service glance \ - --publicurl "http://$SERVICE_HOST:9292" \ - --adminurl "http://$SERVICE_HOST:9292" \ - --internalurl "http://$SERVICE_HOST:9292" - fi -fi - -# Ceilometer -if [[ "$ENABLED_SERVICES" =~ "ceilometer" ]] && [[ "$ENABLED_SERVICES" =~ "s-proxy" || "$ENABLED_SERVICES" =~ "swift" ]]; then - # Ceilometer needs ResellerAdmin role to access swift account stats. 
- keystone user-role-add --tenant $SERVICE_TENANT_NAME \ - --user ceilometer \ - --role ResellerAdmin -fi - -# EC2 -if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then - if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - keystone service-create \ - --name=ec2 \ - --type=ec2 \ - --description="EC2 Compatibility Layer" - keystone endpoint-create \ - --region RegionOne \ - --service ec2 \ - --publicurl "http://$SERVICE_HOST:8773/services/Cloud" \ - --adminurl "http://$SERVICE_HOST:8773/services/Admin" \ - --internalurl "http://$SERVICE_HOST:8773/services/Cloud" - fi -fi - -# S3 -if [[ "$ENABLED_SERVICES" =~ "n-obj" || "$ENABLED_SERVICES" =~ "swift3" ]]; then - if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - keystone service-create \ - --name=s3 \ - --type=s3 \ - --description="S3" - keystone endpoint-create \ - --region RegionOne \ - --service s3 \ - --publicurl "http://$SERVICE_HOST:$S3_SERVICE_PORT" \ - --adminurl "http://$SERVICE_HOST:$S3_SERVICE_PORT" \ - --internalurl "http://$SERVICE_HOST:$S3_SERVICE_PORT" - fi -fi - -if [[ "$ENABLED_SERVICES" =~ "tempest" ]]; then - # Tempest has some tests that validate various authorization checks - # between two regular users in separate tenants - keystone tenant-create \ - --name=alt_demo - keystone user-create \ - --name=alt_demo \ - --pass="$ADMIN_PASSWORD" \ - --email=alt_demo@example.com - keystone user-role-add \ - --tenant alt_demo \ - --user alt_demo \ - --role Member -fi diff --git a/lib/ceilometer b/lib/ceilometer index 04c1a34b8b..b8305b1e9e 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -69,6 +69,11 @@ function is_ceilometer_enabled { # create_ceilometer_accounts() - Set up common required ceilometer accounts +# Project User Roles +# ------------------------------------------------------------------ +# SERVICE_TENANT_NAME ceilometer admin +# SERVICE_TENANT_NAME ceilometer ResellerAdmin (if Swift is enabled) + create_ceilometer_accounts() { SERVICE_TENANT=$(openstack project list | awk "/ 
$SERVICE_TENANT_NAME / { print \$2 }") @@ -99,6 +104,13 @@ create_ceilometer_accounts() { --adminurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT/" \ --internalurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT/" fi + if is_service_enabled swift; then + # Ceilometer needs ResellerAdmin role to access swift account stats. + openstack role add \ + --project $SERVICE_TENANT_NAME \ + --user ceilometer \ + ResellerAdmin + fi fi } diff --git a/lib/glance b/lib/glance index 8a4c21b3f2..51e4399388 100644 --- a/lib/glance +++ b/lib/glance @@ -159,6 +159,49 @@ function configure_glance { cp -p $GLANCE_DIR/etc/schema-image.json $GLANCE_SCHEMA_JSON } +# create_glance_accounts() - Set up common required glance accounts + +# Project User Roles +# ------------------------------------------------------------------ +# SERVICE_TENANT_NAME glance service +# SERVICE_TENANT_NAME glance-swift ResellerAdmin (if Swift is enabled) + +function create_glance_accounts { + if is_service_enabled g-api; then + openstack user create \ + --password "$SERVICE_PASSWORD" \ + --project $SERVICE_TENANT_NAME \ + glance + openstack role add \ + --project $SERVICE_TENANT_NAME \ + --user glance \ + service + # required for swift access + if is_service_enabled s-proxy; then + openstack user create \ + --password "$SERVICE_PASSWORD" \ + --project $SERVICE_TENANT_NAME \ + glance-swift + openstack role add \ + --project $SERVICE_TENANT_NAME \ + --user glance-swift \ + ResellerAdmin + fi + if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then + openstack service create \ + --type image \ + --description "Glance Image Service" \ + glance + openstack endpoint create \ + --region RegionOne \ + --publicurl "http://$GLANCE_HOSTPORT" \ + --adminurl "http://$GLANCE_HOSTPORT" \ + --internalurl "http://$GLANCE_HOSTPORT" \ + glance + fi + fi +} + # create_glance_cache_dir() - Part of the init_glance() process function create_glance_cache_dir { # Create 
cache dir diff --git a/lib/keystone b/lib/keystone index c6856c95c3..b31cc57a56 100644 --- a/lib/keystone +++ b/lib/keystone @@ -266,9 +266,11 @@ function configure_keystone { # Tenant User Roles # ------------------------------------------------------------------ +# admin admin admin # service -- -- +# -- -- service +# -- -- ResellerAdmin # -- -- Member -# admin admin admin # demo admin admin # demo demo Member, anotherrole # invisible_to_admin demo Member @@ -294,10 +296,17 @@ function create_keystone_accounts { --project $ADMIN_TENANT \ --user $ADMIN_USER - # service - SERVICE_TENANT=$(openstack project create \ - $SERVICE_TENANT_NAME \ - | grep " id " | get_field 2) + # Create service project/role + openstack project create $SERVICE_TENANT_NAME + + # Service role, so service users do not have to be admins + openstack role create service + + # The ResellerAdmin role is used by Nova and Ceilometer so we need to keep it. + # The admin role in swift allows a user to act as an admin for their tenant, + # but ResellerAdmin is needed for a user to act as any tenant. 
The name of this + # role is also configurable in swift-proxy.conf + openstack role create ResellerAdmin # The Member role is used by Horizon and Swift so we need to keep it: MEMBER_ROLE=$(openstack role create \ diff --git a/lib/nova b/lib/nova index 583a5923ce..a7c44211ca 100644 --- a/lib/nova +++ b/lib/nova @@ -316,9 +316,10 @@ function configure_nova { # create_nova_accounts() - Set up common required nova accounts -# Tenant User Roles +# Project User Roles # ------------------------------------------------------------------ -# service nova admin, [ResellerAdmin (swift only)] +# SERVICE_TENANT_NAME nova admin +# SERVICE_TENANT_NAME nova ResellerAdmin (if Swift is enabled) # Migrated from keystone_data.sh create_nova_accounts() { @@ -363,6 +364,48 @@ create_nova_accounts() { --internalurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v3" fi fi + + if is_service_enabled n-api; then + # Swift + if is_service_enabled swift; then + # Nova needs ResellerAdmin role to download images when accessing + # swift through the s3 api. 
+ openstack role add \ + --project $SERVICE_TENANT_NAME \ + --user nova \ + ResellerAdmin + fi + + # EC2 + if [[ "$KEYSTONE_CATALOG_BACKEND" = "sql" ]]; then + openstack service create \ + --type ec2 \ + --description "EC2 Compatibility Layer" \ + ec2 + openstack endpoint create \ + --region RegionOne \ + --publicurl "http://$SERVICE_HOST:8773/services/Cloud" \ + --adminurl "http://$SERVICE_HOST:8773/services/Admin" \ + --internalurl "http://$SERVICE_HOST:8773/services/Cloud" \ + ec2 + fi + fi + + # S3 + if is_service_enabled n-obj swift3; then + if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then + openstack service create \ + --type s3 \ + --description "S3" \ + s3 + openstack endpoint create \ + --region RegionOne \ + --publicurl "http://$SERVICE_HOST:$S3_SERVICE_PORT" \ + --adminurl "http://$SERVICE_HOST:$S3_SERVICE_PORT" \ + --internalurl "http://$SERVICE_HOST:$S3_SERVICE_PORT" \ + s3 + fi + fi } # create_nova_conf() - Create a new nova.conf file diff --git a/lib/tempest b/lib/tempest index 16f8744d85..897efa8a8f 100644 --- a/lib/tempest +++ b/lib/tempest @@ -358,6 +358,30 @@ function configure_tempest { $errexit } +# create_tempest_accounts() - Set up common required tempest accounts + +# Project User Roles +# ------------------------------------------------------------------ +# alt_demo alt_demo Member + +# Migrated from keystone_data.sh +function create_tempest_accounts { + if is_service_enabled tempest; then + # Tempest has some tests that validate various authorization checks + # between two regular users in separate tenants + openstack project create \ + alt_demo + openstack user create \ + --project alt_demo \ + --password "$ADMIN_PASSWORD" \ + alt_demo + openstack role add \ + --project alt_demo \ + --user alt_demo \ + Member + fi +} + # install_tempest() - Collect source and prepare function install_tempest { git_clone $TEMPEST_REPO $TEMPEST_DIR $TEMPEST_BRANCH diff --git a/stack.sh b/stack.sh index c990a1c6ca..f8973ee98f 100755 --- a/stack.sh +++ 
b/stack.sh @@ -907,14 +907,13 @@ if is_service_enabled key; then SERVICE_ENDPOINT=http://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT_INT/v2.0 fi - # Do the keystone-specific bits from keystone_data.sh - export OS_SERVICE_TOKEN=$SERVICE_TOKEN - export OS_SERVICE_ENDPOINT=$SERVICE_ENDPOINT - # Add temporarily to make openstackclient work + # Setup OpenStackclient token-flow auth export OS_TOKEN=$SERVICE_TOKEN export OS_URL=$SERVICE_ENDPOINT + create_keystone_accounts create_nova_accounts + create_glance_accounts create_cinder_accounts create_neutron_accounts @@ -922,7 +921,7 @@ if is_service_enabled key; then create_ceilometer_accounts fi - if is_service_enabled swift || is_service_enabled s-proxy; then + if is_service_enabled swift; then create_swift_accounts fi @@ -930,20 +929,14 @@ if is_service_enabled key; then create_heat_accounts fi - # ``keystone_data.sh`` creates services, admin and demo users, and roles. - ADMIN_PASSWORD=$ADMIN_PASSWORD SERVICE_TENANT_NAME=$SERVICE_TENANT_NAME SERVICE_PASSWORD=$SERVICE_PASSWORD \ - SERVICE_TOKEN=$SERVICE_TOKEN SERVICE_ENDPOINT=$SERVICE_ENDPOINT SERVICE_HOST=$SERVICE_HOST \ - S3_SERVICE_PORT=$S3_SERVICE_PORT KEYSTONE_CATALOG_BACKEND=$KEYSTONE_CATALOG_BACKEND \ - DEVSTACK_DIR=$TOP_DIR ENABLED_SERVICES=$ENABLED_SERVICES \ - bash -x $FILES/keystone_data.sh - - # Set up auth creds now that keystone is bootstrapped + # Begone token-flow auth unset OS_TOKEN OS_URL + + # Set up password-flow auth creds now that keystone is bootstrapped export OS_AUTH_URL=$SERVICE_ENDPOINT export OS_TENANT_NAME=admin export OS_USERNAME=admin export OS_PASSWORD=$ADMIN_PASSWORD - unset OS_SERVICE_TOKEN OS_SERVICE_ENDPOINT fi From 936284b02ab6365bb0bcde49b617a57a902d491c Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Tue, 11 Mar 2014 09:35:55 +1100 Subject: [PATCH 0844/4704] Make mongo install for ceilometer NOPRIME mongodb packages are missing on some platforms, so we switch to a manual install. 
Also gate the mongo call in cleanup Change-Id: I1755e461c66be30da3db2a0994f908503c4c38ea --- files/apts/ceilometer-collector | 4 ++-- files/rpms/ceilometer-collector | 4 ++-- lib/ceilometer | 21 ++++++++++++++++++--- 3 files changed, 22 insertions(+), 7 deletions(-) diff --git a/files/apts/ceilometer-collector b/files/apts/ceilometer-collector index 71007ba4c5..f1b692ac71 100644 --- a/files/apts/ceilometer-collector +++ b/files/apts/ceilometer-collector @@ -1,5 +1,5 @@ -python-pymongo -mongodb-server +python-pymongo #NOPRIME +mongodb-server #NOPRIME libnspr4-dev pkg-config libxml2-dev diff --git a/files/rpms/ceilometer-collector b/files/rpms/ceilometer-collector index c91bac36a2..9cf580d22d 100644 --- a/files/rpms/ceilometer-collector +++ b/files/rpms/ceilometer-collector @@ -1,4 +1,4 @@ selinux-policy-targeted -mongodb-server -pymongo +mongodb-server #NOPRIME +pymongo # NOPRIME mongodb # NOPRIME diff --git a/lib/ceilometer b/lib/ceilometer index b0899e2f24..6aaddcefad 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -106,7 +106,9 @@ create_ceilometer_accounts() { # cleanup_ceilometer() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_ceilometer { - mongo ceilometer --eval "db.dropDatabase();" + if [ "$CEILOMETER_BACKEND" != 'mysql' ] && [ "$CEILOMETER_BACKEND" != 'postgresql' ] ; then + mongo ceilometer --eval "db.dropDatabase();" + fi } # configure_ceilometerclient() - Set config files, create data dirs, etc @@ -164,14 +166,27 @@ function configure_ceilometer { } function configure_mongodb { + # server package is the same on all + local packages=mongodb-server + + if is_fedora; then + # mongodb client + python bindings + packages="${packages} mongodb pymongo" + else + packages="${packages} python-pymongo" + fi + + install_package ${packages} + if is_fedora; then - # install mongodb client - install_package mongodb # ensure smallfiles selected to minimize freespace requirements sudo 
sed -i '/--smallfiles/!s/OPTIONS=\"/OPTIONS=\"--smallfiles /' /etc/sysconfig/mongod restart_service mongod fi + + # give mongodb time to start-up + sleep 5 } # init_ceilometer() - Initialize etc. From ccb3d10e04f7be773daf1bddd0bc2bff024ce6f4 Mon Sep 17 00:00:00 2001 From: Newell Jensen Date: Mon, 10 Mar 2014 14:28:52 -0700 Subject: [PATCH 0845/4704] Makes error message easier to understand. If the host ip address is indeterminate while executing stack.sh, an error message is displayed. This error message could be a source of confusion since it references localrc, which is depreciated. This patch makes the error message clearer and easier to understand. It does this by taking out the reference to localrc. It also points the user towards local.conf where there are suggestions on how to set HOST_IP. Change-Id: I41f14a2de85449d2a08ab7eb2849844a1087b147 Closes-Bug: #1290556 --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 148ce04e28..817da26a8e 100755 --- a/stack.sh +++ b/stack.sh @@ -289,7 +289,7 @@ FIXED_NETWORK_SIZE=${FIXED_NETWORK_SIZE:-256} HOST_IP=$(get_default_host_ip $FIXED_RANGE $FLOATING_RANGE "$HOST_IP_IFACE" "$HOST_IP") if [ "$HOST_IP" == "" ]; then - die $LINENO "Could not determine host ip address. Either localrc specified dhcp on ${HOST_IP_IFACE} or defaulted" + die $LINENO "Could not determine host ip address. See local.conf for suggestions on setting HOST_IP." fi # Allow the use of an alternate hostname (such as localhost/127.0.0.1) for service endpoints. From c20bab89c47e02d88fb314d4d0a8dbfc73fca20e Mon Sep 17 00:00:00 2001 From: Ralf Haferkamp Date: Tue, 11 Mar 2014 11:38:24 +0100 Subject: [PATCH 0846/4704] Use the python-pyOpenSSL package openSUSE Recent pyOpenSSL releases when installed from pip depend on cryptography>=0.2.1, which itself depends on cffi>=0.8. That is conflicting with the python-cffi (0.7.2) package on openSUSE-13.1 which is required by the installed python-xattr. 
Change-Id: I721ce5288d150a3b01fb2558f7ca86028d734138 --- files/rpms-suse/glance | 1 + 1 file changed, 1 insertion(+) diff --git a/files/rpms-suse/glance b/files/rpms-suse/glance index dd68ac08c8..d9844e9bb4 100644 --- a/files/rpms-suse/glance +++ b/files/rpms-suse/glance @@ -8,5 +8,6 @@ python-devel python-eventlet python-greenlet python-iso8601 +python-pyOpenSSL python-wsgiref python-xattr From 3b1f2e4e885559957a939f8a260b4cff9938bc80 Mon Sep 17 00:00:00 2001 From: YAMAMOTO Takashi Date: Mon, 24 Feb 2014 20:30:07 +0900 Subject: [PATCH 0847/4704] Fix inverted conditionals in setup_develop This fixes regressions introduced by: Change-Id: Ic97e68348f46245b271567893b447fcedbd7bd6e ("Handle non-zero exit code from git diff") Change-Id: I053a292c287f3035eef37db2264eda06a170f9bc Closes-Bug: 1287513 --- functions-common | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/functions-common b/functions-common index ed3d8832fd..90cd3dfa72 100644 --- a/functions-common +++ b/functions-common @@ -1248,7 +1248,7 @@ function setup_develop { # ``errexit`` requires us to trap the exit code when the repo is changed local update_requirements=$(cd $project_dir && git diff --exit-code >/dev/null || echo "changed") - if [[ $update_requirements = "changed" ]]; then + if [[ $update_requirements != "changed" ]]; then (cd $REQUIREMENTS_DIR; \ $SUDO_CMD python update.py $project_dir) fi @@ -1264,7 +1264,7 @@ function setup_develop { # a variable that tells us whether or not we should UNDO the requirements # changes (this will be set to False in the OpenStack ci gate) if [ $UNDO_REQUIREMENTS = "True" ]; then - if [[ $update_requirements = "changed" ]]; then + if [[ $update_requirements != "changed" ]]; then (cd $project_dir && git reset --hard) fi fi From dd304603e011160f7f796ec4af7dcaf50008372c Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 11 Mar 2014 16:38:57 -0400 Subject: [PATCH 0848/4704] put libvirt debug behind a flag only turn on the libvirt debugging if we 
really need it, which we could control in the gate via devstack-gate. Change-Id: I5e6d41d5333357608ab6a614610c060400f70a10 --- lib/nova_plugins/hypervisor-libvirt | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt index 26880e5850..5a51f33808 100644 --- a/lib/nova_plugins/hypervisor-libvirt +++ b/lib/nova_plugins/hypervisor-libvirt @@ -25,6 +25,8 @@ set +o xtrace # File injection is disabled by default in Nova. This will turn it back on. ENABLE_FILE_INJECTION=${ENABLE_FILE_INJECTION:-False} +# if we should turn on massive libvirt debugging +DEBUG_LIBVIRT=$(trueorfalse False $DEBUG_LIBVIRT) # Entry Points @@ -104,13 +106,15 @@ EOF add_user_to_group $STACK_USER $LIBVIRT_GROUP # Enable server side traces for libvirtd - local log_filters="1:libvirt 1:qemu 1:conf 1:security 3:event 3:json 3:file 1:util" - local log_outputs="1:file:/var/log/libvirt/libvirtd.log" - if ! grep -q "log_filters=\"$log_filters\"" /etc/libvirt/libvirtd.conf; then - echo "log_filters=\"$log_filters\"" | sudo tee -a /etc/libvirt/libvirtd.conf - fi - if ! grep -q "log_outputs=\"$log_outputs\"" /etc/libvirt/libvirtd.conf; then - echo "log_outputs=\"$log_outputs\"" | sudo tee -a /etc/libvirt/libvirtd.conf + if [[ "$DEBUG_LIBVIRT" = "True" ]] ; then + local log_filters="1:libvirt 1:qemu 1:conf 1:security 3:event 3:json 3:file 1:util" + local log_outputs="1:file:/var/log/libvirt/libvirtd.log" + if ! grep -q "log_filters=\"$log_filters\"" /etc/libvirt/libvirtd.conf; then + echo "log_filters=\"$log_filters\"" | sudo tee -a /etc/libvirt/libvirtd.conf + fi + if ! 
grep -q "log_outputs=\"$log_outputs\"" /etc/libvirt/libvirtd.conf; then + echo "log_outputs=\"$log_outputs\"" | sudo tee -a /etc/libvirt/libvirtd.conf + fi fi # libvirt detects various settings on startup, as we potentially changed From d78c4057d4ae53a994eefb4d4b0ee01a9365e5d5 Mon Sep 17 00:00:00 2001 From: Hemanth Ravi Date: Sun, 26 Jan 2014 17:30:11 -0800 Subject: [PATCH 0849/4704] Install script for One Convergence Neutron plugin. Change-Id: I1dcc625a7c986e7533820b01af9eee5b8addcffe Implements: install for blueprint oc-nvsd-neutron-plugin --- lib/neutron_plugins/oneconvergence | 76 ++++++++++++++++++++++++++++++ 1 file changed, 76 insertions(+) create mode 100644 lib/neutron_plugins/oneconvergence diff --git a/lib/neutron_plugins/oneconvergence b/lib/neutron_plugins/oneconvergence new file mode 100644 index 0000000000..0aebff629c --- /dev/null +++ b/lib/neutron_plugins/oneconvergence @@ -0,0 +1,76 @@ +# Neutron One Convergence plugin +# --------------------------- +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + +source $TOP_DIR/lib/neutron_plugins/ovs_base + +Q_L3_ENABLED=true +Q_L3_ROUTER_PER_TENANT=true +Q_USE_NAMESPACE=true + +function neutron_plugin_install_agent_packages { + _neutron_ovs_base_install_agent_packages +} +# Configure common parameters +function neutron_plugin_configure_common { + + Q_PLUGIN_CONF_PATH=etc/neutron/plugins/oneconvergence + Q_PLUGIN_CONF_FILENAME=nvsdplugin.ini + Q_PLUGIN_CLASS="neutron.plugins.oneconvergence.plugin.OneConvergencePluginV2" + Q_DB_NAME='oc_nvsd_neutron' +} + +# Configure plugin specific information +function neutron_plugin_configure_service { + iniset /$Q_PLUGIN_CONF_FILE nvsd nvsd_ip $NVSD_IP + iniset /$Q_PLUGIN_CONF_FILE nvsd nvsd_port $NVSD_PORT + iniset /$Q_PLUGIN_CONF_FILE nvsd nvsd_user $NVSD_USER + iniset /$Q_PLUGIN_CONF_FILE nvsd nvsd_passwd $NVSD_PASSWD +} + +function neutron_plugin_configure_debug_command { + _neutron_ovs_base_configure_debug_command +} + +function 
neutron_plugin_setup_interface_driver { + local conf_file=$1 + iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver +} + +function has_neutron_plugin_security_group { + # 1 means False here + return 0 +} + +function setup_integration_bridge { + _neutron_ovs_base_setup_bridge $OVS_BRIDGE +} + +function neutron_plugin_configure_dhcp_agent { + setup_integration_bridge + iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_agent_manager neutron.agent.dhcp_agent.DhcpAgentWithStateReport +} + +function neutron_plugin_configure_l3_agent { + _neutron_ovs_base_configure_l3_agent + iniset $Q_L3_CONF_FILE DEFAULT l3_agent_manager neutron.agent.l3_agent.L3NATAgentWithStateReport +} + +function neutron_plugin_configure_plugin_agent { + + AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-nvsd-agent" + + _neutron_ovs_base_configure_firewall_driver +} + +function neutron_plugin_create_nova_conf { + NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"} + if ( is_service_enabled n-cpu && ! ( is_service_enabled q-dhcp )) ; then + setup_integration_bridge + fi +} + +# Restore xtrace +$MY_XTRACE From 7d4c7e09b4882077471c3b2cb097c237c2016f96 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 12 Mar 2014 08:05:08 -0400 Subject: [PATCH 0850/4704] remove docker from devstack with I1c9bea2fdeebc4199c4f7d8fca4580a6fb7fed5b nova removed docker from it's driver tree. We shouldn't have driver support inside of devstack that's not part of upstream projects (this has been a line we've been pretty clear on with Neutron drivers in the past). Remove docker driver accordingly. 
Change-Id: Ib91d415ea1616d99a5c5e7bc3b9015392fda5847 --- README.md | 6 +- exercises/boot_from_volume.sh | 3 - exercises/euca.sh | 3 - exercises/floating_ips.sh | 3 - exercises/sec_groups.sh | 3 - exercises/volumes.sh | 3 - lib/nova_plugins/hypervisor-docker | 132 ----------------------------- stackrc | 3 - tools/docker/README.md | 13 --- tools/docker/install_docker.sh | 68 --------------- 10 files changed, 1 insertion(+), 236 deletions(-) delete mode 100644 lib/nova_plugins/hypervisor-docker delete mode 100644 tools/docker/README.md delete mode 100755 tools/docker/install_docker.sh diff --git a/README.md b/README.md index 9914b1ed69..a0f5b2689d 100644 --- a/README.md +++ b/README.md @@ -73,7 +73,7 @@ does not run if started as root. This is a recent change (Oct 2013) from the previous behaviour of automatically creating a ``stack`` user. Automatically creating user accounts is not the right response to running as root, so -that bit is now an explicit step using ``tools/create-stack-user.sh``. +that bit is now an explicit step using ``tools/create-stack-user.sh``. Run that (as root!) or just check it out to see what DevStack's expectations are for the account it runs under. Many people simply use their usual login (the default 'ubuntu' login on a UEC image @@ -253,10 +253,6 @@ If tempest has been successfully configured, a basic set of smoke tests can be r If you would like to use Xenserver as the hypervisor, please refer to the instructions in `./tools/xen/README.md`. -# DevStack on Docker - -If you would like to use Docker as the hypervisor, please refer to the instructions in `./tools/docker/README.md`. 
- # Additional Projects DevStack has a hook mechanism to call out to a dispatch script at specific diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh index f679669eea..dff8e7a632 100755 --- a/exercises/boot_from_volume.sh +++ b/exercises/boot_from_volume.sh @@ -44,9 +44,6 @@ source $TOP_DIR/exerciserc # the exercise is skipped is_service_enabled cinder || exit 55 -# Also skip if the hypervisor is Docker -[[ "$VIRT_DRIVER" == "docker" ]] && exit 55 - # Instance type to create DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} diff --git a/exercises/euca.sh b/exercises/euca.sh index ad852a4f79..3768b56d4e 100755 --- a/exercises/euca.sh +++ b/exercises/euca.sh @@ -40,9 +40,6 @@ source $TOP_DIR/exerciserc # the exercise is skipped is_service_enabled n-api || exit 55 -# Skip if the hypervisor is Docker -[[ "$VIRT_DRIVER" == "docker" ]] && exit 55 - # Instance type to create DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh index 8b7b96197e..1416d4dc6a 100755 --- a/exercises/floating_ips.sh +++ b/exercises/floating_ips.sh @@ -40,9 +40,6 @@ source $TOP_DIR/exerciserc # the exercise is skipped is_service_enabled n-api || exit 55 -# Skip if the hypervisor is Docker -[[ "$VIRT_DRIVER" == "docker" ]] && exit 55 - # Instance type to create DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} diff --git a/exercises/sec_groups.sh b/exercises/sec_groups.sh index d71a1e0755..5f8b0a4d5d 100755 --- a/exercises/sec_groups.sh +++ b/exercises/sec_groups.sh @@ -37,9 +37,6 @@ source $TOP_DIR/exerciserc # the exercise is skipped is_service_enabled n-api || exit 55 -# Skip if the hypervisor is Docker -[[ "$VIRT_DRIVER" == "docker" ]] && exit 55 - # Testing Security Groups # ======================= diff --git a/exercises/volumes.sh b/exercises/volumes.sh index 83d25c779c..0d556df9e7 100755 --- a/exercises/volumes.sh +++ b/exercises/volumes.sh @@ -41,9 +41,6 @@ source $TOP_DIR/exerciserc 
# exercise is skipped. is_service_enabled cinder || exit 55 -# Also skip if the hypervisor is Docker -[[ "$VIRT_DRIVER" == "docker" ]] && exit 55 - # Instance type to create DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} diff --git a/lib/nova_plugins/hypervisor-docker b/lib/nova_plugins/hypervisor-docker deleted file mode 100644 index fd3c4fefc8..0000000000 --- a/lib/nova_plugins/hypervisor-docker +++ /dev/null @@ -1,132 +0,0 @@ -# lib/nova_plugins/docker -# Configure the Docker hypervisor - -# Enable with: -# -# VIRT_DRIVER=docker - -# Dependencies: -# -# - ``functions`` file -# - ``nova`` and ``glance`` configurations - -# install_nova_hypervisor - install any external requirements -# configure_nova_hypervisor - make configuration changes, including those to other services -# start_nova_hypervisor - start any external services -# stop_nova_hypervisor - stop any external services -# cleanup_nova_hypervisor - remove transient data and cache - -# Save trace setting -MY_XTRACE=$(set +o | grep xtrace) -set +o xtrace - - -# Defaults -# -------- - -# Set up default directories -DOCKER_DIR=$DEST/docker - -DOCKER_UNIX_SOCKET=/var/run/docker.sock -DOCKER_PID_FILE=/var/run/docker.pid -DOCKER_REGISTRY_PORT=${DOCKER_REGISTRY_PORT:-5042} - -DOCKER_IMAGE=${DOCKER_IMAGE:-cirros:latest} -DOCKER_IMAGE_NAME=$DEFAULT_IMAGE_NAME -DOCKER_REGISTRY_IMAGE=${DOCKER_REGISTRY_IMAGE:-registry:latest} -DOCKER_REGISTRY_IMAGE_NAME=registry -DOCKER_REPOSITORY_NAME=${SERVICE_HOST}:${DOCKER_REGISTRY_PORT}/${DOCKER_IMAGE_NAME} - -DOCKER_APT_REPO=${DOCKER_APT_REPO:-https://get.docker.io/ubuntu} - - -# Entry Points -# ------------ - -# clean_nova_hypervisor - Clean up an installation -function cleanup_nova_hypervisor { - stop_service docker - - # Clean out work area - sudo rm -rf /var/lib/docker -} - -# configure_nova_hypervisor - Set config files, create data dirs, etc -function configure_nova_hypervisor { - iniset $NOVA_CONF DEFAULT compute_driver docker.DockerDriver - iniset 
$GLANCE_API_CONF DEFAULT container_formats ami,ari,aki,bare,ovf,docker -} - -# is_docker_running - Return 0 (true) if Docker is running, otherwise 1 -function is_docker_running { - local docker_pid - if [ -f "$DOCKER_PID_FILE" ]; then - docker_pid=$(cat "$DOCKER_PID_FILE") - fi - if [[ -z "$docker_pid" ]] || ! ps -p "$docker_pid" | grep [d]ocker; then - return 1 - fi - return 0 -} - -# install_nova_hypervisor() - Install external components -function install_nova_hypervisor { - # So far this is Ubuntu only - if ! is_ubuntu; then - die $LINENO "Docker is only supported on Ubuntu at this time" - fi - - # Make sure Docker is installed - if ! is_package_installed lxc-docker; then - die $LINENO "Docker is not installed. Please run tools/docker/install_docker.sh" - fi - - if ! (is_docker_running); then - die $LINENO "Docker not running" - fi -} - -# start_nova_hypervisor - Start any required external services -function start_nova_hypervisor { - if ! (is_docker_running); then - die $LINENO "Docker not running" - fi - - # Start the Docker registry container - docker run -d -p ${DOCKER_REGISTRY_PORT}:5000 \ - -e SETTINGS_FLAVOR=openstack -e OS_USERNAME=${OS_USERNAME} \ - -e OS_PASSWORD=${OS_PASSWORD} -e OS_TENANT_NAME=${OS_TENANT_NAME} \ - -e OS_GLANCE_URL="${SERVICE_PROTOCOL}://${GLANCE_HOSTPORT}" \ - -e OS_AUTH_URL=${OS_AUTH_URL} \ - $DOCKER_REGISTRY_IMAGE_NAME ./docker-registry/run.sh - - echo "Waiting for docker registry to start..." - DOCKER_REGISTRY=${SERVICE_HOST}:${DOCKER_REGISTRY_PORT} - if ! timeout $SERVICE_TIMEOUT sh -c "while ! curl -s $DOCKER_REGISTRY; do sleep 1; done"; then - die $LINENO "docker-registry did not start" - fi - - # Tag image if not already tagged - if ! docker images | grep $DOCKER_REPOSITORY_NAME; then - docker tag $DOCKER_IMAGE_NAME $DOCKER_REPOSITORY_NAME - fi - - # Make sure we copied the image in Glance - if ! 
(glance image-show "$DOCKER_IMAGE"); then - docker push $DOCKER_REPOSITORY_NAME - fi -} - -# stop_nova_hypervisor - Stop any external services -function stop_nova_hypervisor { - # Stop the docker registry container - docker kill $(docker ps | grep docker-registry | cut -d' ' -f1) -} - - -# Restore xtrace -$MY_XTRACE - -# Local variables: -# mode: shell-script -# End: diff --git a/stackrc b/stackrc index 6bb6f37195..756ec275dc 100644 --- a/stackrc +++ b/stackrc @@ -320,9 +320,6 @@ case "$VIRT_DRIVER" in openvz) DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ubuntu-12.04-x86_64} IMAGE_URLS=${IMAGE_URLS:-"http://download.openvz.org/template/precreated/ubuntu-12.04-x86_64.tar.gz"};; - docker) - DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros} - IMAGE_URLS=${IMAGE_URLS:-};; libvirt) case "$LIBVIRT_TYPE" in lxc) # the cirros root disk in the uec tarball is empty, so it will not work for lxc diff --git a/tools/docker/README.md b/tools/docker/README.md deleted file mode 100644 index 976111f750..0000000000 --- a/tools/docker/README.md +++ /dev/null @@ -1,13 +0,0 @@ -# DevStack on Docker - -Using Docker as Nova's hypervisor requries two steps: - -* Configure DevStack by adding the following to `localrc`:: - - VIRT_DRIVER=docker - -* Download and install the Docker service and images:: - - tools/docker/install_docker.sh - -After this, `stack.sh` should run as normal. 
diff --git a/tools/docker/install_docker.sh b/tools/docker/install_docker.sh deleted file mode 100755 index 27c8c8210b..0000000000 --- a/tools/docker/install_docker.sh +++ /dev/null @@ -1,68 +0,0 @@ -#!/usr/bin/env bash - -# **install_docker.sh** - Do the initial Docker installation and configuration - -# install_docker.sh -# -# Install docker package and images -# * downloads a base busybox image and a glance registry image if necessary -# * install the images in Docker's image cache - - -# Keep track of the current directory -SCRIPT_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=$(cd $SCRIPT_DIR/../..; pwd) - -# Import common functions -source $TOP_DIR/functions - -# Load local configuration -source $TOP_DIR/stackrc - -FILES=$TOP_DIR/files - -# Get our defaults -source $TOP_DIR/lib/nova_plugins/hypervisor-docker - -SERVICE_TIMEOUT=${SERVICE_TIMEOUT:-60} - - -# Install Docker Service -# ====================== - -if is_fedora; then - install_package docker-io socat -else - # Stop the auto-repo updates and do it when required here - NO_UPDATE_REPOS=True - - # Set up home repo - curl https://get.docker.io/gpg | sudo apt-key add - - install_package python-software-properties && \ - sudo sh -c "echo deb $DOCKER_APT_REPO docker main > /etc/apt/sources.list.d/docker.list" - apt_get update - install_package --force-yes lxc-docker socat -fi - -# Start the daemon - restart just in case the package ever auto-starts... -restart_service docker - -echo "Waiting for docker daemon to start..." -DOCKER_GROUP=$(groups | cut -d' ' -f1) -CONFIGURE_CMD="while ! /bin/echo -e 'GET /v1.3/version HTTP/1.0\n\n' | socat - unix-connect:$DOCKER_UNIX_SOCKET 2>/dev/null | grep -q '200 OK'; do - # Set the right group on docker unix socket before retrying - sudo chgrp $DOCKER_GROUP $DOCKER_UNIX_SOCKET - sudo chmod g+rw $DOCKER_UNIX_SOCKET - sleep 1 -done" -if ! 
timeout $SERVICE_TIMEOUT sh -c "$CONFIGURE_CMD"; then - die $LINENO "docker did not start" -fi - -# Get guest container image -docker pull $DOCKER_IMAGE -docker tag $DOCKER_IMAGE $DOCKER_IMAGE_NAME - -# Get docker-registry image -docker pull $DOCKER_REGISTRY_IMAGE -docker tag $DOCKER_REGISTRY_IMAGE $DOCKER_REGISTRY_IMAGE_NAME From 1749106c3abb17ee7cf30eb69bc9b744f3fc5a95 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Wed, 12 Mar 2014 14:38:25 +0100 Subject: [PATCH 0851/4704] Remove unused package dependencies * /sbin/vconfig command is not used by either nova or neutron. * Now the AMQP carrot is not used, not even optionally by the oslo.messaging. * python-gfalgs just referenced as a similar configuration style, by neutron. Change-Id: Idde5446e47e7da1dd204ea518ab816e2cce77c7d --- files/apts/nova | 2 -- files/rpms-suse/nova | 2 -- files/rpms/neutron | 1 - files/rpms/nova | 3 --- 4 files changed, 8 deletions(-) diff --git a/files/apts/nova b/files/apts/nova index ae925c3293..dfb25c7f37 100644 --- a/files/apts/nova +++ b/files/apts/nova @@ -25,7 +25,6 @@ socat # used by ajaxterm python-mox python-paste python-migrate -python-gflags python-greenlet python-libvirt # NOPRIME python-libxml2 @@ -34,7 +33,6 @@ python-numpy # used by websockify for spice console python-pastedeploy python-eventlet python-cheetah -python-carrot python-tempita python-sqlalchemy python-suds diff --git a/files/rpms-suse/nova b/files/rpms-suse/nova index ee4917d702..c3c878fb4a 100644 --- a/files/rpms-suse/nova +++ b/files/rpms-suse/nova @@ -24,7 +24,6 @@ python-Routes python-SQLAlchemy python-Tempita python-boto -python-carrot python-cheetah python-eventlet python-feedparser @@ -37,7 +36,6 @@ python-mox python-mysql python-numpy # needed by websockify for spice console python-paramiko -python-python-gflags python-sqlalchemy-migrate python-suds python-xattr # needed for glance which is needed for nova --- this shouldn't be here diff --git a/files/rpms/neutron b/files/rpms/neutron index 
42d7f68d37..e5c901be37 100644 --- a/files/rpms/neutron +++ b/files/rpms/neutron @@ -21,4 +21,3 @@ rabbitmq-server # NOPRIME qpid-cpp-server # NOPRIME sqlite sudo -vconfig diff --git a/files/rpms/nova b/files/rpms/nova index a607d925e1..61b0e9a0d1 100644 --- a/files/rpms/nova +++ b/files/rpms/nova @@ -17,11 +17,9 @@ mysql-server # NOPRIME parted polkit python-boto -python-carrot python-cheetah python-eventlet python-feedparser -python-gflags python-greenlet python-iso8601 python-kombu @@ -42,4 +40,3 @@ rabbitmq-server # NOPRIME qpid-cpp-server # NOPRIME sqlite sudo -vconfig From 64bd01652e6fd7c593498b1fd2bf50bfdf64ce40 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 12 Mar 2014 13:04:22 -0400 Subject: [PATCH 0852/4704] make git_clone safer the ensures that if the function returns early, we return to a sane directory, and not hang out somewhere that a future git call might modify a directory in a weird way. This is especially important in the case of stable branches where were are hopping between stable for servers and master for clients. 
Change-Id: Ib8ebbc23b1813bc1bfb31d0a079f1b882135bd39 --- functions-common | 3 +++ 1 file changed, 3 insertions(+) diff --git a/functions-common b/functions-common index 90cd3dfa72..c6fd5c7163 100644 --- a/functions-common +++ b/functions-common @@ -517,12 +517,14 @@ function git_clone { GIT_DEST=$2 GIT_REF=$3 RECLONE=$(trueorfalse False $RECLONE) + local orig_dir=`pwd` if [[ "$OFFLINE" = "True" ]]; then echo "Running in offline mode, clones already exist" # print out the results so we know what change was used in the logs cd $GIT_DEST git show --oneline | head -1 + cd $orig_dir return fi @@ -572,6 +574,7 @@ function git_clone { # print out the results so we know what change was used in the logs cd $GIT_DEST git show --oneline | head -1 + cd $orig_dir } # git can sometimes get itself infinitely stuck with transient network From 767b5a45b7c6a91a449e0cb41baf16221a7de5e1 Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Wed, 12 Mar 2014 10:33:15 -0700 Subject: [PATCH 0853/4704] Split up stop_nova to match start_nova Split stop_nova into: stop_nova_compute and stop_nova_rest. This is needed to support the partial-ncpu grenade test where we want to stop everything but nova_compute. Change-Id: I6a21821277e56897d705ca5746806e2211632d12 --- lib/nova | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/lib/nova b/lib/nova index 55103e8dcc..15f56d336b 100644 --- a/lib/nova +++ b/lib/nova @@ -715,17 +715,25 @@ function start_nova { start_nova_rest } -# stop_nova() - Stop running processes (non-screen) -function stop_nova { +function stop_nova_compute { + if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then + stop_nova_hypervisor + fi +} + +function stop_nova_rest { # Kill the nova screen windows # Some services are listed here twice since more than one instance # of a service may be running in certain configs. 
for serv in n-api n-cpu n-crt n-net n-sch n-novnc n-xvnc n-cauth n-spice n-cond n-cell n-cell n-api-meta n-obj; do screen_stop $serv done - if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then - stop_nova_hypervisor - fi +} + +# stop_nova() - Stop running processes (non-screen) +function stop_nova { + stop_nova_rest + stop_nova_compute } From 9c6d2840fdb67eb7af34be241bdb2fbebaf67c87 Mon Sep 17 00:00:00 2001 From: Sreeram Yerrapragada Date: Mon, 10 Mar 2014 14:12:58 -0700 Subject: [PATCH 0854/4704] fix failing wget statements under -o errexit in vmdk upload routine Fix the case when uploaded image has no descriptor. Refactored the code a bit Tested: 1. monithic Sparse 2. monolithic flat 2.1 flat file name mentioned in descriptor file 2.1 flat file name not mentioned in descriptor file 3. descriptor header not found in the file 3.1 image file name is *-flat, download descriptor 3.2 image file name does not end with *-flat 4. file name contains all image properties Change-Id: I0df9be5c2a1b9ed53cdb22d5cd40b94e56c48f37 Closes-bug: #1289664 --- functions | 63 ++++++++++++++++++++----------------------------------- 1 file changed, 23 insertions(+), 40 deletions(-) diff --git a/functions b/functions index 1d30922916..e0d2b01d0c 100644 --- a/functions +++ b/functions @@ -122,7 +122,7 @@ function upload_image { flat_fname="$(head -25 $IMAGE | { grep -G 'RW\|RDONLY [0-9]+ FLAT\|VMFS' $IMAGE || true; })" flat_fname="${flat_fname#*\"}" flat_fname="${flat_fname%?}" - if [[ -z "$flat_name" ]]; then + if [[ -z "$flat_fname" ]]; then flat_fname="$IMAGE_NAME-flat.vmdk" fi path_len=`expr ${#image_url} - ${#IMAGE_FNAME}` @@ -133,27 +133,16 @@ function upload_image { if [[ ! -f $FILES/$flat_fname || \ "$(stat -c "%s" $FILES/$flat_fname)" = "0" ]]; then wget -c $flat_url -O $FILES/$flat_fname - if [[ $? 
-ne 0 ]]; then - echo "Flat disk not found: $flat_url" - flat_found=false - fi - fi - if $flat_found; then - IMAGE="$FILES/${flat_fname}" fi + IMAGE="$FILES/${flat_fname}" else IMAGE=$(echo $flat_url | sed "s/^file:\/\///g") if [[ ! -f $IMAGE || "$(stat -c "%s" $IMAGE)" == "0" ]]; then echo "Flat disk not found: $flat_url" - flat_found=false + return 1 fi - if ! $flat_found; then - IMAGE=$(echo $image_url | sed "s/^file:\/\///g") - fi - fi - if $flat_found; then - IMAGE_NAME="${flat_fname}" fi + IMAGE_NAME="${flat_fname}" vmdk_disktype="preallocated" elif [[ "$vmdk_create_type" = "streamOptimized" ]]; then vmdk_disktype="streamOptimized" @@ -163,33 +152,27 @@ function upload_image { if [[ ${IMAGE_NAME: -5} != "-flat" ]]; then warn $LINENO "Expected filename suffix: '-flat'."` `" Filename provided: ${IMAGE_NAME}" - fi - - descriptor_fname="${IMAGE_NAME:0:${#IMAGE_NAME} - 5}.vmdk" - path_len=`expr ${#image_url} - ${#IMAGE_FNAME}` - flat_path="${image_url:0:$path_len}" - descriptor_url=$flat_path$descriptor_fname - warn $LINENO "$descriptor_data_pair_msg"` - `" Attempt to retrieve the descriptor *.vmdk: $descriptor_url" - if [[ $flat_path != file* ]]; then - if [[ ! -f $FILES/$descriptor_fname || \ - "$(stat -c "%s" $FILES/$descriptor_fname)" = "0" ]]; then - wget -c $descriptor_url -O $FILES/$descriptor_fname - if [[ $? -ne 0 ]]; then - warn $LINENO "Descriptor not found $descriptor_url" - descriptor_found=false - fi - fi - descriptor_url="$FILES/$descriptor_fname" else - descriptor_url=$(echo $descriptor_url | sed "s/^file:\/\///g") - if [[ ! 
-f $descriptor_url || \ - "$(stat -c "%s" $descriptor_url)" == "0" ]]; then - warn $LINENO "Descriptor not found $descriptor_url" - descriptor_found=false + descriptor_fname="${IMAGE_NAME:0:${#IMAGE_NAME} - 5}.vmdk" + path_len=`expr ${#image_url} - ${#IMAGE_FNAME}` + flat_path="${image_url:0:$path_len}" + descriptor_url=$flat_path$descriptor_fname + warn $LINENO "$descriptor_data_pair_msg"` + `" Attempt to retrieve the descriptor *.vmdk: $descriptor_url" + if [[ $flat_path != file* ]]; then + if [[ ! -f $FILES/$descriptor_fname || \ + "$(stat -c "%s" $FILES/$descriptor_fname)" = "0" ]]; then + wget -c $descriptor_url -O $FILES/$descriptor_fname + fi + descriptor_url="$FILES/$descriptor_fname" + else + descriptor_url=$(echo $descriptor_url | sed "s/^file:\/\///g") + if [[ ! -f $descriptor_url || \ + "$(stat -c "%s" $descriptor_url)" == "0" ]]; then + echo "Descriptor not found: $descriptor_url" + return 1 + fi fi - fi - if $descriptor_found; then vmdk_adapter_type="$(head -25 $descriptor_url | { grep -a -F -m 1 'ddb.adapterType =' $descriptor_url || true; })" vmdk_adapter_type="${vmdk_adapter_type#*\"}" vmdk_adapter_type="${vmdk_adapter_type%?}" From 7ff8443e46c94562822895b86b24122bc7474cfd Mon Sep 17 00:00:00 2001 From: Sergey Lukjanov Date: Mon, 10 Mar 2014 20:04:51 +0400 Subject: [PATCH 0855/4704] Rename all Savanna usages to Sahara There are several backward compatibility nits. 
Change-Id: I93cac543375896602d158860cc557f86e41bcb63 --- exercises/{savanna.sh => sahara.sh} | 8 +- extras.d/70-sahara.sh | 37 ++++++ extras.d/70-savanna.sh | 37 ------ lib/sahara | 177 ++++++++++++++++++++++++++++ lib/sahara-dashboard | 72 +++++++++++ lib/savanna | 173 --------------------------- lib/savanna-dashboard | 72 ----------- 7 files changed, 290 insertions(+), 286 deletions(-) rename exercises/{savanna.sh => sahara.sh} (88%) create mode 100644 extras.d/70-sahara.sh delete mode 100644 extras.d/70-savanna.sh create mode 100644 lib/sahara create mode 100644 lib/sahara-dashboard delete mode 100644 lib/savanna delete mode 100644 lib/savanna-dashboard diff --git a/exercises/savanna.sh b/exercises/sahara.sh similarity index 88% rename from exercises/savanna.sh rename to exercises/sahara.sh index fc3f9760e5..867920ed31 100755 --- a/exercises/savanna.sh +++ b/exercises/sahara.sh @@ -1,8 +1,8 @@ #!/usr/bin/env bash -# **savanna.sh** +# **sahara.sh** -# Sanity check that Savanna started if enabled +# Sanity check that Sahara started if enabled echo "*********************************************************************" echo "Begin DevStack Exercise: $0" @@ -33,9 +33,9 @@ source $TOP_DIR/openrc # Import exercise configuration source $TOP_DIR/exerciserc -is_service_enabled savanna || exit 55 +is_service_enabled sahara || exit 55 -curl http://$SERVICE_HOST:8386/ 2>/dev/null | grep -q 'Auth' || die $LINENO "Savanna API not functioning!" +curl http://$SERVICE_HOST:8386/ 2>/dev/null | grep -q 'Auth' || die $LINENO "Sahara API isn't functioning!" 
set +o xtrace echo "*********************************************************************" diff --git a/extras.d/70-sahara.sh b/extras.d/70-sahara.sh new file mode 100644 index 0000000000..80e07ff7b9 --- /dev/null +++ b/extras.d/70-sahara.sh @@ -0,0 +1,37 @@ +# sahara.sh - DevStack extras script to install Sahara + +if is_service_enabled sahara; then + if [[ "$1" == "source" ]]; then + # Initial source + source $TOP_DIR/lib/sahara + source $TOP_DIR/lib/sahara-dashboard + elif [[ "$1" == "stack" && "$2" == "install" ]]; then + echo_summary "Installing sahara" + install_sahara + cleanup_sahara + if is_service_enabled horizon; then + install_sahara_dashboard + fi + elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then + echo_summary "Configuring sahara" + configure_sahara + create_sahara_accounts + if is_service_enabled horizon; then + configure_sahara_dashboard + fi + elif [[ "$1" == "stack" && "$2" == "extra" ]]; then + echo_summary "Initializing sahara" + start_sahara + fi + + if [[ "$1" == "unstack" ]]; then + stop_sahara + if is_service_enabled horizon; then + cleanup_sahara_dashboard + fi + fi + + if [[ "$1" == "clean" ]]; then + cleanup_sahara + fi +fi diff --git a/extras.d/70-savanna.sh b/extras.d/70-savanna.sh deleted file mode 100644 index edc1376deb..0000000000 --- a/extras.d/70-savanna.sh +++ /dev/null @@ -1,37 +0,0 @@ -# savanna.sh - DevStack extras script to install Savanna - -if is_service_enabled savanna; then - if [[ "$1" == "source" ]]; then - # Initial source - source $TOP_DIR/lib/savanna - source $TOP_DIR/lib/savanna-dashboard - elif [[ "$1" == "stack" && "$2" == "install" ]]; then - echo_summary "Installing Savanna" - install_savanna - cleanup_savanna - if is_service_enabled horizon; then - install_savanna_dashboard - fi - elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then - echo_summary "Configuring Savanna" - configure_savanna - create_savanna_accounts - if is_service_enabled horizon; then - configure_savanna_dashboard - fi - elif [[ 
"$1" == "stack" && "$2" == "extra" ]]; then - echo_summary "Initializing Savanna" - start_savanna - fi - - if [[ "$1" == "unstack" ]]; then - stop_savanna - if is_service_enabled horizon; then - cleanup_savanna_dashboard - fi - fi - - if [[ "$1" == "clean" ]]; then - cleanup_savanna - fi -fi diff --git a/lib/sahara b/lib/sahara new file mode 100644 index 0000000000..4cb04ecd3a --- /dev/null +++ b/lib/sahara @@ -0,0 +1,177 @@ +# lib/sahara + +# Dependencies: +# ``functions`` file +# ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined + +# ``stack.sh`` calls the entry points in this order: +# +# install_sahara +# configure_sahara +# start_sahara +# stop_sahara +# cleanup_sahara + +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + +# Set up default repos +SAHARA_REPO=${SAHARA_REPO:-${GIT_BASE}/openstack/sahara.git} +SAHARA_BRANCH=${SAHARA_BRANCH:-master} + +# Set up default directories +SAHARA_DIR=$DEST/sahara +SAHARA_CONF_DIR=${SAHARA_CONF_DIR:-/etc/sahara} +SAHARA_CONF_FILE=${SAHARA_CONF_DIR}/sahara.conf +SAHARA_DEBUG=${SAHARA_DEBUG:-True} + +SAHARA_SERVICE_HOST=${SAHARA_SERVICE_HOST:-$SERVICE_HOST} +SAHARA_SERVICE_PORT=${SAHARA_SERVICE_PORT:-8386} +SAHARA_SERVICE_PROTOCOL=${SAHARA_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} + +SAHARA_AUTH_CACHE_DIR=${SAHARA_AUTH_CACHE_DIR:-/var/cache/sahara} + +# Support entry points installation of console scripts +if [[ -d $SAHARA_DIR/bin ]]; then + SAHARA_BIN_DIR=$SAHARA_DIR/bin +else + SAHARA_BIN_DIR=$(get_python_exec_prefix) +fi + +# Tell Tempest this project is present +TEMPEST_SERVICES+=,sahara + +# For backward compatibility with current tests in Tempest +TEMPEST_SERVICES+=,savanna + + +# Functions +# --------- + +# create_sahara_accounts() - Set up common required sahara accounts +# +# Tenant User Roles +# ------------------------------ +# service sahara admin +function create_sahara_accounts { + + SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print 
\$2 }") + ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }") + + SAHARA_USER=$(openstack user create \ + sahara \ + --password "$SERVICE_PASSWORD" \ + --project $SERVICE_TENANT \ + --email sahara@example.com \ + | grep " id " | get_field 2) + openstack role add \ + $ADMIN_ROLE \ + --project $SERVICE_TENANT \ + --user $SAHARA_USER + + if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then + SAHARA_SERVICE=$(openstack service create \ + sahara \ + --type=data_processing \ + --description="Sahara Data Processing" \ + | grep " id " | get_field 2) + openstack endpoint create \ + $SAHARA_SERVICE \ + --region RegionOne \ + --publicurl "$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\$(tenant_id)s" \ + --adminurl "$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\$(tenant_id)s" \ + --internalurl "$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\$(tenant_id)s" + fi +} + +# cleanup_sahara() - Remove residual data files, anything left over from +# previous runs that would need to clean up. +function cleanup_sahara { + + # Cleanup auth cache dir + sudo rm -rf $SAHARA_AUTH_CACHE_DIR +} + +# configure_sahara() - Set config files, create data dirs, etc +function configure_sahara { + + if [[ ! -d $SAHARA_CONF_DIR ]]; then + sudo mkdir -p $SAHARA_CONF_DIR + fi + sudo chown $STACK_USER $SAHARA_CONF_DIR + + # Copy over sahara configuration file and configure common parameters. 
+ # TODO(slukjanov): rename when sahara internals will be updated + cp $SAHARA_DIR/etc/savanna/savanna.conf.sample $SAHARA_CONF_FILE + + # Create auth cache dir + sudo mkdir -p $SAHARA_AUTH_CACHE_DIR + sudo chown $STACK_USER $SAHARA_AUTH_CACHE_DIR + rm -rf $SAHARA_AUTH_CACHE_DIR/* + + # Set obsolete keystone auth configs for backward compatibility + iniset $SAHARA_CONF_FILE DEFAULT os_auth_host $KEYSTONE_SERVICE_HOST + iniset $SAHARA_CONF_FILE DEFAULT os_auth_port $KEYSTONE_SERVICE_PORT + iniset $SAHARA_CONF_FILE DEFAULT os_auth_protocol $KEYSTONE_SERVICE_PROTOCOL + iniset $SAHARA_CONF_FILE DEFAULT os_admin_password $SERVICE_PASSWORD + iniset $SAHARA_CONF_FILE DEFAULT os_admin_username sahara + iniset $SAHARA_CONF_FILE DEFAULT os_admin_tenant_name $SERVICE_TENANT_NAME + + # Set actual keystone auth configs + iniset $SAHARA_CONF_FILE keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/ + iniset $SAHARA_CONF_FILE keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME + iniset $SAHARA_CONF_FILE keystone_authtoken admin_user sahara + iniset $SAHARA_CONF_FILE keystone_authtoken admin_password $SERVICE_PASSWORD + iniset $SAHARA_CONF_FILE keystone_authtoken signing_dir $SAHARA_AUTH_CACHE_DIR + iniset $SAHARA_CONF_FILE keystone_authtoken cafile $KEYSTONE_SSL_CA + + iniset $SAHARA_CONF_FILE DEFAULT debug $SAHARA_DEBUG + + iniset $SAHARA_CONF_FILE database connection `database_connection_url sahara` + + if is_service_enabled neutron; then + iniset $SAHARA_CONF_FILE DEFAULT use_neutron true + iniset $SAHARA_CONF_FILE DEFAULT use_floating_ips true + fi + + if is_service_enabled heat; then + iniset $SAHARA_CONF_FILE DEFAULT infrastructure_engine heat + else + iniset $SAHARA_CONF_FILE DEFAULT infrastructure_engine direct + fi + + iniset $SAHARA_CONF_FILE DEFAULT use_syslog $SYSLOG + + recreate_database sahara utf8 + $SAHARA_BIN_DIR/sahara-db-manage --config-file $SAHARA_CONF_FILE upgrade head +} + +# install_sahara() - 
Collect source and prepare +function install_sahara { + git_clone $SAHARA_REPO $SAHARA_DIR $SAHARA_BRANCH + setup_develop $SAHARA_DIR +} + +# start_sahara() - Start running processes, including screen +function start_sahara { + screen_it sahara "cd $SAHARA_DIR && $SAHARA_BIN_DIR/sahara-api --config-file $SAHARA_CONF_FILE" +} + +# stop_sahara() - Stop running processes +function stop_sahara { + # Kill the Sahara screen windows + screen -S $SCREEN_NAME -p sahara -X kill +} + + +# Restore xtrace +$XTRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/sahara-dashboard b/lib/sahara-dashboard new file mode 100644 index 0000000000..a81df0f7a8 --- /dev/null +++ b/lib/sahara-dashboard @@ -0,0 +1,72 @@ +# lib/sahara-dashboard + +# Dependencies: +# +# - ``functions`` file +# - ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined +# - ``SERVICE_HOST`` + +# ``stack.sh`` calls the entry points in this order: +# +# - install_sahara_dashboard +# - configure_sahara_dashboard +# - cleanup_sahara_dashboard + +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace + +source $TOP_DIR/lib/horizon + +# Defaults +# -------- + +# Set up default repos +SAHARA_DASHBOARD_REPO=${SAHARA_DASHBOARD_REPO:-${GIT_BASE}/openstack/sahara-dashboard.git} +SAHARA_DASHBOARD_BRANCH=${SAHARA_DASHBOARD_BRANCH:-master} + +SAHARA_PYTHONCLIENT_REPO=${SAHARA_PYTHONCLIENT_REPO:-${GIT_BASE}/openstack/python-saharaclient.git} +SAHARA_PYTHONCLIENT_BRANCH=${SAHARA_PYTHONCLIENT_BRANCH:-master} + +# Set up default directories +SAHARA_DASHBOARD_DIR=$DEST/sahara-dashboard +SAHARA_PYTHONCLIENT_DIR=$DEST/python-saharaclient + +# Functions +# --------- + +function configure_sahara_dashboard { + + echo -e "AUTO_ASSIGNMENT_ENABLED = False" >> $HORIZON_DIR/openstack_dashboard/local/local_settings.py + echo -e "HORIZON_CONFIG['dashboards'] += ('sahara',)" >> $HORIZON_DIR/openstack_dashboard/settings.py + echo -e "INSTALLED_APPS += ('saharadashboard',)" >> 
$HORIZON_DIR/openstack_dashboard/settings.py + + if is_service_enabled neutron; then + echo -e "SAHARA_USE_NEUTRON = True" >> $HORIZON_DIR/openstack_dashboard/local/local_settings.py + fi +} + +# install_sahara_dashboard() - Collect source and prepare +function install_sahara_dashboard { + install_python_saharaclient + git_clone $SAHARA_DASHBOARD_REPO $SAHARA_DASHBOARD_DIR $SAHARA_DASHBOARD_BRANCH + setup_develop $SAHARA_DASHBOARD_DIR +} + +function install_python_saharaclient { + git_clone $SAHARA_PYTHONCLIENT_REPO $SAHARA_PYTHONCLIENT_DIR $SAHARA_PYTHONCLIENT_BRANCH + setup_develop $SAHARA_PYTHONCLIENT_DIR +} + +# Cleanup file settings.py from Sahara +function cleanup_sahara_dashboard { + sed -i '/sahara/d' $HORIZON_DIR/openstack_dashboard/settings.py +} + +# Restore xtrace +$XTRACE + +# Local variables: +# mode: shell-script +# End: + diff --git a/lib/savanna b/lib/savanna deleted file mode 100644 index 2cb092c96c..0000000000 --- a/lib/savanna +++ /dev/null @@ -1,173 +0,0 @@ -# lib/savanna - -# Dependencies: -# ``functions`` file -# ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined - -# ``stack.sh`` calls the entry points in this order: -# -# install_savanna -# configure_savanna -# start_savanna -# stop_savanna -# cleanup_savanna - -# Save trace setting -XTRACE=$(set +o | grep xtrace) -set +o xtrace - - -# Defaults -# -------- - -# Set up default repos -SAVANNA_REPO=${SAVANNA_REPO:-${GIT_BASE}/openstack/savanna.git} -SAVANNA_BRANCH=${SAVANNA_BRANCH:-master} - -# Set up default directories -SAVANNA_DIR=$DEST/savanna -SAVANNA_CONF_DIR=${SAVANNA_CONF_DIR:-/etc/savanna} -SAVANNA_CONF_FILE=${SAVANNA_CONF_DIR}/savanna.conf -SAVANNA_DEBUG=${SAVANNA_DEBUG:-True} - -SAVANNA_SERVICE_HOST=${SAVANNA_SERVICE_HOST:-$SERVICE_HOST} -SAVANNA_SERVICE_PORT=${SAVANNA_SERVICE_PORT:-8386} -SAVANNA_SERVICE_PROTOCOL=${SAVANNA_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} - -SAVANNA_AUTH_CACHE_DIR=${SAVANNA_AUTH_CACHE_DIR:-/var/cache/savanna} - -# Support entry points installation of 
console scripts -if [[ -d $SAVANNA_DIR/bin ]]; then - SAVANNA_BIN_DIR=$SAVANNA_DIR/bin -else - SAVANNA_BIN_DIR=$(get_python_exec_prefix) -fi - -# Tell Tempest this project is present -TEMPEST_SERVICES+=,savanna - - -# Functions -# --------- - -# create_savanna_accounts() - Set up common required savanna accounts -# -# Tenant User Roles -# ------------------------------ -# service savanna admin -function create_savanna_accounts { - - SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") - ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }") - - SAVANNA_USER=$(openstack user create \ - savanna \ - --password "$SERVICE_PASSWORD" \ - --project $SERVICE_TENANT \ - --email savanna@example.com \ - | grep " id " | get_field 2) - openstack role add \ - $ADMIN_ROLE \ - --project $SERVICE_TENANT \ - --user $SAVANNA_USER - - if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - SAVANNA_SERVICE=$(openstack service create \ - savanna \ - --type=data_processing \ - --description="Savanna Data Processing" \ - | grep " id " | get_field 2) - openstack endpoint create \ - $SAVANNA_SERVICE \ - --region RegionOne \ - --publicurl "$SAVANNA_SERVICE_PROTOCOL://$SAVANNA_SERVICE_HOST:$SAVANNA_SERVICE_PORT/v1.1/\$(tenant_id)s" \ - --adminurl "$SAVANNA_SERVICE_PROTOCOL://$SAVANNA_SERVICE_HOST:$SAVANNA_SERVICE_PORT/v1.1/\$(tenant_id)s" \ - --internalurl "$SAVANNA_SERVICE_PROTOCOL://$SAVANNA_SERVICE_HOST:$SAVANNA_SERVICE_PORT/v1.1/\$(tenant_id)s" - fi -} - -# cleanup_savanna() - Remove residual data files, anything left over from -# previous runs that would need to clean up. -function cleanup_savanna { - - # Cleanup auth cache dir - sudo rm -rf $SAVANNA_AUTH_CACHE_DIR -} - -# configure_savanna() - Set config files, create data dirs, etc -function configure_savanna { - - if [[ ! 
-d $SAVANNA_CONF_DIR ]]; then - sudo mkdir -p $SAVANNA_CONF_DIR - fi - sudo chown $STACK_USER $SAVANNA_CONF_DIR - - # Copy over savanna configuration file and configure common parameters. - cp $SAVANNA_DIR/etc/savanna/savanna.conf.sample $SAVANNA_CONF_FILE - - # Create auth cache dir - sudo mkdir -p $SAVANNA_AUTH_CACHE_DIR - sudo chown $STACK_USER $SAVANNA_AUTH_CACHE_DIR - rm -rf $SAVANNA_AUTH_CACHE_DIR/* - - # Set obsolete keystone auth configs for backward compatibility - iniset $SAVANNA_CONF_FILE DEFAULT os_auth_host $KEYSTONE_SERVICE_HOST - iniset $SAVANNA_CONF_FILE DEFAULT os_auth_port $KEYSTONE_SERVICE_PORT - iniset $SAVANNA_CONF_FILE DEFAULT os_auth_protocol $KEYSTONE_SERVICE_PROTOCOL - iniset $SAVANNA_CONF_FILE DEFAULT os_admin_password $SERVICE_PASSWORD - iniset $SAVANNA_CONF_FILE DEFAULT os_admin_username savanna - iniset $SAVANNA_CONF_FILE DEFAULT os_admin_tenant_name $SERVICE_TENANT_NAME - - # Set actual keystone auth configs - iniset $SAVANNA_CONF_FILE keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/ - iniset $SAVANNA_CONF_FILE keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME - iniset $SAVANNA_CONF_FILE keystone_authtoken admin_user savanna - iniset $SAVANNA_CONF_FILE keystone_authtoken admin_password $SERVICE_PASSWORD - iniset $SAVANNA_CONF_FILE keystone_authtoken signing_dir $SAVANNA_AUTH_CACHE_DIR - iniset $SAVANNA_CONF_FILE keystone_authtoken cafile $KEYSTONE_SSL_CA - - iniset $SAVANNA_CONF_FILE DEFAULT debug $SAVANNA_DEBUG - - iniset $SAVANNA_CONF_FILE database connection `database_connection_url savanna` - - if is_service_enabled neutron; then - iniset $SAVANNA_CONF_FILE DEFAULT use_neutron true - iniset $SAVANNA_CONF_FILE DEFAULT use_floating_ips true - fi - - if is_service_enabled heat; then - iniset $SAVANNA_CONF_FILE DEFAULT infrastructure_engine heat - else - iniset $SAVANNA_CONF_FILE DEFAULT infrastructure_engine savanna - fi - - iniset $SAVANNA_CONF_FILE DEFAULT use_syslog 
$SYSLOG - - recreate_database savanna utf8 - $SAVANNA_BIN_DIR/savanna-db-manage --config-file $SAVANNA_CONF_FILE upgrade head -} - -# install_savanna() - Collect source and prepare -function install_savanna { - git_clone $SAVANNA_REPO $SAVANNA_DIR $SAVANNA_BRANCH - setup_develop $SAVANNA_DIR -} - -# start_savanna() - Start running processes, including screen -function start_savanna { - screen_it savanna "cd $SAVANNA_DIR && $SAVANNA_BIN_DIR/savanna-api --config-file $SAVANNA_CONF_FILE" -} - -# stop_savanna() - Stop running processes -function stop_savanna { - # Kill the Savanna screen windows - screen -S $SCREEN_NAME -p savanna -X kill -} - - -# Restore xtrace -$XTRACE - -# Local variables: -# mode: shell-script -# End: diff --git a/lib/savanna-dashboard b/lib/savanna-dashboard deleted file mode 100644 index 6fe15a3c81..0000000000 --- a/lib/savanna-dashboard +++ /dev/null @@ -1,72 +0,0 @@ -# lib/savanna-dashboard - -# Dependencies: -# -# - ``functions`` file -# - ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined -# - ``SERVICE_HOST`` - -# ``stack.sh`` calls the entry points in this order: -# -# - install_savanna_dashboard -# - configure_savanna_dashboard -# - cleanup_savanna_dashboard - -# Save trace setting -XTRACE=$(set +o | grep xtrace) -set +o xtrace - -source $TOP_DIR/lib/horizon - -# Defaults -# -------- - -# Set up default repos -SAVANNA_DASHBOARD_REPO=${SAVANNA_DASHBOARD_REPO:-${GIT_BASE}/openstack/savanna-dashboard.git} -SAVANNA_DASHBOARD_BRANCH=${SAVANNA_DASHBOARD_BRANCH:-master} - -SAVANNA_PYTHONCLIENT_REPO=${SAVANNA_PYTHONCLIENT_REPO:-${GIT_BASE}/openstack/python-savannaclient.git} -SAVANNA_PYTHONCLIENT_BRANCH=${SAVANNA_PYTHONCLIENT_BRANCH:-master} - -# Set up default directories -SAVANNA_DASHBOARD_DIR=$DEST/savanna-dashboard -SAVANNA_PYTHONCLIENT_DIR=$DEST/python-savannaclient - -# Functions -# --------- - -function configure_savanna_dashboard { - - echo -e "AUTO_ASSIGNMENT_ENABLED = False" >> 
$HORIZON_DIR/openstack_dashboard/local/local_settings.py - echo -e "HORIZON_CONFIG['dashboards'] += ('savanna',)" >> $HORIZON_DIR/openstack_dashboard/settings.py - echo -e "INSTALLED_APPS += ('savannadashboard',)" >> $HORIZON_DIR/openstack_dashboard/settings.py - - if is_service_enabled neutron; then - echo -e "SAVANNA_USE_NEUTRON = True" >> $HORIZON_DIR/openstack_dashboard/local/local_settings.py - fi -} - -# install_savanna_dashboard() - Collect source and prepare -function install_savanna_dashboard { - install_python_savannaclient - git_clone $SAVANNA_DASHBOARD_REPO $SAVANNA_DASHBOARD_DIR $SAVANNA_DASHBOARD_BRANCH - setup_develop $SAVANNA_DASHBOARD_DIR -} - -function install_python_savannaclient { - git_clone $SAVANNA_PYTHONCLIENT_REPO $SAVANNA_PYTHONCLIENT_DIR $SAVANNA_PYTHONCLIENT_BRANCH - setup_develop $SAVANNA_PYTHONCLIENT_DIR -} - -# Cleanup file settings.py from Savanna -function cleanup_savanna_dashboard { - sed -i '/savanna/d' $HORIZON_DIR/openstack_dashboard/settings.py -} - -# Restore xtrace -$XTRACE - -# Local variables: -# mode: shell-script -# End: - From 51ebda6c8d37539473e463e8b24f27f21d798392 Mon Sep 17 00:00:00 2001 From: Sergey Lukjanov Date: Wed, 12 Mar 2014 22:26:12 +0400 Subject: [PATCH 0856/4704] Use sahara.conf.sample instead of old one Sahara internals was updated, now we can use correct conf sample. Change-Id: Ia8d99c2742785c3b5c724617a5dfc2880624a03f --- lib/sahara | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lib/sahara b/lib/sahara index 4cb04ecd3a..38b4ecd7e9 100644 --- a/lib/sahara +++ b/lib/sahara @@ -106,8 +106,7 @@ function configure_sahara { sudo chown $STACK_USER $SAHARA_CONF_DIR # Copy over sahara configuration file and configure common parameters. 
- # TODO(slukjanov): rename when sahara internals will be updated - cp $SAHARA_DIR/etc/savanna/savanna.conf.sample $SAHARA_CONF_FILE + cp $SAHARA_DIR/etc/sahara/sahara.conf.sample $SAHARA_CONF_FILE # Create auth cache dir sudo mkdir -p $SAHARA_AUTH_CACHE_DIR From 1a0c090057dde13fd3bb8ffcb84a923eb5952084 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 12 Mar 2014 14:59:50 -0500 Subject: [PATCH 0857/4704] Additional attempts to flush stdout/stderr The logfile output is piped through awk to apply a timestamp and filter out all of the xtrace commands in the xtrace output. A while back we added fflush("") which is supposed to flush all open output files and pipes. It appears that gawk in precise is old enough that it may only flush stdout, so explicitly flush the logfile handle. Change-Id: If5198c2da2a3278eed8ae3d50c7ca5c15eac6d94 --- stack.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/stack.sh b/stack.sh index e76a55c534..a16046474c 100755 --- a/stack.sh +++ b/stack.sh @@ -541,6 +541,7 @@ if [[ -n "$LOGFILE" ]]; then print print > logfile fflush("") + fflush(logfile) }' ) 2>&1 # Set up a second fd for output exec 6> >( tee "${SUMFILE}" ) From 26c5a2252e9b99e053616d262fb627c1716a2e4d Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 12 Mar 2014 18:37:37 -0400 Subject: [PATCH 0858/4704] change the vmdk to the one used in VMWare ci The debian image that defaults to being used with vmware is huge, and it turns out it's not actually used in VMWare ci so we don't really know if it's working. Instead use the vmdk that is used in VMWare ci, which we know will boot, as we get results every day. 
Change-Id: I014746af293852525e2bd128c4d19f5889ecd55d --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index 6bb6f37195..cff1e26209 100644 --- a/stackrc +++ b/stackrc @@ -335,7 +335,7 @@ case "$VIRT_DRIVER" in ;; vsphere) DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-debian-2.6.32-i686} - IMAGE_URLS=${IMAGE_URLS:-"http://partnerweb.vmware.com/programs/vmdkimage/debian-2.6.32-i686.vmdk"};; + IMAGE_URLS=${IMAGE_URLS:-"http://partnerweb.vmware.com/programs/vmdkimage/cirros-0.3.0-i386-disk.vmdk"};; xenserver) DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.0-x86_64-disk} IMAGE_URLS=${IMAGE_URLS:-"https://github.com/downloads/citrix-openstack/warehouse/cirros-0.3.0-x86_64-disk.vhd.tgz"};; From 7eb99343979921993dc361f71b5efd77e9130f78 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Thu, 6 Feb 2014 10:33:40 +0100 Subject: [PATCH 0859/4704] Setup the correct ec2 manifest path setup correctly the path to the ec2 boundled images. Change-Id: If3bce845e009a73c6b685976de3fa6d44b907bed --- lib/tempest | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/tempest b/lib/tempest index c74f00d1ab..a3df45e81c 100644 --- a/lib/tempest +++ b/lib/tempest @@ -310,6 +310,9 @@ function configure_tempest { iniset $TEMPEST_CONFIG boto ec2_url "http://$SERVICE_HOST:8773/services/Cloud" iniset $TEMPEST_CONFIG boto s3_url "http://$SERVICE_HOST:${S3_SERVICE_PORT:-3333}" iniset $TEMPEST_CONFIG boto s3_materials_path "$BOTO_MATERIALS_PATH" + iniset $TEMPEST_CONFIG boto ari_manifest cirros-0.3.1-x86_64-initrd.manifest.xml + iniset $TEMPEST_CONFIG boto ami_manifest cirros-0.3.1-x86_64-blank.img.manifest.xml + iniset $TEMPEST_CONFIG boto aki_manifest cirros-0.3.1-x86_64-vmlinuz.manifest.xml iniset $TEMPEST_CONFIG boto instance_type "$boto_instance_type" iniset $TEMPEST_CONFIG boto http_socket_timeout 30 iniset $TEMPEST_CONFIG boto ssh_user ${DEFAULT_INSTANCE_USER:-cirros} From 0f73ff2c516cb9fdb6849f7feb19cd0cfde46852 Mon Sep 17 00:00:00 2001 From: 
Adam Gandelman Date: Thu, 13 Mar 2014 14:20:43 -0700 Subject: [PATCH 0860/4704] Move libvirt install + setup to functions-libvirt Moves installation and setup of libvirt to a common functions-libvirt, which can be used by other drivers in the future that may require cross-distro libvirt installation and config but are not using VIRT_DRIVER=libvirt (ie, Ironic). Change-Id: I4a9255c8b4bacd5acfde9b8061c9e537aeea592c --- lib/nova_plugins/functions-libvirt | 125 ++++++++++++++++++++++++++++ lib/nova_plugins/hypervisor-libvirt | 99 +--------------------- 2 files changed, 128 insertions(+), 96 deletions(-) create mode 100644 lib/nova_plugins/functions-libvirt diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt new file mode 100644 index 0000000000..adffe010ee --- /dev/null +++ b/lib/nova_plugins/functions-libvirt @@ -0,0 +1,125 @@ +# lib/nova_plugins/functions-libvirt +# Common libvirt configuration functions + +# Dependencies: +# ``functions`` file +# ``STACK_USER`` has to be defined + +# Save trace setting +LV_XTRACE=$(set +o | grep xtrace) +set +o xtrace + +# Defaults +# ------- + +# if we should turn on massive libvirt debugging +DEBUG_LIBVIRT=$(trueorfalse False $DEBUG_LIBVIRT) + +# Installs required distro-specific libvirt packages. +function install_libvirt { + if is_ubuntu; then + install_package kvm + install_package libvirt-bin + install_package python-libvirt + install_package python-guestfs + elif is_fedora || is_suse; then + install_package kvm + install_package libvirt + install_package libvirt-python + install_package python-libguestfs + fi +} + +# Configures the installed libvirt system so that is accessible by +# STACK_USER via qemu:///system with management capabilities. +function configure_libvirt { + if is_service_enabled neutron && is_neutron_ovs_base_plugin && ! 
sudo grep -q '^cgroup_device_acl' $QEMU_CONF; then + # Add /dev/net/tun to cgroup_device_acls, needed for type=ethernet interfaces + cat </dev/null; then + sudo groupadd $LIBVIRT_GROUP + fi + add_user_to_group $STACK_USER $LIBVIRT_GROUP + + # Enable server side traces for libvirtd + if [[ "$DEBUG_LIBVIRT" = "True" ]] ; then + local log_filters="1:libvirt 1:qemu 1:conf 1:security 3:event 3:json 3:file 1:util" + local log_outputs="1:file:/var/log/libvirt/libvirtd.log" + if ! grep -q "log_filters=\"$log_filters\"" /etc/libvirt/libvirtd.conf; then + echo "log_filters=\"$log_filters\"" | sudo tee -a /etc/libvirt/libvirtd.conf + fi + if ! grep -q "log_outputs=\"$log_outputs\"" /etc/libvirt/libvirtd.conf; then + echo "log_outputs=\"$log_outputs\"" | sudo tee -a /etc/libvirt/libvirtd.conf + fi + fi + + # libvirt detects various settings on startup, as we potentially changed + # the system configuration (modules, filesystems), we need to restart + # libvirt to detect those changes. + restart_service $LIBVIRT_DAEMON +} + + +# Restore xtrace +$LV_XTRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt index 5a51f33808..053df3cdf5 100644 --- a/lib/nova_plugins/hypervisor-libvirt +++ b/lib/nova_plugins/hypervisor-libvirt @@ -7,7 +7,6 @@ # Dependencies: # ``functions`` file # ``nova`` configuration -# ``STACK_USER`` has to be defined # install_nova_hypervisor - install any external requirements # configure_nova_hypervisor - make configuration changes, including those to other services @@ -19,14 +18,13 @@ MY_XTRACE=$(set +o | grep xtrace) set +o xtrace +source $TOP_DIR/lib/nova_plugins/functions-libvirt # Defaults # -------- # File injection is disabled by default in Nova. This will turn it back on. 
ENABLE_FILE_INJECTION=${ENABLE_FILE_INJECTION:-False} -# if we should turn on massive libvirt debugging -DEBUG_LIBVIRT=$(trueorfalse False $DEBUG_LIBVIRT) # Entry Points @@ -40,88 +38,7 @@ function cleanup_nova_hypervisor { # configure_nova_hypervisor - Set config files, create data dirs, etc function configure_nova_hypervisor { - if is_service_enabled neutron && is_neutron_ovs_base_plugin && ! sudo grep -q '^cgroup_device_acl' $QEMU_CONF; then - # Add /dev/net/tun to cgroup_device_acls, needed for type=ethernet interfaces - cat </dev/null; then - sudo groupadd $LIBVIRT_GROUP - fi - add_user_to_group $STACK_USER $LIBVIRT_GROUP - - # Enable server side traces for libvirtd - if [[ "$DEBUG_LIBVIRT" = "True" ]] ; then - local log_filters="1:libvirt 1:qemu 1:conf 1:security 3:event 3:json 3:file 1:util" - local log_outputs="1:file:/var/log/libvirt/libvirtd.log" - if ! grep -q "log_filters=\"$log_filters\"" /etc/libvirt/libvirtd.conf; then - echo "log_filters=\"$log_filters\"" | sudo tee -a /etc/libvirt/libvirtd.conf - fi - if ! grep -q "log_outputs=\"$log_outputs\"" /etc/libvirt/libvirtd.conf; then - echo "log_outputs=\"$log_outputs\"" | sudo tee -a /etc/libvirt/libvirtd.conf - fi - fi - - # libvirt detects various settings on startup, as we potentially changed - # the system configuration (modules, filesystems), we need to restart - # libvirt to detect those changes. 
- restart_service $LIBVIRT_DAEMON - + configure_libvirt iniset $NOVA_CONF DEFAULT libvirt_type "$LIBVIRT_TYPE" iniset $NOVA_CONF DEFAULT libvirt_cpu_mode "none" iniset $NOVA_CONF DEFAULT use_usb_tablet "False" @@ -150,17 +67,7 @@ EOF # install_nova_hypervisor() - Install external components function install_nova_hypervisor { - if is_ubuntu; then - install_package kvm - install_package libvirt-bin - install_package python-libvirt - install_package python-guestfs - elif is_fedora || is_suse; then - install_package kvm - install_package libvirt - install_package libvirt-python - install_package python-libguestfs - fi + install_libvirt # Install and configure **LXC** if specified. LXC is another approach to # splitting a system into many smaller parts. LXC uses cgroups and chroot From bbf759e9ed59b31258bcc8ba9fd3c79db9e57aee Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Thu, 13 Mar 2014 18:09:17 -0700 Subject: [PATCH 0861/4704] Only stop n-cpu in stop_nova_compute Move screen_stop n-cpu from stop_nova_rest to stop_nova_compute. Change-Id: I672673a55869d3f68e12c476924fc742e8260f39 --- lib/nova | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/nova b/lib/nova index 360427d13e..b01d107525 100644 --- a/lib/nova +++ b/lib/nova @@ -716,6 +716,7 @@ function start_nova { } function stop_nova_compute { + screen_stop n-cpu if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then stop_nova_hypervisor fi @@ -725,7 +726,7 @@ function stop_nova_rest { # Kill the nova screen windows # Some services are listed here twice since more than one instance # of a service may be running in certain configs. 
- for serv in n-api n-cpu n-crt n-net n-sch n-novnc n-xvnc n-cauth n-spice n-cond n-cell n-cell n-api-meta n-obj; do + for serv in n-api n-crt n-net n-sch n-novnc n-xvnc n-cauth n-spice n-cond n-cell n-cell n-api-meta n-obj; do screen_stop $serv done } From 1e94eb1a3f1c87670ff4720b89f25b95e0d15e07 Mon Sep 17 00:00:00 2001 From: Steve Martinelli Date: Thu, 13 Mar 2014 23:22:39 -0500 Subject: [PATCH 0862/4704] Move from keystoneclient to openstackclient in eucarc Updating an ec2 create command to openstackclient syntax. Change-Id: I3dd21ddd52b77f3af76988db9ae6b863427d9106 --- eucarc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/eucarc b/eucarc index 350235106c..343f4ccde2 100644 --- a/eucarc +++ b/eucarc @@ -22,7 +22,7 @@ source $RC_DIR/openrc export EC2_URL=$(keystone catalog --service ec2 | awk '/ publicURL / { print $4 }') # Create EC2 credentials for the current user -CREDS=$(keystone ec2-credentials-create) +CREDS=$(openstack ec2 credentials create) export EC2_ACCESS_KEY=$(echo "$CREDS" | awk '/ access / { print $4 }') export EC2_SECRET_KEY=$(echo "$CREDS" | awk '/ secret / { print $4 }') From 2f6c30b33c074a03748b7c0273c49fe81ab96607 Mon Sep 17 00:00:00 2001 From: Steve Martinelli Date: Thu, 13 Mar 2014 23:32:46 -0500 Subject: [PATCH 0863/4704] Update client-env to use openstackclient commands Updated the only instance of a keystoneclient command, to check if the identity service is enabled. 
Change-Id: If86f71c1610a79690d6c6a8eb423b6fa234372bb --- exercises/client-env.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/exercises/client-env.sh b/exercises/client-env.sh index d955e4d1e1..4e8259cd06 100755 --- a/exercises/client-env.sh +++ b/exercises/client-env.sh @@ -64,7 +64,7 @@ if [[ "$ENABLED_SERVICES" =~ "key" ]]; then STATUS_KEYSTONE="Skipped" else echo -e "\nTest Keystone" - if keystone catalog --service identity; then + if openstack endpoint show identity; then STATUS_KEYSTONE="Succeeded" else STATUS_KEYSTONE="Failed" From 4376ae04df50fb9b338039b02a94fea351cedb28 Mon Sep 17 00:00:00 2001 From: Tiago Mello Date: Fri, 14 Mar 2014 10:48:56 -0300 Subject: [PATCH 0864/4704] Clean /etc/mysql when calling clean.sh The clean.sh script should also remove the /etc/mysql directory. It contains information from the old devstack installation and may conflict with the further one. apt-get purge does not remove it since the directory is not empty. Change-Id: I885345a2311851d8746abe42e44300ecd4f6e08a --- lib/databases/mysql | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/databases/mysql b/lib/databases/mysql index f5ee3c0ed0..7a0145ae1b 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -23,6 +23,7 @@ function cleanup_database_mysql { stop_service $MYSQL apt_get purge -y mysql* sudo rm -rf /var/lib/mysql + sudo rm -rf /etc/mysql return elif is_fedora; then if [[ $DISTRO =~ (rhel7) ]]; then From 0b03e7acb84e14efed3bfc2b30055a8427a40a12 Mon Sep 17 00:00:00 2001 From: Rafael Folco Date: Fri, 14 Mar 2014 11:14:57 -0300 Subject: [PATCH 0865/4704] Set correct default disk bus back to virtio on ppc64 virtio is supported and should be the default disk bus on Power to take advantage of I/O performance drivers. This aligns with Nova default bus values on PowerKVM. SCSI is the default for cdrom. 
Change-Id: I5de08c90359b3a500c352c09c07b6b082ddb4325 --- functions | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/functions b/functions index 1d30922916..e439ef6dbe 100644 --- a/functions +++ b/functions @@ -290,7 +290,7 @@ function upload_image { esac if is_arch "ppc64"; then - IMG_PROPERTY="--property hw_disk_bus=scsi --property hw_cdrom_bus=scsi" + IMG_PROPERTY="--property hw_cdrom_bus=scsi" fi if [ "$CONTAINER_FORMAT" = "bare" ]; then From 846609b627bff979ce767dd9ad00daa46a150342 Mon Sep 17 00:00:00 2001 From: Piyush Masrani Date: Fri, 14 Mar 2014 19:21:48 +0530 Subject: [PATCH 0866/4704] Devstack changes to ceilometer to support vsphere Ceilometer currently supports only libvirt when installed using devstack. Have extended this support to Vmware Vsphere in this changelist. Change-Id: I98c64204973bca5e6a7f859a5431adb2b661277f --- lib/ceilometer | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/lib/ceilometer b/lib/ceilometer index b0899e2f24..abf4629b5e 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -161,6 +161,13 @@ function configure_ceilometer { configure_mongodb cleanup_ceilometer fi + + if [[ "$VIRT_DRIVER" = 'vsphere' ]]; then + iniset $CEILOMETER_CONF DEFAULT hypervisor_inspector vsphere + iniset $CEILOMETER_CONF vmware host_ip "$VMWAREAPI_IP" + iniset $CEILOMETER_CONF vmware host_username "$VMWAREAPI_USER" + iniset $CEILOMETER_CONF vmware host_password "$VMWAREAPI_PASSWORD" + fi } function configure_mongodb { @@ -204,6 +211,9 @@ function start_ceilometer { if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then screen_it ceilometer-acompute "cd ; sg $LIBVIRT_GROUP \"ceilometer-agent-compute --config-file $CEILOMETER_CONF\"" fi + if [[ "$VIRT_DRIVER" = 'vsphere' ]]; then + screen_it ceilometer-acompute "cd ; ceilometer-agent-compute --config-file $CEILOMETER_CONF" + fi screen_it ceilometer-acentral "cd ; ceilometer-agent-central --config-file $CEILOMETER_CONF" screen_it ceilometer-anotification "cd ; ceilometer-agent-notification 
--config-file $CEILOMETER_CONF" screen_it ceilometer-collector "cd ; ceilometer-collector --config-file $CEILOMETER_CONF" From 380587bde6444edcc8c0b3adad250de70b27ad33 Mon Sep 17 00:00:00 2001 From: Malini Kamalambal Date: Fri, 14 Mar 2014 12:22:18 -0400 Subject: [PATCH 0867/4704] Rollback workaround for Marconi This patch rolls back the stderr redirection in Marconi. Change-Id: Iaa2d897295cf2bc2e4a8c370d3e0592def337c78 --- lib/marconi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/marconi b/lib/marconi index 3c4547fc75..fd1c35132a 100644 --- a/lib/marconi +++ b/lib/marconi @@ -154,7 +154,7 @@ function install_marconiclient { # start_marconi() - Start running processes, including screen function start_marconi { - screen_it marconi-server "marconi-server --config-file $MARCONI_CONF 2>&1" + screen_it marconi-server "marconi-server --config-file $MARCONI_CONF" echo "Waiting for Marconi to start..." if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget --no-proxy -q -O- $MARCONI_SERVICE_PROTOCOL://$MARCONI_SERVICE_HOST:$MARCONI_SERVICE_PORT/v1/health; do sleep 1; done"; then die $LINENO "Marconi did not start" From 29870cce3214766ecc208d0bb404724cf232ad69 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Fri, 14 Mar 2014 14:32:01 -0400 Subject: [PATCH 0868/4704] add is_heat_enabled this is missing, and the code assumes "heat" to be in the enabled services list otherwise. 
Change-Id: Ib0a7db04d8e38b58aca48261308e7c4d1fd43972 --- lib/heat | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/lib/heat b/lib/heat index 2d9d863f0c..902333e29a 100644 --- a/lib/heat +++ b/lib/heat @@ -45,6 +45,13 @@ TEMPEST_SERVICES+=,heat # Functions # --------- +# Test if any Heat services are enabled +# is_heat_enabled +function is_heat_enabled { + [[ ,${ENABLED_SERVICES} =~ ,"h-" ]] && return 0 + return 1 +} + # cleanup_heat() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_heat { From 06fb29c66124b6c753fdd262eb262043b4551298 Mon Sep 17 00:00:00 2001 From: Alexander Gordeev Date: Fri, 31 Jan 2014 18:02:07 +0400 Subject: [PATCH 0869/4704] Integration testing preparation for Ironic Add ability to create/register qemu vms for Ironic testing purposes Implements bp:deprecate-baremetal-driver Change-Id: If452438fcc0ff562531b33a36cd189b235654b48 --- extras.d/50-ironic.sh | 7 + files/apts/ironic | 10 + files/rpms/ironic | 9 + lib/baremetal | 7 +- lib/ironic | 265 ++++++++++++++++++- lib/nova_plugins/hypervisor-ironic | 75 ++++++ stackrc | 2 +- tools/install_prereqs.sh | 8 +- tools/ironic/scripts/cleanup-nodes | 25 ++ tools/ironic/scripts/configure-vm | 78 ++++++ tools/ironic/scripts/create-nodes | 68 +++++ tools/ironic/scripts/setup-network | 24 ++ tools/ironic/templates/brbm.xml | 6 + tools/ironic/templates/tftpd-xinetd.template | 11 + tools/ironic/templates/vm.xml | 43 +++ 15 files changed, 630 insertions(+), 8 deletions(-) create mode 100644 files/apts/ironic create mode 100644 files/rpms/ironic create mode 100644 lib/nova_plugins/hypervisor-ironic create mode 100755 tools/ironic/scripts/cleanup-nodes create mode 100755 tools/ironic/scripts/configure-vm create mode 100755 tools/ironic/scripts/create-nodes create mode 100755 tools/ironic/scripts/setup-network create mode 100644 tools/ironic/templates/brbm.xml create mode 100644 tools/ironic/templates/tftpd-xinetd.template 
create mode 100644 tools/ironic/templates/vm.xml diff --git a/extras.d/50-ironic.sh b/extras.d/50-ironic.sh index 9e61dc5d78..3b8e3d5045 100644 --- a/extras.d/50-ironic.sh +++ b/extras.d/50-ironic.sh @@ -24,10 +24,17 @@ if is_service_enabled ir-api ir-cond; then # Start the ironic API and ironic taskmgr components echo_summary "Starting Ironic" start_ironic + + if [[ "$IRONIC_BAREMETAL_BASIC_OPS" = "True" ]]; then + prepare_baremetal_basic_ops + fi fi if [[ "$1" == "unstack" ]]; then stop_ironic + if [[ "$IRONIC_BAREMETAL_BASIC_OPS" = "True" ]]; then + cleanup_baremetal_basic_ops + fi fi if [[ "$1" == "clean" ]]; then diff --git a/files/apts/ironic b/files/apts/ironic new file mode 100644 index 0000000000..a749ad762e --- /dev/null +++ b/files/apts/ironic @@ -0,0 +1,10 @@ +libguestfs0 +libvirt-bin +openssh-client +openvswitch-switch +openvswitch-datapath-dkms +python-libguestfs +python-libvirt +syslinux +tftpd-hpa +xinetd diff --git a/files/rpms/ironic b/files/rpms/ironic new file mode 100644 index 0000000000..54b98299ee --- /dev/null +++ b/files/rpms/ironic @@ -0,0 +1,9 @@ +libguestfs +libvirt +libvirt-python +openssh-clients +openvswitch +python-libguestfs +syslinux +tftp-server +xinetd diff --git a/lib/baremetal b/lib/baremetal index 1d02e1e417..eda92f97cb 100644 --- a/lib/baremetal +++ b/lib/baremetal @@ -140,7 +140,10 @@ BM_DEPLOY_KERNEL=${BM_DEPLOY_KERNEL:-} # If you need to add any extra flavors to the deploy ramdisk image # eg, specific network drivers, specify them here -BM_DEPLOY_FLAVOR=${BM_DEPLOY_FLAVOR:-} +# +# NOTE(deva): this will be moved to lib/ironic in a future patch +# for now, set the default to a suitable value for Ironic's needs +BM_DEPLOY_FLAVOR=${BM_DEPLOY_FLAVOR:--a amd64 ubuntu deploy-ironic} # set URL and version for google shell-in-a-box BM_SHELL_IN_A_BOX=${BM_SHELL_IN_A_BOX:-http://shellinabox.googlecode.com/files/shellinabox-2.14.tar.gz} @@ -220,7 +223,7 @@ function upload_baremetal_deploy { BM_DEPLOY_KERNEL=bm-deploy.kernel 
BM_DEPLOY_RAMDISK=bm-deploy.initramfs if [ ! -e "$TOP_DIR/files/$BM_DEPLOY_KERNEL" -o ! -e "$TOP_DIR/files/$BM_DEPLOY_RAMDISK" ]; then - $BM_IMAGE_BUILD_DIR/bin/ramdisk-image-create $BM_DEPLOY_FLAVOR deploy \ + $BM_IMAGE_BUILD_DIR/bin/ramdisk-image-create $BM_DEPLOY_FLAVOR \ -o $TOP_DIR/files/bm-deploy fi fi diff --git a/lib/ironic b/lib/ironic index b346de1e69..c6fa563e6a 100644 --- a/lib/ironic +++ b/lib/ironic @@ -18,16 +18,19 @@ # - stop_ironic # - cleanup_ironic -# Save trace setting +# Save trace and pipefail settings XTRACE=$(set +o | grep xtrace) +PIPEFAIL=$(set +o | grep pipefail) set +o xtrace - +set +o pipefail # Defaults # -------- # Set up default directories IRONIC_DIR=$DEST/ironic +IRONIC_DATA_DIR=$DATA_DIR/ironic +IRONIC_STATE_PATH=/var/lib/ironic IRONICCLIENT_DIR=$DEST/python-ironicclient IRONIC_AUTH_CACHE_DIR=${IRONIC_AUTH_CACHE_DIR:-/var/cache/ironic} IRONIC_CONF_DIR=${IRONIC_CONF_DIR:-/etc/ironic} @@ -35,6 +38,28 @@ IRONIC_CONF_FILE=$IRONIC_CONF_DIR/ironic.conf IRONIC_ROOTWRAP_CONF=$IRONIC_CONF_DIR/rootwrap.conf IRONIC_POLICY_JSON=$IRONIC_CONF_DIR/policy.json +# Set up defaults for functional / integration testing +IRONIC_SCRIPTS_DIR=${IRONIC_SCRIPTS_DIR:-$TOP_DIR/tools/ironic/scripts} +IRONIC_TEMPLATES_DIR=${IRONIC_TEMPLATES_DIR:-$TOP_DIR/tools/ironic/templates} +IRONIC_BAREMETAL_BASIC_OPS=$(trueorfalse False $IRONIC_BAREMETAL_BASIC_OPS) +IRONIC_SSH_USERNAME=${IRONIC_SSH_USERNAME:-`whoami`} +IRONIC_SSH_KEY_DIR=${IRONIC_SSH_KEY_DIR:-$IRONIC_DATA_DIR/ssh_keys} +IRONIC_SSH_KEY_FILENAME=${IRONIC_SSH_KEY_FILENAME:-ironic_key} +IRONIC_KEY_FILE=$IRONIC_SSH_KEY_DIR/$IRONIC_SSH_KEY_FILENAME +IRONIC_SSH_VIRT_TYPE=${IRONIC_SSH_VIRT_TYPE:-virsh} +IRONIC_TFTPBOOT_DIR=${IRONIC_TFTPBOOT_DIR:-$IRONIC_DATA_DIR/tftpboot} +IRONIC_VM_SSH_PORT=${IRONIC_VM_SSH_PORT:-2222} +IRONIC_VM_SSH_ADDRESS=${IRONIC_VM_SSH_ADDRESS:-$HOST_IP} +IRONIC_VM_COUNT=${IRONIC_VM_COUNT:-1} +IRONIC_VM_SPECS_CPU=${IRONIC_VM_SPECS_CPU:-1} +IRONIC_VM_SPECS_RAM=${IRONIC_VM_SPECS_RAM:-256} 
+IRONIC_VM_SPECS_DISK=${IRONIC_VM_SPECS_DISK:-10} +IRONIC_VM_EMULATOR=${IRONIC_VM_EMULATOR:-/usr/bin/qemu-system-x86_64} +IRONIC_VM_NETWORK_BRIDGE=${IRONIC_VM_NETWORK_BRIDGE:-brbm} +IRONIC_VM_NETWORK_RANGE=${IRONIC_VM_NETWORK_RANGE:-192.0.2.0/24} +IRONIC_VM_MACS_CSV_FILE=${IRONIC_VM_MACS_CSV_FILE:-$IRONIC_DATA_DIR/ironic_macs.csv} +IRONIC_AUTHORIZED_KEYS_FILE=${IRONIC_AUTHORIZED_KEYS_FILE:-$HOME/.ssh/authorized_keys} + # Support entry points installation of console scripts IRONIC_BIN_DIR=$(get_python_exec_prefix) @@ -86,8 +111,8 @@ function configure_ironic { iniset $IRONIC_CONF_FILE DEFAULT debug True inicomment $IRONIC_CONF_FILE DEFAULT log_file iniset $IRONIC_CONF_FILE DEFAULT sql_connection `database_connection_url ironic` + iniset $IRONIC_CONF_FILE DEFAULT state_path $IRONIC_STATE_PATH iniset $IRONIC_CONF_FILE DEFAULT use_syslog $SYSLOG - # Configure Ironic conductor, if it was enabled. if is_service_enabled ir-cond; then configure_ironic_conductor @@ -97,6 +122,10 @@ function configure_ironic { if is_service_enabled ir-api; then configure_ironic_api fi + + if [[ "$IRONIC_BAREMETAL_BASIC_OPS" == "True" ]]; then + configure_ironic_auxiliary + fi } # configure_ironic_api() - Is used by configure_ironic(). 
Performs @@ -125,6 +154,10 @@ function configure_ironic_conductor { cp -r $IRONIC_DIR/etc/ironic/rootwrap.d $IRONIC_CONF_DIR iniset $IRONIC_CONF_FILE DEFAULT rootwrap_config $IRONIC_ROOTWRAP_CONF + iniset $IRONIC_CONF_FILE conductor api_url http://$SERVICE_HOST:6385 + iniset $IRONIC_CONF_FILE pxe tftp_server $SERVICE_HOST + iniset $IRONIC_CONF_FILE pxe tftp_root $IRONIC_TFTPBOOT_DIR + iniset $IRONIC_CONF_FILE pxe tftp_master_path $IRONIC_TFTPBOOT_DIR/master_images } # create_ironic_cache_dir() - Part of the init_ironic() process @@ -225,9 +258,233 @@ function stop_ironic { screen -S $SCREEN_NAME -p ir-cond -X kill } +function is_ironic { + if ( is_service_enabled ir-cond && is_service_enabled ir-api ); then + return 0 + fi + return 1 +} + +function configure_ironic_dirs { + sudo mkdir -p $IRONIC_DATA_DIR + sudo mkdir -p $IRONIC_STATE_PATH + sudo mkdir -p $IRONIC_TFTPBOOT_DIR + sudo chown -R $STACK_USER $IRONIC_DATA_DIR $IRONIC_STATE_PATH + sudo chown -R $STACK_USER:$LIBVIRT_GROUP $IRONIC_TFTPBOOT_DIR + if is_ubuntu; then + PXEBIN=/usr/lib/syslinux/pxelinux.0 + elif is_fedora; then + PXEBIN=/usr/share/syslinux/pxelinux.0 + fi + if [ ! -f $PXEBIN ]; then + die $LINENO "pxelinux.0 (from SYSLINUX) not found." 
+ fi + + cp $PXEBIN $IRONIC_TFTPBOOT_DIR + mkdir -p $IRONIC_TFTPBOOT_DIR/pxelinux.cfg +} + +function ironic_ensure_libvirt_group { + groups $STACK_USER | grep -q $LIBVIRT_GROUP || adduser $STACK_USER $LIBVIRT_GROUP +} + +function create_bridge_and_vms { + ironic_ensure_libvirt_group + + # Call libvirt setup scripts in a new shell to ensure any new group membership + sudo su $STACK_USER -c "$IRONIC_SCRIPTS_DIR/setup-network" + + sudo su $STACK_USER -c "$IRONIC_SCRIPTS_DIR/create-nodes \ + $IRONIC_VM_SPECS_CPU $IRONIC_VM_SPECS_RAM $IRONIC_VM_SPECS_DISK \ + amd64 $IRONIC_VM_COUNT $IRONIC_VM_NETWORK_BRIDGE $IRONIC_VM_EMULATOR" >> $IRONIC_VM_MACS_CSV_FILE + +} + +function enroll_vms { + + CHASSIS_ID=$(ironic chassis-create -d "ironic test chassis" | grep " uuid " | get_field 2) + IRONIC_NET_ID=$(neutron net-list | grep private | get_field 1) + local idx=0 + + # work around; need to know what netns neutron uses for private network + neutron port-create private + + while read MAC; do + + NODE_ID=$(ironic node-create --chassis_uuid $CHASSIS_ID --driver pxe_ssh \ + -i ssh_virt_type=$IRONIC_SSH_VIRT_TYPE \ + -i ssh_address=$IRONIC_VM_SSH_ADDRESS \ + -i ssh_port=$IRONIC_VM_SSH_PORT \ + -i ssh_username=$IRONIC_SSH_USERNAME \ + -i ssh_key_filename=$IRONIC_SSH_KEY_DIR/$IRONIC_SSH_KEY_FILENAME \ + -p cpus=$IRONIC_VM_SPECS_CPU \ + -p memory_mb=$IRONIC_VM_SPECS_RAM \ + -p local_gb=$IRONIC_VM_SPECS_DISK \ + -p cpu_arch=x86_64 \ + | grep " uuid " | get_field 2) + + ironic port-create --address $MAC --node_uuid $NODE_ID + + idx=$((idx+1)) + + done < $IRONIC_VM_MACS_CSV_FILE + + # create the nova flavor + nova flavor-create baremetal auto $IRONIC_VM_SPECS_RAM $IRONIC_VM_SPECS_DISK $IRONIC_VM_SPECS_CPU + nova flavor-key baremetal set "cpu_arch"="x86_64" "baremetal:deploy_kernel_id"="$BM_DEPLOY_KERNEL_ID" "baremetal:deploy_ramdisk_id"="$BM_DEPLOY_RAMDISK_ID" + + # intentional sleep to make sure the tag has been set to port + sleep 10 + TAPDEV=$(sudo ip netns exec qdhcp-${IRONIC_NET_ID} 
ip link list | grep tap | cut -d':' -f2 | cut -b2-) + TAG_ID=$(sudo ovs-vsctl show |grep ${TAPDEV} -A1 -m1 | grep tag | cut -d':' -f2 | cut -b2-) + + # make sure veth pair is not existing, otherwise delete its links + sudo ip link show ovs-tap1 && sudo ip link delete ovs-tap1 + sudo ip link show brbm-tap1 && sudo ip link delete brbm-tap1 + # create veth pair for future interconnection between br-int and brbm + sudo ip link add brbm-tap1 type veth peer name ovs-tap1 + sudo ip link set dev brbm-tap1 up + sudo ip link set dev ovs-tap1 up + + sudo ovs-vsctl -- --if-exists del-port ovs-tap1 -- add-port br-int ovs-tap1 tag=$TAG_ID + sudo ovs-vsctl -- --if-exists del-port brbm-tap1 -- add-port $IRONIC_VM_NETWORK_BRIDGE brbm-tap1 +} + +function configure_tftpd { + # enable tftp natting for allowing connections to SERVICE_HOST's tftp server + sudo modprobe nf_conntrack_tftp + sudo modprobe nf_nat_tftp + + if is_ubuntu; then + PXEBIN=/usr/lib/syslinux/pxelinux.0 + elif is_fedora; then + PXEBIN=/usr/share/syslinux/pxelinux.0 + fi + if [ ! -f $PXEBIN ]; then + die $LINENO "pxelinux.0 (from SYSLINUX) not found." + fi + + # stop tftpd and setup serving via xinetd + stop_service tftpd-hpa || true + [ -f /etc/init/tftpd-hpa.conf ] && echo "manual" | sudo tee /etc/init/tftpd-hpa.override + sudo cp $IRONIC_TEMPLATES_DIR/tftpd-xinetd.template /etc/xinetd.d/tftp + sudo sed -e "s|%TFTPBOOT_DIR%|$IRONIC_TFTPBOOT_DIR|g" -i /etc/xinetd.d/tftp + + # setup tftp file mapping to satisfy requests at the root (booting) and + # /tftpboot/ sub-dir (as per deploy-ironic elements) + echo "r ^([^/]) $IRONIC_TFTPBOOT_DIR/\1" >$IRONIC_TFTPBOOT_DIR/map-file + echo "r ^(/tftpboot/) $IRONIC_TFTPBOOT_DIR/\2" >>$IRONIC_TFTPBOOT_DIR/map-file + + chmod -R 0755 $IRONIC_TFTPBOOT_DIR + restart_service xinetd +} + +function configure_ironic_ssh_keypair { + # Generating ssh key pair for stack user + if [[ ! -d $IRONIC_SSH_KEY_DIR ]]; then + mkdir -p $IRONIC_SSH_KEY_DIR + fi + if [[ ! 
-d $HOME/.ssh ]]; then + mkdir -p $HOME/.ssh + chmod 700 $HOME/.ssh + fi + echo -e 'n\n' | ssh-keygen -q -t rsa -P '' -f $IRONIC_KEY_FILE + cat $IRONIC_KEY_FILE.pub | tee -a $IRONIC_AUTHORIZED_KEYS_FILE +} + +function ironic_ssh_check { + local KEY_FILE=$1 + local FLOATING_IP=$2 + local PORT=$3 + local DEFAULT_INSTANCE_USER=$4 + local ACTIVE_TIMEOUT=$5 + if ! timeout $ACTIVE_TIMEOUT sh -c "while ! ssh -p $PORT -o StrictHostKeyChecking=no -i $KEY_FILE ${DEFAULT_INSTANCE_USER}@$FLOATING_IP echo success; do sleep 1; done"; then + die $LINENO "server didn't become ssh-able!" + fi +} + +function configure_ironic_sshd { + # Ensure sshd server accepts connections from localhost only + + SSH_CONFIG=/etc/ssh/sshd_config + HOST_PORT=$IRONIC_VM_SSH_ADDRESS:$IRONIC_VM_SSH_PORT + if ! sudo grep ListenAddress $SSH_CONFIG | grep $HOST_PORT; then + echo "ListenAddress $HOST_PORT" | sudo tee -a $SSH_CONFIG + fi + + SSH_SERVICE_NAME=sshd + if is_ubuntu; then + SSH_SERVICE_NAME=ssh + fi + + restart_service $SSH_SERVICE_NAME + # to ensure ssh service is up and running + sleep 3 + ironic_ssh_check $IRONIC_SSH_KEY_DIR/$IRONIC_SSH_KEY_FILENAME $IRONIC_VM_SSH_ADDRESS $IRONIC_VM_SSH_PORT $IRONIC_SSH_USERNAME 10 + +} + +function configure_ironic_auxiliary { + configure_ironic_dirs + configure_ironic_ssh_keypair + configure_ironic_sshd +} + +function prepare_baremetal_basic_ops { + + # install diskimage-builder + git_clone $BM_IMAGE_BUILD_REPO $BM_IMAGE_BUILD_DIR $BM_IMAGE_BUILD_BRANCH + + # make sure all needed service were enabled + for srv in nova glance key neutron; do + if ! 
is_service_enabled "$srv"; then + die $LINENO "$srv should be enabled for ironic tests" + fi + done + + SCREEN_NAME=${SCREEN_NAME:-stack} + SERVICE_DIR=${SERVICE_DIR:-${DEST}/status} + + # stop all nova services + stop_nova || true + + # remove any nova services failure status + find $SERVICE_DIR/$SCREEN_NAME -name 'n-*.failure' -exec rm -f '{}' \; + + # start them again + start_nova_api + start_nova + + TOKEN=$(keystone token-get | grep ' id ' | get_field 2) + die_if_not_set $LINENO TOKEN "Keystone fail to get token" + + echo_summary "Creating and uploading baremetal images for ironic" + + # build and upload separate deploy kernel & ramdisk + upload_baremetal_deploy $TOKEN + + create_bridge_and_vms + enroll_vms + configure_tftpd +} + +function cleanup_baremetal_basic_ops { + rm -f $IRONIC_VM_MACS_CSV_FILE + if [ -f $IRONIC_KEY_FILE ]; then + KEY=`cat $IRONIC_KEY_FILE.pub` + # remove public key from authorized_keys + grep -v "$KEY" $IRONIC_AUTHORIZED_KEYS_FILE > temp && mv temp $IRONIC_AUTHORIZED_KEYS_FILE + chmod 0600 $IRONIC_AUTHORIZED_KEYS_FILE + fi + sudo rm -rf $IRONIC_DATA_DIR $IRONIC_STATE_PATH + sudo su $STACK_USER -c "$IRONIC_SCRIPTS_DIR/cleanup-nodes $IRONIC_VM_COUNT $IRONIC_VM_NETWORK_BRIDGE" + sudo rm -rf /etc/xinetd.d/tftp /etc/init/tftpd-hpa.override + restart_service xinetd +} -# Restore xtrace +# Restore xtrace + pipefail $XTRACE +$PIPEFAIL # Tell emacs to use shell-script-mode ## Local variables: diff --git a/lib/nova_plugins/hypervisor-ironic b/lib/nova_plugins/hypervisor-ironic new file mode 100644 index 0000000000..5af7c0b292 --- /dev/null +++ b/lib/nova_plugins/hypervisor-ironic @@ -0,0 +1,75 @@ +# lib/nova_plugins/hypervisor-ironic +# Configure the ironic hypervisor + +# Enable with: +# VIRT_DRIVER=ironic + +# Dependencies: +# ``functions`` file +# ``nova`` configuration + +# install_nova_hypervisor - install any external requirements +# configure_nova_hypervisor - make configuration changes, including those to other services +# 
start_nova_hypervisor - start any external services +# stop_nova_hypervisor - stop any external services +# cleanup_nova_hypervisor - remove transient data and cache + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + +# Entry Points +# ------------ + +# clean_nova_hypervisor - Clean up an installation +function cleanup_nova_hypervisor { + # This function intentionally left blank + : +} + +# configure_nova_hypervisor - Set config files, create data dirs, etc +function configure_nova_hypervisor { + iniset $NOVA_CONF ironic sql_connection `database_connection_url nova_bm` + LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.firewall.NoopFirewallDriver"} + iniset $NOVA_CONF DEFAULT compute_driver ironic.nova.virt.ironic.IronicDriver + iniset $NOVA_CONF DEFAULT firewall_driver $LIBVIRT_FIREWALL_DRIVER + iniset $NOVA_CONF DEFAULT scheduler_host_manager ironic.nova.scheduler.ironic_host_manager.IronicHostManager + iniset $NOVA_CONF DEFAULT ram_allocation_ratio 1.0 + iniset $NOVA_CONF DEFAULT reserved_host_memory_mb 0 + # ironic section + iniset $NOVA_CONF ironic admin_username admin + iniset $NOVA_CONF ironic admin_password $ADMIN_PASSWORD + iniset $NOVA_CONF ironic admin_url $KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v2.0 + iniset $NOVA_CONF ironic admin_tenant_name demo + iniset $NOVA_CONF ironic api_endpoint http://$SERVICE_HOST:6358/v1 +} + +# install_nova_hypervisor() - Install external components +function install_nova_hypervisor { + # This function intentionally left blank + : +} + +# start_nova_hypervisor - Start any required external services +function start_nova_hypervisor { + # This function intentionally left blank + : +} + +# stop_nova_hypervisor - Stop any external services +function stop_nova_hypervisor { + # This function intentionally left blank + : +} + + +# Restore xtrace +$MY_XTRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/stackrc b/stackrc 
index 456637854b..4a997bf77c 100644 --- a/stackrc +++ b/stackrc @@ -267,7 +267,7 @@ DEFAULT_VIRT_DRIVER=libvirt is_package_installed xenserver-core && DEFAULT_VIRT_DRIVER=xenserver VIRT_DRIVER=${VIRT_DRIVER:-$DEFAULT_VIRT_DRIVER} case "$VIRT_DRIVER" in - libvirt) + ironic|libvirt) LIBVIRT_TYPE=${LIBVIRT_TYPE:-kvm} if [[ "$os_VENDOR" =~ (Debian) ]]; then LIBVIRT_GROUP=libvirt diff --git a/tools/install_prereqs.sh b/tools/install_prereqs.sh index 0c65fd9b00..9651083cb3 100755 --- a/tools/install_prereqs.sh +++ b/tools/install_prereqs.sh @@ -55,7 +55,13 @@ export_proxy_variables # ================ # Install package requirements -install_package $(get_packages general $ENABLED_SERVICES) +PACKAGES=$(get_packages general $ENABLED_SERVICES) +if is_ubuntu && echo $PACKAGES | grep -q dkms ; then + # ensure headers for the running kernel are installed for any DKMS builds + PACKAGES="$PACKAGES linux-headers-$(uname -r)" +fi + +install_package $PACKAGES if [[ -n "$SYSLOG" && "$SYSLOG" != "False" ]]; then if is_ubuntu || is_fedora; then diff --git a/tools/ironic/scripts/cleanup-nodes b/tools/ironic/scripts/cleanup-nodes new file mode 100755 index 0000000000..dc5a19d1cd --- /dev/null +++ b/tools/ironic/scripts/cleanup-nodes @@ -0,0 +1,25 @@ +#!/usr/bin/env bash + +# **cleanup-nodes** + +# Cleans up baremetal poseur nodes and volumes created during ironic setup +# Assumes calling user has proper libvirt group membership and access. 
+ +set -exu + +LIBVIRT_STORAGE_POOL=${LIBVIRT_STORAGE_POOL:-"default"} + +VM_COUNT=$1 +NETWORK_BRIDGE=$2 + +for (( idx=0; idx<$VM_COUNT; idx++ )); do + NAME="baremetal${NETWORK_BRIDGE}_${idx}" + VOL_NAME="baremetal${NETWORK_BRIDGE}-${idx}.qcow2" + virsh list | grep -q $NAME && virsh destroy $NAME + virsh list --inactive | grep -q $NAME && virsh undefine $NAME + + if virsh pool-list | grep -q $LIBVIRT_STORAGE_POOL ; then + virsh vol-list $LIBVIRT_STORAGE_POOL | grep -q $VOL_NAME && + virsh vol-delete $VOL_NAME --pool $LIBVIRT_STORAGE_POOL + fi +done diff --git a/tools/ironic/scripts/configure-vm b/tools/ironic/scripts/configure-vm new file mode 100755 index 0000000000..9936b76c4f --- /dev/null +++ b/tools/ironic/scripts/configure-vm @@ -0,0 +1,78 @@ +#!/usr/bin/env python + +import argparse +import os.path + +import libvirt + +templatedir = os.path.join(os.path.dirname(os.path.dirname(__file__)), + 'templates') + + +def main(): + parser = argparse.ArgumentParser( + description="Configure a kvm virtual machine for the seed image.") + parser.add_argument('--name', default='seed', + help='the name to give the machine in libvirt.') + parser.add_argument('--image', + help='Use a custom image file (must be qcow2).') + parser.add_argument('--engine', default='qemu', + help='The virtualization engine to use') + parser.add_argument('--arch', default='i686', + help='The architecture to use') + parser.add_argument('--memory', default='2097152', + help="Maximum memory for the VM in KB.") + parser.add_argument('--cpus', default='1', + help="CPU count for the VM.") + parser.add_argument('--bootdev', default='hd', + help="What boot device to use (hd/network).") + parser.add_argument('--network', default="brbm", + help='The libvirt network name to use') + parser.add_argument('--libvirt-nic-driver', default='e1000', + help='The libvirt network driver to use') + parser.add_argument('--emulator', default=None, + help='Path to emulator bin for vm template') + args = parser.parse_args() 
+ with file(templatedir + '/vm.xml', 'rb') as f: + source_template = f.read() + params = { + 'name': args.name, + 'imagefile': args.image, + 'engine': args.engine, + 'arch': args.arch, + 'memory': args.memory, + 'cpus': args.cpus, + 'bootdev': args.bootdev, + 'network': args.network, + 'emulator': args.emulator, + } + + if args.emulator: + params['emulator'] = args.emulator + else: + if os.path.exists("/usr/bin/kvm"): # Debian + params['emulator'] = "/usr/bin/kvm" + elif os.path.exists("/usr/bin/qemu-kvm"): # Redhat + params['emulator'] = "/usr/bin/qemu-kvm" + + nicparams = { + 'nicdriver': args.libvirt_nic_driver, + 'network': args.network, + } + + params['bm_network'] = """ + + + + + +
+""" % nicparams + + libvirt_template = source_template % params + conn = libvirt.open("qemu:///system") + a = conn.defineXML(libvirt_template) + print ("Created machine %s with UUID %s" % (args.name, a.UUIDString())) + +if __name__ == '__main__': + main() diff --git a/tools/ironic/scripts/create-nodes b/tools/ironic/scripts/create-nodes new file mode 100755 index 0000000000..3232b50776 --- /dev/null +++ b/tools/ironic/scripts/create-nodes @@ -0,0 +1,68 @@ +#!/usr/bin/env bash + +# **create-nodes** + +# Creates baremetal poseur nodes for ironic testing purposes + +set -exu + +# Keep track of the devstack directory +TOP_DIR=$(cd $(dirname "$0")/.. && pwd) + +CPU=$1 +MEM=$(( 1024 * $2 )) +# extra G to allow fuzz for partition table : flavor size and registered size +# need to be different to actual size. +DISK=$(( $3 + 1)) + +case $4 in + i386) ARCH='i686' ;; + amd64) ARCH='x86_64' ;; + *) echo "Unsupported arch $4!" ; exit 1 ;; +esac + +TOTAL=$(($5 - 1)) +BRIDGE=$6 +EMULATOR=$7 + +LIBVIRT_NIC_DRIVER=${LIBVIRT_NIC_DRIVER:-"e1000"} +LIBVIRT_STORAGE_POOL=${LIBVIRT_STORAGE_POOL:-"default"} + +if ! virsh pool-list --all | grep -q $LIBVIRT_STORAGE_POOL; then + virsh pool-define-as --name $LIBVIRT_STORAGE_POOL dir --target /var/lib/libvirt/images >&2 + virsh pool-autostart $LIBVIRT_STORAGE_POOL >&2 + virsh pool-start $LIBVIRT_STORAGE_POOL >&2 +fi + +pool_state=$(virsh pool-info $LIBVIRT_STORAGE_POOL | grep State | awk '{ print $2 }') +if [ "$pool_state" != "running" ] ; then + [ ! 
-d /var/lib/libvirt/images ] && sudo mkdir /var/lib/libvirt/images + virsh pool-start $LIBVIRT_STORAGE_POOL >&2 +fi + +PREALLOC= +if [ -f /etc/debian_version ]; then + PREALLOC="--prealloc-metadata" +fi + +DOMS="" +for idx in $(seq 0 $TOTAL) ; do + NAME="baremetal${BRIDGE}_${idx}" + DOMS="$DOMS $NAME" + VOL_NAME="baremetal${BRIDGE}-${idx}.qcow2" + (virsh list --all | grep -q $NAME) && continue + + virsh vol-list --pool $LIBVIRT_STORAGE_POOL | grep -q $VOL_NAME && + virsh vol-delete $VOL_NAME --pool $LIBVIRT_STORAGE_POOL >&2 + virsh vol-create-as $LIBVIRT_STORAGE_POOL ${VOL_NAME} ${DISK}G --format qcow2 $PREALLOC >&2 + volume_path=$(virsh vol-path --pool $LIBVIRT_STORAGE_POOL $VOL_NAME) + # Pre-touch the VM to set +C, as it can only be set on empty files. + sudo touch "$volume_path" + sudo chattr +C "$volume_path" || true + $TOP_DIR/scripts/configure-vm --bootdev network --name $NAME --image "$volume_path" --arch $ARCH --cpus $CPU --memory $MEM --libvirt-nic-driver $LIBVIRT_NIC_DRIVER --emulator $EMULATOR --network $BRIDGE >&2 +done + +for dom in $DOMS ; do + # echo mac + virsh dumpxml $dom | grep "mac address" | head -1 | cut -d\' -f2 +done diff --git a/tools/ironic/scripts/setup-network b/tools/ironic/scripts/setup-network new file mode 100755 index 0000000000..8c3ea901b4 --- /dev/null +++ b/tools/ironic/scripts/setup-network @@ -0,0 +1,24 @@ +#!/usr/bin/env bash + +# **setup-network** + +# Setups openvswitch libvirt network suitable for +# running baremetal poseur nodes for ironic testing purposes + +set -exu + +# Keep track of the devstack directory +TOP_DIR=$(cd $(dirname "$0")/.. && pwd) +BRIDGE_SUFFIX=${1:-''} +BRIDGE_NAME=brbm$BRIDGE_SUFFIX + +# Only add bridge if missing +(sudo ovs-vsctl list-br | grep ${BRIDGE_NAME}$) || sudo ovs-vsctl add-br ${BRIDGE_NAME} + +# remove bridge before replacing it. 
+(virsh net-list | grep "${BRIDGE_NAME} ") && virsh net-destroy ${BRIDGE_NAME} +(virsh net-list --inactive | grep "${BRIDGE_NAME} ") && virsh net-undefine ${BRIDGE_NAME} + +virsh net-define <(sed s/brbm/$BRIDGE_NAME/ $TOP_DIR/templates/brbm.xml) +virsh net-autostart ${BRIDGE_NAME} +virsh net-start ${BRIDGE_NAME} diff --git a/tools/ironic/templates/brbm.xml b/tools/ironic/templates/brbm.xml new file mode 100644 index 0000000000..0769d3f1d0 --- /dev/null +++ b/tools/ironic/templates/brbm.xml @@ -0,0 +1,6 @@ + + brbm + + + + diff --git a/tools/ironic/templates/tftpd-xinetd.template b/tools/ironic/templates/tftpd-xinetd.template new file mode 100644 index 0000000000..7b9b0f8a78 --- /dev/null +++ b/tools/ironic/templates/tftpd-xinetd.template @@ -0,0 +1,11 @@ +service tftp +{ + protocol = udp + port = 69 + socket_type = dgram + wait = yes + user = root + server = /usr/sbin/in.tftpd + server_args = -v -v -v -v -v --map-file %TFTPBOOT_DIR%/map-file %TFTPBOOT_DIR% + disable = no +} diff --git a/tools/ironic/templates/vm.xml b/tools/ironic/templates/vm.xml new file mode 100644 index 0000000000..b18dec055f --- /dev/null +++ b/tools/ironic/templates/vm.xml @@ -0,0 +1,43 @@ + + %(name)s + %(memory)s + %(cpus)s + + hvm + + + + + + + + + + destroy + restart + restart + + %(emulator)s + + + + +
+ + +
+ + %(network)s + %(bm_network)s + + +