From 6839d42819c8349d8f1e72a58037198c97baca06 Mon Sep 17 00:00:00 2001 From: YAMAMOTO Takashi Date: Tue, 17 Oct 2017 12:58:18 +0900 Subject: [PATCH 001/574] neutron-legacy: Remove no longer necessary vpnaas conditional VPNaaS agent is going to be an L3 agent extention. Related-Bug: #1692128 Depends-On: I0b86c432e4b2210e5f2a73a7e3ba16d10467f0f2 Change-Id: Id827274b7c74cdf71db6d1f2ab3eadb5fef099f5 --- lib/neutron-legacy | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 0ccb17c084..e2e0bb92a9 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -503,11 +503,7 @@ function start_mutnauq_l2_agent { function start_mutnauq_other_agents { run_process q-dhcp "$AGENT_DHCP_BINARY --config-file $NEUTRON_CONF --config-file $Q_DHCP_CONF_FILE" - if is_service_enabled neutron-vpnaas; then - : # Started by plugin - else - run_process q-l3 "$AGENT_L3_BINARY $(determine_config_files neutron-l3-agent)" - fi + run_process q-l3 "$AGENT_L3_BINARY $(determine_config_files neutron-l3-agent)" run_process q-meta "$AGENT_META_BINARY --config-file $NEUTRON_CONF --config-file $Q_META_CONF_FILE" run_process q-metering "$AGENT_METERING_BINARY --config-file $NEUTRON_CONF --config-file $METERING_AGENT_CONF_FILENAME" From 63beab524368875820db453c244468105584bc85 Mon Sep 17 00:00:00 2001 From: Huan Xiong Date: Fri, 23 Mar 2018 14:42:37 +0000 Subject: [PATCH 002/574] init_cinder() shouldn't always create DEFAULT_VOLUME_GROUP_NAME DEFAULT_VOLUME_GROUP_NAME volume group is LVM ephemeral storage used by Nova. It is created by init_nova() if user sets NOVA_BACKEND to "LVM". However, init_cinder() is also hardcoded to create it, based on the asumption that CINDER_ENABLED_BACKENDS includes it. That assumption doesn't hold for the current code. What's more important, even if user wants to use DEFAULT_VOLUME_GROUP_NAME as one of cinder backends and adds it to CINDER_ENABLED_BACKENDS, the current code in init_cinder() are general enough and should work fine. This change removes relevant code in init_cinder(). It also moves DEFAULT_VOLUME_GROUP_NAME clean-up code from unstack.sh to cleanup_nova(). Change-Id: I53762f8eda6256f962cc4e1f1098406879bbcf5c --- lib/cinder | 4 ---- lib/nova | 4 ++++ unstack.sh | 4 ---- 3 files changed, 4 insertions(+), 8 deletions(-) diff --git a/lib/cinder b/lib/cinder index 3a8097f894..e0b8971dcb 100644 --- a/lib/cinder +++ b/lib/cinder @@ -420,10 +420,6 @@ function init_cinder { be_type=${be%%:*} be_name=${be##*:} if type init_cinder_backend_${be_type} >/dev/null 2>&1; then - # Always init the default volume group for lvm. - if [[ "$be_type" == "lvm" ]]; then - init_default_lvm_volume_group - fi init_cinder_backend_${be_type} ${be_name} fi done diff --git a/lib/nova b/lib/nova index 56e309333b..ee682db86a 100644 --- a/lib/nova +++ b/lib/nova @@ -264,6 +264,10 @@ function cleanup_nova { stop_process "n-api-meta" remove_uwsgi_config "$NOVA_UWSGI_CONF" "$NOVA_UWSGI" remove_uwsgi_config "$NOVA_METADATA_UWSGI_CONF" "$NOVA_METADATA_UWSGI" + + if [[ "$NOVA_BACKEND" == "LVM" ]]; then + clean_lvm_volume_group $DEFAULT_VOLUME_GROUP_NAME + fi } # configure_nova() - Set config files, create data dirs, etc diff --git a/unstack.sh b/unstack.sh index ccea0ef585..cfbf22e0fa 100755 --- a/unstack.sh +++ b/unstack.sh @@ -175,9 +175,5 @@ fi # enabled backends. So if Cinder is enabled, and installed successfully we are # sure lvm2 (lvremove, /etc/lvm/lvm.conf, etc.) is here. 
if is_service_enabled cinder && is_package_installed lvm2; then - # Using /bin/true here indicates a BUG - maybe the - # DEFAULT_VOLUME_GROUP_NAME doesn't exist? We should - # isolate this further down in lib/cinder cleanup. - clean_lvm_volume_group $DEFAULT_VOLUME_GROUP_NAME || /bin/true clean_lvm_filter fi From dc01a8ab63aff1be170fb59c293ed4bddd03749a Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Sun, 14 Jul 2019 22:33:13 +0200 Subject: [PATCH 003/574] Switch TLS tests to TLSv1.2+ only This would more likely match a relevant production deployment. Change-Id: I4ee2ff0c00a8e33fd069a782b32eed5fef62c01b --- files/apache-keystone.template | 1 + files/apache-neutron.template | 1 + lib/tls | 1 + 3 files changed, 3 insertions(+) diff --git a/files/apache-keystone.template b/files/apache-keystone.template index 128436027d..480fe06a9c 100644 --- a/files/apache-keystone.template +++ b/files/apache-keystone.template @@ -38,6 +38,7 @@ LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\" %D(us)" %SSLLISTEN% %SSLENGINE% %SSLLISTEN% %SSLCERTFILE% %SSLLISTEN% %SSLKEYFILE% +%SSLLISTEN% SSLProtocol -all +TLSv1.3 +TLSv1.2 %SSLLISTEN% Alias /identity %KEYSTONE_BIN%/keystone-wsgi-public diff --git a/files/apache-neutron.template b/files/apache-neutron.template index c7796b93bf..358e87f5da 100644 --- a/files/apache-neutron.template +++ b/files/apache-neutron.template @@ -24,6 +24,7 @@ LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\" %D(us)" %SSLLISTEN% %SSLENGINE% %SSLLISTEN% %SSLCERTFILE% %SSLLISTEN% %SSLKEYFILE% +%SSLLISTEN% SSLProtocol -all +TLSv1.3 +TLSv1.2 %SSLLISTEN% Alias /networking %NEUTRON_BIN%/neutron-api diff --git a/lib/tls b/lib/tls index 0032449e13..6f2a65a75b 100644 --- a/lib/tls +++ b/lib/tls @@ -536,6 +536,7 @@ $listen_string SSLEngine On SSLCertificateFile $DEVSTACK_CERT + SSLProtocol -all +TLSv1.3 +TLSv1.2 # Disable KeepAlive to fix bug #1630664 a.k.a the # ('Connection aborted.', BadStatusLine("''",)) error From 8c86e5a53e1c77fc17df7d979f6435b534021d13 Mon Sep 17 00:00:00 2001 From: Eric Harney Date: Thu, 17 Oct 2019 15:42:41 -0400 Subject: [PATCH 004/574] Cinder: only set volume_clear for LVM This only applies to the LVM driver (when using thick provisioning), and doesn't have any effect on other backends like NFS, so only write the conf entry for LVM. 
Change-Id: I722ba2fa0010d9887ed9b7fdd9e050cd4694768e --- lib/cinder | 3 --- lib/cinder_backends/lvm | 2 +- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/lib/cinder b/lib/cinder index fd960535d9..2e6e97a006 100644 --- a/lib/cinder +++ b/lib/cinder @@ -250,9 +250,6 @@ function configure_cinder { default_name=$be_name fi enabled_backends+=$be_name, - - iniset $CINDER_CONF $be_name volume_clear $CINDER_VOLUME_CLEAR - done iniset $CINDER_CONF DEFAULT enabled_backends ${enabled_backends%,*} if [[ -n "$default_name" ]]; then diff --git a/lib/cinder_backends/lvm b/lib/cinder_backends/lvm index 497081c9e4..e03ef14c55 100644 --- a/lib/cinder_backends/lvm +++ b/lib/cinder_backends/lvm @@ -52,7 +52,7 @@ function configure_cinder_backend_lvm { iniset $CINDER_CONF $be_name volume_group $VOLUME_GROUP_NAME-$be_name iniset $CINDER_CONF $be_name target_helper "$CINDER_ISCSI_HELPER" iniset $CINDER_CONF $be_name lvm_type "$CINDER_LVM_TYPE" - + iniset $CINDER_CONF $be_name volume_clear "$CINDER_VOLUME_CLEAR" } # init_cinder_backend_lvm - Initialize volume group From a676c4029e46743ccf2e81ebd17cd306ffc4960d Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Wed, 2 Aug 2017 16:46:35 +0000 Subject: [PATCH 005/574] Revert "Generate deprecation warning for postgresql" Based on resolution [1], there's no clear indication that next steps involve the removal of the DB from Devstack or from the gate. [1] I332cef8ec4539520adcf37c6d2ea11488289fcfd This reverts commit d9aaae95f2b84170bf35e037715e4963d89f940c. Change-Id: I8410d65c0e0b24035aa035fac7560a686d53ec50 --- lib/databases/postgresql | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/databases/postgresql b/lib/databases/postgresql index 618834b550..1f347f5548 100644 --- a/lib/databases/postgresql +++ b/lib/databases/postgresql @@ -95,7 +95,6 @@ function configure_database_postgresql { function install_database_postgresql { echo_summary "Installing postgresql" - deprecated "Use of postgresql in devstack is deprecated, and will be removed during the Pike cycle" local pgpass=$HOME/.pgpass if [[ ! -e $pgpass ]]; then cat < $pgpass From b107f9cf18c8112cce3f796995f3a5691be56259 Mon Sep 17 00:00:00 2001 From: "Walter A. 
Boring IV" Date: Mon, 1 Jul 2019 16:19:12 -0700 Subject: [PATCH 006/574] Add support for ceph_iscsi cinder driver This patch adds support to configure the new ceph iscsi driver Depends-On: https://review.opendev.org/#/c/662829/ Depends-On:https://review.opendev.org/668667 Change-Id: Ica180e00dedb8e7ed60e27e3f4841faa8fef938c --- lib/cinder_backends/ceph_iscsi | 56 ++++++++++++++++++++++++++++++++++ 1 file changed, 56 insertions(+) create mode 100644 lib/cinder_backends/ceph_iscsi diff --git a/lib/cinder_backends/ceph_iscsi b/lib/cinder_backends/ceph_iscsi new file mode 100644 index 0000000000..94412e0da6 --- /dev/null +++ b/lib/cinder_backends/ceph_iscsi @@ -0,0 +1,56 @@ +#!/bin/bash +# +# lib/cinder_backends/ceph_iscsi +# Configure the ceph_iscsi backend + +# Enable with: +# +# CINDER_ENABLED_BACKENDS+=,ceph_iscsi:ceph_iscsi +# +# Optional paramteters: +# CEPH_ISCSI_API_URL= +# +# Dependencies: +# +# - ``functions`` file +# - ``cinder`` configurations + +# configure_ceph_backend_ceph_iscsi - called from configure_cinder() + + +# Save trace setting +_XTRACE_CINDER_CEPH_ISCSI=$(set +o | grep xtrace) +set +o xtrace + +# Entry Points +# ------------ + +# configure_cinder_backend_ceph_iscsi - Set config files, create data dirs, etc +# configure_cinder_backend_ceph_iscsi $name +function configure_cinder_backend_ceph_iscsi { + local be_name=$1 + + CEPH_ISCSI_API_URL=${CEPH_ISCSI_API_URL:-http://$CEPH_ISCSI_API_HOST:$CEPH_ISCSI_API_PORT} + + iniset $CINDER_CONF $be_name volume_backend_name $be_name + iniset $CINDER_CONF $be_name volume_driver "cinder.volume.drivers.ceph.rbd_iscsi.RBDISCSIDriver" + iniset $CINDER_CONF $be_name rbd_ceph_conf "$CEPH_CONF_FILE" + iniset $CINDER_CONF $be_name rbd_pool "$CINDER_CEPH_POOL" + iniset $CINDER_CONF $be_name rbd_user "$CINDER_CEPH_USER" + iniset $CINDER_CONF $be_name rbd_iscsi_api_user "$CEPH_ISCSI_API_USER" + iniset $CINDER_CONF $be_name rbd_iscsi_api_password "$CEPH_ISCSI_API_PASSWORD" + iniset $CINDER_CONF $be_name rbd_iscsi_api_url "$CEPH_ISCSI_API_URL" + iniset $CINDER_CONF $be_name rbd_iscsi_target_iqn "$CEPH_ISCSI_TARGET_IQN" + iniset $CINDER_CONF $be_name rbd_flatten_volume_from_snapshot False + iniset $CINDER_CONF $be_name rbd_max_clone_depth 5 + iniset $CINDER_CONF DEFAULT glance_api_version 2 + + pip_install rbd-iscsi-client +} + +# Restore xtrace +$_XTRACE_CINDER_CEPH_ISCSI + +# Local variables: +# mode: shell-script +# End: From b37240382dc300f30efd83894fb0a9077e98d0ec Mon Sep 17 00:00:00 2001 From: Jens Harbott Date: Fri, 8 Jan 2021 09:41:56 +0100 Subject: [PATCH 007/574] Drop opensuse platform job It has been broken for over a month. Feel free to revert in combination with a fix, better with a commitment to keep the job in working shape permanently. 
Change-Id: I2604374c23716d56de29e16a459b7c7f45b84891 --- .zuul.yaml | 8 -------- 1 file changed, 8 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index c1406716fe..bf32af070e 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -584,13 +584,6 @@ voting: false timeout: 9000 -- job: - name: devstack-platform-opensuse-15 - parent: tempest-full-py3 - description: openSUSE 15.x platform test - nodeset: devstack-single-node-opensuse-15 - voting: false - - job: name: devstack-platform-bionic parent: tempest-full-py3 @@ -686,7 +679,6 @@ jobs: - devstack - devstack-ipv6 - - devstack-platform-opensuse-15 - devstack-platform-fedora-latest - devstack-platform-centos-8 - devstack-platform-bionic From ed164289a57549fb2b2404fc77052bb09ceb5105 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Mon, 18 Jan 2021 09:57:00 -0800 Subject: [PATCH 008/574] Always verify os_glance reserved namespace On master, we should always enable tempest's verification of Glance's os_glance namespace enforcement. Change-Id: Ia71878e6c53ee683a868112959876798e946e2ce Depends-On: https://review.opendev.org/c/openstack/glance/+/771070 --- lib/tempest | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/tempest b/lib/tempest index 552e1c22a3..b2047000e0 100644 --- a/lib/tempest +++ b/lib/tempest @@ -351,6 +351,7 @@ function configure_tempest { iniset $TEMPEST_CONFIG image disk_formats "ami,ari,aki,vhd,raw,iso" fi iniset $TEMPEST_CONFIG image-feature-enabled import_image $GLANCE_USE_IMPORT_WORKFLOW + iniset $TEMPEST_CONFIG image-feature-enabled os_glance_reserved True # Compute iniset $TEMPEST_CONFIG compute image_ref $image_uuid iniset $TEMPEST_CONFIG compute image_ref_alt $image_uuid_alt From eef2a0d751f4d53fb7b475e69e229cb541f7af2c Mon Sep 17 00:00:00 2001 From: Vanou Ishii Date: Wed, 20 Jan 2021 14:15:57 +0900 Subject: [PATCH 009/574] Fix Early Use of die function in stack.sh This commit fixes use of die function before it's defined. die function can be used after sourcing $TOP_DIR/functions chain- sources $TOP_DIR/functions-common. Because fixed portion of stack.sh checks existence of $TOP_DIR/inc and sourcing $TOP_DIR/function chain source $TOP_DIR/inc, this commit uses echo & exit command instead of die function. Closes-Bug: #1913021 Change-Id: I5ec174cf7b02269525b1bfd0bfa94ea889d16fce --- stack.sh | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/stack.sh b/stack.sh index 036afd7b00..c334159550 100755 --- a/stack.sh +++ b/stack.sh @@ -96,19 +96,25 @@ fi # templates and other useful files in the ``files`` subdirectory FILES=$TOP_DIR/files if [ ! -d $FILES ]; then - die $LINENO "missing devstack/files" + set +o xtrace + echo "missing devstack/files" + exit 1 fi # ``stack.sh`` keeps function libraries here # Make sure ``$TOP_DIR/inc`` directory is present if [ ! -d $TOP_DIR/inc ]; then - die $LINENO "missing devstack/inc" + set +o xtrace + echo "missing devstack/inc" + exit 1 fi # ``stack.sh`` keeps project libraries here # Make sure ``$TOP_DIR/lib`` directory is present if [ ! -d $TOP_DIR/lib ]; then - die $LINENO "missing devstack/lib" + set +o xtrace + echo "missing devstack/lib" + exit 1 fi # Check if run in POSIX shell From b4bba2f2c817dd5c7594e8c7950021969704db5d Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Thu, 4 Feb 2021 23:24:17 +0000 Subject: [PATCH 010/574] Increase volumes quota for service project If we are backing glance with cinder, we will use more volumes and if timing is right, we will clash with other tests and be unable to create what we need. 
If we are backing glance with cinder, we should increase the volumes quota, which this patch does (to 50 from a default of 10). Closes-Bug: #1914665 Change-Id: I2ad1c4d21f996ee1a9ce29ba4f1a4b8f5720f8fb --- lib/cinder | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/lib/cinder b/lib/cinder index 6c97e114a6..33deff61f2 100644 --- a/lib/cinder +++ b/lib/cinder @@ -539,6 +539,14 @@ function create_volume_types { OS_USER_ID=$OS_USERNAME OS_PROJECT_ID=$OS_PROJECT_NAME cinder --os-auth-type noauth --os-endpoint=$cinder_url type-key ${be_name} set volume_backend_name=${be_name} fi done + + # Increase quota for the service project if glance is using cinder, + # since it's likely to occasionally go above the default 10 in parallel + # test execution. + if [[ "$USE_CINDER_FOR_GLANCE" == "True" ]]; then + openstack --os-region-name="$REGION_NAME" \ + quota set --volumes 50 "$SERVICE_PROJECT_NAME" + fi fi } From a2273cc4c86348d0dd17ff8c64b2f1edeb620225 Mon Sep 17 00:00:00 2001 From: Flavio Fernandes Date: Sat, 6 Feb 2021 16:23:36 -0500 Subject: [PATCH 011/574] [OVN] Support for network-logging config This patchset adds configuration support for network logging when the OVN driver is enabled. Depends-On: https://review.opendev.org/768129 Change-Id: I6fc0973bedfd1dcc72b01981cd64f9283662d37c Signed-off-by: Flavio Fernandes --- lib/neutron_plugins/ovn_agent | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index b661f593a4..f2baf4a08d 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -149,6 +149,9 @@ Q_ML2_PLUGIN_EXT_DRIVERS=${Q_ML2_PLUGIN_EXT_DRIVERS:-port_security,qos} # this one allows empty: ML2_L3_PLUGIN=${ML2_L3_PLUGIN-"ovn-router"} +Q_LOG_DRIVER_RATE_LIMIT=${Q_LOG_DRIVER_RATE_LIMIT:-100} +Q_LOG_DRIVER_BURST_LIMIT=${Q_LOG_DRIVER_BURST_LIMIT:-25} +Q_LOG_DRIVER_LOG_BASE=${Q_LOG_DRIVER_LOG_BASE:-acl_log_meter} # Utility Functions # ----------------- @@ -490,6 +493,12 @@ function configure_ovn_plugin { populate_ml2_config /$Q_PLUGIN_CONF_FILE securitygroup enable_security_group="$Q_USE_SECGROUP" inicomment /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver + if is_service_enabled q-log neutron-log; then + populate_ml2_config /$Q_PLUGIN_CONF_FILE network_log rate_limit="$Q_LOG_DRIVER_RATE_LIMIT" + populate_ml2_config /$Q_PLUGIN_CONF_FILE network_log burst_limit="$Q_LOG_DRIVER_BURST_LIMIT" + inicomment /$Q_PLUGIN_CONF_FILE network_log local_output_log_base="$Q_LOG_DRIVER_LOG_BASE" + fi + if is_service_enabled q-ovn-metadata-agent; then populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_metadata_enabled=True else From 30d9bf9a6d8af9590b04caa3757956522f2004d4 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Tue, 19 Jan 2021 12:10:52 -0800 Subject: [PATCH 012/574] Async task support We have a *ton* of stuff in devstack that is very linear, specifically the ten-ish minutes we spend loading osc to run a single API command against something. We also generate configs, sync databases, and other things that use one core of our worker and make our runtime longer than it really needs to be. The idea in this patch is to make it super simple to run some things in the background and then wait for them to finish before proceeding to something that will require them to be done. This avoids the interleaving you would expect by redirecting the async tasks to a log file, and then cat'ing that log file synchronously during the wait operation. The per-task log file remains so it's easier to examine it in isolation. 
Multiple people have reported between 22-30% improvement in the time it takes to stack with this. More can be done, but what is here already makes a significant difference. Change-Id: I270a910b531641b023c13f75dfedca057a1f1031 --- .zuul.yaml | 12 +++ clean.sh | 2 +- extras.d/80-tempest.sh | 3 +- functions | 1 + inc/async | 225 +++++++++++++++++++++++++++++++++++++++++ lib/keystone | 40 ++++---- lib/nova | 29 +++--- stack.sh | 46 ++++++--- unstack.sh | 1 + 9 files changed, 315 insertions(+), 44 deletions(-) create mode 100644 inc/async diff --git a/.zuul.yaml b/.zuul.yaml index c1406716fe..7b0696bcd7 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -598,6 +598,17 @@ nodeset: openstack-single-node-bionic voting: false +- job: + name: devstack-async + parent: tempest-full-py3 + description: Async mode enabled + voting: false + vars: + devstack_localrc: + DEVSTACK_PARALLEL: True + zuul_copy_output: + /opt/stack/async: logs + - job: name: devstack-platform-fedora-latest parent: tempest-full-py3 @@ -690,6 +701,7 @@ - devstack-platform-fedora-latest - devstack-platform-centos-8 - devstack-platform-bionic + - devstack-async - devstack-multinode - devstack-unit-tests - openstack-tox-bashate diff --git a/clean.sh b/clean.sh index 4cebf1d9ea..870dfd4313 100755 --- a/clean.sh +++ b/clean.sh @@ -113,7 +113,7 @@ cleanup_rpc_backend cleanup_database # Clean out data and status -sudo rm -rf $DATA_DIR $DEST/status +sudo rm -rf $DATA_DIR $DEST/status $DEST/async # Clean out the log file and log directories if [[ -n "$LOGFILE" ]] && [[ -f "$LOGFILE" ]]; then diff --git a/extras.d/80-tempest.sh b/extras.d/80-tempest.sh index 15ecfe39eb..06c73ec763 100644 --- a/extras.d/80-tempest.sh +++ b/extras.d/80-tempest.sh @@ -6,7 +6,7 @@ if is_service_enabled tempest; then source $TOP_DIR/lib/tempest elif [[ "$1" == "stack" && "$2" == "install" ]]; then echo_summary "Installing Tempest" - install_tempest + async_runfunc install_tempest elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then # Tempest config must come after layer 2 services are running : @@ -17,6 +17,7 @@ if is_service_enabled tempest; then # local.conf Tempest option overrides : elif [[ "$1" == "stack" && "$2" == "test-config" ]]; then + async_wait install_tempest echo_summary "Initializing Tempest" configure_tempest echo_summary "Installing Tempest Plugins" diff --git a/functions b/functions index fc87a5512d..89bbab2085 100644 --- a/functions +++ b/functions @@ -21,6 +21,7 @@ source ${FUNC_DIR}/inc/ini-config source ${FUNC_DIR}/inc/meta-config source ${FUNC_DIR}/inc/python source ${FUNC_DIR}/inc/rootwrap +source ${FUNC_DIR}/inc/async # Save trace setting _XTRACE_FUNCTIONS=$(set +o | grep xtrace) diff --git a/inc/async b/inc/async new file mode 100644 index 0000000000..d29168f2f5 --- /dev/null +++ b/inc/async @@ -0,0 +1,225 @@ +#!/bin/bash +# +# Symbolic asynchronous tasks for devstack +# +# Usage: +# +# async_runfunc my_shell_func foo bar baz +# +# ... do other stuff ... +# +# async_wait my_shell_func +# + +DEVSTACK_PARALLEL=$(trueorfalse False DEVSTACK_PARALLEL) +_ASYNC_BG_TIME=0 + +# Keep track of how much total time was spent in background tasks +# Takes a job runtime in ms. +function _async_incr_bg_time { + local elapsed_ms="$1" + _ASYNC_BG_TIME=$(($_ASYNC_BG_TIME + $elapsed_ms)) +} + +# Get the PID of a named future to wait on +function async_pidof { + local name="$1" + local inifile="${DEST}/async/${name}.ini" + + if [ -f "$inifile" ]; then + iniget $inifile job pid + else + echo 'UNKNOWN' + return 1 + fi +} + +# Log a message about a job. 
If the message contains "%command" then the +# full command line of the job will be substituted in the output +function async_log { + local name="$1" + shift + local message="$*" + local inifile=${DEST}/async/${name}.ini + local pid + local command + + pid=$(iniget $inifile job pid) + command=$(iniget $inifile job command | tr '#' '-') + message=$(echo "$message" | sed "s#%command#$command#g") + + echo "[Async ${name}:${pid}]: $message" +} + +# Inner function that actually runs the requested task. We wrap it like this +# just so we can emit a finish message as soon as the work is done, to make +# it easier to find the tracking just before an error. +function async_inner { + local name="$1" + local rc + shift + set -o xtrace + if $* >${DEST}/async/${name}.log 2>&1; then + rc=0 + set +o xtrace + async_log "$name" "finished successfully" + else + rc=$? + set +o xtrace + async_log "$name" "FAILED with rc $rc" + fi + iniset ${DEST}/async/${name}.ini job end_time $(date "+%s%3N") + return $rc +} + +# Run something async. Takes a symbolic name and a list of arguments of +# what to run. Ideally this would be rarely used and async_runfunc() would +# be used everywhere for readability. +# +# This spawns the work in a background worker, records a "future" to be +# collected by a later call to async_wait() +function async_run { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + + local name="$1" + shift + local inifile=${DEST}/async/${name}.ini + + touch $inifile + iniset $inifile job command "$*" + iniset $inifile job start_time $(date +%s%3N) + + if [[ "$DEVSTACK_PARALLEL" = "True" ]]; then + async_inner $name $* & + iniset $inifile job pid $! + async_log "$name" "running: %command" + $xtrace + else + iniset $inifile job pid "self" + async_log "$name" "Running synchronously: %command" + $xtrace + $* + return $? + fi +} + +# Shortcut for running a shell function async. Uses the function name as the +# async name. +function async_runfunc { + async_run $1 $* +} + +# Wait for an async future to complete. May return immediately if already +# complete, or of the future has already been waited on (avoid this). May +# block until the future completes. +function async_wait { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + + local pid rc running inifile runtime + rc=0 + for name in $*; do + running=$(ls ${DEST}/async/*.ini 2>/dev/null | wc -l) + inifile="${DEST}/async/${name}.ini" + + if pid=$(async_pidof "$name"); then + async_log "$name" "Waiting for completion of %command" \ + "($running other jobs running)" + time_start async_wait + if [[ "$pid" != "self" ]]; then + # Do not actually call wait if we ran synchronously + if wait $pid; then + rc=0 + else + rc=$? + fi + cat ${DEST}/async/${name}.log + fi + time_stop async_wait + local start_time + local end_time + start_time=$(iniget $inifile job start_time) + end_time=$(iniget $inifile job end_time) + _async_incr_bg_time $(($end_time - $start_time)) + runtime=$((($end_time - $start_time) / 1000)) + async_log "$name" "finished %command with result" \ + "$rc in $runtime seconds" + rm -f $inifile + if [ $rc -ne 0 ]; then + echo Stopping async wait due to error: $* + break + fi + else + # This could probably be removed - it is really just here + # to help notice if you wait for something by the wrong + # name, but it also shows up for things we didn't start + # because they were not enabled. 
+ echo Not waiting for async task $name that we never started or \ + has already been waited for + fi + done + + $xtrace + return $rc +} + +# Check for uncollected futures and wait on them +function async_cleanup { + local name + + if [[ "$DEVSTACK_PARALLEL" != "True" ]]; then + return 0 + fi + + for inifile in $(find ${DEST}/async -name '*.ini'); do + name=$(basename $pidfile .ini) + echo "WARNING: uncollected async future $name" + async_wait $name || true + done +} + +# Make sure our async dir is created and clean +function async_init { + local async_dir=${DEST}/async + + # Clean any residue if present from previous runs + rm -Rf $async_dir + + # Make sure we have a state directory + mkdir -p $async_dir +} + +function async_print_timing { + local bg_time_minus_wait + local elapsed_time + local serial_time + local speedup + + if [[ "$DEVSTACK_PARALLEL" != "True" ]]; then + return 0 + fi + + # The logic here is: All the background task time would be + # serialized if we did not do them in the background. So we can + # add that to the elapsed time for the whole run. However, time we + # spend waiting for async things to finish adds to the elapsed + # time, but is time where we're not doing anything useful. Thus, + # we substract that from the would-be-serialized time. + + bg_time_minus_wait=$((\ + ($_ASYNC_BG_TIME - ${_TIME_TOTAL[async_wait]}) / 1000)) + elapsed_time=$(($(date "+%s") - $_TIME_BEGIN)) + serial_time=$(($elapsed_time + $bg_time_minus_wait)) + + echo + echo "=================" + echo " Async summary" + echo "=================" + echo " Time spent in the background minus waits: $bg_time_minus_wait sec" + echo " Elapsed time: $elapsed_time sec" + echo " Time if we did everything serially: $serial_time sec" + echo " Speedup: " $(echo | awk "{print $serial_time / $elapsed_time}") +} diff --git a/lib/keystone b/lib/keystone index d4c7b063bb..66e867ca68 100644 --- a/lib/keystone +++ b/lib/keystone @@ -318,25 +318,25 @@ function create_keystone_accounts { local admin_role="admin" local member_role="member" - get_or_add_user_domain_role $admin_role $admin_user default + async_run ks-domain-role get_or_add_user_domain_role $admin_role $admin_user default # Create service project/role get_or_create_domain "$SERVICE_DOMAIN_NAME" - get_or_create_project "$SERVICE_PROJECT_NAME" "$SERVICE_DOMAIN_NAME" + async_run ks-project get_or_create_project "$SERVICE_PROJECT_NAME" "$SERVICE_DOMAIN_NAME" # Service role, so service users do not have to be admins - get_or_create_role service + async_run ks-service get_or_create_role service # The ResellerAdmin role is used by Nova and Ceilometer so we need to keep it. # The admin role in swift allows a user to act as an admin for their project, # but ResellerAdmin is needed for a user to act as any project. The name of this # role is also configurable in swift-proxy.conf - get_or_create_role ResellerAdmin + async_run ks-reseller get_or_create_role ResellerAdmin # another_role demonstrates that an arbitrary role may be created and used # TODO(sleepsonthefloor): show how this can be used for rbac in the future! 
local another_role="anotherrole" - get_or_create_role $another_role + async_run ks-anotherrole get_or_create_role $another_role # invisible project - admin can't see this one local invis_project @@ -349,10 +349,12 @@ function create_keystone_accounts { demo_user=$(get_or_create_user "demo" \ "$ADMIN_PASSWORD" "default" "demo@example.com") - get_or_add_user_project_role $member_role $demo_user $demo_project - get_or_add_user_project_role $admin_role $admin_user $demo_project - get_or_add_user_project_role $another_role $demo_user $demo_project - get_or_add_user_project_role $member_role $demo_user $invis_project + async_wait ks-{domain-role,domain,project,service,reseller,anotherrole} + + async_run ks-demo-member get_or_add_user_project_role $member_role $demo_user $demo_project + async_run ks-demo-admin get_or_add_user_project_role $admin_role $admin_user $demo_project + async_run ks-demo-another get_or_add_user_project_role $another_role $demo_user $demo_project + async_run ks-demo-invis get_or_add_user_project_role $member_role $demo_user $invis_project # alt_demo local alt_demo_project @@ -361,9 +363,9 @@ function create_keystone_accounts { alt_demo_user=$(get_or_create_user "alt_demo" \ "$ADMIN_PASSWORD" "default" "alt_demo@example.com") - get_or_add_user_project_role $member_role $alt_demo_user $alt_demo_project - get_or_add_user_project_role $admin_role $admin_user $alt_demo_project - get_or_add_user_project_role $another_role $alt_demo_user $alt_demo_project + async_run ks-alt-member get_or_add_user_project_role $member_role $alt_demo_user $alt_demo_project + async_run ks-alt-admin get_or_add_user_project_role $admin_role $admin_user $alt_demo_project + async_run ks-alt-another get_or_add_user_project_role $another_role $alt_demo_user $alt_demo_project # groups local admin_group @@ -373,11 +375,15 @@ function create_keystone_accounts { non_admin_group=$(get_or_create_group "nonadmins" \ "default" "non-admin group") - get_or_add_group_project_role $member_role $non_admin_group $demo_project - get_or_add_group_project_role $another_role $non_admin_group $demo_project - get_or_add_group_project_role $member_role $non_admin_group $alt_demo_project - get_or_add_group_project_role $another_role $non_admin_group $alt_demo_project - get_or_add_group_project_role $admin_role $admin_group $admin_project + async_run ks-group-memberdemo get_or_add_group_project_role $member_role $non_admin_group $demo_project + async_run ks-group-anotherdemo get_or_add_group_project_role $another_role $non_admin_group $demo_project + async_run ks-group-memberalt get_or_add_group_project_role $member_role $non_admin_group $alt_demo_project + async_run ks-group-anotheralt get_or_add_group_project_role $another_role $non_admin_group $alt_demo_project + async_run ks-group-admin get_or_add_group_project_role $admin_role $admin_group $admin_project + + async_wait ks-demo-{member,admin,another,invis} + async_wait ks-alt-{member,admin,another} + async_wait ks-group-{memberdemo,anotherdemo,memberalt,anotheralt,admin} if is_service_enabled ldap; then create_ldap_domain diff --git a/lib/nova b/lib/nova index d7426039c4..0a28cd97aa 100644 --- a/lib/nova +++ b/lib/nova @@ -741,31 +741,36 @@ function create_nova_keys_dir { sudo install -d -o $STACK_USER ${NOVA_STATE_PATH} ${NOVA_STATE_PATH}/keys } +function init_nova_db { + local dbname="$1" + local conffile="$2" + recreate_database $dbname + $NOVA_BIN_DIR/nova-manage --config-file $conffile db sync --local_cell +} + # init_nova() - Initialize databases, etc. 
function init_nova { # All nova components talk to a central database. # Only do this step once on the API node for an entire cluster. if is_service_enabled $DATABASE_BACKENDS && is_service_enabled n-api; then + # (Re)create nova databases + async_run nova-cell-0 init_nova_db nova_cell0 $NOVA_CONF + for i in $(seq 1 $NOVA_NUM_CELLS); do + async_run nova-cell-$i init_nova_db nova_cell${i} $(conductor_conf $i) + done + recreate_database $NOVA_API_DB $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF api_db sync - recreate_database nova_cell0 - # map_cell0 will create the cell mapping record in the nova_api DB so - # this needs to come after the api_db sync happens. We also want to run - # this before the db sync below since that will migrate both the nova - # and nova_cell0 databases. + # this needs to come after the api_db sync happens. $NOVA_BIN_DIR/nova-manage cell_v2 map_cell0 --database_connection `database_connection_url nova_cell0` - # (Re)create nova databases - for i in $(seq 1 $NOVA_NUM_CELLS); do - recreate_database nova_cell${i} - $NOVA_BIN_DIR/nova-manage --config-file $(conductor_conf $i) db sync --local_cell + # Wait for DBs to finish from above + for i in $(seq 0 $NOVA_NUM_CELLS); do + async_wait nova-cell-$i done - # Migrate nova and nova_cell0 databases. - $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF db sync - # Run online migrations on the new databases # Needed for flavor conversion $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF db online_data_migrations diff --git a/stack.sh b/stack.sh index 036afd7b00..dcfef6db48 100755 --- a/stack.sh +++ b/stack.sh @@ -330,6 +330,9 @@ if [[ ! -d $DATA_DIR ]]; then safe_chmod 0755 $DATA_DIR fi +# Create and/or clean the async state directory +async_init + # Configure proper hostname # Certain services such as rabbitmq require that the local hostname resolves # correctly. Make sure it exists in /etc/hosts so that is always true. 
@@ -1082,19 +1085,19 @@ if is_service_enabled keystone; then create_keystone_accounts if is_service_enabled nova; then - create_nova_accounts + async_runfunc create_nova_accounts fi if is_service_enabled glance; then - create_glance_accounts + async_runfunc create_glance_accounts fi if is_service_enabled cinder; then - create_cinder_accounts + async_runfunc create_cinder_accounts fi if is_service_enabled neutron; then - create_neutron_accounts + async_runfunc create_neutron_accounts fi if is_service_enabled swift; then - create_swift_accounts + async_runfunc create_swift_accounts fi fi @@ -1107,9 +1110,11 @@ write_clouds_yaml if is_service_enabled horizon; then echo_summary "Configuring Horizon" - configure_horizon + async_runfunc configure_horizon fi +async_wait create_nova_accounts create_glance_accounts create_cinder_accounts +async_wait create_neutron_accounts create_swift_accounts configure_horizon # Glance # ------ @@ -1117,7 +1122,7 @@ fi # NOTE(yoctozepto): limited to node hosting the database which is the controller if is_service_enabled $DATABASE_BACKENDS && is_service_enabled glance; then echo_summary "Configuring Glance" - init_glance + async_runfunc init_glance fi @@ -1131,7 +1136,7 @@ if is_service_enabled neutron; then # Run init_neutron only on the node hosting the Neutron API server if is_service_enabled $DATABASE_BACKENDS && is_service_enabled neutron; then - init_neutron + async_runfunc init_neutron fi fi @@ -1161,7 +1166,7 @@ fi if is_service_enabled swift; then echo_summary "Configuring Swift" - init_swift + async_runfunc init_swift fi @@ -1170,7 +1175,7 @@ fi if is_service_enabled cinder; then echo_summary "Configuring Cinder" - init_cinder + async_runfunc init_cinder fi # Placement Service @@ -1178,9 +1183,16 @@ fi if is_service_enabled placement; then echo_summary "Configuring placement" - init_placement + async_runfunc init_placement fi +# Wait for neutron and placement before starting nova +async_wait init_neutron +async_wait init_placement +async_wait init_glance +async_wait init_swift +async_wait init_cinder + # Compute Service # --------------- @@ -1192,7 +1204,7 @@ if is_service_enabled nova; then # TODO(stephenfin): Is it possible for neutron to *not* be enabled now? 
If # not, remove the if here if is_service_enabled neutron; then - configure_neutron_nova + async_runfunc configure_neutron_nova fi fi @@ -1236,6 +1248,8 @@ if is_service_enabled cinder; then iniset $CINDER_CONF key_manager fixed_key "$FIXED_KEY" fi +async_wait configure_neutron_nova + # Launch the nova-api and wait for it to answer before continuing if is_service_enabled n-api; then echo_summary "Starting Nova API" @@ -1282,7 +1296,7 @@ fi if is_service_enabled nova; then echo_summary "Starting Nova" start_nova - create_flavors + async_runfunc create_flavors fi if is_service_enabled cinder; then echo_summary "Starting Cinder" @@ -1331,6 +1345,8 @@ if is_service_enabled horizon; then start_horizon fi +async_wait create_flavors + # Create account rc files # ======================= @@ -1467,8 +1483,12 @@ else exec 1>&3 fi +# Make sure we didn't leak any background tasks +async_cleanup + # Dump out the time totals time_totals +async_print_timing # Using the cloud # =============== diff --git a/unstack.sh b/unstack.sh index 3197cf136f..d9dca7c107 100755 --- a/unstack.sh +++ b/unstack.sh @@ -184,3 +184,4 @@ if is_service_enabled cinder && is_package_installed lvm2; then fi clean_pyc_files +rm -Rf $DEST/async From e11d367d8e31a4875301e2e890fa8ffede270ec2 Mon Sep 17 00:00:00 2001 From: Lee Yarwood Date: Wed, 30 Sep 2020 13:06:39 +0100 Subject: [PATCH 013/574] orchestrate-devstack: Copy controller ceph.conf and keyrings to subnode This change introduces a basic role to copy the contents of /etc/ceph between the controller and subnodes during orchestrate-devstack allowing a multinode ceph job to be introduced by I9ffdff44a3ad42ebdf26ab72e24dfe3b12b1ef8b. Note that this role is only used when devstack-plugin-ceph is enabled. Change-Id: I324c0f35db34f8540ca164bf8c6e3dea67c5b1b4 --- roles/orchestrate-devstack/tasks/main.yaml | 5 +++++ .../sync-controller-ceph-conf-and-keys/README.rst | 3 +++ .../tasks/main.yaml | 15 +++++++++++++++ 3 files changed, 23 insertions(+) create mode 100644 roles/sync-controller-ceph-conf-and-keys/README.rst create mode 100644 roles/sync-controller-ceph-conf-and-keys/tasks/main.yaml diff --git a/roles/orchestrate-devstack/tasks/main.yaml b/roles/orchestrate-devstack/tasks/main.yaml index f747943f3c..2b8ae01a62 100644 --- a/roles/orchestrate-devstack/tasks/main.yaml +++ b/roles/orchestrate-devstack/tasks/main.yaml @@ -18,6 +18,11 @@ name: sync-devstack-data when: devstack_services['tls-proxy']|default(false) + - name: Sync controller ceph.conf and key rings to subnode + include_role: + name: sync-controller-ceph-conf-and-keys + when: devstack_plugins is defined and 'devstack-plugin-ceph' in devstack_plugins + - name: Run devstack on the sub-nodes include_role: name: run-devstack diff --git a/roles/sync-controller-ceph-conf-and-keys/README.rst b/roles/sync-controller-ceph-conf-and-keys/README.rst new file mode 100644 index 0000000000..e3d2bb42a4 --- /dev/null +++ b/roles/sync-controller-ceph-conf-and-keys/README.rst @@ -0,0 +1,3 @@ +Sync ceph config and keys between controller and subnodes + +Simply copy the contents of /etc/ceph on the controller to subnodes. 
diff --git a/roles/sync-controller-ceph-conf-and-keys/tasks/main.yaml b/roles/sync-controller-ceph-conf-and-keys/tasks/main.yaml new file mode 100644 index 0000000000..71ece579e6 --- /dev/null +++ b/roles/sync-controller-ceph-conf-and-keys/tasks/main.yaml @@ -0,0 +1,15 @@ +- name: Ensure /etc/ceph exists on subnode + become: true + file: + path: /etc/ceph + state: directory + +- name: Copy /etc/ceph from controller to subnode + become: true + synchronize: + owner: yes + group: yes + perms: yes + src: /etc/ceph/ + dest: /etc/ceph/ + delegate_to: controller From b516efedf973d290c22c9279cf83d2dd47dc37fc Mon Sep 17 00:00:00 2001 From: Lee Yarwood Date: Mon, 15 Feb 2021 10:11:43 +0000 Subject: [PATCH 014/574] nova: Default NOVA_USE_SERVICE_TOKEN to True Introduced in devstack by I2d7348c4a72af96c0ed2ef6c0ab75d16e9aec8fc and long tested by nova-next this enabled by most deployment tools by default now and should be enabled by default in devstack. Change-Id: Ia76b96fe87d99560db947a59cd0660aab9b05335 --- lib/nova | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/nova b/lib/nova index 0a28cd97aa..1999753a8c 100644 --- a/lib/nova +++ b/lib/nova @@ -135,7 +135,7 @@ fi # ``NOVA_USE_SERVICE_TOKEN`` is a mode where service token is passed along with # user token while communicating to external RESP API's like Neutron, Cinder # and Glance. -NOVA_USE_SERVICE_TOKEN=$(trueorfalse False NOVA_USE_SERVICE_TOKEN) +NOVA_USE_SERVICE_TOKEN=$(trueorfalse True NOVA_USE_SERVICE_TOKEN) # ``NOVA_ALLOW_MOVE_TO_SAME_HOST`` can be set to False in multi node DevStack, # where there are at least two nova-computes. From 57b092dbceb95ed03f8d33f64a5cc60eabd57e50 Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Mon, 8 Feb 2021 11:37:38 -0600 Subject: [PATCH 015/574] Stop configure 'member' role in tempest_roles Config option auth.tempest_roles is used to set the extra roles to all dynamic cred tests users. - https://opendev.org/openstack/tempest/src/commit/9b6f441fdc2a970410ea631dc1318896349e010f/tempest/common/credentials_factory.py#L82 Devstack set the 'member' role in CONF.auth.tempest_roles - https://opendev.org/openstack/devstack/src/commit/556f84aea90c572873fc9834292635b41e590224/lib/tempest#L628 This cause issue if any tests testing for speciifc rols and want to exclude the 'member' role, basically this bug - https://bugs.launchpad.net/devstack/+bug/1915740 Also with 'member' role assigned by default, Tempest will not be able to test the secure RBAC new default 'reader' role. Let's remove this role assignment now and let test congfigure what they want. 
Closes-Bug: #1915740 Change-Id: I0b6ab9fb943c7b0925a0a0d2490a8bcdfa76cedc --- lib/tempest | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index 8ee986d555..04540e5ea8 100644 --- a/lib/tempest +++ b/lib/tempest @@ -625,7 +625,6 @@ function configure_tempest { rm -f $tmp_u_c_m # Auth: - iniset $TEMPEST_CONFIG auth tempest_roles "member" if [[ $TEMPEST_USE_TEST_ACCOUNTS == "True" ]]; then if [[ $TEMPEST_HAS_ADMIN == "True" ]]; then tox -evenv-tempest -- tempest account-generator -c $TEMPEST_CONFIG --os-username $admin_username --os-password "$password" --os-project-name $admin_project_name -r $TEMPEST_CONCURRENCY --with-admin etc/accounts.yaml From 48b7633ae84c5be77c6415d7f95ca696e4c0a2b6 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Tue, 16 Feb 2021 14:14:23 -0800 Subject: [PATCH 016/574] Fix nova db dependency When I reordered the nova database creation for better performance and cleaner arrangement, I broke the non-standard arrangement where the super and cell conductors are squashed together. In devstack, this is implemented by pointing the controllers at cell1 in the config, which makes it hard to create and sync the databases in the natural order. This manifested in a failure when running in this mode (which apparently Trove is). As a quick fix, this special-cases the setup for cell0 if that mode is enabled. I will follow this up with a cleaner refactor of all that stuff so this hack isn't required, but that will take a bit longer. Change-Id: I5385157c281beb041bf67cba546be20cf9497cbe --- lib/nova | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/lib/nova b/lib/nova index 0a28cd97aa..6913040d26 100644 --- a/lib/nova +++ b/lib/nova @@ -754,7 +754,17 @@ function init_nova { # Only do this step once on the API node for an entire cluster. if is_service_enabled $DATABASE_BACKENDS && is_service_enabled n-api; then # (Re)create nova databases - async_run nova-cell-0 init_nova_db nova_cell0 $NOVA_CONF + if [[ "$CELLSV2_SETUP" == "singleconductor" ]]; then + # If we are doing singleconductor mode, we have some strange + # interdependencies. in that the main config refers to cell1 + # instead of cell0. In that case, just make sure the cell0 database + # is created before we need it below, but don't db_sync it until + # after the cellN databases are there. + recreate_database nova_cell0 + else + async_run nova-cell-0 init_nova_db nova_cell0 $NOVA_CONF + fi + for i in $(seq 1 $NOVA_NUM_CELLS); do async_run nova-cell-$i init_nova_db nova_cell${i} $(conductor_conf $i) done @@ -771,6 +781,11 @@ function init_nova { async_wait nova-cell-$i done + if [[ "$CELLSV2_SETUP" == "singleconductor" ]]; then + # We didn't db sync cell0 above, so run it now + $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF db sync + fi + # Run online migrations on the new databases # Needed for flavor conversion $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF db online_data_migrations From f361122798b9e3163790bee81abfa0486746fa8a Mon Sep 17 00:00:00 2001 From: Pierre Riteau Date: Wed, 17 Feb 2021 17:43:13 +0100 Subject: [PATCH 017/574] Fix DevStack setup on CentOS 8.3 CentOS 8.3 changed the name of the PowerTools repository to powertools: https://wiki.centos.org/Manuals/ReleaseNotes/CentOS8.2011#Yum_repo_file_and_repoid_changes With this repository disabled, DevStack fails to install libyaml-devel, which causes a failure to install many packages. In my environment DevStack stopped with an error caused by a missing wget. 
Keep the command using the old repository name, for compatibility with older CentOS releases. Change-Id: I5541a8aee8467abf10ce8a10d770618bdd693f02 --- stack.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/stack.sh b/stack.sh index d3c1476429..6375c8e5e0 100755 --- a/stack.sh +++ b/stack.sh @@ -365,6 +365,9 @@ if [[ $DISTRO == "rhel8" ]]; then # EPEL packages assume that the PowerTools repository is enable. sudo dnf config-manager --set-enabled PowerTools + # CentOS 8.3 changed the repository name to lower case. + sudo dnf config-manager --set-enabled powertools + if [[ ${SKIP_EPEL_INSTALL} != True ]]; then _install_epel fi From 3bdc8f66ad243f7487ba494e6a71f63c4965413a Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Tue, 9 Feb 2021 12:56:34 -0600 Subject: [PATCH 018/574] Add a variable to configure the Tempest venv upper constraints We use Tempest master for testing the supported stable branches so using master upper constraints works fine but when we need to use old Tempest in the below cases then master upper constraints do not work and devstack will not be able to install Tempest in vnenv: - Testing Extended Maintenance branch - Testing py2.7 jobs until stable/train with in-tree tempest plugins This commit adds a variable to set the compatible upper constraint to use for Tempest's old version. Few of the current failure which can be fixed by this new configurable var: - networking-generic-switch-tempest-dlm-python2 - https://zuul.opendev.org/t/openstack/build/ebcf3d68d62c4af3a43a222aa9ce5556 - devstack-platform-xenial on stable/steinand stable/train - https://zuul.opendev.org/t/openstack/build/37ffc1af6f3f4b44b5ca8cbfa27068ac Change-Id: I5b2217d85e6871ca3f7a3f6f859fdce9a50d3946 --- lib/tempest | 28 +++++++++++++++++++++++----- stackrc | 1 + 2 files changed, 24 insertions(+), 5 deletions(-) diff --git a/lib/tempest b/lib/tempest index 8ee986d555..77197c2a28 100644 --- a/lib/tempest +++ b/lib/tempest @@ -111,6 +111,21 @@ function image_size_in_gib { echo $size | python3 -c "import math; print(int(math.ceil(float(int(input()) / 1024.0 ** 3))))" } +function set_tempest_venv_constraints { + local tmp_c + tmp_c=$1 + if [[ $TEMPEST_VENV_UPPER_CONSTRAINTS == "master" ]]; then + (cd $REQUIREMENTS_DIR && git show origin/master:upper-constraints.txt) > $tmp_c + else + echo "Using $TEMPEST_VENV_UPPER_CONSTRAINTS constraints in Tempest virtual env." + cat $TEMPEST_VENV_UPPER_CONSTRAINTS > $tmp_c + # NOTE: setting both tox env var and once Tempest start using new var + # TOX_CONSTRAINTS_FILE then we can remove the old one. + export UPPER_CONSTRAINTS_FILE=$TEMPEST_VENV_UPPER_CONSTRAINTS + export TOX_CONSTRAINTS_FILE=$TEMPEST_VENV_UPPER_CONSTRAINTS + fi +} + # configure_tempest() - Set config files, create data dirs, etc function configure_tempest { if [[ "$INSTALL_TEMPEST" == "True" ]]; then @@ -617,10 +632,9 @@ function configure_tempest { tox -revenv-tempest --notest fi - # The requirements might be on a different branch, while tempest needs master requirements. local tmp_u_c_m tmp_u_c_m=$(mktemp -t tempest_u_c_m.XXXXXXXXXX) - (cd $REQUIREMENTS_DIR && git show origin/master:upper-constraints.txt) > $tmp_u_c_m + set_tempest_venv_constraints $tmp_u_c_m tox -evenv-tempest -- pip install -c $tmp_u_c_m -r requirements.txt rm -f $tmp_u_c_m @@ -702,6 +716,10 @@ function install_tempest { # TEMPEST_DIR already exist until RECLONE is true. 
git checkout $TEMPEST_BRANCH + local tmp_u_c_m + tmp_u_c_m=$(mktemp -t tempest_u_c_m.XXXXXXXXXX) + set_tempest_venv_constraints $tmp_u_c_m + tox -r --notest -efull # TODO: remove the trailing pip constraint when a proper fix # arrives for bug https://bugs.launchpad.net/devstack/+bug/1906322 @@ -709,8 +727,9 @@ function install_tempest { # NOTE(mtreinish) Respect constraints in the tempest full venv, things that # are using a tox job other than full will not be respecting constraints but # running pip install -U on tempest requirements - $TEMPEST_DIR/.tox/tempest/bin/pip install -c $REQUIREMENTS_DIR/upper-constraints.txt -r requirements.txt + $TEMPEST_DIR/.tox/tempest/bin/pip install -c $tmp_u_c_m -r requirements.txt PROJECT_VENV["tempest"]=${TEMPEST_DIR}/.tox/tempest + rm -f $tmp_u_c_m popd } @@ -718,10 +737,9 @@ function install_tempest { function install_tempest_plugins { pushd $TEMPEST_DIR if [[ $TEMPEST_PLUGINS != 0 ]] ; then - # The requirements might be on a different branch, while tempest & tempest plugins needs master requirements. local tmp_u_c_m tmp_u_c_m=$(mktemp -t tempest_u_c_m.XXXXXXXXXX) - (cd $REQUIREMENTS_DIR && git show origin/master:upper-constraints.txt) > $tmp_u_c_m + set_tempest_venv_constraints $tmp_u_c_m tox -evenv-tempest -- pip install -c $tmp_u_c_m $TEMPEST_PLUGINS rm -f $tmp_u_c_m echo "Checking installed Tempest plugins:" diff --git a/stackrc b/stackrc index a36f8970e6..244acbbbb0 100644 --- a/stackrc +++ b/stackrc @@ -298,6 +298,7 @@ REQUIREMENTS_BRANCH=${REQUIREMENTS_BRANCH:-$TARGET_BRANCH} # Tempest test suite TEMPEST_REPO=${TEMPEST_REPO:-${GIT_BASE}/openstack/tempest.git} TEMPEST_BRANCH=${TEMPEST_BRANCH:-$BRANCHLESS_TARGET_BRANCH} +TEMPEST_VENV_UPPER_CONSTRAINTS=${TEMPEST_VENV_UPPER_CONSTRAINTS:-master} ############## From 8903d8c1e211607ce6bf86ff974f90717e8e2cac Mon Sep 17 00:00:00 2001 From: Lucas Alvares Gomes Date: Fri, 15 Jan 2021 09:26:44 +0000 Subject: [PATCH 019/574] [OVN] Fix Fedora/CentOS OVN configuration When installing OVN from packages, the rpm for Fedora / CentOS pre set some configurations that conflicts with the post configuration done by DevStack. This patch fixes this problem by erasing the pre-set configuration from the packages and leaving it to DevStack to configure OVN for its use (just like we would do when compiling it from source). Change-Id: I9c18023c9aa79c0633748a6169f4f283e9d74ef0 Signed-off-by: Lucas Alvares Gomes --- lib/neutron_plugins/ovn_agent | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index b661f593a4..0a8ca9761f 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -529,6 +529,14 @@ function configure_ovn { fi fi + # Erase the pre-set configurations from packages. DevStack will + # configure OVS and OVN accordingly for its use. + if [[ "$OVN_BUILD_FROM_SOURCE" == "False" ]] && is_fedora; then + sudo truncate -s 0 /etc/openvswitch/default.conf + sudo truncate -s 0 /etc/sysconfig/openvswitch + sudo truncate -s 0 /etc/sysconfig/ovn + fi + # Metadata if is_service_enabled q-ovn-metadata-agent && is_service_enabled ovn-controller; then sudo install -d -o $STACK_USER $NEUTRON_CONF_DIR From 8f3e51d79f392151023f3853a6c8a3f7b868ecfa Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Tue, 2 Mar 2021 16:18:48 +0000 Subject: [PATCH 020/574] nova: Die if console TLS enabled with tls-proxy We require the 'tls-proxy' service to set up certificates for us. 
Hard fail if 'NOVA_CONSOLE_PROXY_COMPUTE_TLS' is enabled but the 'tls-proxy' service is not. Change-Id: I52fec12b78ecd8f76f835551ccb84dfb1d5b3d8a Signed-off-by: Stephen Finucane --- lib/nova | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/lib/nova b/lib/nova index 6913040d26..28d3ba45d7 100644 --- a/lib/nova +++ b/lib/nova @@ -83,6 +83,11 @@ fi # services and the compute node NOVA_CONSOLE_PROXY_COMPUTE_TLS=${NOVA_CONSOLE_PROXY_COMPUTE_TLS:-False} +# Validate configuration +if ! is_service_enabled tls-proxy && [ "$NOVA_CONSOLE_PROXY_COMPUTE_TLS" == "True" ]; then + die $LINENO "enabling TLS for the console proxy requires the tls-proxy service" +fi + # Public facing bits NOVA_SERVICE_HOST=${NOVA_SERVICE_HOST:-$SERVICE_HOST} NOVA_SERVICE_PORT=${NOVA_SERVICE_PORT:-8774} From f548ce4816b58d7e65d64fc22a1066f1aea63824 Mon Sep 17 00:00:00 2001 From: Akihiro Motoki Date: Thu, 4 Mar 2021 10:31:30 +0900 Subject: [PATCH 021/574] Allow to install os-ken from git repo os-ken is used by neutron ML2/OVS agent. We need to install os-ken from source to test os-ken changes against neutron. We already have tempest-integrated-networking job in os-ken repo but it turns out it consumes os-ken from PyPI :-( Change-Id: Ibcff212591e9fed25f1316403627269d81455b09 --- lib/neutron_plugins/openvswitch_agent | 4 ++++ stackrc | 5 +++++ tests/test_libs_from_pypi.sh | 2 +- 3 files changed, 10 insertions(+), 1 deletion(-) diff --git a/lib/neutron_plugins/openvswitch_agent b/lib/neutron_plugins/openvswitch_agent index 100961196d..7fed8bf853 100644 --- a/lib/neutron_plugins/openvswitch_agent +++ b/lib/neutron_plugins/openvswitch_agent @@ -15,6 +15,10 @@ function neutron_plugin_create_nova_conf { function neutron_plugin_install_agent_packages { _neutron_ovs_base_install_agent_packages + if use_library_from_git "os-ken"; then + git_clone_by_name "os-ken" + setup_dev_lib "os-ken" + fi } function neutron_plugin_configure_dhcp_agent { diff --git a/stackrc b/stackrc index a36f8970e6..2b1511d04f 100644 --- a/stackrc +++ b/stackrc @@ -554,6 +554,11 @@ GITREPO["ovsdbapp"]=${OVSDBAPP_REPO:-${GIT_BASE}/openstack/ovsdbapp.git} GITBRANCH["ovsdbapp"]=${OVSDBAPP_BRANCH:-$TARGET_BRANCH} GITDIR["ovsdbapp"]=$DEST/ovsdbapp +# os-ken used by neutron +GITREPO["os-ken"]=${OS_KEN_REPO:-${GIT_BASE}/openstack/os-ken.git} +GITBRANCH["os-ken"]=${OS_KEN_BRANCH:-$TARGET_BRANCH} +GITDIR["os-ken"]=$DEST/os-ken + ################## # # TripleO / Heat Agent Components diff --git a/tests/test_libs_from_pypi.sh b/tests/test_libs_from_pypi.sh index ab7583d042..5b53389073 100755 --- a/tests/test_libs_from_pypi.sh +++ b/tests/test_libs_from_pypi.sh @@ -44,7 +44,7 @@ ALL_LIBS+=" debtcollector os-brick os-traits automaton futurist oslo.service" ALL_LIBS+=" oslo.cache oslo.reports osprofiler cursive" ALL_LIBS+=" keystoneauth ironic-lib neutron-lib oslo.privsep" ALL_LIBS+=" diskimage-builder os-vif python-brick-cinderclient-ext" -ALL_LIBS+=" castellan python-barbicanclient ovsdbapp" +ALL_LIBS+=" castellan python-barbicanclient ovsdbapp os-ken" # Generate the above list with # echo ${!GITREPO[@]} From 3c6d1059298788d4ce35845fdb1bef2938046702 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Tue, 2 Mar 2021 16:35:47 +0000 Subject: [PATCH 022/574] nova: Remove nova-xvpvncproxy This was removed this service from nova in Ussuri [1]. There's no need to keep this around. 
[1] I2f7f2379d0cd54e4d0a91008ddb44858cfc5a4cf Change-Id: Idc95c6467a8c6e0c0ed07a6458425ff0a10ff995 Signed-off-by: Stephen Finucane --- lib/nova | 34 ++++++++++++++-------------------- 1 file changed, 14 insertions(+), 20 deletions(-) diff --git a/lib/nova b/lib/nova index 28d3ba45d7..caa778060f 100644 --- a/lib/nova +++ b/lib/nova @@ -612,10 +612,10 @@ function configure_console_compute { # can use the NOVA_CPU_CELL variable to know which cell we are for # calculating the offset. # Stagger the offset based on the total number of possible console proxies - # (novnc, xvpvnc, spice, serial) so that their ports will not collide if + # (novnc, spice, serial) so that their ports will not collide if # all are enabled. local offset - offset=$(((NOVA_CPU_CELL - 1) * 4)) + offset=$(((NOVA_CPU_CELL - 1) * 3)) # Use the host IP instead of the service host because for multi-node, the # service host will be the controller only. @@ -623,7 +623,7 @@ function configure_console_compute { default_proxyclient_addr=$(iniget $NOVA_CPU_CONF DEFAULT my_ip) # All nova-compute workers need to know the vnc configuration options - # These settings don't hurt anything if n-xvnc and n-novnc are disabled + # These settings don't hurt anything if n-novnc is disabled if is_service_enabled n-cpu; then if [ "$NOVNC_FROM_PACKAGE" == "True" ]; then # Use the old URL when installing novnc packages. @@ -636,13 +636,11 @@ function configure_console_compute { NOVNCPROXY_URL=${NOVNCPROXY_URL:-"http://$SERVICE_HOST:$((6080 + offset))/vnc_lite.html"} fi iniset $NOVA_CPU_CONF vnc novncproxy_base_url "$NOVNCPROXY_URL" - XVPVNCPROXY_URL=${XVPVNCPROXY_URL:-"http://$SERVICE_HOST:$((6081 + offset))/console"} - iniset $NOVA_CPU_CONF vnc xvpvncproxy_base_url "$XVPVNCPROXY_URL" - SPICEHTML5PROXY_URL=${SPICEHTML5PROXY_URL:-"http://$SERVICE_HOST:$((6082 + offset))/spice_auto.html"} + SPICEHTML5PROXY_URL=${SPICEHTML5PROXY_URL:-"http://$SERVICE_HOST:$((6081 + offset))/spice_auto.html"} iniset $NOVA_CPU_CONF spice html5proxy_base_url "$SPICEHTML5PROXY_URL" fi - if is_service_enabled n-novnc || is_service_enabled n-xvnc || [ "$NOVA_VNC_ENABLED" != False ]; then + if is_service_enabled n-novnc || [ "$NOVA_VNC_ENABLED" != False ]; then # Address on which instance vncservers will listen on compute hosts. # For multi-host, this should be the management ip of the compute host. VNCSERVER_LISTEN=${VNCSERVER_LISTEN:-$NOVA_SERVICE_LISTEN_ADDRESS} @@ -665,7 +663,7 @@ function configure_console_compute { if is_service_enabled n-sproxy; then iniset $NOVA_CPU_CONF serial_console enabled True - iniset $NOVA_CPU_CONF serial_console base_url "ws://$SERVICE_HOST:$((6083 + offset))/" + iniset $NOVA_CPU_CONF serial_console base_url "ws://$SERVICE_HOST:$((6082 + offset))/" fi } @@ -674,15 +672,13 @@ function configure_console_proxies { local conf=${1:-$NOVA_CONF} local offset=${2:-0} # Stagger the offset based on the total number of possible console proxies - # (novnc, xvpvnc, spice, serial) so that their ports will not collide if + # (novnc, spice, serial) so that their ports will not collide if # all are enabled. 
- offset=$((offset * 4)) + offset=$((offset * 3)) - if is_service_enabled n-novnc || is_service_enabled n-xvnc || [ "$NOVA_VNC_ENABLED" != False ]; then + if is_service_enabled n-novnc || [ "$NOVA_VNC_ENABLED" != False ]; then iniset $conf vnc novncproxy_host "$NOVA_SERVICE_LISTEN_ADDRESS" iniset $conf vnc novncproxy_port $((6080 + offset)) - iniset $conf vnc xvpvncproxy_host "$NOVA_SERVICE_LISTEN_ADDRESS" - iniset $conf vnc xvpvncproxy_port $((6081 + offset)) if is_nova_console_proxy_compute_tls_enabled ; then iniset $conf vnc auth_schemes "vencrypt" @@ -714,12 +710,12 @@ function configure_console_proxies { if is_service_enabled n-spice; then iniset $conf spice html5proxy_host "$NOVA_SERVICE_LISTEN_ADDRESS" - iniset $conf spice html5proxy_port $((6082 + offset)) + iniset $conf spice html5proxy_port $((6081 + offset)) fi if is_service_enabled n-sproxy; then iniset $conf serial_console serialproxy_host "$NOVA_SERVICE_LISTEN_ADDRESS" - iniset $conf serial_console serialproxy_port $((6083 + offset)) + iniset $conf serial_console serialproxy_port $((6082 + offset)) fi } @@ -986,7 +982,7 @@ function start_nova_rest { function enable_nova_console_proxies { for i in $(seq 1 $NOVA_NUM_CELLS); do - for srv in n-novnc n-xvnc n-spice n-sproxy; do + for srv in n-novnc n-spice n-sproxy; do if is_service_enabled $srv; then enable_service ${srv}-cell${i} fi @@ -1004,7 +1000,6 @@ function start_nova_console_proxies { # console proxies run globally for singleconductor, else they run per cell if [[ "${CELLSV2_SETUP}" == "singleconductor" ]]; then run_process n-novnc "$NOVA_BIN_DIR/nova-novncproxy --config-file $api_cell_conf --web $NOVNC_WEB_DIR" - run_process n-xvnc "$NOVA_BIN_DIR/nova-xvpvncproxy --config-file $api_cell_conf" run_process n-spice "$NOVA_BIN_DIR/nova-spicehtml5proxy --config-file $api_cell_conf --web $SPICE_WEB_DIR" run_process n-sproxy "$NOVA_BIN_DIR/nova-serialproxy --config-file $api_cell_conf" else @@ -1013,7 +1008,6 @@ function start_nova_console_proxies { local conf conf=$(conductor_conf $i) run_process n-novnc-cell${i} "$NOVA_BIN_DIR/nova-novncproxy --config-file $conf --web $NOVNC_WEB_DIR" - run_process n-xvnc-cell${i} "$NOVA_BIN_DIR/nova-xvpvncproxy --config-file $conf" run_process n-spice-cell${i} "$NOVA_BIN_DIR/nova-spicehtml5proxy --config-file $conf --web $SPICE_WEB_DIR" run_process n-sproxy-cell${i} "$NOVA_BIN_DIR/nova-serialproxy --config-file $conf" done @@ -1104,13 +1098,13 @@ function stop_nova_rest { function stop_nova_console_proxies { if [[ "${CELLSV2_SETUP}" == "singleconductor" ]]; then - for srv in n-novnc n-xvnc n-spice n-sproxy; do + for srv in n-novnc n-spice n-sproxy; do stop_process $srv done else enable_nova_console_proxies for i in $(seq 1 $NOVA_NUM_CELLS); do - for srv in n-novnc n-xvnc n-spice n-sproxy; do + for srv in n-novnc n-spice n-sproxy; do stop_process ${srv}-cell${i} done done From 970891a4ef863344fb1425727b3c3bf91b1c8bb5 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Tue, 2 Mar 2021 16:45:39 +0000 Subject: [PATCH 023/574] Remove references to XenAPI driver The XenAPI driver was removed during the Victoria release [1], while the libvirt+xen driver has been removed in the Wallaby release [2]. Remove references to Xen from DevStack since its all a no-op now. 
[1] I42b302afbb1cfede7a0f7b16485a596cd70baf17 [2] I73305e82da5d8da548961b801a8e75fb0e8c4cf1 Change-Id: If7055feb88391f496a5e5e4c72008bf0050c5356 Signed-off-by: Stephen Finucane --- HACKING.rst | 3 +- MAINTAINERS.rst | 4 - doc/source/configuration.rst | 6 -- doc/source/plugins.rst | 2 +- functions | 25 ------ functions-common | 2 - lib/cinder_plugins/XenAPINFS | 46 ----------- lib/glance | 4 - lib/nova | 8 -- lib/nova_plugins/hypervisor-xenserver | 107 -------------------------- lib/tempest | 14 +--- stack.sh | 10 --- stackrc | 17 +--- tools/image_list.sh | 2 +- tools/uec/meta.py | 42 ---------- tools/xen/README.md | 3 - 16 files changed, 6 insertions(+), 289 deletions(-) delete mode 100644 lib/cinder_plugins/XenAPINFS delete mode 100644 lib/nova_plugins/hypervisor-xenserver delete mode 100644 tools/uec/meta.py delete mode 100644 tools/xen/README.md diff --git a/HACKING.rst b/HACKING.rst index f55aed8a07..0c4de303ce 100644 --- a/HACKING.rst +++ b/HACKING.rst @@ -74,8 +74,7 @@ of test of specific fragile functions in the ``functions`` and ``tools`` - Contains a collection of stand-alone scripts. While these may reference the top-level DevStack configuration they can generally be -run alone. There are also some sub-directories to support specific -environments such as XenServer. +run alone. Scripts diff --git a/MAINTAINERS.rst b/MAINTAINERS.rst index d4968a6051..3cf61a6875 100644 --- a/MAINTAINERS.rst +++ b/MAINTAINERS.rst @@ -77,10 +77,6 @@ SUSE Tempest ~~~~~~~ -Xen -~~~ -* Bob Ball - Zaqar (Marconi) ~~~~~~~~~~~~~~~ diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index 22f5999174..2d0c894530 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -628,12 +628,6 @@ outside of tox. If you would like to install it add the following to your INSTALL_TEMPEST=True -Xenserver -~~~~~~~~~ - -If you would like to use Xenserver as the hypervisor, please refer to -the instructions in ``./tools/xen/README.md``. - Cinder ~~~~~~ diff --git a/doc/source/plugins.rst b/doc/source/plugins.rst index a18a786c49..7d70d74dd0 100644 --- a/doc/source/plugins.rst +++ b/doc/source/plugins.rst @@ -241,7 +241,7 @@ locations in the top-level of the plugin repository: on Ubuntu, Debian or Linux Mint. - ``./devstack/files/rpms/$plugin_name`` - Packages to install when running - on Red Hat, Fedora, CentOS or XenServer. + on Red Hat, Fedora, or CentOS. - ``./devstack/files/rpms-suse/$plugin_name`` - Packages to install when running on SUSE Linux or openSUSE. diff --git a/functions b/functions index 89bbab2085..ccca5cda51 100644 --- a/functions +++ b/functions @@ -280,31 +280,6 @@ function upload_image { return fi - # XenServer-vhd-ovf-format images are provided as .vhd.tgz - # and should not be decompressed prior to loading - if [[ "$image_url" =~ '.vhd.tgz' ]]; then - image_name="${image_fname%.vhd.tgz}" - local force_vm_mode="" - if [[ "$image_name" =~ 'cirros' ]]; then - # Cirros VHD image currently only boots in PV mode. - # Nova defaults to PV for all VHD images, but - # the glance setting is needed for booting - # directly from volume. - force_vm_mode="vm_mode=xen" - fi - _upload_image "$image_name" ovf vhd "$image" $force_vm_mode - return - fi - - # .xen-raw.tgz suggests a Xen capable raw image inside a tgz. - # and should not be decompressed prior to loading. - # Setting metadata, so PV mode is used. 
- if [[ "$image_url" =~ '.xen-raw.tgz' ]]; then - image_name="${image_fname%.xen-raw.tgz}" - _upload_image "$image_name" tgz raw "$image" vm_mode=xen - return - fi - if [[ "$image_url" =~ '.hds' ]]; then image_name="${image_fname%.hds}" vm_mode=${image_name##*-} diff --git a/functions-common b/functions-common index 87d8c64804..340da754a2 100644 --- a/functions-common +++ b/functions-common @@ -397,8 +397,6 @@ function GetDistro { # Drop the . release as we assume it's compatible # XXX re-evaluate when we get RHEL10 DISTRO="rhel${os_RELEASE::1}" - elif [[ "$os_VENDOR" =~ (XenServer) ]]; then - DISTRO="xs${os_RELEASE%.*}" else # We can't make a good choice here. Setting a sensible DISTRO # is part of the problem, but not the major issue -- we really diff --git a/lib/cinder_plugins/XenAPINFS b/lib/cinder_plugins/XenAPINFS deleted file mode 100644 index 92135e7c4f..0000000000 --- a/lib/cinder_plugins/XenAPINFS +++ /dev/null @@ -1,46 +0,0 @@ -#!/bin/bash -# -# lib/cinder_plugins/XenAPINFS -# Configure the XenAPINFS driver - -# Enable with: -# -# CINDER_DRIVER=XenAPINFS - -# Dependencies: -# -# - ``functions`` file -# - ``cinder`` configurations - -# configure_cinder_driver - make configuration changes, including those to other services - -# Save trace setting -_XTRACE_CINDER_XENAPINFS=$(set +o | grep xtrace) -set +o xtrace - - -# Defaults -# -------- - -# Set up default directories - - -# Entry Points -# ------------ - -# configure_cinder_driver - Set config files, create data dirs, etc -function configure_cinder_driver { - iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.xenapi.sm.XenAPINFSDriver" - iniset $CINDER_CONF DEFAULT xenapi_connection_url "$CINDER_XENAPI_CONNECTION_URL" - iniset $CINDER_CONF DEFAULT xenapi_connection_username "$CINDER_XENAPI_CONNECTION_USERNAME" - iniset $CINDER_CONF DEFAULT xenapi_connection_password "$CINDER_XENAPI_CONNECTION_PASSWORD" - iniset $CINDER_CONF DEFAULT xenapi_nfs_server "$CINDER_XENAPI_NFS_SERVER" - iniset $CINDER_CONF DEFAULT xenapi_nfs_serverpath "$CINDER_XENAPI_NFS_SERVERPATH" -} - -# Restore xtrace -$_XTRACE_CINDER_XENAPINFS - -# Local variables: -# mode: shell-script -# End: diff --git a/lib/glance b/lib/glance index c2a8b7492e..fcf778d3f6 100644 --- a/lib/glance +++ b/lib/glance @@ -279,10 +279,6 @@ function configure_glance { configure_keystone_authtoken_middleware $GLANCE_API_CONF glance iniset $GLANCE_API_CONF oslo_messaging_notifications driver messagingv2 iniset_rpc_backend glance $GLANCE_API_CONF - if [ "$VIRT_DRIVER" = 'xenserver' ]; then - iniset $GLANCE_API_CONF DEFAULT container_formats "ami,ari,aki,bare,ovf,tgz" - iniset $GLANCE_API_CONF DEFAULT disk_formats "ami,ari,aki,vhd,raw,iso" - fi if [ "$VIRT_DRIVER" = 'libvirt' ] && [ "$LIBVIRT_TYPE" = 'parallels' ]; then iniset $GLANCE_API_CONF DEFAULT disk_formats "ami,ari,aki,vhd,vmdk,raw,qcow2,vdi,iso,ploop" fi diff --git a/lib/nova b/lib/nova index caa778060f..216c3cff9e 100644 --- a/lib/nova +++ b/lib/nova @@ -1052,14 +1052,6 @@ function is_nova_ready { # happen between here and the script ending. However, in multinode # tests this can very often not be the case. So ensure that the # compute is up before we move on. - - # TODO(sdague): honestly, this probably should be a plug point for - # an external system. - if [[ "$VIRT_DRIVER" == 'xenserver' ]]; then - # xenserver encodes information in the hostname of the compute - # because of the dom0/domU split. Just ignore for now. 
- return - fi wait_for_compute $NOVA_READY_TIMEOUT } diff --git a/lib/nova_plugins/hypervisor-xenserver b/lib/nova_plugins/hypervisor-xenserver deleted file mode 100644 index 511ec1bc09..0000000000 --- a/lib/nova_plugins/hypervisor-xenserver +++ /dev/null @@ -1,107 +0,0 @@ -#!/bin/bash -# -# lib/nova_plugins/hypervisor-xenserver -# Configure the XenServer hypervisor - -# Enable with: -# VIRT_DRIVER=xenserver - -# Dependencies: -# ``functions`` file -# ``nova`` configuration - -# install_nova_hypervisor - install any external requirements -# configure_nova_hypervisor - make configuration changes, including those to other services -# start_nova_hypervisor - start any external services -# stop_nova_hypervisor - stop any external services -# cleanup_nova_hypervisor - remove transient data and cache - -# Save trace setting -_XTRACE_XENSERVER=$(set +o | grep xtrace) -set +o xtrace - - -# Defaults -# -------- - -VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=169.254.0.1} - - -# Entry Points -# ------------ - -# clean_nova_hypervisor - Clean up an installation -function cleanup_nova_hypervisor { - # This function intentionally left blank - : -} - -# configure_nova_hypervisor - Set config files, create data dirs, etc -function configure_nova_hypervisor { - if [ -z "$XENAPI_CONNECTION_URL" ]; then - die $LINENO "XENAPI_CONNECTION_URL is not specified" - fi - - # Check os-xenapi plugin is enabled - local plugins="${DEVSTACK_PLUGINS}" - local plugin - local found=0 - for plugin in ${plugins//,/ }; do - if [[ "$plugin" = "os-xenapi" ]]; then - found=1 - break - fi - done - if [[ $found -ne 1 ]]; then - die $LINENO "os-xenapi plugin is not specified. Please enable this plugin in local.conf" - fi - - iniset $NOVA_CONF DEFAULT compute_driver "xenapi.XenAPIDriver" - iniset $NOVA_CONF xenserver connection_url "$XENAPI_CONNECTION_URL" - iniset $NOVA_CONF xenserver connection_username "$XENAPI_USER" - iniset $NOVA_CONF xenserver connection_password "$XENAPI_PASSWORD" - iniset $NOVA_CONF DEFAULT flat_injected "False" - - local dom0_ip - dom0_ip=$(echo "$XENAPI_CONNECTION_URL" | cut -d "/" -f 3-) - - local ssh_dom0 - ssh_dom0="sudo -u $DOMZERO_USER ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null root@$dom0_ip" - - # install console logrotate script - tar -czf - -C $NOVA_DIR/tools/xenserver/ rotate_xen_guest_logs.sh | - $ssh_dom0 'tar -xzf - -C /root/ && chmod +x /root/rotate_xen_guest_logs.sh && mkdir -p /var/log/xen/guest' - - # Create a cron job that will rotate guest logs - $ssh_dom0 crontab - << CRONTAB -* * * * * /root/rotate_xen_guest_logs.sh >/dev/null 2>&1 -CRONTAB - -} - -# install_nova_hypervisor() - Install external components -function install_nova_hypervisor { - # xenapi functionality is now included in os-xenapi library which houses the plugin - # so this function intentionally left blank - : -} - -# start_nova_hypervisor - Start any required external services -function start_nova_hypervisor { - # This function intentionally left blank - : -} - -# stop_nova_hypervisor - Stop any external services -function stop_nova_hypervisor { - # This function intentionally left blank - : -} - - -# Restore xtrace -$_XTRACE_XENSERVER - -# Local variables: -# mode: shell-script -# End: diff --git a/lib/tempest b/lib/tempest index 8a5b785927..9ccd19b505 100644 --- a/lib/tempest +++ b/lib/tempest @@ -347,9 +347,6 @@ function configure_tempest { if [[ ! 
-z "$TEMPEST_HTTP_IMAGE" ]]; then iniset $TEMPEST_CONFIG image http_image $TEMPEST_HTTP_IMAGE fi - if [ "$VIRT_DRIVER" = "xenserver" ]; then - iniset $TEMPEST_CONFIG image disk_formats "ami,ari,aki,vhd,raw,iso" - fi iniset $TEMPEST_CONFIG image-feature-enabled import_image $GLANCE_USE_IMPORT_WORKFLOW iniset $TEMPEST_CONFIG image-feature-enabled os_glance_reserved True # Compute @@ -425,15 +422,8 @@ function configure_tempest { iniset $TEMPEST_CONFIG network-feature-enabled port_security $NEUTRON_PORT_SECURITY # Scenario - if [ "$VIRT_DRIVER" = "xenserver" ]; then - SCENARIO_IMAGE_DIR=${SCENARIO_IMAGE_DIR:-$FILES} - SCENARIO_IMAGE_FILE="cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.vhd.tgz" - iniset $TEMPEST_CONFIG scenario img_disk_format vhd - iniset $TEMPEST_CONFIG scenario img_container_format ovf - else - SCENARIO_IMAGE_DIR=${SCENARIO_IMAGE_DIR:-$FILES} - SCENARIO_IMAGE_FILE=$DEFAULT_IMAGE_FILE_NAME - fi + SCENARIO_IMAGE_DIR=${SCENARIO_IMAGE_DIR:-$FILES} + SCENARIO_IMAGE_FILE=$DEFAULT_IMAGE_FILE_NAME iniset $TEMPEST_CONFIG scenario img_file $SCENARIO_IMAGE_DIR/$SCENARIO_IMAGE_FILE # If using provider networking, use the physical network for validation rather than private diff --git a/stack.sh b/stack.sh index 6375c8e5e0..ca9ecfa213 100755 --- a/stack.sh +++ b/stack.sh @@ -718,16 +718,6 @@ if is_service_enabled keystone; then fi -# Nova -# ----- - -if is_service_enabled nova && [[ "$VIRT_DRIVER" == 'xenserver' ]]; then - # Look for the backend password here because read_password - # is not a library function. - read_password XENAPI_PASSWORD "ENTER A PASSWORD TO USE FOR XEN." -fi - - # Swift # ----- diff --git a/stackrc b/stackrc index a36f8970e6..205481be08 100644 --- a/stackrc +++ b/stackrc @@ -605,10 +605,8 @@ ENABLE_VOLUME_MULTIATTACH=$(trueorfalse False ENABLE_VOLUME_MULTIATTACH) # Nova hypervisor configuration. We default to libvirt with **kvm** but will # drop back to **qemu** if we are unable to load the kvm module. ``stack.sh`` can -# also install an **LXC**, **OpenVZ** or **XenAPI** based system. If xenserver-core -# is installed, the default will be XenAPI +# also install an **LXC** or **OpenVZ** based system. 
DEFAULT_VIRT_DRIVER=libvirt -is_package_installed xenserver-core && DEFAULT_VIRT_DRIVER=xenserver VIRT_DRIVER=${VIRT_DRIVER:-$DEFAULT_VIRT_DRIVER} case "$VIRT_DRIVER" in ironic|libvirt) @@ -633,14 +631,6 @@ case "$VIRT_DRIVER" in fake) NUMBER_FAKE_NOVA_COMPUTE=${NUMBER_FAKE_NOVA_COMPUTE:-1} ;; - xenserver) - # Xen config common to nova and neutron - XENAPI_USER=${XENAPI_USER:-"root"} - # This user will be used for dom0 - domU communication - # should be able to log in to dom0 without a password - # will be used to install the plugins - DOMZERO_USER=${DOMZERO_USER:-"domzero"} - ;; *) ;; esac @@ -695,11 +685,6 @@ if [[ "$DOWNLOAD_DEFAULT_IMAGES" == "True" ]]; then DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.2-i386-disk.vmdk} DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_FILE_NAME:-$DEFAULT_IMAGE_NAME} IMAGE_URLS+="http://partnerweb.vmware.com/programs/vmdkimage/${DEFAULT_IMAGE_FILE_NAME}";; - xenserver) - DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.5-x86_64-disk} - DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.5-x86_64-disk.vhd.tgz} - IMAGE_URLS+="http://ca.downloads.xensource.com/OpenStack/cirros-0.3.5-x86_64-disk.vhd.tgz" - IMAGE_URLS+=",http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-x86_64-uec.tar.gz";; fake) # Use the same as the default for libvirt DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk} diff --git a/tools/image_list.sh b/tools/image_list.sh index 3a27c4acfd..81231be9f3 100755 --- a/tools/image_list.sh +++ b/tools/image_list.sh @@ -22,7 +22,7 @@ source $TOP_DIR/functions # Possible virt drivers, if we have more, add them here. Always keep # dummy in the end position to trigger the fall through case. -DRIVERS="openvz ironic libvirt vsphere xenserver dummy" +DRIVERS="openvz ironic libvirt vsphere dummy" # Extra variables to trigger getting additional images. export ENABLED_SERVICES="h-api,tr-api" diff --git a/tools/uec/meta.py b/tools/uec/meta.py deleted file mode 100644 index 1d994a60d6..0000000000 --- a/tools/uec/meta.py +++ /dev/null @@ -1,42 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import BaseHTTPServer -import SimpleHTTPServer -import sys - - -def main(host, port, HandlerClass=SimpleHTTPServer.SimpleHTTPRequestHandler, - ServerClass=BaseHTTPServer.HTTPServer, protocol="HTTP/1.0"): - """simple http server that listens on a give address:port.""" - - server_address = (host, port) - - HandlerClass.protocol_version = protocol - httpd = ServerClass(server_address, HandlerClass) - - sa = httpd.socket.getsockname() - print("Serving HTTP on", sa[0], "port", sa[1], "...") - httpd.serve_forever() - -if __name__ == '__main__': - if sys.argv[1:]: - address = sys.argv[1] - else: - address = '0.0.0.0' - if ':' in address: - host, port = address.split(':') - else: - host = address - port = 8080 - - main(host, int(port)) diff --git a/tools/xen/README.md b/tools/xen/README.md deleted file mode 100644 index 287301156e..0000000000 --- a/tools/xen/README.md +++ /dev/null @@ -1,3 +0,0 @@ -Note: XenServer relative tools have been moved to `os-xenapi`_ and be maintained there. - -.. _os-xenapi: https://opendev.org/x/os-xenapi/ From 3948fcb03c96bacc0c620de5b2c18a475e7afef2 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Tue, 2 Mar 2021 16:51:49 +0000 Subject: [PATCH 024/574] Remove MAINTAINERS.rst This file is mega out-of-date and no longer helpful. Remove it. Change-Id: Ic7e215c3e48a9c453d19355ad7d683494811d2af Signed-off-by: Stephen Finucane --- HACKING.rst | 3 -- MAINTAINERS.rst | 88 ------------------------------------------------- 2 files changed, 91 deletions(-) delete mode 100644 MAINTAINERS.rst diff --git a/HACKING.rst b/HACKING.rst index 0c4de303ce..6a91e0a6a8 100644 --- a/HACKING.rst +++ b/HACKING.rst @@ -274,9 +274,6 @@ your change even years from now -- why we were motivated to make a change at the time. -* **Reviewers** -- please see ``MAINTAINERS.rst`` for a list of people - that should be added to reviews of various sub-systems. - Making Changes, Testing, and CI ------------------------------- diff --git a/MAINTAINERS.rst b/MAINTAINERS.rst deleted file mode 100644 index 3cf61a6875..0000000000 --- a/MAINTAINERS.rst +++ /dev/null @@ -1,88 +0,0 @@ -MAINTAINERS -=========== - - -Overview --------- - -The following is a list of people known to have interests in -particular areas or sub-systems of devstack. - -It is a rather general guide intended to help seed the initial -reviewers list of a change. A +1 on a review from someone identified -as being a maintainer of its affected area is a very positive flag to -the core team for the veracity of the change. - -The ``devstack-core`` group can still be added to all reviews. - - -Format -~~~~~~ - -The format of the file is the name of the maintainer and their -gerrit-registered email. - - -Maintainers ------------ - -.. 
contents:: :local: - - -Ceph -~~~~ - -* Sebastien Han - -Cinder -~~~~~~ - -Fedora/CentOS/RHEL -~~~~~~~~~~~~~~~~~~ - -* Ian Wienand - -Neutron -~~~~~~~ - -MidoNet -~~~~~~~ - -* Jaume Devesa -* Ryu Ishimoto -* YAMAMOTO Takashi - -OpenDaylight -~~~~~~~~~~~~ - -* Kyle Mestery - -OpenFlow Agent (ofagent) -~~~~~~~~~~~~~~~~~~~~~~~~ - -* YAMAMOTO Takashi -* Fumihiko Kakuma - -Swift -~~~~~ - -* Chmouel Boudjnah - -SUSE -~~~~ - -* Ralf Haferkamp -* Vincent Untz - -Tempest -~~~~~~~ - -Zaqar (Marconi) -~~~~~~~~~~~~~~~ - -* Flavio Percoco -* Malini Kamalambal - -Oracle Linux -~~~~~~~~~~~~ -* Wiekus Beukes From 9dc2b88eb42a5f98f43bc8ad3dfa3962a4d44d74 Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Fri, 5 Mar 2021 09:32:19 -0600 Subject: [PATCH 025/574] Add enforce_scope setting support for keystone Keystone-tempest-plugin has implemented the secure RBAC tests and enabling the enforce_scope via keystone devstack plugin. Doing those setting in devstack will help to manage easily and in central place also avoid restarting the api service. Change-Id: I30da189474476d3397152a0a15c2e30a62d712ad --- lib/keystone | 11 +++++++++++ lib/tempest | 10 ++++++++++ 2 files changed, 21 insertions(+) diff --git a/lib/keystone b/lib/keystone index 66e867ca68..e282db0bfa 100644 --- a/lib/keystone +++ b/lib/keystone @@ -134,6 +134,12 @@ KEYSTONE_PASSWORD_HASH_ROUNDS=${KEYSTONE_PASSWORD_HASH_ROUNDS:-4} # Cache settings KEYSTONE_ENABLE_CACHE=${KEYSTONE_ENABLE_CACHE:-True} +# Flag to set the oslo_policy.enforce_scope. This is used to switch +# the Identity API policies to start checking the scope of token. By Default, +# this flag is False. +# For more detail: https://docs.openstack.org/oslo.policy/latest/configuration/index.html#oslo_policy.enforce_scope +KEYSTONE_ENFORCE_SCOPE=$(trueorfalse False KEYSTONE_ENFORCE_SCOPE) + # Functions # --------- @@ -281,6 +287,11 @@ function configure_keystone { iniset $KEYSTONE_CONF security_compliance lockout_duration $KEYSTONE_LOCKOUT_DURATION iniset $KEYSTONE_CONF security_compliance unique_last_password_count $KEYSTONE_UNIQUE_LAST_PASSWORD_COUNT fi + if [[ "$KEYSTONE_ENFORCE_SCOPE" == True ]] ; then + iniset $KEYSTONE_CONF oslo_policy enforce_scope true + iniset $KEYSTONE_CONF oslo_policy enforce_new_defaults true + iniset $KEYSTONE_CONF oslo_policy policy_file policy.yaml + fi } # create_keystone_accounts() - Sets up common required keystone accounts diff --git a/lib/tempest b/lib/tempest index 8a5b785927..f210e4014b 100644 --- a/lib/tempest +++ b/lib/tempest @@ -601,6 +601,16 @@ function configure_tempest { fi done + # ``enforce_scope`` + # If services enable the enforce_scope for their policy + # we need to enable the same on Tempest side so that + # test can be run with scoped token. + if [[ "$KEYSTONE_ENFORCE_SCOPE" == True ]] ; then + iniset $TEMPEST_CONFIG enforce_scope keystone true + iniset $TEMPEST_CONFIG auth admin_system 'all' + iniset $TEMPEST_CONFIG auth admin_project_name '' + fi + if [ "$VIRT_DRIVER" = "libvirt" ] && [ "$LIBVIRT_TYPE" = "lxc" ]; then # libvirt-lxc does not support boot from volume or attaching volumes # so basically anything with cinder is out of the question. 
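A minimal sketch of exercising the new flag, assuming it is placed in the localrc section of local.conf; the default remains False, so existing deployments are unchanged:

    # Sketch only: turn on scope enforcement for Keystone. Tempest will
    # then be configured to use system-scoped admin credentials.
    KEYSTONE_ENFORCE_SCOPE=True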
From 8c93049220bd3551b53513426c5a7bfdb7bac1d9 Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Fri, 5 Mar 2021 09:40:39 -0600 Subject: [PATCH 026/574] Add enforce_scope setting support for Glance Glance started moving to new RBAC and glance-tempest-plugin and Tempest need to set few configuration to enable the scope checks on glance side and so does on Temepst side to tell glance is ready with scope checks so that test can be run with scoped token. Change-Id: I09f513d08212bc80a3a86a750b29b1c6625d2f89 --- lib/glance | 12 ++++++++++++ lib/tempest | 1 + 2 files changed, 13 insertions(+) diff --git a/lib/glance b/lib/glance index c2a8b7492e..fd2f2cb902 100644 --- a/lib/glance +++ b/lib/glance @@ -85,6 +85,12 @@ GLANCE_TASKS_DIR=${GLANCE_MULTISTORE_FILE_IMAGE_DIR:=$DATA_DIR/os_glance_tasks_s GLANCE_USE_IMPORT_WORKFLOW=$(trueorfalse False GLANCE_USE_IMPORT_WORKFLOW) +# Flag to set the oslo_policy.enforce_scope. This is used to switch +# the Image API policies to start checking the scope of token. By Default, +# this flag is False. +# For more detail: https://docs.openstack.org/oslo.policy/latest/configuration/index.html#oslo_policy.enforce_scope +GLANCE_ENFORCE_SCOPE=$(trueorfalse False GLANCE_ENFORCE_SCOPE) + GLANCE_CONF_DIR=${GLANCE_CONF_DIR:-/etc/glance} GLANCE_METADEF_DIR=$GLANCE_CONF_DIR/metadefs GLANCE_API_CONF=$GLANCE_CONF_DIR/glance-api.conf @@ -371,6 +377,12 @@ function configure_glance { iniset $GLANCE_API_CONF DEFAULT bind_port $GLANCE_SERVICE_PORT_INT iniset $GLANCE_API_CONF DEFAULT workers "$API_WORKERS" fi + + if [[ "$GLANCE_ENFORCE_SCOPE" == True ]] ; then + iniset $GLANCE_API_CONF oslo_policy enforce_scope true + iniset $GLANCE_API_CONF oslo_policy enforce_new_defaults true + iniset $GLANCE_API_CONF DEFAULT enforce_secure_rbac true + fi } # create_glance_accounts() - Set up common required glance accounts diff --git a/lib/tempest b/lib/tempest index f210e4014b..0a9f800bcc 100644 --- a/lib/tempest +++ b/lib/tempest @@ -610,6 +610,7 @@ function configure_tempest { iniset $TEMPEST_CONFIG auth admin_system 'all' iniset $TEMPEST_CONFIG auth admin_project_name '' fi + iniset $TEMPEST_CONFIG enforce_scope glance "$GLANCE_ENFORCE_SCOPE" if [ "$VIRT_DRIVER" = "libvirt" ] && [ "$LIBVIRT_TYPE" = "lxc" ]; then # libvirt-lxc does not support boot from volume or attaching volumes From bd0d0fde24a5654507e02d32eea7ea0c1fc46821 Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Sat, 6 Mar 2021 17:23:39 -0600 Subject: [PATCH 027/574] Add enforce_scope setting support for Cinder Conder started moving to new RBAC and cinder-tempest-plugin and Tempest need to set few configuration to enable the scope checks on cinder side and on Temepst side to tell cinder is all configured with scope checks and test can be run with scoped token. Change-Id: Ic7cd919c000c4e7b9a3a06638a5bd87b1617e749 --- lib/cinder | 11 +++++++++++ lib/tempest | 2 ++ 2 files changed, 13 insertions(+) diff --git a/lib/cinder b/lib/cinder index 33deff61f2..da6f32728e 100644 --- a/lib/cinder +++ b/lib/cinder @@ -125,6 +125,12 @@ CINDER_IMG_CACHE_SIZE_COUNT=${CINDER_IMG_CACHE_SIZE_COUNT:-} # enable the cache for all cinder backends. CINDER_CACHE_ENABLED_FOR_BACKENDS=${CINDER_CACHE_ENABLED_FOR_BACKENDS:-$CINDER_ENABLED_BACKENDS} +# Flag to set the oslo_policy.enforce_scope. This is used to switch +# the Volume API policies to start checking the scope of token. by default, +# this flag is False. 
+# For more detail: https://docs.openstack.org/oslo.policy/latest/configuration/index.html#oslo_policy.enforce_scope +CINDER_ENFORCE_SCOPE=$(trueorfalse False CINDER_ENFORCE_SCOPE) + # Functions # --------- @@ -326,6 +332,11 @@ function configure_cinder { elif is_service_enabled etcd3; then iniset $CINDER_CONF coordination backend_url "etcd3+http://${SERVICE_HOST}:$ETCD_PORT" fi + + if [[ "$CINDER_ENFORCE_SCOPE" == True ]] ; then + iniset $CINDER_CONF oslo_policy enforce_scope true + iniset $CINDER_CONF oslo_policy enforce_new_defaults true + fi } # create_cinder_accounts() - Set up common required cinder accounts diff --git a/lib/tempest b/lib/tempest index f210e4014b..238e25f07b 100644 --- a/lib/tempest +++ b/lib/tempest @@ -611,6 +611,8 @@ function configure_tempest { iniset $TEMPEST_CONFIG auth admin_project_name '' fi + iniset $TEMPEST_CONFIG enforce_scope cinder "$CINDER_ENFORCE_SCOPE" + if [ "$VIRT_DRIVER" = "libvirt" ] && [ "$LIBVIRT_TYPE" = "lxc" ]; then # libvirt-lxc does not support boot from volume or attaching volumes # so basically anything with cinder is out of the question. From 38fed19acc2aa232503f91424d6c732ed8c7ed3a Mon Sep 17 00:00:00 2001 From: Lee Yarwood Date: Mon, 8 Mar 2021 08:50:53 +0000 Subject: [PATCH 028/574] Update Cirros to 0.5.2 This release [1] includes a single fix [2] pulling in the ahci module which is required by Iad1adbc23b31dd54a96299e7a8a4b622c15eed8d, a nova-next change introducing q35 testing to the job. This depends on the following change caching the image within the CI host image: Depends-On: https://review.opendev.org/c/openstack/project-config/+/779178/ [1] https://github.com/cirros-dev/cirros/releases/tag/0.5.2 [2] https://github.com/cirros-dev/cirros/pull/65 Change-Id: I12e0bdb3699e5343592ab834468ba6b2fcdcaaf4 --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index 205481be08..1bcc302846 100644 --- a/stackrc +++ b/stackrc @@ -657,7 +657,7 @@ esac #IMAGE_URLS="http://smoser.brickies.net/ubuntu/ttylinux-uec/ttylinux-uec-amd64-11.2_2.6.35-15_1.tar.gz" # old ttylinux-uec image #IMAGE_URLS="http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img" # cirros full disk image -CIRROS_VERSION=${CIRROS_VERSION:-"0.5.1"} +CIRROS_VERSION=${CIRROS_VERSION:-"0.5.2"} CIRROS_ARCH=${CIRROS_ARCH:-"x86_64"} # Set default image based on ``VIRT_DRIVER`` and ``LIBVIRT_TYPE``, either of From 802259a49656170108dd79559166ad89c49e2ef7 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Tue, 12 Jan 2021 22:55:57 +0000 Subject: [PATCH 029/574] Simulate a remote/standalone g-api worker In order to be able to test glance's distributed import function, we need to have multiple workers in an arrangement like they would be if one was on another host (potentially at another site). This extra worker must be separate from the default image service in order to repeatedly hit one and then the other to test cross- service interactions. This allows you to enable_service g-api-r, which will clone the main g-api service, modify it to run on a different port, and start it. The service will be registered in the catalog as image_remote. 
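A minimal sketch of trying the clone out, assuming the usual localrc section of local.conf; the endpoint check afterwards is illustrative only and its output will vary:

    # Sketch only: enable the remote glance clone.
    enable_service g-api-r

    # After stack.sh completes, the clone should be registered separately:
    openstack endpoint list --service image_remote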
Depends-On: https://review.opendev.org/c/openstack/glance/+/769976 Change-Id: I0e2bb5412701d515153c023873addb9d7abdb8a4 --- lib/glance | 71 ++++++++++++++++++++++++++++++++++++++++++++++++++++- lib/tempest | 5 ++++ 2 files changed, 75 insertions(+), 1 deletion(-) diff --git a/lib/glance b/lib/glance index c2a8b7492e..cf66023f5c 100644 --- a/lib/glance +++ b/lib/glance @@ -131,7 +131,7 @@ function is_glance_enabled { # runs that a clean run would need to clean up function cleanup_glance { # delete image files (glance) - sudo rm -rf $GLANCE_CACHE_DIR $GLANCE_IMAGE_DIR + sudo rm -rf $GLANCE_CACHE_DIR $GLANCE_IMAGE_DIR $(glance_remote_conf '') # Cleanup multiple stores directories if [[ "$GLANCE_ENABLE_MULTIPLE_STORES" == "True" ]]; then @@ -365,6 +365,11 @@ function configure_glance { if [[ "$GLANCE_STANDALONE" == False ]]; then write_local_uwsgi_http_config "$GLANCE_UWSGI_CONF" "$GLANCE_UWSGI" "/image" + # Grab our uwsgi listen address and use that to fill out our + # worker_self_reference_url config + iniset $GLANCE_API_CONF DEFAULT worker_self_reference_url \ + $(awk '-F= ' '/^http-socket/ { print "http://"$2}' \ + $GLANCE_UWSGI_CONF) else write_local_proxy_http_config glance "http://$GLANCE_SERVICE_HOST:$GLANCE_SERVICE_PORT_INT" "/image" iniset $GLANCE_API_CONF DEFAULT bind_host $GLANCE_SERVICE_LISTEN_ADDRESS @@ -460,6 +465,64 @@ function install_glance { setup_develop $GLANCE_DIR } +# glance_remote_conf() - Return the path to an alternate config file for +# the remote glance clone +function glance_remote_conf { + echo "$(dirname ${GLANCE_CONF_DIR})/glance-remote/"$(basename "$1") +} + +# start_glance_remote_clone() - Clone the regular glance api worker +function start_glance_remote_clone { + local glance_remote_conf glance_remote_port + + glance_remote_conf_dir=$(glance_remote_conf '') + glance_remote_port=$(get_random_port) + + # Clone the existing ready-to-go glance-api setup + sudo rm -Rf $glance_remote_conf_dir + sudo cp -r "$GLANCE_CONF_DIR" $glance_remote_conf_dir + sudo chown $STACK_USER -R $glance_remote_conf_dir + + # Point this worker at different data dirs + remote_data="${DATA_DIR}/glance-remote" + mkdir -p $remote_data/os_glance_tasks_store \ + $remote_data/os_glance_staging_store + iniset $(glance_remote_conf 'glance-api.conf') os_glance_staging_store \ + filesystem_store_datadir ${remote_data}/os_glance_staging_store + iniset $(glance_remote_conf 'glance-api.conf') os_glance_tasks_store \ + filesystem_store_datadir ${remote_data}/os_glance_tasks_store + + # Change our uwsgi to our new port + sed -ri "s/^(http-socket.*):[0-9]+/\1:$glance_remote_port/" \ + $(glance_remote_conf $GLANCE_UWSGI_CONF) + + # Update the self-reference url with our new port + iniset $(glance_remote_conf $GLANCE_API_CONF) DEFAULT \ + worker_self_reference_url \ + $(awk '-F= ' '/^http-socket/ { print "http://"$2 }' \ + $(glance_remote_conf $GLANCE_UWSGI_CONF)) + + # We need to create the systemd service for the clone, but then + # change it to include an Environment line to point the WSGI app + # at the alternate config directory. 
+ write_uwsgi_user_unit_file devstack@g-api-r.service "$(which uwsgi) \ + --procname-prefix \ + glance-api-remote \ + --ini $(glance_remote_conf $GLANCE_UWSGI_CONF)" \ + "" "$STACK_USER" + iniset -sudo ${SYSTEMD_DIR}/devstack@g-api-r.service \ + "Service" "Environment" "OS_GLANCE_CONFIG_DIR=$glance_remote_conf_dir" + + # Reload and restart with the new config + $SYSTEMCTL daemon-reload + $SYSTEMCTL restart devstack@g-api-r + + get_or_create_service glance_remote image_remote "Alternate glance" + get_or_create_endpoint image_remote $REGION_NAME \ + $(awk '-F= ' '/^http-socket/ { print "http://"$2 }' \ + $(glance_remote_conf $GLANCE_UWSGI_CONF)) +} + # start_glance() - Start running processes function start_glance { local service_protocol=$GLANCE_SERVICE_PROTOCOL @@ -475,6 +538,11 @@ function start_glance { run_process g-api "$GLANCE_BIN_DIR/glance-api --config-dir=$GLANCE_CONF_DIR" fi + if is_service_enabled g-api-r; then + echo "Starting the g-api-r clone service..." + start_glance_remote_clone + fi + echo "Waiting for g-api ($GLANCE_SERVICE_HOST) to start..." if ! wait_for_service $SERVICE_TIMEOUT $GLANCE_URL; then die $LINENO "g-api did not start" @@ -484,6 +552,7 @@ function start_glance { # stop_glance() - Stop running processes function stop_glance { stop_process g-api + stop_process g-api-r } # Restore xtrace diff --git a/lib/tempest b/lib/tempest index 8eab4f5ef2..7e7f0ab7a9 100644 --- a/lib/tempest +++ b/lib/tempest @@ -352,6 +352,11 @@ function configure_tempest { fi iniset $TEMPEST_CONFIG image-feature-enabled import_image $GLANCE_USE_IMPORT_WORKFLOW iniset $TEMPEST_CONFIG image-feature-enabled os_glance_reserved True + if is_service_enabled g-api-r; then + iniset $TEMPEST_CONFIG image alternate_image_endpoint \ + "image_remote" + fi + # Compute iniset $TEMPEST_CONFIG compute image_ref $image_uuid iniset $TEMPEST_CONFIG compute image_ref_alt $image_uuid_alt From 61b4fbf143b96365fa85456246bcadcaab3d76be Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Tue, 9 Mar 2021 08:05:37 -0800 Subject: [PATCH 030/574] Address feedback from glance-remote patch This cleans up some of the quote and variable handling that was pointed out in review of the previous patch. This is non-critical, so I'm putting it in a subsequent patch to avoid disturbing the careful alignment of patches across three projects that are mostly approved. 
Change-Id: I9b281efd74ba5cd78f97b84e5704b41fd040e481 --- lib/glance | 40 ++++++++++++++++++++++------------------ lib/tempest | 3 +-- 2 files changed, 23 insertions(+), 20 deletions(-) diff --git a/lib/glance b/lib/glance index cf66023f5c..3fb61b0268 100644 --- a/lib/glance +++ b/lib/glance @@ -130,8 +130,9 @@ function is_glance_enabled { # cleanup_glance() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_glance { - # delete image files (glance) - sudo rm -rf $GLANCE_CACHE_DIR $GLANCE_IMAGE_DIR $(glance_remote_conf '') + # delete image files (glance) and all of the glance-remote temporary + # storage + sudo rm -rf $GLANCE_CACHE_DIR $GLANCE_IMAGE_DIR "${DATA_DIR}/glance-remote" # Cleanup multiple stores directories if [[ "$GLANCE_ENABLE_MULTIPLE_STORES" == "True" ]]; then @@ -468,39 +469,41 @@ function install_glance { # glance_remote_conf() - Return the path to an alternate config file for # the remote glance clone function glance_remote_conf { - echo "$(dirname ${GLANCE_CONF_DIR})/glance-remote/"$(basename "$1") + echo $(dirname "${GLANCE_CONF_DIR}")/glance-remote/$(basename "$1") } # start_glance_remote_clone() - Clone the regular glance api worker function start_glance_remote_clone { - local glance_remote_conf glance_remote_port + local glance_remote_conf_dir glance_remote_port remote_data + local glance_remote_uwsgi - glance_remote_conf_dir=$(glance_remote_conf '') + glance_remote_conf_dir="$(glance_remote_conf "")" glance_remote_port=$(get_random_port) + glance_remote_uwsgi="$(glance_remote_conf $GLANCE_UWSGI_CONF)" # Clone the existing ready-to-go glance-api setup - sudo rm -Rf $glance_remote_conf_dir - sudo cp -r "$GLANCE_CONF_DIR" $glance_remote_conf_dir - sudo chown $STACK_USER -R $glance_remote_conf_dir + sudo rm -Rf "$glance_remote_conf_dir" + sudo cp -r "$GLANCE_CONF_DIR" "$glance_remote_conf_dir" + sudo chown $STACK_USER -R "$glance_remote_conf_dir" # Point this worker at different data dirs remote_data="${DATA_DIR}/glance-remote" mkdir -p $remote_data/os_glance_tasks_store \ - $remote_data/os_glance_staging_store - iniset $(glance_remote_conf 'glance-api.conf') os_glance_staging_store \ - filesystem_store_datadir ${remote_data}/os_glance_staging_store - iniset $(glance_remote_conf 'glance-api.conf') os_glance_tasks_store \ - filesystem_store_datadir ${remote_data}/os_glance_tasks_store + "${remote_data}/os_glance_staging_store" + iniset $(glance_remote_conf "$GLANCE_API_CONF") os_glance_staging_store \ + filesystem_store_datadir "${remote_data}/os_glance_staging_store" + iniset $(glance_remote_conf "$GLANCE_API_CONF") os_glance_tasks_store \ + filesystem_store_datadir "${remote_data}/os_glance_tasks_store" # Change our uwsgi to our new port sed -ri "s/^(http-socket.*):[0-9]+/\1:$glance_remote_port/" \ - $(glance_remote_conf $GLANCE_UWSGI_CONF) + "$glance_remote_uwsgi" # Update the self-reference url with our new port iniset $(glance_remote_conf $GLANCE_API_CONF) DEFAULT \ worker_self_reference_url \ $(awk '-F= ' '/^http-socket/ { print "http://"$2 }' \ - $(glance_remote_conf $GLANCE_UWSGI_CONF)) + "$glance_remote_uwsgi") # We need to create the systemd service for the clone, but then # change it to include an Environment line to point the WSGI app @@ -508,10 +511,11 @@ function start_glance_remote_clone { write_uwsgi_user_unit_file devstack@g-api-r.service "$(which uwsgi) \ --procname-prefix \ glance-api-remote \ - --ini $(glance_remote_conf $GLANCE_UWSGI_CONF)" \ + --ini $glance_remote_uwsgi" \ "" 
"$STACK_USER" iniset -sudo ${SYSTEMD_DIR}/devstack@g-api-r.service \ - "Service" "Environment" "OS_GLANCE_CONFIG_DIR=$glance_remote_conf_dir" + "Service" "Environment" \ + "OS_GLANCE_CONFIG_DIR=$glance_remote_conf_dir" # Reload and restart with the new config $SYSTEMCTL daemon-reload @@ -520,7 +524,7 @@ function start_glance_remote_clone { get_or_create_service glance_remote image_remote "Alternate glance" get_or_create_endpoint image_remote $REGION_NAME \ $(awk '-F= ' '/^http-socket/ { print "http://"$2 }' \ - $(glance_remote_conf $GLANCE_UWSGI_CONF)) + $glance_remote_uwsgi) } # start_glance() - Start running processes diff --git a/lib/tempest b/lib/tempest index 7e7f0ab7a9..bbd23bb63e 100644 --- a/lib/tempest +++ b/lib/tempest @@ -353,8 +353,7 @@ function configure_tempest { iniset $TEMPEST_CONFIG image-feature-enabled import_image $GLANCE_USE_IMPORT_WORKFLOW iniset $TEMPEST_CONFIG image-feature-enabled os_glance_reserved True if is_service_enabled g-api-r; then - iniset $TEMPEST_CONFIG image alternate_image_endpoint \ - "image_remote" + iniset $TEMPEST_CONFIG image alternate_image_endpoint image_remote fi # Compute From af79a934ef057ea6ef7690894d58d21f7818979e Mon Sep 17 00:00:00 2001 From: Brian Haley Date: Mon, 15 Mar 2021 12:20:42 -0400 Subject: [PATCH 031/574] Use 'ip addr replace' in OVN code Instead of doing a flush/add, use replace like the ML2/OVS code does. Should have the same behavior of not failing if the address is already present. Change-Id: If9d8a848b079ccb8c0c9b8e6fb708107aa0d46c7 --- lib/neutron_plugins/ovn_agent | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index b661f593a4..abc9c63614 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -271,8 +271,7 @@ function create_public_bridge { sudo ovs-vsctl set open . external-ids:ovn-bridge-mappings=$PHYSICAL_NETWORK:$ext_gw_ifc if [ -n "$FLOATING_RANGE" ]; then local cidr_len=${FLOATING_RANGE#*/} - sudo ip addr flush dev $ext_gw_ifc - sudo ip addr add $PUBLIC_NETWORK_GATEWAY/$cidr_len dev $ext_gw_ifc + sudo ip addr replace $PUBLIC_NETWORK_GATEWAY/$cidr_len dev $ext_gw_ifc fi # Ensure IPv6 RAs are accepted on the interface with the default route. @@ -286,8 +285,7 @@ function create_public_bridge { sudo sysctl -w net.ipv6.conf.all.forwarding=1 if [ -n "$IPV6_PUBLIC_RANGE" ]; then local ipv6_cidr_len=${IPV6_PUBLIC_RANGE#*/} - sudo ip -6 addr flush dev $ext_gw_ifc - sudo ip -6 addr add $IPV6_PUBLIC_NETWORK_GATEWAY/$ipv6_cidr_len dev $ext_gw_ifc + sudo ip -6 addr replace $IPV6_PUBLIC_NETWORK_GATEWAY/$ipv6_cidr_len dev $ext_gw_ifc fi sudo ip link set $ext_gw_ifc up From 1ed276c17791dba1f0b7ef4446d0efe09135553b Mon Sep 17 00:00:00 2001 From: Slawek Kaplonski Date: Thu, 11 Mar 2021 13:10:28 +0100 Subject: [PATCH 032/574] Use (or set properly) system-id generated by openvswitch In case when OVN_UUID isn't set by user, and it isn't stored in /etc/openvswith/system-id.conf file, Devstack will reuse it. If it's not, it will generate and store it in the /etc/openvswitch/system-id.conf file so it can be set to same value after openvswitch will be e.g. restarted. In case when OVN_UUID is set by user, it will be also saved in /etc/openvswitch/system-id.conf file to make it persistent when e.g openvswitch will be restarted. 
Closes-Bug: #1918656 Change-Id: I8e3b05f3ab83e204bc1ce895baec0e1ba515895b --- lib/neutron_plugins/ovn_agent | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index b661f593a4..c6ac16d007 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -66,7 +66,9 @@ OVN_L3_SCHEDULER=${OVN_L3_SCHEDULER:-leastloaded} # A UUID to uniquely identify this system. If one is not specified, a random # one will be generated. A randomly generated UUID will be saved in a file -# 'ovn-uuid' so that the same one will be re-used if you re-run DevStack. +# $OVS_SYSCONFDIR/system-id.conf (typically /etc/openvswitch/system-id.conf) +# so that the same one will be re-used if you re-run DevStack or restart +# Open vSwitch service. OVN_UUID=${OVN_UUID:-} # Whether or not to build the openvswitch kernel module from ovs. This is required @@ -109,6 +111,7 @@ OVS_RUNDIR=$OVS_PREFIX/var/run/openvswitch OVS_SHAREDIR=$OVS_PREFIX/share/openvswitch OVS_SCRIPTDIR=$OVS_SHAREDIR/scripts OVS_DATADIR=$DATA_DIR/ovs +OVS_SYSCONFDIR=${OVS_SYSCONFDIR:-/etc/openvswitch} OVN_DATADIR=$DATA_DIR/ovn OVN_SHAREDIR=$OVS_PREFIX/share/ovn @@ -521,11 +524,17 @@ function configure_ovn { echo "Configuring OVN" if [ -z "$OVN_UUID" ] ; then - if [ -f ./ovn-uuid ] ; then - OVN_UUID=$(cat ovn-uuid) + if [ -f $OVS_SYSCONFDIR/system-id.conf ]; then + OVN_UUID=$(cat $OVS_SYSCONFDIR/system-id.conf) else OVN_UUID=$(uuidgen) - echo $OVN_UUID > ovn-uuid + echo $OVN_UUID | sudo tee $OVS_SYSCONFDIR/system-id.conf + fi + else + local ovs_uuid + ovs_uuid=$(cat $OVS_SYSCONFDIR/system-id.conf) + if [ "$ovs_uuid" != $OVN_UUID ]; then + echo $OVN_UUID | sudo tee $OVS_SYSCONFDIR/system-id.conf fi fi From 30819e66ddad5b57b726684e62b511a938aaea98 Mon Sep 17 00:00:00 2001 From: Rodolfo Alonso Hernandez Date: Mon, 22 Mar 2021 07:14:50 +0000 Subject: [PATCH 033/574] Set default OVS_SYSCONFDIR value depending on OVS_PREFIX When OVN is built from source, the value of OVS_PREFIX is set to "/usr/local". All other paths referring to OVS should be prefixed with this value. Closes-Bug: #1920634 Related-Bug: #1918656 Change-Id: I9a45a5379d1c47cdf67b9c6d3d0409a88501e61e --- lib/neutron_plugins/ovn_agent | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index 97c20fcda1..2f6d1ab10d 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -111,7 +111,7 @@ OVS_RUNDIR=$OVS_PREFIX/var/run/openvswitch OVS_SHAREDIR=$OVS_PREFIX/share/openvswitch OVS_SCRIPTDIR=$OVS_SHAREDIR/scripts OVS_DATADIR=$DATA_DIR/ovs -OVS_SYSCONFDIR=${OVS_SYSCONFDIR:-/etc/openvswitch} +OVS_SYSCONFDIR=${OVS_SYSCONFDIR:-$OVS_PREFIX/etc/openvswitch} OVN_DATADIR=$DATA_DIR/ovn OVN_SHAREDIR=$OVS_PREFIX/share/ovn From 84b328c814fd5be8af53738128aa3f5ef75ca3c7 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Fri, 26 Mar 2021 07:17:42 -0700 Subject: [PATCH 034/574] Default to parallel execution Several jobs have been running in parallel since the late Wallaby cycle, and other developers have had it enabled locally. I have heard no async-related stability or debug-ability complaints thus far. I think that we should convert the default to parallel early in the Xena cycle in an attempt to spread the speed improvements across the board, while also collecting data on a wider set of configurations. 
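For anyone who does hit an async-related problem, opting back out is a one-line localrc setting; this is a sketch, with the variable name taken from inc/async:

    # Sketch only: restore serial execution of the stack phases.
    DEVSTACK_PARALLEL=False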
Change-Id: I83d56c9363d481bb6d5921f5e1f9b024f136044b --- inc/async | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/inc/async b/inc/async index d29168f2f5..c63bc2045a 100644 --- a/inc/async +++ b/inc/async @@ -11,7 +11,7 @@ # async_wait my_shell_func # -DEVSTACK_PARALLEL=$(trueorfalse False DEVSTACK_PARALLEL) +DEVSTACK_PARALLEL=$(trueorfalse True DEVSTACK_PARALLEL) _ASYNC_BG_TIME=0 # Keep track of how much total time was spent in background tasks From d207ba9015f3210812468bfbf7d06b1491392554 Mon Sep 17 00:00:00 2001 From: Toshiaki Takahashi Date: Wed, 23 Dec 2020 17:40:57 +0000 Subject: [PATCH 035/574] Move gawk into general for post-config Devstack script for setting post-config needs gawk. So this patch moves gawk from files/*/nova into files/*/general. Closes-Bug: #1909041 Change-Id: I06a1a5524f146a8d7337963e846b5a6b7561be13 --- files/debs/general | 1 + files/debs/nova | 1 - files/rpms-suse/general | 1 + files/rpms-suse/nova | 1 - files/rpms/general | 1 + files/rpms/nova | 1 - 6 files changed, 3 insertions(+), 3 deletions(-) diff --git a/files/debs/general b/files/debs/general index d64417f1b7..7e481b4072 100644 --- a/files/debs/general +++ b/files/debs/general @@ -5,6 +5,7 @@ bsdmainutils curl default-jre-headless # NOPRIME g++ +gawk gcc gettext # used for compiling message catalogs git diff --git a/files/debs/nova b/files/debs/nova index a7aebbf946..e19441453b 100644 --- a/files/debs/nova +++ b/files/debs/nova @@ -3,7 +3,6 @@ curl dnsmasq-base dnsmasq-utils # for dhcp_release ebtables -gawk genisoimage # required for config_drive iptables iputils-arping diff --git a/files/rpms-suse/general b/files/rpms-suse/general index 0de0876dcd..f63611025c 100644 --- a/files/rpms-suse/general +++ b/files/rpms-suse/general @@ -3,6 +3,7 @@ apache2-devel bc ca-certificates-mozilla curl +gawk gcc gcc-c++ git-core diff --git a/files/rpms-suse/nova b/files/rpms-suse/nova index 9923760750..1cc2f62ea5 100644 --- a/files/rpms-suse/nova +++ b/files/rpms-suse/nova @@ -4,7 +4,6 @@ curl dnsmasq dnsmasq-utils # dist:opensuse-12.3,opensuse-13.1 ebtables -gawk iptables iputils kpartx diff --git a/files/rpms/general b/files/rpms/general index cfcd7ff261..33da0a5385 100644 --- a/files/rpms/general +++ b/files/rpms/general @@ -1,6 +1,7 @@ bc curl dbus +gawk gcc gcc-c++ gettext # used for compiling message catalogs diff --git a/files/rpms/nova b/files/rpms/nova index 2218330230..8ea8ccc5ca 100644 --- a/files/rpms/nova +++ b/files/rpms/nova @@ -3,7 +3,6 @@ curl dnsmasq # for q-dhcp dnsmasq-utils # for dhcp_release ebtables -gawk genisoimage # required for config_drive iptables iputils From 01a84d2d03cd871fb8734d5fdc9d149b9487e3e4 Mon Sep 17 00:00:00 2001 From: Hironori Shiina Date: Mon, 11 Jan 2021 13:42:46 -0500 Subject: [PATCH 036/574] Configure Cinder backup driver This patch adds a new environment variable, CINDER_BACKUP_DRIVER for configuring cinder backup driver used when c-bak service is enabled. This gets cinder backup driver configurable with a similar pattern to cinder backends. Although the current configurable backup drivers don't need cleanup functions, the interface for cleanup is prepared for the future. The following backup drivers can be configured: swift: This is the default backup driver. ceph: This already can be configured if ceph backend driver is enabled. For backward compatibility, ceph backup driver is used if ceph backend driver is enabled and no backup driver is specified. s3_swift: The s3 backup driver gets configurable with this patch. 
By specifying 's3_swift', the driver is configured for swift s3api. In the future, lib/cinder_backups/s3 should be created separatedly for external S3 compatible storage. This file will just set given parameters such as a URL and credentials. Change-Id: I356c224d938e1aa59c8589387a03682b3ec6e23d --- lib/cinder | 45 ++++++++++++++++++++++++----- lib/cinder_backends/ceph | 32 --------------------- lib/cinder_backups/ceph | 57 +++++++++++++++++++++++++++++++++++++ lib/cinder_backups/s3_swift | 45 +++++++++++++++++++++++++++++ lib/cinder_backups/swift | 38 +++++++++++++++++++++++++ 5 files changed, 178 insertions(+), 39 deletions(-) create mode 100644 lib/cinder_backups/ceph create mode 100644 lib/cinder_backups/s3_swift create mode 100644 lib/cinder_backups/swift diff --git a/lib/cinder b/lib/cinder index 6c97e114a6..14ab291f8a 100644 --- a/lib/cinder +++ b/lib/cinder @@ -31,6 +31,7 @@ set +o xtrace CINDER_DRIVER=${CINDER_DRIVER:-default} CINDER_PLUGINS=$TOP_DIR/lib/cinder_plugins CINDER_BACKENDS=$TOP_DIR/lib/cinder_backends +CINDER_BACKUPS=$TOP_DIR/lib/cinder_backups # grab plugin config if specified via cinder_driver if [[ -r $CINDER_PLUGINS/$CINDER_DRIVER ]]; then @@ -98,6 +99,16 @@ else CINDER_ISCSI_HELPER=${CINDER_ISCSI_HELPER:-tgtadm} fi +# For backward compatibility +# Before CINDER_BACKUP_DRIVER was introduced, ceph backup driver was configured +# along with ceph backend driver. +if [[ -z "${CINDER_BACKUP_DRIVER}" && "$CINDER_ENABLED_BACKENDS" =~ "ceph" ]]; then + CINDER_BACKUP_DRIVER=ceph +fi + +# Supported backup drivers are in lib/cinder_backups +CINDER_BACKUP_DRIVER=${CINDER_BACKUP_DRIVER:-swift} + # Toggle for deploying Cinder under a wsgi server. Legacy mod_wsgi # reference should be cleaned up to more accurately refer to uwsgi. CINDER_USE_MOD_WSGI=${CINDER_USE_MOD_WSGI:-True} @@ -113,6 +124,15 @@ if is_service_enabled c-vol && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then done fi +# Source the backup driver +if is_service_enabled c-bak && [[ -n "$CINDER_BACKUP_DRIVER" ]]; then + if [[ -r $CINDER_BACKUPS/$CINDER_BACKUP_DRIVER ]]; then + source $CINDER_BACKUPS/$CINDER_BACKUP_DRIVER + else + die "cinder backup driver $CINDER_BACKUP_DRIVER is not supported" + fi +fi + # Environment variables to configure the image-volume cache CINDER_IMG_CACHE_ENABLED=${CINDER_IMG_CACHE_ENABLED:-True} @@ -189,6 +209,12 @@ function cleanup_cinder { done fi + if is_service_enabled c-bak && [[ -n "$CINDER_BACKUP_DRIVER" ]]; then + if type cleanup_cinder_backup_$CINDER_BACKUP_DRIVER >/dev/null 2>&1; then + cleanup_cinder_backup_$CINDER_BACKUP_DRIVER + fi + fi + stop_process "c-api" remove_uwsgi_config "$CINDER_UWSGI_CONF" "$CINDER_UWSGI" } @@ -266,13 +292,12 @@ function configure_cinder { configure_cinder_image_volume_cache fi - if is_service_enabled c-bak; then - # NOTE(mriedem): The default backup driver uses swift and if we're - # on a subnode we might not know if swift is enabled, but chances are - # good that it is on the controller so configure the backup service - # to use it. If we want to configure the backup service to use - # a non-swift driver, we'll likely need environment variables. 
- iniset $CINDER_CONF DEFAULT backup_swift_url "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$SWIFT_DEFAULT_BIND_PORT/v1/AUTH_" + if is_service_enabled c-bak && [[ -n "$CINDER_BACKUP_DRIVER" ]]; then + if type configure_cinder_backup_$CINDER_BACKUP_DRIVER >/dev/null 2>&1; then + configure_cinder_backup_$CINDER_BACKUP_DRIVER + else + die "configure_cinder_backup_$CINDER_BACKUP_DRIVER doesn't exist in $CINDER_BACKUPS/$CINDER_BACKUP_DRIVER" + fi fi if is_service_enabled ceilometer; then @@ -410,6 +435,12 @@ function init_cinder { done fi + if is_service_enabled c-bak && [[ -n "$CINDER_BACKUP_DRIVER" ]]; then + if type init_cinder_backup_$CINDER_BACKUP_DRIVER >/dev/null 2>&1; then + init_cinder_backup_$CINDER_BACKUP_DRIVER + fi + fi + mkdir -p $CINDER_STATE_PATH/volumes } diff --git a/lib/cinder_backends/ceph b/lib/cinder_backends/ceph index 33c9706d3d..0b465730c0 100644 --- a/lib/cinder_backends/ceph +++ b/lib/cinder_backends/ceph @@ -6,12 +6,6 @@ # Enable with: # # CINDER_ENABLED_BACKENDS+=,ceph:ceph -# -# Optional parameters: -# CINDER_BAK_CEPH_POOL= -# CINDER_BAK_CEPH_USER= -# CINDER_BAK_CEPH_POOL_PG= -# CINDER_BAK_CEPH_POOL_PGP= # Dependencies: # @@ -29,11 +23,6 @@ set +o xtrace # Defaults # -------- -CINDER_BAK_CEPH_POOL=${CINDER_BAK_CEPH_POOL:-backups} -CINDER_BAK_CEPH_POOL_PG=${CINDER_BAK_CEPH_POOL_PG:-8} -CINDER_BAK_CEPH_POOL_PGP=${CINDER_BAK_CEPH_POOL_PGP:-8} -CINDER_BAK_CEPH_USER=${CINDER_BAK_CEPH_USER:-cinder-bak} - # Entry Points # ------------ @@ -52,27 +41,6 @@ function configure_cinder_backend_ceph { iniset $CINDER_CONF $be_name rbd_flatten_volume_from_snapshot False iniset $CINDER_CONF $be_name rbd_max_clone_depth 5 iniset $CINDER_CONF DEFAULT glance_api_version 2 - - if is_service_enabled c-bak; then - sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_BAK_CEPH_POOL} ${CINDER_BAK_CEPH_POOL_PG} ${CINDER_BAK_CEPH_POOL_PGP} - if [ "$REMOTE_CEPH" = "False" ]; then - # Configure Cinder backup service options, ceph pool, ceph user and ceph key - sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} size ${CEPH_REPLICAS} - if [[ $CEPH_REPLICAS -ne 1 ]]; then - sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} crush_ruleset ${RULE_ID} - fi - fi - sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_BAK_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_BAK_CEPH_POOL}, allow rwx pool=${CINDER_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring - sudo chown $(whoami):$(whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring - - iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.ceph.CephBackupDriver" - iniset $CINDER_CONF DEFAULT backup_ceph_conf "$CEPH_CONF_FILE" - iniset $CINDER_CONF DEFAULT backup_ceph_pool "$CINDER_BAK_CEPH_POOL" - iniset $CINDER_CONF DEFAULT backup_ceph_user "$CINDER_BAK_CEPH_USER" - iniset $CINDER_CONF DEFAULT backup_ceph_stripe_unit 0 - iniset $CINDER_CONF DEFAULT backup_ceph_stripe_count 0 - iniset $CINDER_CONF DEFAULT restore_discard_excess_bytes True - fi } # Restore xtrace diff --git a/lib/cinder_backups/ceph b/lib/cinder_backups/ceph new file mode 100644 index 0000000000..26136bef96 --- /dev/null +++ b/lib/cinder_backups/ceph @@ -0,0 +1,57 @@ +#!/bin/bash +# +# lib/cinder_backups/ceph +# Configure the ceph backup driver + +# Enable with: +# +# CINDER_BACKUP_DRIVER=ceph + +# Dependencies: +# +# - ``functions`` file +# - ``cinder`` configurations + +# Save trace setting +_XTRACE_CINDER_CEPH=$(set +o | grep 
xtrace) +set +o xtrace + +# Defaults +# -------- + +CINDER_BAK_CEPH_POOL=${CINDER_BAK_CEPH_POOL:-backups} +CINDER_BAK_CEPH_POOL_PG=${CINDER_BAK_CEPH_POOL_PG:-8} +CINDER_BAK_CEPH_POOL_PGP=${CINDER_BAK_CEPH_POOL_PGP:-8} +CINDER_BAK_CEPH_USER=${CINDER_BAK_CEPH_USER:-cinder-bak} + + +function configure_cinder_backup_ceph { + sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_BAK_CEPH_POOL} ${CINDER_BAK_CEPH_POOL_PG} ${CINDER_BAK_CEPH_POOL_PGP} + if [ "$REMOTE_CEPH" = "False" ]; then + # Configure Cinder backup service options, ceph pool, ceph user and ceph key + sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} size ${CEPH_REPLICAS} + if [[ $CEPH_REPLICAS -ne 1 ]]; then + sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} crush_ruleset ${RULE_ID} + fi + fi + sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_BAK_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_BAK_CEPH_POOL}, allow rwx pool=${CINDER_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring + sudo chown $(whoami):$(whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring + + iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.ceph.CephBackupDriver" + iniset $CINDER_CONF DEFAULT backup_ceph_conf "$CEPH_CONF_FILE" + iniset $CINDER_CONF DEFAULT backup_ceph_pool "$CINDER_BAK_CEPH_POOL" + iniset $CINDER_CONF DEFAULT backup_ceph_user "$CINDER_BAK_CEPH_USER" + iniset $CINDER_CONF DEFAULT backup_ceph_stripe_unit 0 + iniset $CINDER_CONF DEFAULT backup_ceph_stripe_count 0 + iniset $CINDER_CONF DEFAULT restore_discard_excess_bytes True +} + +# init_cinder_backup_ceph: nothing to do +# cleanup_cinder_backup_ceph: nothing to do + +# Restore xtrace +$_XTRACE_CINDER_CEPH + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/cinder_backups/s3_swift b/lib/cinder_backups/s3_swift new file mode 100644 index 0000000000..6fb248606e --- /dev/null +++ b/lib/cinder_backups/s3_swift @@ -0,0 +1,45 @@ +#!/bin/bash +# +# lib/cinder_backups/s3_swift +# Configure the s3 backup driver with swift s3api +# +# TODO: create lib/cinder_backup/s3 for external s3 compatible storage + +# Enable with: +# +# CINDER_BACKUP_DRIVER=s3_swift +# enable_service s3api s-proxy s-object s-container s-account + +# Dependencies: +# +# - ``functions`` file +# - ``cinder`` configurations + +# Save trace setting +_XTRACE_CINDER_S3_SWIFT=$(set +o | grep xtrace) +set +o xtrace + +function configure_cinder_backup_s3_swift { + # This configuration requires swift and s3api. 
If we're + # on a subnode we might not know if they are enabled + iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.s3.S3BackupDriver" + iniset $CINDER_CONF DEFAULT backup_s3_endpoint_url "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$S3_SERVICE_PORT" +} + +function init_cinder_backup_s3_swift { + openstack ec2 credential create + iniset $CINDER_CONF DEFAULT backup_s3_store_access_key "$(openstack ec2 credential list -c Access -f value)" + iniset $CINDER_CONF DEFAULT backup_s3_store_secret_key "$(openstack ec2 credential list -c Secret -f value)" + if is_service_enabled tls-proxy; then + iniset $CINDER_CONF DEFAULT backup_s3_ca_cert_file "$SSL_BUNDLE_FILE" + fi +} + +# cleanup_cinder_backup_s3_swift: nothing to do + +# Restore xtrace +$_XTRACE_CINDER_S3_SWIFT + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/cinder_backups/swift b/lib/cinder_backups/swift new file mode 100644 index 0000000000..d7c977e1e3 --- /dev/null +++ b/lib/cinder_backups/swift @@ -0,0 +1,38 @@ +#!/bin/bash +# +# lib/cinder_backups/swift +# Configure the swift backup driver + +# Enable with: +# +# CINDER_BACKUP_DRIVER=swift + +# Dependencies: +# +# - ``functions`` file +# - ``cinder`` configurations + +# Save trace setting +_XTRACE_CINDER_SWIFT=$(set +o | grep xtrace) +set +o xtrace + + +function configure_cinder_backup_swift { + # NOTE(mriedem): The default backup driver uses swift and if we're + # on a subnode we might not know if swift is enabled, but chances are + # good that it is on the controller so configure the backup service + # to use it. + iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.swift.SwiftBackupDriver" + iniset $CINDER_CONF DEFAULT backup_swift_url "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$SWIFT_DEFAULT_BIND_PORT/v1/AUTH_" +} + +# init_cinder_backup_swift: nothing to do +# cleanup_cinder_backup_swift: nothing to do + + +# Restore xtrace +$_XTRACE_CINDER_SWIFT + +# Local variables: +# mode: shell-script +# End: From 110b9a9b1b05d9163a674e5bcc05fcd8d48cb5bf Mon Sep 17 00:00:00 2001 From: Nobuhiro MIKI Date: Thu, 1 Apr 2021 11:00:25 +0900 Subject: [PATCH 037/574] Fix typo in multinode-lab document Signed-off-by: Nobuhiro MIKI Change-Id: I1b6100d6b8231f1f96a7768e26ab83f010f1e4dc --- doc/source/guides/multinode-lab.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/guides/multinode-lab.rst b/doc/source/guides/multinode-lab.rst index c0b3f58157..dc3568a845 100644 --- a/doc/source/guides/multinode-lab.rst +++ b/doc/source/guides/multinode-lab.rst @@ -395,7 +395,7 @@ SSH keys need to be exchanged between each compute node: 3. Verify that login via ssh works without a password:: - ssh -i /root/.ssh/id_rsa.pub stack@DESTINATION + ssh -i /root/.ssh/id_rsa stack@DESTINATION In essence, this means that every compute node's root user's public RSA key must exist in every other compute node's stack user's authorized_keys file and From 362641b1b8b0596371f13db8448ab0f43bd53482 Mon Sep 17 00:00:00 2001 From: Lee Yarwood Date: Fri, 19 Mar 2021 10:33:24 +0000 Subject: [PATCH 038/574] cinder: Increase VOLUME_BACKING_FILE_SIZE As reported in bug #1920136 the tempest-integrated-compute job has started to see insufficient free virtual space errors being reported by c-sch and c-vol when creating volumes. This change simply increases the default size of the underlying LVM PV used to host these volumes within the default LVM/iSCSI c-vol backend deployed by devstack. 
Change-Id: I965d4a485215ac482403f1e83609452550dfd860 Closes-Bug: #1920136 --- stackrc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stackrc b/stackrc index 648a028beb..81e0f12866 100644 --- a/stackrc +++ b/stackrc @@ -758,8 +758,8 @@ for image_url in ${IMAGE_URLS//,/ }; do fi done -# 24Gb default volume backing file size -VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-24G} +# 30Gb default volume backing file size +VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-30G} # Prefixes for volume and instance names VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-} From 25d37efb9154f2f08e094f4dda3366a7bcd0af31 Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Tue, 6 Apr 2021 10:35:19 -0500 Subject: [PATCH 039/574] Update DEVSTACK_SERIES to xena stable/wallaby branch has been created now and current master is for xena. Change-Id: I42f67361fe50795d929752434342effddf123486 --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index 648a028beb..9630221f4e 100644 --- a/stackrc +++ b/stackrc @@ -245,7 +245,7 @@ REQUIREMENTS_DIR=${REQUIREMENTS_DIR:-$DEST/requirements} # Setting the variable to 'ALL' will activate the download for all # libraries. -DEVSTACK_SERIES="wallaby" +DEVSTACK_SERIES="xena" ############## # From 6f2c807bfade2a218636e0ca441de45c5662aca0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rados=C5=82aw=20Piliszek?= Date: Tue, 6 Apr 2021 14:15:34 +0000 Subject: [PATCH 040/574] gzip, not xz xz may cause POST_FAILUREs due to memory pressure [1]. [1] http://lists.openstack.org/pipermail/openstack-discuss/2021-April/021609.html Change-Id: I2ea3175ecf2508b62640bfffdd798d7072e55550 --- roles/export-devstack-journal/tasks/main.yaml | 2 +- .../templates/devstack.journal.README.txt.j2 | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/roles/export-devstack-journal/tasks/main.yaml b/roles/export-devstack-journal/tasks/main.yaml index ef839edaaf..db38b10a44 100644 --- a/roles/export-devstack-journal/tasks/main.yaml +++ b/roles/export-devstack-journal/tasks/main.yaml @@ -45,7 +45,7 @@ cmd: | journalctl -o export \ --since="$(cat {{ devstack_base_dir }}/log-start-timestamp.txt)" \ - | xz --threads=0 - > {{ stage_dir }}/logs/devstack.journal.xz + | gzip > {{ stage_dir }}/logs/devstack.journal.gz - name: Save journal README become: true diff --git a/roles/export-devstack-journal/templates/devstack.journal.README.txt.j2 b/roles/export-devstack-journal/templates/devstack.journal.README.txt.j2 index fe36653102..30519f63d7 100644 --- a/roles/export-devstack-journal/templates/devstack.journal.README.txt.j2 +++ b/roles/export-devstack-journal/templates/devstack.journal.README.txt.j2 @@ -7,7 +7,7 @@ devstack run. To use it, you will need to convert it so journalctl can read it locally. After downloading the file: - $ /lib/systemd/systemd-journal-remote <(xzcat ./devstack.journal.xz) -o output.journal + $ /lib/systemd/systemd-journal-remote <(zcat ./devstack.journal.gz) -o output.journal Note this binary is not in the regular path. On Debian/Ubuntu platforms, you will need to have the "systemd-journal-remote" package From 448db9ec41930d13a785c553e09a34417507f594 Mon Sep 17 00:00:00 2001 From: Francesco Pantano Date: Fri, 19 Feb 2021 13:25:10 +0100 Subject: [PATCH 041/574] Rely on ceph.conf settings when cinder backup pool is created Ceph adds the osd pool default size option on ceph.conf via [1]; this means we don't need to specify the size of this pool if the same value (same variable) is used (CEPH_REPLICAS). 
This change is an attempt of removing the size setting, relying on the implicit declaration of the value provided by ceph.conf. [1] https://github.com/openstack/devstack-plugin-ceph/blob/master/devstack/lib/ceph#L425 Change-Id: I5fa2105ceb3b97a4e38926d76c1e4028f1108d4a --- lib/cinder_backups/ceph | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/lib/cinder_backups/ceph b/lib/cinder_backups/ceph index 26136bef96..e4003c0720 100644 --- a/lib/cinder_backups/ceph +++ b/lib/cinder_backups/ceph @@ -27,12 +27,8 @@ CINDER_BAK_CEPH_USER=${CINDER_BAK_CEPH_USER:-cinder-bak} function configure_cinder_backup_ceph { sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_BAK_CEPH_POOL} ${CINDER_BAK_CEPH_POOL_PG} ${CINDER_BAK_CEPH_POOL_PGP} - if [ "$REMOTE_CEPH" = "False" ]; then - # Configure Cinder backup service options, ceph pool, ceph user and ceph key - sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} size ${CEPH_REPLICAS} - if [[ $CEPH_REPLICAS -ne 1 ]]; then - sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} crush_ruleset ${RULE_ID} - fi + if [[ "$REMOTE_CEPH" = "False" && "$CEPH_REPLICAS" -ne 1 ]]; then + sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} crush_ruleset ${RULE_ID} fi sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_BAK_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_BAK_CEPH_POOL}, allow rwx pool=${CINDER_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring sudo chown $(whoami):$(whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring From 580fec54c3a970de80ab66b3decca69704ff1179 Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Thu, 8 Apr 2021 11:03:37 -0500 Subject: [PATCH 042/574] Make stackviz tasks not to fail jobs Due to issue on stckviz side, job start failing with POST_FAILURE. If we fix the issue still we need to wait for periodic job periodic-package-stackviz-element to publish the latest tarball on https://tarballs.openstack.org/stackviz/dist/. Let's not fail the job for any issue occur during stackviz processing. 
Closes-Bug: 1863161 Change-Id: Ifee04f28ecee52e74803f1623aba5cfe5ee5ec90 --- roles/process-stackviz/tasks/main.yaml | 125 +++++++++++++------------ 1 file changed, 64 insertions(+), 61 deletions(-) diff --git a/roles/process-stackviz/tasks/main.yaml b/roles/process-stackviz/tasks/main.yaml index c51c66cdb3..3ba3d9c2e6 100644 --- a/roles/process-stackviz/tasks/main.yaml +++ b/roles/process-stackviz/tasks/main.yaml @@ -1,70 +1,73 @@ -- name: Devstack checks if stackviz archive exists - stat: - path: "/opt/cache/files/stackviz-latest.tar.gz" - register: stackviz_archive +- name: Process Stackviz + block: -- debug: - msg: "Stackviz archive could not be found in /opt/cache/files/stackviz-latest.tar.gz" - when: not stackviz_archive.stat.exists + - name: Devstack checks if stackviz archive exists + stat: + path: "/opt/cache/files/stackviz-latest.tar.gz" + register: stackviz_archive -- name: Check if subunit data exists - stat: - path: "{{ zuul_work_dir }}/testrepository.subunit" - register: subunit_input + - debug: + msg: "Stackviz archive could not be found in /opt/cache/files/stackviz-latest.tar.gz" + when: not stackviz_archive.stat.exists -- debug: - msg: "Subunit file could not be found at {{ zuul_work_dir }}/testrepository.subunit" - when: not subunit_input.stat.exists + - name: Check if subunit data exists + stat: + path: "{{ zuul_work_dir }}/testrepository.subunit" + register: subunit_input -- name: Install stackviz - when: - - stackviz_archive.stat.exists - - subunit_input.stat.exists - block: - - include_role: - name: ensure-pip + - debug: + msg: "Subunit file could not be found at {{ zuul_work_dir }}/testrepository.subunit" + when: not subunit_input.stat.exists + + - name: Install stackviz + when: + - stackviz_archive.stat.exists + - subunit_input.stat.exists + block: + - include_role: + name: ensure-pip + + - pip: + name: "file://{{ stackviz_archive.stat.path }}" + virtualenv: /tmp/stackviz + virtualenv_command: '{{ ensure_pip_virtualenv_command }}' + extra_args: -U - - pip: - name: "file://{{ stackviz_archive.stat.path }}" - virtualenv: /tmp/stackviz - virtualenv_command: '{{ ensure_pip_virtualenv_command }}' - extra_args: -U + - name: Deploy stackviz static html+js + command: cp -pR /tmp/stackviz/share/stackviz-html {{ stage_dir }}/stackviz + when: + - stackviz_archive.stat.exists + - subunit_input.stat.exists -- name: Deploy stackviz static html+js - command: cp -pR /tmp/stackviz/share/stackviz-html {{ stage_dir }}/stackviz - when: - - stackviz_archive.stat.exists - - subunit_input.stat.exists + - name: Check if dstat data exists + stat: + path: "{{ devstack_base_dir }}/logs/dstat-csv.log" + register: dstat_input + when: + - stackviz_archive.stat.exists + - subunit_input.stat.exists -- name: Check if dstat data exists - stat: - path: "{{ devstack_base_dir }}/logs/dstat-csv.log" - register: dstat_input - when: - - stackviz_archive.stat.exists - - subunit_input.stat.exists + - name: Run stackviz with dstat + shell: | + cat {{ subunit_input.stat.path }} | \ + /tmp/stackviz/bin/stackviz-export \ + --dstat "{{ devstack_base_dir }}/logs/dstat-csv.log" \ + --env --stdin \ + {{ stage_dir }}/stackviz/data + when: + - stackviz_archive.stat.exists + - subunit_input.stat.exists + - dstat_input.stat.exists -- name: Run stackviz with dstat - shell: | - cat {{ subunit_input.stat.path }} | \ - /tmp/stackviz/bin/stackviz-export \ - --dstat "{{ devstack_base_dir }}/logs/dstat-csv.log" \ - --env --stdin \ - {{ stage_dir }}/stackviz/data - when: - - stackviz_archive.stat.exists - - 
subunit_input.stat.exists - - dstat_input.stat.exists - failed_when: False + - name: Run stackviz without dstat + shell: | + cat {{ subunit_input.stat.path }} | \ + /tmp/stackviz/bin/stackviz-export \ + --env --stdin \ + {{ stage_dir }}/stackviz/data + when: + - stackviz_archive.stat.exists + - subunit_input.stat.exists + - not dstat_input.stat.exists -- name: Run stackviz without dstat - shell: | - cat {{ subunit_input.stat.path }} | \ - /tmp/stackviz/bin/stackviz-export \ - --env --stdin \ - {{ stage_dir }}/stackviz/data - when: - - stackviz_archive.stat.exists - - subunit_input.stat.exists - - not dstat_input.stat.exists - failed_when: False + ignore_errors: yes From aa5c38727b314b03cd7ab69612435aa206bd5e2c Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Wed, 14 Apr 2021 14:27:32 -0700 Subject: [PATCH 043/574] Work around CHILD_MAX bash limitation for async Apparently bash (via POSIX) only guarantees a small (32ish) number of children can be started and their statuses retrieved at any given point. On larger jobs with lots of plugins and additional work, we may go over that limit, especially for long-lived children, such as the install_tempest task. This works around that issue by creating a fifo for each child at spawn time. When the child is complete, it will block on a read against that fifo (and thus not exit). When the parent goes to wait on the child, it first writes to that fifo, unblocking the child so that it can exit near the time we go to wait. Closes-Bug: #1923728 Change-Id: Id755bdb1e7f1664ec08742d034c174e87a3d2902 --- inc/async | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/inc/async b/inc/async index c63bc2045a..11bcdfa39e 100644 --- a/inc/async +++ b/inc/async @@ -57,6 +57,7 @@ function async_log { function async_inner { local name="$1" local rc + local fifo=${DEST}/async/${name}.fifo shift set -o xtrace if $* >${DEST}/async/${name}.log 2>&1; then @@ -69,6 +70,8 @@ function async_inner { async_log "$name" "FAILED with rc $rc" fi iniset ${DEST}/async/${name}.ini job end_time $(date "+%s%3N") + # Block on the fifo until we are signaled to exit by the main process + cat $fifo return $rc } @@ -86,12 +89,14 @@ function async_run { local name="$1" shift local inifile=${DEST}/async/${name}.ini + local fifo=${DEST}/async/${name}.fifo touch $inifile iniset $inifile job command "$*" iniset $inifile job start_time $(date +%s%3N) if [[ "$DEVSTACK_PARALLEL" = "True" ]]; then + mkfifo $fifo async_inner $name $* & iniset $inifile job pid $! async_log "$name" "running: %command" @@ -119,17 +124,23 @@ function async_wait { xtrace=$(set +o | grep xtrace) set +o xtrace - local pid rc running inifile runtime + local pid rc running inifile runtime fifo rc=0 for name in $*; do running=$(ls ${DEST}/async/*.ini 2>/dev/null | wc -l) inifile="${DEST}/async/${name}.ini" + fifo=${DEST}/async/${name}.fifo if pid=$(async_pidof "$name"); then async_log "$name" "Waiting for completion of %command" \ "($running other jobs running)" time_start async_wait if [[ "$pid" != "self" ]]; then + # Signal the child to go ahead and exit since we are about to + # wait for it to collect its status. + echo "Signaling exit" + echo WAKEUP > $fifo + echo "Signaled" # Do not actually call wait if we ran synchronously if wait $pid; then rc=0 @@ -137,6 +148,7 @@ function async_wait { rc=$? 
fi cat ${DEST}/async/${name}.log + rm -f $fifo fi time_stop async_wait local start_time From 51e384554b4653a05abea435432431cdca4728fb Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Wed, 14 Apr 2021 07:23:10 -0700 Subject: [PATCH 044/574] Add some debug to async_wait failures This dumps some data in the case where we fail to wait for a child pid to help debug what is going on. This also cleans up a few review comments from the actual fix. Change-Id: I7b58ce0cf2b41bdffa448973edb4c992fe5f730c Related-Bug: #1923728 --- inc/async | 31 +++++++++++++++++++++++++------ 1 file changed, 25 insertions(+), 6 deletions(-) diff --git a/inc/async b/inc/async index 11bcdfa39e..56338f5343 100644 --- a/inc/async +++ b/inc/async @@ -48,7 +48,7 @@ function async_log { command=$(iniget $inifile job command | tr '#' '-') message=$(echo "$message" | sed "s#%command#$command#g") - echo "[Async ${name}:${pid}]: $message" + echo "[$BASHPID Async ${name}:${pid}]: $message" } # Inner function that actually runs the requested task. We wrap it like this @@ -57,7 +57,7 @@ function async_log { function async_inner { local name="$1" local rc - local fifo=${DEST}/async/${name}.fifo + local fifo="${DEST}/async/${name}.fifo" shift set -o xtrace if $* >${DEST}/async/${name}.log 2>&1; then @@ -116,6 +116,24 @@ function async_runfunc { async_run $1 $* } +# Dump some information to help debug a failed wait +function async_wait_dump { + local failpid=$1 + + echo "=== Wait failure dump from $BASHPID ===" + echo "Processes:" + ps -f + echo "Waiting jobs:" + for name in $(ls ${DEST}/async/*.ini); do + echo "Job $name :" + cat "$name" + done + echo "Failed PID status:" + sudo cat /proc/$failpid/status + sudo cat /proc/$failpid/cmdline + echo "=== End wait failure dump ===" +} + # Wait for an async future to complete. May return immediately if already # complete, or of the future has already been waited on (avoid this). May # block until the future completes. @@ -129,18 +147,18 @@ function async_wait { for name in $*; do running=$(ls ${DEST}/async/*.ini 2>/dev/null | wc -l) inifile="${DEST}/async/${name}.ini" - fifo=${DEST}/async/${name}.fifo + fifo="${DEST}/async/${name}.fifo" if pid=$(async_pidof "$name"); then async_log "$name" "Waiting for completion of %command" \ - "($running other jobs running)" + "running on PID $pid ($running other jobs running)" time_start async_wait if [[ "$pid" != "self" ]]; then # Signal the child to go ahead and exit since we are about to # wait for it to collect its status. 
- echo "Signaling exit" + async_log "$name" "Signaling child to exit" echo WAKEUP > $fifo - echo "Signaled" + async_log "$name" "Signaled" # Do not actually call wait if we ran synchronously if wait $pid; then rc=0 @@ -161,6 +179,7 @@ function async_wait { "$rc in $runtime seconds" rm -f $inifile if [ $rc -ne 0 ]; then + async_wait_dump $pid echo Stopping async wait due to error: $* break fi From d04e795b316f5be466532e60104a983bf6419716 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rados=C5=82aw=20Piliszek?= Date: Mon, 19 Apr 2021 06:52:30 +0000 Subject: [PATCH 045/574] [TrivialFix] Delete symlink apts-debs Follow up on old I0416180db5b6add996ce5b48c6966c1b68adbcb0 Change-Id: If2f6166cf7c585bf303d0f6c28a2745d85eabbed --- files/apts | 1 - 1 file changed, 1 deletion(-) delete mode 120000 files/apts diff --git a/files/apts b/files/apts deleted file mode 120000 index ef926de053..0000000000 --- a/files/apts +++ /dev/null @@ -1 +0,0 @@ -debs/ \ No newline at end of file From c062792709def9ef10ddac68867e1b7bf9009435 Mon Sep 17 00:00:00 2001 From: Lee Yarwood Date: Tue, 9 Mar 2021 22:36:57 +0000 Subject: [PATCH 046/574] cinder: Default CINDER_ISCSI_HELPER to lioadm As outlined in bug #1917750 the use of tgtadm in multinode environments with multiple c-vol services can cause volumes to use duplicate WWNs. This has been shown to cause some encrypted volume test failures as os-brick returns a /dev/disk/by-id path to n-cpu that can point to the wrong underlying volume when multiple volumes with the same WWN are connected to a host. There is also some speculation that the duplicate WWNs are also causing libvirt to fail to detach volumes from instances but as yet this has not been proven. This change aims to avoid all of the above by switching the default of CINDER_ISCSI_HELPER to lioadm for all deployments instead of just EL and SUSE based deployments. The Bionic platform job however is pinned to tgtadm as there issues installing python3-rtslib-fb. 
Closes-Bug: #1917750 Change-Id: If5c860d1e69aaef9a9236303c370479a7714ad43 --- .zuul.yaml | 3 +++ lib/cinder | 10 +++++----- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 67d4c24000..00129b5ca4 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -590,6 +590,9 @@ description: Ubuntu Bionic platform test nodeset: openstack-single-node-bionic voting: false + vars: + devstack_localrc: + CINDER_ISCSI_HELPER: tgtadm - job: name: devstack-async diff --git a/lib/cinder b/lib/cinder index f20631b56c..9c8d85cf59 100644 --- a/lib/cinder +++ b/lib/cinder @@ -88,15 +88,15 @@ CINDER_ENABLED_BACKENDS=${CINDER_ENABLED_BACKENDS:-lvm:lvmdriver-1} CINDER_VOLUME_CLEAR=${CINDER_VOLUME_CLEAR:-${CINDER_VOLUME_CLEAR_DEFAULT:-zero}} CINDER_VOLUME_CLEAR=$(echo ${CINDER_VOLUME_CLEAR} | tr '[:upper:]' '[:lower:]') -# Centos7 and OpenSUSE switched to using LIO and that's all that's supported, -# although the tgt bits are in EPEL and OpenSUSE we don't want that for CI +# Default to lioadm +CINDER_ISCSI_HELPER=${CINDER_ISCSI_HELPER:-lioadm} + +# EL and SUSE should only use lioadm, we continue to allow Ubuntu based +# deployments to use tgtadm for specific jobs in the cinder-tempest-plugin if is_fedora || is_suse; then - CINDER_ISCSI_HELPER=${CINDER_ISCSI_HELPER:-lioadm} if [[ ${CINDER_ISCSI_HELPER} != "lioadm" ]]; then die "lioadm is the only valid Cinder target_helper config on this platform" fi -else - CINDER_ISCSI_HELPER=${CINDER_ISCSI_HELPER:-tgtadm} fi # For backward compatibility From 0386c1cda61b57da3aedde05e317137c354fb4d9 Mon Sep 17 00:00:00 2001 From: Lee Yarwood Date: Wed, 28 Apr 2021 09:26:23 +0100 Subject: [PATCH 047/574] cinder: Default CINDER_ISCSI_HELPER to tgtadm on Bionic If5c860d1e69aaef9a9236303c370479a7714ad43 attempted to move this default to lioadm while pinning certain Bionic based jobs to tgtadm. Unfortunately it missed the legacy dsvm based jobs within various projects that do not inherit from the devstack-platform-bionic base job and that are also not covered by devstack's gate. This change simply forces CINDER_ISCSI_HELPER to tgtadm on Bionic based hosts to ensure it is always used. Closes-Bug: #1926411 Change-Id: Ib4b38b45f25575c92fb09b8e97fa1b24af0cc06a --- lib/cinder | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/lib/cinder b/lib/cinder index 9c8d85cf59..34d618674e 100644 --- a/lib/cinder +++ b/lib/cinder @@ -91,8 +91,13 @@ CINDER_VOLUME_CLEAR=$(echo ${CINDER_VOLUME_CLEAR} | tr '[:upper:]' '[:lower:]') # Default to lioadm CINDER_ISCSI_HELPER=${CINDER_ISCSI_HELPER:-lioadm} -# EL and SUSE should only use lioadm, we continue to allow Ubuntu based -# deployments to use tgtadm for specific jobs in the cinder-tempest-plugin +# Bionic needs to default to tgtadm until support is dropped within devstack +# as the rtslib-fb-targetctl service doesn't start after installing lioadm. +if is_ubuntu && [[ "$DISTRO" == "bionic" ]]; then + CINDER_ISCSI_HELPER=tgtadm +fi + +# EL and SUSE should only use lioadm if is_fedora || is_suse; then if [[ ${CINDER_ISCSI_HELPER} != "lioadm" ]]; then die "lioadm is the only valid Cinder target_helper config on this platform" From b02a43291cc5e2d5677ecbb80c0fd608d67a1374 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Tue, 27 Nov 2018 12:59:04 +1100 Subject: [PATCH 048/574] Handle disappearing pids in mlock_report.py If a pid disappears on us while we're reading, we should just continue on. EnvironmentError is just an alias for OSError since Python 3.3, so use the latter name. 
[0] [0] https://docs.python.org/3/library/exceptions.html#OSError Change-Id: I3a25cca328e1469f72c84a118a9691c1c0258bc4 Closes-Bug: #1926434 --- tools/mlock_report.py | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/tools/mlock_report.py b/tools/mlock_report.py index b15a0bf80b..1b081bbe6f 100644 --- a/tools/mlock_report.py +++ b/tools/mlock_report.py @@ -24,17 +24,19 @@ def _get_report(): # iterate over the /proc/%pid/status files manually try: s = open("%s/%d/status" % (psutil.PROCFS_PATH, proc.pid), 'r') - except EnvironmentError: + with s: + for line in s: + result = LCK_SUMMARY_REGEX.search(line) + if result: + locked = int(result.group('locked')) + if locked: + mlock_users.append({'name': proc.name(), + 'pid': proc.pid, + 'locked': locked}) + except OSError: + # pids can disappear, we're ok with that continue - with s: - for line in s: - result = LCK_SUMMARY_REGEX.search(line) - if result: - locked = int(result.group('locked')) - if locked: - mlock_users.append({'name': proc.name(), - 'pid': proc.pid, - 'locked': locked}) + # produce a single line log message with per process mlock stats if mlock_users: From 7ad4cd07c8bf4f302acc4fc6684e362309332c9d Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Thu, 29 Apr 2021 09:24:38 -0500 Subject: [PATCH 049/574] Drop Bionic support Since victoria cycle, we have moved upstream testing to Ubuntu Focal (20.04) and so does no Bionic distro in Xena cycle testing runtime[1]. Grenade jobs also started running on Focal since victoria was released. Only thing left was legacy jobs which were not migrated to Ubuntu Focal in Victoria and as per another community-wide goal[2], all the lgeacy jobs were suppsoed to be migrated to zuulv3 native jobs in victoria cycle itself. One of the pending job was in nova (nova-grenade-multinode) which is also migrated to zuulv3 native now - https://review.opendev.org/c/openstack/nova/+/778885 If there is any job running on bionic, we strongly recommend to migrate it to Ubuntu Focal. 
[1] https://governance.openstack.org/tc/reference/runtimes/xena.html [2] https://governance.openstack.org/tc/goals/selected/victoria/native-zuulv3-jobs.html Change-Id: I39e38e4a6c2e52dd3822c9fdea354258359a9f53 --- .zuul.yaml | 11 ----------- lib/apache | 3 --- lib/cinder | 6 ------ stack.sh | 2 +- tools/fixup_stuff.sh | 38 -------------------------------------- 5 files changed, 1 insertion(+), 59 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 00129b5ca4..b65aeec4dd 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -584,16 +584,6 @@ voting: false timeout: 9000 -- job: - name: devstack-platform-bionic - parent: tempest-full-py3 - description: Ubuntu Bionic platform test - nodeset: openstack-single-node-bionic - voting: false - vars: - devstack_localrc: - CINDER_ISCSI_HELPER: tgtadm - - job: name: devstack-async parent: tempest-full-py3 @@ -695,7 +685,6 @@ - devstack-ipv6 - devstack-platform-fedora-latest - devstack-platform-centos-8 - - devstack-platform-bionic - devstack-async - devstack-multinode - devstack-unit-tests diff --git a/lib/apache b/lib/apache index 870a65a9d2..04259ba31f 100644 --- a/lib/apache +++ b/lib/apache @@ -93,9 +93,6 @@ function install_apache_uwsgi { if is_ubuntu; then local pkg_list="uwsgi uwsgi-plugin-python3 libapache2-mod-proxy-uwsgi" - if [[ "$DISTRO" == 'bionic' ]]; then - pkg_list="${pkg_list} uwsgi-plugin-python" - fi install_package ${pkg_list} elif is_fedora; then # Note httpd comes with mod_proxy_uwsgi and it is loaded by diff --git a/lib/cinder b/lib/cinder index 34d618674e..7d6e843a3d 100644 --- a/lib/cinder +++ b/lib/cinder @@ -91,12 +91,6 @@ CINDER_VOLUME_CLEAR=$(echo ${CINDER_VOLUME_CLEAR} | tr '[:upper:]' '[:lower:]') # Default to lioadm CINDER_ISCSI_HELPER=${CINDER_ISCSI_HELPER:-lioadm} -# Bionic needs to default to tgtadm until support is dropped within devstack -# as the rtslib-fb-targetctl service doesn't start after installing lioadm. -if is_ubuntu && [[ "$DISTRO" == "bionic" ]]; then - CINDER_ISCSI_HELPER=tgtadm -fi - # EL and SUSE should only use lioadm if is_fedora || is_suse; then if [[ ${CINDER_ISCSI_HELPER} != "lioadm" ]]; then diff --git a/stack.sh b/stack.sh index ca9ecfa213..9d854da581 100755 --- a/stack.sh +++ b/stack.sh @@ -227,7 +227,7 @@ write_devstack_version # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -SUPPORTED_DISTROS="bionic|focal|f31|f32|opensuse-15.2|opensuse-tumbleweed|rhel8" +SUPPORTED_DISTROS="focal|f31|f32|opensuse-15.2|opensuse-tumbleweed|rhel8" if [[ ! ${DISTRO} =~ $SUPPORTED_DISTROS ]]; then echo "WARNING: this script has not been tested on $DISTRO" diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index 25f726892f..19219435ad 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -59,43 +59,6 @@ function fixup_keystone { fi } -# Ubuntu Repositories -#-------------------- -# Enable universe for bionic since it is missing when installing from ISO. -function fixup_ubuntu { - if [[ "$DISTRO" != "bionic" ]]; then - return - fi - - # This pulls in apt-add-repository - install_package "software-properties-common" - - # Enable universe - sudo add-apt-repository -y universe - - if [[ -f /etc/ci/mirror_info.sh ]] ; then - # If we are on a nodepool provided host and it has told us about - # where we can find local mirrors then use that mirror. 
- source /etc/ci/mirror_info.sh - sudo apt-add-repository -y "deb $NODEPOOL_UCA_MIRROR bionic-updates/ussuri main" - else - # Enable UCA:ussuri for updated versions of QEMU and libvirt - sudo add-apt-repository -y cloud-archive:ussuri - fi - REPOS_UPDATED=False - apt_get_update - - # Since pip10, pip will refuse to uninstall files from packages - # that were created with distutils (rather than more modern - # setuptools). This is because it technically doesn't have a - # manifest of what to remove. However, in most cases, simply - # overwriting works. So this hacks around those packages that - # have been dragged in by some other system dependency - sudo rm -rf /usr/lib/python3/dist-packages/httplib2-*.egg-info - sudo rm -rf /usr/lib/python3/dist-packages/pyasn1_modules-*.egg-info - sudo rm -rf /usr/lib/python3/dist-packages/PyYAML-*.egg-info -} - # Python Packages # --------------- @@ -194,7 +157,6 @@ function fixup_ovn_centos { function fixup_all { fixup_keystone - fixup_ubuntu fixup_fedora fixup_suse } From 06b7352478170521a07875154eef317bde0c5321 Mon Sep 17 00:00:00 2001 From: Clark Boylan Date: Thu, 29 Apr 2021 11:46:35 -0700 Subject: [PATCH 050/574] Fix async race updating nova configs The configure_neutron_nova function updates nova configs. While that is still running we separately update nova configs in stack.sh. This can result in unexpected configs (that don't work). Fix this by waiting for configure_neutron_nova to complete its work before we do nova config updates directly in stack.sh. For specifics we say that: [neutron] project_domain_name = Default was missing from both nova.conf and nova-cpu.conf and instances could not be created because keystone complained about not finding domain in project. The strong suspicion here is that on some systems configure_neutron_nova would write out project_domain_name while the stack.sh inisets were running resulting in stack.sh overwriting the project_domain_name content. One theory is that disabling swift makes this problem more likely as there is swift work in the middle of the async period. This is supported by the fact that our job that hits this problem does indeed disable swift. Change-Id: I0961d882d555a21233c6b4fbfc077cfe33b88499 --- stack.sh | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/stack.sh b/stack.sh index ca9ecfa213..163fc5b370 100755 --- a/stack.sh +++ b/stack.sh @@ -1238,17 +1238,21 @@ fi # deployments. This ensures the keys match across nova and cinder across all # hosts. FIXED_KEY=${FIXED_KEY:-bae3516cc1c0eb18b05440eba8012a4a880a2ee04d584a9c1579445e675b12defdc716ec} -if is_service_enabled nova; then - iniset $NOVA_CONF key_manager fixed_key "$FIXED_KEY" - iniset $NOVA_CPU_CONF key_manager fixed_key "$FIXED_KEY" -fi - if is_service_enabled cinder; then iniset $CINDER_CONF key_manager fixed_key "$FIXED_KEY" fi async_wait configure_neutron_nova +# NOTE(clarkb): This must come after async_wait configure_neutron_nova because +# configure_neutron_nova modifies $NOVA_CONF and $NOVA_CPU_CONF as well. If +# we don't wait then these two ini updates race either other and can result +# in unexpected configs. 
+if is_service_enabled nova; then + iniset $NOVA_CONF key_manager fixed_key "$FIXED_KEY" + iniset $NOVA_CPU_CONF key_manager fixed_key "$FIXED_KEY" +fi + # Launch the nova-api and wait for it to answer before continuing if is_service_enabled n-api; then echo_summary "Starting Nova API" From 5c304d817682d6c807b532b50a2f105479ac3fa2 Mon Sep 17 00:00:00 2001 From: Lucas Alvares Gomes Date: Thu, 11 Jun 2020 11:00:56 +0100 Subject: [PATCH 051/574] Change Neutron's default ML2 driver to OVN As part of the Victoria PTG the Neutron community [0] agreed on changing the default backend driver from ML2/OVS to ML2/OVN in DevStack. A lot of changes have been submitted towards this goal including but not limted to: * Moving the OVN module to DevStack: https://review.opendev.org/c/openstack/devstack/+/734621 * Updating the OVN module to use distro packages instead of compiling OVN from source: https://review.opendev.org/c/openstack/devstack/+/763402o And now this patch is changing the the actual Q_AGENT, Q_ML2_TENANT_NETWORK_TYPE and Q_ML2_PLUGIN_MECHANISM_DRIVERS values in devstack to what is expected by OVN as well as updating the Zuul templates to enable the OVN services. [0] https://etherpad.opendev.org/p/neutron-victoria-ptg Change-Id: I92054ce9d2ab7a42746ed5dececef583b0f8a833 Signed-off-by: Lucas Alvares Gomes --- .zuul.yaml | 35 +++++++++++++++++++++++++++-------- lib/neutron_plugins/ml2 | 6 +++--- 2 files changed, 30 insertions(+), 11 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 00129b5ca4..e133bfacd7 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -468,6 +468,10 @@ SWIFT_HASH: 1234123412341234 DEBUG_LIBVIRT_COREDUMPS: true NOVA_VNC_ENABLED: true + OVN_L3_CREATE_PUBLIC_NETWORK: true + OVN_DBS_LOG_LEVEL: dbg + ENABLE_CHASSIS_AS_GW: true + Q_USE_PROVIDERNET_FOR_PUBLIC: true devstack_local_conf: post-config: $NEUTRON_CONF: @@ -477,7 +481,11 @@ # Core services enabled for this branch. # This list replaces the test-matrix. # Shared services - dstat: true + # + # NOTE(lucasagomes): disable dstat until bug + # https://bugs.launchpad.net/ubuntu/+source/dstat/+bug/1866619 is + # fixed. Also see: https://bugs.launchpad.net/neutron/+bug/1898863 + dstat: false etcd3: true memory_tracker: true mysql: true @@ -496,13 +504,14 @@ n-sch: true # Placement service placement-api: true + # OVN services + ovn-controller: true + ovn-northd: true + ovs-vswitchd: true + ovsdb-server: true # Neutron services - q-agt: true - q-dhcp: true - q-l3: true - q-meta: true - q-metering: true q-svc: true + q-ovn-metadata-agent: true # Swift services s-account: true s-container: true @@ -527,15 +536,24 @@ # Core services enabled for this branch. # This list replaces the test-matrix. # Shared services - dstat: true + # + # NOTE(lucasagomes): disable dstat until bug + # https://bugs.launchpad.net/ubuntu/+source/dstat/+bug/1866619 is + # fixed. Also see: https://bugs.launchpad.net/neutron/+bug/1898863 + dstat: false memory_tracker: true tls-proxy: true # Nova services n-cpu: true # Placement services placement-client: true + # OVN services + ovn-controller: true + ovn-northd: false + ovs-vswitchd: true + ovsdb-server: true # Neutron services - q-agt: true + q-ovn-metadata-agent: true # Cinder services c-bak: true c-vol: true @@ -549,6 +567,7 @@ # integrated gate, so specifying the services has not effect. 
# ceilometer-*: false devstack_localrc: + ENABLE_CHASSIS_AS_GW: false # Subnode specific settings GLANCE_HOSTPORT: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}:9292" Q_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}" diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2 index ae4b251d83..a58ba5cbcd 100644 --- a/lib/neutron_plugins/ml2 +++ b/lib/neutron_plugins/ml2 @@ -8,14 +8,14 @@ _XTRACE_NEUTRON_ML2=$(set +o | grep xtrace) set +o xtrace # Default openvswitch L2 agent -Q_AGENT=${Q_AGENT:-openvswitch} +Q_AGENT=${Q_AGENT:-ovn} if [ -f $TOP_DIR/lib/neutron_plugins/${Q_AGENT}_agent ]; then source $TOP_DIR/lib/neutron_plugins/${Q_AGENT}_agent fi # Enable this to simply and quickly enable tunneling with ML2. # Select either 'gre', 'vxlan', or 'gre,vxlan' -Q_ML2_TENANT_NETWORK_TYPE=${Q_ML2_TENANT_NETWORK_TYPE:-"vxlan"} +Q_ML2_TENANT_NETWORK_TYPE=${Q_ML2_TENANT_NETWORK_TYPE:-"geneve"} # This has to be set here since the agent will set this in the config file if [[ "$Q_ML2_TENANT_NETWORK_TYPE" == "gre" || "$Q_ML2_TENANT_NETWORK_TYPE" == "vxlan" ]]; then Q_TUNNEL_TYPES=$Q_ML2_TENANT_NETWORK_TYPE @@ -24,7 +24,7 @@ elif [[ "$ENABLE_TENANT_TUNNELS" == "True" ]]; then fi # List of MechanismDrivers to load -Q_ML2_PLUGIN_MECHANISM_DRIVERS=${Q_ML2_PLUGIN_MECHANISM_DRIVERS:-openvswitch,linuxbridge} +Q_ML2_PLUGIN_MECHANISM_DRIVERS=${Q_ML2_PLUGIN_MECHANISM_DRIVERS:-ovn} # Default GRE TypeDriver options Q_ML2_PLUGIN_GRE_TYPE_OPTIONS=${Q_ML2_PLUGIN_GRE_TYPE_OPTIONS:-tunnel_id_ranges=$TENANT_TUNNEL_RANGES} # Default VXLAN TypeDriver options From 69a66fb62bcb77145b6eec21fc2d56d40a861d0d Mon Sep 17 00:00:00 2001 From: Lucas Alvares Gomes Date: Tue, 11 May 2021 11:04:32 +0100 Subject: [PATCH 052/574] Fix docs job Sphinx 4.0.0 added a new dependency [0] which is causing the job to fail at the moment. This patch fix the problem by adding UC to the docs jobs. 
[0] https://www.sphinx-doc.org/en/master/changes.html (LaTeX: add tex-gyre font dependency) Change-Id: I28019331017405c06577ada88f8e9f6d9a2afc23 Signed-off-by: Lucas Alvares Gomes --- tox.ini | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index ed28636d3a..5bb2268c4f 100644 --- a/tox.ini +++ b/tox.ini @@ -34,7 +34,9 @@ commands = bash -c "find {toxinidir} \ -print0 | xargs -0 bashate -v -iE006 -eE005,E042" [testenv:docs] -deps = -r{toxinidir}/doc/requirements.txt +deps = + -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} + -r{toxinidir}/doc/requirements.txt whitelist_externals = bash setenv = TOP_DIR={toxinidir} From 230f5c434c800c94935d5cd0dcfc1bd18329759f Mon Sep 17 00:00:00 2001 From: Anand Bhat Date: Wed, 12 May 2021 16:53:15 +0530 Subject: [PATCH 053/574] Changed minversion in tox to 3.18.0 The patch bumps min version of tox to 3.18.0 python in order to replace tox's whitelist_externals by allowlist_externals option: https://github.com/tox-dev/tox/blob/master/docs/changelog.rst#v3180-2020-07-23 Change-Id: Id8bdda703afc39d352e3a53877318dc30d91a5f7 --- tox.ini | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tox.ini b/tox.ini index 5bb2268c4f..ec764abc87 100644 --- a/tox.ini +++ b/tox.ini @@ -1,5 +1,5 @@ [tox] -minversion = 1.6 +minversion = 3.18.0 skipsdist = True envlist = bashate @@ -13,7 +13,7 @@ basepython = python3 # modified bashate tree deps = {env:BASHATE_INSTALL_PATH:bashate==2.0.0} -whitelist_externals = bash +allowlist_externals = bash commands = bash -c "find {toxinidir} \ -not \( -type d -name .?\* -prune \) \ -not \( -type d -name doc -prune \) \ @@ -37,7 +37,7 @@ commands = bash -c "find {toxinidir} \ deps = -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} -r{toxinidir}/doc/requirements.txt -whitelist_externals = bash +allowlist_externals = bash setenv = TOP_DIR={toxinidir} commands = @@ -45,7 +45,7 @@ commands = [testenv:pdf-docs] deps = {[testenv:docs]deps} -whitelist_externals = +allowlist_externals = make commands = sphinx-build -W -b latex doc/source doc/build/pdf From ff073a5643f01dda3200d2ce426f23dc24e28b8f Mon Sep 17 00:00:00 2001 From: Ghanshyam Date: Thu, 13 May 2021 16:25:17 +0000 Subject: [PATCH 054/574] Revert "Change Neutron's default ML2 driver to OVN" This reverts commit 5c304d817682d6c807b532b50a2f105479ac3fa2. Reason for revert: There are more things to fix/move like done in 791085 and 791282 Also let's change all required default in devstack scripts instead of devstack's zuul job side. Basically do this change without any change in .zuul.yaml Change-Id: Ie0f59d1b9a4b97ad9fd8131819054dfb616f31fd --- .zuul.yaml | 35 ++++++++--------------------------- lib/neutron_plugins/ml2 | 6 +++--- 2 files changed, 11 insertions(+), 30 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index e133bfacd7..00129b5ca4 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -468,10 +468,6 @@ SWIFT_HASH: 1234123412341234 DEBUG_LIBVIRT_COREDUMPS: true NOVA_VNC_ENABLED: true - OVN_L3_CREATE_PUBLIC_NETWORK: true - OVN_DBS_LOG_LEVEL: dbg - ENABLE_CHASSIS_AS_GW: true - Q_USE_PROVIDERNET_FOR_PUBLIC: true devstack_local_conf: post-config: $NEUTRON_CONF: @@ -481,11 +477,7 @@ # Core services enabled for this branch. # This list replaces the test-matrix. # Shared services - # - # NOTE(lucasagomes): disable dstat until bug - # https://bugs.launchpad.net/ubuntu/+source/dstat/+bug/1866619 is - # fixed. 
Also see: https://bugs.launchpad.net/neutron/+bug/1898863 - dstat: false + dstat: true etcd3: true memory_tracker: true mysql: true @@ -504,14 +496,13 @@ n-sch: true # Placement service placement-api: true - # OVN services - ovn-controller: true - ovn-northd: true - ovs-vswitchd: true - ovsdb-server: true # Neutron services + q-agt: true + q-dhcp: true + q-l3: true + q-meta: true + q-metering: true q-svc: true - q-ovn-metadata-agent: true # Swift services s-account: true s-container: true @@ -536,24 +527,15 @@ # Core services enabled for this branch. # This list replaces the test-matrix. # Shared services - # - # NOTE(lucasagomes): disable dstat until bug - # https://bugs.launchpad.net/ubuntu/+source/dstat/+bug/1866619 is - # fixed. Also see: https://bugs.launchpad.net/neutron/+bug/1898863 - dstat: false + dstat: true memory_tracker: true tls-proxy: true # Nova services n-cpu: true # Placement services placement-client: true - # OVN services - ovn-controller: true - ovn-northd: false - ovs-vswitchd: true - ovsdb-server: true # Neutron services - q-ovn-metadata-agent: true + q-agt: true # Cinder services c-bak: true c-vol: true @@ -567,7 +549,6 @@ # integrated gate, so specifying the services has not effect. # ceilometer-*: false devstack_localrc: - ENABLE_CHASSIS_AS_GW: false # Subnode specific settings GLANCE_HOSTPORT: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}:9292" Q_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}" diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2 index a58ba5cbcd..ae4b251d83 100644 --- a/lib/neutron_plugins/ml2 +++ b/lib/neutron_plugins/ml2 @@ -8,14 +8,14 @@ _XTRACE_NEUTRON_ML2=$(set +o | grep xtrace) set +o xtrace # Default openvswitch L2 agent -Q_AGENT=${Q_AGENT:-ovn} +Q_AGENT=${Q_AGENT:-openvswitch} if [ -f $TOP_DIR/lib/neutron_plugins/${Q_AGENT}_agent ]; then source $TOP_DIR/lib/neutron_plugins/${Q_AGENT}_agent fi # Enable this to simply and quickly enable tunneling with ML2. # Select either 'gre', 'vxlan', or 'gre,vxlan' -Q_ML2_TENANT_NETWORK_TYPE=${Q_ML2_TENANT_NETWORK_TYPE:-"geneve"} +Q_ML2_TENANT_NETWORK_TYPE=${Q_ML2_TENANT_NETWORK_TYPE:-"vxlan"} # This has to be set here since the agent will set this in the config file if [[ "$Q_ML2_TENANT_NETWORK_TYPE" == "gre" || "$Q_ML2_TENANT_NETWORK_TYPE" == "vxlan" ]]; then Q_TUNNEL_TYPES=$Q_ML2_TENANT_NETWORK_TYPE @@ -24,7 +24,7 @@ elif [[ "$ENABLE_TENANT_TUNNELS" == "True" ]]; then fi # List of MechanismDrivers to load -Q_ML2_PLUGIN_MECHANISM_DRIVERS=${Q_ML2_PLUGIN_MECHANISM_DRIVERS:-ovn} +Q_ML2_PLUGIN_MECHANISM_DRIVERS=${Q_ML2_PLUGIN_MECHANISM_DRIVERS:-openvswitch,linuxbridge} # Default GRE TypeDriver options Q_ML2_PLUGIN_GRE_TYPE_OPTIONS=${Q_ML2_PLUGIN_GRE_TYPE_OPTIONS:-tunnel_id_ranges=$TENANT_TUNNEL_RANGES} # Default VXLAN TypeDriver options From c19c1262c8b81f1cc543eafb9e5c3a34c4b632fe Mon Sep 17 00:00:00 2001 From: Lucas Alvares Gomes Date: Mon, 17 May 2021 13:54:10 +0100 Subject: [PATCH 055/574] Replace dstat with pcp-dstat The dstat project is no longer maintained. The pcp-dstat package installs a dstat command so no further updates to the scripts should be needed. 
Change-Id: Ied8c9d29bed4f887c364db7080a0f2a0c02328af Signed-off-by: Lucas Alvares Gomes --- files/debs/dstat | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/files/debs/dstat b/files/debs/dstat index 2b643b8b1b..40d00f4aa4 100644 --- a/files/debs/dstat +++ b/files/debs/dstat @@ -1 +1,2 @@ -dstat +dstat # dist:bionic +pcp From 6e9f7c25704afb5bcc33c6f17a01f62068664e40 Mon Sep 17 00:00:00 2001 From: Brian Rosmaita Date: Tue, 13 Oct 2020 14:20:38 -0400 Subject: [PATCH 056/574] Support optimized cinder backend for glance When Glance is configured with a cinder glance_store, Cinder can be configured to allow cloning of image data directly in the backend instead of transferring data through Glance. Expose these configuration options in devstack to facilitate testing this feature. Adds: - CINDER_ALLOWED_DIRECT_URL_SCHEMES - GLANCE_SHOW_DIRECT_URL - GLANCE_SHOW_MULTIPLE_LOCATIONS Change-Id: Iee619b443088fd77cf7b1a48563203bdf4a93a39 --- lib/cinder | 19 +++++++++++++++++++ lib/glance | 15 +++++++++++++++ 2 files changed, 34 insertions(+) diff --git a/lib/cinder b/lib/cinder index cfa3693f03..fca01a2140 100644 --- a/lib/cinder +++ b/lib/cinder @@ -104,6 +104,22 @@ if is_fedora || is_suse; then fi fi +# When Cinder is used as a backend for Glance, it can be configured to clone +# the volume containing image data directly in the backend instead of +# transferring data from volume to volume. Value is a comma separated list of +# schemes (currently only 'file' and 'cinder' are supported). The default +# configuration in Cinder is empty (that is, do not use this feature). NOTE: +# to use this feature you must also enable GLANCE_SHOW_DIRECT_URL and/or +# GLANCE_SHOW_MULTIPLE_LOCATIONS for glance-api.conf. +CINDER_ALLOWED_DIRECT_URL_SCHEMES=${CINDER_ALLOWED_DIRECT_URL_SCHEMES:-} +if [[ -n "$CINDER_ALLOWED_DIRECT_URL_SCHEMES" ]]; then + if [[ "${GLANCE_SHOW_DIRECT_URL:-False}" != "True" \ + && "${GLANCE_SHOW_MULTIPLE_LOCATIONS:-False}" != "True" ]]; then + warn $LINENO "CINDER_ALLOWED_DIRECT_URL_SCHEMES is set, but neither \ +GLANCE_SHOW_DIRECT_URL nor GLANCE_SHOW_MULTIPLE_LOCATIONS is True" + fi +fi + # For backward compatibility # Before CINDER_BACKUP_DRIVER was introduced, ceph backup driver was configured # along with ceph backend driver. @@ -266,6 +282,9 @@ function configure_cinder { fi iniset $CINDER_CONF key_manager backend cinder.keymgr.conf_key_mgr.ConfKeyManager iniset $CINDER_CONF key_manager fixed_key $(openssl rand -hex 16) + if [[ -n "$CINDER_ALLOWED_DIRECT_URL_SCHEMES" ]]; then + iniset $CINDER_CONF DEFAULT allowed_direct_url_schemes $CINDER_ALLOWED_DIRECT_URL_SCHEMES + fi # Avoid RPC timeouts in slow CI and test environments by doubling the # default response timeout set by RPC clients. See bug #1873234 for more diff --git a/lib/glance b/lib/glance index e789affaf1..118fa7c863 100644 --- a/lib/glance +++ b/lib/glance @@ -51,6 +51,18 @@ GLANCE_STORE_ROOTWRAP_BASE_DIR=/usr/local/etc/glance if is_opensuse; then GLANCE_STORE_ROOTWRAP_BASE_DIR=/usr/etc/glance fi +# When Cinder is used as a glance store, you can optionally configure cinder to +# optimize bootable volume creation by allowing volumes to be cloned directly +# in the backend instead of transferring data via Glance. To use this feature, +# set CINDER_ALLOWED_DIRECT_URL_SCHEMES for cinder.conf and enable +# GLANCE_SHOW_DIRECT_URL and/or GLANCE_SHOW_MULTIPLE_LOCATIONS for Glance. 
The +# default value for both of these is False, because for some backends they +# present a grave security risk (though not for Cinder, because all that's +# exposed is the volume_id where the image data is stored.) See OSSN-0065 for +# more information: https://wiki.openstack.org/wiki/OSSN/OSSN-0065 +GLANCE_SHOW_DIRECT_URL=$(trueorfalse False GLANCE_SHOW_DIRECT_URL) +GLANCE_SHOW_MULTIPLE_LOCATIONS=$(trueorfalse False GLANCE_SHOW_MULTIPLE_LOCATIONS) + # Glance multi-store configuration # Boolean flag to enable multiple store configuration for glance GLANCE_ENABLE_MULTIPLE_STORES=$(trueorfalse False GLANCE_ENABLE_MULTIPLE_STORES) @@ -283,6 +295,9 @@ function configure_glance { if [ "$VIRT_DRIVER" = 'libvirt' ] && [ "$LIBVIRT_TYPE" = 'parallels' ]; then iniset $GLANCE_API_CONF DEFAULT disk_formats "ami,ari,aki,vhd,vmdk,raw,qcow2,vdi,iso,ploop" fi + # Only use these if you know what you are doing! See OSSN-0065 + iniset $GLANCE_API_CONF DEFAULT show_image_direct_url $GLANCE_SHOW_DIRECT_URL + iniset $GLANCE_API_CONF DEFAULT show_multiple_locations $GLANCE_SHOW_MULTIPLE_LOCATIONS # Configure glance_store configure_glance_store $USE_CINDER_FOR_GLANCE $GLANCE_ENABLE_MULTIPLE_STORES From 35cec0d7c0857d76d3ea0b52b97f2a166c04c13e Mon Sep 17 00:00:00 2001 From: Brian Rosmaita Date: Mon, 17 May 2021 18:58:59 -0400 Subject: [PATCH 057/574] Remove Block Storage API v2 support The Block Storage API v2 was deprecated in Pike by change I913c44799cddc37c3342729ec0ef34068db5b2d4 and is (finally) being removed in Xena [0]. So remove v2 support from devstack. [0] https://wiki.openstack.org/wiki/CinderXenaPTGSummary#Removing_the_Block_Storage_API_v2 Depends-on: https://review.opendev.org/c/openstack/devstack/+/792048 Change-Id: I856d78648d28ac4cad0fb212bef1ae6ad32fca90 --- lib/cinder | 12 ------------ lib/tempest | 13 ------------- 2 files changed, 25 deletions(-) diff --git a/lib/cinder b/lib/cinder index cfa3693f03..dab2aea1c6 100644 --- a/lib/cinder +++ b/lib/cinder @@ -376,12 +376,6 @@ function create_cinder_accounts { "$REGION_NAME" \ "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v3/\$(project_id)s" - get_or_create_service "cinderv2" "volumev2" "Cinder Volume Service V2" - get_or_create_endpoint \ - "volumev2" \ - "$REGION_NAME" \ - "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v2/\$(project_id)s" - get_or_create_service "cinderv3" "volumev3" "Cinder Volume Service V3" get_or_create_endpoint \ "volumev3" \ @@ -393,12 +387,6 @@ function create_cinder_accounts { "$REGION_NAME" \ "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST/volume/v3/\$(project_id)s" - get_or_create_service "cinderv2" "volumev2" "Cinder Volume Service V2" - get_or_create_endpoint \ - "volumev2" \ - "$REGION_NAME" \ - "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST/volume/v2/\$(project_id)s" - get_or_create_service "cinderv3" "volumev3" "Cinder Volume Service V3" get_or_create_endpoint \ "volumev3" \ diff --git a/lib/tempest b/lib/tempest index 29a62290ce..d835c68d4a 100644 --- a/lib/tempest +++ b/lib/tempest @@ -459,13 +459,6 @@ function configure_tempest { iniset $TEMPEST_CONFIG validation network_for_ssh $TEMPEST_SSH_NETWORK_NAME # Volume - # Set the service catalog entry for Tempest to run on. Typically - # used to try different Volume API version targets. The tempest - # default it to 'volumev3'(v3 APIs endpoint) , so only set this - # if you want to change it. 
- if [[ -n "$TEMPEST_VOLUME_TYPE" ]]; then - iniset $TEMPEST_CONFIG volume catalog_type $TEMPEST_VOLUME_TYPE - fi # Only turn on TEMPEST_VOLUME_MANAGE_SNAPSHOT by default for "lvm" backends if [[ "$CINDER_ENABLED_BACKENDS" == *"lvm"* ]]; then TEMPEST_VOLUME_MANAGE_SNAPSHOT=${TEMPEST_VOLUME_MANAGE_SNAPSHOT:-True} @@ -489,12 +482,6 @@ function configure_tempest { iniset $TEMPEST_CONFIG volume-feature-enabled volume_revert $(trueorfalse False TEMPEST_VOLUME_REVERT_TO_SNAPSHOT) local tempest_volume_min_microversion=${TEMPEST_VOLUME_MIN_MICROVERSION:-None} local tempest_volume_max_microversion=${TEMPEST_VOLUME_MAX_MICROVERSION:-"latest"} - # Reset microversions to None where v2 is running which does not support microversion. - # Both "None" means no microversion testing. - if [[ "$TEMPEST_VOLUME_TYPE" == "volumev2" ]]; then - tempest_volume_min_microversion=None - tempest_volume_max_microversion=None - fi if [ "$tempest_volume_min_microversion" == "None" ]; then inicomment $TEMPEST_CONFIG volume min_microversion else From 2a9673f0278699d03931d69b4da22e9709300026 Mon Sep 17 00:00:00 2001 From: Ihar Hrachyshka Date: Wed, 19 May 2021 10:22:18 -0400 Subject: [PATCH 058/574] docs: recommend Ubuntu 20.04 instead of Bionic Bionic support was dropped in I39e38e4a6c2e52dd3822c9fdea354258359a9f53. Change-Id: I765aac352590fd2f74d3fd90676d6d098548e6b8 --- doc/source/index.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/index.rst b/doc/source/index.rst index 8b8acde38c..9f477ab911 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -40,7 +40,7 @@ Start with a clean and minimal install of a Linux system. DevStack attempts to support the two latest LTS releases of Ubuntu, the latest/current Fedora version, CentOS/RHEL 8 and OpenSUSE. -If you do not have a preference, Ubuntu 18.04 (Bionic Beaver) is the +If you do not have a preference, Ubuntu 20.04 (Focal Fossa) is the most tested, and will probably go the smoothest. Add Stack User (optional) From f0736406f5ce055072a62a62fe9fdc7cead49671 Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Tue, 18 May 2021 17:15:30 -0500 Subject: [PATCH 059/574] Fix unit test to use python3 command unit test jobs staretd to run on ubuntu-focal now and failing for using 'python' command. Change-Id: Ie002faf4c96ac7f207207a481c057b8df0289e6c --- tests/test_write_devstack_local_conf_role.sh | 2 +- tests/unittest.sh | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/test_write_devstack_local_conf_role.sh b/tests/test_write_devstack_local_conf_role.sh index b2bc0a2c46..71d8d51614 100755 --- a/tests/test_write_devstack_local_conf_role.sh +++ b/tests/test_write_devstack_local_conf_role.sh @@ -6,4 +6,4 @@ TOP=$(cd $(dirname "$0")/.. && pwd) source $TOP/functions source $TOP/tests/unittest.sh -python ./roles/write-devstack-local-conf/library/test.py +${PYTHON} $TOP/roles/write-devstack-local-conf/library/test.py diff --git a/tests/unittest.sh b/tests/unittest.sh index 3703ece91d..fced2abe65 100644 --- a/tests/unittest.sh +++ b/tests/unittest.sh @@ -17,6 +17,8 @@ ERROR=0 PASS=0 FAILED_FUNCS="" +export PYTHON=$(which python3 2>/dev/null) + # pass a test, printing out MSG # usage: passed message function passed { From e38a39ad404637ca1649cea072883aa0a4592c4f Mon Sep 17 00:00:00 2001 From: Lucas Alvares Gomes Date: Fri, 14 May 2021 09:14:24 +0100 Subject: [PATCH 060/574] Change default network backend driver to ML2/OVN This patch is changing the default network backend driver in DevStack to OVN. 
This is a long effort from the Neutron community that has been discussed on previous PTGs and agreed upon. A similar patch to this one [0] have been merged in the past but got reverted [1] because it did break some zuul jobs. This one also include fixes for such jobs and was verified at [2] [0] https://review.opendev.org/c/openstack/devstack/+/735097 [1] https://review.opendev.org/c/openstack/neutron/+/775632 [2] https://review.opendev.org/c/zuul/zuul-jobs/+/791117 Change-Id: I8c2be82f33ed9f6c36f5509b3b210ee1a38e87ca Signed-off-by: Lucas Alvares Gomes --- .zuul.yaml | 20 ++++++++++++++------ lib/neutron_plugins/ml2 | 11 ++++++----- lib/neutron_plugins/ovn_agent | 18 +++++++++++++----- stackrc | 4 +++- 4 files changed, 36 insertions(+), 17 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index b65aeec4dd..4ca0257cc6 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -468,6 +468,8 @@ SWIFT_HASH: 1234123412341234 DEBUG_LIBVIRT_COREDUMPS: true NOVA_VNC_ENABLED: true + OVN_L3_CREATE_PUBLIC_NETWORK: true + OVN_DBS_LOG_LEVEL: dbg devstack_local_conf: post-config: $NEUTRON_CONF: @@ -496,13 +498,14 @@ n-sch: true # Placement service placement-api: true + # OVN services + ovn-controller: true + ovn-northd: true + ovs-vswitchd: true + ovsdb-server: true # Neutron services - q-agt: true - q-dhcp: true - q-l3: true - q-meta: true - q-metering: true q-svc: true + q-ovn-metadata-agent: true # Swift services s-account: true s-container: true @@ -534,8 +537,12 @@ n-cpu: true # Placement services placement-client: true + # OVN services + ovn-controller: true + ovs-vswitchd: true + ovsdb-server: true # Neutron services - q-agt: true + q-ovn-metadata-agent: true # Cinder services c-bak: true c-vol: true @@ -553,6 +560,7 @@ GLANCE_HOSTPORT: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}:9292" Q_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}" NOVA_VNC_ENABLED: true + ENABLE_CHASSIS_AS_GW: false - job: name: devstack-ipv6 diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2 index ae4b251d83..e1f868f0a7 100644 --- a/lib/neutron_plugins/ml2 +++ b/lib/neutron_plugins/ml2 @@ -7,15 +7,16 @@ _XTRACE_NEUTRON_ML2=$(set +o | grep xtrace) set +o xtrace -# Default openvswitch L2 agent -Q_AGENT=${Q_AGENT:-openvswitch} +# Default OVN L2 agent +Q_AGENT=${Q_AGENT:-ovn} if [ -f $TOP_DIR/lib/neutron_plugins/${Q_AGENT}_agent ]; then source $TOP_DIR/lib/neutron_plugins/${Q_AGENT}_agent fi # Enable this to simply and quickly enable tunneling with ML2. -# Select either 'gre', 'vxlan', or 'gre,vxlan' -Q_ML2_TENANT_NETWORK_TYPE=${Q_ML2_TENANT_NETWORK_TYPE:-"vxlan"} +# For ML2/OVS select either 'gre', 'vxlan', or 'gre,vxlan'. +# For ML2/OVN use 'geneve'. 
+Q_ML2_TENANT_NETWORK_TYPE=${Q_ML2_TENANT_NETWORK_TYPE:-"geneve"} # This has to be set here since the agent will set this in the config file if [[ "$Q_ML2_TENANT_NETWORK_TYPE" == "gre" || "$Q_ML2_TENANT_NETWORK_TYPE" == "vxlan" ]]; then Q_TUNNEL_TYPES=$Q_ML2_TENANT_NETWORK_TYPE @@ -24,7 +25,7 @@ elif [[ "$ENABLE_TENANT_TUNNELS" == "True" ]]; then fi # List of MechanismDrivers to load -Q_ML2_PLUGIN_MECHANISM_DRIVERS=${Q_ML2_PLUGIN_MECHANISM_DRIVERS:-openvswitch,linuxbridge} +Q_ML2_PLUGIN_MECHANISM_DRIVERS=${Q_ML2_PLUGIN_MECHANISM_DRIVERS:-ovn} # Default GRE TypeDriver options Q_ML2_PLUGIN_GRE_TYPE_OPTIONS=${Q_ML2_PLUGIN_GRE_TYPE_OPTIONS:-tunnel_id_ranges=$TENANT_TUNNEL_RANGES} # Default VXLAN TypeDriver options diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index e4d0d75230..4af1340a26 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -21,10 +21,6 @@ source ${TOP_DIR}/lib/neutron_plugins/ovs_base source ${TOP_DIR}/lib/neutron_plugins/openvswitch_agent -# Load devstack ovs base functions -source $NEUTRON_DIR/devstack/lib/ovs - - # Defaults # -------- @@ -88,12 +84,18 @@ OVN_INSTALL_OVS_PYTHON_MODULE=$(trueorfalse False OVN_INSTALL_OVS_PYTHON_MODULE) # configure the MTU DHCP option. OVN_GENEVE_OVERHEAD=${OVN_GENEVE_OVERHEAD:-38} -# The log level of the OVN databases (north and south) +# The log level of the OVN databases (north and south). +# Supported log levels are: off, emer, err, warn, info or dbg. +# More information about log levels can be found at +# http://www.openvswitch.org/support/dist-docs/ovs-appctl.8.txt OVN_DBS_LOG_LEVEL=${OVN_DBS_LOG_LEVEL:-info} OVN_META_CONF=$NEUTRON_CONF_DIR/neutron_ovn_metadata_agent.ini OVN_META_DATA_HOST=${OVN_META_DATA_HOST:-$(ipv6_unquote $SERVICE_HOST)} +# If True (default) the node will be considered a gateway node. +ENABLE_CHASSIS_AS_GW=$(trueorfalse True ENABLE_CHASSIS_AS_GW) + export OVSDB_SERVER_LOCAL_HOST=$SERVICE_LOCAL_HOST if [[ "$SERVICE_IP_VERSION" == 6 ]]; then OVSDB_SERVER_LOCAL_HOST=[$OVSDB_SERVER_LOCAL_HOST] @@ -171,6 +173,9 @@ function wait_for_sock_file { } function use_new_ovn_repository { + if [[ "$OVN_BUILD_FROM_SOURCE" == "False" ]]; then + return 0 + fi if [ -z "$is_new_ovn" ]; then local ovs_repo_dir=$DEST/$OVS_REPO_NAME if [ ! -d $ovs_repo_dir ]; then @@ -390,6 +395,9 @@ function install_ovn { sudo ln -s $OVS_RUNDIR $OVN_RUNDIR if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then + # Load devstack ovs base functions + source $NEUTRON_DIR/devstack/lib/ovs + # If OVS is already installed, remove it, because we're about to # re-install it from source. for package in openvswitch openvswitch-switch openvswitch-common; do diff --git a/stackrc b/stackrc index 196f61fa3c..05016594eb 100644 --- a/stackrc +++ b/stackrc @@ -72,8 +72,10 @@ if ! isset ENABLED_SERVICES ; then ENABLED_SERVICES+=,g-api # Cinder ENABLED_SERVICES+=,c-sch,c-api,c-vol + # OVN + ENABLED_SERVICES+=,ovn-controller,ovn-northd,ovs-vswitchd,ovsdb-server # Neutron - ENABLED_SERVICES+=,q-svc,q-dhcp,q-meta,q-agt,q-l3 + ENABLED_SERVICES+=,q-svc,q-ovn-metadata-agent # Dashboard ENABLED_SERVICES+=,horizon # Additional services From ddb66f2344b933f278b0b52be3ca59a4c511ab14 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rados=C5=82aw=20Piliszek?= Date: Sat, 18 Jul 2020 12:18:39 +0200 Subject: [PATCH 061/574] [CI] Add no-tls-proxy job Some gates run devstack like this and it follows different code paths. Let's ensure we don't break it now and then. 
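Locally the same code path can be exercised by turning the proxy off before
stacking, e.g. (a sketch for a standard single-node local.conf):

    # Mirror the job: deploy every service on plain HTTP, with no TLS proxy
    # in front of the API endpoints.
    disable_service tls-proxy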
Change-Id: I6ee1bfc30bced53f6d7fb841e01714069919fd88 Reference: http://lists.openstack.org/pipermail/openstack-discuss/2020-July/015997.html Reference: http://eavesdrop.openstack.org/meetings/qa/2021/qa.2021-05-11-14.00.log.html --- .zuul.yaml | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/.zuul.yaml b/.zuul.yaml index b65aeec4dd..1ede4448a0 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -595,6 +595,17 @@ zuul_copy_output: /opt/stack/async: logs +- job: + name: devstack-no-tls-proxy + parent: tempest-full-py3 + description: | + Tempest job with tls-proxy off. + + Some gates run devstack like this and it follows different code paths. + vars: + devstack_services: + tls-proxy: false + - job: name: devstack-platform-fedora-latest parent: tempest-full-py3 @@ -814,3 +825,7 @@ - ^.*\.rst$ - ^doc/.*$ - devstack-platform-fedora-latest-virt-preview + - devstack-no-tls-proxy + periodic: + jobs: + - devstack-no-tls-proxy From 7604e085b4542c50aefc9c23aa339958757a5770 Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Tue, 25 May 2021 13:06:14 -0500 Subject: [PATCH 062/574] Pin nodeset for unit test job devstack unit test job does not set any nodeset and so does use default nodeset defined in base jobs in opendev. When opendev switches the default nodeset to the latest distro version, devstack unit test job can start failing. Example: - https://review.opendev.org/q/I01408f2f2959b0788fe712ac268a526502226ee9 - https://review.opendev.org/q/Ib1ea47bc7384e1f579cb08c779a32151fccd6845 To avoid such a situation in future, let's set the working nodeset for this job also so that when we cut the stable branch we can run it on the working distro version. Change-Id: I302140778fedf08bc8ba72c453837fa7b8f8f9ae --- .zuul.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/.zuul.yaml b/.zuul.yaml index 4ca0257cc6..9a559ef776 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -676,6 +676,7 @@ - job: name: devstack-unit-tests + nodeset: ubuntu-focal description: | Runs unit tests on devstack project. From 22038a9a8c4418d6d49bed83024a3cd97e627860 Mon Sep 17 00:00:00 2001 From: Lucas Alvares Gomes Date: Thu, 27 May 2021 13:44:20 +0100 Subject: [PATCH 063/574] [OVN] Configure public bridge enabled by default This patch makes the OVN_L3_CREATE_PUBLIC_NETWORK configuration True by default. This option makes the OVN lib in DevStack create & configure the external bridge, matching the same behavior from the OVS driver in DevStack. Change-Id: Icda53b95fdc3c169ac48a6ec4343c87ba404baa4 Signed-off-by: Lucas Alvares Gomes --- .zuul.yaml | 1 - lib/neutron_plugins/ovn_agent | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/.zuul.yaml b/.zuul.yaml index 6484b2a6ce..5bc6a8b424 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -468,7 +468,6 @@ SWIFT_HASH: 1234123412341234 DEBUG_LIBVIRT_COREDUMPS: true NOVA_VNC_ENABLED: true - OVN_L3_CREATE_PUBLIC_NETWORK: true OVN_DBS_LOG_LEVEL: dbg devstack_local_conf: post-config: diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index 4af1340a26..f12e6a491d 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -95,6 +95,7 @@ OVN_META_DATA_HOST=${OVN_META_DATA_HOST:-$(ipv6_unquote $SERVICE_HOST)} # If True (default) the node will be considered a gateway node. 
ENABLE_CHASSIS_AS_GW=$(trueorfalse True ENABLE_CHASSIS_AS_GW) +OVN_L3_CREATE_PUBLIC_NETWORK=$(trueorfalse True OVN_L3_CREATE_PUBLIC_NETWORK) export OVSDB_SERVER_LOCAL_HOST=$SERVICE_LOCAL_HOST if [[ "$SERVICE_IP_VERSION" == 6 ]]; then From 77835633c5d1daba37f453c5bf7c84fad0f2d68e Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Thu, 13 May 2021 13:14:42 +1000 Subject: [PATCH 064/574] OVN : include source compliation functions This patch moves the OVS compilation module from Neutron into DevStack. It also renamed it to "ovs_source" to highlight its function, and the include has been moved to where the rest of the includes are located. Although this module is not required since by default DevStack installs OVS/OVN from the host OS packages instead of compiling from source, this is a nice to have as it avoids having bits and pieces of the code scattered around multiple repositories. Co-Authored-By: Lucas Alvares Gomes Change-Id: I39ec9ce0a91bea05cf8c446a9767ab879ac8e8f3 --- lib/neutron_plugins/ovn_agent | 6 +- lib/neutron_plugins/ovs_source | 215 +++++++++++++++++++++++++++++++++ 2 files changed, 218 insertions(+), 3 deletions(-) create mode 100644 lib/neutron_plugins/ovs_source diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index 4af1340a26..71a4c60129 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -21,6 +21,9 @@ source ${TOP_DIR}/lib/neutron_plugins/ovs_base source ${TOP_DIR}/lib/neutron_plugins/openvswitch_agent +# Load devstack ovs compliation and loading functions +source ${TOP_DIR}/lib/neutron_plugins/ovs_source + # Defaults # -------- @@ -395,9 +398,6 @@ function install_ovn { sudo ln -s $OVS_RUNDIR $OVN_RUNDIR if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then - # Load devstack ovs base functions - source $NEUTRON_DIR/devstack/lib/ovs - # If OVS is already installed, remove it, because we're about to # re-install it from source. for package in openvswitch openvswitch-switch openvswitch-common; do diff --git a/lib/neutron_plugins/ovs_source b/lib/neutron_plugins/ovs_source new file mode 100644 index 0000000000..294171f18b --- /dev/null +++ b/lib/neutron_plugins/ovs_source @@ -0,0 +1,215 @@ +#!/bin/bash +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +# Defaults +# -------- + +# Set variables for building OVS from source +OVS_REPO=${OVS_REPO:-https://github.com/openvswitch/ovs.git} +OVS_REPO_NAME=$(basename ${OVS_REPO} | cut -f1 -d'.') +OVS_REPO_NAME=${OVS_REPO_NAME:-ovs} +OVS_BRANCH=${OVS_BRANCH:-0047ca3a0290f1ef954f2c76b31477cf4b9755f5} + +# Functions + +# load_module() - Load module using modprobe module given by argument and dies +# on failure +# - fatal argument is optional and says whether function should +# exit if module can't be loaded +function load_module { + local module=$1 + local fatal=$2 + + if [ "$(trueorfalse True fatal)" == "True" ]; then + sudo modprobe $module || (dmesg && die $LINENO "FAILED TO LOAD $module") + else + sudo modprobe $module || (echo "FAILED TO LOAD $module" && dmesg) + fi +} + +# prepare_for_compilation() - Fetch ovs git repository and install packages needed for +# compilation. +function prepare_for_ovs_compilation { + local build_modules=${1:-False} + OVS_DIR=$DEST/$OVS_REPO_NAME + + if [ ! -d $OVS_DIR ] ; then + # We can't use git_clone here because we want to ignore ERROR_ON_CLONE + git_timed clone $OVS_REPO $OVS_DIR + cd $OVS_DIR + git checkout $OVS_BRANCH + else + # Even though the directory already exists, call git_clone to update it + # if needed based on the RECLONE option + git_clone $OVS_REPO $OVS_DIR $OVS_BRANCH + cd $OVS_DIR + fi + + # TODO: Can you create package list files like you can inside devstack? + install_package autoconf automake libtool gcc patch make + + # If build_modules is False, we don't need to install the kernel-* + # packages. Just return. + if [[ "$build_modules" == "False" ]]; then + return + fi + + KERNEL_VERSION=`uname -r` + if is_fedora ; then + # is_fedora covers Fedora, RHEL, CentOS, etc... + if [[ "$os_VENDOR" == "Fedora" ]]; then + install_package elfutils-libelf-devel + KERNEL_VERSION=`echo $KERNEL_VERSION | cut --delimiter='-' --field 1` + elif [[ ${KERNEL_VERSION:0:2} != "3." ]]; then + # dash is illegal character in rpm version so replace + # them with underscore like it is done in the kernel + # https://github.com/torvalds/linux/blob/master/scripts/package/mkspec#L25 + # but only for latest series of the kernel, not 3.x + + KERNEL_VERSION=`echo $KERNEL_VERSION | tr - _` + fi + + echo NOTE: if kernel-devel-$KERNEL_VERSION or kernel-headers-$KERNEL_VERSION installation + echo failed, please, provide a repository with the package, or yum update / reboot + echo your machine to get the latest kernel. + + install_package kernel-devel-$KERNEL_VERSION + install_package kernel-headers-$KERNEL_VERSION + + elif is_ubuntu ; then + install_package linux-headers-$KERNEL_VERSION + fi +} + +# load_ovs_kernel_modules() - load openvswitch kernel module +function load_ovs_kernel_modules { + load_module openvswitch + load_module vport-geneve False + dmesg | tail +} + +# reload_ovs_kernel_modules() - reload openvswitch kernel module +function reload_ovs_kernel_modules { + set +e + ovs_system=$(sudo ovs-dpctl dump-dps | grep ovs-system) + if [ -n "$ovs_system" ]; then + sudo ovs-dpctl del-dp ovs-system + fi + set -e + sudo modprobe -r vport_geneve + sudo modprobe -r openvswitch + load_ovs_kernel_modules +} + +# compile_ovs() - Compile OVS from source and load needed modules. +# Accepts two parameters: +# - first one is False by default and means that modules are not built and installed. 
+# - second optional parameter defines prefix for ovs compilation +# - third optional parameter defines localstatedir for ovs single machine runtime +# Env variables OVS_REPO_NAME, OVS_REPO and OVS_BRANCH must be set +function compile_ovs { + local _pwd=$PWD + local build_modules=${1:-False} + local prefix=$2 + local localstatedir=$3 + + if [ -n "$prefix" ]; then + prefix="--prefix=$prefix" + fi + + if [ -n "$localstatedir" ]; then + localstatedir="--localstatedir=$localstatedir" + fi + + prepare_for_ovs_compilation $build_modules + + KERNEL_VERSION=$(uname -r) + major_version=$(echo "${KERNEL_VERSION}" | cut -d '.' -f1) + patch_level=$(echo "${KERNEL_VERSION}" | cut -d '.' -f2) + if [ "${major_version}" -gt 5 ] || [ "${major_version}" == 5 ] && [ "${patch_level}" -gt 5 ]; then + echo "NOTE: KERNEL VERSION is ${KERNEL_VERSION} and OVS doesn't support compiling " + echo "Kernel module for version higher than 5.5. Skipping module compilation..." + build_modules="False" + fi + + if [ ! -f configure ] ; then + ./boot.sh + fi + if [ ! -f config.status ] || [ configure -nt config.status ] ; then + if [[ "$build_modules" == "True" ]]; then + ./configure $prefix $localstatedir --with-linux=/lib/modules/$(uname -r)/build + else + ./configure $prefix $localstatedir + fi + fi + make -j$(($(nproc) + 1)) + sudo make install + if [[ "$build_modules" == "True" ]]; then + sudo make INSTALL_MOD_DIR=kernel/net/openvswitch modules_install + reload_ovs_kernel_modules + else + load_ovs_kernel_modules + fi + + cd $_pwd +} + +# action_service - call an action over openvswitch service +# Accepts one parameter that can be either +# 'start', 'restart' and 'stop'. +function action_openvswitch { + local action=$1 + + if is_ubuntu; then + ${action}_service openvswitch-switch + elif is_fedora; then + ${action}_service openvswitch + elif is_suse; then + if [[ $DISTRO == "sle12" ]] && [[ $os_RELEASE -lt 12.2 ]]; then + ${action}_service openvswitch-switch + else + ${action}_service openvswitch + fi + fi +} + +# start_new_ovs() - removes old ovs database, creates a new one and starts ovs +function start_new_ovs { + sudo rm -f /etc/openvswitch/conf.db /etc/openvswitch/.conf.db~lock~ + sudo /usr/share/openvswitch/scripts/ovs-ctl start +} + +# stop_new_ovs() - stops ovs +function stop_new_ovs { + local ovs_ctl='/usr/share/openvswitch/scripts/ovs-ctl' + + if [ -x $ovs_ctl ] ; then + sudo $ovs_ctl stop + fi +} + +# remove_ovs_packages() - removes old ovs packages from the system +function remove_ovs_packages { + for package in openvswitch openvswitch-switch openvswitch-common; do + if is_package_installed $package; then + uninstall_package $package + fi + done +} + + +# load_conntrack_gre_module() - loads nf_conntrack_proto_gre kernel module +function load_conntrack_gre_module { + sudo modprobe nf_conntrack_proto_gre +} From 58f6048dd488664aab6ae42efeb9deb90b051acf Mon Sep 17 00:00:00 2001 From: Anand Bhat Date: Fri, 28 May 2021 11:36:23 +0530 Subject: [PATCH 065/574] setup.cfg: Replace dashes with underscores Setuptools v54.1.0 introduces a warning that the use of dash-separated options in 'setup.cfg' will not be supported in a future version [1]. Get ahead of the issue by replacing the dashes with underscores. Without this, we see 'UserWarning' messages like the following on new enough versions of setuptools: UserWarning: Usage of dash-separated 'description-file' will not be supported in future versions. 
Please use the underscore name 'description_file' instead [1] https://github.com/pypa/setuptools/commit/a2e9ae4cb Change-Id: I6b8e791c06319fa5fa0935337520c36800b1abd6 --- setup.cfg | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/setup.cfg b/setup.cfg index 146f010243..a4e621f6df 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,11 +1,11 @@ [metadata] name = DevStack summary = OpenStack DevStack -description-file = +description_file = README.rst author = OpenStack -author-email = openstack-discuss@lists.openstack.org -home-page = https://docs.openstack.org/devstack/latest +author_email = openstack-discuss@lists.openstack.org +home_page = https://docs.openstack.org/devstack/latest classifier = Intended Audience :: Developers License :: OSI Approved :: Apache Software License From 8ea11c2947753f988979330ecc5fab2a6362881c Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Mon, 31 May 2021 15:04:29 -0500 Subject: [PATCH 066/574] Update IRC network to OFTC Change-Id: I260d9e65782add011f00d9087e0a5ac71e2be324 --- doc/source/contributor/contributing.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/contributor/contributing.rst b/doc/source/contributor/contributing.rst index 5e0df569f7..4de238fbf8 100644 --- a/doc/source/contributor/contributing.rst +++ b/doc/source/contributor/contributing.rst @@ -13,7 +13,7 @@ with Devstack. Communication ~~~~~~~~~~~~~ -* IRC channel ``#openstack-qa`` at FreeNode +* IRC channel ``#openstack-qa`` at OFTC. * Mailing list (prefix subjects with ``[qa][devstack]`` for faster responses) http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-discuss From 949f5ada608961fd05435e01bd9d06757b6c62c5 Mon Sep 17 00:00:00 2001 From: Gregory Thiemonge Date: Mon, 15 Mar 2021 18:25:04 +0100 Subject: [PATCH 067/574] Add route to IPv6 private subnets in ML2/OVN Enable IPv6 private subnet routing in ML2/OVN, it uses the behavior that already exists in ML2/OVS: add a route from the devstack node to the CIDRs of the default IPv6 subnet pool. Any IPv6 subnet created using the default subnet pool and plugged into the default router is reachable from the host (ex: ipv6-private-subnet). Change-Id: I02ca1d94e9f4d5ad4a06182f5ac9a2434941cf08 --- lib/neutron_plugins/services/l3 | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3 index 75a3567096..5d339a00aa 100644 --- a/lib/neutron_plugins/services/l3 +++ b/lib/neutron_plugins/services/l3 @@ -392,8 +392,8 @@ function _neutron_configure_router_v6 { openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router set --external-gateway $EXT_NET_ID $ROUTER_ID fi - # This logic is specific to using the l3-agent for layer 3 - if is_service_enabled q-l3 || is_service_enabled neutron-l3; then + # This logic is specific to using OVN or the l3-agent for layer 3 + if ([[ $Q_AGENT == "ovn" ]] && is_service_enabled q-svc neutron-server) || is_service_enabled q-l3 neutron-l3; then # if the Linux host considers itself to be a router then it will # ignore all router advertisements # Ensure IPv6 RAs are accepted on interfaces with a default route. 
@@ -420,6 +420,11 @@ function _neutron_configure_router_v6 { # Configure interface for public bridge sudo ip -6 addr replace $ipv6_ext_gw_ip/$ipv6_cidr_len dev $ext_gw_interface + # Any IPv6 private subnet that uses the default IPV6 subnet pool + # and that is plugged into the default router (Q_ROUTER_NAME) will + # be reachable from the devstack node (ex: ipv6-private-subnet). + # Some scenario tests (such as octavia-tempest-plugin) rely heavily + # on this feature. local replace_range=${SUBNETPOOL_PREFIX_V6} if [[ -z "${SUBNETPOOL_V6_ID}" ]]; then replace_range=${FIXED_RANGE_V6} From cbc0b64a343e31506b43a9395e6a77fcfd6eb70b Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Thu, 3 Jun 2021 06:14:05 +0000 Subject: [PATCH 068/574] Updated from generate-devstack-plugins-list Change-Id: I2d5b0c59d5dd33f639ec685b16768325d67e9dbf --- doc/source/plugin-registry.rst | 22 +++++----------------- 1 file changed, 5 insertions(+), 17 deletions(-) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 4e7c2d7b2f..691fffa846 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -24,6 +24,8 @@ official OpenStack projects. ======================================== === Plugin Name URL ======================================== === +inspur/venus `https://opendev.org/inspur/venus `__ +inspur/venus-dashboard `https://opendev.org/inspur/venus-dashboard `__ openstack/aodh `https://opendev.org/openstack/aodh `__ openstack/barbican `https://opendev.org/openstack/barbican `__ openstack/blazar `https://opendev.org/openstack/blazar `__ @@ -39,21 +41,18 @@ openstack/devstack-plugin-container `https://opendev.org/openstack/devstack openstack/devstack-plugin-kafka `https://opendev.org/openstack/devstack-plugin-kafka `__ openstack/devstack-plugin-nfs `https://opendev.org/openstack/devstack-plugin-nfs `__ openstack/devstack-plugin-open-cas `https://opendev.org/openstack/devstack-plugin-open-cas `__ -openstack/devstack-plugin-pika `https://opendev.org/openstack/devstack-plugin-pika `__ -openstack/devstack-plugin-zmq `https://opendev.org/openstack/devstack-plugin-zmq `__ openstack/ec2-api `https://opendev.org/openstack/ec2-api `__ openstack/freezer `https://opendev.org/openstack/freezer `__ openstack/freezer-api `https://opendev.org/openstack/freezer-api `__ openstack/freezer-tempest-plugin `https://opendev.org/openstack/freezer-tempest-plugin `__ openstack/freezer-web-ui `https://opendev.org/openstack/freezer-web-ui `__ +openstack/glance `https://opendev.org/openstack/glance `__ openstack/heat `https://opendev.org/openstack/heat `__ openstack/heat-dashboard `https://opendev.org/openstack/heat-dashboard `__ openstack/ironic `https://opendev.org/openstack/ironic `__ openstack/ironic-inspector `https://opendev.org/openstack/ironic-inspector `__ openstack/ironic-prometheus-exporter `https://opendev.org/openstack/ironic-prometheus-exporter `__ openstack/ironic-ui `https://opendev.org/openstack/ironic-ui `__ -openstack/karbor `https://opendev.org/openstack/karbor `__ -openstack/karbor-dashboard `https://opendev.org/openstack/karbor-dashboard `__ openstack/keystone `https://opendev.org/openstack/keystone `__ openstack/kuryr-kubernetes `https://opendev.org/openstack/kuryr-kubernetes `__ openstack/kuryr-libnetwork `https://opendev.org/openstack/kuryr-libnetwork `__ @@ -65,21 +64,15 @@ openstack/manila-tempest-plugin `https://opendev.org/openstack/manila-t openstack/manila-ui `https://opendev.org/openstack/manila-ui `__ openstack/masakari 
`https://opendev.org/openstack/masakari `__ openstack/mistral `https://opendev.org/openstack/mistral `__ -openstack/monasca-analytics `https://opendev.org/openstack/monasca-analytics `__ openstack/monasca-api `https://opendev.org/openstack/monasca-api `__ -openstack/monasca-ceilometer `https://opendev.org/openstack/monasca-ceilometer `__ openstack/monasca-events-api `https://opendev.org/openstack/monasca-events-api `__ -openstack/monasca-log-api `https://opendev.org/openstack/monasca-log-api `__ openstack/monasca-tempest-plugin `https://opendev.org/openstack/monasca-tempest-plugin `__ -openstack/monasca-transform `https://opendev.org/openstack/monasca-transform `__ openstack/murano `https://opendev.org/openstack/murano `__ openstack/networking-bagpipe `https://opendev.org/openstack/networking-bagpipe `__ openstack/networking-baremetal `https://opendev.org/openstack/networking-baremetal `__ openstack/networking-bgpvpn `https://opendev.org/openstack/networking-bgpvpn `__ openstack/networking-generic-switch `https://opendev.org/openstack/networking-generic-switch `__ openstack/networking-hyperv `https://opendev.org/openstack/networking-hyperv `__ -openstack/networking-l2gw `https://opendev.org/openstack/networking-l2gw `__ -openstack/networking-midonet `https://opendev.org/openstack/networking-midonet `__ openstack/networking-odl `https://opendev.org/openstack/networking-odl `__ openstack/networking-powervm `https://opendev.org/openstack/networking-powervm `__ openstack/networking-sfc `https://opendev.org/openstack/networking-sfc `__ @@ -93,19 +86,13 @@ openstack/octavia `https://opendev.org/openstack/octavia openstack/octavia-dashboard `https://opendev.org/openstack/octavia-dashboard `__ openstack/octavia-tempest-plugin `https://opendev.org/openstack/octavia-tempest-plugin `__ openstack/openstacksdk `https://opendev.org/openstack/openstacksdk `__ -openstack/os-loganalyze `https://opendev.org/openstack/os-loganalyze `__ openstack/osprofiler `https://opendev.org/openstack/osprofiler `__ openstack/oswin-tempest-plugin `https://opendev.org/openstack/oswin-tempest-plugin `__ openstack/ovn-octavia-provider `https://opendev.org/openstack/ovn-octavia-provider `__ -openstack/panko `https://opendev.org/openstack/panko `__ openstack/patrole `https://opendev.org/openstack/patrole `__ -openstack/qinling `https://opendev.org/openstack/qinling `__ -openstack/qinling-dashboard `https://opendev.org/openstack/qinling-dashboard `__ openstack/rally-openstack `https://opendev.org/openstack/rally-openstack `__ openstack/sahara `https://opendev.org/openstack/sahara `__ openstack/sahara-dashboard `https://opendev.org/openstack/sahara-dashboard `__ -openstack/searchlight `https://opendev.org/openstack/searchlight `__ -openstack/searchlight-ui `https://opendev.org/openstack/searchlight-ui `__ openstack/senlin `https://opendev.org/openstack/senlin `__ openstack/shade `https://opendev.org/openstack/shade `__ openstack/solum `https://opendev.org/openstack/solum `__ @@ -143,6 +130,7 @@ x/devstack-plugin-glusterfs `https://opendev.org/x/devstack-plugin- x/devstack-plugin-hdfs `https://opendev.org/x/devstack-plugin-hdfs `__ x/devstack-plugin-libvirt-qemu `https://opendev.org/x/devstack-plugin-libvirt-qemu `__ x/devstack-plugin-mariadb `https://opendev.org/x/devstack-plugin-mariadb `__ +x/devstack-plugin-tobiko `https://opendev.org/x/devstack-plugin-tobiko `__ x/devstack-plugin-vmax `https://opendev.org/x/devstack-plugin-vmax `__ x/drbd-devstack `https://opendev.org/x/drbd-devstack `__ x/fenix `https://opendev.org/x/fenix 
`__ @@ -169,6 +157,7 @@ x/networking-fortinet `https://opendev.org/x/networking-forti x/networking-hpe `https://opendev.org/x/networking-hpe `__ x/networking-huawei `https://opendev.org/x/networking-huawei `__ x/networking-infoblox `https://opendev.org/x/networking-infoblox `__ +x/networking-l2gw `https://opendev.org/x/networking-l2gw `__ x/networking-lagopus `https://opendev.org/x/networking-lagopus `__ x/networking-mlnx `https://opendev.org/x/networking-mlnx `__ x/networking-nec `https://opendev.org/x/networking-nec `__ @@ -193,7 +182,6 @@ x/stackube `https://opendev.org/x/stackube `__ x/tap-as-a-service-dashboard `https://opendev.org/x/tap-as-a-service-dashboard `__ x/tatu `https://opendev.org/x/tatu `__ -x/tobiko `https://opendev.org/x/tobiko `__ x/trio2o `https://opendev.org/x/trio2o `__ x/valet `https://opendev.org/x/valet `__ x/vmware-nsx `https://opendev.org/x/vmware-nsx `__ From 4e916aeb060ecf99203f297a0ff726a65d27f50e Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Mon, 26 Apr 2021 08:52:23 -0700 Subject: [PATCH 069/574] Configure glance unified limit quotas This makes us configure limits for glance and enable enforcement. Depends-On: https://review.opendev.org/c/openstack/glance/+/794247 Change-Id: If58d8474cae95b1be3044bd52010b8288a7f5fcc --- lib/glance | 45 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) diff --git a/lib/glance b/lib/glance index e789affaf1..fd2e0afcc1 100644 --- a/lib/glance +++ b/lib/glance @@ -84,6 +84,7 @@ GLANCE_STAGING_DIR=${GLANCE_MULTISTORE_FILE_IMAGE_DIR:=$DATA_DIR/os_glance_stagi GLANCE_TASKS_DIR=${GLANCE_MULTISTORE_FILE_IMAGE_DIR:=$DATA_DIR/os_glance_tasks_store} GLANCE_USE_IMPORT_WORKFLOW=$(trueorfalse False GLANCE_USE_IMPORT_WORKFLOW) +GLANCE_ENABLE_QUOTAS=$(trueorfalse True GLANCE_ENABLE_QUOTAS) GLANCE_CONF_DIR=${GLANCE_CONF_DIR:-/etc/glance} GLANCE_METADEF_DIR=$GLANCE_CONF_DIR/metadefs @@ -263,6 +264,45 @@ function configure_glance_store { fi } +function configure_glance_quotas { + + # NOTE(danms): We need to have some of the OS_ things unset in + # order to use system scope, which is required for creating these + # limits. This is a hack, but I dunno how else to get osc to use + # system scope. 
+ + bash -c "unset OS_USERNAME OS_TENANT_NAME OS_PROJECT_NAME; + openstack --os-cloud devstack-system-admin registered limit create \ + --service glance --default-limit 1000 --region $REGION_NAME \ + image_size_total; \ + openstack --os-cloud devstack-system-admin registered limit create \ + --service glance --default-limit 1000 --region $REGION_NAME \ + image_stage_total; \ + openstack --os-cloud devstack-system-admin registered limit create \ + --service glance --default-limit 100 --region $REGION_NAME \ + image_count_total; \ + openstack --os-cloud devstack-system-admin registered limit create \ + --service glance --default-limit 100 --region $REGION_NAME \ + image_count_uploading" + + # Tell glance to use these limits + iniset $GLANCE_API_CONF DEFAULT use_keystone_limits True + + # Configure oslo_limit so it can talk to keystone + iniset $GLANCE_API_CONF oslo_limit user_domain_name $SERVICE_DOMAIN_NAME + iniset $GLANCE_API_CONF oslo_limit password $SERVICE_PASSWORD + iniset $GLANCE_API_CONF oslo_limit username glance + iniset $GLANCE_API_CONF oslo_limit auth_type password + iniset $GLANCE_API_CONF oslo_limit auth_url $KEYSTONE_SERVICE_URI + iniset $GLANCE_API_CONF oslo_limit system_scope "'all'" + iniset $GLANCE_API_CONF oslo_limit endpoint_id \ + $(openstack endpoint list --service glance -f value -c ID) + + # Allow the glance service user to read quotas + openstack role add --user glance --user-domain Default --system all \ + reader +} + # configure_glance() - Set config files, create data dirs, etc function configure_glance { sudo install -d -o $STACK_USER $GLANCE_CONF_DIR $GLANCE_METADEF_DIR @@ -403,6 +443,11 @@ function create_glance_accounts { service_domain_id=$(get_or_create_domain $SERVICE_DOMAIN_NAME) iniset $GLANCE_SWIFT_STORE_CONF ref1 project_domain_id $service_domain_id iniset $GLANCE_SWIFT_STORE_CONF ref1 user_domain_id $service_domain_id + + if [[ "$GLANCE_ENABLE_QUOTAS" = True ]]; then + configure_glance_quotas + fi + fi } From 6843bc798c3fe5f17286e1c07ede95171bb49a25 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Fri, 14 May 2021 14:51:51 -0500 Subject: [PATCH 070/574] Temporary add feature pragma OpenStackSDK has a feature branch "feature/r1" as a preparation for the R1.0 release. Due to different branch naming functional tests with devstack are not running. Add temporarily (for the duration of the feature branch) pragma to the zuul.yaml to allow Zuul run functional tests. It will be dropped once SDK gets next major release. Change-Id: I671b589150fe731125e16316a994a5942219920b --- .zuul.yaml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/.zuul.yaml b/.zuul.yaml index 00129b5ca4..9a675c64ce 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -1,3 +1,10 @@ +- pragma: + # NOTE(gtema): this is required for the changes in SDK feature/r1 branch to + # be using devstack + # TODO(gtema): delete this once r1 branch is merged into master + implied-branches: + - feature/r1 + - nodeset: name: openstack-single-node nodes: From 3ad1661384636eee78a4bf5196812b6223e1b0a6 Mon Sep 17 00:00:00 2001 From: Ghanshyam Date: Fri, 4 Jun 2021 21:32:17 +0000 Subject: [PATCH 071/574] Revert "Temporary add feature pragma" This reverts commit 6843bc798c3fe5f17286e1c07ede95171bb49a25. 
Reason for revert: not sure why but this end up disabling the integration job on check pipeline, Change-Id: Icfaf8ea17b3ce2e405414c23f8075b18d297bf8b example: latest recheck on PS12 check pipeline job for neutron - https://review.opendev.org/c/openstack/neutron/+/790060 --- .zuul.yaml | 7 ------- 1 file changed, 7 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 9a675c64ce..00129b5ca4 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -1,10 +1,3 @@ -- pragma: - # NOTE(gtema): this is required for the changes in SDK feature/r1 branch to - # be using devstack - # TODO(gtema): delete this once r1 branch is merged into master - implied-branches: - - feature/r1 - - nodeset: name: openstack-single-node nodes: From 96509ea025459ac077d2c85289da7725c53235cf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rados=C5=82aw=20Piliszek?= Date: Wed, 21 Oct 2020 20:33:08 +0200 Subject: [PATCH 072/574] Check centos-8-stream CentOS Stream 8 (aka CentOS 8 Stream) is the currently supported runtime platform. [0] Some background history: The Manila team has asked QA to test centos-8-stream in the common gate. A bit later it turned out the point releases of CentOS 8 (aka CentOS Linux 8) will stop happening entirely by the end of 2021. [1] Includes a workaround to the edk2-ovmf issue on CentOS Stream 8 x86_64. [0] https://governance.openstack.org/tc/reference/runtimes/xena.html [1] https://lists.centos.org/pipermail/centos-devel/2020-December/075451.html Change-Id: Iee5a262af757f27f79ba1d6f790e949427dca190 --- .zuul.yaml | 19 +++++++++++++++++++ lib/nova_plugins/functions-libvirt | 12 +++++++++++- 2 files changed, 30 insertions(+), 1 deletion(-) diff --git a/.zuul.yaml b/.zuul.yaml index 5bc6a8b424..e45ff8febc 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -58,6 +58,16 @@ nodes: - controller +- nodeset: + name: devstack-single-node-centos-8-stream + nodes: + - name: controller + label: centos-8-stream + groups: + - name: tempest + nodes: + - controller + - nodeset: name: devstack-single-node-opensuse-15 nodes: @@ -591,6 +601,14 @@ voting: false timeout: 9000 +- job: + name: devstack-platform-centos-8-stream + parent: tempest-full-py3 + description: CentOS 8 Stream platform test + nodeset: devstack-single-node-centos-8-stream + voting: false + timeout: 9000 + - job: name: devstack-async parent: tempest-full-py3 @@ -704,6 +722,7 @@ - devstack-ipv6 - devstack-platform-fedora-latest - devstack-platform-centos-8 + - devstack-platform-centos-8-stream - devstack-async - devstack-multinode - devstack-unit-tests diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt index d3827c30dd..58adde7cd4 100644 --- a/lib/nova_plugins/functions-libvirt +++ b/lib/nova_plugins/functions-libvirt @@ -81,7 +81,17 @@ function install_libvirt { install_package qemu-kvm install_package libvirt libvirt-devel - if is_arch "aarch64"; then + if is_arch "x86_64"; then + # NOTE(yoctozepto): recent edk2-ovmf on CentOS Stream 8 x86_64 started failing with + # "libvirt.libvirtError: internal error: unknown feature amd-sev-es", + # so reinstall a known working version until the relevant bugs get fixed: + # * https://bugzilla.redhat.com/show_bug.cgi?id=1961558 + # * https://bugzilla.redhat.com/show_bug.cgi?id=1961562 + # TODO(yoctozepto): Remove this code when the time is right. 
+ if [ "$os_VENDOR" = "CentOSStream" ]; then + install_package edk2-ovmf-20200602gitca407c7246bf-4.el8 + fi + elif is_arch "aarch64"; then install_package edk2.git-aarch64 fi From 81937a230a8adb5c028db5a9ba9abf59b122a2ac Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rados=C5=82aw=20Piliszek?= Date: Mon, 7 Jun 2021 17:28:38 +0000 Subject: [PATCH 073/574] [CI] Drop CentOS Linux 8 job and nodeset CentOS Stream 8 (aka CentOS 8 Stream) is the currently supported runtime platform. [0] DevStack works with CentOS Stream only now. [1] The only usage of the nodeset being dropped is handled by the Depends-On. [0] https://governance.openstack.org/tc/reference/runtimes/xena.html [1] https://review.opendev.org/c/openstack/devstack/+/759122 Depends-On: https://review.opendev.org/c/openstack/cinder-tempest-plugin/+/795159 Change-Id: Ic0f696b46dce3dba529b53a8f9de8cda6b913c7b --- .zuul.yaml | 19 ------------------- 1 file changed, 19 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index e45ff8febc..74223d44f5 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -48,16 +48,6 @@ nodes: - controller -- nodeset: - name: devstack-single-node-centos-8 - nodes: - - name: controller - label: centos-8 - groups: - - name: tempest - nodes: - - controller - - nodeset: name: devstack-single-node-centos-8-stream nodes: @@ -593,14 +583,6 @@ # we often have to rush things through devstack to stabilise the gate, # and these platforms don't have the round-the-clock support to avoid # becoming blockers in that situation. -- job: - name: devstack-platform-centos-8 - parent: tempest-full-py3 - description: Centos 8 platform test - nodeset: devstack-single-node-centos-8 - voting: false - timeout: 9000 - - job: name: devstack-platform-centos-8-stream parent: tempest-full-py3 @@ -721,7 +703,6 @@ - devstack - devstack-ipv6 - devstack-platform-fedora-latest - - devstack-platform-centos-8 - devstack-platform-centos-8-stream - devstack-async - devstack-multinode From 2fb8c7a5eea6f9321eef36fb9b8fd8e55465c91e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rados=C5=82aw=20Piliszek?= Date: Mon, 7 Jun 2021 17:44:54 +0000 Subject: [PATCH 074/574] Move verify-ipv6-only-deployments from Tempest to DevStack as it tests DevStack side of things and is useful for projects not using Tempest. Verbatim copy except for the devstack- prefix and the /devstack/ path. Change-Id: Ie166730843f874b9c99e37244e460d7ad33b7eeb --- .../README.rst | 16 ++++ .../defaults/main.yaml | 1 + .../tasks/main.yaml | 4 + tools/verify-ipv6-only-deployments.sh | 92 +++++++++++++++++++ 4 files changed, 113 insertions(+) create mode 100644 roles/devstack-ipv6-only-deployments-verification/README.rst create mode 100644 roles/devstack-ipv6-only-deployments-verification/defaults/main.yaml create mode 100644 roles/devstack-ipv6-only-deployments-verification/tasks/main.yaml create mode 100755 tools/verify-ipv6-only-deployments.sh diff --git a/roles/devstack-ipv6-only-deployments-verification/README.rst b/roles/devstack-ipv6-only-deployments-verification/README.rst new file mode 100644 index 0000000000..400a8da222 --- /dev/null +++ b/roles/devstack-ipv6-only-deployments-verification/README.rst @@ -0,0 +1,16 @@ +Verify the IPv6-only deployments + +This role needs to be invoked from a playbook that +run tests. This role verifies the IPv6 setting on +devstack side and devstack deploy services on IPv6. +This role is invoked before tests are run so that +if any missing IPv6 setting or deployments can fail +the job early. + + +**Role Variables** + +.. 
zuul:rolevar:: devstack_base_dir + :default: /opt/stack + + The devstack base directory. diff --git a/roles/devstack-ipv6-only-deployments-verification/defaults/main.yaml b/roles/devstack-ipv6-only-deployments-verification/defaults/main.yaml new file mode 100644 index 0000000000..fea05c8146 --- /dev/null +++ b/roles/devstack-ipv6-only-deployments-verification/defaults/main.yaml @@ -0,0 +1 @@ +devstack_base_dir: /opt/stack diff --git a/roles/devstack-ipv6-only-deployments-verification/tasks/main.yaml b/roles/devstack-ipv6-only-deployments-verification/tasks/main.yaml new file mode 100644 index 0000000000..59d3b79bc1 --- /dev/null +++ b/roles/devstack-ipv6-only-deployments-verification/tasks/main.yaml @@ -0,0 +1,4 @@ +- name: Verify the ipv6-only deployments + become: true + become_user: stack + shell: "{{ devstack_base_dir }}/devstack/tools/verify-ipv6-only-deployments.sh" diff --git a/tools/verify-ipv6-only-deployments.sh b/tools/verify-ipv6-only-deployments.sh new file mode 100755 index 0000000000..2596395165 --- /dev/null +++ b/tools/verify-ipv6-only-deployments.sh @@ -0,0 +1,92 @@ +#!/bin/bash +# +# +# NOTE(gmann): This script is used in 'devstack-tempest-ipv6' zuul job to verify that +# services are deployed on IPv6 properly or not. This will capture if any devstck or devstack +# plugins are missing the required setting to listen on IPv6 address. This is run as part of +# run phase of zuul job and before test run. Child job of 'devstack-tempest-ipv6' +# can expand the IPv6 verification specific to project by defining the new post-run script which +# will run along with this base script. +# If there are more common verification for IPv6 then we can always extent this script. + +# Keep track of the DevStack directory +TOP_DIR=$(cd $(dirname "$0")/../../devstack && pwd) +source $TOP_DIR/stackrc +source $TOP_DIR/openrc admin admin + +function verify_devstack_ipv6_setting { + local _service_host='' + _service_host=$(echo $SERVICE_HOST | tr -d []) + local _host_ipv6='' + _host_ipv6=$(echo $HOST_IPV6 | tr -d []) + local _service_listen_address='' + _service_listen_address=$(echo $SERVICE_LISTEN_ADDRESS | tr -d []) + local _service_local_host='' + _service_local_host=$(echo $SERVICE_LOCAL_HOST | tr -d []) + if [[ "$SERVICE_IP_VERSION" != 6 ]]; then + echo $SERVICE_IP_VERSION "SERVICE_IP_VERSION is not set to 6 which is must for devstack to deploy services with IPv6 address." + exit 1 + fi + is_service_host_ipv6=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$_service_host'"))') + if [[ "$is_service_host_ipv6" != "True" ]]; then + echo $SERVICE_HOST "SERVICE_HOST is not ipv6 which means devstack cannot deploy services on IPv6 address." + exit 1 + fi + is_host_ipv6=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$_host_ipv6'"))') + if [[ "$is_host_ipv6" != "True" ]]; then + echo $HOST_IPV6 "HOST_IPV6 is not ipv6 which means devstack cannot deploy services on IPv6 address." + exit 1 + fi + is_service_listen_address=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$_service_listen_address'"))') + if [[ "$is_service_listen_address" != "True" ]]; then + echo $SERVICE_LISTEN_ADDRESS "SERVICE_LISTEN_ADDRESS is not ipv6 which means devstack cannot deploy services on IPv6 address." 
+ exit 1 + fi + is_service_local_host=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$_service_local_host'"))') + if [[ "$is_service_local_host" != "True" ]]; then + echo $SERVICE_LOCAL_HOST "SERVICE_LOCAL_HOST is not ipv6 which means devstack cannot deploy services on IPv6 address." + exit 1 + fi + echo "Devstack is properly configured with IPv6" + echo "SERVICE_IP_VERSION: " $SERVICE_IP_VERSION "HOST_IPV6: " $HOST_IPV6 "SERVICE_HOST: " $SERVICE_HOST "SERVICE_LISTEN_ADDRESS: " $SERVICE_LISTEN_ADDRESS "SERVICE_LOCAL_HOST: " $SERVICE_LOCAL_HOST +} + +function sanity_check_system_ipv6_enabled { + system_ipv6_enabled=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_ipv6_enabled())') + if [[ $system_ipv6_enabled != "True" ]]; then + echo "IPv6 is disabled in system" + exit 1 + fi + echo "IPv6 is enabled in system" +} + +function verify_service_listen_address_is_ipv6 { + local endpoints_verified=False + local all_ipv6=True + endpoints=$(openstack endpoint list -f value -c URL) + for endpoint in ${endpoints}; do + local endpoint_address='' + endpoint_address=$(echo "$endpoint" | awk -F/ '{print $3}' | awk -F] '{print $1}') + endpoint_address=$(echo $endpoint_address | tr -d []) + local is_endpoint_ipv6='' + is_endpoint_ipv6=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$endpoint_address'"))') + if [[ "$is_endpoint_ipv6" != "True" ]]; then + all_ipv6=False + echo $endpoint ": This is not ipv6 endpoint which means corresponding service is not listening on IPv6 address." + continue + fi + endpoints_verified=True + done + if [[ "$all_ipv6" == "False" ]] || [[ "$endpoints_verified" == "False" ]]; then + exit 1 + fi + echo "All services deployed by devstack is on IPv6 endpoints" + echo $endpoints +} + +#First thing to verify if system has IPv6 enabled or not +sanity_check_system_ipv6_enabled +#Verify whether devstack is configured properly with IPv6 setting +verify_devstack_ipv6_setting +#Get all registrfed endpoints by devstack in keystone and verify that each endpoints address is IPv6. +verify_service_listen_address_is_ipv6 From 95298788085de38342e789bf10c35849c7117dfc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rados=C5=82aw=20Piliszek?= Date: Tue, 8 Jun 2021 16:19:40 +0000 Subject: [PATCH 075/574] [OVN] Set up routing on public bridge like OVS did This fixes various reported and unreported issues with the new behaviour. Removes code repetition as well to pay off some technical debt. 
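Once stacked, the restored behaviour can be checked from the host; a rough
sketch, assuming the default PUBLIC_BRIDGE (br-ex) and PHYSICAL_NETWORK
(public) values:

    # The provider bridge mapping written by create_public_bridge.
    sudo ovs-vsctl get Open_vSwitch . external_ids:ovn-bridge-mappings
    # expected with the defaults: "public:br-ex"
    # Host-side addressing and routing towards FLOATING_RANGE on the bridge.
    ip addr show dev br-ex
    ip route show dev br-ex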
Closes-Bug: #1930360 Change-Id: I726c532e96ca434520838ae8a35d5b88b6dd337b --- lib/neutron_plugins/ovn_agent | 42 ++------------------------------- lib/neutron_plugins/services/l3 | 6 ++--- 2 files changed, 5 insertions(+), 43 deletions(-) diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index 948fede3b9..cfcb01ee91 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -262,48 +262,10 @@ function clone_repository { ERROR_ON_CLONE=false git_clone $repo $dir $branch } -function get_ext_gw_interface { - # Get ext_gw_interface depending on value of Q_USE_PUBLIC_VETH - # This function is copied directly from the devstack neutron-legacy script - if [[ "$Q_USE_PUBLIC_VETH" == "True" ]]; then - echo $Q_PUBLIC_VETH_EX - else - # Disable in-band as we are going to use local port - # to communicate with VMs - sudo ovs-vsctl set Bridge $PUBLIC_BRIDGE \ - other_config:disable-in-band=true - echo $PUBLIC_BRIDGE - fi -} - function create_public_bridge { # Create the public bridge that OVN will use - # This logic is based on the devstack neutron-legacy _neutron_configure_router_v4 and _v6 - local ext_gw_ifc - ext_gw_ifc=$(get_ext_gw_interface) - - sudo ovs-vsctl --may-exist add-br $ext_gw_ifc -- set bridge $ext_gw_ifc protocols=OpenFlow13,OpenFlow15 - sudo ovs-vsctl set open . external-ids:ovn-bridge-mappings=$PHYSICAL_NETWORK:$ext_gw_ifc - if [ -n "$FLOATING_RANGE" ]; then - local cidr_len=${FLOATING_RANGE#*/} - sudo ip addr replace $PUBLIC_NETWORK_GATEWAY/$cidr_len dev $ext_gw_ifc - fi - - # Ensure IPv6 RAs are accepted on the interface with the default route. - # This is needed for neutron-based devstack clouds to work in - # IPv6-only clouds in the gate. Please do not remove this without - # talking to folks in Infra. This fix is based on a devstack fix for - # neutron L3 agent: https://review.openstack.org/#/c/359490/. - default_route_dev=$(ip route | grep ^default | awk '{print $5}') - sudo sysctl -w net.ipv6.conf.$default_route_dev.accept_ra=2 - - sudo sysctl -w net.ipv6.conf.all.forwarding=1 - if [ -n "$IPV6_PUBLIC_RANGE" ]; then - local ipv6_cidr_len=${IPV6_PUBLIC_RANGE#*/} - sudo ip -6 addr replace $IPV6_PUBLIC_NETWORK_GATEWAY/$ipv6_cidr_len dev $ext_gw_ifc - fi - - sudo ip link set $ext_gw_ifc up + sudo ovs-vsctl --may-exist add-br $PUBLIC_BRIDGE -- set bridge $PUBLIC_BRIDGE protocols=OpenFlow13,OpenFlow15 + sudo ovs-vsctl set open . 
external-ids:ovn-bridge-mappings=$PHYSICAL_NETWORK:$PUBLIC_BRIDGE } function _disable_libvirt_apparmor { diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3 index 5d339a00aa..b6bc02818c 100644 --- a/lib/neutron_plugins/services/l3 +++ b/lib/neutron_plugins/services/l3 @@ -343,8 +343,8 @@ function _neutron_configure_router_v4 { # Configure the external network as the default router gateway openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router set --external-gateway $EXT_NET_ID $ROUTER_ID - # This logic is specific to using the l3-agent for layer 3 - if is_service_enabled q-l3 || is_service_enabled neutron-l3; then + # This logic is specific to using OVN or the l3-agent for layer 3 + if ([[ $Q_AGENT == "ovn" ]] && [[ "$OVN_L3_CREATE_PUBLIC_NETWORK" == "True" ]] && is_service_enabled q-svc neutron-server) || is_service_enabled q-l3 neutron-l3; then # Configure and enable public bridge local ext_gw_interface="none" if is_neutron_ovs_base_plugin; then @@ -393,7 +393,7 @@ function _neutron_configure_router_v6 { fi # This logic is specific to using OVN or the l3-agent for layer 3 - if ([[ $Q_AGENT == "ovn" ]] && is_service_enabled q-svc neutron-server) || is_service_enabled q-l3 neutron-l3; then + if ([[ $Q_AGENT == "ovn" ]] && [[ "$OVN_L3_CREATE_PUBLIC_NETWORK" == "True" ]] && is_service_enabled q-svc neutron-server) || is_service_enabled q-l3 neutron-l3; then # if the Linux host considers itself to be a router then it will # ignore all router advertisements # Ensure IPv6 RAs are accepted on interfaces with a default route. From 5a684eb51b4c18aee2051c5a7c703f50bbcc41ca Mon Sep 17 00:00:00 2001 From: Jens Harbott Date: Wed, 9 Jun 2021 09:37:34 +0200 Subject: [PATCH 076/574] Drop broute from ebtables_dump This table is no longer present on most installations, drop it from the list to avoid error messages during log collection that people mistake to be the real error why devstack is failing. This may lose some debugging information in edge cases, but I think the improvement of the general user experience is more important. Change-Id: Ibb9b247a018a788c8c4b40487762319fe470bf0f Closes-Bug: 1885198 --- tools/worlddump.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/worlddump.py b/tools/worlddump.py index 22770f15b6..e2921737db 100755 --- a/tools/worlddump.py +++ b/tools/worlddump.py @@ -134,7 +134,7 @@ def disk_space(): def ebtables_dump(): - tables = ['filter', 'nat', 'broute'] + tables = ['filter', 'nat'] _header("EB Tables Dump") if not _find_cmd('ebtables'): return From 20d6a21e8a1daed9eee0a8413217b2f8e4e863dd Mon Sep 17 00:00:00 2001 From: Mohammed Naser Date: Wed, 9 Jun 2021 14:20:01 -0400 Subject: [PATCH 077/574] Switch fedora-latest to use fedora-34 Fedora 32 is now EOL, we should test against the newly released version of Fedora which is 34. 
Depends-On: https://review.opendev.org/c/openstack/project-config/+/795604 Change-Id: I10d868aca20d1a10d3e7fcfeb78f6fda4c896ee8 --- .zuul.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.zuul.yaml b/.zuul.yaml index 74223d44f5..968d79f76a 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -72,7 +72,7 @@ name: devstack-single-node-fedora-latest nodes: - name: controller - label: fedora-32 + label: fedora-34 groups: - name: tempest nodes: From 89baa314c1408251abd9f4d61d9cf5e5c945bc4e Mon Sep 17 00:00:00 2001 From: Artem Goncharov Date: Fri, 11 Jun 2021 16:59:52 +0200 Subject: [PATCH 078/574] Temporary add feature pragma OpenStackSDK has a feature branch "feature/r1" as a preparation for the R1.0 release. Due to different branch naming functional tests with devstack are not running. Add temporarily (for the duration of the feature branch) pragma to the zuul.yaml to allow Zuul run functional tests. It will be dropped once SDK gets next major release. Previous attemp didn't work well for other projects, therefore explicitly include master as well. Change-Id: I3a5722873f395bc52cc55a0fd6bcea0ebe3b74fc --- .zuul.yaml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/.zuul.yaml b/.zuul.yaml index 74223d44f5..3c490ff180 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -1,3 +1,11 @@ +- pragma: + # NOTE(gtema): this is required for the changes in SDK feature/r1 branch to + # be using devstack + # TODO(gtema): delete this once r1 branch is merged into master + implied-branches: + - master + - feature/r1 + - nodeset: name: openstack-single-node nodes: From 6af3cb9eb273c127c20bc07f65c9a5d7f8ba95cd Mon Sep 17 00:00:00 2001 From: Julia Kreger Date: Thu, 11 Mar 2021 11:28:47 -0800 Subject: [PATCH 079/574] nova ironic-hypevisor - support scoped auth config The Secure RBAC effort has updated Ironic such that it can support a mode where it is scope enforcing for all interactions with the API. Due to the design, and operating nature of Ironic's API, services speaking with it must authenticate with a system scope to have a full picture of the universe. In this case, we need to update the nova configuration accordingly such that the compute service understands how to talk to ironic so that it can see the nodes under management. Ironic will likely update this again at a later point in time to enable a "hybrid" mixed-mode as the operating model and related permissions *should* allow nova to use a project scoped "owner" account with Ironic, in order to access and command nodes to deploy. But at this time, we're focusing on the exclusive operating mode. 
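A deployment that wants the scope-enforcing behaviour opts in through the
existing ironic flag read by the helper added here; roughly (a sketch —
IRONIC_ENFORCE_SCOPE itself is defined by the ironic devstack plugin):

    # local.conf sketch: make nova authenticate to ironic with a system scope.
    IRONIC_ENFORCE_SCOPE=True
    # With this set, configure_nova_hypervisor writes
    #   [ironic] system_scope = all
    # into nova.conf instead of the project_name/project_domain_id pair.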
Change-Id: I1946725ce08c495178c419eaf38829f921c91bbe Needed-By: https://review.opendev.org/c/openstack/ironic/+/778957 --- functions-common | 5 +++++ lib/nova_plugins/hypervisor-ironic | 9 ++++++--- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/functions-common b/functions-common index 340da754a2..11679e4aa3 100644 --- a/functions-common +++ b/functions-common @@ -1037,6 +1037,11 @@ function is_ironic_hardware { return 1 } +function is_ironic_enforce_scope { + is_service_enabled ironic && [[ "$IRONIC_ENFORCE_SCOPE" == "True" ]] && return 0 + return 1 +} + # Package Functions # ================= diff --git a/lib/nova_plugins/hypervisor-ironic b/lib/nova_plugins/hypervisor-ironic index bda6ef6998..f058e9bb53 100644 --- a/lib/nova_plugins/hypervisor-ironic +++ b/lib/nova_plugins/hypervisor-ironic @@ -47,9 +47,13 @@ function configure_nova_hypervisor { iniset $NOVA_CONF ironic username admin iniset $NOVA_CONF ironic password $ADMIN_PASSWORD iniset $NOVA_CONF ironic auth_url $KEYSTONE_SERVICE_URI - iniset $NOVA_CONF ironic project_domain_id default + if is_ironic_enforce_scope; then + iniset $NOVA_CONF ironic system_scope all + else + iniset $NOVA_CONF ironic project_domain_id default + iniset $NOVA_CONF ironic project_name demo + fi iniset $NOVA_CONF ironic user_domain_id default - iniset $NOVA_CONF ironic project_name demo iniset $NOVA_CONF ironic region_name $REGION_NAME # These are used with crufty legacy ironicclient @@ -82,7 +86,6 @@ function stop_nova_hypervisor { : } - # Restore xtrace $_XTRACE_HYP_IRONIC From 808331488dc16afdf9bd4c2c3103a4a8fc9a6209 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rados=C5=82aw=20Piliszek?= Date: Fri, 18 Jun 2021 12:06:02 +0000 Subject: [PATCH 080/574] Revert edk2 workaround It is not needed anymore. Change-Id: I706a33b0a7c737a23b9a7270af1e53e5de83c66f --- lib/nova_plugins/functions-libvirt | 12 +----------- 1 file changed, 1 insertion(+), 11 deletions(-) diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt index 58adde7cd4..d3827c30dd 100644 --- a/lib/nova_plugins/functions-libvirt +++ b/lib/nova_plugins/functions-libvirt @@ -81,17 +81,7 @@ function install_libvirt { install_package qemu-kvm install_package libvirt libvirt-devel - if is_arch "x86_64"; then - # NOTE(yoctozepto): recent edk2-ovmf on CentOS Stream 8 x86_64 started failing with - # "libvirt.libvirtError: internal error: unknown feature amd-sev-es", - # so reinstall a known working version until the relevant bugs get fixed: - # * https://bugzilla.redhat.com/show_bug.cgi?id=1961558 - # * https://bugzilla.redhat.com/show_bug.cgi?id=1961562 - # TODO(yoctozepto): Remove this code when the time is right. - if [ "$os_VENDOR" = "CentOSStream" ]; then - install_package edk2-ovmf-20200602gitca407c7246bf-4.el8 - fi - elif is_arch "aarch64"; then + if is_arch "aarch64"; then install_package edk2.git-aarch64 fi From 5344885a61fe39565692014c15e0b4fb1055c835 Mon Sep 17 00:00:00 2001 From: Sean Mooney Date: Thu, 17 Jun 2021 12:37:35 +0100 Subject: [PATCH 081/574] os_vif: Add support for configuring os_vif_ovs plugin This change add an os-vif lib that declares two new variables OS_VIF_OVS_OVSDB_INTERFACE and OS_VIF_OVS_ISOLATE_VIF The former is introduced to workaround bug #1929446 which cause the nova and neutron agents to periodically block waiting for ovs to respond. OS_VIF_OVS_ISOLATE_VIF is added to address bug #1734320 when using ml2/ovs vif isolation should always be used to prevent cross tenant traffic during a live migration. 
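Both knobs stay overridable from local.conf; for example (a sketch only — the
values shown are illustrations, not the defaults set by this change):

    # Prefer the native python OVSDB driver once bug #1929446 is resolved,
    # and keep vif isolation on regardless of the detected ML2 driver.
    OS_VIF_OVS_OVSDB_INTERFACE=native
    OS_VIF_OVS_ISOLATE_VIF=True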
This makes devstack more closely mirror reality by enabling it when ml2/ovs is used and disabling it otherwise. Related-Bug: #1734320 Related-Bug: #1929446 Related-Bug: #1912310 Change-Id: I88254c6e22b52585506ee4907c1c03b8d4f2dac7 --- lib/os-vif | 29 +++++++++++++++++++++++++++++ stack.sh | 6 ++++++ 2 files changed, 35 insertions(+) create mode 100644 lib/os-vif diff --git a/lib/os-vif b/lib/os-vif new file mode 100644 index 0000000000..865645c0d5 --- /dev/null +++ b/lib/os-vif @@ -0,0 +1,29 @@ +#!/bin/bash + +# support vsctl or native. +# until bug #1929446 is resolved we override the os-vif default +# and fall back to the legacy "vsctl" driver. +OS_VIF_OVS_OVSDB_INTERFACE=${OS_VIF_OVS_OVSDB_INTERFACE:="vsctl"} + +function is_ml2_ovs { + if [[ "${Q_AGENT}" == "openvswitch" ]]; then + echo "True" + fi + echo "False" +} + +# This should be true for any ml2/ovs job but should be set to false for +# all other ovs based jobs e.g. ml2/ovn +OS_VIF_OVS_ISOLATE_VIF=${OS_VIF_OVS_ISOLATE_VIF:=$(is_ml2_ovs)} +OS_VIF_OVS_ISOLATE_VIF=$(trueorfalse False OS_VIF_OVS_ISOLATE_VIF) + +function configure_os_vif { + if [[ -e ${NOVA_CONF} ]]; then + iniset ${NOVA_CONF} os_vif_ovs ovsdb_interface ${OS_VIF_OVS_OVSDB_INTERFACE} + iniset ${NOVA_CONF} os_vif_ovs isolate_vif ${OS_VIF_OVS_ISOLATE_VIF} + fi + if [[ -e ${NEUTRON_CONF} ]]; then + iniset ${NEUTRON_CONF} os_vif_ovs ovsdb_interface ${OS_VIF_OVS_OVSDB_INTERFACE} + iniset ${NEUTRON_CONF} os_vif_ovs isolate_vif ${OS_VIF_OVS_ISOLATE_VIF} + fi +} diff --git a/stack.sh b/stack.sh index 6858ab8c42..44f1c8fa01 100755 --- a/stack.sh +++ b/stack.sh @@ -597,6 +597,7 @@ source $TOP_DIR/lib/ldap source $TOP_DIR/lib/dstat source $TOP_DIR/lib/tcpdump source $TOP_DIR/lib/etcd3 +source $TOP_DIR/lib/os-vif # Extras Source # -------------- @@ -1159,6 +1160,11 @@ if is_service_enabled q-dhcp; then sudo sysctl -w net.ipv4.ip_forward=1 fi +# os-vif +# ------ +if is_service_enabled nova neutron; then + configure_os_vif +fi # Storage Service # --------------- From 7befae663c6aa99343cb2c90e74ee2e3bc676559 Mon Sep 17 00:00:00 2001 From: Gregory Thiemonge Date: Sat, 19 Jun 2021 13:24:00 +0200 Subject: [PATCH 082/574] Delay horizon startup Move the 'Starting Horizon' task after the end of the wait for create_flavors. The start_horizon function restarts the httpd server, the openstack services are unavailable during a short period of time, so the "openstack flavor create" calls might fail randomly. Closes-Bug: #1932580 Change-Id: I32ee7457586e3de8ba4dfce3b1a12025f9776542 --- stack.sh | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/stack.sh b/stack.sh index 6858ab8c42..ef1ad3d26a 100755 --- a/stack.sh +++ b/stack.sh @@ -1341,6 +1341,7 @@ if is_service_enabled $DATABASE_BACKENDS && is_service_enabled glance; then done fi +async_wait create_flavors if is_service_enabled horizon; then echo_summary "Starting Horizon" @@ -1348,8 +1349,6 @@ if is_service_enabled horizon; then start_horizon fi -async_wait create_flavors - # Create account rc files # ======================= From a5d52831dc4d357906f5514943e17ab535e9e578 Mon Sep 17 00:00:00 2001 From: Gregory Thiemonge Date: Fri, 18 Jun 2021 13:53:21 +0200 Subject: [PATCH 083/574] Fix updating setuptools in Centos In RHEL-based distributions, updating setuptools using pip removes the files from the python3-setuptools RPM. It breaks some tools such as semanage (which is used by diskimage-builder) that use the -s flag of the python interpreter (don't import modules from /usr/local). 
This commit reinstalls python3-setuptools to fix those applications.

Change-Id: Ib44857e83f75acf37823fae912960a801c83cf7f
---
 tools/fixup_stuff.sh | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh
index 19219435ad..060abb1605 100755
--- a/tools/fixup_stuff.sh
+++ b/tools/fixup_stuff.sh
@@ -106,6 +106,16 @@ function fixup_fedora {
     # overwriting works. So this hacks around those packages that
     # have been dragged in by some other system dependency
     sudo rm -rf /usr/lib64/python3*/site-packages/PyYAML-*.egg-info
+
+    # After updating setuptools based on the requirements, the files from the
+    # python3-setuptools RPM are deleted, it breaks some tools such as semanage
+    # (used in diskimage-builder) that use the -s flag of the python
+    # interpreter, enforcing the use of the packages from /usr/lib.
+    # Importing setuptools/pkg_resources in a such environment fails.
+    # Enforce the package re-installation to fix those applications.
+    if is_package_installed python3-setuptools; then
+        sudo dnf reinstall -y python3-setuptools
+    fi
 }

 function fixup_suse {

From 5c9affdd9a2baff2166146f4743fe75009b32eab Mon Sep 17 00:00:00 2001
From: Julia Kreger
Date: Fri, 12 Mar 2021 11:19:52 -0800
Subject: [PATCH 084/574] Use specific credentials for tempest plugin setup

The tempest plugin expects the classic environment variables to be
present for credentials to access the cloud, but this is wrong in
cases where we're trying to set up system-scoped services and need
to remove the environment variables that were being used.

Instead, change the plugin to use the os-cloud entry definitions, and
specifically in this case devstack-admin, which makes sense until we
start to make tempest itself scope aware.

We will likely want to change the environment variables from being
registered in devstack at some point and completely shift towards
passing an --os-cloud parameter, but that is outside the scope of this
change as doing so would likely break all plugins.
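As a usage sketch, the practical difference is that setup code no longer
depends on OS_* variables exported in the environment and instead names a
clouds.yaml entry explicitly (the exact command below is illustrative):

    # before: credentials implicitly taken from OS_USERNAME/OS_PASSWORD/...
    # after : credentials resolved from the devstack-admin cloud definition
    openstack --os-cloud devstack-admin image list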
Change-Id: I8d4ec68f116eea07bc7346f939e134fa2e655eac --- lib/tempest | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/lib/tempest b/lib/tempest index d835c68d4a..545018b4a4 100644 --- a/lib/tempest +++ b/lib/tempest @@ -107,7 +107,7 @@ function remove_disabled_extensions { # Takes an image ID parameter as input function image_size_in_gib { local size - size=$(openstack image show $1 -c size -f value) + size=$(openstack --os-cloud devstack-admin image show $1 -c size -f value) echo $size | python3 -c "import math; print(int(math.ceil(float(int(input()) / 1024.0 ** 3))))" } @@ -173,7 +173,7 @@ function configure_tempest { image_uuid_alt="$IMAGE_UUID" fi images+=($IMAGE_UUID) - done < <(openstack image list --property status=active | awk -F'|' '!/^(+--)|ID|aki|ari/ { print $3,$2 }') + done < <(openstack --os-cloud devstack-admin image list --property status=active | awk -F'|' '!/^(+--)|ID|aki|ari/ { print $3,$2 }') case "${#images[*]}" in 0) @@ -209,23 +209,23 @@ function configure_tempest { local alt_username=${ALT_USERNAME:-alt_demo} local alt_project_name=${ALT_TENANT_NAME:-alt_demo} local admin_project_id - admin_project_id=$(openstack project list | awk "/ admin / { print \$2 }") + admin_project_id=$(openstack --os-cloud devstack-admin project list | awk "/ admin / { print \$2 }") if is_service_enabled nova; then # If ``DEFAULT_INSTANCE_TYPE`` is not declared, use the new behavior # Tempest creates its own instance types - available_flavors=$(nova flavor-list) + available_flavors=$(openstack --os-cloud devstack-admin flavor list) if [[ -z "$DEFAULT_INSTANCE_TYPE" ]]; then if [[ ! ( $available_flavors =~ 'm1.nano' ) ]]; then # Determine the flavor disk size based on the image size. disk=$(image_size_in_gib $image_uuid) - openstack flavor create --id 42 --ram 128 --disk $disk --vcpus 1 --property hw_rng:allowed=True m1.nano + openstack --os-cloud devstack-admin flavor create --id 42 --ram 128 --disk $disk --vcpus 1 --property hw_rng:allowed=True m1.nano fi flavor_ref=42 if [[ ! ( $available_flavors =~ 'm1.micro' ) ]]; then # Determine the alt flavor disk size based on the alt image size. disk=$(image_size_in_gib $image_uuid_alt) - openstack flavor create --id 84 --ram 192 --disk $disk --vcpus 1 --property hw_rng:allowed=True m1.micro + openstack --os-cloud devstack-admin flavor create --id 84 --ram 192 --disk $disk --vcpus 1 --property hw_rng:allowed=True m1.micro fi flavor_ref_alt=84 else @@ -251,7 +251,7 @@ function configure_tempest { fi flavor_ref=${flavors[0]} flavor_ref_alt=$flavor_ref - flavor_ref_size=$(openstack flavor show --format value --column disk "${flavor_ref}") + flavor_ref_size=$(openstack --os-cloud devstack-admin flavor show --format value --column disk "${flavor_ref}") # Ensure ``flavor_ref`` and ``flavor_ref_alt`` have different values. # Some resize instance in tempest tests depends on this. @@ -264,7 +264,7 @@ function configure_tempest { # flavor selected as default, e.g. m1.small, # we need to perform additional check. # - flavor_ref_alt_size=$(openstack flavor show --format value --column disk "${f}") + flavor_ref_alt_size=$(openstack --os-cloud devstack-admin flavor show --format value --column disk "${f}") if [[ "${flavor_ref_alt_size}" -lt "${flavor_ref_size}" ]]; then continue fi @@ -285,10 +285,10 @@ function configure_tempest { # If NEUTRON_CREATE_INITIAL_NETWORKS is not true, there is no network created # and the public_network_id should not be set. 
if [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" == "True" ]] && is_networking_extension_supported 'external-net'; then - public_network_id=$(openstack network show -f value -c id $PUBLIC_NETWORK_NAME) + public_network_id=$(openstack --os-cloud devstack-admin network show -f value -c id $PUBLIC_NETWORK_NAME) # make sure shared network presence does not confuses the tempest tests - openstack network create --share shared - openstack subnet create --description shared-subnet --subnet-range ${TEMPEST_SHARED_POOL:-192.168.233.0/24} --network shared shared-subnet + openstack --os-cloud devstack-admin network create --share shared + openstack --os-cloud devstack-admin subnet create --description shared-subnet --subnet-range ${TEMPEST_SHARED_POOL:-192.168.233.0/24} --network shared shared-subnet fi iniset $TEMPEST_CONFIG DEFAULT use_syslog $SYSLOG From bf13075632b076d19b22f347aaa52cba7dcb7169 Mon Sep 17 00:00:00 2001 From: Rodolfo Alonso Hernandez Date: Wed, 23 Jun 2021 13:02:57 +0000 Subject: [PATCH 085/574] Make explicit the network backend used in the CI jobs All Neutron CI jobs (except for unit, functional and fullstack jobs), have explicitly defined the network backend used: - linuxbridge - ovs - ovn That was discussed and approved during the Neutron CI meetings [1]. [1]https://meetings.opendev.org/meetings/neutron_ci/2021/neutron_ci.2021-06-15-15.00.log.html Depends-On: https://review.opendev.org/c/openstack/neutron/+/797051 Change-Id: Ib14542311e9b1d49829bef54f433b8a04709a9fd --- .zuul.yaml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 3c490ff180..f8435a55e4 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -730,11 +730,11 @@ irrelevant-files: - ^.*\.rst$ - ^doc/.*$ - - neutron-grenade-multinode: + - neutron-ovs-grenade-multinode: irrelevant-files: - ^.*\.rst$ - ^doc/.*$ - - neutron-tempest-linuxbridge: + - neutron-linuxbridge-tempest: irrelevant-files: - ^.*\.rst$ - ^doc/.*$ @@ -767,11 +767,11 @@ - devstack-multinode - devstack-unit-tests - openstack-tox-bashate - - neutron-grenade-multinode: + - neutron-ovs-grenade-multinode: irrelevant-files: - ^.*\.rst$ - ^doc/.*$ - - neutron-tempest-linuxbridge: + - neutron-linuxbridge-tempest: irrelevant-files: - ^.*\.rst$ - ^doc/.*$ @@ -824,11 +824,11 @@ irrelevant-files: - ^.*\.rst$ - ^doc/.*$ - - neutron-tempest-dvr: + - neutron-ovs-tempest-dvr: irrelevant-files: - ^.*\.rst$ - ^doc/.*$ - - neutron-tempest-dvr-ha-multinode-full: + - neutron-ovs-tempest-dvr-ha-multinode-full: irrelevant-files: - ^.*\.rst$ - ^doc/.*$ From 2175ff31085972911d155144e02fb178cafaa638 Mon Sep 17 00:00:00 2001 From: Lee Yarwood Date: Fri, 25 Jun 2021 10:59:29 +0100 Subject: [PATCH 086/574] zuul: Add /etc/libvirt to log collection Useful when debugging libvirtd issues such as bug #1912310. 
Related-Bug: #1912310 Change-Id: Ic8504bd61316e44215672cc44436a3b9a19e114d --- .zuul.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/.zuul.yaml b/.zuul.yaml index 3c490ff180..b189849655 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -336,6 +336,7 @@ /var/log/postgresql: logs /var/log/mysql: logs /var/log/libvirt: logs + /etc/libvirt: logs /etc/sudoers: logs /etc/sudoers.d: logs '{{ stage_dir }}/iptables.txt': logs From f0bf2bdff12b66eefbb2eae83e919611eb7cc76d Mon Sep 17 00:00:00 2001 From: Lee Yarwood Date: Tue, 29 Jun 2021 09:18:47 +0100 Subject: [PATCH 087/574] libvirt: Stop installing python bindings from pip As set out in bug #1933096 these bindings are dynamically built against the version of libvirt present in the environment at build time. As a result using a pre-built wheel can cause AttributeError's when the bindings have previously been built elsewhere against an older version of libvirt installed on the host. This is currently the case in CentOS 8 stream based CI jobs where we try to use 7.4.0 bindings that appear to be built against libvirt <= 6.10 leading to bug #1933096. This change seeks to avoid this by installing the bindings from packages that will always be built against the correct corresponding version of libvirt. Change-Id: I76184c17a776c4e1ecaab9549d9d36c8c07c60fa Closes-Bug: #1933096 --- lib/nova_plugins/functions-libvirt | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt index d3827c30dd..e9ceae4dea 100644 --- a/lib/nova_plugins/functions-libvirt +++ b/lib/nova_plugins/functions-libvirt @@ -58,13 +58,10 @@ EOF function install_libvirt { if is_ubuntu; then - install_package qemu-system libvirt-clients libvirt-daemon-system libvirt-dev + install_package qemu-system libvirt-clients libvirt-daemon-system libvirt-dev python3-libvirt if is_arch "aarch64"; then install_package qemu-efi fi - # uninstall in case the libvirt version changed - pip_uninstall libvirt-python - pip_install_gr libvirt-python #pip_install_gr elif is_fedora || is_suse; then @@ -79,14 +76,11 @@ function install_libvirt { # as the base system version is too old. We should have # pre-installed these install_package qemu-kvm + install_package libvirt libvirt-devel python3-libvirt - install_package libvirt libvirt-devel if is_arch "aarch64"; then install_package edk2.git-aarch64 fi - - pip_uninstall libvirt-python - pip_install_gr libvirt-python fi if [[ $DEBUG_LIBVIRT_COREDUMPS == True ]]; then From 1ab63132df6831bdf7ce180cf7923540305dcd02 Mon Sep 17 00:00:00 2001 From: Przemyslaw Szczerbik Date: Tue, 6 Jul 2021 14:26:40 +0200 Subject: [PATCH 088/574] Allow to install os-resource-classes from git repo Example local.conf config snippet: LIBS_FROM_GIT="os-resource-classes" OS_RESOURCE_CLASSES_REPO="${LOCAL_GIT_BASE}/os-resource-classes" OS_RESOURCE_CLASSES_BRANCH="dev_branch" Closes-Bug: #1934784 Change-Id: I972a2a49aa816433152e5cfac4f672c0465d083f --- lib/libraries | 2 ++ stackrc | 4 ++++ tests/test_libs_from_pypi.sh | 2 +- 3 files changed, 7 insertions(+), 1 deletion(-) mode change 100644 => 100755 lib/libraries mode change 100644 => 100755 stackrc diff --git a/lib/libraries b/lib/libraries old mode 100644 new mode 100755 index c7aa8151ae..67ff21f41a --- a/lib/libraries +++ b/lib/libraries @@ -59,6 +59,7 @@ GITDIR["tooz"]=$DEST/tooz # Non oslo libraries are welcomed below as well, this prevents # duplication of this code. 
GITDIR["os-brick"]=$DEST/os-brick +GITDIR["os-resource-classes"]=$DEST/os-resource-classes GITDIR["os-traits"]=$DEST/os-traits # Support entry points installation of console scripts @@ -122,6 +123,7 @@ function install_libs { # # os-traits for nova _install_lib_from_source "os-brick" + _install_lib_from_source "os-resource-classes" _install_lib_from_source "os-traits" # # python client libraries we might need from git can go here diff --git a/stackrc b/stackrc old mode 100644 new mode 100755 index 05016594eb..620b1fc04d --- a/stackrc +++ b/stackrc @@ -548,6 +548,10 @@ GITREPO["neutron-lib"]=${NEUTRON_LIB_REPO:-${GIT_BASE}/openstack/neutron-lib.git GITBRANCH["neutron-lib"]=${NEUTRON_LIB_BRANCH:-$TARGET_BRANCH} GITDIR["neutron-lib"]=$DEST/neutron-lib +# os-resource-classes library containing a list of standardized resource classes for OpenStack +GITREPO["os-resource-classes"]=${OS_RESOURCE_CLASSES_REPO=:-${GIT_BASE}/openstack/os-resource-classes.git} +GITBRANCH["os-resource-classes"]=${OS_RESOURCE_CLASSES_BRANCH:-$TARGET_BRANCH} + # os-traits library for resource provider traits in the placement service GITREPO["os-traits"]=${OS_TRAITS_REPO:-${GIT_BASE}/openstack/os-traits.git} GITBRANCH["os-traits"]=${OS_TRAITS_BRANCH:-$TARGET_BRANCH} diff --git a/tests/test_libs_from_pypi.sh b/tests/test_libs_from_pypi.sh index 5b53389073..ce1b34461c 100755 --- a/tests/test_libs_from_pypi.sh +++ b/tests/test_libs_from_pypi.sh @@ -44,7 +44,7 @@ ALL_LIBS+=" debtcollector os-brick os-traits automaton futurist oslo.service" ALL_LIBS+=" oslo.cache oslo.reports osprofiler cursive" ALL_LIBS+=" keystoneauth ironic-lib neutron-lib oslo.privsep" ALL_LIBS+=" diskimage-builder os-vif python-brick-cinderclient-ext" -ALL_LIBS+=" castellan python-barbicanclient ovsdbapp os-ken" +ALL_LIBS+=" castellan python-barbicanclient ovsdbapp os-ken os-resource-classes" # Generate the above list with # echo ${!GITREPO[@]} From 6f4eafb823e498a8d5eb344376c41f5bec8a1b04 Mon Sep 17 00:00:00 2001 From: zenkuro Date: Thu, 15 Jul 2021 19:24:28 +0300 Subject: [PATCH 089/574] Added AlmaLinux to CentOS 8 family Change-Id: I9fb6f010842a495c838d468b47dc5081596f41a2 --- functions-common | 2 ++ 1 file changed, 2 insertions(+) diff --git a/functions-common b/functions-common index 11679e4aa3..40567f8e1d 100644 --- a/functions-common +++ b/functions-common @@ -391,6 +391,7 @@ function GetDistro { DISTRO="sle${os_RELEASE%.*}" elif [[ "$os_VENDOR" =~ (Red.*Hat) || \ "$os_VENDOR" =~ (CentOS) || \ + "$os_VENDOR" =~ (AlmaLinux) || \ "$os_VENDOR" =~ (Scientific) || \ "$os_VENDOR" =~ (OracleServer) || \ "$os_VENDOR" =~ (Virtuozzo) ]]; then @@ -451,6 +452,7 @@ function is_fedora { [ "$os_VENDOR" = "RedHatEnterpriseServer" ] || \ [ "$os_VENDOR" = "RedHatEnterprise" ] || \ [ "$os_VENDOR" = "CentOS" ] || [ "$os_VENDOR" = "CentOSStream" ] || \ + [ "$os_VENDOR" = "AlmaLinux" ] || \ [ "$os_VENDOR" = "OracleServer" ] || [ "$os_VENDOR" = "Virtuozzo" ] } From 5a642450d6ac94ff1ea2bea3e7ce3887ca79dcc4 Mon Sep 17 00:00:00 2001 From: Julia Kreger Date: Mon, 19 Jul 2021 07:01:29 -0700 Subject: [PATCH 090/574] Provide override for glance image size limit The glance image size limitation was added and unfortuantely does prevent larger images from being uploaded to glance. In the case of all baremetal testing, this value is realistically smaller than stock "cloud" images which support booting to baremetal with often requisite firmware blobs, which forces some images over 1GB in size. 
Adds GLANCE_LIMIT_IMAGE_SIZE_TOTAL which allows users who need larger images to be able to override the default while still enabling limits enforcement in their deployment. The default value is 1000. Change-Id: Id425aa546f1a5973bae8be9c017782d18f0b4a47 --- lib/glance | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/lib/glance b/lib/glance index fd2e0afcc1..b132f37834 100644 --- a/lib/glance +++ b/lib/glance @@ -108,6 +108,10 @@ GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$GLANCE_SERVICE_HOST:$GLANCE_SERVICE_PORT} GLANCE_SERVICE_PROTOCOL=${GLANCE_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} GLANCE_UWSGI=$GLANCE_BIN_DIR/glance-wsgi-api GLANCE_UWSGI_CONF=$GLANCE_CONF_DIR/glance-uwsgi.ini + +# Glance default limit for Devstack +GLANCE_LIMIT_IMAGE_SIZE_TOTAL=${GLANCE_LIMIT_IMAGE_SIZE_TOTAL:-1000} + # If wsgi mode is uwsgi run glance under uwsgi, else default to eventlet # TODO(mtreinish): Remove the eventlet path here and in all the similar # conditionals below after the Pike release @@ -273,11 +277,11 @@ function configure_glance_quotas { bash -c "unset OS_USERNAME OS_TENANT_NAME OS_PROJECT_NAME; openstack --os-cloud devstack-system-admin registered limit create \ - --service glance --default-limit 1000 --region $REGION_NAME \ - image_size_total; \ + --service glance --default-limit $GLANCE_LIMIT_IMAGE_SIZE_TOTAL \ + --region $REGION_NAME image_size_total; \ openstack --os-cloud devstack-system-admin registered limit create \ - --service glance --default-limit 1000 --region $REGION_NAME \ - image_stage_total; \ + --service glance --default-limit $GLANCE_LIMIT_IMAGE_SIZE_TOTAL \ + --region $REGION_NAME image_stage_total; \ openstack --os-cloud devstack-system-admin registered limit create \ --service glance --default-limit 100 --region $REGION_NAME \ image_count_total; \ From c8b66ff33e14c8339c8146d3116b9ff672f912ec Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Mon, 19 Jul 2021 11:14:18 -0700 Subject: [PATCH 091/574] Add configuration notes about glance limits Change-Id: I21a43584116f4b719cf99d3942044cbf13fefb9a --- doc/source/configuration.rst | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index 2d0c894530..67456142de 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -689,6 +689,24 @@ use the v3 API. It is possible to setup keystone without v2 API, by doing: ENABLE_IDENTITY_V2=False + +Glance +++++++ + +The default image size quota of 1GiB may be too small if larger images +are to be used. Change the default at setup time with: + +:: + + GLANCE_LIMIT_IMAGE_SIZE_TOTAL=5000 + +or at runtime via: + +:: + + openstack --os-cloud devstack-system-admin registered limit update \ + --service glance --default-limit 5000 --region RegionOne image_size_total + .. _arch-configuration: Architectures From 71bd10e45197a405cd497c8923db7442bde14a95 Mon Sep 17 00:00:00 2001 From: Pavan Kesava Rao Date: Mon, 19 Jul 2021 13:33:42 -0400 Subject: [PATCH 092/574] Enable tempest tests for hostname sanitization Starting Wallaby release, nova sanitizes instance hostnames having freeform characters with dashes. It should be tested in Devstack. 
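For reference, the iniset call in the diff below should leave a tempest.conf
entry roughly like the following (shown only to make the intent concrete):

    [compute-feature-enabled]
    hostname_fqdn_sanitization = True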
Depends-On: https://review.opendev.org/c/openstack/tempest/+/795699 Change-Id: I54794e58b67620c36e8f2966ec3b62dd24da745b --- lib/tempest | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/tempest b/lib/tempest index 4eacfa09ca..095361d4f4 100644 --- a/lib/tempest +++ b/lib/tempest @@ -419,6 +419,9 @@ function configure_tempest { iniset $TEMPEST_CONFIG compute-feature-enabled live_migrate_back_and_forth ${LIVE_MIGRATE_BACK_AND_FORTH:-False} iniset $TEMPEST_CONFIG compute-feature-enabled attach_encrypted_volume ${ATTACH_ENCRYPTED_VOLUME_AVAILABLE:-True} + # Starting Wallaby, nova sanitizes instance hostnames having freeform characters with dashes + iniset $TEMPEST_CONFIG compute-feature-enabled hostname_fqdn_sanitization True + if [[ -n "$NOVA_FILTERS" ]]; then iniset $TEMPEST_CONFIG compute-feature-enabled scheduler_enabled_filters ${NOVA_FILTERS} fi From 524487728e85388c73ececae3f2eb272321cffc7 Mon Sep 17 00:00:00 2001 From: Marek Tamaskovic Date: Wed, 28 Jul 2021 16:54:50 +0200 Subject: [PATCH 093/574] Fix opening shell as user 'stack' The usage of sudo with su is not recommended. It results in incosnistent environment variables. Instead use just sudo with appropriate arguments. The argument '-u stack' specifies that the sudo will execute as user 'stack'. The last argument '-i' will launch an interactive shell. Closes-Bug: #1938148 Change-Id: I42387660480377cdf9a0b04f190e7e1f21fb354f --- doc/source/index.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/index.rst b/doc/source/index.rst index 9f477ab911..08ce4cb061 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -63,7 +63,7 @@ have sudo privileges: .. code-block:: console $ echo "stack ALL=(ALL) NOPASSWD: ALL" | sudo tee /etc/sudoers.d/stack - $ sudo su - stack + $ sudo -u stack -i Download DevStack ----------------- From 0456baaee5309431cd1f88ba4a0ceaa7f050b743 Mon Sep 17 00:00:00 2001 From: yatinkarel Date: Fri, 30 Jul 2021 19:29:57 +0530 Subject: [PATCH 094/574] Fix Usage of rdo-release rpm rdo-release.el8.rpm rpm points to latest RDO release, so use it for master, for stable releases use corresponding release rpm. Change-Id: I508eceb00d7501ffcfac73d7bc2272badb241494 --- stack.sh | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/stack.sh b/stack.sh index c439a7217f..e3d67f571f 100755 --- a/stack.sh +++ b/stack.sh @@ -300,10 +300,14 @@ function _install_epel { } function _install_rdo { - # NOTE(ianw) 2020-04-30 : when we have future branches, we - # probably want to install the relevant branch RDO release as - # well. But for now it's all master. 
- sudo dnf -y install https://rdoproject.org/repos/rdo-release.el8.rpm + if [[ "$TARGET_BRANCH" == "master" ]]; then + # rdo-release.el8.rpm points to latest RDO release, use that for master + sudo dnf -y install https://rdoproject.org/repos/rdo-release.el8.rpm + else + # For stable branches use corresponding release rpm + rdo_release=$(echo $TARGET_BRANCH | sed "s|stable/||g") + sudo dnf -y install https://rdoproject.org/repos/openstack-${rdo_release}/rdo-release-${rdo_release}.el8.rpm + fi sudo dnf -y update } From ba68a49598309c5f6c5e4a0ec9d2b13b8229eee0 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Sat, 31 Jul 2021 06:13:46 +0000 Subject: [PATCH 095/574] Updated from generate-devstack-plugins-list Change-Id: I062b9a121c79650973c8d8d975e1c723d5798777 --- doc/source/plugin-registry.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 691fffa846..490132e0d7 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -98,6 +98,7 @@ openstack/shade `https://opendev.org/openstack/shade `__ openstack/storlets `https://opendev.org/openstack/storlets `__ openstack/tacker `https://opendev.org/openstack/tacker `__ +openstack/tap-as-a-service `https://opendev.org/openstack/tap-as-a-service `__ openstack/telemetry-tempest-plugin `https://opendev.org/openstack/telemetry-tempest-plugin `__ openstack/trove `https://opendev.org/openstack/trove `__ openstack/trove-dashboard `https://opendev.org/openstack/trove-dashboard `__ @@ -179,7 +180,6 @@ x/rsd-virt-for-nova `https://opendev.org/x/rsd-virt-for-nov x/scalpels `https://opendev.org/x/scalpels `__ x/slogging `https://opendev.org/x/slogging `__ x/stackube `https://opendev.org/x/stackube `__ -x/tap-as-a-service `https://opendev.org/x/tap-as-a-service `__ x/tap-as-a-service-dashboard `https://opendev.org/x/tap-as-a-service-dashboard `__ x/tatu `https://opendev.org/x/tatu `__ x/trio2o `https://opendev.org/x/trio2o `__ From f44aa0c55a81e24a8ad321c0c741939e86705e09 Mon Sep 17 00:00:00 2001 From: Brian Rosmaita Date: Wed, 4 Aug 2021 18:27:48 -0400 Subject: [PATCH 096/574] Allow cinder default quotas configuration The default cinder quotas for volumes, backups, or snapshots may be too low for highly concurrent testing, so make these configurable in devstack. Change-Id: Ie3cf3239b48f9905f5760ad0166eea954ecf5eed --- doc/source/configuration.rst | 6 ++++++ lib/cinder | 5 +++++ 2 files changed, 11 insertions(+) diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index 67456142de..8244525075 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -642,6 +642,12 @@ with ``VOLUME_BACKING_FILE_SIZE``. VOLUME_NAME_PREFIX="volume-" VOLUME_BACKING_FILE_SIZE=24G +When running highly concurrent tests, the default per-project quotas +for volumes, backups, or snapshots may be too small. These can be +adjusted by setting ``CINDER_QUOTA_VOLUMES``, ``CINDER_QUOTA_BACKUPS``, +or ``CINDER_QUOTA_SNAPSHOTS`` to the desired value. (The default for +each is 10.) 
+ Keystone ~~~~~~~~ diff --git a/lib/cinder b/lib/cinder index 7f2f29f892..9235428335 100644 --- a/lib/cinder +++ b/lib/cinder @@ -267,6 +267,11 @@ function configure_cinder { iniset $CINDER_CONF key_manager backend cinder.keymgr.conf_key_mgr.ConfKeyManager iniset $CINDER_CONF key_manager fixed_key $(openssl rand -hex 16) + # set default quotas + iniset $CINDER_CONF DEFAULT quota_volumes ${CINDER_QUOTA_VOLUMES:-10} + iniset $CINDER_CONF DEFAULT quota_backups ${CINDER_QUOTA_BACKUPS:-10} + iniset $CINDER_CONF DEFAULT quota_snapshots ${CINDER_QUOTA_SNAPSHOTS:-10} + # Avoid RPC timeouts in slow CI and test environments by doubling the # default response timeout set by RPC clients. See bug #1873234 for more # details and example failures. From ac1b723c20fb67aaecd43cd08c6eee88c5f339f2 Mon Sep 17 00:00:00 2001 From: Roman Dobosz Date: Fri, 6 Aug 2021 12:52:01 +0200 Subject: [PATCH 097/574] Fix displaying usage for make_cert.sh Now, if no arguments are passed to make_cert.sh script, it will fail on: tools/make_cert.sh: line 30: [: missing `]' and might go on with generating certs depending on the bash settings. It is fixed within this patch. Change-Id: I62bf9c972ebd1644da622439e05114f245f20809 --- tools/make_cert.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/make_cert.sh b/tools/make_cert.sh index e91464fc0f..0212d0033a 100755 --- a/tools/make_cert.sh +++ b/tools/make_cert.sh @@ -27,7 +27,7 @@ function usage { } CN=$1 -if [ -z "$CN" ]]; then +if [ -z "$CN" ]; then usage fi ORG_UNIT_NAME=${2:-$ORG_UNIT_NAME} @@ -52,5 +52,5 @@ init_CA make_cert $INT_CA_DIR $DEVSTACK_CERT_NAME $DEVSTACK_HOSTNAME # Create a cert bundle -cat $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/cacert.pem >$DEVSTACK_CERT - +cat $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key \ + $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/cacert.pem >$DEVSTACK_CERT From 6b9a5646225a766f6240e2a1a93a92b82e088aa0 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Wed, 28 Jul 2021 11:19:57 +1000 Subject: [PATCH 098/574] Revert "Workaround for new pip 20.3 behavior" This reverts commit 7a3a7ce876a37376fe0dca7278e41a4f46867daa and bcd0acf6c0b5d6501e91133c3a937b3fc40f7122 and part of f1ed7c77c50ac28cb58c9f7ed885c6a3e0a75403 which all cap our pip installs. Given the pip ecosystem can often incorporate major changes, tracking upstream at least generally gives us one problem at a time to solve rather than trying to handle version jumps when LTS distros update. The new dependency resolver included some changes that disallow setting URL's like "file:///path/to/project#egg=project" in constraints. Apparently the fact it used to work was an accident of the requires/constraints mechanism; it does make some sense as the URL doesn't really have a version-number that the resolver can put in an ordering graph. The _setup_package_with_constraints_edit function comment highlights what this is trying to do # Updates the constraints from REQUIREMENTS_DIR to reflect the # future installed state of this package. This ensures when we # install this package we get the from source version. In other words; if constraints has "foo==1.2.3" and Zuul has checked out "foo" for testing, we have to make sure pip doesn't choose version 1.2.3 from pypi. It seems like removing the entry from upper-requirements.txt is the important part; adding the URL path to the on-disk version was just something that seemed to work at the time, but isn't really necessary. 
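In other words, the intended effect is simply to drop the constrained version
of whatever is being installed from source, rather than pointing the
constraint at a file:// URL; a sketch, using a hypothetical package name
"foo":

    # "foo" is a stand-in name, not a real requirement
    # upper-constraints.txt before:  foo===1.2.3
    # upper-constraints.txt after :  the foo line is removed entirely
    $REQUIREMENTS_DIR/.venv/bin/edit-constraints \
        $REQUIREMENTS_DIR/upper-constraints.txt -- foo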
We will install the package in question which will be the latest version (from Zuul checkout) and without the package in upper-requirements.txt nothing will try and downgrade it. Therefore the solution proposed here is to remove the adding of the URL parts. This allows us to uncap pip and restore testing with the new dependency resolver. Closes-Bug: #1906322 Change-Id: Ib9ba52147199a9d6d0293182d5db50c4a567d677 --- inc/python | 7 ++++--- lib/tempest | 3 --- tools/cap-pip.txt | 1 - tools/fixup_stuff.sh | 15 +++++++++++++++ tools/install_pip.sh | 9 +-------- 5 files changed, 20 insertions(+), 15 deletions(-) delete mode 100644 tools/cap-pip.txt diff --git a/inc/python b/inc/python index 8941fd038d..9382d352dc 100644 --- a/inc/python +++ b/inc/python @@ -378,12 +378,13 @@ function _setup_package_with_constraints_edit { project_dir=$(cd $project_dir && pwd) if [ -n "$REQUIREMENTS_DIR" ]; then - # Constrain this package to this project directory from here on out. + # Remove this package from constraints before we install it. + # That way, later installs won't "downgrade" the install from + # source we are about to do. local name name=$(awk '/^name.*=/ {print $3}' $project_dir/setup.cfg) $REQUIREMENTS_DIR/.venv/bin/edit-constraints \ - $REQUIREMENTS_DIR/upper-constraints.txt -- $name \ - "$flags file://$project_dir#egg=$name" + $REQUIREMENTS_DIR/upper-constraints.txt -- $name fi setup_package $bindep $project_dir "$flags" $extras diff --git a/lib/tempest b/lib/tempest index 3fa7ce0fb2..a1c02ef183 100644 --- a/lib/tempest +++ b/lib/tempest @@ -718,9 +718,6 @@ function install_tempest { set_tempest_venv_constraints $tmp_u_c_m tox -r --notest -efull - # TODO: remove the trailing pip constraint when a proper fix - # arrives for bug https://bugs.launchpad.net/devstack/+bug/1906322 - $TEMPEST_DIR/.tox/tempest/bin/pip install -U -r $RC_DIR/tools/cap-pip.txt # NOTE(mtreinish) Respect constraints in the tempest full venv, things that # are using a tox job other than full will not be respecting constraints but # running pip install -U on tempest requirements diff --git a/tools/cap-pip.txt b/tools/cap-pip.txt deleted file mode 100644 index 8ee551b261..0000000000 --- a/tools/cap-pip.txt +++ /dev/null @@ -1 +0,0 @@ -pip<20.3 diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index 19219435ad..8a2c337fc4 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -155,8 +155,23 @@ function fixup_ovn_centos { yum_install centos-release-openstack-victoria } +function fixup_ubuntu { + if ! is_ubuntu; then + return + fi + + # Since pip10, pip will refuse to uninstall files from packages + # that were created with distutils (rather than more modern + # setuptools). This is because it technically doesn't have a + # manifest of what to remove. However, in most cases, simply + # overwriting works. 
So this hacks around those packages that + # have been dragged in by some other system dependency + sudo rm -rf /usr/lib/python3/dist-packages/PyYAML-*.egg-info +} + function fixup_all { fixup_keystone + fixup_ubuntu fixup_fedora fixup_suse } diff --git a/tools/install_pip.sh b/tools/install_pip.sh index 9afd2e53c2..0082e9f73c 100755 --- a/tools/install_pip.sh +++ b/tools/install_pip.sh @@ -91,9 +91,7 @@ function install_get_pip { die $LINENO "Download of get-pip.py failed" touch $LOCAL_PIP.downloaded fi - # TODO: remove the trailing pip constraint when a proper fix - # arrives for bug https://bugs.launchpad.net/devstack/+bug/1906322 - sudo -H -E python${PYTHON3_VERSION} $LOCAL_PIP -c $TOOLS_DIR/cap-pip.txt + sudo -H -E python${PYTHON3_VERSION} $LOCAL_PIP } @@ -130,11 +128,6 @@ if [[ -n $PYPI_ALTERNATIVE_URL ]]; then configure_pypi_alternative_url fi -# Just use system pkgs on Focal -if [[ "$DISTRO" == focal ]]; then - exit 0 -fi - # Eradicate any and all system packages # Python in fedora/suse depends on the python-pip package so removing it From 2df2aa01584fb2a26112c60f0a16869e233cb7ee Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Tue, 10 Aug 2021 13:50:08 +1000 Subject: [PATCH 099/574] install_pip: don't fail when not installed On some platforms, "python -m pip" isn't available. Currently this is run undconditionally from the "get_versions" function; remove the call. Change-Id: I91d6c66d055f02fa7b4368593b629933f82d8117 --- tools/install_pip.sh | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/tools/install_pip.sh b/tools/install_pip.sh index 0082e9f73c..eb0f6eba48 100755 --- a/tools/install_pip.sh +++ b/tools/install_pip.sh @@ -46,15 +46,13 @@ echo "Distro: $DISTRO" function get_versions { # FIXME(dhellmann): Deal with multiple python versions here? This # is just used for reporting, so maybe not? - PIP=$(which pip 2>/dev/null || which pip-python 2>/dev/null || true) + PIP=$(which pip 2>/dev/null || which pip-python 2>/dev/null || which pip3 2>/dev/null || true) if [[ -n $PIP ]]; then PIP_VERSION=$($PIP --version | awk '{ print $2}') echo "pip: $PIP_VERSION" else echo "pip: Not Installed" fi - # Show python3 module version - python${PYTHON3_VERSION} -m pip --version } From 26bd94b45efb63683072006e4281dd34a313d881 Mon Sep 17 00:00:00 2001 From: Ghanshyam Date: Tue, 10 Aug 2021 14:49:54 +0000 Subject: [PATCH 100/574] Revert "Add enforce_scope setting support for keystone" This reverts commit 9dc2b88eb42a5f98f43bc8ad3dfa3962a4d44d74. Reason for revert: Devstack creation/setup the things are not yet moved to scope tokens so we need to wait for that first and then do the scope check enable globally. Change-Id: If0368aca39c1325bf90abd23831118b89e746222 --- lib/keystone | 11 ----------- lib/tempest | 9 --------- 2 files changed, 20 deletions(-) diff --git a/lib/keystone b/lib/keystone index e282db0bfa..66e867ca68 100644 --- a/lib/keystone +++ b/lib/keystone @@ -134,12 +134,6 @@ KEYSTONE_PASSWORD_HASH_ROUNDS=${KEYSTONE_PASSWORD_HASH_ROUNDS:-4} # Cache settings KEYSTONE_ENABLE_CACHE=${KEYSTONE_ENABLE_CACHE:-True} -# Flag to set the oslo_policy.enforce_scope. This is used to switch -# the Identity API policies to start checking the scope of token. By Default, -# this flag is False. 
-# For more detail: https://docs.openstack.org/oslo.policy/latest/configuration/index.html#oslo_policy.enforce_scope -KEYSTONE_ENFORCE_SCOPE=$(trueorfalse False KEYSTONE_ENFORCE_SCOPE) - # Functions # --------- @@ -287,11 +281,6 @@ function configure_keystone { iniset $KEYSTONE_CONF security_compliance lockout_duration $KEYSTONE_LOCKOUT_DURATION iniset $KEYSTONE_CONF security_compliance unique_last_password_count $KEYSTONE_UNIQUE_LAST_PASSWORD_COUNT fi - if [[ "$KEYSTONE_ENFORCE_SCOPE" == True ]] ; then - iniset $KEYSTONE_CONF oslo_policy enforce_scope true - iniset $KEYSTONE_CONF oslo_policy enforce_new_defaults true - iniset $KEYSTONE_CONF oslo_policy policy_file policy.yaml - fi } # create_keystone_accounts() - Sets up common required keystone accounts diff --git a/lib/tempest b/lib/tempest index 3fa7ce0fb2..d39fa1c52b 100644 --- a/lib/tempest +++ b/lib/tempest @@ -600,15 +600,6 @@ function configure_tempest { fi done - # ``enforce_scope`` - # If services enable the enforce_scope for their policy - # we need to enable the same on Tempest side so that - # test can be run with scoped token. - if [[ "$KEYSTONE_ENFORCE_SCOPE" == True ]] ; then - iniset $TEMPEST_CONFIG enforce_scope keystone true - iniset $TEMPEST_CONFIG auth admin_system 'all' - iniset $TEMPEST_CONFIG auth admin_project_name '' - fi iniset $TEMPEST_CONFIG enforce_scope glance "$GLANCE_ENFORCE_SCOPE" iniset $TEMPEST_CONFIG enforce_scope cinder "$CINDER_ENFORCE_SCOPE" From 00ac547acacc9fef86f9045a979adf523ab7617b Mon Sep 17 00:00:00 2001 From: Abhishek Kekane Date: Mon, 9 Aug 2021 05:54:32 +0000 Subject: [PATCH 101/574] Glance remote worker should use own cache directory Earlier glance remote worker was using same cache directory used by glance worker. Ideally both should use their own cache directory. This patch makes provision for the same by setting different path for image_cache_dir config option. Change-Id: If2627e9c212fd765b96d925046c04e9cb1001c3d --- lib/glance | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/lib/glance b/lib/glance index cd26d97dc4..6848aa3c1b 100644 --- a/lib/glance +++ b/lib/glance @@ -552,6 +552,11 @@ function start_glance_remote_clone { iniset $(glance_remote_conf "$GLANCE_API_CONF") os_glance_tasks_store \ filesystem_store_datadir "${remote_data}/os_glance_tasks_store" + # Point this worker to use different cache dir + mkdir -p "$remote_data/cache" + iniset $(glance_remote_conf "$GLANCE_API_CONF") DEFAULT \ + image_cache_dir "${remote_data}/cache" + # Change our uwsgi to our new port sed -ri "s/^(http-socket.*):[0-9]+/\1:$glance_remote_port/" \ "$glance_remote_uwsgi" From a20971850afb555ee4b04068a39a67a533b69901 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Tue, 10 Aug 2021 14:11:12 +1000 Subject: [PATCH 102/574] install_pip: Use packaged pip on Fedora This uses the python3-pip package for Fedora but maintains the status quo for existing distributions (i.e. for Suse we run get-pip.py but don't uninstall, and for everything else we uninstall python3-pip and run get-pip.py to be running the latest pip). As noted inline, installing get-pip.py over Fedora 34's package no longer works, and likely won't ever work again. Unlike the LTS distributions, the Fedora pip should be more up-to-date, so I think it's best we just avoid any package overwrites. 
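A quick, purely illustrative way to check which pip ends up active after
stacking (exact package names and versions will vary by release):

    pip3 --version
    # on Fedora the binary should remain RPM-owned rather than live in /usr/local
    rpm -qf $(which pip3)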
Change-Id: I84129aadfcf585bb150a3daa39616246d3d84bbd --- tools/install_pip.sh | 28 ++++++++++++++++++++-------- 1 file changed, 20 insertions(+), 8 deletions(-) diff --git a/tools/install_pip.sh b/tools/install_pip.sh index eb0f6eba48..7ecea4e821 100755 --- a/tools/install_pip.sh +++ b/tools/install_pip.sh @@ -126,19 +126,31 @@ if [[ -n $PYPI_ALTERNATIVE_URL ]]; then configure_pypi_alternative_url fi -# Eradicate any and all system packages - -# Python in fedora/suse depends on the python-pip package so removing it -# results in a nonfunctional system. pip on fedora installs to /usr so pip -# can safely override the system pip for all versions of fedora -if ! is_fedora && ! is_suse; then +if is_fedora && [[ ${DISTRO} == f* ]]; then + # get-pip.py will not install over the python3-pip package in + # Fedora 34 any more. + # https://bugzilla.redhat.com/show_bug.cgi?id=1988935 + # https://github.com/pypa/pip/issues/9904 + # You can still install using get-pip.py if python3-pip is *not* + # installed; this *should* remain separate under /usr/local and not break + # if python3-pip is later installed. + # For general sanity, we just use the packaged pip. It should be + # recent enough anyway. + install_package python3-pip +elif is_fedora || is_suse; then + # Python in suse/centos depends on the python-pip package; because + # of the split "system-python" uninstalling python3-pip also + # uninstalls the user python3 package which is bad and leaves us + # without a python to use. Just install over. + install_get_pip +else + # Remove packaged pip, and install the latest upstream. if is_package_installed python3-pip ; then uninstall_package python3-pip fi + install_get_pip fi -install_get_pip - set -x # Note setuptools is part of requirements.txt and we want to make sure From 156ccbad85f30cd4c9c33d30b612b3151d542f8e Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Tue, 10 Aug 2021 14:19:28 +1000 Subject: [PATCH 103/574] Add Fedora 34 support * update the support distro filter * don't install xinetd which doesn't exist in F34 any more. I think there is probably a bit more to do with swift ring-server but that can be a problem for another time. * remove old F31 workaround Change-Id:If2f74f146a166b9721540aaf3f1f9fce3030525c --- files/rpms/swift | 2 +- lib/nova | 6 ------ stack.sh | 2 +- 3 files changed, 2 insertions(+), 8 deletions(-) diff --git a/files/rpms/swift b/files/rpms/swift index 376c6f3df7..18c957c08a 100644 --- a/files/rpms/swift +++ b/files/rpms/swift @@ -4,4 +4,4 @@ memcached rsync-daemon sqlite xfsprogs -xinetd +xinetd # not:f34 diff --git a/lib/nova b/lib/nova index 930529a433..de91517add 100644 --- a/lib/nova +++ b/lib/nova @@ -298,12 +298,6 @@ function configure_nova { fi fi - if is_fedora && [[ $DISTRO =~ f31] ]]; then - # For f31 use the rebased 2.1.0 version of the package. - sudo dnf copr enable -y lyarwood/iscsi-initiator-utils - sudo dnf update -y - fi - if [[ ${ISCSID_DEBUG} == "True" ]]; then # Install an override that starts iscsid with debugging # enabled. diff --git a/stack.sh b/stack.sh index c439a7217f..47d6dc718a 100755 --- a/stack.sh +++ b/stack.sh @@ -227,7 +227,7 @@ write_devstack_version # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -SUPPORTED_DISTROS="focal|f31|f32|opensuse-15.2|opensuse-tumbleweed|rhel8" +SUPPORTED_DISTROS="focal|f34|opensuse-15.2|opensuse-tumbleweed|rhel8" if [[ ! 
${DISTRO} =~ $SUPPORTED_DISTROS ]]; then echo "WARNING: this script has not been tested on $DISTRO" From 8dac135cb89fa5d27d7af344703db0a20cf550e1 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Wed, 11 Aug 2021 14:56:05 +1000 Subject: [PATCH 104/574] Simplify pip install The uninstall here has been around since Ibb4b42119dc2e51577c77bbbbffb110863e5324d. At the time, there might have been conflicts between packaged and installed pip. We don't need it today; get-pip.py keeps itself separate enough in /usr/local on all platforms. Thus we can also remove the suse/centos special-casing. python3-pip is in the RPM list so we don't need to re-install for Fedora. Add a note on why we are over-installing pip. Remove some old setuptools workarounds that are commented out. Change-Id: Ie3cb81a8ff71cf4b81e23831c380f83b0381de71 --- stack.sh | 14 +++++++++++++- tools/install_pip.sh | 22 ++-------------------- 2 files changed, 15 insertions(+), 21 deletions(-) diff --git a/stack.sh b/stack.sh index 47d6dc718a..1aa96c9d49 100755 --- a/stack.sh +++ b/stack.sh @@ -752,7 +752,19 @@ git_clone $REQUIREMENTS_REPO $REQUIREMENTS_DIR $REQUIREMENTS_BRANCH echo_summary "Installing package prerequisites" source $TOP_DIR/tools/install_prereqs.sh -# Configure an appropriate Python environment +# Configure an appropriate Python environment. +# +# NOTE(ianw) 2021-08-11 : We install the latest pip here because pip +# is very active and changes are not generally reflected in the LTS +# distros. This often involves important things like dependency or +# conflict resolution, and has often been required because the +# complicated constraints etc. used by openstack have tickled bugs in +# distro versions of pip. We want to find these problems as they +# happen, rather than years later when we try to update our LTS +# distro. Whilst it is clear that global installations of upstream +# pip are less and less common, with virtualenv's being the general +# approach now; there are a lot of devstack plugins that assume a +# global install environment. if [[ "$OFFLINE" != "True" ]]; then PYPI_ALTERNATIVE_URL=${PYPI_ALTERNATIVE_URL:-""} $TOP_DIR/tools/install_pip.sh fi diff --git a/tools/install_pip.sh b/tools/install_pip.sh index 7ecea4e821..a80c178f2a 100755 --- a/tools/install_pip.sh +++ b/tools/install_pip.sh @@ -111,14 +111,6 @@ function configure_pypi_alternative_url { } -# Setuptools 8 implements PEP 440, and 8.0.4 adds a warning triggered any time -# pkg_resources inspects the list of installed Python packages if there are -# non-compliant version numbers in the egg-info (for example, from distro -# system packaged Python libraries). This is off by default after 8.2 but can -# be enabled by uncommenting the lines below. -#PYTHONWARNINGS=$PYTHONWARNINGS,always::RuntimeWarning:pkg_resources -#export PYTHONWARNINGS - # Show starting versions get_versions @@ -135,19 +127,9 @@ if is_fedora && [[ ${DISTRO} == f* ]]; then # installed; this *should* remain separate under /usr/local and not break # if python3-pip is later installed. # For general sanity, we just use the packaged pip. It should be - # recent enough anyway. - install_package python3-pip -elif is_fedora || is_suse; then - # Python in suse/centos depends on the python-pip package; because - # of the split "system-python" uninstalling python3-pip also - # uninstalls the user python3 package which is bad and leaves us - # without a python to use. Just install over. - install_get_pip + # recent enough anyway. 
This is included via rpms/general + continue else - # Remove packaged pip, and install the latest upstream. - if is_package_installed python3-pip ; then - uninstall_package python3-pip - fi install_get_pip fi From 6fecfd4fff79620596db45c9d22f8ec63a0d5522 Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Wed, 11 Aug 2021 10:32:42 -0500 Subject: [PATCH 105/574] Add devstack-enforce-scope job to enable the rbac scope checks keystone has system scope feature implemented since queens release. Now Devstack also started moving towards the new RBAC. This commit adds a new job 'devstack-enforce-scope' which enable the scope checks on service side and see if devstack setting are fine or not. This job will be expanded to enable the scope checks for the other service also once they start supporting the system scope. This will help us to test the scope check setting. Change-Id: Ie9cd9c7e7cd8fdf8c8930e59ae9d297f86eb9a95 --- .zuul.yaml | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/.zuul.yaml b/.zuul.yaml index 8c275d84dc..517e12bc1c 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -580,6 +580,17 @@ SERVICE_IP_VERSION: 6 SERVICE_HOST: "" +- job: + name: devstack-enforce-scope + parent: devstack + description: | + This job runs the devstack with scope checks enabled. + vars: + devstack_localrc: + # Keep enabeling the services here to run with system scope + CINDER_ENFORCE_SCOPE: true + GLANCE_ENFORCE_SCOPE: true + - job: name: devstack-multinode parent: devstack @@ -711,6 +722,7 @@ jobs: - devstack - devstack-ipv6 + - devstack-enforce-scope - devstack-platform-fedora-latest - devstack-platform-centos-8-stream - devstack-async @@ -765,6 +777,7 @@ jobs: - devstack - devstack-ipv6 + - devstack-enforce-scope - devstack-multinode - devstack-unit-tests - openstack-tox-bashate From 60b5538c337dfa3c8f60fecdc64e671acd1f1cbe Mon Sep 17 00:00:00 2001 From: yatinkarel Date: Fri, 6 Aug 2021 12:49:33 +0530 Subject: [PATCH 106/574] Set swap size to 4G for c8 jobs Tempest is failing randomly with different reasons as mentioned in the bug, updating swap size those issues are not seen. Before [1] default swap size used to be 8GB but was dropped to 1G so need to configure it in required job itself. Did couple of tests in [2] and with 4GB+ swap jobs are running green. On investigation found that with qemu-5 both Ubuntu and CentOS jobs have memory crunch, currently Ubuntu jobs are not impacted as they are running with qemu-4. [1] https://review.opendev.org/c/openstack/openstack-zuul-jobs/+/750941 [2] https://review.opendev.org/c/openstack/devstack/+/803144 Closes-Bug: #1938914 Change-Id: I57910b5fde5ddf2bd37d93e06c1aff77c6e231e9 --- .zuul.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.zuul.yaml b/.zuul.yaml index 8c275d84dc..1295fed457 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -599,6 +599,8 @@ nodeset: devstack-single-node-centos-8-stream voting: false timeout: 9000 + vars: + configure_swap_size: 4096 - job: name: devstack-async From 26f814921898390eb263f1060fb99cddae1accdc Mon Sep 17 00:00:00 2001 From: Grzegorz Grasza Date: Mon, 16 Aug 2021 10:36:03 +0200 Subject: [PATCH 107/574] Use MDB backend in Ubuntu The MDB backend is the default in Ubuntu and specifying HDB in debconf doesn't change it to HDB. 
Closes-Bug: #1939700 Change-Id: If98f7fc8395678365fb73f0c5cd926cef083e470 --- files/ldap/manager.ldif.in | 2 +- lib/ldap | 6 +++++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/files/ldap/manager.ldif.in b/files/ldap/manager.ldif.in index 2f1f1395ee..d3b9be8b6e 100644 --- a/files/ldap/manager.ldif.in +++ b/files/ldap/manager.ldif.in @@ -1,4 +1,4 @@ -dn: olcDatabase={${LDAP_OLCDB_NUMBER}}hdb,cn=config +dn: olcDatabase={${LDAP_OLCDB_NUMBER}}${LDAP_OLCDB_TYPE},cn=config changetype: modify replace: olcSuffix olcSuffix: ${BASE_DN} diff --git a/lib/ldap b/lib/ldap index 5a53d0eaee..ea5faa1fe9 100644 --- a/lib/ldap +++ b/lib/ldap @@ -33,14 +33,17 @@ LDAP_SERVICE_NAME=slapd if is_ubuntu; then LDAP_OLCDB_NUMBER=1 + LDAP_OLCDB_TYPE=mdb LDAP_ROOTPW_COMMAND=replace elif is_fedora; then LDAP_OLCDB_NUMBER=2 + LDAP_OLCDB_TYPE=hdb LDAP_ROOTPW_COMMAND=add elif is_suse; then # SUSE has slappasswd in /usr/sbin/ PATH=$PATH:/usr/sbin/ LDAP_OLCDB_NUMBER=1 + LDAP_OLCDB_TYPE=hdb LDAP_ROOTPW_COMMAND=add LDAP_SERVICE_NAME=ldap fi @@ -56,6 +59,7 @@ function _ldap_varsubst { local slappass=$2 sed -e " s|\${LDAP_OLCDB_NUMBER}|$LDAP_OLCDB_NUMBER| + s|\${LDAP_OLCDB_TYPE}|$LDAP_OLCDB_TYPE| s|\${SLAPPASS}|$slappass| s|\${LDAP_ROOTPW_COMMAND}|$LDAP_ROOTPW_COMMAND| s|\${BASE_DC}|$LDAP_BASE_DC| @@ -157,7 +161,7 @@ function configure_ldap { slapd slapd/dump_database_destdir string /var/backups/slapd-VERSION slapd slapd/domain string Users slapd shared/organization string $LDAP_DOMAIN - slapd slapd/backend string HDB + slapd slapd/backend string ${LDAP_OLCDB_TYPE^^} slapd slapd/purge_database boolean true slapd slapd/move_old_database boolean true slapd slapd/allow_ldap_v2 boolean false From ed323805f26e51438fd08d6d51882d758551924c Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Tue, 17 Aug 2021 08:45:02 -0500 Subject: [PATCH 108/574] make swift-dsvm-functional job as voting swift-dsvm-functional job test swift under python3 and voting on swift gate whihc means this is a stable job now, let's make this voting to devstack gate too. Removing swift-dsvm-functional-py3 job as it does not exist anymore after- https://review.opendev.org/c/openstack/swift/+/731318 swift-dsvm-functional itself is py3 job now. Change-Id: I58847f74306194eaad132680815101a134fb4022 --- .zuul.yaml | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 517e12bc1c..772cc7b2d7 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -731,14 +731,7 @@ - openstack-tox-bashate - ironic-tempest-ipa-wholedisk-bios-agent_ipmitool-tinyipa: voting: false - - swift-dsvm-functional: - voting: false - irrelevant-files: &dsvm-irrelevant-files - - ^.*\.rst$ - - ^doc/.*$ - - swift-dsvm-functional-py3: - voting: false - irrelevant-files: *dsvm-irrelevant-files + - swift-dsvm-functional - grenade: irrelevant-files: - ^.*\.rst$ @@ -789,6 +782,7 @@ irrelevant-files: - ^.*\.rst$ - ^doc/.*$ + - swift-dsvm-functional - grenade: irrelevant-files: - ^.*\.rst$ From 31f0418bad5a9542a38e8200131adf4ace08e01a Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Tue, 17 Aug 2021 08:49:28 -0500 Subject: [PATCH 109/574] Make Ironic job as voting ironic-tempest-ipa-wholedisk-bios-agent_ipmitool-tinyipa job is voting on Ironic and neutron gate which mean it is stable enough and make sense to make it voting on devstack gate too. 
ironic-tempest-ipa-wholedisk-bios-agent_ipmitool-tinyipa is alias job of ironic-tempest-bios-ipmi-direct-tinyipa so using the original job instead of alias - https://opendev.org/openstack/ironic/src/branch/master/zuul.d/ironic-jobs.yaml#L784 Change-Id: I95c67ad69e6eae6a72d25a851a71b7de85e56fd2 --- .zuul.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 772cc7b2d7..f98107cb3f 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -729,8 +729,7 @@ - devstack-multinode - devstack-unit-tests - openstack-tox-bashate - - ironic-tempest-ipa-wholedisk-bios-agent_ipmitool-tinyipa: - voting: false + - ironic-tempest-bios-ipmi-direct-tinyipa - swift-dsvm-functional - grenade: irrelevant-files: @@ -782,6 +781,7 @@ irrelevant-files: - ^.*\.rst$ - ^doc/.*$ + - ironic-tempest-bios-ipmi-direct-tinyipa - swift-dsvm-functional - grenade: irrelevant-files: From 8b8a4c75b72eb44dd5c11950951cf6e6d9a1d778 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rados=C5=82aw=20Piliszek?= Date: Wed, 18 Aug 2021 15:00:09 +0000 Subject: [PATCH 110/574] Remove libvirt-python from upper-constraints ... when installed from distribution. This is mostly to fix Ironic's gate as their ecosystem is too broad and complex to quickly remove libvirt-python from all possible requirements.txt More details inline. See also: https://review.opendev.org/c/openstack/devstack/+/798514 aka f0bf2bdff12b66eefbb2eae83e919611eb7cc76d Change-Id: Ic44daf15e952bbe3c424984ffb2474261e68008f --- lib/nova_plugins/functions-libvirt | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt index e9ceae4dea..63882e05fe 100644 --- a/lib/nova_plugins/functions-libvirt +++ b/lib/nova_plugins/functions-libvirt @@ -56,6 +56,17 @@ EOF # Installs required distro-specific libvirt packages. function install_libvirt { + # NOTE(yoctozepto): The common consensus [1] is that libvirt-python should + # be installed from distro packages. However, various projects might be + # trying to ensure it is installed using pip AND use upper-constraints + # with that, causing pip to try to upgrade it and to fail. + # The following line removes libvirt-python from upper-constraints and + # avoids the situation described above. Now only if installed packages + # explicitly depend on a newer (or, in general, incompatible) libvirt-python + # version, will pip try to reinstall it. + # [1] https://review.opendev.org/c/openstack/devstack/+/798514 + $REQUIREMENTS_DIR/.venv/bin/edit-constraints \ + $REQUIREMENTS_DIR/upper-constraints.txt -- libvirt-python if is_ubuntu; then install_package qemu-system libvirt-clients libvirt-daemon-system libvirt-dev python3-libvirt From 1e86a25cc28e34d7f73a4c6ccbbc3fc667598d50 Mon Sep 17 00:00:00 2001 From: Lee Yarwood Date: Thu, 19 Aug 2021 14:24:28 +0100 Subject: [PATCH 111/574] nova: Enable apic removal workaround for bug #1939108 when using QEMU This change enables [workarounds]libvirt_disable_apic when devstack is deployed using the libvirt virt driver and qemu virt type in an effort to avoid issues outlined in bug #1939108 caused by the older kernel currently used in Cirros 0.5.2. 
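Concretely, the iniset call in the diff below amounts to the following stanza
in the compute's nova-cpu.conf when the qemu (non-KVM) virt type is in use:

    [workarounds]
    libvirt_disable_apic = True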
Depends-On: https://review.opendev.org/c/openstack/nova/+/766043 Closes-Bug: #1939108 Change-Id: Ibb6c34133bb1c95ef11cc59d9b12a0f65502c61b --- lib/nova | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/lib/nova b/lib/nova index de91517add..f4f4797b86 100644 --- a/lib/nova +++ b/lib/nova @@ -926,6 +926,11 @@ function start_nova_compute { iniset $NOVA_CPU_CONF os_vif_ovs ovsdb_connection "tcp:$OVSDB_SERVER_LOCAL_HOST:6640" fi + # Workaround bug #1939108 + if [[ "$VIRT_DRIVER" == "libvirt" && "$LIBVIRT_TYPE" == "qemu" ]]; then + iniset $NOVA_CPU_CONF workarounds libvirt_disable_apic True + fi + if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then # The group **$LIBVIRT_GROUP** is added to the current user in this script. # ``sg`` is used in run_process to execute nova-compute as a member of the From 14b7fc500367a24ed995d6598738c6f42dfe49ad Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Fri, 20 Aug 2021 06:13:36 +0000 Subject: [PATCH 112/574] Updated from generate-devstack-plugins-list Change-Id: I924cdf727818b33d71fe71ac110f224164c6b453 --- doc/source/plugin-registry.rst | 1 - 1 file changed, 1 deletion(-) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 490132e0d7..4364dd9c72 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -46,7 +46,6 @@ openstack/freezer `https://opendev.org/openstack/freezer openstack/freezer-api `https://opendev.org/openstack/freezer-api `__ openstack/freezer-tempest-plugin `https://opendev.org/openstack/freezer-tempest-plugin `__ openstack/freezer-web-ui `https://opendev.org/openstack/freezer-web-ui `__ -openstack/glance `https://opendev.org/openstack/glance `__ openstack/heat `https://opendev.org/openstack/heat `__ openstack/heat-dashboard `https://opendev.org/openstack/heat-dashboard `__ openstack/ironic `https://opendev.org/openstack/ironic `__ From 25f84277eab5291aa0fa8c12ac39a69594611e08 Mon Sep 17 00:00:00 2001 From: Takashi Kajinami Date: Sat, 21 Aug 2021 21:38:43 +0900 Subject: [PATCH 113/574] swift: Fix the empty gid option in rsyncd.conf This change fixes the empty value set to the gid option in rsyncd.conf, which was caused by reference to the invalid USER_GROUP variable, and ensures the option is set to the group which STACK_USER belongs to. This also fixes duplicate declaration of the local user_group variable. Closes-Bug: #1940742 Change-Id: Ifd0a5ef0bc5f3647f43b169df1f7176393971853 --- lib/swift | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lib/swift b/lib/swift index 790fb99442..98852415da 100644 --- a/lib/swift +++ b/lib/swift @@ -335,7 +335,6 @@ function configure_swift { local node_number local swift_node_config local swift_log_dir - local user_group # Make sure to kill all swift processes first $SWIFT_BIN_DIR/swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true @@ -353,7 +352,7 @@ function configure_swift { # partitions (which make more sense when you have a multi-node # setup) we configure it with our version of rsync. sed -e " - s/%GROUP%/${USER_GROUP}/; + s/%GROUP%/$(id -g -n ${STACK_USER})/; s/%USER%/${STACK_USER}/; s,%SWIFT_DATA_DIR%,$SWIFT_DATA_DIR,; " $FILES/swift/rsyncd.conf | sudo tee /etc/rsyncd.conf From b1a89eb80be83fe8c47eeb0431d85a8452e3c70b Mon Sep 17 00:00:00 2001 From: Slawek Kaplonski Date: Thu, 26 Aug 2021 21:42:32 +0200 Subject: [PATCH 114/574] Configure access to physical network also with ML2/OVN backend Neutron L3 module in Devstack has way to conigure access to physical network on the node. 
It can put physical interface to the physical bridge or, in case when such physical device isn't set, it creates NAT rule in iptables. There was missing the same operation for ML2/OVN backend as L3 agent is not used there at all. This patch adds the same to be done in both L3 agent and ovn_agent modules. Closes-Bug: #1939627 Change-Id: I9e558d1d5d3edbce9e7a025ba3c11267f1579820 --- lib/neutron-legacy | 21 +++++++++++++++++++++ lib/neutron_plugins/ovn_agent | 1 + lib/neutron_plugins/services/l3 | 16 +--------------- 3 files changed, 23 insertions(+), 15 deletions(-) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 791ff18b10..31968498de 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -663,6 +663,27 @@ function _move_neutron_addresses_route { fi } +# _configure_public_network_connectivity() - Configures connectivity to the +# external network using $PUBLIC_INTERFACE or NAT on the single interface +# machines +function _configure_public_network_connectivity { + # If we've given a PUBLIC_INTERFACE to take over, then we assume + # that we can own the whole thing, and privot it into the OVS + # bridge. If we are not, we're probably on a single interface + # machine, and we just setup NAT so that fixed guests can get out. + if [[ -n "$PUBLIC_INTERFACE" ]]; then + _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" True False "inet" + + if [[ $(ip -f inet6 a s dev "$PUBLIC_INTERFACE" | grep -c 'global') != 0 ]]; then + _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" False False "inet6" + fi + else + for d in $default_v4_route_devs; do + sudo iptables -t nat -A POSTROUTING -o $d -s $FLOATING_RANGE -j MASQUERADE + done + fi +} + # cleanup_mutnauq() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_mutnauq { diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index cfcb01ee91..1f737fb58b 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -266,6 +266,7 @@ function create_public_bridge { # Create the public bridge that OVN will use sudo ovs-vsctl --may-exist add-br $PUBLIC_BRIDGE -- set bridge $PUBLIC_BRIDGE protocols=OpenFlow13,OpenFlow15 sudo ovs-vsctl set open . external-ids:ovn-bridge-mappings=$PHYSICAL_NETWORK:$PUBLIC_BRIDGE + _configure_public_network_connectivity } function _disable_libvirt_apparmor { diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3 index b6bc02818c..98b96ac06c 100644 --- a/lib/neutron_plugins/services/l3 +++ b/lib/neutron_plugins/services/l3 @@ -123,21 +123,7 @@ function _configure_neutron_l3_agent { neutron_plugin_configure_l3_agent $Q_L3_CONF_FILE - # If we've given a PUBLIC_INTERFACE to take over, then we assume - # that we can own the whole thing, and privot it into the OVS - # bridge. If we are not, we're probably on a single interface - # machine, and we just setup NAT so that fixed guests can get out. 
- if [[ -n "$PUBLIC_INTERFACE" ]]; then - _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" True False "inet" - - if [[ $(ip -f inet6 a s dev "$PUBLIC_INTERFACE" | grep -c 'global') != 0 ]]; then - _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" False False "inet6" - fi - else - for d in $default_v4_route_devs; do - sudo iptables -t nat -A POSTROUTING -o $d -s $FLOATING_RANGE -j MASQUERADE - done - fi + _configure_public_network_connectivity } # Explicitly set router id in l3 agent configuration From a38d41ed9222c32eb7058f91b004b2ec16cd19e4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rados=C5=82aw=20Piliszek?= Date: Wed, 8 Sep 2021 07:51:47 +0000 Subject: [PATCH 115/574] Drop dep on libmysqlclient-dev It was required to build MySQL-python bindings but, for some time, we test and rely solely on PyMySQL which is pure Python and hence does not require this dep. This package is going away as distros move towards MariaDB. Change-Id: I6004ccf28a23009a0fc07bfc9458b59a927b969a --- files/debs/general | 1 - files/debs/neutron-common | 1 - files/debs/nova | 1 - 3 files changed, 3 deletions(-) diff --git a/files/debs/general b/files/debs/general index 7e481b4072..364f3cc6e2 100644 --- a/files/debs/general +++ b/files/debs/general @@ -14,7 +14,6 @@ iputils-ping libapache2-mod-proxy-uwsgi libffi-dev # for pyOpenSSL libjpeg-dev # Pillow 3.0.0 -libmysqlclient-dev # MySQL-python libpcre3-dev # for python-pcre libpq-dev # psycopg2 libssl-dev # for pyOpenSSL diff --git a/files/debs/neutron-common b/files/debs/neutron-common index e548396cd7..f6afc5bf55 100644 --- a/files/debs/neutron-common +++ b/files/debs/neutron-common @@ -6,7 +6,6 @@ haproxy # to serve as metadata proxy inside router/dhcp namespaces iptables iputils-arping iputils-ping -libmysqlclient-dev mysql-server #NOPRIME postgresql-server-dev-all python3-mysqldb diff --git a/files/debs/nova b/files/debs/nova index e19441453b..0194f00f2c 100644 --- a/files/debs/nova +++ b/files/debs/nova @@ -8,7 +8,6 @@ iptables iputils-arping kpartx libjs-jquery-tablesorter # Needed for coverage html reports -libmysqlclient-dev libvirt-clients # NOPRIME libvirt-daemon-system # NOPRIME libvirt-dev # NOPRIME From 6c8bd96f72eb913be5b1de5758b15f828fca5912 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Thu, 9 Sep 2021 06:10:23 +0000 Subject: [PATCH 116/574] Updated from generate-devstack-plugins-list Change-Id: If2ea45a2cc7993a9a80187092f2750496e7c8ab7 --- doc/source/plugin-registry.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 4364dd9c72..7c8d2b8aac 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -112,6 +112,7 @@ openstack/zaqar-ui `https://opendev.org/openstack/zaqar-ui openstack/zun `https://opendev.org/openstack/zun `__ openstack/zun-ui `https://opendev.org/openstack/zun-ui `__ performa/os-faults `https://opendev.org/performa/os-faults `__ +skyline/skyline-apiserver `https://opendev.org/skyline/skyline-apiserver `__ starlingx/config `https://opendev.org/starlingx/config `__ starlingx/fault `https://opendev.org/starlingx/fault `__ starlingx/ha `https://opendev.org/starlingx/ha `__ From c9f3e5bdd76430a7c14b9f02c3f8ba47214037ae Mon Sep 17 00:00:00 2001 From: Brian Haley Date: Thu, 16 Sep 2021 11:43:03 -0400 Subject: [PATCH 117/574] Fix stackrc os-resource-classes typo Attempting to use LIBS_FROM_GIT="ALL" results in a failure due to a typo in stackrc for os-resource-classes repo. 
Cloning into '/opt/stack/os-resource-classes'... fatal: protocol ':-https' is not supported [ERROR] /opt/stack/devstack/functions-common:629 git call failed: [git clone :-https://opendev.org/openstack/os-resource-classes.git /opt/stack/os-resource-classes --branch master] Remove the extraneous '='. Change-Id: I21f86324dc15fe808b38e366f7af18c96fd3890c --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index 620b1fc04d..16aa93ac94 100755 --- a/stackrc +++ b/stackrc @@ -549,7 +549,7 @@ GITBRANCH["neutron-lib"]=${NEUTRON_LIB_BRANCH:-$TARGET_BRANCH} GITDIR["neutron-lib"]=$DEST/neutron-lib # os-resource-classes library containing a list of standardized resource classes for OpenStack -GITREPO["os-resource-classes"]=${OS_RESOURCE_CLASSES_REPO=:-${GIT_BASE}/openstack/os-resource-classes.git} +GITREPO["os-resource-classes"]=${OS_RESOURCE_CLASSES_REPO:-${GIT_BASE}/openstack/os-resource-classes.git} GITBRANCH["os-resource-classes"]=${OS_RESOURCE_CLASSES_BRANCH:-$TARGET_BRANCH} # os-traits library for resource provider traits in the placement service From 8d1bfcacf8ffc73f0aa8c8a8a9e0fee447a1c116 Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Fri, 24 Sep 2021 18:01:09 -0500 Subject: [PATCH 118/574] Update DEVSTACK_SERIES to yoga stable/xena branch has been created now and current master is for yoga. Change-Id: I0c7809bdac6482bb947f394b0c2535fabb4cf067 --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index 16aa93ac94..3dc800af2d 100755 --- a/stackrc +++ b/stackrc @@ -247,7 +247,7 @@ REQUIREMENTS_DIR=${REQUIREMENTS_DIR:-$DEST/requirements} # Setting the variable to 'ALL' will activate the download for all # libraries. -DEVSTACK_SERIES="xena" +DEVSTACK_SERIES="yoga" ############## # From 65b46a503a720f2438a6dc73c6f4670cdf89442f Mon Sep 17 00:00:00 2001 From: Rajat Dhasmana Date: Tue, 28 Sep 2021 03:08:10 -0400 Subject: [PATCH 119/574] Remove cinder from service names In devstack job, cinder is not a valid service name and logs error in gate[1] so remove it. 2021-09-28 05:44:47.791807 | controller | + functions-common:service_check:1603 : for service in ${ENABLED_SERVICES//,/ } 2021-09-28 05:44:47.795506 | controller | + functions-common:service_check:1605 : sudo systemctl is-enabled devstack@cinder.service 2021-09-28 05:44:47.809647 | controller | Failed to get unit file state for devstack@cinder.service: No such file or directory [1] https://e978bdcfc0235dcd9417-6560bc3b6382c1d289b358872777ca09.ssl.cf1.rackcdn.com/801989/7/check/tempest-integrated-storage/779d1e7/job-output.txt Change-Id: I7ca105201d82b72c7e56778425d3bce7c76047db --- .zuul.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/.zuul.yaml b/.zuul.yaml index d1e356f4ea..25cbf923f8 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -524,7 +524,6 @@ c-bak: true c-sch: true c-vol: true - cinder: true # Services we don't need. # This section is not really needed, it's for readability. horizon: false From 56e75e4aef3ea42d13b192e805c48357b0071239 Mon Sep 17 00:00:00 2001 From: Jens Harbott Date: Tue, 28 Sep 2021 20:02:34 +0200 Subject: [PATCH 120/574] Fix uwsgi config for trailing slashes The apache mod_proxy documentation[0] says that trailing slashes need to match for the ProxyPass statement. Since adding a slash to the redirected url would break things that need to access endpoints like /identity without anything added, we need to drop the trailing slash for the target URL. See [1] for the discussion of the CVE fix that changed the previous behavior. 
[0] https://httpd.apache.org/docs/trunk/mod/mod_proxy.html#proxypass [1] https://bugs.launchpad.net/ubuntu/+source/apache2/+bug/1945274 Depends-On: https://review.opendev.org/c/openstack/devstack/+/811389 Change-Id: Ia6b1a41957833fba87a2e6f048d2483267632385 --- lib/apache | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/apache b/lib/apache index 04259ba31f..4bea07dc55 100644 --- a/lib/apache +++ b/lib/apache @@ -303,7 +303,7 @@ function write_uwsgi_config { apache_conf=$(apache_site_config_for $name) iniset "$file" uwsgi socket "$socket" iniset "$file" uwsgi chmod-socket 666 - echo "ProxyPass \"${url}\" \"unix:${socket}|uwsgi://uwsgi-uds-${name}/\" retry=0 " | sudo tee -a $apache_conf + echo "ProxyPass \"${url}\" \"unix:${socket}|uwsgi://uwsgi-uds-${name}\" retry=0 " | sudo tee -a $apache_conf enable_apache_site $name restart_apache_server fi From 982b03c605bc06e0bf9a03ff576995816155c64e Mon Sep 17 00:00:00 2001 From: Lee Yarwood Date: Thu, 30 Sep 2021 13:08:35 +0100 Subject: [PATCH 121/574] zuul: Remove dedicated devstack-async job I83d56c9363d481bb6d5921f5e1f9b024f136044b switched the default of DEVSTACK_PARALLEL over to True so this dedicated job is no longer required as *all* jobs should now be using it. Change-Id: I0f475ab177c2cd49eeb6be861cdd11581e8e0b97 --- .zuul.yaml | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index d1e356f4ea..ab2f80ae1b 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -613,17 +613,6 @@ vars: configure_swap_size: 4096 -- job: - name: devstack-async - parent: tempest-full-py3 - description: Async mode enabled - voting: false - vars: - devstack_localrc: - DEVSTACK_PARALLEL: True - zuul_copy_output: - /opt/stack/async: logs - - job: name: devstack-no-tls-proxy parent: tempest-full-py3 @@ -727,7 +716,6 @@ - devstack-enforce-scope - devstack-platform-fedora-latest - devstack-platform-centos-8-stream - - devstack-async - devstack-multinode - devstack-unit-tests - openstack-tox-bashate From c7791301be5e19ec7a84dffbf62c40e805483f43 Mon Sep 17 00:00:00 2001 From: melanie witt Date: Fri, 1 Oct 2021 17:24:10 +0000 Subject: [PATCH 122/574] Enable oslo.limit to be installed from git repo oslo.limit isn't currently in the list of libraries that can be installed from a git repo via LIBS_FROM_GIT. This adds oslo.limit to enable integrated testing against unmerged oslo.limit changes. 
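For example, a job definition or local.conf can then opt in to installing the library from source with something like (illustrative sketch):

  # localrc / devstack_localrc
  LIBS_FROM_GIT=oslo.limit
  # or appended to an existing comma-separated list, e.g.
  LIBS_FROM_GIT=oslo.limit,oslo.policy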
Change-Id: I26cc567fdf4c84014040ae586bbb029b8de7a236 --- lib/libraries | 2 ++ stackrc | 4 ++++ tests/test_libs_from_pypi.sh | 1 + 3 files changed, 7 insertions(+) diff --git a/lib/libraries b/lib/libraries index 67ff21f41a..9ea32304fc 100755 --- a/lib/libraries +++ b/lib/libraries @@ -38,6 +38,7 @@ GITDIR["oslo.config"]=$DEST/oslo.config GITDIR["oslo.context"]=$DEST/oslo.context GITDIR["oslo.db"]=$DEST/oslo.db GITDIR["oslo.i18n"]=$DEST/oslo.i18n +GITDIR["oslo.limit"]=$DEST/oslo.limit GITDIR["oslo.log"]=$DEST/oslo.log GITDIR["oslo.messaging"]=$DEST/oslo.messaging GITDIR["oslo.middleware"]=$DEST/oslo.middleware @@ -102,6 +103,7 @@ function install_libs { _install_lib_from_source "oslo.context" _install_lib_from_source "oslo.db" _install_lib_from_source "oslo.i18n" + _install_lib_from_source "oslo.limit" _install_lib_from_source "oslo.log" _install_lib_from_source "oslo.messaging" _install_lib_from_source "oslo.middleware" diff --git a/stackrc b/stackrc index 3dc800af2d..e0d71dfa49 100755 --- a/stackrc +++ b/stackrc @@ -415,6 +415,10 @@ GITBRANCH["oslo.db"]=${OSLODB_BRANCH:-$TARGET_BRANCH} GITREPO["oslo.i18n"]=${OSLOI18N_REPO:-${GIT_BASE}/openstack/oslo.i18n.git} GITBRANCH["oslo.i18n"]=${OSLOI18N_BRANCH:-$TARGET_BRANCH} +# oslo.limit +GITREPO["oslo.limit"]=${OSLOLIMIT_REPO:-${GIT_BASE}/openstack/oslo.limit.git} +GITBRANCH["oslo.limit"]=${OSLOLIMIT_BRANCH:-$TARGET_BRANCH} + # oslo.log GITREPO["oslo.log"]=${OSLOLOG_REPO:-${GIT_BASE}/openstack/oslo.log.git} GITBRANCH["oslo.log"]=${OSLOLOG_BRANCH:-$TARGET_BRANCH} diff --git a/tests/test_libs_from_pypi.sh b/tests/test_libs_from_pypi.sh index ce1b34461c..839e3a1328 100755 --- a/tests/test_libs_from_pypi.sh +++ b/tests/test_libs_from_pypi.sh @@ -45,6 +45,7 @@ ALL_LIBS+=" oslo.cache oslo.reports osprofiler cursive" ALL_LIBS+=" keystoneauth ironic-lib neutron-lib oslo.privsep" ALL_LIBS+=" diskimage-builder os-vif python-brick-cinderclient-ext" ALL_LIBS+=" castellan python-barbicanclient ovsdbapp os-ken os-resource-classes" +ALL_LIBS+=" oslo.limit" # Generate the above list with # echo ${!GITREPO[@]} From 959a7c262a65296a404252f8ec2014237196710d Mon Sep 17 00:00:00 2001 From: Jens Harbott Date: Sun, 2 May 2021 09:29:15 +0200 Subject: [PATCH 123/574] Enable running on Debian Bullseye MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Some adaption in database handling is all that is missing. Also add a platform job that tests this. Co-Authored-By: Radosław Piliszek Change-Id: I6dd3e48444dd415d84df5e7f5c74540847cdd6db --- .zuul.yaml | 54 +++++++++++++++++++++++++++++++++++++++++++++ lib/databases/mysql | 4 +++- stack.sh | 2 +- 3 files changed, 58 insertions(+), 2 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index ab2f80ae1b..721897e9e5 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -86,6 +86,16 @@ nodes: - controller +- nodeset: + name: devstack-single-node-debian-bullseye + nodes: + - name: controller + label: debian-bullseye + groups: + - name: tempest + nodes: + - controller + - nodeset: name: openstack-two-node nodes: @@ -613,6 +623,49 @@ vars: configure_swap_size: 4096 +- job: + name: devstack-platform-debian-bullseye + parent: tempest-full-py3 + description: Debian Bullseye platform test + nodeset: devstack-single-node-debian-bullseye + voting: false + timeout: 9000 + vars: + # NOTE(yoctozepto): With concurrency equal 2, there is a random event + # that this job will run out of memory at some point. + tempest_concurrency: 1 + # NOTE(yoctozepto): Debian Bullseye does not yet offer OVN. 
Switch to OVS + # for the time being. + devstack_localrc: + Q_AGENT: openvswitch + Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch + Q_ML2_TENANT_NETWORK_TYPE: vxlan + devstack_services: + # Disable OVN services + ovn-northd: false + ovn-controller: false + ovs-vswitchd: false + ovsdb-server: false + # Disable Neutron ML2/OVN services + q-ovn-metadata-agent: false + # Enable Neutron ML2/OVS services + q-agt: true + q-dhcp: true + q-l3: true + q-meta: true + q-metering: true + group-vars: + subnode: + devstack_services: + # Disable OVN services + ovn-controller: false + ovs-vswitchd: false + ovsdb-server: false + # Disable Neutron ML2/OVN services + q-ovn-metadata-agent: false + # Enable Neutron ML2/OVS services + q-agt: true + - job: name: devstack-no-tls-proxy parent: tempest-full-py3 @@ -716,6 +769,7 @@ - devstack-enforce-scope - devstack-platform-fedora-latest - devstack-platform-centos-8-stream + - devstack-platform-debian-bullseye - devstack-multinode - devstack-unit-tests - openstack-tox-bashate diff --git a/lib/databases/mysql b/lib/databases/mysql index d4969d713c..d0fa1199a7 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -25,6 +25,8 @@ if [[ -z "$MYSQL_SERVICE_NAME" ]]; then # provide a mysql.service symlink for backwards-compatibility, but # let's not rely on that. MYSQL_SERVICE_NAME=mariadb + elif [[ "$DISTRO" == "bullseye" ]]; then + MYSQL_SERVICE_NAME=mariadb fi fi @@ -105,7 +107,7 @@ function configure_database_mysql { # In mariadb e.g. on Ubuntu socket plugin is used for authentication # as root so it works only as sudo. To restore old "mysql like" behaviour, # we need to change auth plugin for root user - if is_ubuntu && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]; then + if is_ubuntu && [[ "$DISTRO" != "bullseye" ]] && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]; then sudo mysql $cmd_args -e "UPDATE mysql.user SET plugin='' WHERE user='$DATABASE_USER' AND host='localhost';" sudo mysql $cmd_args -e "FLUSH PRIVILEGES;" fi diff --git a/stack.sh b/stack.sh index 48f61fb3af..8a8e3ea6be 100755 --- a/stack.sh +++ b/stack.sh @@ -227,7 +227,7 @@ write_devstack_version # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -SUPPORTED_DISTROS="focal|f34|opensuse-15.2|opensuse-tumbleweed|rhel8" +SUPPORTED_DISTROS="bullseye|focal|f34|opensuse-15.2|opensuse-tumbleweed|rhel8" if [[ ! ${DISTRO} =~ $SUPPORTED_DISTROS ]]; then echo "WARNING: this script has not been tested on $DISTRO" From 714826d1a27085ba2384ca495c876588d77f0d27 Mon Sep 17 00:00:00 2001 From: Lee Yarwood Date: Mon, 4 Oct 2021 18:07:17 +0100 Subject: [PATCH 124/574] nova: Ensure each compute uses a unique iSCSI initiator The current initiator name embedded in our CI images is not unique at present and can often cause failures during live migrations with attached volumes. This change ensures the name is unique by running iscsi-iname again and overwriting the existing name. We could potentially do this during the image build process itself but given that devstack systems are not supposed to be multi-purpose this should be safe to do during the devstack run. 
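As a sanity check, the generated name can be inspected on each node after stacking; every host should now report a different value (illustrative):

  $ sudo cat /etc/iscsi/initiatorname.iscsi
  InitiatorName=<unique iqn generated by iscsi-iname>   # placeholder, actual value varies per host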
Closes-Bug: #1945983 Change-Id: I9ed26a17858df96c04be9ae52bf2e33e023869a5 --- lib/nova | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/nova b/lib/nova index f4f4797b86..bbb1039199 100644 --- a/lib/nova +++ b/lib/nova @@ -298,6 +298,9 @@ function configure_nova { fi fi + # Ensure each compute host uses a unique iSCSI initiator + echo InitiatorName=$(iscsi-iname) | sudo tee /etc/iscsi/initiatorname.iscsi + if [[ ${ISCSID_DEBUG} == "True" ]]; then # Install an override that starts iscsid with debugging # enabled. From bfc79dc98bfe68e22d3a58099bf550eda1a90a67 Mon Sep 17 00:00:00 2001 From: Michal Berger Date: Tue, 5 Oct 2021 15:40:20 +0200 Subject: [PATCH 125/574] tools: Fix use of continue continue is not used in a proper context here (outside of loop). Use null cmd instead to simply fall through the pip installation. Signed-off-by: Michal Berger Change-Id: Iaea2e5c0177b475edf19d08d71933a74debbb5d9 --- tools/install_pip.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/install_pip.sh b/tools/install_pip.sh index a80c178f2a..c72dc89a55 100755 --- a/tools/install_pip.sh +++ b/tools/install_pip.sh @@ -128,7 +128,7 @@ if is_fedora && [[ ${DISTRO} == f* ]]; then # if python3-pip is later installed. # For general sanity, we just use the packaged pip. It should be # recent enough anyway. This is included via rpms/general - continue + : # Simply fall through else install_get_pip fi From f758b60a4b70261393242b932063d2b6d705594c Mon Sep 17 00:00:00 2001 From: Slawek Kaplonski Date: Wed, 6 Oct 2021 12:02:22 +0200 Subject: [PATCH 126/574] Rehome functions to enable Neutron's Trunk service plugin Those functions were part of the neutron devstack plugin but we discussed on the neutron team meeting [1] to move it to the Devstack repo as it's mature enough now. [1] https://meetings.opendev.org/meetings/networking/2021/networking.2021-10-05-14.00.log.html#l-156 Change-Id: I35446adad1d8a7fed142d834de20c48b611015a5 --- lib/neutron-legacy | 9 +++++++++ lib/neutron_plugins/services/trunk | 5 +++++ 2 files changed, 14 insertions(+) create mode 100644 lib/neutron_plugins/services/trunk diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 31968498de..7b20a96ed7 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -275,6 +275,10 @@ source $TOP_DIR/lib/neutron_plugins/services/metering # L3 Service functions source $TOP_DIR/lib/neutron_plugins/services/l3 + +# Additional Neutron service plugins +source $TOP_DIR/lib/neutron_plugins/services/trunk + # Use security group or not if has_neutron_plugin_security_group; then Q_USE_SECGROUP=${Q_USE_SECGROUP:-True} @@ -369,6 +373,11 @@ function configure_mutnauq { configure_ovn_plugin fi + # Configure Neutron's advanced services + if is_service_enabled q-trunk neutron-trunk; then + configure_trunk_extension + fi + iniset $NEUTRON_CONF DEFAULT api_workers "$API_WORKERS" # devstack is not a tool for running uber scale OpenStack # clouds, therefore running without a dedicated RPC worker diff --git a/lib/neutron_plugins/services/trunk b/lib/neutron_plugins/services/trunk new file mode 100644 index 0000000000..8e0f6944cf --- /dev/null +++ b/lib/neutron_plugins/services/trunk @@ -0,0 +1,5 @@ +#!/bin/bash + +function configure_trunk_extension { + neutron_service_plugin_class_add "trunk" +} From 61a37bff9a35337f5924ca7cc61c76e42e55d787 Mon Sep 17 00:00:00 2001 From: "Dr. 
Jens Harbott" Date: Fri, 8 Oct 2021 10:59:09 +0200 Subject: [PATCH 127/574] Further fixup for Ubuntu cloud images The official Ubuntu cloud images have some further python pkgs preinstalled that conflict with our requirements. Allow to overwrite them. Signed-off-by: Dr. Jens Harbott Closes-Bug: 1871485 Change-Id: I793c250cae5e7b9bc835b7016d790d1f9ae8a7f3 --- tools/fixup_stuff.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index 8a2c337fc4..197a12d8f5 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -167,6 +167,8 @@ function fixup_ubuntu { # overwriting works. So this hacks around those packages that # have been dragged in by some other system dependency sudo rm -rf /usr/lib/python3/dist-packages/PyYAML-*.egg-info + sudo rm -rf /usr/lib/python3/dist-packages/pyasn1_modules-*.egg-info + sudo rm -rf /usr/lib/python3/dist-packages/simplejson-*.egg-info } function fixup_all { From b4e683e6b9a442ed9c37407cbc85288fe92aa85c Mon Sep 17 00:00:00 2001 From: Slawek Kaplonski Date: Tue, 5 Oct 2021 20:44:57 +0200 Subject: [PATCH 128/574] Don't fail if there is no nf_conntrack_proto_gre module available It may be that it is already compiled in the kernel so there is no need to load kernel module in such case. Change-Id: Ie1d32e3fd529e13958857cb3ced6710eebde1e4d --- lib/neutron_plugins/ovs_source | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron_plugins/ovs_source b/lib/neutron_plugins/ovs_source index 294171f18b..08951d175d 100644 --- a/lib/neutron_plugins/ovs_source +++ b/lib/neutron_plugins/ovs_source @@ -211,5 +211,5 @@ function remove_ovs_packages { # load_conntrack_gre_module() - loads nf_conntrack_proto_gre kernel module function load_conntrack_gre_module { - sudo modprobe nf_conntrack_proto_gre + load_module nf_conntrack_proto_gre False } From 84901f563ef6b5d93a00a89f9cdb41a0380d493e Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Fri, 15 Oct 2021 15:04:49 +0200 Subject: [PATCH 129/574] Create clouds.yaml early enough When using glance limits, the create_glance_accounts call needs access to the devstack-system-admin cloud definition, so we need to create the clouds.yaml file before that step. Change-Id: Ie6d807c46b88b16b316aa166870a6a13f2bb346d Signed-off-by: Dr. Jens Harbott --- stack.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/stack.sh b/stack.sh index 48f61fb3af..979867895f 100755 --- a/stack.sh +++ b/stack.sh @@ -1090,6 +1090,9 @@ fi source $TOP_DIR/userrc_early +# Write a clouds.yaml file +write_clouds_yaml + if is_service_enabled keystone; then echo_summary "Starting Keystone" @@ -1118,9 +1121,6 @@ if is_service_enabled keystone; then fi -# Write a clouds.yaml file -write_clouds_yaml - # Horizon # ------- From c027ddd3f895802f5cab37d2cb04162686a3a3cb Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Sat, 16 Oct 2021 06:26:49 +0000 Subject: [PATCH 130/574] Updated from generate-devstack-plugins-list Change-Id: I1abc356970a7f2427bc9683a7e64e54ab52a7651 --- doc/source/plugin-registry.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 7c8d2b8aac..3edd708d8b 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -24,8 +24,6 @@ official OpenStack projects. 
======================================== === Plugin Name URL ======================================== === -inspur/venus `https://opendev.org/inspur/venus `__ -inspur/venus-dashboard `https://opendev.org/inspur/venus-dashboard `__ openstack/aodh `https://opendev.org/openstack/aodh `__ openstack/barbican `https://opendev.org/openstack/barbican `__ openstack/blazar `https://opendev.org/openstack/blazar `__ @@ -101,6 +99,8 @@ openstack/tap-as-a-service `https://opendev.org/openstack/tap-as-a openstack/telemetry-tempest-plugin `https://opendev.org/openstack/telemetry-tempest-plugin `__ openstack/trove `https://opendev.org/openstack/trove `__ openstack/trove-dashboard `https://opendev.org/openstack/trove-dashboard `__ +openstack/venus `https://opendev.org/openstack/venus `__ +openstack/venus-dashboard `https://opendev.org/openstack/venus-dashboard `__ openstack/vitrage `https://opendev.org/openstack/vitrage `__ openstack/vitrage-dashboard `https://opendev.org/openstack/vitrage-dashboard `__ openstack/vitrage-tempest-plugin `https://opendev.org/openstack/vitrage-tempest-plugin `__ From ee1c614eda833b38ad0d526b4b1e493dfe5968be Mon Sep 17 00:00:00 2001 From: Jens Harbott Date: Sat, 16 Oct 2021 17:33:12 +0200 Subject: [PATCH 131/574] Fix use of yaml.load() The use of this function has been deprecated for a long time[0]. With PyYAML==6.0 the call is now failing, so replace it with the safe version. [0] https://msg.pyyaml.org/load Signed-off-by: Jens Harbott Change-Id: I7a170262b50a5c80a516095b872d52e1bea5479d --- tools/update_clouds_yaml.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/update_clouds_yaml.py b/tools/update_clouds_yaml.py index 7be995e8f3..74dcdb2a07 100755 --- a/tools/update_clouds_yaml.py +++ b/tools/update_clouds_yaml.py @@ -65,7 +65,7 @@ def run(self): def _read_clouds(self): try: with open(self._clouds_path) as clouds_file: - self._clouds = yaml.load(clouds_file) + self._clouds = yaml.safe_load(clouds_file) except IOError: # The user doesn't have a clouds.yaml file. print("The user clouds.yaml file didn't exist.") From c2491bac9d3b3f0446e67b4ea960cb88da9aec0e Mon Sep 17 00:00:00 2001 From: Jens Harbott Date: Sun, 14 Jun 2020 18:06:23 +0200 Subject: [PATCH 132/574] Stop creating a keystone admin site Keystone no longer has any special functionality hidden behind the admin site. KEYSTONE_AUTH_URI which used to point to the admin site has long ago been changed to be a copy of KEYSTONE_SERVICE_URI, which points to the public site. Drop all KEYSTONE_AUTH_* variables except KEYSTONE_AUTH_URI which may still be in use in some plugins. This also allows to finally drop the fixup_keystone() function. 
Change-Id: I549f3cadc27d137e014241cdd47e90267859c848 --- doc/source/configuration.rst | 1 - files/apache-keystone.template | 25 ------------------------- lib/keystone | 32 ++++---------------------------- lib/swift | 4 ++-- stack.sh | 4 ++-- tools/fixup_stuff.sh | 34 ---------------------------------- 6 files changed, 8 insertions(+), 92 deletions(-) diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index 67456142de..d1144ae9ed 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -666,7 +666,6 @@ In RegionTwo: disable_service horizon KEYSTONE_SERVICE_HOST= - KEYSTONE_AUTH_HOST= REGION_NAME=RegionTwo KEYSTONE_REGION_NAME=RegionOne diff --git a/files/apache-keystone.template b/files/apache-keystone.template index 128436027d..1a353e5f4a 100644 --- a/files/apache-keystone.template +++ b/files/apache-keystone.template @@ -1,5 +1,4 @@ Listen %PUBLICPORT% -Listen %ADMINPORT% LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\" %D(us)" keystone_combined @@ -20,20 +19,6 @@ LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\" %D(us)" %SSLKEYFILE% - - WSGIDaemonProcess keystone-admin processes=3 threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV% - WSGIProcessGroup keystone-admin - WSGIScriptAlias / %KEYSTONE_BIN%/keystone-wsgi-admin - WSGIApplicationGroup %{GLOBAL} - WSGIPassAuthorization On - ErrorLogFormat "%M" - ErrorLog /var/log/%APACHE_NAME%/keystone.log - CustomLog /var/log/%APACHE_NAME%/keystone_access.log keystone_combined - %SSLENGINE% - %SSLCERTFILE% - %SSLKEYFILE% - - %SSLLISTEN% %SSLLISTEN% %SSLENGINE% %SSLLISTEN% %SSLCERTFILE% @@ -49,13 +34,3 @@ Alias /identity %KEYSTONE_BIN%/keystone-wsgi-public WSGIApplicationGroup %{GLOBAL} WSGIPassAuthorization On - -Alias /identity_admin %KEYSTONE_BIN%/keystone-wsgi-admin - - SetHandler wsgi-script - Options +ExecCGI - - WSGIProcessGroup keystone-admin - WSGIApplicationGroup %{GLOBAL} - WSGIPassAuthorization On - diff --git a/lib/keystone b/lib/keystone index 66e867ca68..096bafb41f 100644 --- a/lib/keystone +++ b/lib/keystone @@ -50,9 +50,7 @@ fi KEYSTONE_CONF_DIR=${KEYSTONE_CONF_DIR:-/etc/keystone} KEYSTONE_CONF=$KEYSTONE_CONF_DIR/keystone.conf KEYSTONE_PUBLIC_UWSGI_CONF=$KEYSTONE_CONF_DIR/keystone-uwsgi-public.ini -KEYSTONE_ADMIN_UWSGI_CONF=$KEYSTONE_CONF_DIR/keystone-uwsgi-admin.ini KEYSTONE_PUBLIC_UWSGI=$KEYSTONE_BIN_DIR/keystone-wsgi-public -KEYSTONE_ADMIN_UWSGI=$KEYSTONE_BIN_DIR/keystone-wsgi-admin # KEYSTONE_DEPLOY defines how keystone is deployed, allowed values: # - mod_wsgi : Run keystone under Apache HTTPd mod_wsgi @@ -81,21 +79,12 @@ KEYSTONE_RESOURCE_BACKEND=${KEYSTONE_RESOURCE_BACKEND:-sql} KEYSTONE_TOKEN_FORMAT=${KEYSTONE_TOKEN_FORMAT:-fernet} KEYSTONE_TOKEN_FORMAT=$(echo ${KEYSTONE_TOKEN_FORMAT} | tr '[:upper:]' '[:lower:]') -# Set Keystone interface configuration -KEYSTONE_AUTH_HOST=${KEYSTONE_AUTH_HOST:-$SERVICE_HOST} -KEYSTONE_AUTH_PORT=${KEYSTONE_AUTH_PORT:-35357} -KEYSTONE_AUTH_PORT_INT=${KEYSTONE_AUTH_PORT_INT:-35358} -KEYSTONE_AUTH_PROTOCOL=${KEYSTONE_AUTH_PROTOCOL:-$SERVICE_PROTOCOL} - # Public facing bits KEYSTONE_SERVICE_HOST=${KEYSTONE_SERVICE_HOST:-$SERVICE_HOST} KEYSTONE_SERVICE_PORT=${KEYSTONE_SERVICE_PORT:-5000} KEYSTONE_SERVICE_PORT_INT=${KEYSTONE_SERVICE_PORT_INT:-5001} KEYSTONE_SERVICE_PROTOCOL=${KEYSTONE_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} -# Bind hosts -KEYSTONE_ADMIN_BIND_HOST=${KEYSTONE_ADMIN_BIND_HOST:-$KEYSTONE_SERVICE_HOST} - # Set the project for service accounts in Keystone 
SERVICE_DOMAIN_NAME=${SERVICE_DOMAIN_NAME:-Default} SERVICE_PROJECT_NAME=${SERVICE_PROJECT_NAME:-service} @@ -106,7 +95,6 @@ SERVICE_TENANT_NAME=${SERVICE_PROJECT_NAME:-service} # if we are running with SSL use https protocols if is_service_enabled tls-proxy; then - KEYSTONE_AUTH_PROTOCOL="https" KEYSTONE_SERVICE_PROTOCOL="https" fi @@ -154,11 +142,8 @@ function cleanup_keystone { sudo rm -f $(apache_site_config_for keystone) else stop_process "keystone" - # TODO: remove admin at pike-2 remove_uwsgi_config "$KEYSTONE_PUBLIC_UWSGI_CONF" "$KEYSTONE_PUBLIC_UWSGI" - remove_uwsgi_config "$KEYSTONE_ADMIN_UWSGI_CONF" "$KEYSTONE_ADMIN_UWSGI" sudo rm -f $(apache_site_config_for keystone-wsgi-public) - sudo rm -f $(apache_site_config_for keystone-wsgi-admin) fi } @@ -171,12 +156,10 @@ function _config_keystone_apache_wsgi { local keystone_certfile="" local keystone_keyfile="" local keystone_service_port=$KEYSTONE_SERVICE_PORT - local keystone_auth_port=$KEYSTONE_AUTH_PORT local venv_path="" if is_service_enabled tls-proxy; then keystone_service_port=$KEYSTONE_SERVICE_PORT_INT - keystone_auth_port=$KEYSTONE_AUTH_PORT_INT fi if [[ ${USE_VENV} = True ]]; then venv_path="python-path=${PROJECT_VENV["keystone"]}/lib/$(python_version)/site-packages" @@ -185,7 +168,6 @@ function _config_keystone_apache_wsgi { sudo cp $FILES/apache-keystone.template $keystone_apache_conf sudo sed -e " s|%PUBLICPORT%|$keystone_service_port|g; - s|%ADMINPORT%|$keystone_auth_port|g; s|%APACHE_NAME%|$APACHE_NAME|g; s|%SSLLISTEN%|$keystone_ssl_listen|g; s|%SSLENGINE%|$keystone_ssl|g; @@ -223,12 +205,10 @@ function configure_keystone { iniset_rpc_backend keystone $KEYSTONE_CONF oslo_messaging_notifications local service_port=$KEYSTONE_SERVICE_PORT - local auth_port=$KEYSTONE_AUTH_PORT if is_service_enabled tls-proxy; then # Set the service ports for a proxy to take the originals service_port=$KEYSTONE_SERVICE_PORT_INT - auth_port=$KEYSTONE_AUTH_PORT_INT fi # Override the endpoints advertised by keystone (the public_endpoint and @@ -238,7 +218,7 @@ function configure_keystone { # don't want the port (in the case of putting keystone on a path in # apache). iniset $KEYSTONE_CONF DEFAULT public_endpoint $KEYSTONE_SERVICE_URI - iniset $KEYSTONE_CONF DEFAULT admin_endpoint $KEYSTONE_AUTH_URI + iniset $KEYSTONE_CONF DEFAULT admin_endpoint $KEYSTONE_SERVICE_URI if [[ "$KEYSTONE_TOKEN_FORMAT" != "" ]]; then iniset $KEYSTONE_CONF token provider $KEYSTONE_TOKEN_FORMAT @@ -261,7 +241,6 @@ function configure_keystone { _config_keystone_apache_wsgi else # uwsgi write_uwsgi_config "$KEYSTONE_PUBLIC_UWSGI_CONF" "$KEYSTONE_PUBLIC_UWSGI" "/identity" - write_uwsgi_config "$KEYSTONE_ADMIN_UWSGI_CONF" "$KEYSTONE_ADMIN_UWSGI" "/identity_admin" fi iniset $KEYSTONE_CONF DEFAULT max_token_size 16384 @@ -518,7 +497,7 @@ function install_keystone { function start_keystone { # Get right service port for testing local service_port=$KEYSTONE_SERVICE_PORT - local auth_protocol=$KEYSTONE_AUTH_PROTOCOL + local auth_protocol=$KEYSTONE_SERVICE_PROTOCOL if is_service_enabled tls-proxy; then service_port=$KEYSTONE_SERVICE_PORT_INT auth_protocol="http" @@ -546,7 +525,6 @@ function start_keystone { # Start proxies if enabled if is_service_enabled tls-proxy; then start_tls_proxy keystone-service '*' $KEYSTONE_SERVICE_PORT $KEYSTONE_SERVICE_HOST $KEYSTONE_SERVICE_PORT_INT - start_tls_proxy keystone-auth '*' $KEYSTONE_AUTH_PORT $KEYSTONE_AUTH_HOST $KEYSTONE_AUTH_PORT_INT fi # (re)start memcached to make sure we have a clean memcache. 
@@ -569,9 +547,7 @@ function stop_keystone { # - ``ADMIN_PASSWORD`` # - ``IDENTITY_API_VERSION`` # - ``REGION_NAME`` -# - ``KEYSTONE_SERVICE_PROTOCOL`` -# - ``KEYSTONE_SERVICE_HOST`` -# - ``KEYSTONE_SERVICE_PORT`` +# - ``KEYSTONE_SERVICE_URI`` function bootstrap_keystone { $KEYSTONE_BIN_DIR/keystone-manage bootstrap \ --bootstrap-username admin \ @@ -580,7 +556,7 @@ function bootstrap_keystone { --bootstrap-role-name admin \ --bootstrap-service-name keystone \ --bootstrap-region-id "$REGION_NAME" \ - --bootstrap-admin-url "$KEYSTONE_AUTH_URI" \ + --bootstrap-admin-url "$KEYSTONE_SERVICE_URI" \ --bootstrap-public-url "$KEYSTONE_SERVICE_URI" } diff --git a/lib/swift b/lib/swift index 98852415da..b376993150 100644 --- a/lib/swift +++ b/lib/swift @@ -430,7 +430,7 @@ function configure_swift { swift_pipeline+=" authtoken" if is_service_enabled s3api;then swift_pipeline+=" s3token" - iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:s3token auth_uri ${KEYSTONE_AUTH_URI_V3} + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:s3token auth_uri ${KEYSTONE_SERVICE_URI_V3} iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:s3token delay_auth_decision true fi swift_pipeline+=" keystoneauth" @@ -521,7 +521,7 @@ function configure_swift { local auth_vers auth_vers=$(iniget ${testfile} func_test auth_version) iniset ${testfile} func_test auth_host ${KEYSTONE_SERVICE_HOST} - if [[ "$KEYSTONE_AUTH_PROTOCOL" == "https" ]]; then + if [[ "$KEYSTONE_SERVICE_PROTOCOL" == "https" ]]; then iniset ${testfile} func_test auth_port 443 else iniset ${testfile} func_test auth_port 80 diff --git a/stack.sh b/stack.sh index 48f61fb3af..fc26e21627 100755 --- a/stack.sh +++ b/stack.sh @@ -876,7 +876,7 @@ fi install_keystonemiddleware if is_service_enabled keystone; then - if [ "$KEYSTONE_AUTH_HOST" == "$SERVICE_HOST" ]; then + if [ "$KEYSTONE_SERVICE_HOST" == "$SERVICE_HOST" ]; then stack_install_service keystone configure_keystone fi @@ -1093,7 +1093,7 @@ source $TOP_DIR/userrc_early if is_service_enabled keystone; then echo_summary "Starting Keystone" - if [ "$KEYSTONE_AUTH_HOST" == "$SERVICE_HOST" ]; then + if [ "$KEYSTONE_SERVICE_HOST" == "$SERVICE_HOST" ]; then init_keystone start_keystone bootstrap_keystone diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index 71fba2e2a6..fe5dafa994 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -26,39 +26,6 @@ if [[ -z "$TOP_DIR" ]]; then FILES=$TOP_DIR/files fi -# Keystone Port Reservation -# ------------------------- -# Reserve and prevent ``KEYSTONE_AUTH_PORT`` and ``KEYSTONE_AUTH_PORT_INT`` from -# being used as ephemeral ports by the system. The default(s) are 35357 and -# 35358 which are in the Linux defined ephemeral port range (in disagreement -# with the IANA ephemeral port range). This is a workaround for bug #1253482 -# where Keystone will try and bind to the port and the port will already be -# in use as an ephemeral port by another process. This places an explicit -# exception into the Kernel for the Keystone AUTH ports. -function fixup_keystone { - keystone_ports=${KEYSTONE_AUTH_PORT:-35357},${KEYSTONE_AUTH_PORT_INT:-35358} - - # Only do the reserved ports when available, on some system (like containers) - # where it's not exposed we are almost pretty sure these ports would be - # exclusive for our DevStack. 
- if sysctl net.ipv4.ip_local_reserved_ports >/dev/null 2>&1; then - # Get any currently reserved ports, strip off leading whitespace - reserved_ports=$(sysctl net.ipv4.ip_local_reserved_ports | awk -F'=' '{print $2;}' | sed 's/^ //') - - if [[ -z "${reserved_ports}" ]]; then - # If there are no currently reserved ports, reserve the keystone ports - sudo sysctl -w net.ipv4.ip_local_reserved_ports=${keystone_ports} - else - # If there are currently reserved ports, keep those and also reserve the - # Keystone specific ports. Duplicate reservations are merged into a single - # reservation (or range) automatically by the kernel. - sudo sysctl -w net.ipv4.ip_local_reserved_ports=${keystone_ports},${reserved_ports} - fi - else - echo_summary "WARNING: unable to reserve keystone ports" - fi -} - # Python Packages # --------------- @@ -182,7 +149,6 @@ function fixup_ubuntu { } function fixup_all { - fixup_keystone fixup_ubuntu fixup_fedora fixup_suse From eb37657d8e34e1d0f8ee639c3953a752ba615d8d Mon Sep 17 00:00:00 2001 From: Jens Harbott Date: Wed, 24 Feb 2021 10:04:31 +0100 Subject: [PATCH 133/574] Make creation of keystone admin endpoint optional The keystone admin endpoint technically isn't different any longer from the other keystone endpoints in v3 of the API. However, some applications like heat are still relying on it to exist. So we make the creation of the admin endpoint during bootstrap optional here, with the intention to change the default to False once all jobs that still need this are modified to explicitly require it. Change-Id: I7ab12141c558186e397c174c248a613d1810011b --- lib/keystone | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/lib/keystone b/lib/keystone index 096bafb41f..f8b5ccb009 100644 --- a/lib/keystone +++ b/lib/keystone @@ -122,6 +122,9 @@ KEYSTONE_PASSWORD_HASH_ROUNDS=${KEYSTONE_PASSWORD_HASH_ROUNDS:-4} # Cache settings KEYSTONE_ENABLE_CACHE=${KEYSTONE_ENABLE_CACHE:-True} +# Whether to create a keystone admin endpoint for legacy applications +KEYSTONE_ADMIN_ENDPOINT=$(trueorfalse True KEYSTONE_ADMIN_ENDPOINT) + # Functions # --------- @@ -556,8 +559,16 @@ function bootstrap_keystone { --bootstrap-role-name admin \ --bootstrap-service-name keystone \ --bootstrap-region-id "$REGION_NAME" \ - --bootstrap-admin-url "$KEYSTONE_SERVICE_URI" \ --bootstrap-public-url "$KEYSTONE_SERVICE_URI" + if [ "$KEYSTONE_ADMIN_ENDPOINT" == "True" ]; then + openstack endpoint create --region "$REGION_NAME" \ + --os-username admin \ + --os-user-domain-id default \ + --os-password "$ADMIN_PASSWORD" \ + --os-project-name admin \ + --os-project-domain-id default \ + keystone admin "$KEYSTONE_SERVICE_URI" + fi } # create_ldap_domain() - Create domain file and initialize domain with a user From b538b3267cec11f0b345ce101607bbd36ada8f32 Mon Sep 17 00:00:00 2001 From: Jens Harbott Date: Wed, 24 Feb 2021 10:24:03 +0100 Subject: [PATCH 134/574] Switch off creating a keystone admin endpoint by default With the depending patch, the endpoint will still be created for heat tests, so we can turn it off for everyone else. 
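Deployments or jobs that still depend on the legacy admin endpoint can opt back in explicitly, e.g. via local.conf:

  # localrc sketch: keep registering the legacy keystone admin endpoint
  KEYSTONE_ADMIN_ENDPOINT=True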
Depends-On: https://review.opendev.org/c/openstack/openstacksdk/+/777343 Change-Id: I0dc7d6cedd07e942b9f23b26a785b386aff41fbc --- lib/keystone | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/keystone b/lib/keystone index f8b5ccb009..0609abd289 100644 --- a/lib/keystone +++ b/lib/keystone @@ -123,7 +123,7 @@ KEYSTONE_PASSWORD_HASH_ROUNDS=${KEYSTONE_PASSWORD_HASH_ROUNDS:-4} KEYSTONE_ENABLE_CACHE=${KEYSTONE_ENABLE_CACHE:-True} # Whether to create a keystone admin endpoint for legacy applications -KEYSTONE_ADMIN_ENDPOINT=$(trueorfalse True KEYSTONE_ADMIN_ENDPOINT) +KEYSTONE_ADMIN_ENDPOINT=$(trueorfalse False KEYSTONE_ADMIN_ENDPOINT) # Functions # --------- From 4aa27976ebb2e4a4dc95a20f96e5d8f25b1ac10d Mon Sep 17 00:00:00 2001 From: Tristan Cacqueray Date: Tue, 19 Oct 2021 21:47:27 +0000 Subject: [PATCH 135/574] [ci] Remove the implied-branches pragma This change enables using devstack jobs with custom branch names. Change-Id: I95c368f05042a6f8f208988af9a6d89a522a5526 --- .zuul.yaml | 8 -------- 1 file changed, 8 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index b756db3b22..d114053d26 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -1,11 +1,3 @@ -- pragma: - # NOTE(gtema): this is required for the changes in SDK feature/r1 branch to - # be using devstack - # TODO(gtema): delete this once r1 branch is merged into master - implied-branches: - - master - - feature/r1 - - nodeset: name: openstack-single-node nodes: From e06d954229fc4fca827105f5bb0809a19075d590 Mon Sep 17 00:00:00 2001 From: Clark Boylan Date: Thu, 21 Oct 2021 08:15:12 -0700 Subject: [PATCH 136/574] Use Nehalem CPU model by default CentOS/RHEL 9 are being compiled for the x86_64-v2 architecture which is newer than the qemu default of qemu64. This means that for devstack to boot these instances we need a newer CPU model. Nehalem is apparently the oldest model that works for x86_64-v2 and is expected to work on Intel and AMD cpus with kvm or qemu. Switch devstack to this model by default. Note that we cannot use host-passthrough or host-model because we want to support live migration between devstack deployed nova-compute instances and even within the CI instances that we get the host CPUs can differ. Also, we should run this change against as many clouds as possible to ensure that the newer model works across all of our clouds. There is some fear that the virtual CPUs presented to us in some clouds may not be able to run these newer CPU models. Change-Id: Ibd6e11b59f3c8655bc60ace7383a08458b2177f2 --- lib/nova | 3 ++- lib/nova_plugins/hypervisor-libvirt | 3 +++ stackrc | 3 ++- 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/lib/nova b/lib/nova index bbb1039199..9aae2c4a9c 100644 --- a/lib/nova +++ b/lib/nova @@ -260,7 +260,8 @@ function configure_nova { if [ !
-e /dev/kvm ]; then echo "WARNING: Switching to QEMU" LIBVIRT_TYPE=qemu - LIBVIRT_CPU_MODE=none + LIBVIRT_CPU_MODE=custom + LIBVIRT_CPU_MODEL=Nehalem if which selinuxenabled >/dev/null 2>&1 && selinuxenabled; then # https://bugzilla.redhat.com/show_bug.cgi?id=753589 sudo setsebool virt_use_execmem on diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt index 321775d324..c1cd132548 100644 --- a/lib/nova_plugins/hypervisor-libvirt +++ b/lib/nova_plugins/hypervisor-libvirt @@ -40,6 +40,9 @@ function configure_nova_hypervisor { configure_libvirt iniset $NOVA_CONF libvirt virt_type "$LIBVIRT_TYPE" iniset $NOVA_CONF libvirt cpu_mode "$LIBVIRT_CPU_MODE" + if [ "$LIBVIRT_CPU_MODE" == "custom" ] ; then + iniset $NOVA_CONF libvirt cpu_model "$LIBVIRT_CPU_MODEL" + fi # Do not enable USB tablet input devices to avoid QEMU CPU overhead. iniset $NOVA_CONF DEFAULT pointer_model "ps2mouse" iniset $NOVA_CONF libvirt live_migration_uri "qemu+ssh://$STACK_USER@%s/system" diff --git a/stackrc b/stackrc index 3dc800af2d..ebe472c033 100755 --- a/stackrc +++ b/stackrc @@ -623,7 +623,8 @@ VIRT_DRIVER=${VIRT_DRIVER:-$DEFAULT_VIRT_DRIVER} case "$VIRT_DRIVER" in ironic|libvirt) LIBVIRT_TYPE=${LIBVIRT_TYPE:-kvm} - LIBVIRT_CPU_MODE=${LIBVIRT_CPU_MODE:-none} + LIBVIRT_CPU_MODE=${LIBVIRT_CPU_MODE:-custom} + LIBVIRT_CPU_MODEL=${LIBVIRT_CPU_MODEL:-Nehalem} if [[ "$os_VENDOR" =~ (Debian|Ubuntu) ]]; then # The groups change with newer libvirt. Older Ubuntu used # 'libvirtd', but now uses libvirt like Debian. Do a quick check From 7f6d9283b85bb743d3224981c031d331f5e9608a Mon Sep 17 00:00:00 2001 From: Slawek Kaplonski Date: Wed, 27 Oct 2021 16:40:30 +0200 Subject: [PATCH 137/574] Rehome functions to enable Neutron's placement integration Those functions were part of the neutron devstack plugin but we discussed it during last PTG [1] and decided to move to the Devstack repo as plugins which are used by e.g. CI jobs which are defined outside of the neutron repository. Placement integration is used e.g. in the tempest-slow job which is defined in tempest and used by many different OpenStack projects. 
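With these functions in tree, a CI job or local.conf is expected to enable the extension roughly like this (illustrative sketch; either service name accepted by the is_service_enabled check in this change works):

  # localrc sketch
  enable_service q-placement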
[1] https://etherpad.opendev.org/p/neutron-yoga-ptg#L142 Change-Id: Ib86071881f16de1b69c0f9b1b19b6df8b7e66a07 --- lib/neutron-legacy | 4 ++++ lib/neutron_plugins/services/placement | 21 +++++++++++++++++++++ 2 files changed, 25 insertions(+) create mode 100644 lib/neutron_plugins/services/placement diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 7b20a96ed7..b41dfcae42 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -277,6 +277,7 @@ source $TOP_DIR/lib/neutron_plugins/services/metering source $TOP_DIR/lib/neutron_plugins/services/l3 # Additional Neutron service plugins +source $TOP_DIR/lib/neutron_plugins/services/placement source $TOP_DIR/lib/neutron_plugins/services/trunk # Use security group or not @@ -374,6 +375,9 @@ function configure_mutnauq { fi # Configure Neutron's advanced services + if is_service_enabled q-placement neutron-placement; then + configure_placement_extension + fi if is_service_enabled q-trunk neutron-trunk; then configure_trunk_extension fi diff --git a/lib/neutron_plugins/services/placement b/lib/neutron_plugins/services/placement new file mode 100644 index 0000000000..3ec185bae6 --- /dev/null +++ b/lib/neutron_plugins/services/placement @@ -0,0 +1,21 @@ +#!/bin/bash + +function configure_placement_service_plugin { + neutron_service_plugin_class_add "placement" +} + +function configure_placement_neutron { + iniset $NEUTRON_CONF placement auth_type "$NEUTRON_PLACEMENT_AUTH_TYPE" + iniset $NEUTRON_CONF placement auth_url "$KEYSTONE_SERVICE_URI" + iniset $NEUTRON_CONF placement username "$NEUTRON_PLACEMENT_USERNAME" + iniset $NEUTRON_CONF placement password "$SERVICE_PASSWORD" + iniset $NEUTRON_CONF placement user_domain_name "$SERVICE_DOMAIN_NAME" + iniset $NEUTRON_CONF placement project_name "$SERVICE_TENANT_NAME" + iniset $NEUTRON_CONF placement project_domain_name "$SERVICE_DOMAIN_NAME" + iniset $NEUTRON_CONF placement region_name "$REGION_NAME" +} + +function configure_placement_extension { + configure_placement_service_plugin + configure_placement_neutron +} From 325792d9b9238f9a8b8b493ba50572add99b9d82 Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Fri, 15 Oct 2021 15:55:54 -0500 Subject: [PATCH 138/574] Clarify error message for ERROR_ON_CLONE=True If ERROR_ON_CLONE is set to True which is case for all the devstack based job, devstack does not clone the repo instead raise error. From current error message, it is difficult to know that ERROR_ON_CLONE is True until we traceback the code or check devstack-base job set ERROR_ON_CLONE to True. Current error message is like: ------- + functions-common:git_clone:560 : echo 'The /opt/stack/oslo.limit project was not found; if this is a gate job, add' The /opt/stack/oslo.limit project was not found; if this is a gate job, add + functions-common:git_clone:561 : echo 'the project to the $PROJECTS variable in the job definition.' the project to the $PROJECTS variable in the job definition. + functions-common:git_clone:562 : die 562 'Cloning not allowed in this configuration' -------- Adding ERROR_ON_CLONE info in error message will help to know the reason of devstack not cloning the repo. 
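For a local, non-gate run the usual way out is simply to allow cloning, which is the default outside the gate (illustrative):

  # localrc sketch
  ERROR_ON_CLONE=False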
Change-Id: I9e9852f046fefb299b4ef4446323e9c86437212f --- functions-common | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/functions-common b/functions-common index 11679e4aa3..7a628db737 100644 --- a/functions-common +++ b/functions-common @@ -547,7 +547,7 @@ function git_clone { if [[ "$ERROR_ON_CLONE" = "True" ]]; then echo "The $git_dest project was not found; if this is a gate job, add" echo "the project to 'required-projects' in the job definition." - die $LINENO "Cloning not allowed in this configuration" + die $LINENO "ERROR_ON_CLONE is set to True so cloning not allowed in this configuration" fi git_timed clone $git_clone_flags $git_remote $git_dest fi @@ -559,7 +559,7 @@ function git_clone { if [[ "$ERROR_ON_CLONE" = "True" ]]; then echo "The $git_dest project was not found; if this is a gate job, add" echo "the project to the \$PROJECTS variable in the job definition." - die $LINENO "Cloning not allowed in this configuration" + die $LINENO "ERROR_ON_CLONE is set to True so cloning not allowed in this configuration" fi # '--branch' can also take tags git_timed clone $git_clone_flags $git_remote $git_dest --branch $git_ref From f8e00b86aee9a8f9646bf5aed2c618843307b963 Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Fri, 29 Oct 2021 14:39:41 +0200 Subject: [PATCH 139/574] Run Bullseye with more swap Since Bullseye like Centos 8 Stream needs more memory due to changed default settings in newer qemu versions, set the swap size to 4G, which is the same setting already being used for the CS8 jobs successfully. Change-Id: I83ea34d6aa647d2ab9d4d78ed354904fce836e68 Signed-off-by: Dr. Jens Harbott --- .zuul.yaml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index cbcb8638c4..7a85266eaa 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -630,9 +630,7 @@ voting: false timeout: 9000 vars: - # NOTE(yoctozepto): With concurrency equal 2, there is a random event - # that this job will run out of memory at some point. - tempest_concurrency: 1 + configure_swap_size: 4096 # NOTE(yoctozepto): Debian Bullseye does not yet offer OVN. Switch to OVS # for the time being. devstack_localrc: From 021ae0bcc8f67b6fd307aaf3c8ac59ba6cbe23b6 Mon Sep 17 00:00:00 2001 From: Lance Bragstad Date: Thu, 11 Mar 2021 15:47:50 +0000 Subject: [PATCH 140/574] Update lib/keystone to add more system users Keystone has supported system-scope since Queens and we already make sure we create a cloud profile for system-admin in /etc/openstack/clouds.yaml. This commit ensures keystone creates a couple of new users to model system-member and system-reader personas. Doing this by default in devstack makes it easier for people to use. We've already taken a similar approach in tempest by setting up the various system personas for tempest clients to use. 
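Once stacked, the new personas can be exercised directly through the generated clouds.yaml entries, for example:

  # illustrative; exact commands depend on the policies of each service
  openstack --os-cloud devstack-system-reader endpoint list
  openstack --os-cloud devstack-system-member service list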
Change-Id: Iceb7c5f517db20072e121dc7538abaa888423c67 --- functions-common | 53 ++++++++++++++++++++++++++++++++++++++++++++++++ lib/keystone | 23 ++++++++++++++++++++- 2 files changed, 75 insertions(+), 1 deletion(-) diff --git a/functions-common b/functions-common index 11679e4aa3..111d339372 100644 --- a/functions-common +++ b/functions-common @@ -129,6 +129,28 @@ function write_clouds_yaml { --os-password $ADMIN_PASSWORD \ --os-system-scope all + # system member + $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \ + --file $CLOUDS_YAML \ + --os-cloud devstack-system-member \ + --os-region-name $REGION_NAME \ + $CA_CERT_ARG \ + --os-auth-url $KEYSTONE_SERVICE_URI \ + --os-username system_member \ + --os-password $ADMIN_PASSWORD \ + --os-system-scope all + + # system reader + $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \ + --file $CLOUDS_YAML \ + --os-cloud devstack-system-reader \ + --os-region-name $REGION_NAME \ + $CA_CERT_ARG \ + --os-auth-url $KEYSTONE_SERVICE_URI \ + --os-username system_reader \ + --os-password $ADMIN_PASSWORD \ + --os-system-scope all + cat >> $CLOUDS_YAML < [] +function get_or_add_user_system_role { + local user_role_id + local domain_args + + domain_args=$(_get_domain_args $4) + + # Gets user role id + user_role_id=$(openstack role assignment list \ + --role $1 \ + --user $2 \ + --system $3 \ + $domain_args \ + -f value -c Role) + if [[ -z "$user_role_id" ]]; then + # Adds role to user and get it + openstack role add $1 \ + --user $2 \ + --system $3 \ + $domain_args + user_role_id=$(openstack role assignment list \ + --role $1 \ + --user $2 \ + --system $3 \ + $domain_args \ + -f value -c Role) + fi + echo $user_role_id +} + # Gets or adds group role to project # Usage: get_or_add_group_project_role function get_or_add_group_project_role { diff --git a/lib/keystone b/lib/keystone index 0609abd289..065ca70ec3 100644 --- a/lib/keystone +++ b/lib/keystone @@ -285,20 +285,28 @@ function configure_keystone { # admins admin admin admin # nonadmins demo, alt_demo member, anotherrole demo, alt_demo +# System User Roles +# ------------------------------------------------------------------ +# all admin admin +# all system_reader reader +# all system_member member + # Migrated from keystone_data.sh function create_keystone_accounts { # The keystone bootstrapping process (performed via keystone-manage - # bootstrap) creates an admin user, admin role, member role, and admin + # bootstrap) creates an admin user and an admin # project. As a sanity check we exercise the CLI to retrieve the IDs for # these values. local admin_project admin_project=$(openstack project show "admin" -f value -c id) local admin_user admin_user=$(openstack user show "admin" -f value -c id) + # These roles are also created during bootstrap but we don't need their IDs local admin_role="admin" local member_role="member" + local reader_role="reader" async_run ks-domain-role get_or_add_user_domain_role $admin_role $admin_user default @@ -349,6 +357,18 @@ function create_keystone_accounts { async_run ks-alt-admin get_or_add_user_project_role $admin_role $admin_user $alt_demo_project async_run ks-alt-another get_or_add_user_project_role $another_role $alt_demo_user $alt_demo_project + # Create two users, give one the member role on the system and the other + # the reader role on the system. These two users model system-member and + # system-reader personas. The admin user already has the admin role on the + # system and we can re-use this user as a system-admin. 
+ system_member_user=$(get_or_create_user "system_member" \ + "$ADMIN_PASSWORD" "default" "system_member@example.com") + async_run ks-system-member get_or_add_user_system_role $member_role $system_member_user "all" + + system_reader_user=$(get_or_create_user "system_reader" \ + "$ADMIN_PASSWORD" "default" "system_reader@example.com") + async_run ks-system-reader get_or_add_user_system_role $reader_role $system_reader_user "all" + # groups local admin_group admin_group=$(get_or_create_group "admins" \ @@ -365,6 +385,7 @@ function create_keystone_accounts { async_wait ks-demo-{member,admin,another,invis} async_wait ks-alt-{member,admin,another} + async_wait ks-system-{member,reader} async_wait ks-group-{memberdemo,anotherdemo,memberalt,anotheralt,admin} if is_service_enabled ldap; then From 95555ba39827e2f3648eb89bde91b1342c493306 Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Wed, 10 Nov 2021 06:22:52 +0100 Subject: [PATCH 141/574] Cleanup keystone library IDENTITY_API_VERSION is hardcoded to 3 in most locations already, drop the remaining occurrences, but keep the variable definition since some plugins still depend on it. Drop ENABLE_IDENTITY_V2 which no longer has any effect. Amend variable list for bootstrap_keystone(). Signed-off-by: Dr. Jens Harbott Change-Id: I06f476d2105bc6ec2b511fc5effcfcc3973eaf97 --- doc/source/configuration.rst | 11 ----------- lib/keystone | 4 +--- openrc | 2 +- stackrc | 18 +++--------------- 4 files changed, 5 insertions(+), 30 deletions(-) diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index b4fff4f208..dd8f21faaf 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -684,17 +684,6 @@ KEYSTONE_REGION_NAME to specify the region of Keystone service. KEYSTONE_REGION_NAME has a default value the same as REGION_NAME thus we omit it in the configuration of RegionOne. -Disabling Identity API v2 -+++++++++++++++++++++++++ - -The Identity API v2 is deprecated as of Mitaka and it is recommended to only -use the v3 API. It is possible to setup keystone without v2 API, by doing: - -:: - - ENABLE_IDENTITY_V2=False - - Glance ++++++ diff --git a/lib/keystone b/lib/keystone index 065ca70ec3..349d2573b6 100644 --- a/lib/keystone +++ b/lib/keystone @@ -9,7 +9,6 @@ # - ``tls`` file # - ``DEST``, ``STACK_USER`` # - ``FILES`` -# - ``IDENTITY_API_VERSION`` # - ``BASE_SQL_CONN`` # - ``SERVICE_HOST``, ``SERVICE_PROTOCOL`` # - ``S3_SERVICE_PORT`` (template backend only) @@ -540,7 +539,7 @@ function start_keystone { # unencryted traffic at this point. # If running in Apache, use the path rather than port. - local service_uri=$auth_protocol://$KEYSTONE_SERVICE_HOST/identity/v$IDENTITY_API_VERSION/ + local service_uri=$auth_protocol://$KEYSTONE_SERVICE_HOST/identity/v3/ if ! 
wait_for_service $SERVICE_TIMEOUT $service_uri; then die $LINENO "keystone did not start" @@ -569,7 +568,6 @@ function stop_keystone { # This function uses the following GLOBAL variables: # - ``KEYSTONE_BIN_DIR`` # - ``ADMIN_PASSWORD`` -# - ``IDENTITY_API_VERSION`` # - ``REGION_NAME`` # - ``KEYSTONE_SERVICE_URI`` function bootstrap_keystone { diff --git a/openrc b/openrc index beeaebea42..6d488bb0ba 100644 --- a/openrc +++ b/openrc @@ -74,7 +74,7 @@ else fi # Identity API version -export OS_IDENTITY_API_VERSION=${IDENTITY_API_VERSION:-3} +export OS_IDENTITY_API_VERSION=3 # Ask keystoneauth1 to use keystone export OS_AUTH_TYPE=password diff --git a/stackrc b/stackrc index ebe472c033..62749a7005 100755 --- a/stackrc +++ b/stackrc @@ -175,21 +175,9 @@ else export PS4='+ $(short_source): ' fi -# Configure Identity API version: 2.0, 3 -IDENTITY_API_VERSION=${IDENTITY_API_VERSION:-3} - -# Set the option ENABLE_IDENTITY_V2 to True. It defines whether the DevStack -# deployment will be deploying the Identity v2 pipelines. If this option is set -# to ``False``, DevStack will: i) disable Identity v2; ii) configure Tempest to -# skip Identity v2 specific tests; and iii) configure Horizon to use Identity -# v3. When this option is set to ``False``, the option IDENTITY_API_VERSION -# will to be set to ``3`` in order to make DevStack register the Identity -# endpoint as v3. This flag is experimental and will be used as basis to -# identify the projects which still have issues to operate with Identity v3. -ENABLE_IDENTITY_V2=$(trueorfalse False ENABLE_IDENTITY_V2) -if [ "$ENABLE_IDENTITY_V2" == "False" ]; then - IDENTITY_API_VERSION=3 -fi +# Configure Identity API version +# TODO(frickler): Drop this when plugins no longer need it +IDENTITY_API_VERSION=3 # Enable use of Python virtual environments. Individual project use of # venvs are controlled by the PROJECT_VENV array; every project with From 9c81321bfc694bd511dee8dd5d04273e368e5545 Mon Sep 17 00:00:00 2001 From: Lance Bragstad Date: Thu, 11 Mar 2021 16:29:31 +0000 Subject: [PATCH 142/574] Add additional project personas for secure RBAC This commit formalizes some additional users to act as different project users and updates clouds.yaml file so they're easy to use. It creates: - a reader on the demo project - a reader on the alt_demo project - a member on the alt_demo project With the adoption of secure RBAC personas, these are useful for using OpenStack APIs as that work continues. 
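For reference, a rough sketch of how the new entries are meant to be exercised once clouds.yaml is written (the cloud names match the ones added in this change; the specific openstack calls are only illustrative):

    # Read-only persona on the demo project: list calls should succeed.
    openstack --os-cloud devstack-reader server list

    # Member persona on the alt_demo project: resource creation is allowed.
    openstack --os-cloud devstack-alt-member network create alt-member-net

    # Reader persona on alt_demo: once services enforce the new policy
    # defaults, a create call is expected to be rejected.
    openstack --os-cloud devstack-alt-reader network create should-fail \
        || echo "denied as expected"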
Change-Id: I3237a771275311377313b7d7d80ac059ac69d031 --- functions-common | 51 +++++++++++++++++++++++++++++++++++++++--------- lib/keystone | 32 ++++++++++++++++++++++++------ 2 files changed, 68 insertions(+), 15 deletions(-) diff --git a/functions-common b/functions-common index 111d339372..996827f292 100644 --- a/functions-common +++ b/functions-common @@ -85,7 +85,7 @@ function write_clouds_yaml { if [ -f "$SSL_BUNDLE_FILE" ]; then CA_CERT_ARG="--os-cacert $SSL_BUNDLE_FILE" fi - # demo -> devstack + # devstack: user with the member role on demo project $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \ --file $CLOUDS_YAML \ --os-cloud devstack \ @@ -96,7 +96,18 @@ function write_clouds_yaml { --os-password $ADMIN_PASSWORD \ --os-project-name demo - # alt_demo -> devstack-alt + # devstack-admin: user with the admin role on the admin project + $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \ + --file $CLOUDS_YAML \ + --os-cloud devstack-admin \ + --os-region-name $REGION_NAME \ + $CA_CERT_ARG \ + --os-auth-url $KEYSTONE_SERVICE_URI \ + --os-username admin \ + --os-password $ADMIN_PASSWORD \ + --os-project-name admin + + # devstack-alt: user with the member role on alt_demo project $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \ --file $CLOUDS_YAML \ --os-cloud devstack-alt \ @@ -107,18 +118,40 @@ function write_clouds_yaml { --os-password $ADMIN_PASSWORD \ --os-project-name alt_demo - # admin -> devstack-admin + # devstack-alt-member: user with the member role on alt_demo project $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \ --file $CLOUDS_YAML \ - --os-cloud devstack-admin \ + --os-cloud devstack-alt-member \ --os-region-name $REGION_NAME \ $CA_CERT_ARG \ --os-auth-url $KEYSTONE_SERVICE_URI \ - --os-username admin \ + --os-username alt_demo_member \ --os-password $ADMIN_PASSWORD \ - --os-project-name admin + --os-project-name alt_demo + + # devstack-alt-reader: user with the reader role on alt_demo project + $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \ + --file $CLOUDS_YAML \ + --os-cloud devstack-alt-reader \ + --os-region-name $REGION_NAME \ + $CA_CERT_ARG \ + --os-auth-url $KEYSTONE_SERVICE_URI \ + --os-username alt_demo_reader \ + --os-password $ADMIN_PASSWORD \ + --os-project-name alt_demo + + # devstack-reader: user with the reader role on demo project + $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \ + --file $CLOUDS_YAML \ + --os-cloud devstack-reader \ + --os-region-name $REGION_NAME \ + $CA_CERT_ARG \ + --os-auth-url $KEYSTONE_SERVICE_URI \ + --os-username demo_reader \ + --os-password $ADMIN_PASSWORD \ + --os-project-name demo - # admin with a system-scoped token -> devstack-system + # devstack-system-admin: user with the admin role on the system $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \ --file $CLOUDS_YAML \ --os-cloud devstack-system-admin \ @@ -129,7 +162,7 @@ function write_clouds_yaml { --os-password $ADMIN_PASSWORD \ --os-system-scope all - # system member + # devstack-system-member: user with the member role on the system $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \ --file $CLOUDS_YAML \ --os-cloud devstack-system-member \ @@ -140,7 +173,7 @@ function write_clouds_yaml { --os-password $ADMIN_PASSWORD \ --os-system-scope all - # system reader + # devstack-system-reader: user with the reader role on the system $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \ --file $CLOUDS_YAML \ --os-cloud devstack-system-reader \ diff --git a/lib/keystone b/lib/keystone index 065ca70ec3..1ae950d0d1 100644 --- a/lib/keystone +++ b/lib/keystone @@ -346,19 +346,39 @@ function 
create_keystone_accounts { async_run ks-demo-another get_or_add_user_project_role $another_role $demo_user $demo_project async_run ks-demo-invis get_or_add_user_project_role $member_role $demo_user $invis_project - # alt_demo + # Create a user to act as a reader on project demo + local demo_reader + demo_reader=$(get_or_create_user "demo_reader" \ + "$ADMIN_PASSWORD" "default" "demo_reader@example.com") + + async_run ks-demo-reader get_or_add_user_project_role $reader_role $demo_reader $demo_project + + # Create a different project called alt_demo local alt_demo_project alt_demo_project=$(get_or_create_project "alt_demo" default) + # Create a user to act as member, admin and anotherrole on project alt_demo local alt_demo_user alt_demo_user=$(get_or_create_user "alt_demo" \ "$ADMIN_PASSWORD" "default" "alt_demo@example.com") async_run ks-alt-member get_or_add_user_project_role $member_role $alt_demo_user $alt_demo_project - async_run ks-alt-admin get_or_add_user_project_role $admin_role $admin_user $alt_demo_project + async_run ks-alt-admin get_or_add_user_project_role $admin_role $alt_demo_user $alt_demo_project async_run ks-alt-another get_or_add_user_project_role $another_role $alt_demo_user $alt_demo_project - # Create two users, give one the member role on the system and the other - # the reader role on the system. These two users model system-member and + # Create another user to act as a member on project alt_demo + local alt_demo_member + alt_demo_member=$(get_or_create_user "alt_demo_member" \ + "$ADMIN_PASSWORD" "default" "alt_demo_member@example.com") + async_run ks-alt-member-user get_or_add_user_project_role $member_role $alt_demo_member $alt_demo_project + + # Create another user to act as a reader on project alt_demo + local alt_demo_reader + alt_demo_reader=$(get_or_create_user "alt_demo_reader" \ + "$ADMIN_PASSWORD" "default" "alt_demo_reader@example.com") + async_run ks-alt-reader-user get_or_add_user_project_role $reader_role $alt_demo_reader $alt_demo_project + + # Create two users, give one the member role on the system and the other the + # reader role on the system. These two users model system-member and # system-reader personas. The admin user already has the admin role on the # system and we can re-use this user as a system-admin. system_member_user=$(get_or_create_user "system_member" \ @@ -383,8 +403,8 @@ function create_keystone_accounts { async_run ks-group-anotheralt get_or_add_group_project_role $another_role $non_admin_group $alt_demo_project async_run ks-group-admin get_or_add_group_project_role $admin_role $admin_group $admin_project - async_wait ks-demo-{member,admin,another,invis} - async_wait ks-alt-{member,admin,another} + async_wait ks-demo-{member,admin,another,invis,reader} + async_wait ks-alt-{member,admin,another,member-user,reader-user} async_wait ks-system-{member,reader} async_wait ks-group-{memberdemo,anotherdemo,memberalt,anotheralt,admin} From f9a896c6e6afcf52e9a50613285940c26e353ba3 Mon Sep 17 00:00:00 2001 From: Slawek Kaplonski Date: Wed, 27 Oct 2021 16:50:11 +0200 Subject: [PATCH 143/574] Rehome functions to enable Neutron's QoS service Those functions were part of the neutron devstack plugin but we discussed it during last PTG [1] and decided to move to the Devstack repo plugins which are used by e.g. CI jobs which are defined outside of the neutron repository. QoS service is used e.g. in the tempest-slow job which is defined in tempest and used by many different OpenStack projects. 
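For context, a minimal local.conf fragment that exercises these functions (service names are taken from the conditional added below; the legacy q-* aliases are shown, the neutron-* forms work the same way):

    [[local|localrc]]
    # Enable the Neutron QoS service plugin and agent extension; with the
    # L3 agent also enabled, the fip_qos and gateway_ip_qos extensions
    # are configured as well.
    enable_service q-qos
    enable_service q-l3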
[1] https://etherpad.opendev.org/p/neutron-yoga-ptg#L142 Change-Id: I48f65d530db53fe2c94cad57a8072e1158d738b0 --- lib/neutron-legacy | 8 ++++++++ lib/neutron_plugins/services/qos | 30 ++++++++++++++++++++++++++++++ 2 files changed, 38 insertions(+) create mode 100644 lib/neutron_plugins/services/qos diff --git a/lib/neutron-legacy b/lib/neutron-legacy index be29f99024..a3f6f0788d 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -279,6 +279,7 @@ source $TOP_DIR/lib/neutron_plugins/services/l3 # Additional Neutron service plugins source $TOP_DIR/lib/neutron_plugins/services/placement source $TOP_DIR/lib/neutron_plugins/services/trunk +source $TOP_DIR/lib/neutron_plugins/services/qos # Use security group or not if has_neutron_plugin_security_group; then @@ -381,6 +382,13 @@ function configure_mutnauq { if is_service_enabled q-trunk neutron-trunk; then configure_trunk_extension fi + if is_service_enabled q-qos neutron-qos; then + configure_qos + if is_service_enabled q-l3 neutron-l3; then + configure_l3_agent_extension_fip_qos + configure_l3_agent_extension_gateway_ip_qos + fi + fi iniset $NEUTRON_CONF DEFAULT api_workers "$API_WORKERS" # devstack is not a tool for running uber scale OpenStack diff --git a/lib/neutron_plugins/services/qos b/lib/neutron_plugins/services/qos new file mode 100644 index 0000000000..af9eb3d5b4 --- /dev/null +++ b/lib/neutron_plugins/services/qos @@ -0,0 +1,30 @@ +#!/bin/bash + +function configure_qos_service_plugin { + neutron_service_plugin_class_add "qos" +} + + +function configure_qos_core_plugin { + configure_qos_$NEUTRON_CORE_PLUGIN +} + + +function configure_qos_l2_agent { + plugin_agent_add_l2_agent_extension "qos" +} + + +function configure_qos { + configure_qos_service_plugin + configure_qos_core_plugin + configure_qos_l2_agent +} + +function configure_l3_agent_extension_fip_qos { + plugin_agent_add_l3_agent_extension "fip_qos" +} + +function configure_l3_agent_extension_gateway_ip_qos { + plugin_agent_add_l3_agent_extension "gateway_ip_qos" +} From f56f7a557ac4941b5204852f4173db9cf82b4dae Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Tue, 9 Nov 2021 10:57:25 +0100 Subject: [PATCH 144/574] Stop creating userrc_early We can use the devstack-admin cloud configuration everywhere now and don't need to set environment variables with cloud credentials any longer. Fix the swift setup, where some more options need to be explicitly specified now and the default OS_CLOUD setting overridden. Signed-off-by: Dr. Jens Harbott Change-Id: I86ffa9cd52454f1c1c72d29b3a0e0caa3e44b829 --- lib/swift | 15 +++++++++------ stack.sh | 28 +++------------------------- 2 files changed, 12 insertions(+), 31 deletions(-) diff --git a/lib/swift b/lib/swift index b376993150..9c13701c6e 100644 --- a/lib/swift +++ b/lib/swift @@ -866,12 +866,15 @@ function stop_swift { function swift_configure_tempurls { # note we are using swift credentials! 
- OS_USERNAME=swift \ - OS_PASSWORD=$SERVICE_PASSWORD \ - OS_USER_DOMAIN_NAME=$SERVICE_DOMAIN_NAME \ - OS_PROJECT_NAME=$SERVICE_PROJECT_NAME \ - OS_PROJECT_DOMAIN_NAME=$SERVICE_DOMAIN_NAME \ - openstack object store account \ + openstack --os-cloud "" \ + --os-region-name $REGION_NAME \ + --os-auth-url $KEYSTONE_SERVICE_URI \ + --os-username=swift \ + --os-password=$SERVICE_PASSWORD \ + --os-user-domain-name=$SERVICE_DOMAIN_NAME \ + --os-project-name=$SERVICE_PROJECT_NAME \ + --os-project-domain-name=$SERVICE_DOMAIN_NAME \ + object store account \ set --property "Temp-URL-Key=$SWIFT_TEMPURL_KEY" } diff --git a/stack.sh b/stack.sh index b5ad81b081..ead56e68c0 100755 --- a/stack.sh +++ b/stack.sh @@ -1063,35 +1063,13 @@ fi # Keystone # -------- -# Rather than just export these, we write them out to a -# intermediate userrc file that can also be used to debug if -# something goes wrong between here and running -# tools/create_userrc.sh (this script relies on services other -# than keystone being available, so we can't call it right now) -cat > $TOP_DIR/userrc_early <> $TOP_DIR/userrc_early start_tls_proxy http-services '*' 443 $SERVICE_HOST 80 fi -source $TOP_DIR/userrc_early - -# Write a clouds.yaml file +# Write a clouds.yaml file and use the devstack-admin cloud write_clouds_yaml +export OS_CLOUD=devstack-admin if is_service_enabled keystone; then echo_summary "Starting Keystone" @@ -1380,7 +1358,7 @@ fi # which is helpful in image bundle steps. if is_service_enabled nova && is_service_enabled keystone; then - USERRC_PARAMS="-PA --target-dir $TOP_DIR/accrc" + USERRC_PARAMS="-PA --target-dir $TOP_DIR/accrc --os-password $ADMIN_PASSWORD" if [ -f $SSL_BUNDLE_FILE ]; then USERRC_PARAMS="$USERRC_PARAMS --os-cacert $SSL_BUNDLE_FILE" From faed11d2a1c9a9bb06ba855d5b551b231dd6bf82 Mon Sep 17 00:00:00 2001 From: Slawek Kaplonski Date: Thu, 18 Nov 2021 10:36:57 +0100 Subject: [PATCH 145/574] Add missing ml2, L2 and L3 agent functions to devstack Previously those functions were defined in the neutron's devstack plugin but with [1] we moved qos related code into devstack and we missed about moving them too. This is follow up patch to fix that issue. [1] https://review.opendev.org/c/openstack/devstack/+/815686 Change-Id: Icf459a2f8c6ae3c3cb29b16ba0b92766af41af30 --- lib/neutron-legacy | 9 +++++++++ lib/neutron_plugins/ml2 | 4 ++++ lib/neutron_plugins/services/l3 | 9 +++++++++ 3 files changed, 22 insertions(+) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index a3f6f0788d..a5a608df72 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -1039,6 +1039,15 @@ function _ssh_check_neutron { test_with_retry "$testcmd" "server $ip didn't become ssh-able" $timeout_sec } +function plugin_agent_add_l2_agent_extension { + local l2_agent_extension=$1 + if [[ -z "$L2_AGENT_EXTENSIONS" ]]; then + L2_AGENT_EXTENSIONS=$l2_agent_extension + elif [[ ! 
,${L2_AGENT_EXTENSIONS}, =~ ,${l2_agent_extension}, ]]; then + L2_AGENT_EXTENSIONS+=",$l2_agent_extension" + fi +} + # Restore xtrace $_XTRACE_NEUTRON diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2 index e1f868f0a7..f00feac6b4 100644 --- a/lib/neutron_plugins/ml2 +++ b/lib/neutron_plugins/ml2 @@ -156,5 +156,9 @@ function has_neutron_plugin_security_group { return 0 } +function configure_qos_ml2 { + neutron_ml2_extension_driver_add "qos" +} + # Restore xtrace $_XTRACE_NEUTRON_ML2 diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3 index 98b96ac06c..72f7a32b26 100644 --- a/lib/neutron_plugins/services/l3 +++ b/lib/neutron_plugins/services/l3 @@ -427,3 +427,12 @@ function is_networking_extension_supported { EXT_LIST=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" extension list --network -c Alias -f value) [[ $EXT_LIST =~ $extension ]] && return 0 } + +function plugin_agent_add_l3_agent_extension { + local l3_agent_extension=$1 + if [[ -z "$L3_AGENT_EXTENSIONS" ]]; then + L3_AGENT_EXTENSIONS=$l3_agent_extension + elif [[ ! ,${L3_AGENT_EXTENSIONS}, =~ ,${l3_agent_extension}, ]]; then + L3_AGENT_EXTENSIONS+=",$l3_agent_extension" + fi +} From c96993d138ea9ba447fc3b3dbbbf4879fd8c20db Mon Sep 17 00:00:00 2001 From: Julia Kreger Date: Thu, 18 Nov 2021 10:39:36 -0800 Subject: [PATCH 146/574] Make OS_CLOUD be able to be configured OS_CLOUD is used to communiate to CLI tools what cloud credentials to utilize. The change I86ffa9cd52454f1c1c72d29b3a0e0caa3e44b829 unfortunately set an explicit OS_CLOUD account which breaks any jobs which are expecting a previosuly set OS_CLOUD which may be different to work. For example, OS_CLOUD being set as devstack-system-admin to facilitate Secure RBAC testing. Change-Id: Iee900e552584dda622f57eea3508df48dff2e071 --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index ead56e68c0..b5dc0ee05e 100755 --- a/stack.sh +++ b/stack.sh @@ -1069,7 +1069,7 @@ fi # Write a clouds.yaml file and use the devstack-admin cloud write_clouds_yaml -export OS_CLOUD=devstack-admin +export OS_CLOUD=${OS_CLOUD:-devstack-admin} if is_service_enabled keystone; then echo_summary "Starting Keystone" From 1d8888dc24143d81c13557ffdfa615052e794ebe Mon Sep 17 00:00:00 2001 From: Lance Bragstad Date: Thu, 11 Mar 2021 16:36:28 +0000 Subject: [PATCH 147/574] Remove unnecessary member role assignments from alt_demo This user already has the admin role assignment on a project, which implies the member role, making explicit calls to add the member role redundant. 
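A quick way to confirm the implication chain this relies on is keystone's role inference API (admin implies member, member implies reader in a default bootstrap); the command below is plain python-openstackclient and its output layout may vary between releases:

    # Show keystone's role inference rules; the admin -> member rule is
    # what makes the explicit member assignment on alt_demo redundant.
    openstack --os-cloud devstack-admin implied role list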
Change-Id: I398c5e2f098aeeb896de83872085cbce529a778a --- lib/keystone | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/keystone b/lib/keystone index 1ae950d0d1..2d48bb10bd 100644 --- a/lib/keystone +++ b/lib/keystone @@ -342,6 +342,7 @@ function create_keystone_accounts { async_wait ks-{domain-role,domain,project,service,reseller,anotherrole} async_run ks-demo-member get_or_add_user_project_role $member_role $demo_user $demo_project + async_run ks-demo-admin get_or_add_user_project_role $admin_role $admin_user $demo_project async_run ks-demo-another get_or_add_user_project_role $another_role $demo_user $demo_project async_run ks-demo-invis get_or_add_user_project_role $member_role $demo_user $invis_project @@ -361,7 +362,6 @@ function create_keystone_accounts { alt_demo_user=$(get_or_create_user "alt_demo" \ "$ADMIN_PASSWORD" "default" "alt_demo@example.com") - async_run ks-alt-member get_or_add_user_project_role $member_role $alt_demo_user $alt_demo_project async_run ks-alt-admin get_or_add_user_project_role $admin_role $alt_demo_user $alt_demo_project async_run ks-alt-another get_or_add_user_project_role $another_role $alt_demo_user $alt_demo_project @@ -404,7 +404,7 @@ function create_keystone_accounts { async_run ks-group-admin get_or_add_group_project_role $admin_role $admin_group $admin_project async_wait ks-demo-{member,admin,another,invis,reader} - async_wait ks-alt-{member,admin,another,member-user,reader-user} + async_wait ks-alt-{admin,another,member-user,reader-user} async_wait ks-system-{member,reader} async_wait ks-group-{memberdemo,anotherdemo,memberalt,anotheralt,admin} From 65a5db8e3376fbeb6871629cbfe5d77fe848e039 Mon Sep 17 00:00:00 2001 From: Takashi Kajinami Date: Mon, 3 May 2021 00:08:15 +0900 Subject: [PATCH 148/574] keystone: Dot not set the removed admin_endpoint parameter The admin_endpoint parameter has been removed from keystone[1], and setting the parameter is no longer effective. [1] 192cde56e57a06750641b319da8a72cdcaa554d0 Change-Id: I6ae6a3122668551acc018972624e914fcbb79a22 --- lib/keystone | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/lib/keystone b/lib/keystone index 1ae950d0d1..dafe005664 100644 --- a/lib/keystone +++ b/lib/keystone @@ -214,14 +214,11 @@ function configure_keystone { service_port=$KEYSTONE_SERVICE_PORT_INT fi - # Override the endpoints advertised by keystone (the public_endpoint and - # admin_endpoint) so that clients use the correct endpoint. By default, the - # keystone server uses the public_port and admin_port which isn't going to - # work when you want to use a different port (in the case of proxy), or you - # don't want the port (in the case of putting keystone on a path in - # apache). + # Override the endpoints advertised by keystone so that clients use the correct + # endpoint. By default, the keystone server uses the public_port which isn't + # going to work when you want to use a different port (in the case of proxy), + # or you don't want the port (in the case of putting keystone on a path in apache). 
iniset $KEYSTONE_CONF DEFAULT public_endpoint $KEYSTONE_SERVICE_URI - iniset $KEYSTONE_CONF DEFAULT admin_endpoint $KEYSTONE_SERVICE_URI if [[ "$KEYSTONE_TOKEN_FORMAT" != "" ]]; then iniset $KEYSTONE_CONF token provider $KEYSTONE_TOKEN_FORMAT From 6822ff39444f8ebe01084e0d92bc05ac40db8216 Mon Sep 17 00:00:00 2001 From: Gregory Thiemonge Date: Wed, 1 Sep 2021 09:36:31 +0200 Subject: [PATCH 149/574] Fix OVN DBs cleanup on startup When initializing OVN, clean up the correct database directory when using OVN from packages (/var/lib/ovn/ instead of /opt/stack/data/ovn/). The /opt/stack/data/ovn location is used only when building OVN from sources, so a fresh devstack deployment with OVN packages may already have hundreds of existing routers and ports, creating ARP collisions. Closes-Bug: #1942201 Change-Id: Ic90d4f2f9d8aaef825ea3325c0ad8fef2a1c5e39 --- lib/neutron_plugins/ovn_agent | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index 1f737fb58b..999851e33d 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -119,7 +119,13 @@ OVS_SCRIPTDIR=$OVS_SHAREDIR/scripts OVS_DATADIR=$DATA_DIR/ovs OVS_SYSCONFDIR=${OVS_SYSCONFDIR:-$OVS_PREFIX/etc/openvswitch} -OVN_DATADIR=$DATA_DIR/ovn +if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then + OVN_DATADIR=$DATA_DIR/ovn +else + # When using OVN from packages, the data dir for OVN DBs is + # /var/lib/ovn + OVN_DATADIR=/var/lib/ovn +fi OVN_SHAREDIR=$OVS_PREFIX/share/ovn OVN_SCRIPTDIR=$OVN_SHAREDIR/scripts OVN_RUNDIR=$OVS_PREFIX/var/run/ovn @@ -562,13 +568,13 @@ function init_ovn { _disable_libvirt_apparmor - mkdir -p $OVN_DATADIR + sudo mkdir -p $OVN_DATADIR mkdir -p $OVS_DATADIR rm -f $OVS_DATADIR/*.db rm -f $OVS_DATADIR/.*.db.~lock~ - rm -f $OVN_DATADIR/*.db - rm -f $OVN_DATADIR/.*.db.~lock~ + sudo rm -f $OVN_DATADIR/*.db + sudo rm -f $OVN_DATADIR/.*.db.~lock~ } function _start_ovs { From c20cd8ed9d0294e99293cdcb5eea0885a5a12573 Mon Sep 17 00:00:00 2001 From: Takashi Kajinami Date: Wed, 24 Nov 2021 01:35:29 +0900 Subject: [PATCH 150/574] cinder-backup: Ensure ca cert is defined when tls-proxy is enabled Change-Id: Id679eb7061d8e609ce76fbb5b720a041990e8e86 --- lib/cinder_backups/swift | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/cinder_backups/swift b/lib/cinder_backups/swift index d7c977e1e3..c7ec306246 100644 --- a/lib/cinder_backups/swift +++ b/lib/cinder_backups/swift @@ -24,6 +24,9 @@ function configure_cinder_backup_swift { # to use it. iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.swift.SwiftBackupDriver" iniset $CINDER_CONF DEFAULT backup_swift_url "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$SWIFT_DEFAULT_BIND_PORT/v1/AUTH_" + if is_service_enabled tls-proxy; then + iniset $CINDER_CONF DEFAULT backup_swift_ca_cert_file $SSL_BUNDLE_FILE + fi } # init_cinder_backup_swift: nothing to do From afd0f84eae75fd5a5a7611cb6e8368ef7b845211 Mon Sep 17 00:00:00 2001 From: Lance Bragstad Date: Mon, 8 Nov 2021 19:53:40 +0000 Subject: [PATCH 151/574] Remove unnecessary unset for project-scoped token in glance Before, we needed to unset a couple of parameters that would make the client return a project-scoped token instead of a system-scoped token, which we need when interacting with registered limits in keystone. This commit removes those unsets since we no longer source those variables by default. This commit also cleans up some of the redundant parameters in the registered limit calls, like region. 
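As a sanity check, the registered limits created here can be read back with the same system-scoped cloud entry (a hedged example; available filter options depend on the client version):

    # Registered limits are system-level resources, so reads need a
    # system-scoped token just like the creates below.
    openstack --os-cloud devstack-system-admin registered limit list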
Change-Id: I1af8a168a29e895d57504d41e30efea271ea232d --- lib/glance | 29 +++++++++++------------------ 1 file changed, 11 insertions(+), 18 deletions(-) diff --git a/lib/glance b/lib/glance index f18bea9ccb..4c2755f76f 100644 --- a/lib/glance +++ b/lib/glance @@ -288,24 +288,17 @@ function configure_glance_store { function configure_glance_quotas { - # NOTE(danms): We need to have some of the OS_ things unset in - # order to use system scope, which is required for creating these - # limits. This is a hack, but I dunno how else to get osc to use - # system scope. - - bash -c "unset OS_USERNAME OS_TENANT_NAME OS_PROJECT_NAME; - openstack --os-cloud devstack-system-admin registered limit create \ - --service glance --default-limit $GLANCE_LIMIT_IMAGE_SIZE_TOTAL \ - --region $REGION_NAME image_size_total; \ - openstack --os-cloud devstack-system-admin registered limit create \ - --service glance --default-limit $GLANCE_LIMIT_IMAGE_SIZE_TOTAL \ - --region $REGION_NAME image_stage_total; \ - openstack --os-cloud devstack-system-admin registered limit create \ - --service glance --default-limit 100 --region $REGION_NAME \ - image_count_total; \ - openstack --os-cloud devstack-system-admin registered limit create \ - --service glance --default-limit 100 --region $REGION_NAME \ - image_count_uploading" + # Registered limit resources in keystone are system-specific resources. + # Make sure we use a system-scoped token to interact with this API. + + openstack --os-cloud devstack-system-admin registered limit create --service glance \ + --default-limit $GLANCE_LIMIT_IMAGE_SIZE_TOTAL --region $REGION_NAME image_size_total + openstack --os-cloud devstack-system-admin registered limit create --service glance \ + --default-limit $GLANCE_LIMIT_IMAGE_SIZE_TOTAL --region $REGION_NAME image_stage_total + openstack --os-cloud devstack-system-admin registered limit create --service glance \ + --default-limit 100 --region $REGION_NAME image_count_total + openstack --os-cloud devstack-system-admin registered limit create --service glance \ + --default-limit 100 --region $REGION_NAME image_count_uploading # Tell glance to use these limits iniset $GLANCE_API_CONF DEFAULT use_keystone_limits True From b575af0cfe8aac71825b406394e9927a33ca51bd Mon Sep 17 00:00:00 2001 From: yatinkarel Date: Fri, 26 Nov 2021 12:44:41 +0530 Subject: [PATCH 152/574] Do not use sudo with OVN_DATADIR when building from source Jobs with OVN_BUILD_FROMS_SOURCE=True are broken since [1] as ovn nortd not starting due to permission issues. Fix it by not using sudo for creating OVN_DATADIR when building from source. [1] https://review.opendev.org/c/openstack/devstack/+/806858 Closes-Bug: #1952393 Change-Id: I00f0c8c8173b4d8270fbb3e6079d0d8b332e9de5 --- lib/neutron_plugins/ovn_agent | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index 999851e33d..56686f2741 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -568,7 +568,11 @@ function init_ovn { _disable_libvirt_apparmor - sudo mkdir -p $OVN_DATADIR + if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then + mkdir -p $OVN_DATADIR + else + sudo mkdir -p $OVN_DATADIR + fi mkdir -p $OVS_DATADIR rm -f $OVS_DATADIR/*.db From bd68251463b2a86e07643387b56deca53a90f3c5 Mon Sep 17 00:00:00 2001 From: Roman Dobosz Date: Fri, 26 Nov 2021 15:34:50 +0100 Subject: [PATCH 153/574] Change a way for creating data dir in case of OVN. Calculate the sudo usage with local variable. 
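Side note on the pattern rather than the change itself: expanding the assembled command unquoted relies on word splitting, which is safe here because the data dir path contains no whitespace. A hypothetical array-based variant would avoid that reliance entirely:

    function _make_ovn_datadir {
        # Array form of the same idea (not what the patch uses): build the
        # argument vector, optionally prefix sudo, then execute it.
        local -a mkdir_cmd=(mkdir -p "${OVN_DATADIR}")
        if [[ "$OVN_BUILD_FROM_SOURCE" == "False" ]]; then
            mkdir_cmd=(sudo "${mkdir_cmd[@]}")
        fi
        "${mkdir_cmd[@]}"
    }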
Change-Id: I39dff770ff296dc06395acdb430a9cfe1722a30f --- lib/neutron_plugins/ovn_agent | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index 56686f2741..3fc38288f1 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -567,12 +567,13 @@ function init_ovn { # create new ones on each devstack run. _disable_libvirt_apparmor + local mkdir_cmd="mkdir -p ${OVN_DATADIR}" - if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then - mkdir -p $OVN_DATADIR - else - sudo mkdir -p $OVN_DATADIR + if [[ "$OVN_BUILD_FROM_SOURCE" == "False" ]]; then + mkdir_cmd="sudo ${mkdir_cmd}" fi + + $mkdir_cmd mkdir -p $OVS_DATADIR rm -f $OVS_DATADIR/*.db From 5ea4c3c18c558ec1fa37e0dd9b28ce66c3da1130 Mon Sep 17 00:00:00 2001 From: Alfredo Moralejo Date: Tue, 16 Nov 2021 15:13:03 +0100 Subject: [PATCH 154/574] Support CentOS Stream 9 This patch includes changes required to run devstack on CentOS Stream 9 which has been already published in official repos by CentOS team [1]: - Add RDO deps repository for CS9. - remove xinetd package from installation for swift. Note that rsync-daemon is installed which should work fine. - Replace genisoimage by xorriso in CS9. - Use /etc/os-release to identify the distro in CS9 as it doesn't provide lsb_release command. - Use pip from rpm package instead of from get-pip.py as done in Fedora. - Add non-voting job devstack-platform-centos-9-stream to the check pipeline. Change-Id: Ic67cddabd5069211dc0611994b8b8360bcd61bef --- .zuul.yaml | 21 +++++++++++++++++++++ files/rpms/ceph | 2 +- files/rpms/n-cpu | 3 ++- files/rpms/nova | 3 ++- files/rpms/swift | 2 +- functions-common | 17 ++++++++++++----- lib/nova | 3 ++- stack.sh | 24 ++++++++++++++++-------- tools/install_pip.sh | 2 +- 9 files changed, 58 insertions(+), 19 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 7a85266eaa..3945faf82e 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -66,6 +66,16 @@ nodes: - controller +- nodeset: + name: devstack-single-node-centos-9-stream + nodes: + - name: controller + label: centos-9-stream + groups: + - name: tempest + nodes: + - controller + - nodeset: name: devstack-single-node-opensuse-15 nodes: @@ -622,6 +632,16 @@ vars: configure_swap_size: 4096 +- job: + name: devstack-platform-centos-9-stream + parent: tempest-full-py3 + description: CentOS 9 Stream platform test + nodeset: devstack-single-node-centos-9-stream + voting: false + timeout: 9000 + vars: + configure_swap_size: 4096 + - job: name: devstack-platform-debian-bullseye parent: tempest-full-py3 @@ -766,6 +786,7 @@ - devstack-enforce-scope - devstack-platform-fedora-latest - devstack-platform-centos-8-stream + - devstack-platform-centos-9-stream - devstack-platform-debian-bullseye - devstack-multinode - devstack-unit-tests diff --git a/files/rpms/ceph b/files/rpms/ceph index 64befc5f00..33a55f80ea 100644 --- a/files/rpms/ceph +++ b/files/rpms/ceph @@ -1,3 +1,3 @@ ceph # NOPRIME -redhat-lsb-core +redhat-lsb-core # not:rhel9 xfsprogs diff --git a/files/rpms/n-cpu b/files/rpms/n-cpu index 68e5472685..7ce5a72d6b 100644 --- a/files/rpms/n-cpu +++ b/files/rpms/n-cpu @@ -1,9 +1,10 @@ cryptsetup dosfstools -genisoimage +genisoimage # not:rhel9 iscsi-initiator-utils libosinfo lvm2 sg3_utils # Stuff for diablo volumes sysfsutils +xorriso # not:rhel8 diff --git a/files/rpms/nova b/files/rpms/nova index 8ea8ccc5ca..9522e5729d 100644 --- a/files/rpms/nova +++ b/files/rpms/nova @@ -3,7 +3,7 @@ curl dnsmasq # for q-dhcp dnsmasq-utils # for 
dhcp_release ebtables -genisoimage # required for config_drive +genisoimage # not:rhel9 required for config_drive iptables iputils kernel-modules @@ -13,3 +13,4 @@ polkit rabbitmq-server # NOPRIME sqlite sudo +xorriso # not:rhel8 diff --git a/files/rpms/swift b/files/rpms/swift index 18c957c08a..b6009a321e 100644 --- a/files/rpms/swift +++ b/files/rpms/swift @@ -4,4 +4,4 @@ memcached rsync-daemon sqlite xfsprogs -xinetd # not:f34 +xinetd # not:f34,rhel9 diff --git a/functions-common b/functions-common index b1ca6ad3c0..bd029dd700 100644 --- a/functions-common +++ b/functions-common @@ -368,12 +368,19 @@ function _ensure_lsb_release { # - os_VENDOR # - os_PACKAGE function GetOSVersion { - # We only support distros that provide a sane lsb_release - _ensure_lsb_release + # CentOS Stream 9 does not provide lsb_release + source /etc/os-release + if [[ "${ID}${VERSION}" == "centos9" ]]; then + os_RELEASE=${VERSION_ID} + os_CODENAME="n/a" + os_VENDOR=$(echo $NAME | tr -d '[:space:]') + else + _ensure_lsb_release - os_RELEASE=$(lsb_release -r -s) - os_CODENAME=$(lsb_release -c -s) - os_VENDOR=$(lsb_release -i -s) + os_RELEASE=$(lsb_release -r -s) + os_CODENAME=$(lsb_release -c -s) + os_VENDOR=$(lsb_release -i -s) + fi if [[ $os_VENDOR =~ (Debian|Ubuntu|LinuxMint) ]]; then os_PACKAGE="deb" diff --git a/lib/nova b/lib/nova index 9aae2c4a9c..31b7642efc 100644 --- a/lib/nova +++ b/lib/nova @@ -479,7 +479,8 @@ function create_nova_conf { fi # nova defaults to genisoimage but only mkisofs is available for 15.0+ - if is_suse; then + # rhel provides mkisofs symlink to genisoimage or xorriso appropiately + if is_suse || is_fedora; then iniset $NOVA_CONF DEFAULT mkisofs_cmd /usr/bin/mkisofs fi diff --git a/stack.sh b/stack.sh index b5ad81b081..fa4e7e9006 100755 --- a/stack.sh +++ b/stack.sh @@ -227,7 +227,7 @@ write_devstack_version # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -SUPPORTED_DISTROS="bullseye|focal|f34|opensuse-15.2|opensuse-tumbleweed|rhel8" +SUPPORTED_DISTROS="bullseye|focal|f34|opensuse-15.2|opensuse-tumbleweed|rhel8|rhel9" if [[ ! 
${DISTRO} =~ $SUPPORTED_DISTROS ]]; then echo "WARNING: this script has not been tested on $DISTRO" @@ -300,13 +300,17 @@ function _install_epel { } function _install_rdo { - if [[ "$TARGET_BRANCH" == "master" ]]; then - # rdo-release.el8.rpm points to latest RDO release, use that for master - sudo dnf -y install https://rdoproject.org/repos/rdo-release.el8.rpm - else - # For stable branches use corresponding release rpm - rdo_release=$(echo $TARGET_BRANCH | sed "s|stable/||g") - sudo dnf -y install https://rdoproject.org/repos/openstack-${rdo_release}/rdo-release-${rdo_release}.el8.rpm + if [[ $DISTRO == "rhel8" ]]; then + if [[ "$TARGET_BRANCH" == "master" ]]; then + # rdo-release.el8.rpm points to latest RDO release, use that for master + sudo dnf -y install https://rdoproject.org/repos/rdo-release.el8.rpm + else + # For stable branches use corresponding release rpm + rdo_release=$(echo $TARGET_BRANCH | sed "s|stable/||g") + sudo dnf -y install https://rdoproject.org/repos/openstack-${rdo_release}/rdo-release-${rdo_release}.el8.rpm + fi + elif [[ $DISTRO == "rhel9" ]]; then + sudo curl -L -o /etc/yum.repos.d/delorean-deps.repo http://trunk.rdoproject.org/centos9-master/delorean-deps.repo fi sudo dnf -y update } @@ -385,6 +389,10 @@ if [[ $DISTRO == "rhel8" ]]; then # RHBZ: https://bugzilla.redhat.com/show_bug.cgi?id=1154272 # Patch: https://github.com/rpm-software-management/dnf/pull/1448 echo "[]" | sudo tee /var/cache/dnf/expired_repos.json +elif [[ $DISTRO == "rhel9" ]]; then + sudo dnf config-manager --set-enabled crb + # rabbitmq and other packages are provided by RDO repositories. + _install_rdo fi # Ensure python is installed diff --git a/tools/install_pip.sh b/tools/install_pip.sh index c72dc89a55..259375a150 100755 --- a/tools/install_pip.sh +++ b/tools/install_pip.sh @@ -118,7 +118,7 @@ if [[ -n $PYPI_ALTERNATIVE_URL ]]; then configure_pypi_alternative_url fi -if is_fedora && [[ ${DISTRO} == f* ]]; then +if is_fedora && [[ ${DISTRO} == f* || ${DISTRO} == rhel9 ]]; then # get-pip.py will not install over the python3-pip package in # Fedora 34 any more. # https://bugzilla.redhat.com/show_bug.cgi?id=1988935 From 31334f9a9b2edbdc4a7b45e2e04aaec827639d62 Mon Sep 17 00:00:00 2001 From: Lee Yarwood Date: Thu, 4 Nov 2021 18:30:29 +0000 Subject: [PATCH 155/574] nova: Use noVNC 1.3.0 when installing from source Additionally make the repo name lowercase to match the project name in our zuul config so that jobs can check the repo out. 
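For anyone pinning a different console version locally, the relevant knobs are the stackrc defaults changed below; an override in local.conf would look roughly like this (assuming the usual from-source install path, i.e. noVNC is not being installed from distro packages):

    [[local|localrc]]
    # Source used for the noVNC console; the values shown are the new
    # defaults introduced by this change and can be replaced as needed.
    NOVNC_REPO=https://github.com/novnc/novnc.git
    NOVNC_BRANCH=v1.3.0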
Change-Id: Ic2d9c4fa837461bbc29e067a81912b5f72efd3ca --- lib/nova | 2 +- stackrc | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/nova b/lib/nova index 9aae2c4a9c..3460c2145a 100644 --- a/lib/nova +++ b/lib/nova @@ -831,7 +831,7 @@ function install_nova { NOVNC_WEB_DIR=/usr/share/novnc install_package novnc else - NOVNC_WEB_DIR=$DEST/noVNC + NOVNC_WEB_DIR=$DEST/novnc git_clone $NOVNC_REPO $NOVNC_WEB_DIR $NOVNC_BRANCH fi fi diff --git a/stackrc b/stackrc index 62749a7005..4fc09af9a8 100755 --- a/stackrc +++ b/stackrc @@ -590,8 +590,8 @@ IRONIC_PYTHON_AGENT_REPO=${IRONIC_PYTHON_AGENT_REPO:-${GIT_BASE}/openstack/ironi IRONIC_PYTHON_AGENT_BRANCH=${IRONIC_PYTHON_AGENT_BRANCH:-$TARGET_BRANCH} # a websockets/html5 or flash powered VNC console for vm instances -NOVNC_REPO=${NOVNC_REPO:-https://github.com/novnc/noVNC.git} -NOVNC_BRANCH=${NOVNC_BRANCH:-v1.1.0} +NOVNC_REPO=${NOVNC_REPO:-https://github.com/novnc/novnc.git} +NOVNC_BRANCH=${NOVNC_BRANCH:-v1.3.0} # a websockets/html5 or flash powered SPICE console for vm instances SPICE_REPO=${SPICE_REPO:-http://anongit.freedesktop.org/git/spice/spice-html5.git} From fc8ef86fbe09a467ee8bcffa79760d3f1e699450 Mon Sep 17 00:00:00 2001 From: Lee Yarwood Date: Tue, 9 Mar 2021 17:32:25 +0000 Subject: [PATCH 156/574] Only write out uwsgi configs when deploying API services Previously this would always happen for Nova and Cinder even if n-api and c-api were not enabled on the host respectively. This change stops this by placing both calls write_uwsgi_config behind is_service_enabled checks. Change-Id: I997685da771736dbad79bcfe4b00dbc63bd6d6b6 --- lib/cinder | 4 +++- lib/nova | 9 +++++++-- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/lib/cinder b/lib/cinder index cefb609676..76314c1d1f 100644 --- a/lib/cinder +++ b/lib/cinder @@ -353,7 +353,9 @@ function configure_cinder { # Format logging setup_logging $CINDER_CONF $CINDER_USE_MOD_WSGI - write_uwsgi_config "$CINDER_UWSGI_CONF" "$CINDER_UWSGI" "/volume" + if is_service_enabled c-api; then + write_uwsgi_config "$CINDER_UWSGI_CONF" "$CINDER_UWSGI" "/volume" + fi if [[ -r $CINDER_PLUGINS/$CINDER_DRIVER ]]; then configure_cinder_driver diff --git a/lib/nova b/lib/nova index 9aae2c4a9c..8109446572 100644 --- a/lib/nova +++ b/lib/nova @@ -488,8 +488,13 @@ function create_nova_conf { iniset $NOVA_CONF upgrade_levels compute "auto" - write_uwsgi_config "$NOVA_UWSGI_CONF" "$NOVA_UWSGI" "/compute" - write_uwsgi_config "$NOVA_METADATA_UWSGI_CONF" "$NOVA_METADATA_UWSGI" "" "$SERVICE_LISTEN_ADDRESS:${METADATA_SERVICE_PORT}" + if is_service_enabled n-api; then + write_uwsgi_config "$NOVA_UWSGI_CONF" "$NOVA_UWSGI" "/compute" + fi + + if is_service_enabled n-api-meta; then + write_uwsgi_config "$NOVA_METADATA_UWSGI_CONF" "$NOVA_METADATA_UWSGI" "" "$SERVICE_LISTEN_ADDRESS:${METADATA_SERVICE_PORT}" + fi if is_service_enabled ceilometer; then iniset $NOVA_CONF DEFAULT instance_usage_audit "True" From 418535883763cb31e54ab3882ee3eacc42afd4f3 Mon Sep 17 00:00:00 2001 From: Slawek Kaplonski Date: Tue, 6 Jul 2021 12:05:31 +0200 Subject: [PATCH 157/574] Install OVS from source when it was configured like that Function _neutron_ovs_base_install_agent_packages always tried to install openvswitch from packages and start it using systemd units. That was failing when ovs was expected to be installed from source. This patch fixes that. 
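A minimal local.conf fragment exercising the fixed path (Q_BUILD_OVS_FROM_GIT and OVS_REPO are the variables defined in lib/neutron_plugins/ovs_source below; the commented overrides are optional and shown only as examples):

    [[local|localrc]]
    # Build and run Open vSwitch from git instead of the distro packages;
    # with this fix the agent package step honours the flag as well.
    Q_BUILD_OVS_FROM_GIT=True
    # Optional source overrides:
    # OVS_REPO=https://github.com/openvswitch/ovs.git
    # OVS_BRANCH=master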
Change-Id: Iae8625dd800d30061ea3dbed9eb0dfbe16f21572 --- lib/neutron_plugins/ovn_agent | 13 +++------ lib/neutron_plugins/ovs_base | 49 +++++++++++++++++++++------------- lib/neutron_plugins/ovs_source | 1 + 3 files changed, 35 insertions(+), 28 deletions(-) diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index 1f737fb58b..c0bba2c370 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -24,11 +24,6 @@ source ${TOP_DIR}/lib/neutron_plugins/openvswitch_agent # Load devstack ovs compliation and loading functions source ${TOP_DIR}/lib/neutron_plugins/ovs_source -# Defaults -# -------- - -Q_BUILD_OVS_FROM_GIT=$(trueorfalse True Q_BUILD_OVS_FROM_GIT) - # Set variables for building OVN from source OVN_REPO=${OVN_REPO:-https://github.com/ovn-org/ovn.git} OVN_REPO_NAME=$(basename ${OVN_REPO} | cut -f1 -d'.') @@ -74,6 +69,9 @@ OVN_UUID=${OVN_UUID:-} # unless the distro kernel includes ovs+conntrack support. OVN_BUILD_MODULES=$(trueorfalse False OVN_BUILD_MODULES) OVN_BUILD_FROM_SOURCE=$(trueorfalse False OVN_BUILD_FROM_SOURCE) +if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then + Q_BUILD_OVS_FROM_GIT=True +fi # Whether or not to install the ovs python module from ovs source. This can be # used to test and validate new ovs python features. This should only be used @@ -341,11 +339,6 @@ function ovn_sanity_check { # install_ovn() - Collect source and prepare function install_ovn { - if [[ "$Q_BUILD_OVS_FROM_GIT" == "False" ]]; then - echo "Installation of OVS from source disabled." - return 0 - fi - echo "Installing OVN and dependent packages" # Check the OVN configuration diff --git a/lib/neutron_plugins/ovs_base b/lib/neutron_plugins/ovs_base index 2e63fe3c7b..8acf586189 100644 --- a/lib/neutron_plugins/ovs_base +++ b/lib/neutron_plugins/ovs_base @@ -7,6 +7,12 @@ _XTRACE_NEUTRON_OVS_BASE=$(set +o | grep xtrace) set +o xtrace +# Load devstack ovs compliation and loading functions +source ${TOP_DIR}/lib/neutron_plugins/ovs_source + +# Defaults +# -------- + OVS_BRIDGE=${OVS_BRIDGE:-br-int} # OVS recognize default 'system' datapath or 'netdev' for userspace datapath OVS_DATAPATH_TYPE=${OVS_DATAPATH_TYPE:-system} @@ -60,26 +66,33 @@ function _neutron_ovs_base_install_ubuntu_dkms { } function _neutron_ovs_base_install_agent_packages { - # Install deps - install_package $(get_packages "openvswitch") - if is_ubuntu; then - _neutron_ovs_base_install_ubuntu_dkms - restart_service openvswitch-switch - elif is_fedora; then - restart_service openvswitch - sudo systemctl enable openvswitch - elif is_suse; then - if [[ $DISTRO == "sle12" ]] && vercmp "$os_RELEASE" "<" "12.2" ; then + if [ "$Q_BUILD_OVS_FROM_GIT" == "True" ]; then + remove_ovs_packages + compile_ovs False /usr /var + load_conntrack_gre_module + start_new_ovs + else + # Install deps + install_package $(get_packages "openvswitch") + if is_ubuntu; then + _neutron_ovs_base_install_ubuntu_dkms restart_service openvswitch-switch - else - # workaround for https://bugzilla.suse.com/show_bug.cgi?id=1085971 - if [[ $DISTRO =~ "tumbleweed" ]]; then - sudo sed -i -e "s,^OVS_USER_ID=.*,OVS_USER_ID='root:root'," /etc/sysconfig/openvswitch + elif is_fedora; then + restart_service openvswitch + sudo systemctl enable openvswitch + elif is_suse; then + if [[ $DISTRO == "sle12" ]] && vercmp "$os_RELEASE" "<" "12.2" ; then + restart_service openvswitch-switch + else + # workaround for https://bugzilla.suse.com/show_bug.cgi?id=1085971 + if [[ $DISTRO =~ "tumbleweed" ]]; then + sudo sed -i -e 
"s,^OVS_USER_ID=.*,OVS_USER_ID='root:root'," /etc/sysconfig/openvswitch + fi + restart_service openvswitch || { + journalctl -xe || : + systemctl status openvswitch + } fi - restart_service openvswitch || { - journalctl -xe || : - systemctl status openvswitch - } fi fi } diff --git a/lib/neutron_plugins/ovs_source b/lib/neutron_plugins/ovs_source index 08951d175d..9c87dce551 100644 --- a/lib/neutron_plugins/ovs_source +++ b/lib/neutron_plugins/ovs_source @@ -14,6 +14,7 @@ # Defaults # -------- +Q_BUILD_OVS_FROM_GIT=$(trueorfalse False Q_BUILD_OVS_FROM_GIT) # Set variables for building OVS from source OVS_REPO=${OVS_REPO:-https://github.com/openvswitch/ovs.git} From c3b7051387d4332f956148c5676383499fa31859 Mon Sep 17 00:00:00 2001 From: Ade Lee Date: Fri, 6 Aug 2021 14:26:37 -0400 Subject: [PATCH 158/574] Add option to set chap algorithms for iscsid for FIPS The default CHAP algorithm for iscsid is md5, which is disallowed under fips. We will set the chap algorithm to "SHA3-256,SHA256", which should work under all configurations. Change-Id: Ide186fb53b3f9826ff602cb7fb797f245a15033a --- lib/nova | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/nova b/lib/nova index 5fcccffec1..1420183a19 100644 --- a/lib/nova +++ b/lib/nova @@ -315,6 +315,10 @@ EOF sudo systemctl daemon-reload fi + # set chap algorithms. The default chap_algorithm is md5 which will + # not work under FIPS + iniset -sudo /etc/iscsi/iscsid.conf DEFAULT "node.session.auth.chap_algs" "SHA3-256,SHA256" + # ensure that iscsid is started, even when disabled by default restart_service iscsid fi From 24b65adc9cedff9c7a8ab412fb39613ef5d4a627 Mon Sep 17 00:00:00 2001 From: Slawek Kaplonski Date: Tue, 22 Jun 2021 15:31:46 +0200 Subject: [PATCH 159/574] Deploy Neutron with enforced new RBAC rules This patch adds new config option NEUTRON_ENFORCE_NEW_DEFAULTS which if set to True will deploy Neutron with enforce new rbac defaults and scopes. It will also use SYSTEM_ADMIN user to interact with Neutron where it is needed. Depends-On: https://review.opendev.org/c/openstack/neutron/+/798821 Change-Id: I14d934f0deced34d74003b92824cad3c44ec4f5e --- .zuul.yaml | 1 + lib/neutron | 19 ++++++++++ lib/neutron-legacy | 18 ++++++++++ lib/neutron_plugins/services/l3 | 62 +++++++++++++++++++++------------ lib/tempest | 10 ++++-- 5 files changed, 85 insertions(+), 25 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 3945faf82e..b5ab1277b6 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -609,6 +609,7 @@ # Keep enabeling the services here to run with system scope CINDER_ENFORCE_SCOPE: true GLANCE_ENFORCE_SCOPE: true + NEUTRON_ENFORCE_SCOPE: true - job: name: devstack-multinode diff --git a/lib/neutron b/lib/neutron index 885df97f7c..15d548e33d 100644 --- a/lib/neutron +++ b/lib/neutron @@ -37,6 +37,11 @@ NEUTRON_DEPLOY_MOD_WSGI=$(trueorfalse False NEUTRON_DEPLOY_MOD_WSGI) NEUTRON_AGENT=${NEUTRON_AGENT:-openvswitch} NEUTRON_DIR=$DEST/neutron +# If NEUTRON_ENFORCE_SCOPE == True, it will set "enforce_scope" +# and "enforce_new_defaults" to True in the Neutron's config to enforce usage +# of the new RBAC policies and scopes. 
+NEUTRON_ENFORCE_SCOPE=$(trueorfalse False NEUTRON_ENFORCE_SCOPE) + NEUTRON_DISTRIBUTED_ROUTING=$(trueorfalse False NEUTRON_DISTRIBUTED_ROUTING) # Distributed Virtual Router (DVR) configuration # Can be: @@ -232,6 +237,7 @@ function configure_neutron_new { if [[ "$NEUTRON_PORT_SECURITY" = "True" ]]; then neutron_ml2_extension_driver_add port_security fi + configure_rbac_policies fi # Neutron OVS or LB agent @@ -612,6 +618,19 @@ function configure_neutron { fi } +# configure_rbac_policies() - Configure Neutron to enforce new RBAC +# policies and scopes if NEUTRON_ENFORCE_SCOPE == True +function configure_rbac_policies { + if [ "$NEUTRON_ENFORCE_SCOPE" == "True" ]; then + iniset $NEUTRON_CONF oslo_policy enforce_new_defaults True + iniset $NEUTRON_CONF oslo_policy enforce_scope True + else + iniset $NEUTRON_CONF oslo_policy enforce_new_defaults False + iniset $NEUTRON_CONF oslo_policy enforce_scope False + fi +} + + function configure_neutron_nova { if is_neutron_legacy_enabled; then # Call back to old function diff --git a/lib/neutron-legacy b/lib/neutron-legacy index a5a608df72..b906a1b2ff 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -90,6 +90,11 @@ NEUTRON_DEPLOY_MOD_WSGI=$(trueorfalse False NEUTRON_DEPLOY_MOD_WSGI) NEUTRON_UWSGI_CONF=$NEUTRON_CONF_DIR/neutron-api-uwsgi.ini +# If NEUTRON_ENFORCE_SCOPE == True, it will set "enforce_scope" +# and "enforce_new_defaults" to True in the Neutron's config to enforce usage +# of the new RBAC policies and scopes. +NEUTRON_ENFORCE_SCOPE=$(trueorfalse False NEUTRON_ENFORCE_SCOPE) + # Agent binaries. Note, binary paths for other agents are set in per-service # scripts in lib/neutron_plugins/services/ AGENT_DHCP_BINARY="$NEUTRON_BIN_DIR/neutron-dhcp-agent" @@ -489,6 +494,19 @@ function configure_neutron_after_post_config { if [[ $Q_SERVICE_PLUGIN_CLASSES != '' ]]; then iniset $NEUTRON_CONF DEFAULT service_plugins $Q_SERVICE_PLUGIN_CLASSES fi + configure_rbac_policies +} + +# configure_rbac_policies() - Configure Neutron to enforce new RBAC +# policies and scopes if NEUTRON_ENFORCE_SCOPE == True +function configure_rbac_policies { + if [ "$NEUTRON_ENFORCE_SCOPE" == "True" ]; then + iniset $NEUTRON_CONF oslo_policy enforce_new_defaults True + iniset $NEUTRON_CONF oslo_policy enforce_scope True + else + iniset $NEUTRON_CONF oslo_policy enforce_new_defaults False + iniset $NEUTRON_CONF oslo_policy enforce_scope False + fi } # Start running OVN processes diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3 index 72f7a32b26..ccb5398f75 100644 --- a/lib/neutron_plugins/services/l3 +++ b/lib/neutron_plugins/services/l3 @@ -100,6 +100,11 @@ SUBNETPOOL_PREFIX_V6=${SUBNETPOOL_PREFIX_V6:-$IPV6_ADDRS_SAFE_TO_USE} SUBNETPOOL_SIZE_V4=${SUBNETPOOL_SIZE_V4:-26} SUBNETPOOL_SIZE_V6=${SUBNETPOOL_SIZE_V6:-64} +NEUTRON_ADMIN_CLOUD_NAME="devstack-admin" +if [ "$NEUTRON_ENFORCE_SCOPE" == "True" ]; then + NEUTRON_ADMIN_CLOUD_NAME="devstack-system-admin" +fi + default_v4_route_devs=$(ip -4 route | grep ^default | awk '{print $5}') default_v6_route_devs=$(ip -6 route list match default table all | grep via | awk '{print $5}') @@ -151,6 +156,10 @@ function create_neutron_initial_network { project_id=$(openstack project list | grep " demo " | get_field 1) die_if_not_set $LINENO project_id "Failure retrieving project_id for demo" + local admin_project_id + admin_project_id=$(openstack project list | grep " admin " | get_field 1) + die_if_not_set $LINENO admin_project_id "Failure retrieving project_id for admin" + # Allow drivers that need 
to create an initial network to do so here if type -p neutron_plugin_create_initial_network_profile > /dev/null; then neutron_plugin_create_initial_network_profile $PHYSICAL_NETWORK @@ -159,10 +168,10 @@ function create_neutron_initial_network { if is_networking_extension_supported "auto-allocated-topology"; then if [[ "$USE_SUBNETPOOL" == "True" ]]; then if [[ "$IP_VERSION" =~ 4.* ]]; then - SUBNETPOOL_V4_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet pool create $SUBNETPOOL_NAME_V4 --default-prefix-length $SUBNETPOOL_SIZE_V4 --pool-prefix $SUBNETPOOL_PREFIX_V4 --share --default -f value -c id) + SUBNETPOOL_V4_ID=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" subnet pool create $SUBNETPOOL_NAME_V4 --project "$admin_project_id" --default-prefix-length $SUBNETPOOL_SIZE_V4 --pool-prefix $SUBNETPOOL_PREFIX_V4 --share --default -f value -c id) fi if [[ "$IP_VERSION" =~ .*6 ]]; then - SUBNETPOOL_V6_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet pool create $SUBNETPOOL_NAME_V6 --default-prefix-length $SUBNETPOOL_SIZE_V6 --pool-prefix $SUBNETPOOL_PREFIX_V6 --share --default -f value -c id) + SUBNETPOOL_V6_ID=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" subnet pool create $SUBNETPOOL_NAME_V6 --project "$admin_project_id" --default-prefix-length $SUBNETPOOL_SIZE_V6 --pool-prefix $SUBNETPOOL_PREFIX_V6 --share --default -f value -c id) fi fi fi @@ -170,14 +179,14 @@ function create_neutron_initial_network { if is_provider_network; then die_if_not_set $LINENO PHYSICAL_NETWORK "You must specify the PHYSICAL_NETWORK" die_if_not_set $LINENO PROVIDER_NETWORK_TYPE "You must specify the PROVIDER_NETWORK_TYPE" - NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create $PHYSICAL_NETWORK --project $project_id --provider-network-type $PROVIDER_NETWORK_TYPE --provider-physical-network "$PHYSICAL_NETWORK" ${SEGMENTATION_ID:+--provider-segment $SEGMENTATION_ID} --share | grep ' id ' | get_field 2) + NET_ID=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" network create $PHYSICAL_NETWORK --project $project_id --provider-network-type $PROVIDER_NETWORK_TYPE --provider-physical-network "$PHYSICAL_NETWORK" ${SEGMENTATION_ID:+--provider-segment $SEGMENTATION_ID} --share | grep ' id ' | get_field 2) die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PHYSICAL_NETWORK $project_id" if [[ "$IP_VERSION" =~ 4.* ]]; then if [ -z $SUBNETPOOL_V4_ID ]; then fixed_range_v4=$FIXED_RANGE fi - SUBNET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create --project $project_id --ip-version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} $PROVIDER_SUBNET_NAME --gateway $NETWORK_GATEWAY ${SUBNETPOOL_V4_ID:+--subnet-pool $SUBNETPOOL_V4_ID} --network $NET_ID ${fixed_range_v4:+--subnet-range $fixed_range_v4} | grep ' id ' | get_field 2) + SUBNET_ID=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" subnet create --project $project_id --ip-version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} $PROVIDER_SUBNET_NAME --gateway $NETWORK_GATEWAY ${SUBNETPOOL_V4_ID:+--subnet-pool $SUBNETPOOL_V4_ID} --network $NET_ID ${fixed_range_v4:+--subnet-range $fixed_range_v4} | grep ' id ' | get_field 2) die_if_not_set $LINENO SUBNET_ID "Failure creating SUBNET_ID for $PROVIDER_SUBNET_NAME $project_id" fi @@ -187,7 +196,7 @@ function create_neutron_initial_network { if [ -z $SUBNETPOOL_V6_ID ]; then 
fixed_range_v6=$IPV6_PROVIDER_FIXED_RANGE fi - IPV6_SUBNET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create --project $project_id --ip-version 6 --gateway $IPV6_PROVIDER_NETWORK_GATEWAY $IPV6_PROVIDER_SUBNET_NAME ${SUBNETPOOL_V6_ID:+--subnet-pool $SUBNETPOOL_V6_ID} --network $NET_ID ${fixed_range_v6:+--subnet-range $fixed_range_v6} | grep ' id ' | get_field 2) + IPV6_SUBNET_ID=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" subnet create --project $project_id --ip-version 6 --gateway $IPV6_PROVIDER_NETWORK_GATEWAY $IPV6_PROVIDER_SUBNET_NAME ${SUBNETPOOL_V6_ID:+--subnet-pool $SUBNETPOOL_V6_ID} --network $NET_ID ${fixed_range_v6:+--subnet-range $fixed_range_v6} | grep ' id ' | get_field 2) die_if_not_set $LINENO IPV6_SUBNET_ID "Failure creating IPV6_SUBNET_ID for $IPV6_PROVIDER_SUBNET_NAME $project_id" fi @@ -197,7 +206,7 @@ function create_neutron_initial_network { sudo ip link set $PUBLIC_INTERFACE up fi else - NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create --project $project_id "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2) + NET_ID=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" network create --project $project_id "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2) die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PRIVATE_NETWORK_NAME $project_id" if [[ "$IP_VERSION" =~ 4.* ]]; then @@ -215,11 +224,11 @@ function create_neutron_initial_network { # Create a router, and add the private subnet as one of its interfaces if [[ "$Q_L3_ROUTER_PER_TENANT" == "True" ]]; then # create a tenant-owned router. - ROUTER_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router create --project $project_id $Q_ROUTER_NAME | grep ' id ' | get_field 2) + ROUTER_ID=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" router create --project $project_id $Q_ROUTER_NAME | grep ' id ' | get_field 2) die_if_not_set $LINENO ROUTER_ID "Failure creating ROUTER_ID for $project_id $Q_ROUTER_NAME" else # Plugin only supports creating a single router, which should be admin owned. - ROUTER_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router create $Q_ROUTER_NAME | grep ' id ' | get_field 2) + ROUTER_ID=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" router create $Q_ROUTER_NAME --project $admin_project_id | grep ' id ' | get_field 2) die_if_not_set $LINENO ROUTER_ID "Failure creating ROUTER_ID for $Q_ROUTER_NAME" fi @@ -229,9 +238,9 @@ function create_neutron_initial_network { fi # Create an external network, and a subnet. 
Configure the external network as router gw if [ "$Q_USE_PROVIDERNET_FOR_PUBLIC" = "True" ]; then - EXT_NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create "$PUBLIC_NETWORK_NAME" $EXTERNAL_NETWORK_FLAGS --provider-network-type ${PUBLIC_PROVIDERNET_TYPE:-flat} ${PUBLIC_PROVIDERNET_SEGMENTATION_ID:+--provider-segment $PUBLIC_PROVIDERNET_SEGMENTATION_ID} --provider-physical-network ${PUBLIC_PHYSICAL_NETWORK} | grep ' id ' | get_field 2) + EXT_NET_ID=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" network create "$PUBLIC_NETWORK_NAME" $EXTERNAL_NETWORK_FLAGS --provider-network-type ${PUBLIC_PROVIDERNET_TYPE:-flat} ${PUBLIC_PROVIDERNET_SEGMENTATION_ID:+--provider-segment $PUBLIC_PROVIDERNET_SEGMENTATION_ID} --provider-physical-network ${PUBLIC_PHYSICAL_NETWORK} --project $admin_project_id | grep ' id ' | get_field 2) else - EXT_NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create "$PUBLIC_NETWORK_NAME" $EXTERNAL_NETWORK_FLAGS | grep ' id ' | get_field 2) + EXT_NET_ID=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" network create "$PUBLIC_NETWORK_NAME" $EXTERNAL_NETWORK_FLAGS --project $admin_project_id | grep ' id ' | get_field 2) fi die_if_not_set $LINENO EXT_NET_ID "Failure creating EXT_NET_ID for $PUBLIC_NETWORK_NAME" @@ -258,11 +267,12 @@ function _neutron_create_private_subnet_v4 { if [[ -n "$NETWORK_GATEWAY" ]]; then subnet_params+="--gateway $NETWORK_GATEWAY " fi + subnet_params+="${SUBNETPOOL_V4_ID:+--subnet-pool $SUBNETPOOL_V4_ID} " subnet_params+="${fixed_range_v4:+--subnet-range $fixed_range_v4} " subnet_params+="--network $NET_ID $PRIVATE_SUBNET_NAME" local subnet_id - subnet_id=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create $subnet_params | grep ' id ' | get_field 2) + subnet_id=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" subnet create $subnet_params | grep ' id ' | get_field 2) die_if_not_set $LINENO subnet_id "Failure creating private IPv4 subnet for $project_id" echo $subnet_id } @@ -285,14 +295,17 @@ function _neutron_create_private_subnet_v6 { subnet_params+="${fixed_range_v6:+--subnet-range $fixed_range_v6} " subnet_params+="$ipv6_modes --network $NET_ID $IPV6_PRIVATE_SUBNET_NAME " local ipv6_subnet_id - ipv6_subnet_id=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create $subnet_params | grep ' id ' | get_field 2) + ipv6_subnet_id=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" subnet create $subnet_params | grep ' id ' | get_field 2) die_if_not_set $LINENO ipv6_subnet_id "Failure creating private IPv6 subnet for $project_id" echo $ipv6_subnet_id } # Create public IPv4 subnet function _neutron_create_public_subnet_v4 { - local subnet_params="--ip-version 4 " + local admin_project_id + admin_project_id=$(openstack project list | grep " admin " | get_field 1) + die_if_not_set $LINENO admin_project_id "Failure retrieving project_id for admin" + local subnet_params="--ip-version 4 --project $admin_project_id" subnet_params+="${Q_FLOATING_ALLOCATION_POOL:+--allocation-pool $Q_FLOATING_ALLOCATION_POOL} " if [[ -n "$PUBLIC_NETWORK_GATEWAY" ]]; then subnet_params+="--gateway $PUBLIC_NETWORK_GATEWAY " @@ -300,26 +313,29 @@ function _neutron_create_public_subnet_v4 { subnet_params+="--network $EXT_NET_ID --subnet-range $FLOATING_RANGE --no-dhcp " subnet_params+="$PUBLIC_SUBNET_NAME" local id_and_ext_gw_ip - id_and_ext_gw_ip=$(openstack 
--os-cloud devstack-admin --os-region "$REGION_NAME" subnet create $subnet_params | grep -e 'gateway_ip' -e ' id ') + id_and_ext_gw_ip=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" subnet create $subnet_params | grep -e 'gateway_ip' -e ' id ') die_if_not_set $LINENO id_and_ext_gw_ip "Failure creating public IPv4 subnet" echo $id_and_ext_gw_ip } # Create public IPv6 subnet function _neutron_create_public_subnet_v6 { - local subnet_params="--ip-version 6 " + local admin_project_id + admin_project_id=$(openstack project list | grep " admin " | get_field 1) + die_if_not_set $LINENO admin_project_id "Failure retrieving project_id for admin" + local subnet_params="--ip-version 6 --project $admin_project_id " subnet_params+="--gateway $IPV6_PUBLIC_NETWORK_GATEWAY " subnet_params+="--network $EXT_NET_ID --subnet-range $IPV6_PUBLIC_RANGE --no-dhcp " subnet_params+="$IPV6_PUBLIC_SUBNET_NAME" local ipv6_id_and_ext_gw_ip - ipv6_id_and_ext_gw_ip=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create $subnet_params | grep -e 'gateway_ip' -e ' id ') + ipv6_id_and_ext_gw_ip=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" subnet create $subnet_params | grep -e 'gateway_ip' -e ' id ') die_if_not_set $LINENO ipv6_id_and_ext_gw_ip "Failure creating an IPv6 public subnet" echo $ipv6_id_and_ext_gw_ip } # Configure neutron router for IPv4 public access function _neutron_configure_router_v4 { - openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router add subnet $ROUTER_ID $SUBNET_ID + openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" router add subnet $ROUTER_ID $SUBNET_ID # Create a public subnet on the external network local id_and_ext_gw_ip id_and_ext_gw_ip=$(_neutron_create_public_subnet_v4 $EXT_NET_ID) @@ -327,7 +343,7 @@ function _neutron_configure_router_v4 { ext_gw_ip=$(echo $id_and_ext_gw_ip | get_field 2) PUB_SUBNET_ID=$(echo $id_and_ext_gw_ip | get_field 5) # Configure the external network as the default router gateway - openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router set --external-gateway $EXT_NET_ID $ROUTER_ID + openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" router set --external-gateway $EXT_NET_ID $ROUTER_ID # This logic is specific to using OVN or the l3-agent for layer 3 if ([[ $Q_AGENT == "ovn" ]] && [[ "$OVN_L3_CREATE_PUBLIC_NETWORK" == "True" ]] && is_service_enabled q-svc neutron-server) || is_service_enabled q-l3 neutron-l3; then @@ -354,7 +370,7 @@ function _neutron_configure_router_v4 { sudo ip addr add $ext_gw_ip/$cidr_len dev $ext_gw_interface sudo ip link set $ext_gw_interface up fi - ROUTER_GW_IP=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" port list -c 'Fixed IP Addresses' --device-owner network:router_gateway | awk -F'ip_address' '{ print $2 }' | cut -f2 -d\' | tr '\n' ' ') + ROUTER_GW_IP=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" port list -c 'Fixed IP Addresses' --device-owner network:router_gateway | awk -F'ip_address' '{ print $2 }' | cut -f2 -d\' | tr '\n' ' ') die_if_not_set $LINENO ROUTER_GW_IP "Failure retrieving ROUTER_GW_IP" fi _neutron_set_router_id @@ -363,7 +379,7 @@ function _neutron_configure_router_v4 { # Configure neutron router for IPv6 public access function _neutron_configure_router_v6 { - openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router add subnet $ROUTER_ID $IPV6_SUBNET_ID + openstack --os-cloud 
"$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" router add subnet $ROUTER_ID $IPV6_SUBNET_ID # Create a public subnet on the external network local ipv6_id_and_ext_gw_ip ipv6_id_and_ext_gw_ip=$(_neutron_create_public_subnet_v6 $EXT_NET_ID) @@ -375,7 +391,7 @@ function _neutron_configure_router_v6 { # If the external network has not already been set as the default router # gateway when configuring an IPv4 public subnet, do so now if [[ "$IP_VERSION" == "6" ]]; then - openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router set --external-gateway $EXT_NET_ID $ROUTER_ID + openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" router set --external-gateway $EXT_NET_ID $ROUTER_ID fi # This logic is specific to using OVN or the l3-agent for layer 3 @@ -396,7 +412,7 @@ function _neutron_configure_router_v6 { sudo sysctl -w net.ipv6.conf.all.forwarding=1 # Configure and enable public bridge # Override global IPV6_ROUTER_GW_IP with the true value from neutron - IPV6_ROUTER_GW_IP=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" port list -c 'Fixed IP Addresses' | grep $ipv6_pub_subnet_id | awk -F'ip_address' '{ print $2 }' | cut -f2 -d\' | tr '\n' ' ') + IPV6_ROUTER_GW_IP=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" port list -c 'Fixed IP Addresses' | grep $ipv6_pub_subnet_id | awk -F'ip_address' '{ print $2 }' | cut -f2 -d\' | tr '\n' ' ') die_if_not_set $LINENO IPV6_ROUTER_GW_IP "Failure retrieving IPV6_ROUTER_GW_IP" if is_neutron_ovs_base_plugin; then @@ -424,7 +440,7 @@ function _neutron_configure_router_v6 { function is_networking_extension_supported { local extension=$1 # TODO(sc68cal) cache this instead of calling every time - EXT_LIST=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" extension list --network -c Alias -f value) + EXT_LIST=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" extension list --network -c Alias -f value) [[ $EXT_LIST =~ $extension ]] && return 0 } diff --git a/lib/tempest b/lib/tempest index 8fd54c5d5e..ab802171d1 100644 --- a/lib/tempest +++ b/lib/tempest @@ -90,6 +90,10 @@ TEMPEST_USE_TEST_ACCOUNTS=$(trueorfalse False TEMPEST_USE_TEST_ACCOUNTS) # it will run tempest with TEMPEST_CONCURRENCY=${TEMPEST_CONCURRENCY:-$(nproc)} +NEUTRON_ADMIN_CLOUD_NAME="devstack-admin" +if [ "$NEUTRON_ENFORCE_SCOPE" == "True" ]; then + NEUTRON_ADMIN_CLOUD_NAME="devstack-system-admin" +fi # Functions # --------- @@ -287,8 +291,8 @@ function configure_tempest { if [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" == "True" ]] && is_networking_extension_supported 'external-net'; then public_network_id=$(openstack --os-cloud devstack-admin network show -f value -c id $PUBLIC_NETWORK_NAME) # make sure shared network presence does not confuses the tempest tests - openstack --os-cloud devstack-admin network create --share shared - openstack --os-cloud devstack-admin subnet create --description shared-subnet --subnet-range ${TEMPEST_SHARED_POOL:-192.168.233.0/24} --network shared shared-subnet + openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" network create --share shared --project "$admin_project_id" + openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" subnet create --description shared-subnet --subnet-range ${TEMPEST_SHARED_POOL:-192.168.233.0/24} --network shared shared-subnet --project "$admin_project_id" fi iniset $TEMPEST_CONFIG DEFAULT use_syslog $SYSLOG @@ -443,6 +447,8 @@ function configure_tempest { iniset 
$TEMPEST_CONFIG network-feature-enabled ipv6_subnet_attributes "$IPV6_SUBNET_ATTRIBUTES_ENABLED" iniset $TEMPEST_CONFIG network-feature-enabled port_security $NEUTRON_PORT_SECURITY + iniset $TEMPEST_CONFIG enforce_scope neutron "$NEUTRON_ENFORCE_SCOPE" + # Scenario SCENARIO_IMAGE_DIR=${SCENARIO_IMAGE_DIR:-$FILES} SCENARIO_IMAGE_FILE=$DEFAULT_IMAGE_FILE_NAME From 7880ba665e2d594b2eabb0533e6dca52e042ca50 Mon Sep 17 00:00:00 2001 From: Kevin Zhao Date: Wed, 31 Mar 2021 04:58:28 +0000 Subject: [PATCH 160/574] openEuler 20.03 LTS SP2 support openEuler is an open-source Linux based operating system. The current openEuler kernel is based on Linux and supports multi arch, such as X86_64 and aarch64. It fully unleashes the potential of computing chips. As an efficient, stable, and secure open-source OS built by global open-source contributors, openEuler applies to database, big data, cloud computing, and AI scenarios. openEuler is using RPM for package management. Note: Currently there is no available package for uwsgi-plugin-python3 and ovn, so that openEuler needs manually install them from source. Website: https://www.openeuler.org/en/ Change-Id: I169a0017998054604a63ac6c177d0f43f8a32ba6 Co-Authored-By: wangxiyuan Signed-off-by: Kevin Zhao --- .zuul.yaml | 25 +++++++++++++++++++++++++ doc/source/index.rst | 2 +- files/rpms/ceph | 2 +- files/rpms/general | 4 +++- files/rpms/nova | 2 +- files/rpms/swift | 2 +- functions-common | 14 ++++++++++++-- lib/apache | 16 +++------------- lib/nova_plugins/functions-libvirt | 2 +- roles/apache-logs-conf/tasks/main.yaml | 1 + stack.sh | 8 +++++++- tools/fixup_stuff.sh | 24 ++++++++++++++++++++++++ 12 files changed, 80 insertions(+), 22 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 3945faf82e..a4385572bc 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -106,6 +106,16 @@ nodes: - controller +- nodeset: + name: devstack-single-node-openeuler-20.03-sp2 + nodes: + - name: controller + label: openEuler-20-03-LTS-SP2 + groups: + - name: tempest + nodes: + - controller + - nodeset: name: openstack-two-node nodes: @@ -683,6 +693,20 @@ # Enable Neutron ML2/OVS services q-agt: true +- job: + name: devstack-platform-openEuler-20.03-SP2 + parent: tempest-full-py3 + description: openEuler 20.03 SP2 platform test + nodeset: devstack-single-node-openeuler-20.03-sp2 + voting: false + timeout: 9000 + vars: + configure_swap_size: 4096 + devstack_localrc: + # NOTE(wxy): OVN package is not supported by openEuler yet. Build it + # from source instead. + OVN_BUILD_FROM_SOURCE: True + - job: name: devstack-no-tls-proxy parent: tempest-full-py3 @@ -788,6 +812,7 @@ - devstack-platform-centos-8-stream - devstack-platform-centos-9-stream - devstack-platform-debian-bullseye + - devstack-platform-openEuler-20.03-SP2 - devstack-multinode - devstack-unit-tests - openstack-tox-bashate diff --git a/doc/source/index.rst b/doc/source/index.rst index 08ce4cb061..feb50ce4e9 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -38,7 +38,7 @@ Install Linux Start with a clean and minimal install of a Linux system. DevStack attempts to support the two latest LTS releases of Ubuntu, the -latest/current Fedora version, CentOS/RHEL 8 and OpenSUSE. +latest/current Fedora version, CentOS/RHEL 8, OpenSUSE and openEuler. If you do not have a preference, Ubuntu 20.04 (Focal Fossa) is the most tested, and will probably go the smoothest. 
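An illustrative aside, not part of the patch above or below: the files/rpms changes that follow rely on the "# dist:<distro>" and "# not:<distro>" qualifiers, which DevStack's package-list helpers (get_packages and friends in functions-common) evaluate against the detected $DISTRO, here openEuler-20.03. A simplified sketch of that filtering, assuming a hypothetical filter_packages helper and ignoring NOPRIME, architecture and other tags, could look like:

    # Sketch only, not DevStack's real parser.
    function filter_packages {
        local distro=$1 pkg_file=$2
        local pkg tags
        while read -r pkg tags; do
            # Skip blank lines and whole-line comments.
            [[ -z "$pkg" || "$pkg" == \#* ]] && continue
            # "# not:distroA,distroB" drops the package on the listed distros.
            if [[ "$tags" == *not:*"$distro"* ]]; then
                continue
            fi
            # "# dist:distroA" keeps the package only on the listed distros.
            if [[ "$tags" == *dist:* && "$tags" != *"$distro"* ]]; then
                continue
            fi
            echo "$pkg"
        done < "$pkg_file"
    }

With the rpm list hunks below applied, filter_packages openEuler-20.03 files/rpms/general would keep make and systemd-devel but drop redhat-rpm-config, which matches the intent of the qualifiers this patch adds.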
diff --git a/files/rpms/ceph b/files/rpms/ceph index 33a55f80ea..93b5746aa6 100644 --- a/files/rpms/ceph +++ b/files/rpms/ceph @@ -1,3 +1,3 @@ ceph # NOPRIME -redhat-lsb-core # not:rhel9 +redhat-lsb-core # not:rhel9,openEuler-20.03 xfsprogs diff --git a/files/rpms/general b/files/rpms/general index 33da0a5385..163a7c8f24 100644 --- a/files/rpms/general +++ b/files/rpms/general @@ -16,6 +16,7 @@ libjpeg-turbo-devel # Pillow 3.0.0 libxml2-devel # lxml libxslt-devel # lxml libyaml-devel +make # dist:openEuler-20.03 net-tools openssh-server openssl @@ -27,7 +28,8 @@ psmisc python3-devel python3-pip python3-systemd -redhat-rpm-config # missing dep for gcc hardening flags, see rhbz#1217376 +redhat-rpm-config # not:openEuler-20.03 missing dep for gcc hardening flags, see rhbz#1217376 +systemd-devel # dist:openEuler-20.03 tar tcpdump unzip diff --git a/files/rpms/nova b/files/rpms/nova index 9522e5729d..9e8621c628 100644 --- a/files/rpms/nova +++ b/files/rpms/nova @@ -6,7 +6,7 @@ ebtables genisoimage # not:rhel9 required for config_drive iptables iputils -kernel-modules +kernel-modules # not:openEuler-20.03 kpartx parted polkit diff --git a/files/rpms/swift b/files/rpms/swift index b6009a321e..faf0a3175a 100644 --- a/files/rpms/swift +++ b/files/rpms/swift @@ -1,5 +1,5 @@ curl -liberasurecode-devel +liberasurecode-devel # not:openEuler-20.03 memcached rsync-daemon sqlite diff --git a/functions-common b/functions-common index 80f43554d0..e593328f37 100644 --- a/functions-common +++ b/functions-common @@ -388,7 +388,7 @@ function _ensure_lsb_release { elif [[ -x $(command -v zypper 2>/dev/null) ]]; then sudo zypper -n install lsb-release elif [[ -x $(command -v dnf 2>/dev/null) ]]; then - sudo dnf install -y redhat-lsb-core + sudo dnf install -y redhat-lsb-core || sudo dnf install -y openeuler-lsb else die $LINENO "Unable to find or auto-install lsb_release" fi @@ -459,6 +459,10 @@ function GetDistro { # Drop the . release as we assume it's compatible # XXX re-evaluate when we get RHEL10 DISTRO="rhel${os_RELEASE::1}" + elif [[ "$os_VENDOR" =~ (openEuler) ]]; then + # The DISTRO here is `openEuler-20.03`. While, actually only openEuler + # 20.03 LTS SP2 is fully tested. Other SP version maybe have bugs. + DISTRO="openEuler-$os_RELEASE" else # We can't make a good choice here. Setting a sensible DISTRO # is part of the problem, but not the major issue -- we really @@ -510,6 +514,7 @@ function is_fedora { fi [ "$os_VENDOR" = "Fedora" ] || [ "$os_VENDOR" = "Red Hat" ] || \ + [ "$os_VENDOR" = "openEuler" ] || \ [ "$os_VENDOR" = "RedHatEnterpriseServer" ] || \ [ "$os_VENDOR" = "RedHatEnterprise" ] || \ [ "$os_VENDOR" = "CentOS" ] || [ "$os_VENDOR" = "CentOSStream" ] || \ @@ -558,7 +563,12 @@ function is_ubuntu { [ "$os_PACKAGE" = "deb" ] } - +function is_openeuler { + if [[ -z "$os_PACKAGE" ]]; then + GetOSVersion + fi + [ "$os_VENDOR" = "openEuler" ] +} # Git Functions # ============= diff --git a/lib/apache b/lib/apache index 4bea07dc55..cbe61adf34 100644 --- a/lib/apache +++ b/lib/apache @@ -82,19 +82,10 @@ function install_apache_uwsgi { apxs="apxs" fi - # This varies based on packaged/installed. If we've - # pip_installed, then the pip setup will only build a "python" - # module that will be either python2 or python3 depending on what - # it was built with. - # - # For package installs, the distro ships both plugins and you need - # to select the right one ... it will not be autodetected. 
- UWSGI_PYTHON_PLUGIN=python3 - if is_ubuntu; then local pkg_list="uwsgi uwsgi-plugin-python3 libapache2-mod-proxy-uwsgi" install_package ${pkg_list} - elif is_fedora; then + elif is_fedora && ! is_openeuler; then # Note httpd comes with mod_proxy_uwsgi and it is loaded by # default; the mod_proxy_uwsgi package actually conflicts now. # See: @@ -122,7 +113,6 @@ function install_apache_uwsgi { popd # delete the temp directory sudo rm -rf $dir - UWSGI_PYTHON_PLUGIN=python fi if is_ubuntu || is_suse ; then @@ -283,7 +273,7 @@ function write_uwsgi_config { # configured after graceful shutdown iniset "$file" uwsgi worker-reload-mercy $WORKER_TIMEOUT iniset "$file" uwsgi enable-threads true - iniset "$file" uwsgi plugins http,${UWSGI_PYTHON_PLUGIN} + iniset "$file" uwsgi plugins http,python3 # uwsgi recommends this to prevent thundering herd on accept. iniset "$file" uwsgi thunder-lock true # Set hook to trigger graceful shutdown on SIGTERM @@ -336,7 +326,7 @@ function write_local_uwsgi_http_config { iniset "$file" uwsgi die-on-term true iniset "$file" uwsgi exit-on-reload false iniset "$file" uwsgi enable-threads true - iniset "$file" uwsgi plugins http,${UWSGI_PYTHON_PLUGIN} + iniset "$file" uwsgi plugins http,python3 # uwsgi recommends this to prevent thundering herd on accept. iniset "$file" uwsgi thunder-lock true # Set hook to trigger graceful shutdown on SIGTERM diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt index 63882e05fe..3e7d2801d6 100644 --- a/lib/nova_plugins/functions-libvirt +++ b/lib/nova_plugins/functions-libvirt @@ -90,7 +90,7 @@ function install_libvirt { install_package libvirt libvirt-devel python3-libvirt if is_arch "aarch64"; then - install_package edk2.git-aarch64 + install_package edk2-aarch64 fi fi diff --git a/roles/apache-logs-conf/tasks/main.yaml b/roles/apache-logs-conf/tasks/main.yaml index bd64574c9b..6b7ea37857 100644 --- a/roles/apache-logs-conf/tasks/main.yaml +++ b/roles/apache-logs-conf/tasks/main.yaml @@ -64,6 +64,7 @@ 'Debian': '/etc/apache2/sites-enabled/' 'Suse': '/etc/apache2/conf.d/' 'RedHat': '/etc/httpd/conf.d/' + 'openEuler': '/etc/httpd/conf.d/' - name: Discover configurations find: diff --git a/stack.sh b/stack.sh index a10e6eff25..cb16fda998 100755 --- a/stack.sh +++ b/stack.sh @@ -227,7 +227,7 @@ write_devstack_version # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -SUPPORTED_DISTROS="bullseye|focal|f34|opensuse-15.2|opensuse-tumbleweed|rhel8|rhel9" +SUPPORTED_DISTROS="bullseye|focal|f34|opensuse-15.2|opensuse-tumbleweed|rhel8|rhel9|openEuler-20.03" if [[ ! ${DISTRO} =~ $SUPPORTED_DISTROS ]]; then echo "WARNING: this script has not been tested on $DISTRO" @@ -278,6 +278,12 @@ chmod 0440 $TEMPFILE sudo chown root:root $TEMPFILE sudo mv $TEMPFILE /etc/sudoers.d/50_stack_sh +# TODO(wxy): Currently some base packages are not installed by default in +# openEuler. Remove the code below once the packaged are installed by default +# in the future. +if [[ $DISTRO == "openEuler-20.03" ]]; then + install_package hostname +fi # Configure Distro Repositories # ----------------------------- diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index fe5dafa994..750849db68 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -148,8 +148,32 @@ function fixup_ubuntu { sudo rm -rf /usr/lib/python3/dist-packages/simplejson-*.egg-info } +function fixup_openeuler { + if ! 
is_openeuler; then + return + fi + + if is_arch "x86_64"; then + arch="x86_64" + elif is_arch "aarch64"; then + arch="aarch64" + fi + + # Some packages' version in openEuler are too old, use the newer ones we + # provide in oepkg. (oepkg is an openEuler third part yum repo which is + # endorsed by openEuler community) + (echo '[openstack-ci]' + echo 'name=openstack' + echo 'baseurl=https://repo.oepkgs.net/openEuler/rpm/openEuler-20.03-LTS-SP2/budding-openeuler/openstack-master-ci/'$arch'/' + echo 'enabled=1' + echo 'gpgcheck=0') | sudo tee -a /etc/yum.repos.d/openstack-master.repo > /dev/null + + yum_install liberasurecode-devel +} + function fixup_all { fixup_ubuntu fixup_fedora fixup_suse + fixup_openeuler } From 588894753971c32f6fff9b2158c3427b012cf9ec Mon Sep 17 00:00:00 2001 From: Slawek Kaplonski Date: Wed, 22 Dec 2021 16:00:29 +0100 Subject: [PATCH 161/574] Clean up compile_ovn function's parameters That function was accepting 3 positional arguments and first of them was boolean value "build_modules" which isn't used anywhere in that function. So this patch cleans it a bit by removing that not used parameter. Change-Id: I5c57b9116338a63b7bfb170c02e33bb4eae725da --- lib/neutron_plugins/ovn_agent | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index 3fc38288f1..099b639458 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -294,16 +294,13 @@ function _disable_libvirt_apparmor { # compile_ovn() - Compile OVN from source and load needed modules # Accepts three parameters: -# - first optional is False by default and means that -# modules are built and installed. -# - second optional parameter defines prefix for +# - first optional parameter defines prefix for # ovn compilation -# - third optional parameter defines localstatedir for +# - second optional parameter defines localstatedir for # ovn single machine runtime function compile_ovn { - local build_modules=${1:-False} - local prefix=$2 - local localstatedir=$3 + local prefix=$1 + local localstatedir=$2 if [ -n "$prefix" ]; then prefix="--prefix=$prefix" @@ -381,7 +378,7 @@ function install_ovn { compile_ovs $OVN_BUILD_MODULES if use_new_ovn_repository; then - compile_ovn $OVN_BUILD_MODULES + compile_ovn fi sudo mkdir -p $OVS_PREFIX/var/log/openvswitch From 353c3f9cb1e70929898116b0b6c0020c43d93aea Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Thu, 23 Dec 2021 12:01:44 +0100 Subject: [PATCH 162/574] Fix stacking without preconfigured DATABASE_PASSWORD When we need to read a DATABASE_PASSWORD from the user, make sure we actually use it in our database URLs. Signed-off-by: Dr. Jens Harbott Change-Id: I5ebf6b0280e82f2c87a63cbee7a9957c6bd26898 --- lib/database | 6 ++++-- stack.sh | 2 ++ 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/lib/database b/lib/database index 7940cf2208..78563f6f6d 100644 --- a/lib/database +++ b/lib/database @@ -89,6 +89,10 @@ function initialize_database_backends { DATABASE_PASSWORD=$MYSQL_PASSWORD fi + return 0 +} + +function define_database_baseurl { # We configure Nova, Horizon, Glance and Keystone to use MySQL as their # database server. While they share a single server, each has their own # database and tables. 
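An illustrative aside, not part of the patch: deferring define_database_baseurl matters because BASE_SQL_CONN, assembled in the next hunk, bakes DATABASE_PASSWORD into the connection URL shared by every service. With the default MySQL backend and made-up example values it expands roughly like this:

    # Assumed example values, for illustration only.
    DATABASE_TYPE=mysql
    DATABASE_USER=root
    DATABASE_HOST=127.0.0.1
    DATABASE_PASSWORD=secretdatabase    # collected by read_password in stack.sh
    # get_database_type_mysql resolves to "mysql+pymysql" with the default PyMySQL driver.
    BASE_SQL_CONN="mysql+pymysql://root:secretdatabase@127.0.0.1"
    # Per-service URLs append a database name, e.g.
    #   mysql+pymysql://root:secretdatabase@127.0.0.1/neutron?charset=utf8

Composing the URL inside initialize_database_backends, before stack.sh had prompted for the password, froze whatever (possibly empty) value was set at that point into those URLs, which is the failure this patch fixes.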
@@ -100,8 +104,6 @@ function initialize_database_backends { # NOTE: Don't specify ``/db`` in this string so we can use it for multiple services BASE_SQL_CONN=${BASE_SQL_CONN:-$(get_database_type_$DATABASE_TYPE)://$DATABASE_USER:$DATABASE_PASSWORD@$DATABASE_HOST} - - return 0 } # Recreate a given database diff --git a/stack.sh b/stack.sh index a10e6eff25..6b5625e922 100755 --- a/stack.sh +++ b/stack.sh @@ -691,6 +691,8 @@ if initialize_database_backends; then # Last chance for the database password. This must be handled here # because read_password is not a library function. read_password DATABASE_PASSWORD "ENTER A PASSWORD TO USE FOR THE DATABASE." + + define_database_baseurl else echo "No database enabled" fi From 134205c1388ac69169698ff2fe36cba23044ff62 Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Thu, 23 Dec 2021 12:26:36 +0100 Subject: [PATCH 163/574] Don't enable the dstat service in CI jobs We still are seeing regular job failures because the pcp package fails to install. Assume that we can still enable it on demand when someone needs to debug specific job issues, let us just disable it by default. Related-Bug: 1943184 Signed-off-by: Dr. Jens Harbott Change-Id: I32ef8038e21c818623db9389588b3c6d3f98dcad --- .zuul.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index b5ab1277b6..e5ef7ee080 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -432,7 +432,7 @@ PUBLIC_BRIDGE_MTU: '{{ external_bridge_mtu }}' devstack_services: # Shared services - dstat: true + dstat: false etcd3: true memory_tracker: true mysql: true @@ -441,7 +441,7 @@ subnode: devstack_services: # Shared services - dstat: true + dstat: false memory_tracker: true devstack_localrc: # Multinode specific settings @@ -507,7 +507,7 @@ # Core services enabled for this branch. # This list replaces the test-matrix. # Shared services - dstat: true + dstat: false etcd3: true memory_tracker: true mysql: true @@ -557,7 +557,7 @@ # Core services enabled for this branch. # This list replaces the test-matrix. # Shared services - dstat: true + dstat: false memory_tracker: true tls-proxy: true # Nova services From 05e622ead243325658ae5aff5b5b53ce60135c57 Mon Sep 17 00:00:00 2001 From: yatinkarel Date: Wed, 29 Dec 2021 12:30:01 +0530 Subject: [PATCH 164/574] Use upper-constraints from in review changes Currently upper-constraints.txt is not getting used from in-review changes of requirements project and leading to merge of broken requirements[1]. Use master branch to fetch constraints instead of the remote branch. [1] https://review.opendev.org/c/openstack/requirements/+/822575 Depends-On: https://review.opendev.org/c/openstack/requirements/+/823128 Change-Id: I5d42ac6b54bf20804d7e5faa39d1289102318b64 --- lib/tempest | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index ab802171d1..bdbd3caec8 100644 --- a/lib/tempest +++ b/lib/tempest @@ -119,7 +119,7 @@ function set_tempest_venv_constraints { local tmp_c tmp_c=$1 if [[ $TEMPEST_VENV_UPPER_CONSTRAINTS == "master" ]]; then - (cd $REQUIREMENTS_DIR && git show origin/master:upper-constraints.txt) > $tmp_c + (cd $REQUIREMENTS_DIR && git show master:upper-constraints.txt) > $tmp_c else echo "Using $TEMPEST_VENV_UPPER_CONSTRAINTS constraints in Tempest virtual env." 
cat $TEMPEST_VENV_UPPER_CONSTRAINTS > $tmp_c From c1a75c6a504d720e9d46f924f5c3da07fddfee72 Mon Sep 17 00:00:00 2001 From: Miguel Lavalle Date: Fri, 31 Dec 2021 16:14:23 -0600 Subject: [PATCH 165/574] Fix mysqladmin failure for Fedora 34 and mariadb mysqladmin is incorrectly installed in Fedora 34 with mariadb. This causes the failure of Zuul Fedora based jobs. The issue is a conflict between mariadb and community mysql that is described in [1] and [2]. The workaround is to explicitly install package "mariadb" Also configure an increased swap size like for the other platform jobs in order to avoid OOM issues. [1] https://bugzilla.redhat.com/show_bug.cgi?id=2026933 [2] https://lists.launchpad.net/maria-discuss/msg06179.html Closes-Bug: #1956116 Change-Id: Icf6d7e1af5130689ea10b29d37cc9b188b2c9754 --- .zuul.yaml | 3 +++ lib/databases/mysql | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/.zuul.yaml b/.zuul.yaml index 2c55b545e0..272018fa06 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -725,6 +725,8 @@ description: Fedora latest platform test nodeset: devstack-single-node-fedora-latest voting: false + vars: + configure_swap_size: 4096 - job: name: devstack-platform-fedora-latest-virt-preview @@ -733,6 +735,7 @@ nodeset: devstack-single-node-fedora-latest voting: false vars: + configure_swap_size: 4096 devstack_localrc: ENABLE_FEDORA_VIRT_PREVIEW_REPO: true diff --git a/lib/databases/mysql b/lib/databases/mysql index d0fa1199a7..8edbf8c4a4 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -178,7 +178,7 @@ EOF if is_oraclelinux; then install_package mysql-community-server elif is_fedora; then - install_package mariadb-server mariadb-devel + install_package mariadb-server mariadb-devel mariadb sudo systemctl enable $MYSQL_SERVICE_NAME elif is_suse; then install_package mariadb-server From 2ef4a4c8516bc6373bc7f4cafee62db715144952 Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Mon, 3 Jan 2022 15:13:44 +0100 Subject: [PATCH 166/574] Fix tempest upper-constraints When deploying devstack in a stable branch, the master branch is available locally only in a CI environment where Zuul prepares all available branches. For a non-CI deployment we need to stick to using the remote branch as was the case before [0]. While the situation on the master branch isn't really broken, we apply the fix here anyway so that future stable branches are created in a working state. [0] I5d42ac6b54bf20804d7e5faa39d1289102318b64 Closes-Bug: #1956219 Signed-off-by: Dr. Jens Harbott Change-Id: Ib7719cb2d48b34db70f885e0afe77d904abba3b5 --- lib/tempest | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index bdbd3caec8..adffeda371 100644 --- a/lib/tempest +++ b/lib/tempest @@ -119,7 +119,9 @@ function set_tempest_venv_constraints { local tmp_c tmp_c=$1 if [[ $TEMPEST_VENV_UPPER_CONSTRAINTS == "master" ]]; then - (cd $REQUIREMENTS_DIR && git show master:upper-constraints.txt) > $tmp_c + (cd $REQUIREMENTS_DIR && + git show master:upper-constraints.txt 2>/dev/null || + git show origin/master:upper-constraints.txt) > $tmp_c else echo "Using $TEMPEST_VENV_UPPER_CONSTRAINTS constraints in Tempest virtual env." 
cat $TEMPEST_VENV_UPPER_CONSTRAINTS > $tmp_c From 4448f243f396c502e34fda5bb148ba0224f934e7 Mon Sep 17 00:00:00 2001 From: Eduardo Santos Date: Thu, 6 Jan 2022 14:03:16 -0300 Subject: [PATCH 167/574] Fix public subnet creation command There was no space after the --project option in the command that creates the public subnet, thus if any option follows, the option itself will be parsed as part of the value passed to the --project option. This change just adds the missing space. Change-Id: I1e7375578342a82717222e902fcd65a4a62e33a7 --- lib/neutron_plugins/services/l3 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3 index ccb5398f75..a8844c475e 100644 --- a/lib/neutron_plugins/services/l3 +++ b/lib/neutron_plugins/services/l3 @@ -305,7 +305,7 @@ function _neutron_create_public_subnet_v4 { local admin_project_id admin_project_id=$(openstack project list | grep " admin " | get_field 1) die_if_not_set $LINENO admin_project_id "Failure retrieving project_id for admin" - local subnet_params="--ip-version 4 --project $admin_project_id" + local subnet_params="--ip-version 4 --project $admin_project_id " subnet_params+="${Q_FLOATING_ALLOCATION_POOL:+--allocation-pool $Q_FLOATING_ALLOCATION_POOL} " if [[ -n "$PUBLIC_NETWORK_GATEWAY" ]]; then subnet_params+="--gateway $PUBLIC_NETWORK_GATEWAY " From ac958698d0e29cc0bc4bbad4476fc7bc01ed870d Mon Sep 17 00:00:00 2001 From: Ade Lee Date: Wed, 5 Jan 2022 16:23:46 -0500 Subject: [PATCH 168/574] Only set chap algorithms if not openeuler For some reason, setting the CHAPAlgorithms as in c3b705138 breaks OpenEuler. Making this conditional so that tests continue to pass. Change-Id: Iaa740ecfbb9173dd97e90485bad88225caedb523 --- lib/nova | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/lib/nova b/lib/nova index 5a12da6731..4f98d4d52c 100644 --- a/lib/nova +++ b/lib/nova @@ -320,8 +320,12 @@ EOF fi # set chap algorithms. The default chap_algorithm is md5 which will - # not work under FIPS - iniset -sudo /etc/iscsi/iscsid.conf DEFAULT "node.session.auth.chap_algs" "SHA3-256,SHA256" + # not work under FIPS. + # FIXME(alee) For some reason, this breaks openeuler. Openeuler devs should weigh in + # and determine the correct solution for openeuler here + if ! is_openeuler; then + iniset -sudo /etc/iscsi/iscsid.conf DEFAULT "node.session.auth.chap_algs" "SHA3-256,SHA256" + fi # ensure that iscsid is started, even when disabled by default restart_service iscsid From 807330ac370e8d0130cea2a99363cd3299422837 Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Fri, 7 Jan 2022 11:40:54 +0100 Subject: [PATCH 169/574] Fix cloning requirements when GIT_DEPTH is set We always need the master branch of requirements in order to be able to install tempest with it, so override GIT_DEPTH when cloning that repo. Closes-Bug: 1956616 Change-Id: Id0b409bfadd73f2c30314724178d6e199121050b Signed-off-by: Dr. Jens Harbott --- stack.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 0659212ab3..c92cc79b40 100755 --- a/stack.sh +++ b/stack.sh @@ -765,7 +765,9 @@ save_stackenv $LINENO # Bring down global requirements before any use of pip_install. This is # necessary to ensure that the constraints file is in place before we # attempt to apply any constraints to pip installs. -git_clone $REQUIREMENTS_REPO $REQUIREMENTS_DIR $REQUIREMENTS_BRANCH +# We always need the master branch in addition to any stable branch, so +# override GIT_DEPTH here. 
+GIT_DEPTH=0 git_clone $REQUIREMENTS_REPO $REQUIREMENTS_DIR $REQUIREMENTS_BRANCH # Install package requirements # Source it so the entire environment is available From cc6e20b24d22475720f7b938aa08edf9ee7514fb Mon Sep 17 00:00:00 2001 From: Carlos Camacho Date: Fri, 7 Jan 2022 15:30:56 +0100 Subject: [PATCH 170/574] Allow skip the database server installation This patch allows to skip the installation of the database backend packages (MySQL or Postgres) with the introduction of the INSTALL_DATABASE_SERVER_PACKAGES variable (defaulted to True). This is useful in such environments that do not require to install the MySQL/Postgres server packages directly but using a container serving that purpose, for those cases all the remaining steps should be executed just skipping the packages install. Change-Id: I26628a31fdda3ce95ed04a2b7ae7b132c288581f --- lib/databases/mysql | 27 +++++++++++++++------------ lib/databases/postgresql | 20 +++++++++++--------- 2 files changed, 26 insertions(+), 21 deletions(-) diff --git a/lib/databases/mysql b/lib/databases/mysql index 8edbf8c4a4..30e4b7c496 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -12,6 +12,7 @@ _XTRACE_DB_MYSQL=$(set +o | grep xtrace) set +o xtrace MYSQL_DRIVER=${MYSQL_DRIVER:-PyMySQL} +INSTALL_DATABASE_SERVER_PACKAGES=$(trueorfalse True INSTALL_DATABASE_SERVER_PACKAGES) register_database mysql @@ -175,18 +176,20 @@ EOF chmod 0600 $HOME/.my.cnf fi # Install mysql-server - if is_oraclelinux; then - install_package mysql-community-server - elif is_fedora; then - install_package mariadb-server mariadb-devel mariadb - sudo systemctl enable $MYSQL_SERVICE_NAME - elif is_suse; then - install_package mariadb-server - sudo systemctl enable $MYSQL_SERVICE_NAME - elif is_ubuntu; then - install_package $MYSQL_SERVICE_NAME-server - else - exit_distro_not_supported "mysql installation" + if [[ "$INSTALL_DATABASE_SERVER_PACKAGES" == "True" ]]; then + if is_oraclelinux; then + install_package mysql-community-server + elif is_fedora; then + install_package mariadb-server mariadb-devel mariadb + sudo systemctl enable $MYSQL_SERVICE_NAME + elif is_suse; then + install_package mariadb-server + sudo systemctl enable $MYSQL_SERVICE_NAME + elif is_ubuntu; then + install_package $MYSQL_SERVICE_NAME-server + else + exit_distro_not_supported "mysql installation" + fi fi } diff --git a/lib/databases/postgresql b/lib/databases/postgresql index 1f347f5548..4f0a5a0a4c 100644 --- a/lib/databases/postgresql +++ b/lib/databases/postgresql @@ -13,7 +13,7 @@ set +o xtrace MAX_DB_CONNECTIONS=${MAX_DB_CONNECTIONS:-200} - +INSTALL_DATABASE_SERVER_PACKAGES=$(trueorfalse True INSTALL_DATABASE_SERVER_PACKAGES) register_database postgresql @@ -104,15 +104,17 @@ EOF else sed -i "s/:root:\w\+/:root:$DATABASE_PASSWORD/" $pgpass fi - if is_ubuntu; then - install_package postgresql - elif is_fedora || is_suse; then - install_package postgresql-server - if is_fedora; then - sudo systemctl enable postgresql + if [[ "$INSTALL_DATABASE_SERVER_PACKAGES" == "True" ]]; then + if is_ubuntu; then + install_package postgresql + elif is_fedora || is_suse; then + install_package postgresql-server + if is_fedora; then + sudo systemctl enable postgresql + fi + else + exit_distro_not_supported "postgresql installation" fi - else - exit_distro_not_supported "postgresql installation" fi } From c994dc4de23620f74b750932e060306a27193add Mon Sep 17 00:00:00 2001 From: elajkat Date: Thu, 6 Jan 2022 11:28:55 +0100 Subject: [PATCH 171/574] Deprecate lib/neutron lib/neutron-legacy was recently 
undeprecated (see [0]), Openstack CI uses neutron-legacy and latest work was done in it also. To avoid double maintenance lib/neutron can be deprecated. For latest discussion see [1] and [2]. [0]: https://review.opendev.org/c/openstack/devstack/+/704829 [1]: https://meetings.opendev.org/meetings/networking/2022/networking.2022-01-04-14.04.log.html#l-52 [2]: https://meetings.opendev.org/irclogs/%23openstack-qa/%23openstack-qa.2022-01-05.log.html#t2022-01-05T15:57:37 Related-Bug: #1955765 Change-Id: I3fc328b7f47ccd7c1a97cceeea98fb2fbd609017 --- lib/neutron | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/lib/neutron b/lib/neutron index 15d548e33d..e7719d4ebc 100644 --- a/lib/neutron +++ b/lib/neutron @@ -146,6 +146,7 @@ fi # cleanup_neutron() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_neutron_new { + deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!" source $TOP_DIR/lib/neutron_plugins/${NEUTRON_AGENT}_agent if is_neutron_ovs_base_plugin; then neutron_ovs_base_cleanup @@ -169,6 +170,7 @@ function configure_root_helper_options { # configure_neutron() - Set config files, create data dirs, etc function configure_neutron_new { + deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!" sudo install -d -o $STACK_USER $NEUTRON_CONF_DIR (cd $NEUTRON_DIR && exec ./tools/generate_config_file_samples.sh) @@ -359,6 +361,7 @@ function configure_neutron_rootwrap { # Takes a single optional argument which is the config file to update, # if not passed $NOVA_CONF is used. function configure_neutron_nova_new { + deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!" local conf=${1:-$NOVA_CONF} iniset $conf neutron auth_type "password" iniset $conf neutron auth_url "$KEYSTONE_SERVICE_URI" @@ -385,6 +388,7 @@ function configure_neutron_nova_new { # create_neutron_accounts() - Create required service accounts function create_neutron_accounts_new { + deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!" local neutron_url if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then @@ -408,6 +412,7 @@ function create_neutron_accounts_new { # init_neutron() - Initialize databases, etc. function init_neutron_new { + deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!" recreate_database neutron time_start "dbsync" @@ -418,6 +423,7 @@ function init_neutron_new { # install_neutron() - Collect source and prepare function install_neutron_new { + deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!" git_clone $NEUTRON_REPO $NEUTRON_DIR $NEUTRON_BRANCH setup_develop $NEUTRON_DIR @@ -491,6 +497,7 @@ function start_neutron_api { # start_neutron() - Start running processes function start_neutron_new { + deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!" # Start up the neutron agents if enabled # TODO(sc68cal) Make this pluggable so different DevStack plugins for different Neutron plugins # can resolve the $NEUTRON_AGENT_BINARY @@ -528,6 +535,7 @@ function start_neutron_new { # stop_neutron() - Stop running processes function stop_neutron_new { + deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!" 
for serv in neutron-api neutron-agent neutron-l3; do stop_process $serv done @@ -550,6 +558,7 @@ function stop_neutron_new { # neutron_service_plugin_class_add() - add service plugin class function neutron_service_plugin_class_add_new { + deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!" local service_plugin_class=$1 local plugins="" @@ -574,11 +583,13 @@ function _neutron_ml2_extension_driver_add { } function neutron_server_config_add_new { + deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!" _NEUTRON_SERVER_EXTRA_CONF_FILES_ABS+=($1) } # neutron_deploy_rootwrap_filters() - deploy rootwrap filters function neutron_deploy_rootwrap_filters_new { + deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!" local srcdir=$1 sudo install -d -o root -g root -m 755 $NEUTRON_CONF_DIR/rootwrap.d sudo install -o root -g root -m 644 $srcdir/etc/neutron/rootwrap.d/*.filters $NEUTRON_CONF_DIR/rootwrap.d From d5d0bed479497560489983ae1fc80444b44fe029 Mon Sep 17 00:00:00 2001 From: yatinkarel Date: Mon, 17 Jan 2022 12:04:16 +0530 Subject: [PATCH 172/574] Workaround CentOS 8-stream bug until fixed Recent iputils release in CentOS 8-stream causing ping failures with non root user. This needs a fix in systemd package as mentioned in the Related Bugs, until it's fixed and is in 8-stream mirrors let's workaround it by setting net.ipv4.ping_group_range setting manually. Related-Bug: #1957941 Related-Bug: rhbz#2037807 Change-Id: I0d8dac910647968b625020c2a94e626ba5255058 --- tools/fixup_stuff.sh | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index 750849db68..f24ac40ad5 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -83,6 +83,11 @@ function fixup_fedora { if is_package_installed python3-setuptools; then sudo dnf reinstall -y python3-setuptools fi + # Workaround CentOS 8-stream iputils and systemd Bug + # https://bugzilla.redhat.com/show_bug.cgi?id=2037807 + if [[ $os_VENDOR == "CentOSStream" && $os_RELEASE -eq 8 ]]; then + sudo sysctl -w net.ipv4.ping_group_range='0 2147483647' + fi } function fixup_suse { From 0a31630323cc172561d6544c8bee50004538cfb2 Mon Sep 17 00:00:00 2001 From: Pierre Riteau Date: Fri, 21 Jan 2022 10:07:07 +0100 Subject: [PATCH 173/574] Adapt compute node local.conf to OVN The default Neutron configuration is now using OVN, but the multinode lab was using an incompatible configuration: The q-agt/neutron-agt service must be disabled with OVN. Change-Id: I518a739a3daac941880463cde6b47951331d0911 --- doc/source/guides/multinode-lab.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/guides/multinode-lab.rst b/doc/source/guides/multinode-lab.rst index c0b3f58157..f62e7a969c 100644 --- a/doc/source/guides/multinode-lab.rst +++ b/doc/source/guides/multinode-lab.rst @@ -169,7 +169,7 @@ machines, create a ``local.conf`` with: MYSQL_HOST=$SERVICE_HOST RABBIT_HOST=$SERVICE_HOST GLANCE_HOSTPORT=$SERVICE_HOST:9292 - ENABLED_SERVICES=n-cpu,q-agt,c-vol,placement-client + ENABLED_SERVICES=n-cpu,c-vol,placement-client,ovn-controller,ovs-vswitchd,ovsdb-server,q-ovn-metadata-agent NOVA_VNC_ENABLED=True NOVNCPROXY_URL="http://$SERVICE_HOST:6080/vnc_lite.html" VNCSERVER_LISTEN=$HOST_IP From d6909e41af4b776e68fb133a31ff086fdaff38ff Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Sat, 22 Jan 2022 13:54:12 +0100 Subject: [PATCH 174/574] Use distro pip on Ubuntu Running get-pip.py fails on Ubuntu when running twice, e.g. 
after a unstack/stack cycle. Just use distro pip instead. Closes-Bug: #1957048 Signed-off-by: Dr. Jens Harbott Change-Id: I87a8d53ed8860dd017a6c826dee6b6f4baef3c96 --- tools/install_pip.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/install_pip.sh b/tools/install_pip.sh index 259375a150..5d73a1f0d8 100755 --- a/tools/install_pip.sh +++ b/tools/install_pip.sh @@ -129,6 +129,8 @@ if is_fedora && [[ ${DISTRO} == f* || ${DISTRO} == rhel9 ]]; then # For general sanity, we just use the packaged pip. It should be # recent enough anyway. This is included via rpms/general : # Simply fall through +elif is_ubuntu; then + : # pip on Ubuntu 20.04 is new enough, too else install_get_pip fi From ae40825df618a6b8164be8345f5cc1b11a2dc614 Mon Sep 17 00:00:00 2001 From: Grzegorz Grasza Date: Tue, 26 Oct 2021 10:37:07 +0200 Subject: [PATCH 175/574] Use devstack-system-admin for keystone objects creation This is needed so we can set keystone into enforcing secure RBAC. This also adjusts lib/glance, which already partially used devstack-system-admin. Change-Id: I6df8ad23a3077a8420340167a748ae23ad094962 --- functions-common | 46 +++++++++++++++++++++++----------------------- lib/glance | 6 +++--- 2 files changed, 26 insertions(+), 26 deletions(-) diff --git a/functions-common b/functions-common index 7042408f40..7a83b4bc5c 100644 --- a/functions-common +++ b/functions-common @@ -867,10 +867,10 @@ function get_or_create_domain { # Gets domain id domain_id=$( # Gets domain id - openstack domain show $1 \ + openstack --os-cloud devstack-system-admin domain show $1 \ -f value -c id 2>/dev/null || # Creates new domain - openstack domain create $1 \ + openstack --os-cloud devstack-system-admin domain create $1 \ --description "$2" \ -f value -c id ) @@ -885,7 +885,7 @@ function get_or_create_group { # Gets group id group_id=$( # Creates new group with --or-show - openstack group create $1 \ + openstack --os-cloud devstack-system-admin group create $1 \ --domain $2 --description "$desc" --or-show \ -f value -c id ) @@ -904,7 +904,7 @@ function get_or_create_user { # Gets user id user_id=$( # Creates new user with --or-show - openstack user create \ + openstack --os-cloud devstack-system-admin user create \ $1 \ --password "$2" \ --domain=$3 \ @@ -921,7 +921,7 @@ function get_or_create_project { local project_id project_id=$( # Creates new project with --or-show - openstack project create $1 \ + openstack --os-cloud devstack-system-admin project create $1 \ --domain=$2 \ --or-show -f value -c id ) @@ -934,7 +934,7 @@ function get_or_create_role { local role_id role_id=$( # Creates role with --or-show - openstack role create $1 \ + openstack --os-cloud devstack-system-admin role create $1 \ --or-show -f value -c id ) echo $role_id @@ -964,7 +964,7 @@ function get_or_add_user_project_role { domain_args=$(_get_domain_args $4 $5) # Gets user role id - user_role_id=$(openstack role assignment list \ + user_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \ --role $1 \ --user $2 \ --project $3 \ @@ -972,11 +972,11 @@ function get_or_add_user_project_role { | grep '^|\s[a-f0-9]\+' | get_field 1) if [[ -z "$user_role_id" ]]; then # Adds role to user and get it - openstack role add $1 \ + openstack --os-cloud devstack-system-admin role add $1 \ --user $2 \ --project $3 \ $domain_args - user_role_id=$(openstack role assignment list \ + user_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \ --role $1 \ --user $2 \ --project $3 \ @@ -991,17 +991,17 @@ function 
get_or_add_user_project_role { function get_or_add_user_domain_role { local user_role_id # Gets user role id - user_role_id=$(openstack role assignment list \ + user_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \ --role $1 \ --user $2 \ --domain $3 \ | grep '^|\s[a-f0-9]\+' | get_field 1) if [[ -z "$user_role_id" ]]; then # Adds role to user and get it - openstack role add $1 \ + openstack --os-cloud devstack-system-admin role add $1 \ --user $2 \ --domain $3 - user_role_id=$(openstack role assignment list \ + user_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \ --role $1 \ --user $2 \ --domain $3 \ @@ -1019,7 +1019,7 @@ function get_or_add_user_system_role { domain_args=$(_get_domain_args $4) # Gets user role id - user_role_id=$(openstack role assignment list \ + user_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \ --role $1 \ --user $2 \ --system $3 \ @@ -1027,11 +1027,11 @@ function get_or_add_user_system_role { -f value -c Role) if [[ -z "$user_role_id" ]]; then # Adds role to user and get it - openstack role add $1 \ + openstack --os-cloud devstack-system-admin role add $1 \ --user $2 \ --system $3 \ $domain_args - user_role_id=$(openstack role assignment list \ + user_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \ --role $1 \ --user $2 \ --system $3 \ @@ -1046,17 +1046,17 @@ function get_or_add_user_system_role { function get_or_add_group_project_role { local group_role_id # Gets group role id - group_role_id=$(openstack role assignment list \ + group_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \ --role $1 \ --group $2 \ --project $3 \ -f value) if [[ -z "$group_role_id" ]]; then # Adds role to group and get it - openstack role add $1 \ + openstack --os-cloud devstack-system-admin role add $1 \ --group $2 \ --project $3 - group_role_id=$(openstack role assignment list \ + group_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \ --role $1 \ --group $2 \ --project $3 \ @@ -1072,9 +1072,9 @@ function get_or_create_service { # Gets service id service_id=$( # Gets service id - openstack service show $2 -f value -c id 2>/dev/null || + openstack --os-cloud devstack-system-admin service show $2 -f value -c id 2>/dev/null || # Creates new service if not exists - openstack service create \ + openstack --os-cloud devstack-system-admin service create \ $2 \ --name $1 \ --description="$3" \ @@ -1087,14 +1087,14 @@ function get_or_create_service { # Usage: _get_or_create_endpoint_with_interface function _get_or_create_endpoint_with_interface { local endpoint_id - endpoint_id=$(openstack endpoint list \ + endpoint_id=$(openstack --os-cloud devstack-system-admin endpoint list \ --service $1 \ --interface $2 \ --region $4 \ -c ID -f value) if [[ -z "$endpoint_id" ]]; then # Creates new endpoint - endpoint_id=$(openstack endpoint create \ + endpoint_id=$(openstack --os-cloud devstack-system-admin endpoint create \ $1 $2 $3 --region $4 -f value -c id) fi @@ -1128,7 +1128,7 @@ function get_or_create_endpoint { # Get a URL from the identity service # Usage: get_endpoint_url function get_endpoint_url { - echo $(openstack endpoint list \ + echo $(openstack --os-cloud devstack-system-admin endpoint list \ --service $1 --interface $2 \ -c URL -f value) } diff --git a/lib/glance b/lib/glance index 4c2755f76f..9bba938b9d 100644 --- a/lib/glance +++ b/lib/glance @@ -311,11 +311,11 @@ function configure_glance_quotas { iniset $GLANCE_API_CONF 
oslo_limit auth_url $KEYSTONE_SERVICE_URI iniset $GLANCE_API_CONF oslo_limit system_scope "'all'" iniset $GLANCE_API_CONF oslo_limit endpoint_id \ - $(openstack endpoint list --service glance -f value -c ID) + $(openstack --os-cloud devstack-system-admin endpoint list --service glance -f value -c ID) # Allow the glance service user to read quotas - openstack role add --user glance --user-domain Default --system all \ - reader + openstack --os-cloud devstack-system-admin role add --user glance --user-domain Default \ + --system all reader } # configure_glance() - Set config files, create data dirs, etc From 5f5002a3781e255a16711f99cb784a28d6f27258 Mon Sep 17 00:00:00 2001 From: Grzegorz Grasza Date: Tue, 26 Oct 2021 10:50:37 +0200 Subject: [PATCH 176/574] Revert "Revert "Add enforce_scope setting support for keystone"" This reverts commit 26bd94b45efb63683072006e4281dd34a313d881. Reason for revert: Devstack keystone creation/setup has been moved to scoped tokens, so we can re-enable the scope check. Change-Id: I6e1c261196dbcaf632748fb6f04e0867648b76c7 --- lib/keystone | 11 +++++++++++ lib/tempest | 9 +++++++++ 2 files changed, 20 insertions(+) diff --git a/lib/keystone b/lib/keystone index b953972dd3..a4c8a52121 100644 --- a/lib/keystone +++ b/lib/keystone @@ -124,6 +124,12 @@ KEYSTONE_ENABLE_CACHE=${KEYSTONE_ENABLE_CACHE:-True} # Whether to create a keystone admin endpoint for legacy applications KEYSTONE_ADMIN_ENDPOINT=$(trueorfalse False KEYSTONE_ADMIN_ENDPOINT) +# Flag to set the oslo_policy.enforce_scope. This is used to switch +# the Identity API policies to start checking the scope of token. By Default, +# this flag is False. +# For more detail: https://docs.openstack.org/oslo.policy/latest/configuration/index.html#oslo_policy.enforce_scope +KEYSTONE_ENFORCE_SCOPE=$(trueorfalse False KEYSTONE_ENFORCE_SCOPE) + # Functions # --------- @@ -259,6 +265,11 @@ function configure_keystone { iniset $KEYSTONE_CONF security_compliance lockout_duration $KEYSTONE_LOCKOUT_DURATION iniset $KEYSTONE_CONF security_compliance unique_last_password_count $KEYSTONE_UNIQUE_LAST_PASSWORD_COUNT fi + if [[ "$KEYSTONE_ENFORCE_SCOPE" == True ]] ; then + iniset $KEYSTONE_CONF oslo_policy enforce_scope true + iniset $KEYSTONE_CONF oslo_policy enforce_new_defaults true + iniset $KEYSTONE_CONF oslo_policy policy_file policy.yaml + fi } # create_keystone_accounts() - Sets up common required keystone accounts diff --git a/lib/tempest b/lib/tempest index adffeda371..9d5e1fce9f 100644 --- a/lib/tempest +++ b/lib/tempest @@ -608,6 +608,15 @@ function configure_tempest { fi done + # ``enforce_scope`` + # If services enable the enforce_scope for their policy + # we need to enable the same on Tempest side so that + # test can be run with scoped token. + if [[ "$KEYSTONE_ENFORCE_SCOPE" == True ]] ; then + iniset $TEMPEST_CONFIG enforce_scope keystone true + iniset $TEMPEST_CONFIG auth admin_system 'all' + iniset $TEMPEST_CONFIG auth admin_project_name '' + fi iniset $TEMPEST_CONFIG enforce_scope glance "$GLANCE_ENFORCE_SCOPE" iniset $TEMPEST_CONFIG enforce_scope cinder "$CINDER_ENFORCE_SCOPE" From be7b5bf671b4cdc082fb9b7bb73ec55cab0054dd Mon Sep 17 00:00:00 2001 From: Slawek Kaplonski Date: Thu, 27 Jan 2022 16:04:32 +0100 Subject: [PATCH 177/574] Disable enforcing scopes in Neutron temporarily After patch [1] was merged in Neutron, enforcing scopes there is broken. So let's disable it temporarily to unblock Devstack's gate for now.
[1] https://review.opendev.org/c/openstack/neutron/+/821208 Related-Bug: #1959196 Change-Id: I24da6f3897a638749d16f738329a873a5f9a291d --- .zuul.yaml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.zuul.yaml b/.zuul.yaml index 248a56beb9..5a58d743fe 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -619,7 +619,9 @@ # Keep enabeling the services here to run with system scope CINDER_ENFORCE_SCOPE: true GLANCE_ENFORCE_SCOPE: true - NEUTRON_ENFORCE_SCOPE: true + # TODO(slaweq): Enable enforce scopes in Neutron when bug + # https://bugs.launchpad.net/neutron/+bug/1959196 will be fixed + # NEUTRON_ENFORCE_SCOPE: true - job: name: devstack-multinode From 1fd45940f370dc3aab6a5e9492c36e735f673c8a Mon Sep 17 00:00:00 2001 From: Ade Lee Date: Tue, 25 Jan 2022 16:44:36 -0500 Subject: [PATCH 178/574] Add openstack-two-node-centos-8-stream This will allow multinode FIPS testing Change-Id: I82b3b8fe56275aed72e13f6d1bd9170c50e5da0d --- .zuul.yaml | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/.zuul.yaml b/.zuul.yaml index 248a56beb9..ea7708f751 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -146,6 +146,36 @@ nodes: - compute1 +- nodeset: + name: openstack-two-node-centos-8-stream + nodes: + - name: controller + label: centos-8-stream + - name: compute1 + label: centos-8-stream + groups: + # Node where tests are executed and test results collected + - name: tempest + nodes: + - controller + # Nodes running the compute service + - name: compute + nodes: + - controller + - compute1 + # Nodes that are not the controller + - name: subnode + nodes: + - compute1 + # Switch node for multinode networking setup + - name: switch + nodes: + - controller + # Peer nodes for multinode networking setup + - name: peers + nodes: + - compute1 + - nodeset: name: openstack-two-node-focal nodes: From 14a0c09001e8e2304eff4918206163cc7e6db1eb Mon Sep 17 00:00:00 2001 From: Slawek Kaplonski Date: Fri, 28 Jan 2022 09:44:40 +0100 Subject: [PATCH 179/574] Fix deployment of Neutron with enforced scopes After patch [1] new RBAC policies changed in the way that SYSTEM_ADMIN user isn't anymore allowed to e.g. create resources in behalf of some projects. Now PROJECT_ADMIN needs to create such resources instead. So this patch basically reverts most of the changes which were done in [2] some time ago. It also introduces new entry in the clouds.yaml file - "devstack-admin-demo" which is "admin" user in the "demo" project as it's needed to create some resouces in the demo project now. Additionally, because of bug [3] this patch changes way how IPv6 external gateway IP is found using Neutron API. This change may be reverted in the future when bug [3] will be fixed. 
[1] https://review.opendev.org/c/openstack/neutron/+/821208 [2] https://review.opendev.org/c/openstack/devstack/+/797450 [3] https://bugs.launchpad.net/neutron/+bug/1959332 Depends-On: https://review.opendev.org/c/openstack/neutron/+/826828 Closes-Bug: #1959196 Change-Id: I32a6e8b9b59269a8699644b563657363425f7174 --- functions-common | 11 +++++ lib/neutron_plugins/services/l3 | 73 ++++++++++++++------------------- lib/tempest | 9 +--- 3 files changed, 44 insertions(+), 49 deletions(-) diff --git a/functions-common b/functions-common index 7042408f40..b407ca5fe6 100644 --- a/functions-common +++ b/functions-common @@ -107,6 +107,17 @@ function write_clouds_yaml { --os-password $ADMIN_PASSWORD \ --os-project-name admin + # devstack-admin-demo: user with the admin role on the demo project + $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \ + --file $CLOUDS_YAML \ + --os-cloud devstack-admin-demo \ + --os-region-name $REGION_NAME \ + $CA_CERT_ARG \ + --os-auth-url $KEYSTONE_SERVICE_URI \ + --os-username admin \ + --os-password $ADMIN_PASSWORD \ + --os-project-name demo + # devstack-alt: user with the member role on alt_demo project $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \ --file $CLOUDS_YAML \ diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3 index a8844c475e..cd98115746 100644 --- a/lib/neutron_plugins/services/l3 +++ b/lib/neutron_plugins/services/l3 @@ -100,11 +100,6 @@ SUBNETPOOL_PREFIX_V6=${SUBNETPOOL_PREFIX_V6:-$IPV6_ADDRS_SAFE_TO_USE} SUBNETPOOL_SIZE_V4=${SUBNETPOOL_SIZE_V4:-26} SUBNETPOOL_SIZE_V6=${SUBNETPOOL_SIZE_V6:-64} -NEUTRON_ADMIN_CLOUD_NAME="devstack-admin" -if [ "$NEUTRON_ENFORCE_SCOPE" == "True" ]; then - NEUTRON_ADMIN_CLOUD_NAME="devstack-system-admin" -fi - default_v4_route_devs=$(ip -4 route | grep ^default | awk '{print $5}') default_v6_route_devs=$(ip -6 route list match default table all | grep via | awk '{print $5}') @@ -156,10 +151,6 @@ function create_neutron_initial_network { project_id=$(openstack project list | grep " demo " | get_field 1) die_if_not_set $LINENO project_id "Failure retrieving project_id for demo" - local admin_project_id - admin_project_id=$(openstack project list | grep " admin " | get_field 1) - die_if_not_set $LINENO admin_project_id "Failure retrieving project_id for admin" - # Allow drivers that need to create an initial network to do so here if type -p neutron_plugin_create_initial_network_profile > /dev/null; then neutron_plugin_create_initial_network_profile $PHYSICAL_NETWORK @@ -168,10 +159,10 @@ function create_neutron_initial_network { if is_networking_extension_supported "auto-allocated-topology"; then if [[ "$USE_SUBNETPOOL" == "True" ]]; then if [[ "$IP_VERSION" =~ 4.* ]]; then - SUBNETPOOL_V4_ID=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" subnet pool create $SUBNETPOOL_NAME_V4 --project "$admin_project_id" --default-prefix-length $SUBNETPOOL_SIZE_V4 --pool-prefix $SUBNETPOOL_PREFIX_V4 --share --default -f value -c id) + SUBNETPOOL_V4_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet pool create $SUBNETPOOL_NAME_V4 --default-prefix-length $SUBNETPOOL_SIZE_V4 --pool-prefix $SUBNETPOOL_PREFIX_V4 --share --default -f value -c id) fi if [[ "$IP_VERSION" =~ .*6 ]]; then - SUBNETPOOL_V6_ID=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" subnet pool create $SUBNETPOOL_NAME_V6 --project "$admin_project_id" --default-prefix-length $SUBNETPOOL_SIZE_V6 --pool-prefix $SUBNETPOOL_PREFIX_V6 --share --default -f value -c id) + 
SUBNETPOOL_V6_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet pool create $SUBNETPOOL_NAME_V6 --default-prefix-length $SUBNETPOOL_SIZE_V6 --pool-prefix $SUBNETPOOL_PREFIX_V6 --share --default -f value -c id) fi fi fi @@ -179,14 +170,14 @@ function create_neutron_initial_network { if is_provider_network; then die_if_not_set $LINENO PHYSICAL_NETWORK "You must specify the PHYSICAL_NETWORK" die_if_not_set $LINENO PROVIDER_NETWORK_TYPE "You must specify the PROVIDER_NETWORK_TYPE" - NET_ID=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" network create $PHYSICAL_NETWORK --project $project_id --provider-network-type $PROVIDER_NETWORK_TYPE --provider-physical-network "$PHYSICAL_NETWORK" ${SEGMENTATION_ID:+--provider-segment $SEGMENTATION_ID} --share | grep ' id ' | get_field 2) + NET_ID=$(openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" network create $PHYSICAL_NETWORK --provider-network-type $PROVIDER_NETWORK_TYPE --provider-physical-network "$PHYSICAL_NETWORK" ${SEGMENTATION_ID:+--provider-segment $SEGMENTATION_ID} --share | grep ' id ' | get_field 2) die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PHYSICAL_NETWORK $project_id" if [[ "$IP_VERSION" =~ 4.* ]]; then if [ -z $SUBNETPOOL_V4_ID ]; then fixed_range_v4=$FIXED_RANGE fi - SUBNET_ID=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" subnet create --project $project_id --ip-version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} $PROVIDER_SUBNET_NAME --gateway $NETWORK_GATEWAY ${SUBNETPOOL_V4_ID:+--subnet-pool $SUBNETPOOL_V4_ID} --network $NET_ID ${fixed_range_v4:+--subnet-range $fixed_range_v4} | grep ' id ' | get_field 2) + SUBNET_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" subnet create --ip-version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} $PROVIDER_SUBNET_NAME --gateway $NETWORK_GATEWAY ${SUBNETPOOL_V4_ID:+--subnet-pool $SUBNETPOOL_V4_ID} --network $NET_ID ${fixed_range_v4:+--subnet-range $fixed_range_v4} | grep ' id ' | get_field 2) die_if_not_set $LINENO SUBNET_ID "Failure creating SUBNET_ID for $PROVIDER_SUBNET_NAME $project_id" fi @@ -196,7 +187,7 @@ function create_neutron_initial_network { if [ -z $SUBNETPOOL_V6_ID ]; then fixed_range_v6=$IPV6_PROVIDER_FIXED_RANGE fi - IPV6_SUBNET_ID=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" subnet create --project $project_id --ip-version 6 --gateway $IPV6_PROVIDER_NETWORK_GATEWAY $IPV6_PROVIDER_SUBNET_NAME ${SUBNETPOOL_V6_ID:+--subnet-pool $SUBNETPOOL_V6_ID} --network $NET_ID ${fixed_range_v6:+--subnet-range $fixed_range_v6} | grep ' id ' | get_field 2) + IPV6_SUBNET_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" subnet create --ip-version 6 --gateway $IPV6_PROVIDER_NETWORK_GATEWAY $IPV6_PROVIDER_SUBNET_NAME ${SUBNETPOOL_V6_ID:+--subnet-pool $SUBNETPOOL_V6_ID} --network $NET_ID ${fixed_range_v6:+--subnet-range $fixed_range_v6} | grep ' id ' | get_field 2) die_if_not_set $LINENO IPV6_SUBNET_ID "Failure creating IPV6_SUBNET_ID for $IPV6_PROVIDER_SUBNET_NAME $project_id" fi @@ -206,7 +197,7 @@ function create_neutron_initial_network { sudo ip link set $PUBLIC_INTERFACE up fi else - NET_ID=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" network create --project $project_id "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2) + NET_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" network create "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2) 
die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PRIVATE_NETWORK_NAME $project_id" if [[ "$IP_VERSION" =~ 4.* ]]; then @@ -224,11 +215,11 @@ function create_neutron_initial_network { # Create a router, and add the private subnet as one of its interfaces if [[ "$Q_L3_ROUTER_PER_TENANT" == "True" ]]; then # create a tenant-owned router. - ROUTER_ID=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" router create --project $project_id $Q_ROUTER_NAME | grep ' id ' | get_field 2) + ROUTER_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" router create $Q_ROUTER_NAME | grep ' id ' | get_field 2) die_if_not_set $LINENO ROUTER_ID "Failure creating ROUTER_ID for $project_id $Q_ROUTER_NAME" else # Plugin only supports creating a single router, which should be admin owned. - ROUTER_ID=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" router create $Q_ROUTER_NAME --project $admin_project_id | grep ' id ' | get_field 2) + ROUTER_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router create $Q_ROUTER_NAME | grep ' id ' | get_field 2) die_if_not_set $LINENO ROUTER_ID "Failure creating ROUTER_ID for $Q_ROUTER_NAME" fi @@ -238,9 +229,9 @@ function create_neutron_initial_network { fi # Create an external network, and a subnet. Configure the external network as router gw if [ "$Q_USE_PROVIDERNET_FOR_PUBLIC" = "True" ]; then - EXT_NET_ID=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" network create "$PUBLIC_NETWORK_NAME" $EXTERNAL_NETWORK_FLAGS --provider-network-type ${PUBLIC_PROVIDERNET_TYPE:-flat} ${PUBLIC_PROVIDERNET_SEGMENTATION_ID:+--provider-segment $PUBLIC_PROVIDERNET_SEGMENTATION_ID} --provider-physical-network ${PUBLIC_PHYSICAL_NETWORK} --project $admin_project_id | grep ' id ' | get_field 2) + EXT_NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create "$PUBLIC_NETWORK_NAME" $EXTERNAL_NETWORK_FLAGS --provider-network-type ${PUBLIC_PROVIDERNET_TYPE:-flat} ${PUBLIC_PROVIDERNET_SEGMENTATION_ID:+--provider-segment $PUBLIC_PROVIDERNET_SEGMENTATION_ID} --provider-physical-network ${PUBLIC_PHYSICAL_NETWORK} | grep ' id ' | get_field 2) else - EXT_NET_ID=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" network create "$PUBLIC_NETWORK_NAME" $EXTERNAL_NETWORK_FLAGS --project $admin_project_id | grep ' id ' | get_field 2) + EXT_NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create "$PUBLIC_NETWORK_NAME" $EXTERNAL_NETWORK_FLAGS | grep ' id ' | get_field 2) fi die_if_not_set $LINENO EXT_NET_ID "Failure creating EXT_NET_ID for $PUBLIC_NETWORK_NAME" @@ -262,8 +253,7 @@ function _neutron_create_private_subnet_v4 { if [ -z $SUBNETPOOL_V4_ID ]; then fixed_range_v4=$FIXED_RANGE fi - local subnet_params="--project $project_id " - subnet_params+="--ip-version 4 " + local subnet_params="--ip-version 4 " if [[ -n "$NETWORK_GATEWAY" ]]; then subnet_params+="--gateway $NETWORK_GATEWAY " fi @@ -272,7 +262,7 @@ function _neutron_create_private_subnet_v4 { subnet_params+="${fixed_range_v4:+--subnet-range $fixed_range_v4} " subnet_params+="--network $NET_ID $PRIVATE_SUBNET_NAME" local subnet_id - subnet_id=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" subnet create $subnet_params | grep ' id ' | get_field 2) + subnet_id=$(openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" subnet create $subnet_params | grep ' id ' | get_field 2) die_if_not_set $LINENO subnet_id 
"Failure creating private IPv4 subnet for $project_id" echo $subnet_id } @@ -286,8 +276,7 @@ function _neutron_create_private_subnet_v6 { if [ -z $SUBNETPOOL_V6_ID ]; then fixed_range_v6=$FIXED_RANGE_V6 fi - local subnet_params="--project $project_id " - subnet_params+="--ip-version 6 " + local subnet_params="--ip-version 6 " if [[ -n "$IPV6_PRIVATE_NETWORK_GATEWAY" ]]; then subnet_params+="--gateway $IPV6_PRIVATE_NETWORK_GATEWAY " fi @@ -295,17 +284,14 @@ function _neutron_create_private_subnet_v6 { subnet_params+="${fixed_range_v6:+--subnet-range $fixed_range_v6} " subnet_params+="$ipv6_modes --network $NET_ID $IPV6_PRIVATE_SUBNET_NAME " local ipv6_subnet_id - ipv6_subnet_id=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" subnet create $subnet_params | grep ' id ' | get_field 2) + ipv6_subnet_id=$(openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" subnet create $subnet_params | grep ' id ' | get_field 2) die_if_not_set $LINENO ipv6_subnet_id "Failure creating private IPv6 subnet for $project_id" echo $ipv6_subnet_id } # Create public IPv4 subnet function _neutron_create_public_subnet_v4 { - local admin_project_id - admin_project_id=$(openstack project list | grep " admin " | get_field 1) - die_if_not_set $LINENO admin_project_id "Failure retrieving project_id for admin" - local subnet_params="--ip-version 4 --project $admin_project_id " + local subnet_params="--ip-version 4 " subnet_params+="${Q_FLOATING_ALLOCATION_POOL:+--allocation-pool $Q_FLOATING_ALLOCATION_POOL} " if [[ -n "$PUBLIC_NETWORK_GATEWAY" ]]; then subnet_params+="--gateway $PUBLIC_NETWORK_GATEWAY " @@ -313,29 +299,26 @@ function _neutron_create_public_subnet_v4 { subnet_params+="--network $EXT_NET_ID --subnet-range $FLOATING_RANGE --no-dhcp " subnet_params+="$PUBLIC_SUBNET_NAME" local id_and_ext_gw_ip - id_and_ext_gw_ip=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" subnet create $subnet_params | grep -e 'gateway_ip' -e ' id ') + id_and_ext_gw_ip=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create $subnet_params | grep -e 'gateway_ip' -e ' id ') die_if_not_set $LINENO id_and_ext_gw_ip "Failure creating public IPv4 subnet" echo $id_and_ext_gw_ip } # Create public IPv6 subnet function _neutron_create_public_subnet_v6 { - local admin_project_id - admin_project_id=$(openstack project list | grep " admin " | get_field 1) - die_if_not_set $LINENO admin_project_id "Failure retrieving project_id for admin" - local subnet_params="--ip-version 6 --project $admin_project_id " + local subnet_params="--ip-version 6 " subnet_params+="--gateway $IPV6_PUBLIC_NETWORK_GATEWAY " subnet_params+="--network $EXT_NET_ID --subnet-range $IPV6_PUBLIC_RANGE --no-dhcp " subnet_params+="$IPV6_PUBLIC_SUBNET_NAME" local ipv6_id_and_ext_gw_ip - ipv6_id_and_ext_gw_ip=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" subnet create $subnet_params | grep -e 'gateway_ip' -e ' id ') + ipv6_id_and_ext_gw_ip=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create $subnet_params | grep -e 'gateway_ip' -e ' id ') die_if_not_set $LINENO ipv6_id_and_ext_gw_ip "Failure creating an IPv6 public subnet" echo $ipv6_id_and_ext_gw_ip } # Configure neutron router for IPv4 public access function _neutron_configure_router_v4 { - openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" router add subnet $ROUTER_ID $SUBNET_ID + openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" router add 
subnet $ROUTER_ID $SUBNET_ID # Create a public subnet on the external network local id_and_ext_gw_ip id_and_ext_gw_ip=$(_neutron_create_public_subnet_v4 $EXT_NET_ID) @@ -343,7 +326,7 @@ function _neutron_configure_router_v4 { ext_gw_ip=$(echo $id_and_ext_gw_ip | get_field 2) PUB_SUBNET_ID=$(echo $id_and_ext_gw_ip | get_field 5) # Configure the external network as the default router gateway - openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" router set --external-gateway $EXT_NET_ID $ROUTER_ID + openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" router set --external-gateway $EXT_NET_ID $ROUTER_ID # This logic is specific to using OVN or the l3-agent for layer 3 if ([[ $Q_AGENT == "ovn" ]] && [[ "$OVN_L3_CREATE_PUBLIC_NETWORK" == "True" ]] && is_service_enabled q-svc neutron-server) || is_service_enabled q-l3 neutron-l3; then @@ -370,7 +353,7 @@ function _neutron_configure_router_v4 { sudo ip addr add $ext_gw_ip/$cidr_len dev $ext_gw_interface sudo ip link set $ext_gw_interface up fi - ROUTER_GW_IP=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" port list -c 'Fixed IP Addresses' --device-owner network:router_gateway | awk -F'ip_address' '{ print $2 }' | cut -f2 -d\' | tr '\n' ' ') + ROUTER_GW_IP=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" port list -c 'Fixed IP Addresses' --device-owner network:router_gateway | awk -F'ip_address' '{ print $2 }' | cut -f2 -d\' | tr '\n' ' ') die_if_not_set $LINENO ROUTER_GW_IP "Failure retrieving ROUTER_GW_IP" fi _neutron_set_router_id @@ -379,7 +362,7 @@ function _neutron_configure_router_v4 { # Configure neutron router for IPv6 public access function _neutron_configure_router_v6 { - openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" router add subnet $ROUTER_ID $IPV6_SUBNET_ID + openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" router add subnet $ROUTER_ID $IPV6_SUBNET_ID # Create a public subnet on the external network local ipv6_id_and_ext_gw_ip ipv6_id_and_ext_gw_ip=$(_neutron_create_public_subnet_v6 $EXT_NET_ID) @@ -391,7 +374,7 @@ function _neutron_configure_router_v6 { # If the external network has not already been set as the default router # gateway when configuring an IPv4 public subnet, do so now if [[ "$IP_VERSION" == "6" ]]; then - openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" router set --external-gateway $EXT_NET_ID $ROUTER_ID + openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" router set --external-gateway $EXT_NET_ID $ROUTER_ID fi # This logic is specific to using OVN or the l3-agent for layer 3 @@ -412,7 +395,13 @@ function _neutron_configure_router_v6 { sudo sysctl -w net.ipv6.conf.all.forwarding=1 # Configure and enable public bridge # Override global IPV6_ROUTER_GW_IP with the true value from neutron - IPV6_ROUTER_GW_IP=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" port list -c 'Fixed IP Addresses' | grep $ipv6_pub_subnet_id | awk -F'ip_address' '{ print $2 }' | cut -f2 -d\' | tr '\n' ' ') + # NOTE(slaweq): when enforce scopes is enabled in Neutron, router's + # gateway ports aren't visible in API because such ports don't belongs + # to any tenant. Because of that, at least temporary we need to find + # IPv6 address of the router's gateway in a bit different way. 
+ # It can be reverted when bug + # https://bugs.launchpad.net/neutron/+bug/1959332 will be fixed + IPV6_ROUTER_GW_IP=$(openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" router show $ROUTER_ID -c external_gateway_info -f json | grep -C 1 $ipv6_pub_subnet_id | grep ip_address | awk '{print $2}' | tr -d '"') die_if_not_set $LINENO IPV6_ROUTER_GW_IP "Failure retrieving IPV6_ROUTER_GW_IP" if is_neutron_ovs_base_plugin; then @@ -440,7 +429,7 @@ function _neutron_configure_router_v6 { function is_networking_extension_supported { local extension=$1 # TODO(sc68cal) cache this instead of calling every time - EXT_LIST=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" extension list --network -c Alias -f value) + EXT_LIST=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" extension list --network -c Alias -f value) [[ $EXT_LIST =~ $extension ]] && return 0 } diff --git a/lib/tempest b/lib/tempest index adffeda371..9ca06acabf 100644 --- a/lib/tempest +++ b/lib/tempest @@ -90,11 +90,6 @@ TEMPEST_USE_TEST_ACCOUNTS=$(trueorfalse False TEMPEST_USE_TEST_ACCOUNTS) # it will run tempest with TEMPEST_CONCURRENCY=${TEMPEST_CONCURRENCY:-$(nproc)} -NEUTRON_ADMIN_CLOUD_NAME="devstack-admin" -if [ "$NEUTRON_ENFORCE_SCOPE" == "True" ]; then - NEUTRON_ADMIN_CLOUD_NAME="devstack-system-admin" -fi - # Functions # --------- @@ -293,8 +288,8 @@ function configure_tempest { if [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" == "True" ]] && is_networking_extension_supported 'external-net'; then public_network_id=$(openstack --os-cloud devstack-admin network show -f value -c id $PUBLIC_NETWORK_NAME) # make sure shared network presence does not confuses the tempest tests - openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" network create --share shared --project "$admin_project_id" - openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" subnet create --description shared-subnet --subnet-range ${TEMPEST_SHARED_POOL:-192.168.233.0/24} --network shared shared-subnet --project "$admin_project_id" + openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create --share shared + openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create --description shared-subnet --subnet-range ${TEMPEST_SHARED_POOL:-192.168.233.0/24} --network shared shared-subnet fi iniset $TEMPEST_CONFIG DEFAULT use_syslog $SYSLOG From 081c9b716fc742ffc12263e46ae499d7a1f65a7e Mon Sep 17 00:00:00 2001 From: Slawek Kaplonski Date: Fri, 28 Jan 2022 09:52:28 +0100 Subject: [PATCH 180/574] Revert "Disable enforcing scopes in Neutron temporary" This reverts commit be7b5bf671b4cdc082fb9b7bb73ec55cab0054dd. As related bug is fixed, lets enabled scope enforcement in Neutron again. 
Related-bug: #1959196 Change-Id: I72db7ef533e78a10734d105e6a0debef288e41a1 --- .zuul.yaml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 5a58d743fe..248a56beb9 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -619,9 +619,7 @@ # Keep enabeling the services here to run with system scope CINDER_ENFORCE_SCOPE: true GLANCE_ENFORCE_SCOPE: true - # TODO(slaweq): Enable enforce scopes in Neutron when bug - # https://bugs.launchpad.net/neutron/+bug/1959196 will be fixed - # NEUTRON_ENFORCE_SCOPE: true + NEUTRON_ENFORCE_SCOPE: true - job: name: devstack-multinode From 099a048fb933649606e58310e8e705e7c7e29cd7 Mon Sep 17 00:00:00 2001 From: melanie witt Date: Thu, 6 May 2021 00:09:33 +0000 Subject: [PATCH 181/574] Configure nova unified limits quotas This enables the configuration of nova to use unified limits in keystone and enforcement in oslo.limit. Related to blueprint unified-limits-nova Depends-On: https://review.opendev.org/c/openstack/nova/+/715271 Change-Id: Ifdef3510bc7da3098a71739814e35dbaf612ae34 --- lib/nova | 66 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 66 insertions(+) diff --git a/lib/nova b/lib/nova index 930529a433..9039c6b454 100644 --- a/lib/nova +++ b/lib/nova @@ -159,6 +159,9 @@ NOVA_NOTIFICATION_FORMAT=${NOVA_NOTIFICATION_FORMAT:-unversioned} # image in devstack is CirrOS. NOVA_SHUTDOWN_TIMEOUT=${NOVA_SHUTDOWN_TIMEOUT:-0} +# Whether to use Keystone unified limits instead of legacy quota limits. +NOVA_USE_UNIFIED_LIMITS=$(trueorfalse False NOVA_USE_UNIFIED_LIMITS) + # Functions # --------- @@ -384,6 +387,13 @@ function create_nova_accounts { "http://$SERVICE_HOST:$S3_SERVICE_PORT" \ "http://$SERVICE_HOST:$S3_SERVICE_PORT" fi + + # Unified limits + if is_service_enabled n-api; then + if [[ "$NOVA_USE_UNIFIED_LIMITS" = True ]]; then + configure_nova_unified_limits + fi + fi } # create_nova_conf() - Create a new nova.conf file @@ -719,6 +729,62 @@ function configure_console_proxies { fi } +function configure_nova_unified_limits { + # Default limits. Mirror the config-based default values. + # Note: disk quota is new in nova as of unified limits. 
+ bash -c "unset OS_USERNAME OS_TENANT_NAME OS_PROJECT_NAME; + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 10 --region $REGION_NAME \ + servers; \ + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 20 --region $REGION_NAME \ + class:VCPU; \ + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit $((50 * 1024)) --region $REGION_NAME \ + class:MEMORY_MB; \ + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 20 --region $REGION_NAME \ + class:DISK_GB; \ + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 128 --region $REGION_NAME \ + server_metadata_items; \ + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 5 --region $REGION_NAME \ + server_injected_files; \ + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 10240 --region $REGION_NAME \ + server_injected_file_content_bytes; \ + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 255 --region $REGION_NAME \ + server_injected_file_path_bytes; \ + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 100 --region $REGION_NAME \ + server_key_pairs; \ + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 10 --region $REGION_NAME \ + server_groups; \ + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 10 --region $REGION_NAME \ + server_group_members" + + # Tell nova to use these limits + iniset $NOVA_CONF quota driver "nova.quota.UnifiedLimitsDriver" + + # Configure oslo_limit so it can talk to keystone + iniset $NOVA_CONF oslo_limit user_domain_name $SERVICE_DOMAIN_NAME + iniset $NOVA_CONF oslo_limit password $SERVICE_PASSWORD + iniset $NOVA_CONF oslo_limit username nova + iniset $NOVA_CONF oslo_limit auth_type password + iniset $NOVA_CONF oslo_limit auth_url $KEYSTONE_SERVICE_URI + iniset $NOVA_CONF oslo_limit system_scope "'all'" + iniset $NOVA_CONF oslo_limit endpoint_id \ + $(openstack endpoint list --service nova -f value -c ID) + + # Allow the nova service user to read quotas + openstack role add --user nova --user-domain Default --system all \ + reader +} + function init_nova_service_user_conf { iniset $NOVA_CONF service_user send_service_user_token True iniset $NOVA_CONF service_user auth_type password From a756f4b9681d429f2612164eb01d57c800ff2d2a Mon Sep 17 00:00:00 2001 From: Rodolfo Alonso Hernandez Date: Mon, 31 Jan 2022 16:38:31 +0000 Subject: [PATCH 182/574] Add python3.6 pip support Since pip v22, python3.6 is not supported (the minimum version is python3.7). This patch adds the reference for the pip3.6 URL to be used instead of the default one. 
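For illustration, the selection added below boils down to roughly the following (a simplified sketch; the actual change to tools/install_pip.sh also caches the downloaded bootstrap file under $FILES instead of /tmp):

    # Simplified sketch of the version-aware bootstrap selection below.
    if [[ "$PYTHON3_VERSION" = "3.6" ]]; then
        _pip_url="https://bootstrap.pypa.io/pip/3.6/get-pip.py"   # frozen 3.6-compatible bootstrap
    else
        _pip_url="https://bootstrap.pypa.io/get-pip.py"           # current bootstrap, needs python >= 3.7
    fi
    curl -f --retry 6 --retry-delay 5 -o /tmp/get-pip.py "$_pip_url"
    sudo -H -E python${PYTHON3_VERSION} /tmp/get-pip.py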
Closes-Bug: #1959600 Change-Id: Iab2c391d5388461fe9e9037cee81884ce8032e72 --- tools/install_pip.sh | 31 ++++++++++++++++++++----------- 1 file changed, 20 insertions(+), 11 deletions(-) diff --git a/tools/install_pip.sh b/tools/install_pip.sh index 5d73a1f0d8..e9c52eacb7 100755 --- a/tools/install_pip.sh +++ b/tools/install_pip.sh @@ -38,7 +38,7 @@ FILES=$TOP_DIR/files # [1] https://opendev.org/openstack/project-config/src/branch/master/nodepool/elements/cache-devstack/source-repository-pip PIP_GET_PIP_URL=${PIP_GET_PIP_URL:-"https://bootstrap.pypa.io/get-pip.py"} -LOCAL_PIP="$FILES/$(basename $PIP_GET_PIP_URL)" +PIP_GET_PIP36_URL=${PIP_GET_PIP36_URL:-"https://bootstrap.pypa.io/pip/3.6/get-pip.py"} GetDistro echo "Distro: $DISTRO" @@ -57,12 +57,21 @@ function get_versions { function install_get_pip { + if [[ "$PYTHON3_VERSION" = "3.6" ]]; then + _pip_url=$PIP_GET_PIP36_URL + _local_pip="$FILES/$(basename $_pip_url)-py36" + else + _pip_url=$PIP_GET_PIP_URL + _local_pip="$FILES/$(basename $_pip_url)" + fi + + # If get-pip.py isn't python, delete it. This was probably an # outage on the server. - if [[ -r $LOCAL_PIP ]]; then - if ! head -1 $LOCAL_PIP | grep -q '#!/usr/bin/env python'; then - echo "WARNING: Corrupt $LOCAL_PIP found removing" - rm $LOCAL_PIP + if [[ -r $_local_pip ]]; then + if ! head -1 $_local_pip | grep -q '#!/usr/bin/env python'; then + echo "WARNING: Corrupt $_local_pip found removing" + rm $_local_pip fi fi @@ -76,20 +85,20 @@ function install_get_pip { # Thus we use curl's "-z" feature to always check the modified # since and only download if a new version is out -- but only if # it seems we downloaded the file originally. - if [[ ! -r $LOCAL_PIP || -r $LOCAL_PIP.downloaded ]]; then + if [[ ! -r $_local_pip || -r $_local_pip.downloaded ]]; then # only test freshness if LOCAL_PIP is actually there, # otherwise we generate a scary warning. local timecond="" - if [[ -r $LOCAL_PIP ]]; then - timecond="-z $LOCAL_PIP" + if [[ -r $_local_pip ]]; then + timecond="-z $_local_pip" fi curl -f --retry 6 --retry-delay 5 \ - $timecond -o $LOCAL_PIP $PIP_GET_PIP_URL || \ + $timecond -o $_local_pip $_pip_url || \ die $LINENO "Download of get-pip.py failed" - touch $LOCAL_PIP.downloaded + touch $_local_pip.downloaded fi - sudo -H -E python${PYTHON3_VERSION} $LOCAL_PIP + sudo -H -E python${PYTHON3_VERSION} $_local_pip } From 85c7d8db4eef2e367a7466a39b4f1fba7a983eef Mon Sep 17 00:00:00 2001 From: Sean Mooney Date: Fri, 4 Feb 2022 08:29:32 +0000 Subject: [PATCH 183/574] revert stackrc execute permissions This change reverts the execute permissions from stackrc which is not meant to be run as a script but sourced as part of stack.sh Change-Id: I9a05051e5a297cfaf78d097fa5f90a7c5fd254a6 --- stackrc | 0 1 file changed, 0 insertions(+), 0 deletions(-) mode change 100755 => 100644 stackrc diff --git a/stackrc b/stackrc old mode 100755 new mode 100644 From 343e35162798af4c0399f2f7c0a733c568782686 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Thu, 3 Feb 2022 11:19:08 +1100 Subject: [PATCH 184/574] Bump fedora-latest to F35 Generally this is straight forward, but Horizon has a dependency issue with pyScss (described in [1]) so it is disabled, for now. [1] https://bugs.launchpad.net/horizon/+bug/1960204 Co-Authored-By: Dr. 
Jens Harbott Depends-On: https://review.opendev.org/c/openstack/devstack/+/827578 Change-Id: I7c4bf0945f9ac5bd563fe0a698c09b8571c97c5e --- .zuul.yaml | 6 +++++- files/rpms/swift | 2 +- lib/apache | 7 ++++++- stack.sh | 2 +- 4 files changed, 13 insertions(+), 4 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 5a58d743fe..3278eeb9e0 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -90,7 +90,7 @@ name: devstack-single-node-fedora-latest nodes: - name: controller - label: fedora-34 + label: fedora-35 groups: - name: tempest nodes: @@ -729,6 +729,10 @@ voting: false vars: configure_swap_size: 4096 + # Python 3.10 dependency issues; see + # https://bugs.launchpad.net/horizon/+bug/1960204 + devstack_services: + horizon: false - job: name: devstack-platform-fedora-latest-virt-preview diff --git a/files/rpms/swift b/files/rpms/swift index faf0a3175a..a838d7839e 100644 --- a/files/rpms/swift +++ b/files/rpms/swift @@ -4,4 +4,4 @@ memcached rsync-daemon sqlite xfsprogs -xinetd # not:f34,rhel9 +xinetd # not:f35,rhel9 diff --git a/lib/apache b/lib/apache index cbe61adf34..f29c7ea2cb 100644 --- a/lib/apache +++ b/lib/apache @@ -85,7 +85,12 @@ function install_apache_uwsgi { if is_ubuntu; then local pkg_list="uwsgi uwsgi-plugin-python3 libapache2-mod-proxy-uwsgi" install_package ${pkg_list} - elif is_fedora && ! is_openeuler; then + # NOTE(ianw) 2022-02-03 : Fedora 35 needs to skip this and fall + # into the install-from-source because the upstream packages + # didn't fix Python 3.10 compatibility before release. Should be + # fixed in uwsgi 4.9.0; can remove this when packages available + # or we drop this release + elif is_fedora && ! is_openeuler && ! [[ $DISTRO =~ f35 ]]; then # Note httpd comes with mod_proxy_uwsgi and it is loaded by # default; the mod_proxy_uwsgi package actually conflicts now. # See: diff --git a/stack.sh b/stack.sh index c92cc79b40..0082b99f11 100755 --- a/stack.sh +++ b/stack.sh @@ -227,7 +227,7 @@ write_devstack_version # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -SUPPORTED_DISTROS="bullseye|focal|f34|opensuse-15.2|opensuse-tumbleweed|rhel8|rhel9|openEuler-20.03" +SUPPORTED_DISTROS="bullseye|focal|f35|opensuse-15.2|opensuse-tumbleweed|rhel8|rhel9|openEuler-20.03" if [[ ! ${DISTRO} =~ $SUPPORTED_DISTROS ]]; then echo "WARNING: this script has not been tested on $DISTRO" From accd99e7cddabb60633bf7e8f8832c961a2805f4 Mon Sep 17 00:00:00 2001 From: Jakob Meng Date: Tue, 8 Feb 2022 11:05:14 +0100 Subject: [PATCH 185/574] Made LVM backing disk persistent Previously, loop devices for LVM volume groups backing files were not created after reboots, causing e.g. Cinder to fail with messages such as ERROR cinder.service [-] Manager for service cinder-volume devstack@lvmdriver-1 is reporting problems, not sending heartbeat. Service will appear "down". Now, we use systemd services to manage loop devices for backing files. 
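For example, after a reboot the restored loop device can be checked with something like the following (illustrative names only, assuming the default /opt/stack/data data dir and a stack-volumes-lvmdriver-1 volume group; the real unit and file names are derived from $vg$BACKING_FILE_SUFFIX in lib/lvm):

    vg=stack-volumes-lvmdriver-1                 # example volume group name
    sudo systemctl status ${vg}-backing-file.service
    sudo losetup --associated /opt/stack/data/${vg}-backing-file -O NAME -n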
Change-Id: I27ec027834966e44aa9a99999358f5b4debc43e0 --- files/lvm-backing-file.template | 16 +++++++++++ lib/lvm | 49 ++++++++++++++++++--------------- 2 files changed, 43 insertions(+), 22 deletions(-) create mode 100644 files/lvm-backing-file.template diff --git a/files/lvm-backing-file.template b/files/lvm-backing-file.template new file mode 100644 index 0000000000..dc519d7745 --- /dev/null +++ b/files/lvm-backing-file.template @@ -0,0 +1,16 @@ +[Unit] +Description=Activate LVM backing file %BACKING_FILE% +DefaultDependencies=no +After=systemd-udev-settle.service +Before=lvm2-activation-early.service +Wants=systemd-udev-settle.service + +[Service] +ExecStart=/sbin/losetup --find --show %DIRECTIO% %BACKING_FILE% +ExecStop=/bin/sh -c '/sbin/losetup -d $$(/sbin/losetup --associated %BACKING_FILE% -O NAME -n)' +RemainAfterExit=yes +Type=oneshot + +[Install] +WantedBy=local-fs.target +Also=systemd-udev-settle.service diff --git a/lib/lvm b/lib/lvm index b826c1bc63..d3f6bf1792 100644 --- a/lib/lvm +++ b/lib/lvm @@ -53,28 +53,10 @@ function _remove_lvm_volume_group { sudo vgremove -f $vg } -# _clean_lvm_backing_file() removes the backing file of the -# volume group -# -# Usage: _clean_lvm_backing_file() $backing_file -function _clean_lvm_backing_file { - local backing_file=$1 - - # If the backing physical device is a loop device, it was probably setup by DevStack - if [[ -n "$backing_file" ]] && [[ -e "$backing_file" ]]; then - local vg_dev - vg_dev=$(sudo losetup -j $backing_file | awk -F':' '/'$BACKING_FILE_SUFFIX'/ { print $1}') - if [[ -n "$vg_dev" ]]; then - sudo losetup -d $vg_dev - fi - rm -f $backing_file - fi -} - # clean_lvm_volume_group() cleans up the volume group and removes the # backing file # -# Usage: clean_lvm_volume_group $vg +# Usage: clean_lvm_volume_group() $vg function clean_lvm_volume_group { local vg=$1 @@ -83,11 +65,22 @@ function clean_lvm_volume_group { # if there is no logical volume left, it's safe to attempt a cleanup # of the backing file if [[ -z "$(sudo lvs --noheadings -o lv_name $vg 2>/dev/null)" ]]; then - _clean_lvm_backing_file $DATA_DIR/$vg$BACKING_FILE_SUFFIX + local backing_file=$DATA_DIR/$vg$BACKING_FILE_SUFFIX + + if [[ -n "$vg$BACKING_FILE_SUFFIX" ]] && \ + [[ -e "/etc/systemd/system/$vg$BACKING_FILE_SUFFIX.service" ]]; then + sudo systemctl disable --now $vg$BACKING_FILE_SUFFIX.service + sudo rm -f /etc/systemd/system/$vg$BACKING_FILE_SUFFIX.service + sudo systemctl daemon-reload + fi + + # If the backing physical device is a loop device, it was probably setup by DevStack + if [[ -n "$backing_file" ]] && [[ -e "$backing_file" ]]; then + rm -f $backing_file + fi fi } - # _create_lvm_volume_group creates default volume group # # Usage: _create_lvm_volume_group() $vg $size @@ -106,8 +99,20 @@ function _create_lvm_volume_group { directio="--direct-io=on" fi + # Only create systemd service if it doesn't already exists + if [[ ! -e "/etc/systemd/system/$vg$BACKING_FILE_SUFFIX.service" ]]; then + sed -e " + s|%DIRECTIO%|${directio}|g; + s|%BACKING_FILE%|${backing_file}|g; + " $FILES/lvm-backing-file.template | sudo tee \ + /etc/systemd/system/$vg$BACKING_FILE_SUFFIX.service + + sudo systemctl daemon-reload + sudo systemctl enable --now $vg$BACKING_FILE_SUFFIX.service + fi + local vg_dev - vg_dev=$(sudo losetup -f --show $directio $backing_file) + vg_dev=$(sudo losetup --associated $backing_file -O NAME -n) # Only create volume group if it doesn't already exist if ! 
sudo vgs $vg; then From 8c6710326eaf8114b579720185161091a0a9f38c Mon Sep 17 00:00:00 2001 From: Rodolfo Alonso Hernandez Date: Wed, 9 Feb 2022 18:01:46 +0000 Subject: [PATCH 186/574] Fix installation with OVN backend and compilation This patch fixes several issues related to the installation with OVN backend with the OVS/OVN compilation enabled. The OVS/OVN local directories prefix, when both services are compiled, is now "/usr/local". The "ovn_agent._run_process" function is calling "ovs-appctl" to configure the logging settings of several services. Instead of using the service name, the ctl socket file is used instead. That is more robust and does not fail in systems with previous installations. Closes-Bug: #1960514 Change-Id: I69de5333393957593db6e05495f0c3c758efefdf --- lib/neutron_plugins/ovn_agent | 7 ++++++- lib/neutron_plugins/ovs_base | 2 +- lib/neutron_plugins/ovs_source | 4 ++-- 3 files changed, 9 insertions(+), 4 deletions(-) diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index 09b28b6c3f..927896b70b 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -253,7 +253,12 @@ function _run_process { local testcmd="test -e $OVS_RUNDIR/$service.pid" test_with_retry "$testcmd" "$service did not start" $SERVICE_TIMEOUT 1 - sudo ovs-appctl -t $service vlog/set console:off syslog:info file:info + local service_ctl_file + service_ctl_file=$(ls $OVS_RUNDIR | grep $service | grep ctl) + if [ -z "$service_ctl_file" ]; then + die $LINENO "ctl file for service $service is not present." + fi + sudo ovs-appctl -t $OVS_RUNDIR/$service_ctl_file vlog/set console:off syslog:info file:info } function clone_repository { diff --git a/lib/neutron_plugins/ovs_base b/lib/neutron_plugins/ovs_base index 8acf586189..cc41a8cd46 100644 --- a/lib/neutron_plugins/ovs_base +++ b/lib/neutron_plugins/ovs_base @@ -68,7 +68,7 @@ function _neutron_ovs_base_install_ubuntu_dkms { function _neutron_ovs_base_install_agent_packages { if [ "$Q_BUILD_OVS_FROM_GIT" == "True" ]; then remove_ovs_packages - compile_ovs False /usr /var + compile_ovs False /usr/local /var load_conntrack_gre_module start_new_ovs else diff --git a/lib/neutron_plugins/ovs_source b/lib/neutron_plugins/ovs_source index 9c87dce551..9ae5555afb 100644 --- a/lib/neutron_plugins/ovs_source +++ b/lib/neutron_plugins/ovs_source @@ -188,12 +188,12 @@ function action_openvswitch { # start_new_ovs() - removes old ovs database, creates a new one and starts ovs function start_new_ovs { sudo rm -f /etc/openvswitch/conf.db /etc/openvswitch/.conf.db~lock~ - sudo /usr/share/openvswitch/scripts/ovs-ctl start + sudo /usr/local/share/openvswitch/scripts/ovs-ctl start } # stop_new_ovs() - stops ovs function stop_new_ovs { - local ovs_ctl='/usr/share/openvswitch/scripts/ovs-ctl' + local ovs_ctl='/usr/local/share/openvswitch/scripts/ovs-ctl' if [ -x $ovs_ctl ] ; then sudo $ovs_ctl stop From 17b1999eabab92a7820a2900853dc23d7150dbe9 Mon Sep 17 00:00:00 2001 From: Sean Mooney Date: Wed, 9 Feb 2022 22:14:24 +0000 Subject: [PATCH 187/574] Default CIRROS_ARCH to host arch This change use uname -m to get the portable host arch and uses that as a new default. on x86_64 hosts this should result in no visable change in behavior however on a non x86 host it will cause devstack to attempt to download a cirros image that matches the host. 
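For example (illustrative only), on an arm64 host uname -m reports aarch64, so with the stackrc defaults the downloaded image now resolves along these lines:

    CIRROS_ARCH=${CIRROS_ARCH:-$(uname -m)}   # "aarch64" on arm64, "x86_64" on Intel/AMD hosts
    echo "http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img"
    # -> .../0.5.2/cirros-0.5.2-aarch64-disk.img instead of the previously hard-coded x86_64 image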
Change-Id: I6d1495a23400ef4cf496302028324fa5794dd45f --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index 681e9dee38..e48fd81d16 100644 --- a/stackrc +++ b/stackrc @@ -663,7 +663,7 @@ esac #IMAGE_URLS="http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img" # cirros full disk image CIRROS_VERSION=${CIRROS_VERSION:-"0.5.2"} -CIRROS_ARCH=${CIRROS_ARCH:-"x86_64"} +CIRROS_ARCH=${CIRROS_ARCH:-$(uname -m)} # Set default image based on ``VIRT_DRIVER`` and ``LIBVIRT_TYPE``, either of # which may be set in ``local.conf``. Also allow ``DEFAULT_IMAGE_NAME`` and From ef6fac7959b257bb08ff3014e9cbf9cbc6b28ec3 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Wed, 16 Feb 2022 02:16:15 +0000 Subject: [PATCH 188/574] Updated from generate-devstack-plugins-list Change-Id: If1b667cd4af88511cb1672645a980c9c4fc557ae --- doc/source/plugin-registry.rst | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 3edd708d8b..6850553b52 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -75,6 +75,7 @@ openstack/networking-powervm `https://opendev.org/openstack/networki openstack/networking-sfc `https://opendev.org/openstack/networking-sfc `__ openstack/neutron `https://opendev.org/openstack/neutron `__ openstack/neutron-dynamic-routing `https://opendev.org/openstack/neutron-dynamic-routing `__ +openstack/neutron-fwaas-dashboard `https://opendev.org/openstack/neutron-fwaas-dashboard `__ openstack/neutron-tempest-plugin `https://opendev.org/openstack/neutron-tempest-plugin `__ openstack/neutron-vpnaas `https://opendev.org/openstack/neutron-vpnaas `__ openstack/neutron-vpnaas-dashboard `https://opendev.org/openstack/neutron-vpnaas-dashboard `__ @@ -92,6 +93,7 @@ openstack/sahara `https://opendev.org/openstack/sahara < openstack/sahara-dashboard `https://opendev.org/openstack/sahara-dashboard `__ openstack/senlin `https://opendev.org/openstack/senlin `__ openstack/shade `https://opendev.org/openstack/shade `__ +openstack/skyline-apiserver `https://opendev.org/openstack/skyline-apiserver `__ openstack/solum `https://opendev.org/openstack/solum `__ openstack/storlets `https://opendev.org/openstack/storlets `__ openstack/tacker `https://opendev.org/openstack/tacker `__ @@ -112,7 +114,6 @@ openstack/zaqar-ui `https://opendev.org/openstack/zaqar-ui openstack/zun `https://opendev.org/openstack/zun `__ openstack/zun-ui `https://opendev.org/openstack/zun-ui `__ performa/os-faults `https://opendev.org/performa/os-faults `__ -skyline/skyline-apiserver `https://opendev.org/skyline/skyline-apiserver `__ starlingx/config `https://opendev.org/starlingx/config `__ starlingx/fault `https://opendev.org/starlingx/fault `__ starlingx/ha `https://opendev.org/starlingx/ha `__ From c0882aeaae8ebdc692f2f0e005f8795bbb6c3d53 Mon Sep 17 00:00:00 2001 From: Victor Morales Date: Wed, 16 Feb 2022 18:15:12 -0800 Subject: [PATCH 189/574] Add rsync deb package for swift The rsync debian package is required for swift service. This requirement has been covered by rpms but not for deb packages. 
Change-Id: Iefd1302be9c7fd80e037bbae3638602d6d823580 --- files/debs/swift | 1 + 1 file changed, 1 insertion(+) diff --git a/files/debs/swift b/files/debs/swift index 4b8ac3d793..67c6c8ddb4 100644 --- a/files/debs/swift +++ b/files/debs/swift @@ -2,5 +2,6 @@ curl liberasurecode-dev make memcached +rsync sqlite3 xfsprogs From e30620e9a62cd1243bded2b922b21c269d801aa6 Mon Sep 17 00:00:00 2001 From: Jakob Meng Date: Fri, 4 Feb 2022 20:55:48 +0100 Subject: [PATCH 190/574] Made Swift backing disk persistent Previously, Swift's backing disk were not be mounted after reboots, causing swift-proxy-server service to fail with cryptic error messages such as 'proxy-server: ERROR Insufficient Storage'. Now, we use Dan Smith' create_disk function from functions to create the backing disk for us and add it to /etc/fstab. Change-Id: I9cbccc87bc94a55b58e9badf3fdb127d6f1cf599 --- lib/swift | 32 ++++---------------------------- 1 file changed, 4 insertions(+), 28 deletions(-) diff --git a/lib/swift b/lib/swift index 9c13701c6e..ba92f3dcc3 100644 --- a/lib/swift +++ b/lib/swift @@ -179,12 +179,9 @@ function is_swift_enabled { # cleanup_swift() - Remove residual data files function cleanup_swift { rm -f ${SWIFT_CONF_DIR}{*.builder,*.ring.gz,backups/*.builder,backups/*.ring.gz} - if egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then - sudo umount ${SWIFT_DATA_DIR}/drives/sdb1 - fi - if [[ -e ${SWIFT_DISK_IMAGE} ]]; then - rm ${SWIFT_DISK_IMAGE} - fi + + destroy_disk ${SWIFT_DISK_IMAGE} ${SWIFT_DATA_DIR}/drives/sdb1 + rm -rf ${SWIFT_DATA_DIR}/run/ if [ "$SWIFT_USE_MOD_WSGI" == "True" ]; then _cleanup_swift_apache_wsgi @@ -575,28 +572,7 @@ function create_swift_disk { sudo install -d -o ${STACK_USER} -g ${user_group} ${SWIFT_DATA_DIR}/{drives,cache,run,logs} # Create a loopback disk and format it to XFS. - if [[ -e ${SWIFT_DISK_IMAGE} ]]; then - if egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then - sudo umount ${SWIFT_DATA_DIR}/drives/sdb1 - sudo rm -f ${SWIFT_DISK_IMAGE} - fi - fi - - mkdir -p ${SWIFT_DATA_DIR}/drives/images - sudo touch ${SWIFT_DISK_IMAGE} - sudo chown ${STACK_USER}: ${SWIFT_DISK_IMAGE} - - truncate -s ${SWIFT_LOOPBACK_DISK_SIZE} ${SWIFT_DISK_IMAGE} - - # Make a fresh XFS filesystem - /sbin/mkfs.xfs -f -i size=1024 ${SWIFT_DISK_IMAGE} - - # Mount the disk with mount options to make it as efficient as possible - mkdir -p ${SWIFT_DATA_DIR}/drives/sdb1 - if ! 
egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then - sudo mount -t xfs -o loop,noatime,nodiratime,logbufs=8 \ - ${SWIFT_DISK_IMAGE} ${SWIFT_DATA_DIR}/drives/sdb1 - fi + create_disk ${SWIFT_DISK_IMAGE} ${SWIFT_DATA_DIR}/drives/sdb1 ${SWIFT_LOOPBACK_DISK_SIZE} # Create a link to the above mount and # create all of the directories needed to emulate a few different servers From a2ff7545366b1be960c1175b47e20c5845c3a6e2 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Wed, 2 Mar 2022 02:13:44 +0000 Subject: [PATCH 191/574] Updated from generate-devstack-plugins-list Change-Id: Iff2bf021edee9be3bae21b67e66fe07c552f3a05 --- doc/source/plugin-registry.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 6850553b52..2e8e8f53d7 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -75,6 +75,7 @@ openstack/networking-powervm `https://opendev.org/openstack/networki openstack/networking-sfc `https://opendev.org/openstack/networking-sfc `__ openstack/neutron `https://opendev.org/openstack/neutron `__ openstack/neutron-dynamic-routing `https://opendev.org/openstack/neutron-dynamic-routing `__ +openstack/neutron-fwaas `https://opendev.org/openstack/neutron-fwaas `__ openstack/neutron-fwaas-dashboard `https://opendev.org/openstack/neutron-fwaas-dashboard `__ openstack/neutron-tempest-plugin `https://opendev.org/openstack/neutron-tempest-plugin `__ openstack/neutron-vpnaas `https://opendev.org/openstack/neutron-vpnaas `__ From 2c96180ac8482d912c487c18d400f418bf933cab Mon Sep 17 00:00:00 2001 From: melanie witt Date: Thu, 3 Mar 2022 23:54:49 +0000 Subject: [PATCH 192/574] Clean up unified limits configuration for nova and glance This is a followup for change Ifdef3510bc7da3098a71739814e35dbaf612ae34 which added configuration of unified limits for nova. This removes an unnecessary wrapper unsetting of OS_ env variables, unnecessary quoting on an iniset config value, and a hardcoding of user domain. The glance code from which the nova code was originally copied is also cleaned up. Change-Id: I4921af5cc0f624dd5aa848533f7049ee816be593 --- lib/glance | 6 ++--- lib/nova | 67 +++++++++++++++++++++++------------------------------- 2 files changed, 32 insertions(+), 41 deletions(-) diff --git a/lib/glance b/lib/glance index 9bba938b9d..b94c06dc93 100644 --- a/lib/glance +++ b/lib/glance @@ -309,13 +309,13 @@ function configure_glance_quotas { iniset $GLANCE_API_CONF oslo_limit username glance iniset $GLANCE_API_CONF oslo_limit auth_type password iniset $GLANCE_API_CONF oslo_limit auth_url $KEYSTONE_SERVICE_URI - iniset $GLANCE_API_CONF oslo_limit system_scope "'all'" + iniset $GLANCE_API_CONF oslo_limit system_scope all iniset $GLANCE_API_CONF oslo_limit endpoint_id \ $(openstack --os-cloud devstack-system-admin endpoint list --service glance -f value -c ID) # Allow the glance service user to read quotas - openstack --os-cloud devstack-system-admin role add --user glance --user-domain Default \ - --system all reader + openstack --os-cloud devstack-system-admin role add --user glance \ + --user-domain $SERVICE_DOMAIN_NAME --system all reader } # configure_glance() - Set config files, create data dirs, etc diff --git a/lib/nova b/lib/nova index 90289b139a..509cba6ff2 100644 --- a/lib/nova +++ b/lib/nova @@ -746,42 +746,33 @@ function configure_console_proxies { } function configure_nova_unified_limits { - # Default limits. Mirror the config-based default values. 
+ # Registered limit resources in keystone are system-specific resources. + # Make sure we use a system-scoped token to interact with this API. + + # Default limits here mirror the legacy config-based default values. # Note: disk quota is new in nova as of unified limits. - bash -c "unset OS_USERNAME OS_TENANT_NAME OS_PROJECT_NAME; - openstack --os-cloud devstack-system-admin registered limit create \ - --service nova --default-limit 10 --region $REGION_NAME \ - servers; \ - openstack --os-cloud devstack-system-admin registered limit create \ - --service nova --default-limit 20 --region $REGION_NAME \ - class:VCPU; \ - openstack --os-cloud devstack-system-admin registered limit create \ - --service nova --default-limit $((50 * 1024)) --region $REGION_NAME \ - class:MEMORY_MB; \ - openstack --os-cloud devstack-system-admin registered limit create \ - --service nova --default-limit 20 --region $REGION_NAME \ - class:DISK_GB; \ - openstack --os-cloud devstack-system-admin registered limit create \ - --service nova --default-limit 128 --region $REGION_NAME \ - server_metadata_items; \ - openstack --os-cloud devstack-system-admin registered limit create \ - --service nova --default-limit 5 --region $REGION_NAME \ - server_injected_files; \ - openstack --os-cloud devstack-system-admin registered limit create \ - --service nova --default-limit 10240 --region $REGION_NAME \ - server_injected_file_content_bytes; \ - openstack --os-cloud devstack-system-admin registered limit create \ - --service nova --default-limit 255 --region $REGION_NAME \ - server_injected_file_path_bytes; \ - openstack --os-cloud devstack-system-admin registered limit create \ - --service nova --default-limit 100 --region $REGION_NAME \ - server_key_pairs; \ - openstack --os-cloud devstack-system-admin registered limit create \ - --service nova --default-limit 10 --region $REGION_NAME \ - server_groups; \ - openstack --os-cloud devstack-system-admin registered limit create \ - --service nova --default-limit 10 --region $REGION_NAME \ - server_group_members" + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 10 --region $REGION_NAME servers + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 20 --region $REGION_NAME class:VCPU + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit $((50 * 1024)) --region $REGION_NAME class:MEMORY_MB + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 20 --region $REGION_NAME class:DISK_GB + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 128 --region $REGION_NAME server_metadata_items + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 5 --region $REGION_NAME server_injected_files + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 10240 --region $REGION_NAME server_injected_file_content_bytes + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 255 --region $REGION_NAME server_injected_file_path_bytes + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 100 --region $REGION_NAME server_key_pairs + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 10 --region $REGION_NAME server_groups 
+ openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 10 --region $REGION_NAME server_group_members # Tell nova to use these limits iniset $NOVA_CONF quota driver "nova.quota.UnifiedLimitsDriver" @@ -792,13 +783,13 @@ function configure_nova_unified_limits { iniset $NOVA_CONF oslo_limit username nova iniset $NOVA_CONF oslo_limit auth_type password iniset $NOVA_CONF oslo_limit auth_url $KEYSTONE_SERVICE_URI - iniset $NOVA_CONF oslo_limit system_scope "'all'" + iniset $NOVA_CONF oslo_limit system_scope all iniset $NOVA_CONF oslo_limit endpoint_id \ $(openstack endpoint list --service nova -f value -c ID) # Allow the nova service user to read quotas - openstack role add --user nova --user-domain Default --system all \ - reader + openstack --os-cloud devstack-system-admin role add --user nova \ + --user-domain $SERVICE_DOMAIN_NAME --system all reader } function init_nova_service_user_conf { From 13e8db5a6f44a8f537988a102f9869e725bb97bb Mon Sep 17 00:00:00 2001 From: Sean Mooney Date: Wed, 9 Mar 2022 20:17:31 +0000 Subject: [PATCH 193/574] ignore failures to copy the devstack cache If the ci images do not have any cached data we should ignore any error when trying to copying it. This is requried when using unmodified cloud images. Change-Id: Ia6e94fc01343d0c292b1477905f8a96a6b43bcf8 --- roles/setup-devstack-cache/tasks/main.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/roles/setup-devstack-cache/tasks/main.yaml b/roles/setup-devstack-cache/tasks/main.yaml index 84f33f0e16..3adff17d5d 100644 --- a/roles/setup-devstack-cache/tasks/main.yaml +++ b/roles/setup-devstack-cache/tasks/main.yaml @@ -2,6 +2,7 @@ # This uses hard links to avoid using extra space. command: "find {{ devstack_cache_dir }}/files -mindepth 1 -maxdepth 1 -exec cp -l {} {{ devstack_base_dir }}/devstack/files/ ;" become: true + ignore_errors: yes - name: Set ownership of cached files file: From 35bc600da17c7342345fa9c4d0b8078a8388fad1 Mon Sep 17 00:00:00 2001 From: Michael Johnson Date: Mon, 28 Feb 2022 18:42:34 +0000 Subject: [PATCH 194/574] Fix tls-proxy on newer versions of openssl Newer versions of openssl (CentOS9Stream for example) do not like using sha1. Devstack will fail on these systems[1] with the following error: 801B93DCE77F0000:error:03000098:digital envelope routines:do_sigver_init:invalid digest:crypto/evp/m_sigver.c:333: This patch updates the tls-proxy code in devstack to use sha256 instead of sha1 which allows devstack to complete when tls-proxy is enabled. [1] https://zuul.opendev.org/t/openstack/build/1d90b22a39c74e24a8390861b3c5f957/log/job-output.txt#5535 Closes-Bug: #1962600 Change-Id: I71e1371affe32f070167037b0109a489d196bd31 --- lib/tls | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/tls b/lib/tls index b3cc0b4159..5a7f5ae324 100644 --- a/lib/tls +++ b/lib/tls @@ -169,7 +169,7 @@ default_md = default [ req ] default_bits = 1024 -default_md = sha1 +default_md = sha256 prompt = no distinguished_name = req_distinguished_name @@ -261,7 +261,7 @@ function make_cert { if [ ! -r "$ca_dir/$cert_name.crt" ]; then # Generate a signing request $OPENSSL req \ - -sha1 \ + -sha256 \ -newkey rsa \ -nodes \ -keyout $ca_dir/private/$cert_name.key \ @@ -301,7 +301,7 @@ function make_int_CA { if [ ! 
-r "$ca_dir/cacert.pem" ]; then # Create a signing certificate request $OPENSSL req -config $ca_dir/ca.conf \ - -sha1 \ + -sha256 \ -newkey rsa \ -nodes \ -keyout $ca_dir/private/cacert.key \ From 7943a92bdbdd2a3b2f75fe66ee8c69db65147692 Mon Sep 17 00:00:00 2001 From: Brian Haley Date: Mon, 14 Mar 2022 13:53:41 -0400 Subject: [PATCH 195/574] Do not use hardcoded IPv4 localhost value There are a couple of places that still use a hardcoded 127.0.0.1 value, even if devstack is run with SERVICE_IP_VERSION=6 in local.conf. While things still work, SERVICE_LOCAL_HOST should be used instead since everything else could be using IPv6. Change-Id: I2dd9247a4ac19f565d4d5ecb2e1490501fda8bca --- lib/apache | 9 +++++++-- lib/databases/mysql | 14 +++++++++----- 2 files changed, 16 insertions(+), 7 deletions(-) diff --git a/lib/apache b/lib/apache index f29c7ea2cb..02827d1f1b 100644 --- a/lib/apache +++ b/lib/apache @@ -27,6 +27,11 @@ set +o xtrace APACHE_USER=${APACHE_USER:-$STACK_USER} APACHE_GROUP=${APACHE_GROUP:-$(id -gn $APACHE_USER)} +APACHE_LOCAL_HOST=$SERVICE_LOCAL_HOST +if [[ "$SERVICE_IP_VERSION" == 6 ]]; then + APACHE_LOCAL_HOST=[$APACHE_LOCAL_HOST] +fi + # Set up apache name and configuration directory # Note that APACHE_CONF_DIR is really more accurately apache's vhost @@ -323,7 +328,7 @@ function write_local_uwsgi_http_config { rm -rf $file iniset "$file" uwsgi wsgi-file "$wsgi" port=$(get_random_port) - iniset "$file" uwsgi http-socket "127.0.0.1:$port" + iniset "$file" uwsgi http-socket "$APACHE_LOCAL_HOST:$port" iniset "$file" uwsgi processes $API_WORKERS # This is running standalone iniset "$file" uwsgi master true @@ -359,7 +364,7 @@ function write_local_uwsgi_http_config { apache_conf=$(apache_site_config_for $name) echo "KeepAlive Off" | sudo tee $apache_conf echo "SetEnv proxy-sendchunked 1" | sudo tee -a $apache_conf - echo "ProxyPass \"${url}\" \"http://127.0.0.1:$port\" retry=0 " | sudo tee -a $apache_conf + echo "ProxyPass \"${url}\" \"http://$APACHE_LOCAL_HOST:$port\" retry=0 " | sudo tee -a $apache_conf enable_apache_site $name restart_apache_server } diff --git a/lib/databases/mysql b/lib/databases/mysql index 30e4b7c496..0f45273d4b 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -86,10 +86,16 @@ function configure_database_mysql { exit_distro_not_supported "mysql configuration" fi - # Start mysql-server + # Change bind-address from localhost (127.0.0.1) to any (::) + iniset -sudo $my_conf mysqld bind-address "$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)" + + # (Re)Start mysql-server if is_fedora || is_suse; then # service is not started by default start_service $MYSQL_SERVICE_NAME + elif is_ubuntu; then + # required since bind-address could have changed above + restart_service $MYSQL_SERVICE_NAME fi # Set the root password - only works the first time. For Ubuntu, we already @@ -102,7 +108,7 @@ function configure_database_mysql { if is_ubuntu && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]; then local cmd_args="-uroot -p$DATABASE_PASSWORD " else - local cmd_args="-uroot -p$DATABASE_PASSWORD -h127.0.0.1 " + local cmd_args="-uroot -p$DATABASE_PASSWORD -h$SERVICE_LOCAL_HOST " fi # In mariadb e.g. 
on Ubuntu socket plugin is used for authentication @@ -119,9 +125,7 @@ # Now update ``my.cnf`` for some local needs and restart the mysql service - # Change bind-address from localhost (127.0.0.1) to any (::) and - # set default db type to InnoDB - iniset -sudo $my_conf mysqld bind-address "$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)" + # Set default db type to InnoDB iniset -sudo $my_conf mysqld sql_mode TRADITIONAL iniset -sudo $my_conf mysqld default-storage-engine InnoDB iniset -sudo $my_conf mysqld max_connections 1024 From 369042b74fe07e6f0f471fd50d7108586d55b97a Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Mon, 21 Mar 2022 15:29:38 -0500 Subject: [PATCH 196/574] Make centos-9-stream job voting Bug #1960346 is fixed by the series below - https://review.opendev.org/q/(topic:bug/1960346+OR+topic:wait_until_sshable_pingable)+status:merged and now the centos-9-stream job is passing and has been made voting on the Tempest gate. This commit makes the devstack centos-9-stream platform job voting and adds it to the gate pipeline too. Change-Id: Ic35420c5d58926ae90a136045a1558112accc533 --- .zuul.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.zuul.yaml b/.zuul.yaml index fc80e6c413..067d3f5b08 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -678,7 +678,6 @@ parent: tempest-full-py3 description: CentOS 9 Stream platform test nodeset: devstack-single-node-centos-9-stream - voting: false timeout: 9000 vars: configure_swap_size: 4096 @@ -894,6 +893,7 @@ jobs: - devstack - devstack-ipv6 + - devstack-platform-centos-9-stream - devstack-enforce-scope - devstack-multinode - devstack-unit-tests From cebd00aa0468a084d21fc6f43ed7d4c15db878f5 Mon Sep 17 00:00:00 2001 From: Slawek Kaplonski Date: Thu, 17 Feb 2022 11:57:30 +0100 Subject: [PATCH 197/574] Clean usage of project_id in the Neutron's L3 service module After patch [1] project_id in that module is no longer needed, as to make it work with the new secure RBAC policies we had to hardcode the "demo" project to always be used. This is a small follow-up patch with cleanup after [1]. 
[1] https://review.opendev.org/c/openstack/devstack/+/826851/ Change-Id: Iddf9692817c91807fc3269547910e4f83585f07f --- lib/neutron_plugins/services/l3 | 26 ++++++++++---------------- 1 file changed, 10 insertions(+), 16 deletions(-) diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3 index cd98115746..c0d74c7728 100644 --- a/lib/neutron_plugins/services/l3 +++ b/lib/neutron_plugins/services/l3 @@ -147,10 +147,6 @@ function _neutron_get_ext_gw_interface { } function create_neutron_initial_network { - local project_id - project_id=$(openstack project list | grep " demo " | get_field 1) - die_if_not_set $LINENO project_id "Failure retrieving project_id for demo" - # Allow drivers that need to create an initial network to do so here if type -p neutron_plugin_create_initial_network_profile > /dev/null; then neutron_plugin_create_initial_network_profile $PHYSICAL_NETWORK @@ -171,14 +167,14 @@ function create_neutron_initial_network { die_if_not_set $LINENO PHYSICAL_NETWORK "You must specify the PHYSICAL_NETWORK" die_if_not_set $LINENO PROVIDER_NETWORK_TYPE "You must specify the PROVIDER_NETWORK_TYPE" NET_ID=$(openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" network create $PHYSICAL_NETWORK --provider-network-type $PROVIDER_NETWORK_TYPE --provider-physical-network "$PHYSICAL_NETWORK" ${SEGMENTATION_ID:+--provider-segment $SEGMENTATION_ID} --share | grep ' id ' | get_field 2) - die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PHYSICAL_NETWORK $project_id" + die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PHYSICAL_NETWORK" if [[ "$IP_VERSION" =~ 4.* ]]; then if [ -z $SUBNETPOOL_V4_ID ]; then fixed_range_v4=$FIXED_RANGE fi SUBNET_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" subnet create --ip-version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} $PROVIDER_SUBNET_NAME --gateway $NETWORK_GATEWAY ${SUBNETPOOL_V4_ID:+--subnet-pool $SUBNETPOOL_V4_ID} --network $NET_ID ${fixed_range_v4:+--subnet-range $fixed_range_v4} | grep ' id ' | get_field 2) - die_if_not_set $LINENO SUBNET_ID "Failure creating SUBNET_ID for $PROVIDER_SUBNET_NAME $project_id" + die_if_not_set $LINENO SUBNET_ID "Failure creating SUBNET_ID for $PROVIDER_SUBNET_NAME" fi if [[ "$IP_VERSION" =~ .*6 ]]; then @@ -188,7 +184,7 @@ function create_neutron_initial_network { fixed_range_v6=$IPV6_PROVIDER_FIXED_RANGE fi IPV6_SUBNET_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" subnet create --ip-version 6 --gateway $IPV6_PROVIDER_NETWORK_GATEWAY $IPV6_PROVIDER_SUBNET_NAME ${SUBNETPOOL_V6_ID:+--subnet-pool $SUBNETPOOL_V6_ID} --network $NET_ID ${fixed_range_v6:+--subnet-range $fixed_range_v6} | grep ' id ' | get_field 2) - die_if_not_set $LINENO IPV6_SUBNET_ID "Failure creating IPV6_SUBNET_ID for $IPV6_PROVIDER_SUBNET_NAME $project_id" + die_if_not_set $LINENO IPV6_SUBNET_ID "Failure creating IPV6_SUBNET_ID for $IPV6_PROVIDER_SUBNET_NAME" fi if [[ $Q_AGENT == "openvswitch" ]]; then @@ -198,16 +194,16 @@ function create_neutron_initial_network { fi else NET_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" network create "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2) - die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PRIVATE_NETWORK_NAME $project_id" + die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PRIVATE_NETWORK_NAME" if [[ "$IP_VERSION" =~ 4.* ]]; then # Create IPv4 private subnet - SUBNET_ID=$(_neutron_create_private_subnet_v4 $project_id) + SUBNET_ID=$(_neutron_create_private_subnet_v4) fi if 
[[ "$IP_VERSION" =~ .*6 ]]; then # Create IPv6 private subnet - IPV6_SUBNET_ID=$(_neutron_create_private_subnet_v6 $project_id) + IPV6_SUBNET_ID=$(_neutron_create_private_subnet_v6) fi fi @@ -216,11 +212,11 @@ function create_neutron_initial_network { if [[ "$Q_L3_ROUTER_PER_TENANT" == "True" ]]; then # create a tenant-owned router. ROUTER_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" router create $Q_ROUTER_NAME | grep ' id ' | get_field 2) - die_if_not_set $LINENO ROUTER_ID "Failure creating ROUTER_ID for $project_id $Q_ROUTER_NAME" + die_if_not_set $LINENO ROUTER_ID "Failure creating router $Q_ROUTER_NAME" else # Plugin only supports creating a single router, which should be admin owned. ROUTER_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router create $Q_ROUTER_NAME | grep ' id ' | get_field 2) - die_if_not_set $LINENO ROUTER_ID "Failure creating ROUTER_ID for $Q_ROUTER_NAME" + die_if_not_set $LINENO ROUTER_ID "Failure creating router $Q_ROUTER_NAME" fi EXTERNAL_NETWORK_FLAGS="--external" @@ -249,7 +245,6 @@ function create_neutron_initial_network { # Create private IPv4 subnet function _neutron_create_private_subnet_v4 { - local project_id=$1 if [ -z $SUBNETPOOL_V4_ID ]; then fixed_range_v4=$FIXED_RANGE fi @@ -263,13 +258,12 @@ function _neutron_create_private_subnet_v4 { subnet_params+="--network $NET_ID $PRIVATE_SUBNET_NAME" local subnet_id subnet_id=$(openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" subnet create $subnet_params | grep ' id ' | get_field 2) - die_if_not_set $LINENO subnet_id "Failure creating private IPv4 subnet for $project_id" + die_if_not_set $LINENO subnet_id "Failure creating private IPv4 subnet" echo $subnet_id } # Create private IPv6 subnet function _neutron_create_private_subnet_v6 { - local project_id=$1 die_if_not_set $LINENO IPV6_RA_MODE "IPV6 RA Mode not set" die_if_not_set $LINENO IPV6_ADDRESS_MODE "IPV6 Address Mode not set" local ipv6_modes="--ipv6-ra-mode $IPV6_RA_MODE --ipv6-address-mode $IPV6_ADDRESS_MODE" @@ -285,7 +279,7 @@ function _neutron_create_private_subnet_v6 { subnet_params+="$ipv6_modes --network $NET_ID $IPV6_PRIVATE_SUBNET_NAME " local ipv6_subnet_id ipv6_subnet_id=$(openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" subnet create $subnet_params | grep ' id ' | get_field 2) - die_if_not_set $LINENO ipv6_subnet_id "Failure creating private IPv6 subnet for $project_id" + die_if_not_set $LINENO ipv6_subnet_id "Failure creating private IPv6 subnet" echo $ipv6_subnet_id } From 189c7ff14222fe365a7376e7ef7171bfb2c74b24 Mon Sep 17 00:00:00 2001 From: Martin Kopec Date: Fri, 25 Mar 2022 14:06:52 +0100 Subject: [PATCH 198/574] Update DEVSTACK_SERIES to zed stable/yoga branch has been created now and current master is for zed. Change-Id: I8743a3440a0ce96acb24b34971548b43ae7c8d4c --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index e48fd81d16..d22fa88373 100644 --- a/stackrc +++ b/stackrc @@ -235,7 +235,7 @@ REQUIREMENTS_DIR=${REQUIREMENTS_DIR:-$DEST/requirements} # Setting the variable to 'ALL' will activate the download for all # libraries. -DEVSTACK_SERIES="yoga" +DEVSTACK_SERIES="zed" ############## # From 8dc342d400e4e19541bdd0627a746052875364c3 Mon Sep 17 00:00:00 2001 From: zhouyanbing Date: Sat, 26 Mar 2022 10:44:40 +0800 Subject: [PATCH 199/574] remove unuseful local variable define the local varibale: api_cell_conf in start_nova_rest function is unuseful, so remove it now. 
Change-Id: I0019ce807cf3905ee246b684fce2abcb46336306 --- lib/nova | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/nova b/lib/nova index 509cba6ff2..4c14374d0f 100644 --- a/lib/nova +++ b/lib/nova @@ -1045,7 +1045,6 @@ function start_nova_rest { local old_path=$PATH export PATH=$NOVA_BIN_DIR:$PATH - local api_cell_conf=$NOVA_CONF local compute_cell_conf=$NOVA_CONF run_process n-sch "$NOVA_BIN_DIR/nova-scheduler --config-file $compute_cell_conf" From 5c51a95d10ba886fc9136e804844f60bc71aecf9 Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Mon, 28 Mar 2022 14:00:54 +0200 Subject: [PATCH 200/574] Drop setup.py and setup.cfg devstack isn't a python project, these were introduced only for docs building and made redundant with [0]. We can remove them now. [0] Iedcc008b170821aa74acefc02ec6a243a0dc307c Signed-off-by: Dr. Jens Harbott Change-Id: I90ca1c6918c016d10c579fbae49d13fff1ed59af --- setup.cfg | 12 ------------ setup.py | 22 ---------------------- 2 files changed, 34 deletions(-) delete mode 100644 setup.cfg delete mode 100755 setup.py diff --git a/setup.cfg b/setup.cfg deleted file mode 100644 index a4e621f6df..0000000000 --- a/setup.cfg +++ /dev/null @@ -1,12 +0,0 @@ -[metadata] -name = DevStack -summary = OpenStack DevStack -description_file = - README.rst -author = OpenStack -author_email = openstack-discuss@lists.openstack.org -home_page = https://docs.openstack.org/devstack/latest -classifier = - Intended Audience :: Developers - License :: OSI Approved :: Apache Software License - Operating System :: POSIX :: Linux diff --git a/setup.py b/setup.py deleted file mode 100755 index 70c2b3f32b..0000000000 --- a/setup.py +++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/env python -# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT -import setuptools - -setuptools.setup( - setup_requires=['pbr'], - pbr=True) From 45b029064f3f9ebb94cca97e572d9c0500abe21f Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Fri, 25 Mar 2022 22:23:04 -0500 Subject: [PATCH 201/574] Move openEuler job to experimental pipeline OpenEuler job fails 100% of the time. As discussed in QA meeting, we agreed to move OpenEuler job to experimental pipeline. - https://meetings.opendev.org/meetings/qa/2022/qa.2022-03-22-15.00.log.html#l-76 Once it is fixed, we can think of adding back to regular pipeline. 
Change-Id: I831889a09fabe5bed5522d17e352ec8009eac321 --- .zuul.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.zuul.yaml b/.zuul.yaml index 067d3f5b08..0dda2624d2 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -849,7 +849,6 @@ - devstack-platform-centos-8-stream - devstack-platform-centos-9-stream - devstack-platform-debian-bullseye - - devstack-platform-openEuler-20.03-SP2 - devstack-multinode - devstack-unit-tests - openstack-tox-bashate @@ -948,6 +947,7 @@ experimental: jobs: + - devstack-platform-openEuler-20.03-SP2 - nova-multi-cell - nova-next - neutron-fullstack-with-uwsgi From f4a703661ebea05690fadf93fd13df6e54a49b59 Mon Sep 17 00:00:00 2001 From: afariasa Date: Wed, 6 Apr 2022 15:23:11 +0000 Subject: [PATCH 202/574] Add OpenStack two nodes nodeset for Centos 9 Change-Id: I01c8e5e0e88d0dcfe778f19548a2e268406ef6bf --- .zuul.yaml | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/.zuul.yaml b/.zuul.yaml index 0dda2624d2..1c517f1def 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -176,6 +176,36 @@ nodes: - compute1 +- nodeset: + name: openstack-two-node-centos-9-stream + nodes: + - name: controller + label: centos-9-stream + - name: compute1 + label: centos-9-stream + groups: + # Node where tests are executed and test results collected + - name: tempest + nodes: + - controller + # Nodes running the compute service + - name: compute + nodes: + - controller + - compute1 + # Nodes that are not the controller + - name: subnode + nodes: + - compute1 + # Switch node for multinode networking setup + - name: switch + nodes: + - controller + # Peer nodes for multinode networking setup + - name: peers + nodes: + - compute1 + - nodeset: name: openstack-two-node-focal nodes: From eca9783a0ad9c7d7e2e8267457be8d4dd8b55502 Mon Sep 17 00:00:00 2001 From: Sean Mooney Date: Wed, 9 Mar 2022 23:26:13 +0000 Subject: [PATCH 203/574] ensure /usr/local/bin in in path osc is typicaly installed in /usr/local/bin to avoid command not found errors when invoking osc in devstack ensure that /usr/local/bin is included in the PATH. Change-Id: I605fbc4b131149bf5d1b6307b360fe365c680b1a --- stack.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 0082b99f11..6e9ced985e 100755 --- a/stack.sh +++ b/stack.sh @@ -67,7 +67,9 @@ unset `env | grep -E '^OS_' | cut -d = -f 1` umask 022 # Not all distros have sbin in PATH for regular users. 
-PATH=$PATH:/usr/local/sbin:/usr/sbin:/sbin +# osc will normally be installed at /usr/local/bin/openstack so ensure +# /usr/local/bin is also in the path +PATH=$PATH:/usr/local/bin:/usr/local/sbin:/usr/sbin:/sbin # Keep track of the DevStack directory TOP_DIR=$(cd $(dirname "$0") && pwd) From bfbd2be00b38fbc4a5fd082cf44e0fdf06cc5330 Mon Sep 17 00:00:00 2001 From: Artur Angiel Date: Sun, 10 Apr 2022 11:31:21 +0200 Subject: [PATCH 204/574] Added recursive for deletion of $OVN_RUNDIR After ./unstack.sh trying to 'enable_plugin venus https://opendev.org/openstack/venus' gived following error: +lib/neutron_plugins/ovn_agent:install_ovn:363 sudo ln -s /var/run/openvswitch /var/run/ovn ln: failed to create symbolic link '/var/run/ovn/openvswitch': File exists which led to: +lib/neutron_plugins/ovn_agent:cleanup_ovn:801 sudo rm -f /var/run/ovn rm: cannot remove '/var/run/ovn': Is a directory Change-Id: I1cafdc0c71093ed7249bb9748b57d51110986686 --- lib/neutron_plugins/ovn_agent | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index 927896b70b..2938f472bc 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -798,5 +798,5 @@ function cleanup_ovn { _cleanup $ovs_path fi - sudo rm -f $OVN_RUNDIR + sudo rm -rf $OVN_RUNDIR } From 676dcaf94487665882be048cfe1f3206d6807e0f Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Wed, 13 Apr 2022 15:04:46 +1000 Subject: [PATCH 205/574] Mark our source trees as safe for git to use as other users git commit [1] introduced a new behaviour to work around a CVE that disallows any git operations in directories not owned by the current user. This may seem unrelated to installation, but it plays havoc with PBR, which calls out to git to get to get revision history. So if you are "pip install"-ing from a source tree you don't own, the PBR git calls in that tree now fail and the install blows up. This plays havoc with our model. Firstly, we checkout all code as "stack" then install it globally with "sudo" (i.e. root) -- which breaks. We also have cases of essentially the opposite -- checkouts we have installed as root, but then run tox in them as a regular user; tox wants to install the source in its venv but now we have another user conflict. This uses the only available configuration option to avoid that by globally setting the source directories we clone as safe. This is an encroachment of the global system for sure, but is about the only switch available at the moment. For discussion of other approaches, see [2]. Related-Bug: https://bugs.launchpad.net/devstack/+bug/1968798 [1] https://github.com/git/git/commit/8959555cee7ec045958f9b6dd62e541affb7e7d9 [2] https://review.opendev.org/c/openstack/devstack/+/837636 Change-Id: Ib9896a99b6d6c4d359ee412743ce30512b3c4fb7 --- functions-common | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/functions-common b/functions-common index b2cf9d99c6..ddef2e4980 100644 --- a/functions-common +++ b/functions-common @@ -673,6 +673,13 @@ function git_clone { fi fi + # NOTE(ianw) 2022-04-13 : commit [1] has broken many assumptions + # about how we clone and work with repos. Mark them safe globally + # as a work-around. 
+ # + # [1] https://github.com/git/git/commit/8959555cee7ec045958f9b6dd62e541affb7e7d9 + sudo git config --global --add safe.directory ${git_dest} + # print out the results so we know what change was used in the logs cd $git_dest git show --oneline | head -1 From 4baeb3b51fcb6196fa311f823ad3f0ac13ccf341 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Wed, 13 Apr 2022 13:44:07 -0700 Subject: [PATCH 206/574] Write safe.directory items to system git config This is necessary for more consistent behavior across multiple distro versions. Apparently somewhere along the way, git started looking at the current user's home directory instead of $HOME. Related-Bug: https://bugs.launchpad.net/devstack/+bug/1968798 Change-Id: I941ef5ea90970a0901236afe81c551aaf24ac1d8 --- functions-common | 7 ++++++- unstack.sh | 5 +++++ 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/functions-common b/functions-common index ddef2e4980..8651604b79 100644 --- a/functions-common +++ b/functions-common @@ -677,8 +677,13 @@ function git_clone { # about how we clone and work with repos. Mark them safe globally # as a work-around. # + # NOTE(danms): On bionic (and likely others) git-config may write + # ~stackuser/.gitconfig if not run with sudo -H. Using --system + # writes these changes to /etc/gitconfig which is more + # discoverable anyway. + # # [1] https://github.com/git/git/commit/8959555cee7ec045958f9b6dd62e541affb7e7d9 - sudo git config --global --add safe.directory ${git_dest} + sudo git config --system --add safe.directory ${git_dest} # print out the results so we know what change was used in the logs cd $git_dest diff --git a/unstack.sh b/unstack.sh index 4b57b6e344..813f9a8117 100755 --- a/unstack.sh +++ b/unstack.sh @@ -181,3 +181,8 @@ fi clean_pyc_files rm -Rf $DEST/async + +# Clean any safe.directory items we wrote into the global +# gitconfig. We can identify the relevant ones by checking that they +# point to somewhere in our $DEST directory. +sudo sed -i "/directory=${DEST}/ d" /etc/gitconfig From 28bed125a2555fb3da778898d6ae310175972d3c Mon Sep 17 00:00:00 2001 From: Mohammed Naser Date: Wed, 20 Apr 2022 15:11:39 -0400 Subject: [PATCH 207/574] nova: unset cpu_model on aarch64 Without this, running DevStack on an `aarch64` environment will end up in cpu_model set to "Nehalem" and cpu_mode set to "host-passthrough" which does not work. This patch drops that value under aarch64 environments. Change-Id: I30be5a388dda5ccf08718670dbb14a28a4a8a8eb --- lib/nova_plugins/hypervisor-libvirt | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt index c1cd132548..6c6f4c4261 100644 --- a/lib/nova_plugins/hypervisor-libvirt +++ b/lib/nova_plugins/hypervisor-libvirt @@ -56,6 +56,10 @@ function configure_nova_hypervisor { # arm64-specific configuration if is_arch "aarch64"; then iniset $NOVA_CONF libvirt cpu_mode "host-passthrough" + # NOTE(mnaser): We cannot have `cpu_model` set if the `cpu_mode` is + # set to `host-passthrough`, or `nova-compute` refuses to + # start. + inidelete $NOVA_CONF libvirt cpu_model fi if isset ENABLE_FILE_INJECTION; then From 7fa24750a676a44ab224206cc13096f904057d44 Mon Sep 17 00:00:00 2001 From: Mohammed Naser Date: Wed, 20 Apr 2022 15:42:43 -0400 Subject: [PATCH 208/574] ovn: use bundled ovs We are using the latest OVS, however, OVN needs to build using the OVS submodule since some of the signatures don't work[1]. 
[1]: https://github.com/ovn-org/ovn/issues/128 Change-Id: I3ad7e5e80f1141c3d94f7ce7c8b8f8fdb9fb7c3c --- lib/neutron_plugins/ovn_agent | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index 927896b70b..bf1b23a82b 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -320,8 +320,24 @@ function compile_ovn { ./boot.sh fi + # NOTE(mnaser): OVN requires that you build using the OVS from the + # submodule. + # + # https://github.com/ovn-org/ovn/blob/3fb397b63663297acbcbf794e1233951222ae5af/Documentation/intro/install/general.rst#bootstrapping + # https://github.com/ovn-org/ovn/issues/128 + git submodule update --init + pushd ovs + if [ ! -f configure ] ; then + ./boot.sh + fi + if [ ! -f config.status ] || [ configure -nt config.status ] ; then + ./configure + fi + make -j$(($(nproc) + 1)) + popd + if [ ! -f config.status ] || [ configure -nt config.status ] ; then - ./configure --with-ovs-source=$DEST/$OVS_REPO_NAME $prefix $localstatedir + ./configure $prefix $localstatedir fi make -j$(($(nproc) + 1)) sudo make install From c2772c2984e4f29aa6032725e4f7d8680a54ed19 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Fri, 8 Apr 2022 08:48:49 -0700 Subject: [PATCH 209/574] Gather performance data after tempest This makes us gather a bunch of consistent statistics after we run tempest that can be use to measure the impact of a given change. These are stable metrics such as "number of DB queries made" and "how much memory is each service using after a tempest run." Note that this will always run after devstack to generate the JSON file, but there are two things that control its completeness: - MYSQL_GATHER_PERFORMANCE must be enabled to get per-db stats - Unless tls-proxy is enabled, we will only get API stats for keystone Change-Id: Ie3b1504256dc1c9c6b59634e86fa98494bcb07b1 --- .zuul.yaml | 1 + lib/databases/mysql | 9 + playbooks/post.yaml | 3 + roles/capture-performance-data/README.rst | 25 +++ .../defaults/main.yaml | 3 + .../capture-performance-data/tasks/main.yaml | 15 ++ stackrc | 4 + tools/get-stats.py | 155 ++++++++++++++++++ 8 files changed, 215 insertions(+) create mode 100644 roles/capture-performance-data/README.rst create mode 100644 roles/capture-performance-data/defaults/main.yaml create mode 100644 roles/capture-performance-data/tasks/main.yaml create mode 100755 tools/get-stats.py diff --git a/.zuul.yaml b/.zuul.yaml index 0dda2624d2..329cb527f3 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -389,6 +389,7 @@ '{{ devstack_log_dir }}/worlddump-latest.txt': logs '{{ devstack_full_log}}': logs '{{ stage_dir }}/verify_tempest_conf.log': logs + '{{ stage_dir }}/performance.json': logs '{{ stage_dir }}/apache': logs '{{ stage_dir }}/apache_config': logs '{{ stage_dir }}/etc': logs diff --git a/lib/databases/mysql b/lib/databases/mysql index 0f45273d4b..6b3ea0287c 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -150,6 +150,15 @@ function configure_database_mysql { iniset -sudo $my_conf mysqld log-queries-not-using-indexes 1 fi + if [[ "$MYSQL_GATHER_PERFORMANCE" == "True" ]]; then + echo "enabling MySQL performance_schema items" + # Enable long query history + iniset -sudo $my_conf mysqld \ + performance-schema-consumer-events-statements-history-long TRUE + iniset -sudo $my_conf mysqld \ + performance_schema_events_stages_history_long_size 1000000 + fi + restart_service $MYSQL_SERVICE_NAME } diff --git a/playbooks/post.yaml b/playbooks/post.yaml index 
9e66f20e9e..d8d5f6833c 100644 --- a/playbooks/post.yaml +++ b/playbooks/post.yaml @@ -20,6 +20,9 @@ roles: - export-devstack-journal - apache-logs-conf + # This should run as early as possible to make sure we don't skew + # the post-tempest results with other activities. + - capture-performance-data - devstack-project-conf # capture-system-logs should be the last role before stage-output - capture-system-logs diff --git a/roles/capture-performance-data/README.rst b/roles/capture-performance-data/README.rst new file mode 100644 index 0000000000..b7a37c223f --- /dev/null +++ b/roles/capture-performance-data/README.rst @@ -0,0 +1,25 @@ +Generate performance logs for staging + +Captures usage information from mysql, systemd, apache logs, and other +parts of the system and generates a performance.json file in the +staging directory. + +**Role Variables** + +.. zuul:rolevar:: stage_dir + :default: {{ ansible_user_dir }} + + The base stage directory + +.. zuul:rolevar:: devstack_conf_dir + :default: /opt/stack + + The base devstack destination directory + +.. zuul:rolevar:: debian_suse_apache_deref_logs + + The apache logs found in the debian/suse locations + +.. zuul:rolevar:: redhat_apache_deref_logs + + The apache logs found in the redhat locations diff --git a/roles/capture-performance-data/defaults/main.yaml b/roles/capture-performance-data/defaults/main.yaml new file mode 100644 index 0000000000..7bd79f4c4f --- /dev/null +++ b/roles/capture-performance-data/defaults/main.yaml @@ -0,0 +1,3 @@ +devstack_base_dir: /opt/stack +devstack_conf_dir: "{{ devstack_base_dir }}" +stage_dir: "{{ ansible_user_dir }}" diff --git a/roles/capture-performance-data/tasks/main.yaml b/roles/capture-performance-data/tasks/main.yaml new file mode 100644 index 0000000000..2d2cfe4b8b --- /dev/null +++ b/roles/capture-performance-data/tasks/main.yaml @@ -0,0 +1,15 @@ +- name: Generate statistics + shell: + executable: /bin/bash + cmd: | + source {{ devstack_conf_dir }}/stackrc + python3 {{ devstack_conf_dir }}/tools/get-stats.py \ + --db-user="$DATABASE_USER" \ + --db-pass="$DATABASE_PASSWORD" \ + --db-host="$DATABASE_HOST" \ + {{ apache_logs }} > {{ stage_dir }}/performance.json + vars: + apache_logs: >- + {% for i in debian_suse_apache_deref_logs.results | default([]) + redhat_apache_deref_logs.results | default([]) %} + --apache-log="{{ i.stat.path }}" + {% endfor %} diff --git a/stackrc b/stackrc index d22fa88373..c3254dcce4 100644 --- a/stackrc +++ b/stackrc @@ -193,6 +193,10 @@ ADDITIONAL_VENV_PACKAGES=${ADITIONAL_VENV_PACKAGES:-""} # (currently only implemented for MySQL backend) DATABASE_QUERY_LOGGING=$(trueorfalse False DATABASE_QUERY_LOGGING) +# This can be used to turn on various non-default items in the +# performance_schema that are of interest to us +MYSQL_GATHER_PERFORMANCE=$(trueorfalse True MYSQL_GATHER_PERFORMANCE) + # Set a timeout for git operations. If git is still running when the # timeout expires, the command will be retried up to 3 times. 
This is # in the format for timeout(1); diff --git a/tools/get-stats.py b/tools/get-stats.py new file mode 100755 index 0000000000..dc0bd0f9e5 --- /dev/null +++ b/tools/get-stats.py @@ -0,0 +1,155 @@ +#!/usr/bin/python3 + +import argparse +import datetime +import glob +import itertools +import json +import os +import psutil +import re +import socket +import subprocess +import sys +import pymysql + +# https://www.elastic.co/blog/found-crash-elasticsearch#mapping-explosion + + +def tryint(value): + try: + return int(value) + except (ValueError, TypeError): + return value + + +def get_service_stats(service): + stats = {'MemoryCurrent': 0} + output = subprocess.check_output(['/usr/bin/systemctl', 'show', service] + + ['-p%s' % stat for stat in stats]) + for line in output.decode().split('\n'): + if not line: + continue + stat, val = line.split('=') + stats[stat] = int(val) + + return stats + + +def get_services_stats(): + services = [os.path.basename(s) for s in + glob.glob('/etc/systemd/system/devstack@*.service')] + return [dict(service=service, **get_service_stats(service)) + for service in services] + + +def get_process_stats(proc): + cmdline = proc.cmdline() + if 'python' in cmdline[0]: + cmdline = cmdline[1:] + return {'cmd': cmdline[0], + 'pid': proc.pid, + 'args': ' '.join(cmdline[1:]), + 'rss': proc.memory_info().rss} + + +def get_processes_stats(matches): + me = os.getpid() + procs = psutil.process_iter() + + def proc_matches(proc): + return me != proc.pid and any( + re.search(match, ' '.join(proc.cmdline())) + for match in matches) + + return [ + get_process_stats(proc) + for proc in procs + if proc_matches(proc)] + + +def get_db_stats(host, user, passwd): + dbs = [] + db = pymysql.connect(host=host, user=user, password=passwd, + database='performance_schema', + cursorclass=pymysql.cursors.DictCursor) + with db: + with db.cursor() as cur: + cur.execute( + 'SELECT COUNT(*) AS queries,current_schema AS db FROM ' + 'events_statements_history_long GROUP BY current_schema') + for row in cur: + dbs.append({k: tryint(v) for k, v in row.items()}) + return dbs + + +def get_http_stats_for_log(logfile): + stats = {} + for line in open(logfile).readlines(): + m = re.search('"([A-Z]+) /([^" ]+)( HTTP/1.1)?" 
([0-9]{3}) ([0-9]+)', + line) + if m: + method = m.group(1) + path = m.group(2) + status = m.group(4) + size = int(m.group(5)) + + try: + service, rest = path.split('/', 1) + except ValueError: + # Root calls like "GET /identity" + service = path + rest = '' + + stats.setdefault(service, {'largest': 0}) + stats[service].setdefault(method, 0) + stats[service][method] += 1 + stats[service]['largest'] = max(stats[service]['largest'], size) + + # Flatten this for ES + return [{'service': service, 'log': os.path.basename(logfile), + **vals} + for service, vals in stats.items()] + + +def get_http_stats(logfiles): + return list(itertools.chain.from_iterable(get_http_stats_for_log(log) + for log in logfiles)) + + +def get_report_info(): + return { + 'timestamp': datetime.datetime.now().isoformat(), + 'hostname': socket.gethostname(), + } + + +if __name__ == '__main__': + process_defaults = ['privsep', 'mysqld', 'erlang', 'etcd'] + parser = argparse.ArgumentParser() + parser.add_argument('--db-user', default='root', + help=('MySQL user for collecting stats ' + '(default: "root")')) + parser.add_argument('--db-pass', default=None, + help='MySQL password for db-user') + parser.add_argument('--db-host', default='localhost', + help='MySQL hostname') + parser.add_argument('--apache-log', action='append', default=[], + help='Collect API call stats from this apache log') + parser.add_argument('--process', action='append', + default=process_defaults, + help=('Include process stats for this cmdline regex ' + '(default is %s)' % ','.join(process_defaults))) + args = parser.parse_args() + + data = { + 'services': get_services_stats(), + 'db': args.db_pass and get_db_stats(args.db_host, + args.db_user, + args.db_pass) or [], + 'processes': get_processes_stats(args.process), + 'api': get_http_stats(args.apache_log), + 'report': get_report_info(), + } + + print(json.dumps(data, indent=2)) From 4423450eb334aad6f889ddae2ab9b9a2a2197a4a Mon Sep 17 00:00:00 2001 From: Zhou Yanbing Date: Thu, 21 Apr 2022 15:00:41 +0800 Subject: [PATCH 210/574] modify the sample value of LOGDAYS the value of LOGDAYS in samples/local.conf is 2, so change the value in the comment and the sample value in the document to be consistent with it. Change-Id: I5822bbf1d6ad347c67c886be1e3325113d079114 --- doc/source/configuration.rst | 2 +- samples/local.conf | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index dd8f21faaf..40a8725b8d 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -279,7 +279,7 @@ number of days of old log files to keep. :: - LOGDAYS=1 + LOGDAYS=2 Some coloring is used during the DevStack runs to make it easier to see what is going on. This can be disabled with:: diff --git a/samples/local.conf b/samples/local.conf index 8b76137c38..55b729809d 100644 --- a/samples/local.conf +++ b/samples/local.conf @@ -49,7 +49,7 @@ SERVICE_PASSWORD=$ADMIN_PASSWORD # path of the destination log file. A timestamp will be appended to the given name. LOGFILE=$DEST/logs/stack.sh.log -# Old log files are automatically removed after 7 days to keep things neat. Change +# Old log files are automatically removed after 2 days to keep things neat. Change # the number of days by setting ``LOGDAYS``. 
LOGDAYS=2 From af75f689fa5d03446593c3b4c7724f0922f0f19a Mon Sep 17 00:00:00 2001 From: Sean Mooney Date: Thu, 14 Apr 2022 21:48:38 +0100 Subject: [PATCH 211/574] install mod_ssl on centos 9 stream by default This change adds mod_ssl to the default set of rpms installed on rpm based distros. this is required if the tls-proxy service is enabled for multi node centos based jobs. Change-Id: I52652de88352094c824da68e5baf7db4c17cb027 --- files/rpms/general | 1 + 1 file changed, 1 insertion(+) diff --git a/files/rpms/general b/files/rpms/general index 163a7c8f24..668705b1c3 100644 --- a/files/rpms/general +++ b/files/rpms/general @@ -17,6 +17,7 @@ libxml2-devel # lxml libxslt-devel # lxml libyaml-devel make # dist:openEuler-20.03 +mod_ssl # required for tls-proxy on centos 9 stream computes net-tools openssh-server openssl From 7191c5e7e7edb1e2f28c0ce71294f61b409cf16b Mon Sep 17 00:00:00 2001 From: Balazs Gibizer Date: Fri, 22 Apr 2022 12:01:13 +0200 Subject: [PATCH 212/574] Use proper sed separator for paths I941ef5ea90970a0901236afe81c551aaf24ac1d8 added a sed command that should match and delete path values but used '/' as sed separator. This leads to error in unstack.sh runs when the path also contains '/': +./unstack.sh:main:188 sudo sed -i '/directory=/opt/stack/ d' /etc/gitconfig sed: -e expression #1, char 13: unknown command: `o' So this patch replace '/' separator with '+'. Change-Id: I06811c0d9ee7ecddf84ef1c6dd6cff5129dbf4b1 --- unstack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unstack.sh b/unstack.sh index 813f9a8117..a36af3fb59 100755 --- a/unstack.sh +++ b/unstack.sh @@ -185,4 +185,4 @@ rm -Rf $DEST/async # Clean any safe.directory items we wrote into the global # gitconfig. We can identify the relevant ones by checking that they # point to somewhere in our $DEST directory. -sudo sed -i "/directory=${DEST}/ d" /etc/gitconfig +sudo sed -i "\+directory = ${DEST}+ d" /etc/gitconfig From c6dfd169aeb3fbf2ba73ad8403cc1feb7c6ecf76 Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Mon, 25 Apr 2022 15:19:06 -0500 Subject: [PATCH 213/574] Drop centos 8 stream testing In Zed cycle testing runtime, we are targetting the centos 9 stream - https://governance.openstack.org/tc/reference/runtimes/zed.html With dropping the python 3.6 support, project started adding python 3.8 as minimum, example nova: - https://github.com/openstack/nova/blob/56b5aed08c6a3ed81b78dc216f0165ebfe3c3350/setup.cfg#L13 with that, centos 8 stream job is failing 100% - https://zuul.openstack.org/build/970d029dc96742c3aa0f6932a35e97cf - https://zuul.openstack.org/builds?job_name=devstack-platform-centos-8-stream&skip=0 This commit drops centos-8-stream testing so that we focus on centos-9-stream. 
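For context on the failure mode: pip enforces a project's Requires-Python metadata, so once a project declares a ">=3.8" minimum it becomes uninstallable on the Python 3.6 interpreter shipped with CentOS 8 Stream. Below is a minimal sketch of that check using the packaging library; the specifier and interpreter version strings are illustrative only, not taken from any particular project.

    # Sketch of the Requires-Python check; the ">=3.8" specifier and the
    # interpreter versions below are illustrative examples only.
    from packaging.specifiers import SpecifierSet

    requires_python = SpecifierSet(">=3.8")

    for interpreter in ("3.6.8", "3.8.13", "3.9.7"):
        allowed = requires_python.contains(interpreter)
        print(f"Python {interpreter}: "
              f"{'install allowed' if allowed else 'rejected'}")
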
Change-Id: I045e67b1ca79aba1b2a7be9f88d7804c69c6d781 --- .zuul.yaml | 51 --------------------------------------------------- 1 file changed, 51 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 8fd4d0212c..a437c1cc02 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -56,16 +56,6 @@ nodes: - controller -- nodeset: - name: devstack-single-node-centos-8-stream - nodes: - - name: controller - label: centos-8-stream - groups: - - name: tempest - nodes: - - controller - - nodeset: name: devstack-single-node-centos-9-stream nodes: @@ -146,36 +136,6 @@ nodes: - compute1 -- nodeset: - name: openstack-two-node-centos-8-stream - nodes: - - name: controller - label: centos-8-stream - - name: compute1 - label: centos-8-stream - groups: - # Node where tests are executed and test results collected - - name: tempest - nodes: - - controller - # Nodes running the compute service - - name: compute - nodes: - - controller - - compute1 - # Nodes that are not the controller - - name: subnode - nodes: - - compute1 - # Switch node for multinode networking setup - - name: switch - nodes: - - controller - # Peer nodes for multinode networking setup - - name: peers - nodes: - - compute1 - - nodeset: name: openstack-two-node-centos-9-stream nodes: @@ -694,16 +654,6 @@ # we often have to rush things through devstack to stabilise the gate, # and these platforms don't have the round-the-clock support to avoid # becoming blockers in that situation. -- job: - name: devstack-platform-centos-8-stream - parent: tempest-full-py3 - description: CentOS 8 Stream platform test - nodeset: devstack-single-node-centos-8-stream - voting: false - timeout: 9000 - vars: - configure_swap_size: 4096 - - job: name: devstack-platform-centos-9-stream parent: tempest-full-py3 @@ -877,7 +827,6 @@ - devstack-ipv6 - devstack-enforce-scope - devstack-platform-fedora-latest - - devstack-platform-centos-8-stream - devstack-platform-centos-9-stream - devstack-platform-debian-bullseye - devstack-multinode From 8615563df47261d9c6dab7c5badbceb399d0e14d Mon Sep 17 00:00:00 2001 From: Grzegorz Grasza Date: Mon, 18 Oct 2021 16:52:06 +0200 Subject: [PATCH 214/574] Global option for enforcing scope (ENFORCE_SCOPE) This updates each devstack service library, to use it as the default value for service-specific RBAC configuration. Change-Id: I41061d042206c411ee3dd94ce91098e612af7ae7 --- .zuul.yaml | 5 +---- functions-common | 2 +- lib/cinder | 2 +- lib/glance | 2 +- lib/keystone | 2 +- lib/neutron | 2 +- lib/neutron-legacy | 2 +- lib/tempest | 11 ++++++++--- stackrc | 4 ++++ 9 files changed, 19 insertions(+), 13 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index fc80e6c413..0f047166fa 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -646,10 +646,7 @@ This job runs the devstack with scope checks enabled. 
vars: devstack_localrc: - # Keep enabeling the services here to run with system scope - CINDER_ENFORCE_SCOPE: true - GLANCE_ENFORCE_SCOPE: true - NEUTRON_ENFORCE_SCOPE: true + ENFORCE_SCOPE: true - job: name: devstack-multinode diff --git a/functions-common b/functions-common index b2cf9d99c6..603e7d896d 100644 --- a/functions-common +++ b/functions-common @@ -1154,7 +1154,7 @@ function is_ironic_hardware { } function is_ironic_enforce_scope { - is_service_enabled ironic && [[ "$IRONIC_ENFORCE_SCOPE" == "True" ]] && return 0 + is_service_enabled ironic && [[ "$IRONIC_ENFORCE_SCOPE" == "True" || "$ENFORCE_SCOPE" == "True" ]] && return 0 return 1 } diff --git a/lib/cinder b/lib/cinder index b029fa0db4..52818a81eb 100644 --- a/lib/cinder +++ b/lib/cinder @@ -380,7 +380,7 @@ function configure_cinder { iniset $CINDER_CONF coordination backend_url "etcd3+http://${SERVICE_HOST}:$ETCD_PORT" fi - if [[ "$CINDER_ENFORCE_SCOPE" == True ]] ; then + if [[ "$CINDER_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then iniset $CINDER_CONF oslo_policy enforce_scope true iniset $CINDER_CONF oslo_policy enforce_new_defaults true fi diff --git a/lib/glance b/lib/glance index 9bba938b9d..04b901181c 100644 --- a/lib/glance +++ b/lib/glance @@ -432,7 +432,7 @@ function configure_glance { iniset $GLANCE_API_CONF DEFAULT workers "$API_WORKERS" fi - if [[ "$GLANCE_ENFORCE_SCOPE" == True ]] ; then + if [[ "$GLANCE_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then iniset $GLANCE_API_CONF oslo_policy enforce_scope true iniset $GLANCE_API_CONF oslo_policy enforce_new_defaults true iniset $GLANCE_API_CONF DEFAULT enforce_secure_rbac true diff --git a/lib/keystone b/lib/keystone index a4c8a52121..80a136f78d 100644 --- a/lib/keystone +++ b/lib/keystone @@ -265,7 +265,7 @@ function configure_keystone { iniset $KEYSTONE_CONF security_compliance lockout_duration $KEYSTONE_LOCKOUT_DURATION iniset $KEYSTONE_CONF security_compliance unique_last_password_count $KEYSTONE_UNIQUE_LAST_PASSWORD_COUNT fi - if [[ "$KEYSTONE_ENFORCE_SCOPE" == True ]] ; then + if [[ "$KEYSTONE_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then iniset $KEYSTONE_CONF oslo_policy enforce_scope true iniset $KEYSTONE_CONF oslo_policy enforce_new_defaults true iniset $KEYSTONE_CONF oslo_policy policy_file policy.yaml diff --git a/lib/neutron b/lib/neutron index e7719d4ebc..f24ccfb1a9 100644 --- a/lib/neutron +++ b/lib/neutron @@ -632,7 +632,7 @@ function configure_neutron { # configure_rbac_policies() - Configure Neutron to enforce new RBAC # policies and scopes if NEUTRON_ENFORCE_SCOPE == True function configure_rbac_policies { - if [ "$NEUTRON_ENFORCE_SCOPE" == "True" ]; then + if [[ "$NEUTRON_ENFORCE_SCOPE" == "True" || "ENFORCE_SCOPE" == "True" ]]; then iniset $NEUTRON_CONF oslo_policy enforce_new_defaults True iniset $NEUTRON_CONF oslo_policy enforce_scope True else diff --git a/lib/neutron-legacy b/lib/neutron-legacy index b906a1b2ff..253b457ae1 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -500,7 +500,7 @@ function configure_neutron_after_post_config { # configure_rbac_policies() - Configure Neutron to enforce new RBAC # policies and scopes if NEUTRON_ENFORCE_SCOPE == True function configure_rbac_policies { - if [ "$NEUTRON_ENFORCE_SCOPE" == "True" ]; then + if [[ "$NEUTRON_ENFORCE_SCOPE" == "True" || "$ENFORCE_SCOPE" == True ]]; then iniset $NEUTRON_CONF oslo_policy enforce_new_defaults True iniset $NEUTRON_CONF oslo_policy enforce_scope True else diff --git a/lib/tempest b/lib/tempest index 
45046632b4..1fd4184763 100644 --- a/lib/tempest +++ b/lib/tempest @@ -607,14 +607,19 @@ function configure_tempest { # If services enable the enforce_scope for their policy # we need to enable the same on Tempest side so that # test can be run with scoped token. - if [[ "$KEYSTONE_ENFORCE_SCOPE" == True ]] ; then + if [[ "$KEYSTONE_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then iniset $TEMPEST_CONFIG enforce_scope keystone true iniset $TEMPEST_CONFIG auth admin_system 'all' iniset $TEMPEST_CONFIG auth admin_project_name '' fi - iniset $TEMPEST_CONFIG enforce_scope glance "$GLANCE_ENFORCE_SCOPE" - iniset $TEMPEST_CONFIG enforce_scope cinder "$CINDER_ENFORCE_SCOPE" + if [[ "$GLANCE_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then + iniset $TEMPEST_CONFIG enforce_scope glance true + fi + + if [[ "$CINDER_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then + iniset $TEMPEST_CONFIG enforce_scope cinder true + fi if [ "$VIRT_DRIVER" = "libvirt" ] && [ "$LIBVIRT_TYPE" = "lxc" ]; then # libvirt-lxc does not support boot from volume or attaching volumes diff --git a/stackrc b/stackrc index 681e9dee38..72180d07f2 100644 --- a/stackrc +++ b/stackrc @@ -179,6 +179,10 @@ fi # TODO(frickler): Drop this when plugins no longer need it IDENTITY_API_VERSION=3 +# Global option for enforcing scope. If enabled, ENFORCE_SCOPE overrides +# each services ${SERVICE}_ENFORCE_SCOPE variables +ENFORCE_SCOPE=$(trueorfalse False ENFORCE_SCOPE) + # Enable use of Python virtual environments. Individual project use of # venvs are controlled by the PROJECT_VENV array; every project with # an entry in the array will be installed into the named venv. From 6964ba4a984691d79cff77506a37d0fa222a5599 Mon Sep 17 00:00:00 2001 From: Julia Kreger Date: Mon, 25 Apr 2022 08:48:20 -0700 Subject: [PATCH 215/574] Set public bridge up for v6 only configurations A long time ago, Ironic's IPv6 only job started to fail working with errors indicated the host was unreacable. Turns out, this was because the $ext_gw_interface was not being set to up, and thus could be found in a Down state, and thus the kernel would not accept routes for it. Adds an explicit step to turn up the public bridge, much as done in the IPv4 router plugin code which would also be executed in 4+6. That being said, Ironic's CI jobs are very intentionally IPv6 only to ensure that we have no chances of v4 addressing getting used at any point in time. This should allow Ironic to return it's IPv6 only CI job back to the normal check queue, once a ironic plugin issue has been resolved which was introduced while it was removed. Change-Id: I121ec8a2e9640b21a7126f2eeb23da36b4aa95bf --- lib/neutron_plugins/services/l3 | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3 index c0d74c7728..fbd4692bba 100644 --- a/lib/neutron_plugins/services/l3 +++ b/lib/neutron_plugins/services/l3 @@ -403,7 +403,10 @@ function _neutron_configure_router_v6 { ext_gw_interface=$(_neutron_get_ext_gw_interface) local ipv6_cidr_len=${IPV6_PUBLIC_RANGE#*/} - # Configure interface for public bridge + # Configure interface for public bridge by setting the interface + # to "up" in case the job is running entirely private network based + # testing. 
+ sudo ip link set $ext_gw_interface up sudo ip -6 addr replace $ipv6_ext_gw_ip/$ipv6_cidr_len dev $ext_gw_interface # Any IPv6 private subnet that uses the default IPV6 subnet pool # and that is plugged into the default router (Q_ROUTER_NAME) will From bab0c9210371e1cfa321b8528217a7f2e156d7a1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Harald=20Jens=C3=A5s?= Date: Tue, 26 Apr 2022 15:46:56 +0200 Subject: [PATCH 216/574] Use tryint() for stats value In some cases the value is [not set], in this case the conversion to integer does not work. Closes-Bug: #1970431 Change-Id: I74df7d8bc9f5cbe0709a6471cf7639caea0b58e8 --- tools/get-stats.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/get-stats.py b/tools/get-stats.py index dc0bd0f9e5..05f088ef8d 100755 --- a/tools/get-stats.py +++ b/tools/get-stats.py @@ -31,7 +31,7 @@ def get_service_stats(service): if not line: continue stat, val = line.split('=') - stats[stat] = int(val) + stats[stat] = tryint(val) return stats From 1b601c7b1e8a3ec4816cb827ccd8bf909a05debb Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Mon, 25 Apr 2022 07:47:56 -0700 Subject: [PATCH 217/574] Tolerate missing deps in get-stats.py In order to run on systems where not all requirements are present, we should be tolerant of missing external dependencies, such as psutil and pymysql. Print a warning (to stderr) and just leave out those stats in that case. Also make running the stats collector use ignore_errors:yes to avoid failures in the future. I think the stats is not critical enough to fail a job for bugs like this. Related-Bug: #1970195 Change-Id: I132b0e1f5033c4f109a8b8cc776c0877574c4a49 --- .../capture-performance-data/tasks/main.yaml | 1 + tools/get-stats.py | 24 ++++++++++++++----- 2 files changed, 19 insertions(+), 6 deletions(-) diff --git a/roles/capture-performance-data/tasks/main.yaml b/roles/capture-performance-data/tasks/main.yaml index 2d2cfe4b8b..f9bb0f7851 100644 --- a/roles/capture-performance-data/tasks/main.yaml +++ b/roles/capture-performance-data/tasks/main.yaml @@ -13,3 +13,4 @@ {% for i in debian_suse_apache_deref_logs.results | default([]) + redhat_apache_deref_logs.results | default([]) %} --apache-log="{{ i.stat.path }}" {% endfor %} + ignore_errors: yes diff --git a/tools/get-stats.py b/tools/get-stats.py index dc0bd0f9e5..2418c851f9 100755 --- a/tools/get-stats.py +++ b/tools/get-stats.py @@ -6,12 +6,24 @@ import itertools import json import os -import psutil import re import socket import subprocess import sys -import pymysql + +try: + import psutil +except ImportError: + psutil = None + print('No psutil, process information will not be included', + file=sys.stderr) + +try: + import pymysql +except ImportError: + pymysql = None + print('No pymysql, database information will not be included', + file=sys.stderr) # https://www.elastic.co/blog/found-crash-elasticsearch#mapping-explosion @@ -144,10 +156,10 @@ def get_report_info(): data = { 'services': get_services_stats(), - 'db': args.db_pass and get_db_stats(args.db_host, - args.db_user, - args.db_pass) or [], - 'processes': get_processes_stats(args.process), + 'db': pymysql and args.db_pass and get_db_stats(args.db_host, + args.db_user, + args.db_pass) or [], + 'processes': psutil and get_processes_stats(args.process) or [], 'api': get_http_stats(args.apache_log), 'report': get_report_info(), } From 42be2425d8782799d3d9f82c6f1789f8b05a4301 Mon Sep 17 00:00:00 2001 From: yatinkarel Date: Thu, 28 Apr 2022 18:15:50 +0530 Subject: [PATCH 218/574] Collect status of all services 
Would be helpful in troubleshooting services which either fails to start or takes time to start. Related-Bug: #1970679 Change-Id: Iba2fce5f8b1cd00708f092e6eb5a1fbd96e97da0 --- .zuul.yaml | 1 + roles/capture-system-logs/README.rst | 1 + roles/capture-system-logs/tasks/main.yaml | 3 +++ 3 files changed, 5 insertions(+) diff --git a/.zuul.yaml b/.zuul.yaml index a437c1cc02..2935560951 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -398,6 +398,7 @@ '{{ stage_dir }}/rpm-qa.txt': logs '{{ stage_dir }}/core': logs '{{ stage_dir }}/listen53.txt': logs + '{{ stage_dir }}/services.txt': logs '{{ stage_dir }}/deprecations.log': logs '{{ stage_dir }}/audit.log': logs /etc/ceph: logs diff --git a/roles/capture-system-logs/README.rst b/roles/capture-system-logs/README.rst index c28412457a..1376f63bfc 100644 --- a/roles/capture-system-logs/README.rst +++ b/roles/capture-system-logs/README.rst @@ -9,6 +9,7 @@ Stage a number of different logs / reports: - coredumps - dns resolver - listen53 +- services - unbound.log - deprecation messages diff --git a/roles/capture-system-logs/tasks/main.yaml b/roles/capture-system-logs/tasks/main.yaml index 905806d529..77b5ec5098 100644 --- a/roles/capture-system-logs/tasks/main.yaml +++ b/roles/capture-system-logs/tasks/main.yaml @@ -19,6 +19,9 @@ rpm -qa | sort > {{ stage_dir }}/rpm-qa.txt fi + # Services status + sudo systemctl status --all > services.txt 2>/dev/null + # NOTE(kchamart) The 'audit.log' can be useful in cases when QEMU # failed to start due to denials from SELinux — useful for CentOS # and Fedora machines. For Ubuntu (which runs AppArmor), DevStack From 1baa8905d5a3d677b5298e76621b9e08e0ed0f13 Mon Sep 17 00:00:00 2001 From: yatinkarel Date: Fri, 6 May 2022 17:53:54 +0530 Subject: [PATCH 219/574] Wait for OVN dbs also along with sockets When OVN is setup from distro packages, the main service is ovn-central which when restarted, restarts ovn-northd, ovn nb and db services. And during the restart ovn dbs(ovnnb_db.db and ovnsb_db.db) are created, which may sometime takes time as seen with ubuntu jammy tests[1]. We already checking for socket's file to be available, let's also check for db files as without it ovn-*ctl operations succeed but changes are not persisted until db files are available and changes are lost with the restart. [1] https://review.opendev.org/c/openstack/devstack/+/839389 Change-Id: I178da7af8cba8bcc8a67174e439df7c0f2c7d4d5 --- lib/neutron_plugins/ovn_agent | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index 927896b70b..9022f2d382 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -169,6 +169,17 @@ Q_LOG_DRIVER_LOG_BASE=${Q_LOG_DRIVER_LOG_BASE:-acl_log_meter} # Utility Functions # ----------------- +function wait_for_db_file { + local count=0 + while [ ! -f $1 ]; do + sleep 1 + count=$((count+1)) + if [ "$count" -gt 5 ]; then + die $LINENO "DB File $1 not found" + fi + done +} + function wait_for_sock_file { local count=0 while [ ! 
-S $1 ]; do @@ -695,8 +706,11 @@ function start_ovn { fi # Wait for the service to be ready + # Check for socket and db files for both OVN NB and SB wait_for_sock_file $OVS_RUNDIR/ovnnb_db.sock wait_for_sock_file $OVS_RUNDIR/ovnsb_db.sock + wait_for_db_file $OVN_DATADIR/ovnnb_db.db + wait_for_db_file $OVN_DATADIR/ovnsb_db.db if is_service_enabled tls-proxy; then sudo ovn-nbctl --db=unix:$OVS_RUNDIR/ovnnb_db.sock set-ssl $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/ca-chain.pem From 5c765cb8a1866bd3405946d097d7fb06066ae4d4 Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Tue, 26 Apr 2022 13:08:21 +0200 Subject: [PATCH 220/574] Add Ubuntu 22.04 LTS (jammy) platform job The new Ubuntu LTS release has been made last week, start running devstack on it as a platform job. Horizon has issues with py310, so gets disabled for now. Run variants with OVS and OVN(default). Co-Authored-By: yatinkarel Signed-off-by: Dr. Jens Harbott Change-Id: I47696273d6b009f754335b44ef3356b4f5115cd8 --- .zuul.yaml | 76 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ stack.sh | 2 +- 2 files changed, 77 insertions(+), 1 deletion(-) diff --git a/.zuul.yaml b/.zuul.yaml index a437c1cc02..e87f410202 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -16,6 +16,16 @@ nodes: - controller +- nodeset: + name: openstack-single-node-jammy + nodes: + - name: controller + label: ubuntu-jammy + groups: + - name: tempest + nodes: + - controller + - nodeset: name: openstack-single-node-focal nodes: @@ -718,6 +728,69 @@ # from source instead. OVN_BUILD_FROM_SOURCE: True +- job: + name: devstack-platform-ubuntu-jammy + parent: tempest-full-py3 + description: Ubuntu 22.04 LTS (jammy) platform test + nodeset: openstack-single-node-jammy + timeout: 9000 + vars: + configure_swap_size: 4096 + devstack_services: + # Horizon doesn't like py310 + horizon: false + +- job: + name: devstack-platform-ubuntu-jammy-ovn-source + parent: devstack-platform-ubuntu-jammy + description: Ubuntu 22.04 LTS (jammy) platform test (OVN from source) + vars: + devstack_localrc: + OVN_BUILD_FROM_SOURCE: True + OVN_BRANCH: "v21.06.0" + OVS_BRANCH: "a4b04276ab5934d087669ff2d191a23931335c87" + OVS_SYSCONFDIR: "/usr/local/etc/openvswitch" + +- job: + name: devstack-platform-ubuntu-jammy-ovs + parent: tempest-full-py3 + description: Ubuntu 22.04 LTS (jammy) platform test (OVS) + nodeset: openstack-single-node-jammy + timeout: 9000 + vars: + configure_swap_size: 8192 + devstack_localrc: + Q_AGENT: openvswitch + Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch + Q_ML2_TENANT_NETWORK_TYPE: vxlan + devstack_services: + # Horizon doesn't like py310 + horizon: false + # Disable OVN services + ovn-northd: false + ovn-controller: false + ovs-vswitchd: false + ovsdb-server: false + # Disable Neutron ML2/OVN services + q-ovn-metadata-agent: false + # Enable Neutron ML2/OVS services + q-agt: true + q-dhcp: true + q-l3: true + q-meta: true + q-metering: true + group-vars: + subnode: + devstack_services: + # Disable OVN services + ovn-controller: false + ovs-vswitchd: false + ovsdb-server: false + # Disable Neutron ML2/OVN services + q-ovn-metadata-agent: false + # Enable Neutron ML2/OVS services + q-agt: true + - job: name: devstack-no-tls-proxy parent: tempest-full-py3 @@ -829,6 +902,9 @@ - devstack-platform-fedora-latest - devstack-platform-centos-9-stream - devstack-platform-debian-bullseye + - devstack-platform-ubuntu-jammy + - devstack-platform-ubuntu-jammy-ovn-source + - devstack-platform-ubuntu-jammy-ovs - devstack-multinode - 
devstack-unit-tests - openstack-tox-bashate diff --git a/stack.sh b/stack.sh index 6e9ced985e..1970105c1d 100755 --- a/stack.sh +++ b/stack.sh @@ -229,7 +229,7 @@ write_devstack_version # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -SUPPORTED_DISTROS="bullseye|focal|f35|opensuse-15.2|opensuse-tumbleweed|rhel8|rhel9|openEuler-20.03" +SUPPORTED_DISTROS="bullseye|focal|jammy|f35|opensuse-15.2|opensuse-tumbleweed|rhel8|rhel9|openEuler-20.03" if [[ ! ${DISTRO} =~ $SUPPORTED_DISTROS ]]; then echo "WARNING: this script has not been tested on $DISTRO" From fe52d7f0a88de2dc330923cf6cf52c83ccb92bd6 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Thu, 28 Apr 2022 12:34:38 -0700 Subject: [PATCH 221/574] Change DB counting mechanism The mysql performance_schema method for counting per-database queries is very heavyweight in that it requires full logging (in a table) of every query. We do hundreds of thousands in the course of a tempest run, which ends up creating its own performance problem. This changes the approach we take, which is to bundle a very tiny sqlalchemy plugin module which counts just what we care about in a special database. It is more complex than just enabling the features in mysql, but it is a massively smaller runtime overhead. It also provides us the opportunity to easily zero the counters just before a tempest run. Change-Id: I361bc30bb970cdaf18b966951f217862d302f0b9 --- lib/databases/mysql | 28 ++++++-- stack.sh | 13 ++++ tools/dbcounter/dbcounter.py | 120 +++++++++++++++++++++++++++++++++ tools/dbcounter/pyproject.toml | 3 + tools/dbcounter/setup.cfg | 14 ++++ tools/get-stats.py | 6 +- 6 files changed, 173 insertions(+), 11 deletions(-) create mode 100644 tools/dbcounter/dbcounter.py create mode 100644 tools/dbcounter/pyproject.toml create mode 100644 tools/dbcounter/setup.cfg diff --git a/lib/databases/mysql b/lib/databases/mysql index 6b3ea0287c..b292da25bd 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -151,12 +151,16 @@ function configure_database_mysql { fi if [[ "$MYSQL_GATHER_PERFORMANCE" == "True" ]]; then - echo "enabling MySQL performance_schema items" - # Enable long query history - iniset -sudo $my_conf mysqld \ - performance-schema-consumer-events-statements-history-long TRUE - iniset -sudo $my_conf mysqld \ - performance_schema_events_stages_history_long_size 1000000 + echo "enabling MySQL performance counting" + + # Install our sqlalchemy plugin + pip_install ${TOP_DIR}/tools/dbcounter + + # Create our stats database for accounting + recreate_database stats + mysql -u $DATABASE_USER -p$DATABASE_PASSWORD -h $MYSQL_HOST -e \ + "CREATE TABLE queries (db VARCHAR(32), op VARCHAR(32), + count INT, PRIMARY KEY (db, op)) ENGINE MEMORY" stats fi restart_service $MYSQL_SERVICE_NAME @@ -218,7 +222,17 @@ function install_database_python_mysql { function database_connection_url_mysql { local db=$1 - echo "$BASE_SQL_CONN/$db?charset=utf8" + local plugin + + # NOTE(danms): We don't enable perf on subnodes yet because the + # plugin is not installed there + if [[ "$MYSQL_GATHER_PERFORMANCE" == "True" ]]; then + if is_service_enabled mysql; then + plugin="&plugin=dbcounter" + fi + fi + + echo "$BASE_SQL_CONN/$db?charset=utf8$plugin" } diff --git a/stack.sh b/stack.sh index 6e9ced985e..16dce81d3d 100755 --- a/stack.sh +++ b/stack.sh @@ -1512,6 +1512,19 @@ async_cleanup time_totals async_print_timing +if is_service_enabled mysql; then + if [[ "$MYSQL_GATHER_PERFORMANCE" 
== "True" && "$MYSQL_HOST" ]]; then + echo "" + echo "" + echo "Post-stack database query stats:" + mysql -u $DATABASE_USER -p$DATABASE_PASSWORD -h $MYSQL_HOST stats -e \ + 'SELECT * FROM queries' -t 2>/dev/null + mysql -u $DATABASE_USER -p$DATABASE_PASSWORD -h $MYSQL_HOST stats -e \ + 'DELETE FROM queries' 2>/dev/null + fi +fi + + # Using the cloud # =============== diff --git a/tools/dbcounter/dbcounter.py b/tools/dbcounter/dbcounter.py new file mode 100644 index 0000000000..5057f0f393 --- /dev/null +++ b/tools/dbcounter/dbcounter.py @@ -0,0 +1,120 @@ +import json +import logging +import os +import threading +import time +import queue + +import sqlalchemy +from sqlalchemy.engine import CreateEnginePlugin +from sqlalchemy import event + +# https://docs.sqlalchemy.org/en/14/core/connections.html? +# highlight=createengineplugin#sqlalchemy.engine.CreateEnginePlugin + +LOG = logging.getLogger(__name__) + +# The theory of operation here is that we register this plugin with +# sqlalchemy via an entry_point. It gets loaded by virtue of plugin= +# being in the database connection URL, which gives us an opportunity +# to hook the engines that get created. +# +# We opportunistically spawn a thread, which we feed "hits" to over a +# queue, and which occasionally writes those hits to a special +# database called 'stats'. We access that database with the same user, +# pass, and host as the main connection URL for simplicity. + + +class LogCursorEventsPlugin(CreateEnginePlugin): + def __init__(self, url, kwargs): + self.db_name = url.database + LOG.info('Registered counter for database %s' % self.db_name) + new_url = sqlalchemy.engine.URL.create(url.drivername, + url.username, + url.password, + url.host, + url.port, + 'stats') + + self.engine = sqlalchemy.create_engine(new_url) + self.queue = queue.Queue() + self.thread = None + + def engine_created(self, engine): + """Hook the engine creation process. + + This is the plug point for the sqlalchemy plugin. Using + plugin=$this in the URL causes this method to be called when + the engine is created, giving us a chance to hook it below. + """ + event.listen(engine, "before_cursor_execute", self._log_event) + + def ensure_writer_thread(self): + self.thread = threading.Thread(target=self.stat_writer, daemon=True) + self.thread.start() + + def _log_event(self, conn, cursor, statement, parameters, context, + executemany): + """Queue a "hit" for this operation to be recorded. + + Attepts to determine the operation by the first word of the + statement, or 'OTHER' if it cannot be determined. + """ + + # Start our thread if not running. If we were forked after the + # engine was created and this plugin was associated, our + # writer thread is gone, so respawn. + if not self.thread or not self.thread.is_alive(): + self.ensure_writer_thread() + + try: + op = statement.strip().split(' ', 1)[0] or 'OTHER' + except Exception: + op = 'OTHER' + + self.queue.put((self.db_name, op)) + + def do_incr(self, db, op, count): + """Increment the counter for (db,op) by count.""" + + query = ('INSERT INTO queries (db, op, count) ' + ' VALUES (%s, %s, %s) ' + ' ON DUPLICATE KEY UPDATE count=count+%s') + try: + with self.engine.begin() as conn: + r = conn.execute(query, (db, op, count, count)) + except Exception as e: + LOG.error('Failed to account for access to database %r: %s', + db, e) + + def stat_writer(self): + """Consume messages from the queue and write them in batches. 
+ + This reads "hists" from from a queue fed by _log_event() and + writes (db,op)+=count stats to the database after ten seconds + of no activity to avoid triggering a write for every SELECT + call. Write no less often than every thirty seconds and/or 100 + pending hits to avoid being starved by constant activity. + """ + LOG.debug('[%i] Writer thread running' % os.getpid()) + while True: + to_write = {} + total = 0 + last = time.time() + while time.time() - last < 30 and total < 100: + try: + item = self.queue.get(timeout=10) + to_write.setdefault(item, 0) + to_write[item] += 1 + total += 1 + except queue.Empty: + break + + if to_write: + LOG.debug('[%i] Writing DB stats %s' % ( + os.getpid(), + ','.join(['%s:%s=%i' % (db, op, count) + for (db, op), count in to_write.items()]))) + + for (db, op), count in to_write.items(): + self.do_incr(db, op, count) diff --git a/tools/dbcounter/pyproject.toml b/tools/dbcounter/pyproject.toml new file mode 100644 index 0000000000..d74d688997 --- /dev/null +++ b/tools/dbcounter/pyproject.toml @@ -0,0 +1,3 @@ +[build-system] +requires = ["sqlalchemy", "setuptools>=42"] +build-backend = "setuptools.build_meta" \ No newline at end of file diff --git a/tools/dbcounter/setup.cfg b/tools/dbcounter/setup.cfg new file mode 100644 index 0000000000..f9f26f2175 --- /dev/null +++ b/tools/dbcounter/setup.cfg @@ -0,0 +1,14 @@ +[metadata] +name = dbcounter +author = Dan Smith +author_email = dms@danplanet.com +version = 0.1 +description = A teeny tiny dbcounter plugin for use with devstack +url = http://github.com/openstack/devstack +license = Apache + +[options] +modules = dbcounter +entry_points = + [sqlalchemy.plugins] + dbcounter = dbcounter:LogCursorEventsPlugin diff --git a/tools/get-stats.py b/tools/get-stats.py index 670e723e82..465afcab5a 100755 --- a/tools/get-stats.py +++ b/tools/get-stats.py @@ -83,13 +83,11 @@ def proc_matches(proc): def get_db_stats(host, user, passwd): dbs = [] db = pymysql.connect(host=host, user=user, password=passwd, - database='performance_schema', + database='stats', cursorclass=pymysql.cursors.DictCursor) with db: with db.cursor() as cur: - cur.execute( - 'SELECT COUNT(*) AS queries,current_schema AS db FROM ' - 'events_statements_history_long GROUP BY current_schema') + cur.execute('SELECT db,op,count FROM queries') for row in cur: dbs.append({k: tryint(v) for k, v in row.items()}) return dbs From 64d68679d9660e304ab3550929fe9892a124ac6f Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Fri, 22 Apr 2022 07:58:29 -0700 Subject: [PATCH 222/574] Improve API log parsing Two runs of the same job on the same patch can yield quite different numbers for API calls if we just count the raw calls. Many of these are tempest polling for resources, which on a slow worker can require many more calls than a fast one. Tempest seems to not change its User-Agent string, but the client libraries do. So, if we ignore the regular "python-urllib" agent calls, we get a much more stable count of service-to-service API calls in the performance report. Note that we were also logging in a different (less-rich) format for the tls-proxy.log file, which hampers our ability to parse that data in the same format. This switches it to "combined" which is used by the access.log and contains more useful information, like the user-agent, among other things. 
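To make the new parsing concrete, a "combined" access log line looks
roughly like the two below (the address, sizes and exact agent strings
here are made up for illustration); the user-agent is the last quoted
field, which is what allows service-to-service calls to be separated
from tempest's generic python-urllib agent:

    203.0.113.5 - - [28/Apr/2022:12:34:56 +0000] "GET /compute/v2.1/servers/detail HTTP/1.1" 200 1842 "-" "python-novaclient"
    203.0.113.5 - - [28/Apr/2022:12:34:57 +0000] "GET /compute/v2.1/servers/detail HTTP/1.1" 200 1840 "-" "Python-urllib/3.10"

The first line would be counted under a novaclient key, while the
second would be attributed to tempest so the two can be reported
separately.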
Change-Id: I8889c2e53f85c41150e1245dcbe2a79bac702aad --- lib/tls | 2 +- tools/get-stats.py | 77 ++++++++++++++++++++++++++++++++++------------ 2 files changed, 58 insertions(+), 21 deletions(-) diff --git a/lib/tls b/lib/tls index 5a7f5ae324..b8758cd6d3 100644 --- a/lib/tls +++ b/lib/tls @@ -557,7 +557,7 @@ $listen_string ErrorLog $APACHE_LOG_DIR/tls-proxy_error.log ErrorLogFormat "%{cu}t [%-m:%l] [pid %P:tid %T] %7F: %E: [client\ %a] [frontend\ %A] %M% ,\ referer\ %{Referer}i" LogLevel info - CustomLog $APACHE_LOG_DIR/tls-proxy_access.log "%{%Y-%m-%d}t %{%T}t.%{msec_frac}t [%l] %a \"%r\" %>s %b" + CustomLog $APACHE_LOG_DIR/tls-proxy_access.log combined EOF if is_suse ; then diff --git a/tools/get-stats.py b/tools/get-stats.py index 465afcab5a..ffe467691c 100755 --- a/tools/get-stats.py +++ b/tools/get-stats.py @@ -1,10 +1,12 @@ #!/usr/bin/python3 import argparse +import csv import datetime import glob import itertools import json +import logging import os import re import socket @@ -25,6 +27,8 @@ print('No pymysql, database information will not be included', file=sys.stderr) +LOG = logging.getLogger('perf') + # https://www.elastic.co/blog/found-crash-elasticsearch#mapping-explosion @@ -95,26 +99,56 @@ def get_db_stats(host, user, passwd): def get_http_stats_for_log(logfile): stats = {} - for line in open(logfile).readlines(): - m = re.search('"([A-Z]+) /([^" ]+)( HTTP/1.1)?" ([0-9]{3}) ([0-9]+)', - line) - if m: - method = m.group(1) - path = m.group(2) - status = m.group(4) - size = int(m.group(5)) - - try: - service, rest = path.split('/', 1) - except ValueError: - # Root calls like "GET /identity" - service = path - rest = '' - - stats.setdefault(service, {'largest': 0}) - stats[service].setdefault(method, 0) - stats[service][method] += 1 - stats[service]['largest'] = max(stats[service]['largest'], size) + apache_fields = ('host', 'a', 'b', 'date', 'tz', 'request', 'status', + 'length', 'c', 'agent') + ignore_agents = ('curl', 'uwsgi', 'nova-status') + for line in csv.reader(open(logfile), delimiter=' '): + fields = dict(zip(apache_fields, line)) + if len(fields) != len(apache_fields): + # Not a combined access log, so we can bail completely + return [] + try: + method, url, http = fields['request'].split(' ') + except ValueError: + method = url = http = '' + if 'HTTP' not in http: + # Not a combined access log, so we can bail completely + return [] + + # Tempest's User-Agent is unchanged, but client libraries and + # inter-service API calls use proper strings. So assume + # 'python-urllib' is tempest so we can tell it apart. 
+ if 'python-urllib' in fields['agent'].lower(): + agent = 'tempest' + else: + agent = fields['agent'].split(' ')[0] + if agent.startswith('python-'): + agent = agent.replace('python-', '') + if '/' in agent: + agent = agent.split('/')[0] + + if agent in ignore_agents: + continue + + try: + service, rest = url.strip('/').split('/', 1) + except ValueError: + # Root calls like "GET /identity" + service = url.strip('/') + rest = '' + + method_key = '%s-%s' % (agent, method) + try: + length = int(fields['length']) + except ValueError: + LOG.warning('[%s] Failed to parse length %r from line %r' % ( + logfile, fields['length'], line)) + length = 0 + stats.setdefault(service, {'largest': 0}) + stats[service].setdefault(method_key, 0) + stats[service][method_key] += 1 + stats[service]['largest'] = max(stats[service]['largest'], + length) # Flatten this for ES return [{'service': service, 'log': os.path.basename(logfile), @@ -131,6 +165,7 @@ def get_report_info(): return { 'timestamp': datetime.datetime.now().isoformat(), 'hostname': socket.gethostname(), + 'version': 2, } @@ -152,6 +187,8 @@ def get_report_info(): '(default is %s)' % ','.join(process_defaults))) args = parser.parse_args() + logging.basicConfig(level=logging.WARNING) + data = { 'services': get_services_stats(), 'db': pymysql and args.db_pass and get_db_stats(args.db_host, From 92a34dbe951f2ab31fb3432e61cf34db034b0145 Mon Sep 17 00:00:00 2001 From: yatinkarel Date: Tue, 17 May 2022 20:10:48 +0530 Subject: [PATCH 223/574] Configure placement section in neutron conf Without it segment plugin fails to connect with placement api. Configure the placement section if service is deployed. Closes-Bug: #1973783 Change-Id: Ie7f37770a04f622735cf2263c601257669ab5064 --- lib/neutron-legacy | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 253b457ae1..88ac991167 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -931,6 +931,9 @@ function _configure_neutron_service { configure_keystone_authtoken_middleware $NEUTRON_CONF nova nova + # Configuration for placement client + configure_keystone_authtoken_middleware $NEUTRON_CONF placement placement + # Configure plugin neutron_plugin_configure_service } From 111a38b4d6e6f61c21570e0adba58f6c59c52900 Mon Sep 17 00:00:00 2001 From: Brian Rosmaita Date: Fri, 13 May 2022 20:53:26 -0400 Subject: [PATCH 224/574] lib/tempest: add wait for Glance image import Glance image import is asynchronous and may be configured to do image conversion. If image import is being used, it's possible that the tempest configuration code is executed before the import has completed and there may be no active images yet. In that case, we will poll glance every TEMPEST_GLANCE_IMPORT_POLL_INTERVAL seconds (default: 1) to see if there are TEMPEST_GLANCE_IMAGE_COUNT active images (default: 1) up to TEMPEST_GLANCE_IMPORT_POLL_LIMIT times (default: 12). 
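As a rough worked example (the override values below are purely
illustrative, not new defaults): with the defaults of a 1 second
interval, 12 polls and 1 expected image, the configuration step waits
at most about 12 seconds for an active image to appear. A deployment
importing two converted images could stretch that to roughly a minute
with a local.conf along the lines of:

    TEMPEST_GLANCE_IMPORT_POLL_INTERVAL=2
    TEMPEST_GLANCE_IMPORT_POLL_LIMIT=30
    TEMPEST_GLANCE_IMAGE_COUNT=2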
You can see an example of the issue this patch addresses in real life: https://review.opendev.org/c/openstack/glance/+/841278/1#message-456096e48b28e5b866deb8bf53e9258ee08219a0 Change-Id: Ie99f12691d9062611a8930accfa14d9540970cc5 --- lib/tempest | 73 ++++++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 67 insertions(+), 6 deletions(-) diff --git a/lib/tempest b/lib/tempest index 1fd4184763..206b37b5bf 100644 --- a/lib/tempest +++ b/lib/tempest @@ -71,6 +71,17 @@ TEMPEST_VOLUME_VENDOR=${TEMPEST_VOLUME_VENDOR:-$TEMPEST_DEFAULT_VOLUME_VENDOR} TEMPEST_DEFAULT_STORAGE_PROTOCOL="iSCSI" TEMPEST_STORAGE_PROTOCOL=${TEMPEST_STORAGE_PROTOCOL:-$TEMPEST_DEFAULT_STORAGE_PROTOCOL} +# Glance/Image variables +# When Glance image import is enabled, image creation is asynchronous and images +# may not yet be active when tempest looks for them. In that case, we poll +# Glance every TEMPEST_GLANCE_IMPORT_POLL_INTERVAL seconds for the number of +# times specified by TEMPEST_GLANCE_IMPORT_POLL_LIMIT. If you are importing +# multiple images, set TEMPEST_GLANCE_IMAGE_COUNT so the poller does not quit +# too early (though it will not exceed the polling limit). +TEMPEST_GLANCE_IMPORT_POLL_INTERVAL=${TEMPEST_GLANCE_IMPORT_POLL_INTERVAL:-1} +TEMPEST_GLANCE_IMPORT_POLL_LIMIT=${TEMPEST_GLANCE_IMPORT_POLL_LIMIT:-12} +TEMPEST_GLANCE_IMAGE_COUNT=${TEMPEST_GLANCE_IMAGE_COUNT:-1} + # Neutron/Network variables IPV6_ENABLED=$(trueorfalse True IPV6_ENABLED) IPV6_SUBNET_ATTRIBUTES_ENABLED=$(trueorfalse True IPV6_SUBNET_ATTRIBUTES_ENABLED) @@ -127,6 +138,48 @@ function set_tempest_venv_constraints { fi } +# Makes a call to glance to get a list of active images, ignoring +# ramdisk and kernel images. Takes 3 arguments, an array and two +# variables. The array will contain the list of active image UUIDs; +# if an image with ``DEFAULT_IMAGE_NAME`` is found, its UUID will be +# set as the value of *both* other parameters. 
+function get_active_images { + declare -n img_array=$1 + declare -n img_id=$2 + declare -n img_id_alt=$3 + + # start with a fresh array in case we are called multiple times + img_array=() + + while read -r IMAGE_NAME IMAGE_UUID; do + if [ "$IMAGE_NAME" = "$DEFAULT_IMAGE_NAME" ]; then + img_id="$IMAGE_UUID" + img_id_alt="$IMAGE_UUID" + fi + img_array+=($IMAGE_UUID) + done < <(openstack --os-cloud devstack-admin image list --property status=active | awk -F'|' '!/^(+--)|ID|aki|ari/ { print $3,$2 }') +} + +function poll_glance_images { + declare -n image_array=$1 + declare -n image_id=$2 + declare -n image_id_alt=$3 + local -i poll_count + + poll_count=$TEMPEST_GLANCE_IMPORT_POLL_LIMIT + while (( poll_count-- > 0 )) ; do + sleep $TEMPEST_GLANCE_IMPORT_POLL_INTERVAL + get_active_images image_array image_id image_id_alt + if (( ${#image_array[*]} >= $TEMPEST_GLANCE_IMAGE_COUNT )) ; then + return + fi + done + local msg + msg="Polling limit of $TEMPEST_GLANCE_IMPORT_POLL_LIMIT exceeded; " + msg+="poll interval was $TEMPEST_GLANCE_IMPORT_POLL_INTERVAL sec" + warn $LINENO "$msg" +} + # configure_tempest() - Set config files, create data dirs, etc function configure_tempest { if [[ "$INSTALL_TEMPEST" == "True" ]]; then @@ -168,13 +221,21 @@ function configure_tempest { declare -a images if is_service_enabled glance; then - while read -r IMAGE_NAME IMAGE_UUID; do - if [ "$IMAGE_NAME" = "$DEFAULT_IMAGE_NAME" ]; then - image_uuid="$IMAGE_UUID" - image_uuid_alt="$IMAGE_UUID" + get_active_images images image_uuid image_uuid_alt + + if (( ${#images[*]} < $TEMPEST_GLANCE_IMAGE_COUNT )); then + # Glance image import is asynchronous and may be configured + # to do image conversion. If image import is being used, + # it's possible that this code is being executed before the + # import has completed and there may be no active images yet. + if [[ "$GLANCE_USE_IMPORT_WORKFLOW" == "True" ]]; then + poll_glance_images images image_uuid image_uuid_alt + if (( ${#images[*]} < $TEMPEST_GLANCE_IMAGE_COUNT )); then + echo "Only found ${#images[*]} image(s), was looking for $TEMPEST_GLANCE_IMAGE_COUNT" + exit 1 + fi fi - images+=($IMAGE_UUID) - done < <(openstack --os-cloud devstack-admin image list --property status=active | awk -F'|' '!/^(+--)|ID|aki|ari/ { print $3,$2 }') + fi case "${#images[*]}" in 0) From 083eeee5af61a19a932138b5035a916c7421beee Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Thu, 19 May 2022 13:55:35 +0200 Subject: [PATCH 225/574] Make jammy platform jobs non-voting We missed to add the jobs to the gate queue and so they have already regressed before they were actually in place. Make them non-voting for now until the issues are fixed. Signed-off-by: Dr. 
Jens Harbott Change-Id: I5d1f83dfe23747096163076dcf80750585c0260e --- .zuul.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.zuul.yaml b/.zuul.yaml index b449ea67be..0e114afb3e 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -731,6 +731,7 @@ parent: tempest-full-py3 description: Ubuntu 22.04 LTS (jammy) platform test nodeset: openstack-single-node-jammy + voting: false timeout: 9000 vars: configure_swap_size: 4096 @@ -742,6 +743,7 @@ name: devstack-platform-ubuntu-jammy-ovn-source parent: devstack-platform-ubuntu-jammy description: Ubuntu 22.04 LTS (jammy) platform test (OVN from source) + voting: false vars: devstack_localrc: OVN_BUILD_FROM_SOURCE: True @@ -754,6 +756,7 @@ parent: tempest-full-py3 description: Ubuntu 22.04 LTS (jammy) platform test (OVS) nodeset: openstack-single-node-jammy + voting: false timeout: 9000 vars: configure_swap_size: 8192 From 560ee16a85b22b4456177d289cf53c31c6a1ca6b Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Thu, 19 May 2022 13:58:11 +0200 Subject: [PATCH 226/574] Drop openEuler support The job is broken since it is running with python3.7 and most services now require at least python3.8. Signed-off-by: Dr. Jens Harbott Change-Id: Ie21f71acffabd78c79e2b141951ccf30a5c06445 --- .zuul.yaml | 25 ------------------------- doc/source/index.rst | 2 +- files/rpms/ceph | 2 +- files/rpms/general | 4 +--- files/rpms/nova | 2 +- files/rpms/swift | 2 +- functions-common | 13 +------------ lib/apache | 2 +- lib/nova | 6 +----- roles/apache-logs-conf/tasks/main.yaml | 1 - stack.sh | 9 +-------- tools/fixup_stuff.sh | 24 ------------------------ 12 files changed, 9 insertions(+), 83 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index b449ea67be..3beae1f822 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -106,16 +106,6 @@ nodes: - controller -- nodeset: - name: devstack-single-node-openeuler-20.03-sp2 - nodes: - - name: controller - label: openEuler-20-03-LTS-SP2 - groups: - - name: tempest - nodes: - - controller - - nodeset: name: openstack-two-node nodes: @@ -712,20 +702,6 @@ # Enable Neutron ML2/OVS services q-agt: true -- job: - name: devstack-platform-openEuler-20.03-SP2 - parent: tempest-full-py3 - description: openEuler 20.03 SP2 platform test - nodeset: devstack-single-node-openeuler-20.03-sp2 - voting: false - timeout: 9000 - vars: - configure_swap_size: 4096 - devstack_localrc: - # NOTE(wxy): OVN package is not supported by openEuler yet. Build it - # from source instead. - OVN_BUILD_FROM_SOURCE: True - - job: name: devstack-platform-ubuntu-jammy parent: tempest-full-py3 @@ -1001,7 +977,6 @@ experimental: jobs: - - devstack-platform-openEuler-20.03-SP2 - nova-multi-cell - nova-next - neutron-fullstack-with-uwsgi diff --git a/doc/source/index.rst b/doc/source/index.rst index feb50ce4e9..08ce4cb061 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -38,7 +38,7 @@ Install Linux Start with a clean and minimal install of a Linux system. DevStack attempts to support the two latest LTS releases of Ubuntu, the -latest/current Fedora version, CentOS/RHEL 8, OpenSUSE and openEuler. +latest/current Fedora version, CentOS/RHEL 8 and OpenSUSE. If you do not have a preference, Ubuntu 20.04 (Focal Fossa) is the most tested, and will probably go the smoothest. 
diff --git a/files/rpms/ceph b/files/rpms/ceph index 93b5746aa6..33a55f80ea 100644 --- a/files/rpms/ceph +++ b/files/rpms/ceph @@ -1,3 +1,3 @@ ceph # NOPRIME -redhat-lsb-core # not:rhel9,openEuler-20.03 +redhat-lsb-core # not:rhel9 xfsprogs diff --git a/files/rpms/general b/files/rpms/general index 668705b1c3..7697513149 100644 --- a/files/rpms/general +++ b/files/rpms/general @@ -16,7 +16,6 @@ libjpeg-turbo-devel # Pillow 3.0.0 libxml2-devel # lxml libxslt-devel # lxml libyaml-devel -make # dist:openEuler-20.03 mod_ssl # required for tls-proxy on centos 9 stream computes net-tools openssh-server @@ -29,8 +28,7 @@ psmisc python3-devel python3-pip python3-systemd -redhat-rpm-config # not:openEuler-20.03 missing dep for gcc hardening flags, see rhbz#1217376 -systemd-devel # dist:openEuler-20.03 +redhat-rpm-config # missing dep for gcc hardening flags, see rhbz#1217376 tar tcpdump unzip diff --git a/files/rpms/nova b/files/rpms/nova index 9e8621c628..9522e5729d 100644 --- a/files/rpms/nova +++ b/files/rpms/nova @@ -6,7 +6,7 @@ ebtables genisoimage # not:rhel9 required for config_drive iptables iputils -kernel-modules # not:openEuler-20.03 +kernel-modules kpartx parted polkit diff --git a/files/rpms/swift b/files/rpms/swift index a838d7839e..7d906aa926 100644 --- a/files/rpms/swift +++ b/files/rpms/swift @@ -1,5 +1,5 @@ curl -liberasurecode-devel # not:openEuler-20.03 +liberasurecode-devel memcached rsync-daemon sqlite diff --git a/functions-common b/functions-common index b660245337..be966e96a6 100644 --- a/functions-common +++ b/functions-common @@ -399,7 +399,7 @@ function _ensure_lsb_release { elif [[ -x $(command -v zypper 2>/dev/null) ]]; then sudo zypper -n install lsb-release elif [[ -x $(command -v dnf 2>/dev/null) ]]; then - sudo dnf install -y redhat-lsb-core || sudo dnf install -y openeuler-lsb + sudo dnf install -y redhat-lsb-core else die $LINENO "Unable to find or auto-install lsb_release" fi @@ -471,10 +471,6 @@ function GetDistro { # Drop the . release as we assume it's compatible # XXX re-evaluate when we get RHEL10 DISTRO="rhel${os_RELEASE::1}" - elif [[ "$os_VENDOR" =~ (openEuler) ]]; then - # The DISTRO here is `openEuler-20.03`. While, actually only openEuler - # 20.03 LTS SP2 is fully tested. Other SP version maybe have bugs. - DISTRO="openEuler-$os_RELEASE" else # We can't make a good choice here. Setting a sensible DISTRO # is part of the problem, but not the major issue -- we really @@ -526,7 +522,6 @@ function is_fedora { fi [ "$os_VENDOR" = "Fedora" ] || [ "$os_VENDOR" = "Red Hat" ] || \ - [ "$os_VENDOR" = "openEuler" ] || \ [ "$os_VENDOR" = "RedHatEnterpriseServer" ] || \ [ "$os_VENDOR" = "RedHatEnterprise" ] || \ [ "$os_VENDOR" = "CentOS" ] || [ "$os_VENDOR" = "CentOSStream" ] || \ @@ -576,12 +571,6 @@ function is_ubuntu { [ "$os_PACKAGE" = "deb" ] } -function is_openeuler { - if [[ -z "$os_PACKAGE" ]]; then - GetOSVersion - fi - [ "$os_VENDOR" = "openEuler" ] -} # Git Functions # ============= diff --git a/lib/apache b/lib/apache index 02827d1f1b..94f3cfc95a 100644 --- a/lib/apache +++ b/lib/apache @@ -95,7 +95,7 @@ function install_apache_uwsgi { # didn't fix Python 3.10 compatibility before release. Should be # fixed in uwsgi 4.9.0; can remove this when packages available # or we drop this release - elif is_fedora && ! is_openeuler && ! [[ $DISTRO =~ f35 ]]; then + elif is_fedora && ! [[ $DISTRO =~ f35 ]]; then # Note httpd comes with mod_proxy_uwsgi and it is loaded by # default; the mod_proxy_uwsgi package actually conflicts now. 
# See: diff --git a/lib/nova b/lib/nova index 4c14374d0f..da3a10edd0 100644 --- a/lib/nova +++ b/lib/nova @@ -324,11 +324,7 @@ EOF # set chap algorithms. The default chap_algorithm is md5 which will # not work under FIPS. - # FIXME(alee) For some reason, this breaks openeuler. Openeuler devs should weigh in - # and determine the correct solution for openeuler here - if ! is_openeuler; then - iniset -sudo /etc/iscsi/iscsid.conf DEFAULT "node.session.auth.chap_algs" "SHA3-256,SHA256" - fi + iniset -sudo /etc/iscsi/iscsid.conf DEFAULT "node.session.auth.chap_algs" "SHA3-256,SHA256" # ensure that iscsid is started, even when disabled by default restart_service iscsid diff --git a/roles/apache-logs-conf/tasks/main.yaml b/roles/apache-logs-conf/tasks/main.yaml index 6b7ea37857..bd64574c9b 100644 --- a/roles/apache-logs-conf/tasks/main.yaml +++ b/roles/apache-logs-conf/tasks/main.yaml @@ -64,7 +64,6 @@ 'Debian': '/etc/apache2/sites-enabled/' 'Suse': '/etc/apache2/conf.d/' 'RedHat': '/etc/httpd/conf.d/' - 'openEuler': '/etc/httpd/conf.d/' - name: Discover configurations find: diff --git a/stack.sh b/stack.sh index e53280e00c..df283bbe50 100755 --- a/stack.sh +++ b/stack.sh @@ -229,7 +229,7 @@ write_devstack_version # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -SUPPORTED_DISTROS="bullseye|focal|jammy|f35|opensuse-15.2|opensuse-tumbleweed|rhel8|rhel9|openEuler-20.03" +SUPPORTED_DISTROS="bullseye|focal|jammy|f35|opensuse-15.2|opensuse-tumbleweed|rhel8|rhel9" if [[ ! ${DISTRO} =~ $SUPPORTED_DISTROS ]]; then echo "WARNING: this script has not been tested on $DISTRO" @@ -280,13 +280,6 @@ chmod 0440 $TEMPFILE sudo chown root:root $TEMPFILE sudo mv $TEMPFILE /etc/sudoers.d/50_stack_sh -# TODO(wxy): Currently some base packages are not installed by default in -# openEuler. Remove the code below once the packaged are installed by default -# in the future. -if [[ $DISTRO == "openEuler-20.03" ]]; then - install_package hostname -fi - # Configure Distro Repositories # ----------------------------- diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index f24ac40ad5..daa1bc6301 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -153,32 +153,8 @@ function fixup_ubuntu { sudo rm -rf /usr/lib/python3/dist-packages/simplejson-*.egg-info } -function fixup_openeuler { - if ! is_openeuler; then - return - fi - - if is_arch "x86_64"; then - arch="x86_64" - elif is_arch "aarch64"; then - arch="aarch64" - fi - - # Some packages' version in openEuler are too old, use the newer ones we - # provide in oepkg. (oepkg is an openEuler third part yum repo which is - # endorsed by openEuler community) - (echo '[openstack-ci]' - echo 'name=openstack' - echo 'baseurl=https://repo.oepkgs.net/openEuler/rpm/openEuler-20.03-LTS-SP2/budding-openeuler/openstack-master-ci/'$arch'/' - echo 'enabled=1' - echo 'gpgcheck=0') | sudo tee -a /etc/yum.repos.d/openstack-master.repo > /dev/null - - yum_install liberasurecode-devel -} - function fixup_all { fixup_ubuntu fixup_fedora fixup_suse - fixup_openeuler } From 50e3c06ec245e8a5e7ca24015b0c152e3bc40a5c Mon Sep 17 00:00:00 2001 From: Clark Boylan Date: Thu, 19 May 2022 13:36:43 -0700 Subject: [PATCH 227/574] Fix dbcounter installation on Jammy There are two problems with dbcounter installation on Jammy. The first is straightforward. We have to use `py_modules` instead of `modules` to specify the source file. 
I don't know how this works on other distros but the docs [0] seem
to clearly indicate py_modules does this.

The second issue is quite an issue and requires story time. When
pip/setuptools install editable installs (as is done for many of the
openstack projects) it creates an easy-install.pth file that tells the
python interpreter to add the source dirs of those repos to the python
path. Normally these paths are appended to your sys.path. Pip's
isolated build env relies on the assumption that these paths are
appended to the path when it sanitizes sys.path to create the isolated
environment. However, when SETUPTOOLS_SYS_PATH_TECHNIQUE is set to
rewrite the paths are not appended and are inserted in the middle.
This breaks pip's isolated build env which broke dbcounter
installations. We fix this by not setting SETUPTOOLS_SYS_PATH_TECHNIQUE
to rewrite. Upstream indicates the reason we set this half a decade ago
has since been fixed properly.

The reason Jammy and nothing else breaks is that python3.10 is the
first python version to use pip's isolated build envs by default.

I've locally fiddled with a patch to pip [1] to try and fix this
behavior even when rewrite is set. I don't plan to push this upstream
but it helps to illustrate where the problem lies. If someone else
would like to upstream this, feel free.

Finally this change makes the jammy platform job voting again and adds
it to the gate to ensure we don't regress again.

[0] https://docs.python.org/3/distutils/sourcedist.html#specifying-the-files-to-distribute
[1] https://paste.opendev.org/show/bqVAuhgMtVtfYupZK5J6/

Change-Id: I237f5663b0f8b060f6df130de04e17e2b1695f8a
---
 .zuul.yaml                | 2 +-
 inc/python                | 1 -
 tools/dbcounter/setup.cfg | 2 +-
 3 files changed, 2 insertions(+), 3 deletions(-)

diff --git a/.zuul.yaml b/.zuul.yaml
index 0e114afb3e..fc3d76d2b3 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -731,7 +731,6 @@
     parent: tempest-full-py3
     description: Ubuntu 22.04 LTS (jammy) platform test
     nodeset: openstack-single-node-jammy
-    voting: false
     timeout: 9000
     vars:
       configure_swap_size: 4096
@@ -950,6 +949,7 @@
         - devstack
         - devstack-ipv6
         - devstack-platform-centos-9-stream
+        - devstack-platform-ubuntu-jammy
         - devstack-enforce-scope
         - devstack-multinode
         - devstack-unit-tests
diff --git a/inc/python b/inc/python
index 9382d352dc..d032a10eb9 100644
--- a/inc/python
+++ b/inc/python
@@ -194,7 +194,6 @@ function pip_install {
         https_proxy="${https_proxy:-}" \
         no_proxy="${no_proxy:-}" \
         PIP_FIND_LINKS=$PIP_FIND_LINKS \
-        SETUPTOOLS_SYS_PATH_TECHNIQUE=rewrite \
         $cmd_pip $upgrade \
         $@
     result=$?
diff --git a/tools/dbcounter/setup.cfg b/tools/dbcounter/setup.cfg
index f9f26f2175..12300bf619 100644
--- a/tools/dbcounter/setup.cfg
+++ b/tools/dbcounter/setup.cfg
@@ -8,7 +8,7 @@ url = http://github.com/openstack/devstack
 license = Apache
 
 [options]
-modules = dbcounter
+py_modules = dbcounter
 entry_points =
     [sqlalchemy.plugins]
     dbcounter = dbcounter:LogCursorEventsPlugin

From 1d5be95196d31ba1a4ef125f4b06a5730f2af113 Mon Sep 17 00:00:00 2001
From: Clark Boylan
Date: Mon, 23 May 2022 08:46:05 -0700
Subject: [PATCH 228/574] Cleanup comment that should've been removed

The previous change, I237f5663b0f8b060f6df130de04e17e2b1695f8a, removed
a SETUPTOOLS flag, but not the comment explaining why that flag was
previously set. Clean up that comment.
Change-Id: I32b0240fd56310d7f10596aaa8ef432679bfd66a --- inc/python | 3 --- 1 file changed, 3 deletions(-) diff --git a/inc/python b/inc/python index d032a10eb9..3eb3efe80e 100644 --- a/inc/python +++ b/inc/python @@ -186,9 +186,6 @@ function pip_install { $xtrace - # adding SETUPTOOLS_SYS_PATH_TECHNIQUE is a workaround to keep - # the same behaviour of setuptools before version 25.0.0. - # related issue: https://github.com/pypa/pip/issues/3874 $sudo_pip \ http_proxy="${http_proxy:-}" \ https_proxy="${https_proxy:-}" \ From 1cdf413ac6f993dc2074741be4627acdc3f10304 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Mon, 23 May 2022 13:56:13 -0700 Subject: [PATCH 229/574] Do not barf stack trace if stats DB is missing This can happen if devstack fails to run, but we still run the post tasks. Also could happen if some sort of hybrid job configuration does not run all of devstack but we still end up running post jobs. Just warn to stderr and assume no DB info. Change-Id: I211a331ab668dbb0ad7882908cca4363f865d924 --- tools/get-stats.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/tools/get-stats.py b/tools/get-stats.py index ffe467691c..e0c20f2db9 100755 --- a/tools/get-stats.py +++ b/tools/get-stats.py @@ -86,9 +86,17 @@ def proc_matches(proc): def get_db_stats(host, user, passwd): dbs = [] - db = pymysql.connect(host=host, user=user, password=passwd, - database='stats', - cursorclass=pymysql.cursors.DictCursor) + try: + db = pymysql.connect(host=host, user=user, password=passwd, + database='stats', + cursorclass=pymysql.cursors.DictCursor) + except pymysql.err.OperationalError as e: + if 'Unknown database' in str(e): + print('No stats database; assuming devstack failed', + file=sys.stderr) + return [] + raise + with db: with db.cursor() as cur: cur.execute('SELECT db,op,count FROM queries') From c64ea4f213afebd1602d05cdd4d5bc14eaf5356b Mon Sep 17 00:00:00 2001 From: yatinkarel Date: Wed, 20 Apr 2022 12:30:09 +0530 Subject: [PATCH 230/574] Fix doc and user create script to set homedir permissions RHEL based distros set homedir permissions to 700, and Ubuntu 21.04+ to 750[1], i.e missing executable permission for group or others, this results into failures as defined in the below bug. Since in doc we add useradd command, it's good to add instructions to fix the permissions there itself instead of getting failures during installation and then fixing it. Also update user create script to fix permissions by adding executable bit to DEST directory if missing. [1] https://discourse.ubuntu.com/t/private-home-directories-for-ubuntu-21-04-onwards/19533 Closes-Bug: #1966858 Change-Id: Id2787886433281238eb95ee11a75eddeef514293 --- doc/source/guides/multinode-lab.rst | 8 ++++++++ doc/source/guides/single-machine.rst | 8 ++++++++ doc/source/index.rst | 8 ++++++++ tools/create-stack-user.sh | 9 +++++++++ 4 files changed, 33 insertions(+) diff --git a/doc/source/guides/multinode-lab.rst b/doc/source/guides/multinode-lab.rst index 81c5945307..79a76dedb1 100644 --- a/doc/source/guides/multinode-lab.rst +++ b/doc/source/guides/multinode-lab.rst @@ -75,6 +75,14 @@ Otherwise create the stack user: useradd -s /bin/bash -d /opt/stack -m stack +Ensure home directory for the ``stack`` user has executable permission for all, +as RHEL based distros create it with ``700`` and Ubuntu 21.04+ with ``750`` +which can cause issues during deployment. 
+ +:: + + chmod +x /opt/stack + This user will be making many changes to your system during installation and operation so it needs to have sudo privileges to root without a password: diff --git a/doc/source/guides/single-machine.rst b/doc/source/guides/single-machine.rst index a0e97edb37..03d93743f7 100644 --- a/doc/source/guides/single-machine.rst +++ b/doc/source/guides/single-machine.rst @@ -49,6 +49,14 @@ below) $ sudo useradd -s /bin/bash -d /opt/stack -m stack +Ensure home directory for the ``stack`` user has executable permission for all, +as RHEL based distros create it with ``700`` and Ubuntu 21.04+ with ``750`` +which can cause issues during deployment. + +.. code-block:: console + + $ sudo chmod +x /opt/stack + Since this user will be making many changes to your system, it will need to have sudo privileges: diff --git a/doc/source/index.rst b/doc/source/index.rst index feb50ce4e9..a79a7e602c 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -57,6 +57,14 @@ to run DevStack with $ sudo useradd -s /bin/bash -d /opt/stack -m stack +Ensure home directory for the ``stack`` user has executable permission for all, +as RHEL based distros create it with ``700`` and Ubuntu 21.04+ with ``750`` +which can cause issues during deployment. + +.. code-block:: console + + $ sudo chmod +x /opt/stack + Since this user will be making many changes to your system, it should have sudo privileges: diff --git a/tools/create-stack-user.sh b/tools/create-stack-user.sh index 919cacb036..cb8d7aa328 100755 --- a/tools/create-stack-user.sh +++ b/tools/create-stack-user.sh @@ -44,6 +44,15 @@ fi if ! getent passwd $STACK_USER >/dev/null; then echo "Creating a user called $STACK_USER" useradd -g $STACK_USER -s /bin/bash -d $DEST -m $STACK_USER + # RHEL based distros create home dir with 700 permissions, + # And Ubuntu 21.04+ with 750, i.e missing executable + # permission for either group or others + # Devstack deploy will have issues with this, fix it by + # adding executable permission + if [[ $(stat -c '%A' $DEST|grep -o x|wc -l) -lt 3 ]]; then + echo "Executable permission missing for $DEST, adding it" + chmod +x $DEST + fi fi echo "Giving stack user passwordless sudo privileges" From 599b241d32cd067a9a26c54fe178dd2bd28426d6 Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Thu, 21 Oct 2021 12:07:17 +0200 Subject: [PATCH 231/574] Run debian platform job with OVN Packages for OVN are now available in bullseye, so we can drop the special handling. Signed-off-by: Dr. Jens Harbott Change-Id: I5e5c78aa19c5208c207ddcf14e208bae8fbc3c55 --- .zuul.yaml | 31 ------------------------------- 1 file changed, 31 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 001ac84f12..03553d3b57 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -670,37 +670,6 @@ timeout: 9000 vars: configure_swap_size: 4096 - # NOTE(yoctozepto): Debian Bullseye does not yet offer OVN. Switch to OVS - # for the time being. 
- devstack_localrc: - Q_AGENT: openvswitch - Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch - Q_ML2_TENANT_NETWORK_TYPE: vxlan - devstack_services: - # Disable OVN services - ovn-northd: false - ovn-controller: false - ovs-vswitchd: false - ovsdb-server: false - # Disable Neutron ML2/OVN services - q-ovn-metadata-agent: false - # Enable Neutron ML2/OVS services - q-agt: true - q-dhcp: true - q-l3: true - q-meta: true - q-metering: true - group-vars: - subnode: - devstack_services: - # Disable OVN services - ovn-controller: false - ovs-vswitchd: false - ovsdb-server: false - # Disable Neutron ML2/OVN services - q-ovn-metadata-agent: false - # Enable Neutron ML2/OVS services - q-agt: true - job: name: devstack-platform-ubuntu-jammy From e85c68e60ff460f0e16eefd5f084862628a3c54d Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Thu, 26 May 2022 09:31:36 -0700 Subject: [PATCH 232/574] Add apache2 to the services we collect for memory Change-Id: Ic6daef5b4df50ce43c6782542cb54c1958e54655 --- tools/get-stats.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/get-stats.py b/tools/get-stats.py index ffe467691c..80e7c642a2 100755 --- a/tools/get-stats.py +++ b/tools/get-stats.py @@ -54,7 +54,8 @@ def get_service_stats(service): def get_services_stats(): services = [os.path.basename(s) for s in - glob.glob('/etc/systemd/system/devstack@*.service')] + glob.glob('/etc/systemd/system/devstack@*.service')] + \ + ['apache2.service'] return [dict(service=service, **get_service_stats(service)) for service in services] From 6dd896feface3d0413437221a63e508b359ed615 Mon Sep 17 00:00:00 2001 From: yatinkarel Date: Tue, 26 Apr 2022 16:37:07 +0530 Subject: [PATCH 233/574] Allow to skip stop of ovn services Grenade jobs stop services, check fip connectivity for a nova server and then upgrade to next release. But since ovn data plane and db services are stopped along with other services, fip connectivity fails as a result. We shouldn't stop these services along with other neutron services. This patch adds a new variable "SKIP_STOP_OVN" which can be used by grenade jobs to skip stop of ovn services. This will also fix the ovn grenade jobs. Also source fixup_stuff.sh so function fixup_ovn_centos is available. It's already sourced in stack.sh but that's not used in grenade run. Change-Id: I94818a19f19973779cb2e11753d2881d54dfa3bc --- lib/neutron-legacy | 5 ++++- lib/neutron_plugins/ovn_agent | 2 ++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 88ac991167..e9b55b6b02 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -138,6 +138,9 @@ Q_NOTIFY_NOVA_PORT_DATA_CHANGES=${Q_NOTIFY_NOVA_PORT_DATA_CHANGES:-True} VIF_PLUGGING_IS_FATAL=${VIF_PLUGGING_IS_FATAL:-True} VIF_PLUGGING_TIMEOUT=${VIF_PLUGGING_TIMEOUT:-300} +# Allow to skip stopping of OVN services +SKIP_STOP_OVN=${SKIP_STOP_OVN:-False} + # The directory which contains files for Q_PLUGIN_EXTRA_CONF_FILES. # /etc/neutron is assumed by many of devstack plugins. Do not change. 
 _Q_PLUGIN_EXTRA_CONF_PATH=/etc/neutron
@@ -638,7 +641,7 @@ function stop_mutnauq {
     stop_mutnauq_other
     stop_mutnauq_l2_agent
 
-    if [[ $Q_AGENT == "ovn" ]]; then
+    if [[ $Q_AGENT == "ovn" && $SKIP_STOP_OVN != "True" ]]; then
         stop_ovn
     fi
 }
diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent
index 9022f2d382..dfd55deae5 100644
--- a/lib/neutron_plugins/ovn_agent
+++ b/lib/neutron_plugins/ovn_agent
@@ -395,6 +395,8 @@ function install_ovn {
         sudo mkdir -p $OVS_PREFIX/var/log/ovn
         sudo chown $(whoami) $OVS_PREFIX/var/log/ovn
     else
+        # Load fixup_ovn_centos
+        source ${TOP_DIR}/tools/fixup_stuff.sh
         fixup_ovn_centos
         install_package $(get_packages openvswitch)
         install_package $(get_packages ovn)

From 35fb53423a68f8d156693ae79c1c6950538a33b7 Mon Sep 17 00:00:00 2001
From: yatinkarel
Date: Fri, 3 Jun 2022 10:10:50 +0530
Subject: [PATCH 234/574] [ironic][swift]Temporary add sha1 to allowed_digests

Swift removed sha1 from supported digests with [1] and that
broke the ironic tinyipa job. Temporarily add sha1 to allowed_digests
until it's fixed in ironic.

[1] https://review.opendev.org/c/openstack/swift/+/525771

Story: 2010068
Task: 45539

Change-Id: I68dfc472ce901058b6a7d691c98ed1641d431e54
---
 lib/swift | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/lib/swift b/lib/swift
index ba92f3dcc3..251c4625b5 100644
--- a/lib/swift
+++ b/lib/swift
@@ -402,6 +402,11 @@ function configure_swift {
     # Versioned Writes
     iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:versioned_writes allow_versioned_writes true
 
+    # Add sha1 temporary https://storyboard.openstack.org/#!/story/2010068
+    if [[ "$SWIFT_ENABLE_TEMPURLS" == "True" ]]; then
+        iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:tempurl allowed_digests "sha1 sha256 sha512"
+    fi
+
     # Configure Ceilometer
     if is_service_enabled ceilometer; then
         iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer "set log_level" "WARN"

From f7d87aa433d344f5db0201aca047a987cba3a0af Mon Sep 17 00:00:00 2001
From: Dan Smith
Date: Thu, 2 Jun 2022 11:08:32 -0700
Subject: [PATCH 235/574] Capture QEMU core dumps when possible

Some of the hardest-to-debug issues are qemu crashes deep in a nova
workflow that can't be reproduced locally. This adds a post task to
the playbook so that we capture the most recent qemu core dump, if
there is one.
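The dump is written to the devstack log directory as qemu.coredump so
it is collected with the rest of the job logs. As an illustration only
(the gdb invocation, and the availability of a matching qemu binary and
debug symbols locally, are assumptions rather than something this
change sets up), a downloaded dump could then be inspected with
something like:

    gdb /usr/bin/qemu-system-x86_64 qemu.coredump -ex 'thread apply all bt' -ex quit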
Change-Id: I48a2ea883325ca920b7e7909edad53a9832fb319 --- .zuul.yaml | 1 + playbooks/post.yaml | 6 ++++++ 2 files changed, 7 insertions(+) diff --git a/.zuul.yaml b/.zuul.yaml index 001ac84f12..fdcee59bc5 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -377,6 +377,7 @@ '{{ devstack_log_dir }}/devstacklog.txt.summary': logs '{{ devstack_log_dir }}/tcpdump.pcap': logs '{{ devstack_log_dir }}/worlddump-latest.txt': logs + '{{ devstack_log_dir }}/qemu.coredump': logs '{{ devstack_full_log}}': logs '{{ stage_dir }}/verify_tempest_conf.log': logs '{{ stage_dir }}/performance.json': logs diff --git a/playbooks/post.yaml b/playbooks/post.yaml index d8d5f6833c..0047d78ea5 100644 --- a/playbooks/post.yaml +++ b/playbooks/post.yaml @@ -17,6 +17,12 @@ dest: "{{ stage_dir }}/verify_tempest_conf.log" state: hard when: tempest_log.stat.exists + - name: Capture most recent qemu crash dump, if any + shell: + executable: /bin/bash + cmd: | + coredumpctl -o {{ devstack_log_dir }}/qemu.coredump dump /usr/bin/qemu-system-x86_64 + ignore_errors: yes roles: - export-devstack-journal - apache-logs-conf From 96dbf55016a22dc121589a70181e5c7e7e55f8c0 Mon Sep 17 00:00:00 2001 From: Francesco Pantano Date: Fri, 18 Mar 2022 10:56:31 +0100 Subject: [PATCH 236/574] Do not create cinder backup pool and key when cephadm is used When cephadm is used, if ENABLE_CEPH_C_BAK is True both pool and key are created by devstack-plugin-ceph. This piece of code can still stay here to make sure the cinder config is properly built. Change-Id: I799521f008123b8e42b2021c1c11d374b834bec3 --- lib/cinder_backups/ceph | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/lib/cinder_backups/ceph b/lib/cinder_backups/ceph index e4003c0720..4b180490d7 100644 --- a/lib/cinder_backups/ceph +++ b/lib/cinder_backups/ceph @@ -26,12 +26,15 @@ CINDER_BAK_CEPH_USER=${CINDER_BAK_CEPH_USER:-cinder-bak} function configure_cinder_backup_ceph { - sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_BAK_CEPH_POOL} ${CINDER_BAK_CEPH_POOL_PG} ${CINDER_BAK_CEPH_POOL_PGP} - if [[ "$REMOTE_CEPH" = "False" && "$CEPH_REPLICAS" -ne 1 ]]; then - sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} crush_ruleset ${RULE_ID} + # Execute this part only when cephadm is not used + if [[ "$CEPHADM_DEPLOY" = "False" ]]; then + sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_BAK_CEPH_POOL} ${CINDER_BAK_CEPH_POOL_PG} ${CINDER_BAK_CEPH_POOL_PGP} + if [[ "$REMOTE_CEPH" = "False" && "$CEPH_REPLICAS" -ne 1 ]]; then + sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} crush_ruleset ${RULE_ID} + fi + sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_BAK_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_BAK_CEPH_POOL}, allow rwx pool=${CINDER_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring + sudo chown $(whoami):$(whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring fi - sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_BAK_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_BAK_CEPH_POOL}, allow rwx pool=${CINDER_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring - sudo chown $(whoami):$(whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.ceph.CephBackupDriver" iniset $CINDER_CONF DEFAULT backup_ceph_conf "$CEPH_CONF_FILE" From 
c869d59857c636d21ecd0329023038b24252627d Mon Sep 17 00:00:00 2001 From: Brian Haley Date: Fri, 28 Feb 2020 14:55:08 -0500 Subject: [PATCH 237/574] Add support for IPv6 tunnel endpoints Currently, neutron tunnel endpoints must be IPv4 addresses, i.e. $HOST_IP, although IPv6 endpoints are supported by most drivers. Create a TUNNEL_IP_VERSION variable to choose which host IP to use, either HOST_IP or HOST_IPV6, and configure it in the OVS and Linuxbridge agent driver files. The default is still IPv4, but it can be over-ridden by specifying TUNNEL_ENDPOINT_IP accordingly. This behaves similar to the SERVICE_IP_VERSION option, which can either be set to 4 or 6, but not 4+6 - the tunnel overhead should be consistent on all systems in order not to have MTU issues. Must set the ML2 overlay_ip_version config option to match else agent tunnel sync RPC will not work. Must set the OVN external_ids:ovn-encap-ip config option to the correct address. Updated 'devstack-ipv6-only' job definition and verification role that will set all services and tunnels to use IPv6 addresses. Closes-bug: #1619476 Change-Id: I6034278dfc17b55d7863bc4db541bbdaa983a686 --- .zuul.yaml | 4 ++- doc/source/configuration.rst | 25 ++++++++++++++++-- functions-common | 2 +- lib/neutron | 5 ++-- lib/neutron-legacy | 7 ----- lib/neutron_plugins/ml2 | 1 + lib/neutron_plugins/ovn_agent | 6 +++-- .../README.rst | 10 +++---- stackrc | 26 +++++++++++++++++++ tools/verify-ipv6-only-deployments.sh | 25 +++++++++++++----- 10 files changed, 84 insertions(+), 27 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 001ac84f12..7322f78963 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -625,11 +625,13 @@ name: devstack-ipv6 parent: devstack description: | - Devstack single node job for integration gate with IPv6. + Devstack single node job for integration gate with IPv6, + all services and tunnels using IPv6 addresses. vars: devstack_localrc: SERVICE_IP_VERSION: 6 SERVICE_HOST: "" + TUNNEL_IP_VERSION: 6 - job: name: devstack-enforce-scope diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index 40a8725b8d..757b4001d9 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -521,8 +521,8 @@ behavior: can be configured with any valid IPv6 prefix. The default values make use of an auto-generated ``IPV6_GLOBAL_ID`` to comply with RFC4193. -Service Version -~~~~~~~~~~~~~~~ +Service IP Version +~~~~~~~~~~~~~~~~~~ DevStack can enable service operation over either IPv4 or IPv6 by setting ``SERVICE_IP_VERSION`` to either ``SERVICE_IP_VERSION=4`` or @@ -542,6 +542,27 @@ optionally be used to alter the default IPv6 address:: HOST_IPV6=${some_local_ipv6_address} +Tunnel IP Version +~~~~~~~~~~~~~~~~~ + +DevStack can enable tunnel operation over either IPv4 or IPv6 by +setting ``TUNNEL_IP_VERSION`` to either ``TUNNEL_IP_VERSION=4`` or +``TUNNEL_IP_VERSION=6`` respectively. + +When set to ``4`` Neutron will use an IPv4 address for tunnel endpoints, +for example, ``HOST_IP``. + +When set to ``6`` Neutron will use an IPv6 address for tunnel endpoints, +for example, ``HOST_IPV6``. + +The default value for this setting is ``4``. Dual-mode support, for +example ``4+6`` is not supported, as this value must match the address +family of the local tunnel endpoint IP(v6) address. + +The value of ``TUNNEL_IP_VERSION`` has a direct relationship to the +setting of ``TUNNEL_ENDPOINT_IP``, which will default to ``HOST_IP`` +when set to ``4``, and ``HOST_IPV6`` when set to ``6``. 
+ Multi-node setup ~~~~~~~~~~~~~~~~ diff --git a/functions-common b/functions-common index be966e96a6..f299ef1cc9 100644 --- a/functions-common +++ b/functions-common @@ -49,7 +49,7 @@ KILL_PATH="$(which kill)" STACK_ENV_VARS="BASE_SQL_CONN DATA_DIR DEST ENABLED_SERVICES HOST_IP \ KEYSTONE_SERVICE_URI \ LOGFILE OS_CACERT SERVICE_HOST STACK_USER TLS_IP \ - HOST_IPV6 SERVICE_IP_VERSION" + HOST_IPV6 SERVICE_IP_VERSION TUNNEL_ENDPOINT_IP TUNNEL_IP_VERSION" # Saves significant environment variables to .stackenv for later use diff --git a/lib/neutron b/lib/neutron index f24ccfb1a9..1b78493919 100644 --- a/lib/neutron +++ b/lib/neutron @@ -230,6 +230,7 @@ function configure_neutron_new { mech_drivers+=",linuxbridge" fi iniset $NEUTRON_CORE_PLUGIN_CONF ml2 mechanism_drivers $mech_drivers + iniset $NEUTRON_CORE_PLUGIN_CONF ml2 overlay_ip_version $TUNNEL_IP_VERSION iniset $NEUTRON_CORE_PLUGIN_CONF ml2_type_vxlan vni_ranges 1001:2000 iniset $NEUTRON_CORE_PLUGIN_CONF ml2_type_flat flat_networks $PUBLIC_NETWORK_NAME @@ -251,10 +252,10 @@ function configure_neutron_new { # Configure the neutron agent if [[ $NEUTRON_AGENT == "linuxbridge" ]]; then iniset $NEUTRON_CORE_PLUGIN_CONF securitygroup firewall_driver iptables - iniset $NEUTRON_CORE_PLUGIN_CONF vxlan local_ip $HOST_IP + iniset $NEUTRON_CORE_PLUGIN_CONF vxlan local_ip $TUNNEL_ENDPOINT_IP elif [[ $NEUTRON_AGENT == "openvswitch" ]]; then iniset $NEUTRON_CORE_PLUGIN_CONF securitygroup firewall_driver openvswitch - iniset $NEUTRON_CORE_PLUGIN_CONF ovs local_ip $HOST_IP + iniset $NEUTRON_CORE_PLUGIN_CONF ovs local_ip $TUNNEL_ENDPOINT_IP if [[ "$NEUTRON_DISTRIBUTED_ROUTING" = "True" ]]; then iniset $NEUTRON_CORE_PLUGIN_CONF agent l2_population True diff --git a/lib/neutron-legacy b/lib/neutron-legacy index e9b55b6b02..5e6af0f249 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -246,13 +246,6 @@ if [[ $Q_AGENT == "linuxbridge" && -z ${LB_PHYSICAL_INTERFACE} ]]; then LB_PHYSICAL_INTERFACE=$default_route_dev fi -# When Neutron tunnels are enabled it is needed to specify the -# IP address of the end point in the local server. This IP is set -# by default to the same IP address that the HOST IP. -# This variable can be used to specify a different end point IP address -# Example: ``TUNNEL_ENDPOINT_IP=1.1.1.1`` -TUNNEL_ENDPOINT_IP=${TUNNEL_ENDPOINT_IP:-$HOST_IP} - # With the openvswitch plugin, set to True in ``localrc`` to enable # provider GRE tunnels when ``ENABLE_TENANT_TUNNELS`` is False. 
# diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2 index f00feac6b4..7343606aac 100644 --- a/lib/neutron_plugins/ml2 +++ b/lib/neutron_plugins/ml2 @@ -125,6 +125,7 @@ function neutron_plugin_configure_service { fi populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 mechanism_drivers=$Q_ML2_PLUGIN_MECHANISM_DRIVERS + populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 overlay_ip_version=$TUNNEL_IP_VERSION if [[ -n "$Q_ML2_PLUGIN_TYPE_DRIVERS" ]]; then populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 type_drivers=$Q_ML2_PLUGIN_TYPE_DRIVERS diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index dfd55deae5..24bdf92b60 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -99,8 +99,10 @@ ENABLE_CHASSIS_AS_GW=$(trueorfalse True ENABLE_CHASSIS_AS_GW) OVN_L3_CREATE_PUBLIC_NETWORK=$(trueorfalse True OVN_L3_CREATE_PUBLIC_NETWORK) export OVSDB_SERVER_LOCAL_HOST=$SERVICE_LOCAL_HOST +TUNNEL_IP=$TUNNEL_ENDPOINT_IP if [[ "$SERVICE_IP_VERSION" == 6 ]]; then OVSDB_SERVER_LOCAL_HOST=[$OVSDB_SERVER_LOCAL_HOST] + TUNNEL_IP=[$TUNNEL_IP] fi OVN_IGMP_SNOOPING_ENABLE=$(trueorfalse False OVN_IGMP_SNOOPING_ENABLE) @@ -639,7 +641,7 @@ function _start_ovs { sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-remote="$OVN_SB_REMOTE" sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-bridge="br-int" sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-encap-type="geneve" - sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-encap-ip="$HOST_IP" + sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-encap-ip="$TUNNEL_IP" sudo ovs-vsctl --no-wait set open_vswitch . external-ids:hostname="$LOCAL_HOSTNAME" # Select this chassis to host gateway routers if [[ "$ENABLE_CHASSIS_AS_GW" == "True" ]]; then @@ -654,7 +656,7 @@ function _start_ovs { if is_service_enabled ovn-controller-vtep ; then ovn_base_setup_bridge br-v vtep-ctl add-ps br-v - vtep-ctl set Physical_Switch br-v tunnel_ips=$HOST_IP + vtep-ctl set Physical_Switch br-v tunnel_ips=$TUNNEL_IP enable_service ovs-vtep local vtepcmd="$OVS_SCRIPTDIR/ovs-vtep --log-file --pidfile --detach br-v" diff --git a/roles/devstack-ipv6-only-deployments-verification/README.rst b/roles/devstack-ipv6-only-deployments-verification/README.rst index 400a8da222..3bddf5ea60 100644 --- a/roles/devstack-ipv6-only-deployments-verification/README.rst +++ b/roles/devstack-ipv6-only-deployments-verification/README.rst @@ -1,10 +1,10 @@ -Verify the IPv6-only deployments +Verify all addresses in IPv6-only deployments This role needs to be invoked from a playbook that -run tests. This role verifies the IPv6 setting on -devstack side and devstack deploy services on IPv6. -This role is invoked before tests are run so that -if any missing IPv6 setting or deployments can fail +runs tests. This role verifies the IPv6 settings on the +devstack side and that devstack deploys with all addresses +being IPv6. This role is invoked before tests are run so that +if there is any missing IPv6 setting, deployments can fail the job early. 
diff --git a/stackrc b/stackrc index 0c76de0531..f0039f0043 100644 --- a/stackrc +++ b/stackrc @@ -877,6 +877,32 @@ SERVICE_HOST=${SERVICE_HOST:-${DEF_SERVICE_HOST}} # This is either 127.0.0.1 for IPv4 or ::1 for IPv6 SERVICE_LOCAL_HOST=${SERVICE_LOCAL_HOST:-${DEF_SERVICE_LOCAL_HOST}} +# TUNNEL IP version +# This is the IP version to use for tunnel endpoints +TUNNEL_IP_VERSION=${TUNNEL_IP_VERSION:-4} + +# Validate TUNNEL_IP_VERSION +if [[ $TUNNEL_IP_VERSION != "4" ]] && [[ $TUNNEL_IP_VERSION != "6" ]]; then + die $LINENO "TUNNEL_IP_VERSION must be either 4 or 6" +fi + +if [[ "$TUNNEL_IP_VERSION" == 4 ]]; then + DEF_TUNNEL_ENDPOINT_IP=$HOST_IP +fi + +if [[ "$TUNNEL_IP_VERSION" == 6 ]]; then + # Only die if the user has not over-ridden the endpoint IP + if [[ "$HOST_IPV6" == "" ]] && [[ "$TUNNEL_ENDPOINT_IP" == "" ]]; then + die $LINENO "Could not determine host IPv6 address. See local.conf for suggestions on setting HOST_IPV6." + fi + + DEF_TUNNEL_ENDPOINT_IP=$HOST_IPV6 +fi + +# Allow the use of an alternate address for tunnel endpoints. +# Default is dependent on TUNNEL_IP_VERSION above. +TUNNEL_ENDPOINT_IP=${TUNNEL_ENDPOINT_IP:-${DEF_TUNNEL_ENDPOINT_IP}} + REGION_NAME=${REGION_NAME:-RegionOne} # Configure services to use syslog instead of writing to individual log files diff --git a/tools/verify-ipv6-only-deployments.sh b/tools/verify-ipv6-only-deployments.sh index 2596395165..0f0cba8afe 100755 --- a/tools/verify-ipv6-only-deployments.sh +++ b/tools/verify-ipv6-only-deployments.sh @@ -23,32 +23,43 @@ function verify_devstack_ipv6_setting { _service_listen_address=$(echo $SERVICE_LISTEN_ADDRESS | tr -d []) local _service_local_host='' _service_local_host=$(echo $SERVICE_LOCAL_HOST | tr -d []) + local _tunnel_endpoint_ip='' + _tunnel_endpoint_ip=$(echo $TUNNEL_ENDPOINT_IP | tr -d []) if [[ "$SERVICE_IP_VERSION" != 6 ]]; then echo $SERVICE_IP_VERSION "SERVICE_IP_VERSION is not set to 6 which is must for devstack to deploy services with IPv6 address." exit 1 fi + if [[ "$TUNNEL_IP_VERSION" != 6 ]]; then + echo $TUNNEL_IP_VERSION "TUNNEL_IP_VERSION is not set to 6 so TUNNEL_ENDPOINT_IP cannot be an IPv6 address." + exit 1 + fi is_service_host_ipv6=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$_service_host'"))') if [[ "$is_service_host_ipv6" != "True" ]]; then - echo $SERVICE_HOST "SERVICE_HOST is not ipv6 which means devstack cannot deploy services on IPv6 address." + echo $SERVICE_HOST "SERVICE_HOST is not IPv6 which means devstack cannot deploy services on IPv6 addresses." exit 1 fi is_host_ipv6=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$_host_ipv6'"))') if [[ "$is_host_ipv6" != "True" ]]; then - echo $HOST_IPV6 "HOST_IPV6 is not ipv6 which means devstack cannot deploy services on IPv6 address." + echo $HOST_IPV6 "HOST_IPV6 is not IPv6 which means devstack cannot deploy services on IPv6 addresses." exit 1 fi is_service_listen_address=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$_service_listen_address'"))') if [[ "$is_service_listen_address" != "True" ]]; then - echo $SERVICE_LISTEN_ADDRESS "SERVICE_LISTEN_ADDRESS is not ipv6 which means devstack cannot deploy services on IPv6 address." + echo $SERVICE_LISTEN_ADDRESS "SERVICE_LISTEN_ADDRESS is not IPv6 which means devstack cannot deploy services on IPv6 addresses." 
exit 1 fi is_service_local_host=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$_service_local_host'"))') if [[ "$is_service_local_host" != "True" ]]; then - echo $SERVICE_LOCAL_HOST "SERVICE_LOCAL_HOST is not ipv6 which means devstack cannot deploy services on IPv6 address." + echo $SERVICE_LOCAL_HOST "SERVICE_LOCAL_HOST is not IPv6 which means devstack cannot deploy services on IPv6 addresses." + exit 1 + fi + is_tunnel_endpoint_ip=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$_tunnel_endpoint_ip'"))') + if [[ "$is_tunnel_endpoint_ip" != "True" ]]; then + echo $TUNNEL_ENDPOINT_IP "TUNNEL_ENDPOINT_IP is not IPv6 which means devstack will not deploy with an IPv6 endpoint address." exit 1 fi echo "Devstack is properly configured with IPv6" - echo "SERVICE_IP_VERSION: " $SERVICE_IP_VERSION "HOST_IPV6: " $HOST_IPV6 "SERVICE_HOST: " $SERVICE_HOST "SERVICE_LISTEN_ADDRESS: " $SERVICE_LISTEN_ADDRESS "SERVICE_LOCAL_HOST: " $SERVICE_LOCAL_HOST + echo "SERVICE_IP_VERSION:" $SERVICE_IP_VERSION "HOST_IPV6:" $HOST_IPV6 "SERVICE_HOST:" $SERVICE_HOST "SERVICE_LISTEN_ADDRESS:" $SERVICE_LISTEN_ADDRESS "SERVICE_LOCAL_HOST:" $SERVICE_LOCAL_HOST "TUNNEL_IP_VERSION:" $TUNNEL_IP_VERSION "TUNNEL_ENDPOINT_IP:" $TUNNEL_ENDPOINT_IP } function sanity_check_system_ipv6_enabled { @@ -72,7 +83,7 @@ function verify_service_listen_address_is_ipv6 { is_endpoint_ipv6=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$endpoint_address'"))') if [[ "$is_endpoint_ipv6" != "True" ]]; then all_ipv6=False - echo $endpoint ": This is not ipv6 endpoint which means corresponding service is not listening on IPv6 address." + echo $endpoint ": This is not an IPv6 endpoint which means corresponding service is not listening on an IPv6 address." continue fi endpoints_verified=True @@ -80,7 +91,7 @@ function verify_service_listen_address_is_ipv6 { if [[ "$all_ipv6" == "False" ]] || [[ "$endpoints_verified" == "False" ]]; then exit 1 fi - echo "All services deployed by devstack is on IPv6 endpoints" + echo "All services deployed by devstack are on IPv6 endpoints" echo $endpoints } From e6e7100e853f2ba06bf2157fd87ae948faba1d1f Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Tue, 7 Jun 2022 10:12:59 +0200 Subject: [PATCH 238/574] Don't install pinned setuptools with distro pip We are seeing failures when using an updated setuptools version installed together with distro pip on Ubuntu 22.04. Install the version from u-c only when we are also installing pip from upstream. Change-Id: Ibb6e9424e5794ccbf9a937d2eecfa3bf60ed312e --- tools/install_pip.sh | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/tools/install_pip.sh b/tools/install_pip.sh index e9c52eacb7..7c5d4c6555 100755 --- a/tools/install_pip.sh +++ b/tools/install_pip.sh @@ -139,15 +139,18 @@ if is_fedora && [[ ${DISTRO} == f* || ${DISTRO} == rhel9 ]]; then # recent enough anyway. This is included via rpms/general : # Simply fall through elif is_ubuntu; then - : # pip on Ubuntu 20.04 is new enough, too + # pip on Ubuntu 20.04 is new enough, too + # drop setuptools from u-c + sed -i -e '/setuptools/d' $REQUIREMENTS_DIR/upper-constraints.txt else install_get_pip + + # Note setuptools is part of requirements.txt and we want to make sure + # we obey any versioning as described there. + pip_install_gr setuptools fi set -x -# Note setuptools is part of requirements.txt and we want to make sure -# we obey any versioning as described there. 
-pip_install_gr setuptools get_versions From d5af514ac9485009229f3b594bccc09e905782fb Mon Sep 17 00:00:00 2001 From: Gorka Eguileor Date: Wed, 8 Jun 2022 10:19:50 +0200 Subject: [PATCH 239/574] Reduce memory consumption in Cinder services This patch reduces memory usage on the Cinder Volume and Backup services by tuning glibc. The specific tuning consist on disabling the per thread arenas and disabling dynamic thresholds. The Cinder Backup service suffers from high water mark memory usage and uses excessive memory. As an example just after 10 restore operations the service uses almost 1GB of RAM and does not ever free it afterwards. With this patch the memory consumption of the service is reduced down to almost 130MB. If we add a revert from Cinder (Change-Id I43a20c8687f12bc52b014611cc6977c4c3ca212c) it goes down to 100MB during my tests. This glibc tuning is not applied to all Python services because I haven't done proper testings on them and at first glance they don't seem to have such great improvements. Related-bug: #1908805 Change-Id: Ic9030d01468b3189350f83b04a8d1d346c489d3c --- functions-common | 22 ++++++++++++++++++---- lib/cinder | 9 +++++++-- 2 files changed, 25 insertions(+), 6 deletions(-) diff --git a/functions-common b/functions-common index be966e96a6..0b896dde59 100644 --- a/functions-common +++ b/functions-common @@ -1564,6 +1564,7 @@ function write_user_unit_file { local command="$2" local group=$3 local user=$4 + local env_vars="$5" local extra="" if [[ -n "$group" ]]; then extra="Group=$group" @@ -1577,6 +1578,9 @@ function write_user_unit_file { iniset -sudo $unitfile "Service" "KillMode" "process" iniset -sudo $unitfile "Service" "TimeoutStopSec" "300" iniset -sudo $unitfile "Service" "ExecReload" "$KILL_PATH -HUP \$MAINPID" + if [[ -n "$env_vars" ]] ; then + iniset -sudo $unitfile "Service" "Environment" "$env_vars" + fi if [[ -n "$group" ]]; then iniset -sudo $unitfile "Service" "Group" "$group" fi @@ -1591,6 +1595,7 @@ function write_uwsgi_user_unit_file { local command="$2" local group=$3 local user=$4 + local env_vars="$5" local unitfile="$SYSTEMD_DIR/$service" mkdir -p $SYSTEMD_DIR @@ -1605,6 +1610,9 @@ function write_uwsgi_user_unit_file { iniset -sudo $unitfile "Service" "NotifyAccess" "all" iniset -sudo $unitfile "Service" "RestartForceExitStatus" "100" + if [[ -n "$env_vars" ]] ; then + iniset -sudo $unitfile "Service" "Environment" "$env_vars" + fi if [[ -n "$group" ]]; then iniset -sudo $unitfile "Service" "Group" "$group" fi @@ -1652,10 +1660,14 @@ function _run_under_systemd { local systemd_service="devstack@$service.service" local group=$3 local user=${4:-$STACK_USER} + if [[ -z "$user" ]]; then + user=$STACK_USER + fi + local env_vars="$5" if [[ "$command" =~ "uwsgi" ]] ; then - write_uwsgi_user_unit_file $systemd_service "$cmd" "$group" "$user" + write_uwsgi_user_unit_file $systemd_service "$cmd" "$group" "$user" "$env_vars" else - write_user_unit_file $systemd_service "$cmd" "$group" "$user" + write_user_unit_file $systemd_service "$cmd" "$group" "$user" "$env_vars" fi $SYSTEMCTL enable $systemd_service @@ -1676,18 +1688,20 @@ function is_running { # If the command includes shell metachatacters (;<>*) it must be run using a shell # If an optional group is provided sg will be used to run the # command as that group. 
-# run_process service "command-line" [group] [user] +# run_process service "command-line" [group] [user] [env_vars] +# env_vars must be a space separated list of variable assigments, ie: "A=1 B=2" function run_process { local service=$1 local command="$2" local group=$3 local user=$4 + local env_vars="$5" local name=$service time_start "run_process" if is_service_enabled $service; then - _run_under_systemd "$name" "$command" "$group" "$user" + _run_under_systemd "$name" "$command" "$group" "$user" "$env_vars" fi time_stop "run_process" } diff --git a/lib/cinder b/lib/cinder index 52818a81eb..ca2c084aff 100644 --- a/lib/cinder +++ b/lib/cinder @@ -552,8 +552,13 @@ function start_cinder { fi run_process c-sch "$CINDER_BIN_DIR/cinder-scheduler --config-file $CINDER_CONF" - run_process c-bak "$CINDER_BIN_DIR/cinder-backup --config-file $CINDER_CONF" - run_process c-vol "$CINDER_BIN_DIR/cinder-volume --config-file $CINDER_CONF" + # Tune glibc for Python Services using single malloc arena for all threads + # and disabling dynamic thresholds to reduce memory usage when using native + # threads directly or via eventlet.tpool + # https://www.gnu.org/software/libc/manual/html_node/Memory-Allocation-Tunables.html + malloc_tuning="MALLOC_ARENA_MAX=1 MALLOC_MMAP_THRESHOLD_=131072 MALLOC_TRIM_THRESHOLD_=262144" + run_process c-bak "$CINDER_BIN_DIR/cinder-backup --config-file $CINDER_CONF" "" "" "$malloc_tuning" + run_process c-vol "$CINDER_BIN_DIR/cinder-volume --config-file $CINDER_CONF" "" "" "$malloc_tuning" # NOTE(jdg): For cinder, startup order matters. To ensure that repor_capabilities is received # by the scheduler start the cinder-volume service last (or restart it) after the scheduler From 8ff52ea12bb855adc9fe26de48b022310c1a5893 Mon Sep 17 00:00:00 2001 From: Martin Kopec Date: Tue, 21 Jun 2022 17:31:50 +0200 Subject: [PATCH 240/574] Mark devstack-platform-centos-9-stream as n-v Due to the below bug the job has been constantly failing. Let's make it n-v until the bug is resolved: - https://bugs.launchpad.net/neutron/+bug/1979047 Change-Id: Ifc8cc96843a8eac5c98cd1e1f9e4b6287a7f2e7c --- .zuul.yaml | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/.zuul.yaml b/.zuul.yaml index 5b93a77017..c3f8914eee 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -661,6 +661,9 @@ description: CentOS 9 Stream platform test nodeset: devstack-single-node-centos-9-stream timeout: 9000 + # TODO(kopecmartin) n-v until the following is resolved: + # https://bugs.launchpad.net/neutron/+bug/1979047 + voting: false vars: configure_swap_size: 4096 @@ -896,7 +899,9 @@ jobs: - devstack - devstack-ipv6 - - devstack-platform-centos-9-stream + # TODO(kopecmartin) n-v until the following is resolved: + # https://bugs.launchpad.net/neutron/+bug/1979047 + # - devstack-platform-centos-9-stream - devstack-platform-ubuntu-jammy - devstack-enforce-scope - devstack-multinode From 8a38a73ddf2930e9662cb22109f4a6ef341476d6 Mon Sep 17 00:00:00 2001 From: Vladislav Belogrudov Date: Wed, 25 May 2022 12:58:52 +0300 Subject: [PATCH 241/574] Correct hostname for OVN agent Currently Devstack uses short hostname for configuration of OVN. This leads to inability to start instances (failing port binding) on hosts with full hostnames (including dots). Open vSwitch expects hostname in external_ids that corresponds to one returned by ``hostname`` command. 
Closes-Bug: #1943631 Change-Id: I15b71a49c482be0c8f15ad834e29ea1b33307c86 --- lib/neutron_plugins/ovn_agent | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index 24bdf92b60..e8a9babc1c 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -642,7 +642,7 @@ function _start_ovs { sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-bridge="br-int" sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-encap-type="geneve" sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-encap-ip="$TUNNEL_IP" - sudo ovs-vsctl --no-wait set open_vswitch . external-ids:hostname="$LOCAL_HOSTNAME" + sudo ovs-vsctl --no-wait set open_vswitch . external-ids:hostname=$(hostname) # Select this chassis to host gateway routers if [[ "$ENABLE_CHASSIS_AS_GW" == "True" ]]; then sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-cms-options="enable-chassis-as-gw" From fe7cfa6b8c7573d643d66d3684de03e4183651bb Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Thu, 23 Jun 2022 09:25:22 -0700 Subject: [PATCH 242/574] Avoid including bad service names in perf.json Some of the API services are not properly mounted under /$service/ in the apache proxy. This patch tries to avoid recording data for "services" like "v2.0" (in the case of neutron) by only adding names if they're all letters. A single warning is emitted for any services excluded by this check. For the moment this will mean we don't collect data for those services, but when their devstack API config is fixed, they'll start to show up. Change-Id: I41cc300e89a4f97a008a8ba97c91f0980f9b9c3f --- tools/get-stats.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/tools/get-stats.py b/tools/get-stats.py index a3ed7f2625..b958af61b2 100755 --- a/tools/get-stats.py +++ b/tools/get-stats.py @@ -111,6 +111,7 @@ def get_http_stats_for_log(logfile): apache_fields = ('host', 'a', 'b', 'date', 'tz', 'request', 'status', 'length', 'c', 'agent') ignore_agents = ('curl', 'uwsgi', 'nova-status') + ignored_services = set() for line in csv.reader(open(logfile), delimiter=' '): fields = dict(zip(apache_fields, line)) if len(fields) != len(apache_fields): @@ -146,6 +147,10 @@ def get_http_stats_for_log(logfile): service = url.strip('/') rest = '' + if not service.isalpha(): + ignored_services.add(service) + continue + method_key = '%s-%s' % (agent, method) try: length = int(fields['length']) @@ -159,6 +164,10 @@ def get_http_stats_for_log(logfile): stats[service]['largest'] = max(stats[service]['largest'], length) + if ignored_services: + LOG.warning('Ignored services: %s' % ','.join( + sorted(ignored_services))) + # Flatten this for ES return [{'service': service, 'log': os.path.basename(logfile), **vals} From ce1ae9ddef4dd05a294dc630bf81b264a4b5a703 Mon Sep 17 00:00:00 2001 From: Slawek Kaplonski Date: Wed, 29 Jun 2022 09:56:12 +0200 Subject: [PATCH 243/574] Fix missing "$" in the ENFORCE_SCOPE's variable name Because of the missing "$" before ENFORCE_SCOPE in the lib/neutron module, it was treated as an ENFORCE_SCOPE string instead of variable and Neutron was deployed always with old defaults and disabled scope enforcement. 
Change-Id: Ibe67fea634c5f7abb521c0369ff30dd5db84db8c --- lib/neutron | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron b/lib/neutron index 1b78493919..2d77df699a 100644 --- a/lib/neutron +++ b/lib/neutron @@ -633,7 +633,7 @@ function configure_neutron { # configure_rbac_policies() - Configure Neutron to enforce new RBAC # policies and scopes if NEUTRON_ENFORCE_SCOPE == True function configure_rbac_policies { - if [[ "$NEUTRON_ENFORCE_SCOPE" == "True" || "ENFORCE_SCOPE" == "True" ]]; then + if [[ "$NEUTRON_ENFORCE_SCOPE" == "True" || "$ENFORCE_SCOPE" == "True" ]]; then iniset $NEUTRON_CONF oslo_policy enforce_new_defaults True iniset $NEUTRON_CONF oslo_policy enforce_scope True else From e1fb94f82a2a5bfdc3a0d56c69455e95e265195e Mon Sep 17 00:00:00 2001 From: Martin Kopec Date: Wed, 29 Jun 2022 10:43:33 +0200 Subject: [PATCH 244/574] Make devstack-platform-debian-bullseye voting The job has been successfully passing lately, let's make it voting. Change-Id: Ib3b803a26c8647fd49c89371516c0ac7baba2703 --- .zuul.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.zuul.yaml b/.zuul.yaml index c3f8914eee..c29cb31f31 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -672,7 +672,6 @@ parent: tempest-full-py3 description: Debian Bullseye platform test nodeset: devstack-single-node-debian-bullseye - voting: false timeout: 9000 vars: configure_swap_size: 4096 @@ -902,6 +901,7 @@ # TODO(kopecmartin) n-v until the following is resolved: # https://bugs.launchpad.net/neutron/+bug/1979047 # - devstack-platform-centos-9-stream + - devstack-platform-debian-bullseye - devstack-platform-ubuntu-jammy - devstack-enforce-scope - devstack-multinode From bd6e5205b115fb0cafed7f50a676699a4b9fc0fe Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Sun, 3 Jul 2022 22:27:15 +0200 Subject: [PATCH 245/574] Increase timeout waiting for OVN startup We see some cases where OVN startup takes much longer than 5 seconds, up to 28 seconds have been observed, so increase the limit to 40 to be on the safe side. Signed-off-by: Dr. Jens Harbott Closes-Bug: 1980421 Change-Id: I6da4a537e6a8d527ff71a821f07164fc7d342882 --- lib/neutron_plugins/ovn_agent | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index e8a9babc1c..341b84d959 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -176,7 +176,7 @@ function wait_for_db_file { while [ ! -f $1 ]; do sleep 1 count=$((count+1)) - if [ "$count" -gt 5 ]; then + if [ "$count" -gt 40 ]; then die $LINENO "DB File $1 not found" fi done @@ -187,7 +187,7 @@ function wait_for_sock_file { while [ ! 
-S $1 ]; do sleep 1 count=$((count+1)) - if [ "$count" -gt 5 ]; then + if [ "$count" -gt 40 ]; then die $LINENO "Socket $1 not found" fi done From 85340e77f3d15b77dd0dc7c9df240428bfd2e30f Mon Sep 17 00:00:00 2001 From: Yadnesh Kulkarni Date: Mon, 11 Jul 2022 17:14:40 +0530 Subject: [PATCH 246/574] delete __pycache__ directory with sudo privileges Signed-off-by: Yadnesh Kulkarni Change-Id: I9cf3cd8921347eacc1effb2b197b97bc6ff3e0df --- functions-common | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/functions-common b/functions-common index e16bb27ef5..e5b07514e5 100644 --- a/functions-common +++ b/functions-common @@ -646,7 +646,7 @@ function git_clone { # remove the existing ignored files (like pyc) as they cause breakage # (due to the py files having older timestamps than our pyc, so python # thinks the pyc files are correct using them) - find $git_dest -name '*.pyc' -delete + sudo find $git_dest -name '*.pyc' -delete # handle git_ref accordingly to type (tag, branch) if [[ -n "`git show-ref refs/tags/$git_ref`" ]]; then From cf0bf746e996b780714a085b0e6f38899c2c832e Mon Sep 17 00:00:00 2001 From: Takashi Kajinami Date: Wed, 13 Jul 2022 22:34:47 +0900 Subject: [PATCH 247/574] Neutron: Set experimental option to use linuxbridge agent Recently the experimental mechanism has been added to Neutron and now it requires the [experimental] linuxbridge option when the linuxbridge mechanism driver is used. Depends-on: https://review.opendev.org/c/openstack/neutron/+/845181 Change-Id: Ice82a391cda9eb0193f23e6794be7ab3df12c40b --- lib/neutron | 4 ++++ lib/neutron_plugins/ml2 | 3 +++ 2 files changed, 7 insertions(+) diff --git a/lib/neutron b/lib/neutron index 1b78493919..6e787f213a 100644 --- a/lib/neutron +++ b/lib/neutron @@ -229,6 +229,10 @@ function configure_neutron_new { else mech_drivers+=",linuxbridge" fi + if [[ "$mech_drivers" == *"linuxbridge"* ]]; then + iniset $NEUTRON_CONF experimental linuxbridge True + fi + iniset $NEUTRON_CORE_PLUGIN_CONF ml2 mechanism_drivers $mech_drivers iniset $NEUTRON_CORE_PLUGIN_CONF ml2 overlay_ip_version $TUNNEL_IP_VERSION diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2 index 7343606aac..fa61f1ea30 100644 --- a/lib/neutron_plugins/ml2 +++ b/lib/neutron_plugins/ml2 @@ -125,6 +125,9 @@ function neutron_plugin_configure_service { fi populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 mechanism_drivers=$Q_ML2_PLUGIN_MECHANISM_DRIVERS + if [[ "$Q_ML2_PLUGIN_MECHANISM_DRIVERS" == *"linuxbridge"* ]]; then + iniset $NEUTRON_CONF experimental linuxbridge True + fi populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 overlay_ip_version=$TUNNEL_IP_VERSION if [[ -n "$Q_ML2_PLUGIN_TYPE_DRIVERS" ]]; then From 1a21ccbdf8eb66582a06f181f8c9af1f43bd52f5 Mon Sep 17 00:00:00 2001 From: Slawek Kaplonski Date: Fri, 8 Jul 2022 21:57:45 +0200 Subject: [PATCH 248/574] Add NEUTRON_ENDPOINT_SERVICE_NAME variable to set service name This option can be used to set name of the service used in the networking service endpoint URL. Depends-On: https://review.opendev.org/c/openstack/grenade/+/850306 Change-Id: I9e9a06eadc1604214c627bd3bda010cc00aaf83d --- lib/neutron | 18 +++++++++++++++--- lib/neutron-legacy | 31 ++++++++++++++++++++++++++++--- 2 files changed, 43 insertions(+), 6 deletions(-) diff --git a/lib/neutron b/lib/neutron index 6e787f213a..a885fbf16e 100644 --- a/lib/neutron +++ b/lib/neutron @@ -114,6 +114,12 @@ NEUTRON_TENANT_VLAN_RANGE=${NEUTRON_TENANT_VLAN_RANGE:-${TENANT_VLAN_RANGE:-100: # Physical network for VLAN network usage. 
NEUTRON_PHYSICAL_NETWORK=${NEUTRON_PHYSICAL_NETWORK:-} +# The name of the service in the endpoint URL +NEUTRON_ENDPOINT_SERVICE_NAME=${NEUTRON_ENDPOINT_SERVICE_NAME-"networking"} +if [[ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" && -z "$NEUTRON_ENDPOINT_SERVICE_NAME" ]]; then + NEUTRON_ENDPOINT_SERVICE_NAME="networking" +fi + # Additional neutron api config files declare -a -g _NEUTRON_SERVER_EXTRA_CONF_FILES_ABS @@ -397,10 +403,13 @@ function create_neutron_accounts_new { local neutron_url if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then - neutron_url=$NEUTRON_SERVICE_PROTOCOL://$NEUTRON_SERVICE_HOST/networking/ + neutron_url=$NEUTRON_SERVICE_PROTOCOL://$NEUTRON_SERVICE_HOST/ else neutron_url=$NEUTRON_SERVICE_PROTOCOL://$NEUTRON_SERVICE_HOST:$NEUTRON_SERVICE_PORT/ fi + if [ ! -z "$NEUTRON_ENDPOINT_SERVICE_NAME" ]; then + neutron_url=$neutron_url$NEUTRON_ENDPOINT_SERVICE_NAME + fi if [[ "$ENABLED_SERVICES" =~ "neutron-api" ]]; then @@ -481,19 +490,22 @@ function start_neutron_api { if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then run_process neutron-api "$(which uwsgi) --procname-prefix neutron-api --ini $NEUTRON_UWSGI_CONF" - neutron_url=$service_protocol://$NEUTRON_SERVICE_HOST/networking/ + neutron_url=$service_protocol://$NEUTRON_SERVICE_HOST/ enable_service neutron-rpc-server run_process neutron-rpc-server "$NEUTRON_BIN_DIR/neutron-rpc-server $opts" else # Start the Neutron service # TODO(sc68cal) Stop hard coding this run_process neutron-api "$NEUTRON_BIN_DIR/neutron-server $opts" - neutron_url=$service_protocol://$NEUTRON_SERVICE_HOST:$service_port + neutron_url=$service_protocol://$NEUTRON_SERVICE_HOST:$service_port/ # Start proxy if enabled if is_service_enabled tls-proxy; then start_tls_proxy neutron '*' $NEUTRON_SERVICE_PORT $NEUTRON_SERVICE_HOST $NEUTRON_SERVICE_PORT_INT fi fi + if [ ! -z "$NEUTRON_ENDPOINT_SERVICE_NAME" ]; then + neutron_url=$neutron_url$NEUTRON_ENDPOINT_SERVICE_NAME + fi if ! wait_for_service $SERVICE_TIMEOUT $neutron_url; then die $LINENO "neutron-api did not start" diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 5e6af0f249..1a6995511b 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -145,6 +145,12 @@ SKIP_STOP_OVN=${SKIP_STOP_OVN:-False} # /etc/neutron is assumed by many of devstack plugins. Do not change. _Q_PLUGIN_EXTRA_CONF_PATH=/etc/neutron +# The name of the service in the endpoint URL +NEUTRON_ENDPOINT_SERVICE_NAME=${NEUTRON_ENDPOINT_SERVICE_NAME-"networking"} +if [[ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" && -z "$NEUTRON_ENDPOINT_SERVICE_NAME" ]]; then + NEUTRON_ENDPOINT_SERVICE_NAME="networking" +fi + # List of config file names in addition to the main plugin config file # To add additional plugin config files, use ``neutron_server_config_add`` # utility function. For example: @@ -431,10 +437,13 @@ function create_nova_conf_neutron { function create_mutnauq_accounts { local neutron_url if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then - neutron_url=$Q_PROTOCOL://$SERVICE_HOST/networking/ + neutron_url=$Q_PROTOCOL://$SERVICE_HOST/ else neutron_url=$Q_PROTOCOL://$SERVICE_HOST:$Q_PORT/ fi + if [ ! 
-z "$NEUTRON_ENDPOINT_SERVICE_NAME" ]; then + neutron_url=$neutron_url$NEUTRON_ENDPOINT_SERVICE_NAME + fi if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then @@ -538,17 +547,20 @@ function start_neutron_service_and_check { if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then enable_service neutron-api run_process neutron-api "$(which uwsgi) --procname-prefix neutron-api --ini $NEUTRON_UWSGI_CONF" - neutron_url=$Q_PROTOCOL://$Q_HOST/networking/ + neutron_url=$Q_PROTOCOL://$Q_HOST/ enable_service neutron-rpc-server run_process neutron-rpc-server "$NEUTRON_BIN_DIR/neutron-rpc-server $cfg_file_options" else run_process q-svc "$NEUTRON_BIN_DIR/neutron-server $cfg_file_options" - neutron_url=$service_protocol://$Q_HOST:$service_port + neutron_url=$service_protocol://$Q_HOST:$service_port/ # Start proxy if enabled if is_service_enabled tls-proxy; then start_tls_proxy neutron '*' $Q_PORT $Q_HOST $Q_PORT_INT fi fi + if [ ! -z "$NEUTRON_ENDPOINT_SERVICE_NAME" ]; then + neutron_url=$neutron_url$NEUTRON_ENDPOINT_SERVICE_NAME + fi echo "Waiting for Neutron to start..." local testcmd="wget ${ssl_ca} --no-proxy -q -O- $neutron_url" @@ -905,12 +917,25 @@ function _configure_neutron_plugin_agent { neutron_plugin_configure_plugin_agent } +function _replace_api_paste_composite { + local sep + sep=$(echo -ne "\x01") + # Replace it + $sudo sed -i -e "s/\/\: neutronversions_composite/\/"${NEUTRON_ENDPOINT_SERVICE_NAME}"\/\: neutronversions_composite/" "$Q_API_PASTE_FILE" + $sudo sed -i -e "s/\/healthcheck\: healthcheck/\/"${NEUTRON_ENDPOINT_SERVICE_NAME}"\/healthcheck\: healthcheck/" "$Q_API_PASTE_FILE" + $sudo sed -i -e "s/\/v2.0\: neutronapi_v2_0/\/"${NEUTRON_ENDPOINT_SERVICE_NAME}"\/v2.0\: neutronapi_v2_0/" "$Q_API_PASTE_FILE" +} + # _configure_neutron_service() - Set config files for neutron service # It is called when q-svc is enabled. function _configure_neutron_service { Q_API_PASTE_FILE=$NEUTRON_CONF_DIR/api-paste.ini cp $NEUTRON_DIR/etc/api-paste.ini $Q_API_PASTE_FILE + if [[ -n "$NEUTRON_ENDPOINT_SERVICE_NAME" ]]; then + _replace_api_paste_composite + fi + # Update either configuration file with plugin iniset $NEUTRON_CONF DEFAULT core_plugin $Q_PLUGIN_CLASS From facf15626e5776bc64a2f072bdccadbda714a8f2 Mon Sep 17 00:00:00 2001 From: Takashi Kajinami Date: Wed, 13 Jul 2022 15:58:42 +0900 Subject: [PATCH 249/574] Neutron: Do not set removed allow_overlapping_ips The parameter has been removed from neutron by [1]. 
[1] fde91e8059a9a23fb7ece6e3463984329c7ea581 Change-Id: I3b838ea741d19729d6fcf03c0478b1b4d8ec1213 --- lib/neutron | 1 - lib/neutron-legacy | 1 - 2 files changed, 2 deletions(-) diff --git a/lib/neutron b/lib/neutron index 6e787f213a..d4815cd5f8 100644 --- a/lib/neutron +++ b/lib/neutron @@ -213,7 +213,6 @@ function configure_neutron_new { iniset $NEUTRON_CONF DEFAULT core_plugin $NEUTRON_CORE_PLUGIN iniset $NEUTRON_CONF DEFAULT policy_file $policy_file - iniset $NEUTRON_CONF DEFAULT allow_overlapping_ips True iniset $NEUTRON_CONF DEFAULT router_distributed $NEUTRON_DISTRIBUTED_ROUTING iniset $NEUTRON_CONF DEFAULT auth_strategy $NEUTRON_AUTH_STRATEGY diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 5e6af0f249..d21be51dcd 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -916,7 +916,6 @@ function _configure_neutron_service { iniset $NEUTRON_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL iniset $NEUTRON_CONF oslo_policy policy_file $Q_POLICY_FILE - iniset $NEUTRON_CONF DEFAULT allow_overlapping_ips $Q_ALLOW_OVERLAPPING_IP iniset $NEUTRON_CONF DEFAULT auth_strategy $Q_AUTH_STRATEGY configure_keystone_authtoken_middleware $NEUTRON_CONF $Q_ADMIN_USERNAME From b70d98fe75621d7c71197f82b9fde630d2fa50b2 Mon Sep 17 00:00:00 2001 From: Martin Kopec Date: Wed, 20 Jul 2022 14:05:15 +0000 Subject: [PATCH 250/574] Fix doc for adding sudo privileges to stack user Writing NOPASSWD directive into /etc/sudoers was throwing permission denied errors. This commit writes the directive to the /etc/sudoers.d/stack file instead. Closes-Bug: #1981541 Change-Id: If30f01aa5f3a33dda79ff4a6892116511c8e1542 --- doc/source/guides/multinode-lab.rst | 2 +- doc/source/guides/single-machine.rst | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/guides/multinode-lab.rst b/doc/source/guides/multinode-lab.rst index 79a76dedb1..658422b0af 100644 --- a/doc/source/guides/multinode-lab.rst +++ b/doc/source/guides/multinode-lab.rst @@ -89,7 +89,7 @@ password: :: - echo "stack ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers + echo "stack ALL=(ALL) NOPASSWD: ALL" | sudo tee /etc/sudoers.d/stack From here on use the ``stack`` user. **Logout** and **login** as the ``stack`` user. diff --git a/doc/source/guides/single-machine.rst b/doc/source/guides/single-machine.rst index 03d93743f7..0529e30f08 100644 --- a/doc/source/guides/single-machine.rst +++ b/doc/source/guides/single-machine.rst @@ -63,7 +63,7 @@ to have sudo privileges: .. code-block:: console $ apt-get install sudo -y || yum install -y sudo - $ echo "stack ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers + $ echo "stack ALL=(ALL) NOPASSWD: ALL" | sudo tee /etc/sudoers.d/stack .. note:: On some systems you may need to use ``sudo visudo``. From 79bef068b69c7e97a63aaa3e7fae13bcbc649ebd Mon Sep 17 00:00:00 2001 From: Hoai-Thu Vuong Date: Tue, 2 Aug 2022 14:41:59 +0700 Subject: [PATCH 251/574] remove duplicate line of REGION_NAME Change-Id: I42b270749f057c5751e809aba282112b990b9f38 --- stackrc | 2 -- 1 file changed, 2 deletions(-) diff --git a/stackrc b/stackrc index f0039f0043..b3130e5f7f 100644 --- a/stackrc +++ b/stackrc @@ -903,8 +903,6 @@ fi # Default is dependent on TUNNEL_IP_VERSION above. 
TUNNEL_ENDPOINT_IP=${TUNNEL_ENDPOINT_IP:-${DEF_TUNNEL_ENDPOINT_IP}} -REGION_NAME=${REGION_NAME:-RegionOne} - # Configure services to use syslog instead of writing to individual log files SYSLOG=$(trueorfalse False SYSLOG) SYSLOG_HOST=${SYSLOG_HOST:-$HOST_IP} From d266c87b1d3ecae1b40589832efc9bf7cf3e524c Mon Sep 17 00:00:00 2001 From: Nobuhiro MIKI Date: Mon, 8 Aug 2022 16:45:31 +0900 Subject: [PATCH 252/574] iniset: fix handling of values containg ampersand Attempting to set a value containing the ampersand character (&) by iniset would corrupt the value. So, add an escaping process. Signed-off-by: Nobuhiro MIKI Closes-Bug: #1983816 Change-Id: Ie2633bacd2d761d110e6cb12f95382325c329415 --- inc/ini-config | 3 +++ tests/test_ini_config.sh | 12 +++++++++++- 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/inc/ini-config b/inc/ini-config index 79936823d2..f65e42d3a5 100644 --- a/inc/ini-config +++ b/inc/ini-config @@ -189,6 +189,9 @@ function iniset { local option=$3 local value=$4 + # Escape the ampersand character (&) + value=$(echo $value | sed -e 's/&/\\&/g') + if [[ -z $section || -z $option ]]; then $xtrace return diff --git a/tests/test_ini_config.sh b/tests/test_ini_config.sh index 6ed1647f34..6367cde441 100755 --- a/tests/test_ini_config.sh +++ b/tests/test_ini_config.sh @@ -44,6 +44,9 @@ empty = multi = foo1 multi = foo2 +[fff] +ampersand = + [key_with_spaces] rgw special key = something @@ -85,7 +88,7 @@ fi # test iniget_sections VAL=$(iniget_sections "${TEST_INI}") -assert_equal "$VAL" "default aaa bbb ccc ddd eee key_with_spaces \ +assert_equal "$VAL" "default aaa bbb ccc ddd eee fff key_with_spaces \ del_separate_options del_same_option del_missing_option \ del_missing_option_multi del_no_options" @@ -124,6 +127,13 @@ iniset ${SUDO_ARG} ${TEST_INI} bbb handlers "33,44" VAL=$(iniget ${TEST_INI} bbb handlers) assert_equal "$VAL" "33,44" "inset at EOF" +# Test with ampersand in values +for i in `seq 3`; do + iniset ${TEST_INI} fff ampersand '&y' +done +VAL=$(iniget ${TEST_INI} fff ampersand) +assert_equal "$VAL" "&y" "iniset ampersands in option" + # test empty option if ini_has_option ${SUDO_ARG} ${TEST_INI} ddd empty; then passed "ini_has_option: ddd.empty present" From 90e5479f382af1a5482f0acccdc36c6d18321634 Mon Sep 17 00:00:00 2001 From: Martin Kopec Date: Tue, 16 Aug 2022 17:29:16 +0200 Subject: [PATCH 253/574] Remove forgotten LinuxMint occurrence Right now we don't officialy support LinuxMint as our documentation says [1], it seems LinuxMint is a relict and got forgotten over time. This patch removes LinuxMint from the code in order not to confuse users. [1] https://docs.openstack.org/devstack/latest/ Closes-Bug: #1983427 Change-Id: Ie1ced25f89389494b28a7b2e9bb1c4273e002dd5 --- doc/source/plugins.rst | 2 +- functions-common | 7 +++---- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/doc/source/plugins.rst b/doc/source/plugins.rst index 7d70d74dd0..62dd15bfb1 100644 --- a/doc/source/plugins.rst +++ b/doc/source/plugins.rst @@ -238,7 +238,7 @@ package dependencies, packages may be listed at the following locations in the top-level of the plugin repository: - ``./devstack/files/debs/$plugin_name`` - Packages to install when running - on Ubuntu, Debian or Linux Mint. + on Ubuntu or Debian. - ``./devstack/files/rpms/$plugin_name`` - Packages to install when running on Red Hat, Fedora, or CentOS. 
diff --git a/functions-common b/functions-common index e5b07514e5..92a6678de0 100644 --- a/functions-common +++ b/functions-common @@ -426,7 +426,7 @@ function GetOSVersion { os_VENDOR=$(lsb_release -i -s) fi - if [[ $os_VENDOR =~ (Debian|Ubuntu|LinuxMint) ]]; then + if [[ $os_VENDOR =~ (Debian|Ubuntu) ]]; then os_PACKAGE="deb" else os_PACKAGE="rpm" @@ -444,9 +444,8 @@ declare -g DISTRO function GetDistro { GetOSVersion - if [[ "$os_VENDOR" =~ (Ubuntu) || "$os_VENDOR" =~ (Debian) || \ - "$os_VENDOR" =~ (LinuxMint) ]]; then - # 'Everyone' refers to Ubuntu / Debian / Mint releases by + if [[ "$os_VENDOR" =~ (Ubuntu) || "$os_VENDOR" =~ (Debian) ]]; then + # 'Everyone' refers to Ubuntu / Debian releases by # the code name adjective DISTRO=$os_CODENAME elif [[ "$os_VENDOR" =~ (Fedora) ]]; then From fdfc14451afc4d7f78edadb1b26a3a845eace715 Mon Sep 17 00:00:00 2001 From: Eliad Cohen Date: Tue, 16 Aug 2022 13:00:45 -0400 Subject: [PATCH 254/574] Clean up use of get_field Openstack client can return the id field for create/show commands using `-f value -c id`. Cleaned up the use of grep 'id' with get_field Change-Id: I2f4338f30c11e5139cda51c92524782b86f0aacc --- functions | 4 ++-- lib/neutron_plugins/services/l3 | 20 ++++++++++---------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/functions b/functions index ccca5cda51..7ada0feba7 100644 --- a/functions +++ b/functions @@ -414,10 +414,10 @@ function upload_image { # kernel for use when uploading the root filesystem. local kernel_id="" ramdisk_id=""; if [ -n "$kernel" ]; then - kernel_id=$(openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name-kernel" $(_image_properties_to_arg $img_property) --public --container-format aki --disk-format aki < "$kernel" | grep ' id ' | get_field 2) + kernel_id=$(openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name-kernel" $(_image_properties_to_arg $img_property) --public --container-format aki --disk-format aki < "$kernel" -f value -c id) fi if [ -n "$ramdisk" ]; then - ramdisk_id=$(openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name-ramdisk" $(_image_properties_to_arg $img_property) --public --container-format ari --disk-format ari < "$ramdisk" | grep ' id ' | get_field 2) + ramdisk_id=$(openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name-ramdisk" $(_image_properties_to_arg $img_property) --public --container-format ari --disk-format ari < "$ramdisk" -f value -c id) fi _upload_image "${image_name%.img}" ami ami "$image" ${kernel_id:+ kernel_id=$kernel_id} ${ramdisk_id:+ ramdisk_id=$ramdisk_id} $img_property fi diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3 index fbd4692bba..3dffc33d37 100644 --- a/lib/neutron_plugins/services/l3 +++ b/lib/neutron_plugins/services/l3 @@ -166,14 +166,14 @@ function create_neutron_initial_network { if is_provider_network; then die_if_not_set $LINENO PHYSICAL_NETWORK "You must specify the PHYSICAL_NETWORK" die_if_not_set $LINENO PROVIDER_NETWORK_TYPE "You must specify the PROVIDER_NETWORK_TYPE" - NET_ID=$(openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" network create $PHYSICAL_NETWORK --provider-network-type $PROVIDER_NETWORK_TYPE --provider-physical-network "$PHYSICAL_NETWORK" ${SEGMENTATION_ID:+--provider-segment $SEGMENTATION_ID} --share | grep ' id ' | get_field 2) + NET_ID=$(openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" 
network create $PHYSICAL_NETWORK --provider-network-type $PROVIDER_NETWORK_TYPE --provider-physical-network "$PHYSICAL_NETWORK" ${SEGMENTATION_ID:+--provider-segment $SEGMENTATION_ID} --share -f value -c id) die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PHYSICAL_NETWORK" if [[ "$IP_VERSION" =~ 4.* ]]; then if [ -z $SUBNETPOOL_V4_ID ]; then fixed_range_v4=$FIXED_RANGE fi - SUBNET_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" subnet create --ip-version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} $PROVIDER_SUBNET_NAME --gateway $NETWORK_GATEWAY ${SUBNETPOOL_V4_ID:+--subnet-pool $SUBNETPOOL_V4_ID} --network $NET_ID ${fixed_range_v4:+--subnet-range $fixed_range_v4} | grep ' id ' | get_field 2) + SUBNET_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" subnet create --ip-version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} $PROVIDER_SUBNET_NAME --gateway $NETWORK_GATEWAY ${SUBNETPOOL_V4_ID:+--subnet-pool $SUBNETPOOL_V4_ID} --network $NET_ID ${fixed_range_v4:+--subnet-range $fixed_range_v4} -f value -c id) die_if_not_set $LINENO SUBNET_ID "Failure creating SUBNET_ID for $PROVIDER_SUBNET_NAME" fi @@ -183,7 +183,7 @@ function create_neutron_initial_network { if [ -z $SUBNETPOOL_V6_ID ]; then fixed_range_v6=$IPV6_PROVIDER_FIXED_RANGE fi - IPV6_SUBNET_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" subnet create --ip-version 6 --gateway $IPV6_PROVIDER_NETWORK_GATEWAY $IPV6_PROVIDER_SUBNET_NAME ${SUBNETPOOL_V6_ID:+--subnet-pool $SUBNETPOOL_V6_ID} --network $NET_ID ${fixed_range_v6:+--subnet-range $fixed_range_v6} | grep ' id ' | get_field 2) + IPV6_SUBNET_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" subnet create --ip-version 6 --gateway $IPV6_PROVIDER_NETWORK_GATEWAY $IPV6_PROVIDER_SUBNET_NAME ${SUBNETPOOL_V6_ID:+--subnet-pool $SUBNETPOOL_V6_ID} --network $NET_ID ${fixed_range_v6:+--subnet-range $fixed_range_v6} -f value -c id) die_if_not_set $LINENO IPV6_SUBNET_ID "Failure creating IPV6_SUBNET_ID for $IPV6_PROVIDER_SUBNET_NAME" fi @@ -193,7 +193,7 @@ function create_neutron_initial_network { sudo ip link set $PUBLIC_INTERFACE up fi else - NET_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" network create "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2) + NET_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" network create "$PRIVATE_NETWORK_NAME" -f value -c id) die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PRIVATE_NETWORK_NAME" if [[ "$IP_VERSION" =~ 4.* ]]; then @@ -211,11 +211,11 @@ function create_neutron_initial_network { # Create a router, and add the private subnet as one of its interfaces if [[ "$Q_L3_ROUTER_PER_TENANT" == "True" ]]; then # create a tenant-owned router. - ROUTER_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" router create $Q_ROUTER_NAME | grep ' id ' | get_field 2) + ROUTER_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" router create $Q_ROUTER_NAME -f value -c id) die_if_not_set $LINENO ROUTER_ID "Failure creating router $Q_ROUTER_NAME" else # Plugin only supports creating a single router, which should be admin owned. 
- ROUTER_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router create $Q_ROUTER_NAME | grep ' id ' | get_field 2) + ROUTER_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router create $Q_ROUTER_NAME -f value -c id) die_if_not_set $LINENO ROUTER_ID "Failure creating router $Q_ROUTER_NAME" fi @@ -225,9 +225,9 @@ function create_neutron_initial_network { fi # Create an external network, and a subnet. Configure the external network as router gw if [ "$Q_USE_PROVIDERNET_FOR_PUBLIC" = "True" ]; then - EXT_NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create "$PUBLIC_NETWORK_NAME" $EXTERNAL_NETWORK_FLAGS --provider-network-type ${PUBLIC_PROVIDERNET_TYPE:-flat} ${PUBLIC_PROVIDERNET_SEGMENTATION_ID:+--provider-segment $PUBLIC_PROVIDERNET_SEGMENTATION_ID} --provider-physical-network ${PUBLIC_PHYSICAL_NETWORK} | grep ' id ' | get_field 2) + EXT_NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create "$PUBLIC_NETWORK_NAME" $EXTERNAL_NETWORK_FLAGS --provider-network-type ${PUBLIC_PROVIDERNET_TYPE:-flat} ${PUBLIC_PROVIDERNET_SEGMENTATION_ID:+--provider-segment $PUBLIC_PROVIDERNET_SEGMENTATION_ID} --provider-physical-network ${PUBLIC_PHYSICAL_NETWORK} -f value -c id) else - EXT_NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create "$PUBLIC_NETWORK_NAME" $EXTERNAL_NETWORK_FLAGS | grep ' id ' | get_field 2) + EXT_NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create "$PUBLIC_NETWORK_NAME" $EXTERNAL_NETWORK_FLAGS -f value -c id) fi die_if_not_set $LINENO EXT_NET_ID "Failure creating EXT_NET_ID for $PUBLIC_NETWORK_NAME" @@ -257,7 +257,7 @@ function _neutron_create_private_subnet_v4 { subnet_params+="${fixed_range_v4:+--subnet-range $fixed_range_v4} " subnet_params+="--network $NET_ID $PRIVATE_SUBNET_NAME" local subnet_id - subnet_id=$(openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" subnet create $subnet_params | grep ' id ' | get_field 2) + subnet_id=$(openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" subnet create $subnet_params -f value -c id) die_if_not_set $LINENO subnet_id "Failure creating private IPv4 subnet" echo $subnet_id } @@ -278,7 +278,7 @@ function _neutron_create_private_subnet_v6 { subnet_params+="${fixed_range_v6:+--subnet-range $fixed_range_v6} " subnet_params+="$ipv6_modes --network $NET_ID $IPV6_PRIVATE_SUBNET_NAME " local ipv6_subnet_id - ipv6_subnet_id=$(openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" subnet create $subnet_params | grep ' id ' | get_field 2) + ipv6_subnet_id=$(openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" subnet create $subnet_params -f value -c id) die_if_not_set $LINENO ipv6_subnet_id "Failure creating private IPv6 subnet" echo $ipv6_subnet_id } From ca5f9195610a94ca0a567700a94f9417ca877336 Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Sun, 21 Aug 2022 10:52:41 +0200 Subject: [PATCH 255/574] Clean up n-net remnants In I90316208d1af42c1659d3bee386f95e38aaf2c56 support for nova-network was removed, but some bits remained, fix this up. 
Change-Id: Iba7e1785fd0bdf0a6e94e5e03438fc7634621e49 --- files/debs/nova | 2 -- files/rpms-suse/nova | 2 -- files/rpms/nova | 2 -- lib/nova | 14 -------------- stack.sh | 10 +++------- 5 files changed, 3 insertions(+), 27 deletions(-) diff --git a/files/debs/nova b/files/debs/nova index 0194f00f2c..5c00ad72d9 100644 --- a/files/debs/nova +++ b/files/debs/nova @@ -1,7 +1,5 @@ conntrack curl -dnsmasq-base -dnsmasq-utils # for dhcp_release ebtables genisoimage # required for config_drive iptables diff --git a/files/rpms-suse/nova b/files/rpms-suse/nova index 1cc2f62ea5..082b9aca22 100644 --- a/files/rpms-suse/nova +++ b/files/rpms-suse/nova @@ -1,8 +1,6 @@ cdrkit-cdrtools-compat # dist:sle12 conntrack-tools curl -dnsmasq -dnsmasq-utils # dist:opensuse-12.3,opensuse-13.1 ebtables iptables iputils diff --git a/files/rpms/nova b/files/rpms/nova index 9522e5729d..f2824ee2c4 100644 --- a/files/rpms/nova +++ b/files/rpms/nova @@ -1,7 +1,5 @@ conntrack-tools curl -dnsmasq # for q-dhcp -dnsmasq-utils # for dhcp_release ebtables genisoimage # not:rhel9 required for config_drive iptables diff --git a/lib/nova b/lib/nova index da3a10edd0..6de1d3382f 100644 --- a/lib/nova +++ b/lib/nova @@ -107,20 +107,6 @@ NOVA_FILTERS="AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,Ima QEMU_CONF=/etc/libvirt/qemu.conf -# Set default defaults here as some hypervisor drivers override these -PUBLIC_INTERFACE_DEFAULT=br100 -# Set ``GUEST_INTERFACE_DEFAULT`` to some interface on the box so that -# the default isn't completely crazy. This will match ``eth*``, ``em*``, or -# the new ``p*`` interfaces, then basically picks the first -# alphabetically. It's probably wrong, however it's less wrong than -# always using ``eth0`` which doesn't exist on new Linux distros at all. -GUEST_INTERFACE_DEFAULT=$(ip link \ - | grep 'state UP' \ - | awk '{print $2}' \ - | sed 's/://' \ - | grep ^[ep] \ - | head -1) - # ``NOVA_VNC_ENABLED`` can be used to forcibly enable VNC configuration. # In multi-node setups allows compute hosts to not run ``n-novnc``. NOVA_VNC_ENABLED=$(trueorfalse False NOVA_VNC_ENABLED) diff --git a/stack.sh b/stack.sh index df283bbe50..c99189e6dc 100755 --- a/stack.sh +++ b/stack.sh @@ -1152,7 +1152,8 @@ fi # ---- if is_service_enabled q-dhcp; then - # Delete traces of nova networks from prior runs + # TODO(frickler): These are remnants from n-net, check which parts are really + # still needed for Neutron. # Do not kill any dnsmasq instance spawned by NetworkManager netman_pid=$(pidof NetworkManager || true) if [ -z "$netman_pid" ]; then @@ -1212,12 +1213,7 @@ if is_service_enabled nova; then echo_summary "Configuring Nova" init_nova - # Additional Nova configuration that is dependent on other services - # TODO(stephenfin): Is it possible for neutron to *not* be enabled now? If - # not, remove the if here - if is_service_enabled neutron; then - async_runfunc configure_neutron_nova - fi + async_runfunc configure_neutron_nova fi From ccd116d36447ba1c5efad58ee360eb7f276eb7c6 Mon Sep 17 00:00:00 2001 From: Alan Bishop Date: Wed, 10 Aug 2022 10:30:19 -0700 Subject: [PATCH 256/574] Cinder: add creator role when barbican is enabled When barbican is enabled, add the "creator" role to cinder's service user so that cinder can create secrets. Cinder needs to create barbican secrets when migrating encryption keys from the legacy ConfKeyManager to barbican. Cinder also needs to create barbican secrets in order to support transferring encrypted volumes. 
Implements: bp/transfer-encrypted-volume Depends-On: I216f78e8a300ab3f79bbcbb38110adf2bbec2196 Change-Id: Ia3f414c4b9b0829f60841a6dd63c97a893fdde4d --- lib/cinder | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/lib/cinder b/lib/cinder index ca2c084aff..7dd7539eca 100644 --- a/lib/cinder +++ b/lib/cinder @@ -388,16 +388,24 @@ function configure_cinder { # create_cinder_accounts() - Set up common required cinder accounts -# Tenant User Roles +# Project User Roles # ------------------------------------------------------------------ -# service cinder admin # if enabled +# SERVICE_PROJECT_NAME cinder service +# SERVICE_PROJECT_NAME cinder creator (if Barbican is enabled) # Migrated from keystone_data.sh function create_cinder_accounts { # Cinder if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then - create_service_user "cinder" + local extra_role="" + + # cinder needs the "creator" role in order to interact with barbican + if is_service_enabled barbican; then + extra_role=$(get_or_create_role "creator") + fi + + create_service_user "cinder" $extra_role # block-storage is the official service type get_or_create_service "cinder" "block-storage" "Cinder Volume Service" From e7d2623dca483497ec51c75dfe1b6162801eead0 Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Sun, 21 Aug 2022 12:54:57 +0200 Subject: [PATCH 257/574] Clean up neutron cleanup code neutron-ns-metadata-proxy was dropped from Neutron 5 years ago, no need to keep trying to kill it. Change-Id: I20b6d68dd8dde36057a2418bca0841bdea377b07 --- lib/neutron | 1 - lib/neutron-legacy | 1 - 2 files changed, 2 deletions(-) diff --git a/lib/neutron b/lib/neutron index 1f54e0e043..b3e3d72e8c 100644 --- a/lib/neutron +++ b/lib/neutron @@ -567,7 +567,6 @@ function stop_neutron_new { fi if is_service_enabled neutron-metadata-agent; then - sudo pkill -9 -f neutron-ns-metadata-proxy || : stop_process neutron-metadata-agent fi } diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 9229b47988..baf67f209e 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -628,7 +628,6 @@ function stop_mutnauq_other { fi if is_service_enabled q-meta; then - sudo pkill -9 -f neutron-ns-metadata-proxy || : stop_process q-meta fi From b9b6d6b862ce69a875c152ad30da8f4717c75272 Mon Sep 17 00:00:00 2001 From: June Yi Date: Sat, 2 Jul 2022 13:07:43 +0900 Subject: [PATCH 258/574] Respect constraints on tempest venv consistently In case of online mode, there is a procedure to recreate tempest venv. For consistency of tempest venv during the entire stack.sh process, add logic to consider the TEMPEST_VENV_UPPER_CONSTRAINTS option here. 
Closes-bug: #1980483 Signed-off-by: June Yi Change-Id: I0cea282152fd363af8671cab1b5f733ebe2bd4df --- lib/tempest | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/tempest b/lib/tempest index 206b37b5bf..87a2244784 100644 --- a/lib/tempest +++ b/lib/tempest @@ -695,13 +695,13 @@ function configure_tempest { local tmp_cfg_file tmp_cfg_file=$(mktemp) cd $TEMPEST_DIR - if [[ "$OFFLINE" != "True" ]]; then - tox -revenv-tempest --notest - fi local tmp_u_c_m tmp_u_c_m=$(mktemp -t tempest_u_c_m.XXXXXXXXXX) set_tempest_venv_constraints $tmp_u_c_m + if [[ "$OFFLINE" != "True" ]]; then + tox -revenv-tempest --notest + fi tox -evenv-tempest -- pip install -c $tmp_u_c_m -r requirements.txt rm -f $tmp_u_c_m From 3de92db6634a6d1455b7211ec869aed35508c58c Mon Sep 17 00:00:00 2001 From: Slawek Kaplonski Date: Fri, 26 Aug 2022 12:58:29 +0200 Subject: [PATCH 259/574] Fix installation of OVS/OVN from sources This patch changes user who runs ovsdb-server and ovn-nortd services to root. It also adds installation of the libssl dev package before compilation of the openvswitch if TLS service is enabled. Co-Authored-By: Fernando Royo Closes-Bug: #1987832 Change-Id: I83fc9250ae5b7c1686938a0dd25d66b40fc6c6aa --- lib/neutron_plugins/ovn_agent | 4 ++-- lib/neutron_plugins/ovs_source | 6 ++++++ 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index 341b84d959..8eb2993b94 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -616,7 +616,7 @@ function _start_ovs { dbcmd+=" --remote=db:hardware_vtep,Global,managers $OVS_DATADIR/vtep.db" fi dbcmd+=" $OVS_DATADIR/conf.db" - _run_process ovsdb-server "$dbcmd" + _run_process ovsdb-server "$dbcmd" "" "$STACK_GROUP" "root" # Note: ovn-controller will create and configure br-int once it is started. # So, no need to create it now because nothing depends on that bridge here. @@ -704,7 +704,7 @@ function start_ovn { local cmd="/bin/bash $SCRIPTDIR/ovn-ctl --no-monitor start_northd" local stop_cmd="/bin/bash $SCRIPTDIR/ovn-ctl stop_northd" - _run_process ovn-northd "$cmd" "$stop_cmd" + _run_process ovn-northd "$cmd" "$stop_cmd" "$STACK_GROUP" "root" else _start_process "$OVN_NORTHD_SERVICE" fi diff --git a/lib/neutron_plugins/ovs_source b/lib/neutron_plugins/ovs_source index 9ae5555afb..164d574c42 100644 --- a/lib/neutron_plugins/ovs_source +++ b/lib/neutron_plugins/ovs_source @@ -87,9 +87,15 @@ function prepare_for_ovs_compilation { install_package kernel-devel-$KERNEL_VERSION install_package kernel-headers-$KERNEL_VERSION + if is_service_enabled tls-proxy; then + install_package openssl-devel + fi elif is_ubuntu ; then install_package linux-headers-$KERNEL_VERSION + if is_service_enabled tls-proxy; then + install_package libssl-dev + fi fi } From 97061c9a1f2a2989e0bacb5f7cc5910c75aaeb44 Mon Sep 17 00:00:00 2001 From: Gorka Eguileor Date: Thu, 14 Oct 2021 09:55:56 +0200 Subject: [PATCH 260/574] Add LVM NVMe support This patch adds NVMe LVM support to the existing iSCSI LVM configuration support. We deprecate the CINDER_ISCSI_HELPER configuration option since we are no longer limited to iSCSI, and replace it with the CINDER_TARGET_HELPER option. The patch also adds another 3 target configuration options: - CINDER_TARGET_PROTOCOL - CINDER_TARGET_PREFIX - CINDER_TARGET_PORT These options will have different defaults based on the selected target helper. 
For tgtadm and lioadm they'll be iSCSI, iqn.2010-10.org.openstack:, and 3260 respectively, and for nvmet they'll be nvmet_rdma, nvme-subsystem-1, and 4420. Besides nvmet_rdma the CINDER_TARGET_PROTOCOL option can also be set to nvmet_tcp, and nvmet_fc. For the RDMA transport protocol devstack will be using Soft-RoCE and creating a device on top of the network interface. LVM NVMe-TCP support is added in the dependency mentioned in the footer and LVM NVMe-FC will be added in later patches (need os-brick and cinder patches) but the code here should still be valid. Change-Id: I6578cdc27489b34916cdeb72ba3fdf06ea9d4ad8 --- doc/source/configuration.rst | 29 +++++++++++ lib/cinder | 95 +++++++++++++++++++++++++++++------ lib/cinder_backends/fake_gate | 2 +- lib/cinder_backends/lvm | 5 +- lib/lvm | 10 ++-- lib/nova | 40 ++++++++++++--- 6 files changed, 154 insertions(+), 27 deletions(-) diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index 757b4001d9..0d8773fb6a 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -669,6 +669,35 @@ adjusted by setting ``CINDER_QUOTA_VOLUMES``, ``CINDER_QUOTA_BACKUPS``, or ``CINDER_QUOTA_SNAPSHOTS`` to the desired value. (The default for each is 10.) +DevStack's Cinder LVM configuration module currently supports both iSCSI and +NVMe connections, and we can choose which one to use with options +``CINDER_TARGET_HELPER``, ``CINDER_TARGET_PROTOCOL``, ``CINDER_TARGET_PREFIX``, +and ``CINDER_TARGET_PORT``. + +Defaults use iSCSI with the LIO target manager:: + + CINDER_TARGET_HELPER="lioadm" + CINDER_TARGET_PROTOCOL="iscsi" + CINDER_TARGET_PREFIX="iqn.2010-10.org.openstack:" + CINDER_TARGET_PORT=3260 + +Additionally there are 3 supported transport protocols for NVMe, +``nvmet_rdma``, ``nvmet_tcp``, and ``nvmet_fc``, and when the ``nvmet`` target +is selected the protocol, prefix, and port defaults will change to more +sensible defaults for NVMe:: + + CINDER_TARGET_HELPER="nvmet" + CINDER_TARGET_PROTOCOL="nvmet_rdma" + CINDER_TARGET_PREFIX="nvme-subsystem-1" + CINDER_TARGET_PORT=4420 + +When selecting the RDMA transport protocol DevStack will create on Cinder nodes +a Software RoCE device on top of the ``HOST_IP_IFACE`` and if it is not defined +then on top of the interface with IP address ``HOST_IP`` or ``HOST_IPV6``. + +This Soft-RoCE device will always be created on the Nova compute side since we +cannot tell beforehand whether there will be an RDMA connection or not. 
+ Keystone ~~~~~~~~ diff --git a/lib/cinder b/lib/cinder index ca2c084aff..bc704c1e5d 100644 --- a/lib/cinder +++ b/lib/cinder @@ -43,6 +43,13 @@ GITDIR["python-cinderclient"]=$DEST/python-cinderclient GITDIR["python-brick-cinderclient-ext"]=$DEST/python-brick-cinderclient-ext CINDER_DIR=$DEST/cinder +if [[ $SERVICE_IP_VERSION == 6 ]]; then + CINDER_MY_IP="$HOST_IPV6" +else + CINDER_MY_IP="$HOST_IP" +fi + + # Cinder virtual environment if [[ ${USE_VENV} = True ]]; then PROJECT_VENV["cinder"]=${CINDER_DIR}.venv @@ -88,13 +95,32 @@ CINDER_ENABLED_BACKENDS=${CINDER_ENABLED_BACKENDS:-lvm:lvmdriver-1} CINDER_VOLUME_CLEAR=${CINDER_VOLUME_CLEAR:-${CINDER_VOLUME_CLEAR_DEFAULT:-zero}} CINDER_VOLUME_CLEAR=$(echo ${CINDER_VOLUME_CLEAR} | tr '[:upper:]' '[:lower:]') -# Default to lioadm -CINDER_ISCSI_HELPER=${CINDER_ISCSI_HELPER:-lioadm} + +if [[ -n "$CINDER_ISCSI_HELPER" ]]; then + if [[ -z "$CINDER_TARGET_HELPER" ]]; then + deprecated 'Using CINDER_ISCSI_HELPER is deprecated, use CINDER_TARGET_HELPER instead' + CINDER_TARGET_HELPER="$CINDER_ISCSI_HELPER" + else + deprecated 'Deprecated CINDER_ISCSI_HELPER is set, but is being overwritten by CINDER_TARGET_HELPER' + fi +fi +CINDER_TARGET_HELPER=${CINDER_TARGET_HELPER:-lioadm} + +if [[ $CINDER_TARGET_HELPER == 'nvmet' ]]; then + CINDER_TARGET_PROTOCOL=${CINDER_TARGET_PROTOCOL:-'nvmet_rdma'} + CINDER_TARGET_PREFIX=${CINDER_TARGET_PREFIX:-'nvme-subsystem-1'} + CINDER_TARGET_PORT=${CINDER_TARGET_PORT:-4420} +else + CINDER_TARGET_PROTOCOL=${CINDER_TARGET_PROTOCOL:-'iscsi'} + CINDER_TARGET_PREFIX=${CINDER_TARGET_PREFIX:-'iqn.2010-10.org.openstack:'} + CINDER_TARGET_PORT=${CINDER_TARGET_PORT:-3260} +fi + # EL and SUSE should only use lioadm if is_fedora || is_suse; then - if [[ ${CINDER_ISCSI_HELPER} != "lioadm" ]]; then - die "lioadm is the only valid Cinder target_helper config on this platform" + if [[ ${CINDER_TARGET_HELPER} != "lioadm" && ${CINDER_TARGET_HELPER} != 'nvmet' ]]; then + die "lioadm and nvmet are the only valid Cinder target_helper config on this platform" fi fi @@ -187,7 +213,7 @@ function _cinder_cleanup_apache_wsgi { function cleanup_cinder { # ensure the volume group is cleared up because fails might # leave dead volumes in the group - if [ "$CINDER_ISCSI_HELPER" = "tgtadm" ]; then + if [ "$CINDER_TARGET_HELPER" = "tgtadm" ]; then local targets targets=$(sudo tgtadm --op show --mode target) if [ $? 
-ne 0 ]; then @@ -215,8 +241,14 @@ function cleanup_cinder { else stop_service tgtd fi - else + elif [ "$CINDER_TARGET_HELPER" = "lioadm" ]; then sudo cinder-rtstool get-targets | sudo xargs -rn 1 cinder-rtstool delete + elif [ "$CINDER_TARGET_HELPER" = "nvmet" ]; then + # If we don't disconnect everything vgremove will block + sudo nvme disconnect-all + sudo nvmetcli clear + else + die $LINENO "Unknown value \"$CINDER_TARGET_HELPER\" for CINDER_TARGET_HELPER" fi if is_service_enabled c-vol && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then @@ -267,7 +299,7 @@ function configure_cinder { iniset $CINDER_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - iniset $CINDER_CONF DEFAULT target_helper "$CINDER_ISCSI_HELPER" + iniset $CINDER_CONF DEFAULT target_helper "$CINDER_TARGET_HELPER" iniset $CINDER_CONF database connection `database_connection_url cinder` iniset $CINDER_CONF DEFAULT api_paste_config $CINDER_API_PASTE_INI iniset $CINDER_CONF DEFAULT rootwrap_config "$CINDER_CONF_DIR/rootwrap.conf" @@ -275,11 +307,7 @@ function configure_cinder { iniset $CINDER_CONF DEFAULT osapi_volume_listen $CINDER_SERVICE_LISTEN_ADDRESS iniset $CINDER_CONF DEFAULT state_path $CINDER_STATE_PATH iniset $CINDER_CONF oslo_concurrency lock_path $CINDER_STATE_PATH - if [[ $SERVICE_IP_VERSION == 6 ]]; then - iniset $CINDER_CONF DEFAULT my_ip "$HOST_IPV6" - else - iniset $CINDER_CONF DEFAULT my_ip "$HOST_IP" - fi + iniset $CINDER_CONF DEFAULT my_ip "$CINDER_MY_IP" iniset $CINDER_CONF key_manager backend cinder.keymgr.conf_key_mgr.ConfKeyManager iniset $CINDER_CONF key_manager fixed_key $(openssl rand -hex 16) if [[ -n "$CINDER_ALLOWED_DIRECT_URL_SCHEMES" ]]; then @@ -465,9 +493,9 @@ function init_cinder { function install_cinder { git_clone $CINDER_REPO $CINDER_DIR $CINDER_BRANCH setup_develop $CINDER_DIR - if [[ "$CINDER_ISCSI_HELPER" == "tgtadm" ]]; then + if [[ "$CINDER_TARGET_HELPER" == "tgtadm" ]]; then install_package tgt - elif [[ "$CINDER_ISCSI_HELPER" == "lioadm" ]]; then + elif [[ "$CINDER_TARGET_HELPER" == "lioadm" ]]; then if is_ubuntu; then # TODO(frickler): Workaround for https://launchpad.net/bugs/1819819 sudo mkdir -p /etc/target @@ -476,6 +504,43 @@ function install_cinder { else install_package targetcli fi + elif [[ "$CINDER_TARGET_HELPER" == "nvmet" ]]; then + install_package nvme-cli + + # TODO: Remove manual installation of the dependency when the + # requirement is added to nvmetcli: + # http://lists.infradead.org/pipermail/linux-nvme/2022-July/033576.html + if is_ubuntu; then + install_package python3-configshell-fb + else + install_package python3-configshell + fi + # Install from source because Ubuntu doesn't have the package and some packaged versions didn't work on Python 3 + pip_install git+git://git.infradead.org/users/hch/nvmetcli.git + + sudo modprobe nvmet + sudo modprobe nvme-fabrics + + if [[ $CINDER_TARGET_PROTOCOL == 'nvmet_rdma' ]]; then + install_package rdma-core + sudo modprobe nvme-rdma + + # Create the Soft-RoCE device over the networking interface + local iface=${HOST_IP_IFACE:-`ip -br -$SERVICE_IP_VERSION a | grep $CINDER_MY_IP | awk '{print $1}'`} + if [[ -z "$iface" ]]; then + die $LINENO "Cannot find interface to bind Soft-RoCE" + fi + + if ! 
sudo rdma link | grep $iface ; then + sudo rdma link add rxe_$iface type rxe netdev $iface + fi + + elif [[ $CINDER_TARGET_PROTOCOL == 'nvmet_tcp' ]]; then + sudo modprobe nvme-tcp + + else # 'nvmet_fc' + sudo modprobe nvme-fc + fi fi } @@ -512,7 +577,7 @@ function start_cinder { service_port=$CINDER_SERVICE_PORT_INT service_protocol="http" fi - if [ "$CINDER_ISCSI_HELPER" = "tgtadm" ]; then + if [ "$CINDER_TARGET_HELPER" = "tgtadm" ]; then if is_service_enabled c-vol; then # Delete any old stack.conf sudo rm -f /etc/tgt/conf.d/stack.conf diff --git a/lib/cinder_backends/fake_gate b/lib/cinder_backends/fake_gate index 3ffd9a6785..3b9f1d1164 100644 --- a/lib/cinder_backends/fake_gate +++ b/lib/cinder_backends/fake_gate @@ -50,7 +50,7 @@ function configure_cinder_backend_lvm { iniset $CINDER_CONF $be_name volume_backend_name $be_name iniset $CINDER_CONF $be_name volume_driver "cinder.tests.fake_driver.FakeGateDriver" iniset $CINDER_CONF $be_name volume_group $VOLUME_GROUP_NAME-$be_name - iniset $CINDER_CONF $be_name target_helper "$CINDER_ISCSI_HELPER" + iniset $CINDER_CONF $be_name target_helper "$CINDER_TARGET_HELPER" iniset $CINDER_CONF $be_name lvm_type "$CINDER_LVM_TYPE" if [[ "$CINDER_VOLUME_CLEAR" == "non" ]]; then diff --git a/lib/cinder_backends/lvm b/lib/cinder_backends/lvm index e03ef14c55..42865119da 100644 --- a/lib/cinder_backends/lvm +++ b/lib/cinder_backends/lvm @@ -50,7 +50,10 @@ function configure_cinder_backend_lvm { iniset $CINDER_CONF $be_name volume_backend_name $be_name iniset $CINDER_CONF $be_name volume_driver "cinder.volume.drivers.lvm.LVMVolumeDriver" iniset $CINDER_CONF $be_name volume_group $VOLUME_GROUP_NAME-$be_name - iniset $CINDER_CONF $be_name target_helper "$CINDER_ISCSI_HELPER" + iniset $CINDER_CONF $be_name target_helper "$CINDER_TARGET_HELPER" + iniset $CINDER_CONF $be_name target_protocol "$CINDER_TARGET_PROTOCOL" + iniset $CINDER_CONF $be_name target_port "$CINDER_TARGET_PORT" + iniset $CINDER_CONF $be_name target_prefix "$CINDER_TARGET_PREFIX" iniset $CINDER_CONF $be_name lvm_type "$CINDER_LVM_TYPE" iniset $CINDER_CONF $be_name volume_clear "$CINDER_VOLUME_CLEAR" } diff --git a/lib/lvm b/lib/lvm index d3f6bf1792..57ffb967c3 100644 --- a/lib/lvm +++ b/lib/lvm @@ -130,7 +130,7 @@ function init_lvm_volume_group { local size=$2 # Start the tgtd service on Fedora and SUSE if tgtadm is used - if is_fedora || is_suse && [[ "$CINDER_ISCSI_HELPER" = "tgtadm" ]]; then + if is_fedora || is_suse && [[ "$CINDER_TARGET_HELPER" = "tgtadm" ]]; then start_service tgtd fi @@ -138,10 +138,14 @@ function init_lvm_volume_group { _create_lvm_volume_group $vg $size # Remove iscsi targets - if [ "$CINDER_ISCSI_HELPER" = "lioadm" ]; then + if [ "$CINDER_TARGET_HELPER" = "lioadm" ]; then sudo cinder-rtstool get-targets | sudo xargs -rn 1 cinder-rtstool delete - else + elif [ "$CINDER_TARGET_HELPER" = "tgtadm" ]; then sudo tgtadm --op show --mode target | awk '/Target/ {print $3}' | sudo xargs -r -n1 tgt-admin --delete + elif [ "$CINDER_TARGET_HELPER" = "nvmet" ]; then + # If we don't disconnect everything vgremove will block + sudo nvme disconnect-all + sudo nvmetcli clear fi _clean_lvm_volume_group $vg } diff --git a/lib/nova b/lib/nova index da3a10edd0..7902c5fdb9 100644 --- a/lib/nova +++ b/lib/nova @@ -97,6 +97,12 @@ NOVA_SERVICE_LISTEN_ADDRESS=${NOVA_SERVICE_LISTEN_ADDRESS:-$(ipv6_unquote $SERVI METADATA_SERVICE_PORT=${METADATA_SERVICE_PORT:-8775} NOVA_ENABLE_CACHE=${NOVA_ENABLE_CACHE:-True} +if [[ $SERVICE_IP_VERSION == 6 ]]; then + NOVA_MY_IP="$HOST_IPV6" +else + 
NOVA_MY_IP="$HOST_IP" +fi + # Option to enable/disable config drive # NOTE: Set ``FORCE_CONFIG_DRIVE="False"`` to turn OFF config drive FORCE_CONFIG_DRIVE=${FORCE_CONFIG_DRIVE:-"False"} @@ -219,6 +225,9 @@ function cleanup_nova { done sudo iscsiadm --mode node --op delete || true + # Disconnect all nvmeof connections + sudo nvme disconnect-all || true + # Clean out the instances directory. sudo rm -rf $NOVA_INSTANCES_PATH/* fi @@ -306,6 +315,7 @@ function configure_nova { fi fi + # Due to cinder bug #1966513 we ALWAYS need an initiator name for LVM # Ensure each compute host uses a unique iSCSI initiator echo InitiatorName=$(iscsi-iname) | sudo tee /etc/iscsi/initiatorname.iscsi @@ -326,8 +336,28 @@ EOF # not work under FIPS. iniset -sudo /etc/iscsi/iscsid.conf DEFAULT "node.session.auth.chap_algs" "SHA3-256,SHA256" - # ensure that iscsid is started, even when disabled by default - restart_service iscsid + if [[ $CINDER_TARGET_HELPER != 'nvmet' ]]; then + # ensure that iscsid is started, even when disabled by default + restart_service iscsid + + # For NVMe-oF we need different packages that many not be present + else + install_package nvme-cli + sudo modprobe nvme-fabrics + + # Ensure NVMe is ready and create the Soft-RoCE device over the networking interface + if [[ $CINDER_TARGET_PROTOCOL == 'nvmet_rdma' ]]; then + sudo modprobe nvme-rdma + iface=${HOST_IP_IFACE:-`ip -br -$SERVICE_IP_VERSION a | grep $NOVA_MY_IP | awk '{print $1}'`} + if ! sudo rdma link | grep $iface ; then + sudo rdma link add rxe_$iface type rxe netdev $iface + fi + elif [[ $CINDER_TARGET_PROTOCOL == 'nvmet_tcp' ]]; then + sudo modprobe nvme-tcp + else # 'nvmet_fc' + sudo modprobe nvme-fc + fi + fi fi # Rebuild the config file from scratch @@ -418,11 +448,7 @@ function create_nova_conf { iniset $NOVA_CONF filter_scheduler enabled_filters "$NOVA_FILTERS" iniset $NOVA_CONF scheduler workers "$API_WORKERS" iniset $NOVA_CONF neutron default_floating_pool "$PUBLIC_NETWORK_NAME" - if [[ $SERVICE_IP_VERSION == 6 ]]; then - iniset $NOVA_CONF DEFAULT my_ip "$HOST_IPV6" - else - iniset $NOVA_CONF DEFAULT my_ip "$HOST_IP" - fi + iniset $NOVA_CONF DEFAULT my_ip "$NOVA_MY_IP" iniset $NOVA_CONF DEFAULT instance_name_template "${INSTANCE_NAME_PREFIX}%08x" iniset $NOVA_CONF DEFAULT osapi_compute_listen "$NOVA_SERVICE_LISTEN_ADDRESS" iniset $NOVA_CONF DEFAULT metadata_listen "$NOVA_SERVICE_LISTEN_ADDRESS" From f49b435e98cd9d119179d98829241954b8d73669 Mon Sep 17 00:00:00 2001 From: Masayuki Igawa Date: Thu, 22 Sep 2022 11:22:21 +0900 Subject: [PATCH 261/574] [Doc] Fix Glance image size limit command This commit fixes the configuration document which mentions how to change Glance default image size quota at runtime because we don't have `openstack registered limit update` command but `openstack registered limit set` command[1]. [1] https://docs.openstack.org/python-openstackclient/latest/cli/command-objects/registered-limit.html#registered-limit-set Change-Id: I399685ed1f864f8f1ce7295ed6f83336cfccbd81 --- doc/source/configuration.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index 757b4001d9..d0f2b02419 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -719,7 +719,7 @@ or at runtime via: :: - openstack --os-cloud devstack-system-admin registered limit update \ + openstack --os-cloud devstack-system-admin registered limit set \ --service glance --default-limit 5000 --region RegionOne image_size_total .. 
_arch-configuration: From 1516997afe888ebc3cd06653a4f29a05bba7b346 Mon Sep 17 00:00:00 2001 From: Tom Weininger Date: Wed, 14 Sep 2022 17:16:00 +0200 Subject: [PATCH 262/574] Update user guide for Octavia Change-Id: I8e3134c3b2d591f7ab72b8040e1b931e967e11be --- doc/source/guides.rst | 8 +- doc/source/guides/devstack-with-lbaas-v2.rst | 145 ------------------ .../guides/devstack-with-nested-kvm.rst | 2 + doc/source/guides/devstack-with-octavia.rst | 144 +++++++++++++++++ 4 files changed, 150 insertions(+), 149 deletions(-) delete mode 100644 doc/source/guides/devstack-with-lbaas-v2.rst create mode 100644 doc/source/guides/devstack-with-octavia.rst diff --git a/doc/source/guides.rst b/doc/source/guides.rst index e7ec629962..e7b46b6e55 100644 --- a/doc/source/guides.rst +++ b/doc/source/guides.rst @@ -20,7 +20,7 @@ Walk through various setups used by stackers guides/neutron guides/devstack-with-nested-kvm guides/nova - guides/devstack-with-lbaas-v2 + guides/devstack-with-octavia guides/devstack-with-ldap All-In-One Single VM @@ -69,10 +69,10 @@ Nova and devstack Guide to working with nova features :doc:`Nova and devstack `. -Configure Load-Balancer Version 2 ------------------------------------ +Configure Octavia +----------------- -Guide on :doc:`Configure Load-Balancer Version 2 `. +Guide on :doc:`Configure Octavia `. Deploying DevStack with LDAP ---------------------------- diff --git a/doc/source/guides/devstack-with-lbaas-v2.rst b/doc/source/guides/devstack-with-lbaas-v2.rst deleted file mode 100644 index 5d96ca7d74..0000000000 --- a/doc/source/guides/devstack-with-lbaas-v2.rst +++ /dev/null @@ -1,145 +0,0 @@ -Devstack with Octavia Load Balancing -==================================== - -Starting with the OpenStack Pike release, Octavia is now a standalone service -providing load balancing services for OpenStack. - -This guide will show you how to create a devstack with `Octavia API`_ enabled. - -.. _Octavia API: https://docs.openstack.org/api-ref/load-balancer/v2/index.html - -Phase 1: Create DevStack + 2 nova instances --------------------------------------------- - -First, set up a vm of your choice with at least 8 GB RAM and 16 GB disk space, -make sure it is updated. Install git and any other developer tools you find -useful. - -Install devstack - -:: - - git clone https://opendev.org/openstack/devstack - cd devstack/tools - sudo ./create-stack-user.sh - cd ../.. - sudo mv devstack /opt/stack - sudo chown -R stack.stack /opt/stack/devstack - -This will clone the current devstack code locally, then setup the "stack" -account that devstack services will run under. Finally, it will move devstack -into its default location in /opt/stack/devstack. - -Edit your ``/opt/stack/devstack/local.conf`` to look like - -:: - - [[local|localrc]] - enable_plugin octavia https://opendev.org/openstack/octavia - # If you are enabling horizon, include the octavia dashboard - # enable_plugin octavia-dashboard https://opendev.org/openstack/octavia-dashboard.git - # If you are enabling barbican for TLS offload in Octavia, include it here. 
- # enable_plugin barbican https://opendev.org/openstack/barbican - - # ===== BEGIN localrc ===== - DATABASE_PASSWORD=password - ADMIN_PASSWORD=password - SERVICE_PASSWORD=password - SERVICE_TOKEN=password - RABBIT_PASSWORD=password - # Enable Logging - LOGFILE=$DEST/logs/stack.sh.log - VERBOSE=True - LOG_COLOR=True - # Pre-requisite - ENABLED_SERVICES=rabbit,mysql,key - # Horizon - enable for the OpenStack web GUI - # ENABLED_SERVICES+=,horizon - # Nova - ENABLED_SERVICES+=,n-api,n-crt,n-cpu,n-cond,n-sch,n-api-meta,n-sproxy - ENABLED_SERVICES+=,placement-api,placement-client - # Glance - ENABLED_SERVICES+=,g-api - # Neutron - ENABLED_SERVICES+=,q-svc,q-agt,q-dhcp,q-l3,q-meta,neutron - ENABLED_SERVICES+=,octavia,o-cw,o-hk,o-hm,o-api - # Cinder - ENABLED_SERVICES+=,c-api,c-vol,c-sch - # Tempest - ENABLED_SERVICES+=,tempest - # Barbican - Optionally used for TLS offload in Octavia - # ENABLED_SERVICES+=,barbican - # ===== END localrc ===== - -Run stack.sh and do some sanity checks - -:: - - sudo su - stack - cd /opt/stack/devstack - ./stack.sh - . ./openrc - - openstack network list # should show public and private networks - -Create two nova instances that we can use as test http servers: - -:: - - #create nova instances on private network - openstack server create --image $(openstack image list | awk '/ cirros-.*-x86_64-.* / {print $2}') --flavor 1 --nic net-id=$(openstack network list | awk '/ private / {print $2}') node1 - openstack server create --image $(openstack image list | awk '/ cirros-.*-x86_64-.* / {print $2}') --flavor 1 --nic net-id=$(openstack network list | awk '/ private / {print $2}') node2 - openstack server list # should show the nova instances just created - - #add secgroup rules to allow ssh etc.. - openstack security group rule create default --protocol icmp - openstack security group rule create default --protocol tcp --dst-port 22:22 - openstack security group rule create default --protocol tcp --dst-port 80:80 - -Set up a simple web server on each of these instances. ssh into each instance (username 'cirros', password 'cubswin:)' or 'gocubsgo') and run - -:: - - MYIP=$(ifconfig eth0|grep 'inet addr'|awk -F: '{print $2}'| awk '{print $1}') - while true; do echo -e "HTTP/1.0 200 OK\r\n\r\nWelcome to $MYIP" | sudo nc -l -p 80 ; done& - -Phase 2: Create your load balancer ----------------------------------- - -Make sure you have the 'openstack loadbalancer' commands: - -:: - - pip install python-octaviaclient - -Create your load balancer: - -:: - - openstack loadbalancer create --name lb1 --vip-subnet-id private-subnet - openstack loadbalancer show lb1 # Wait for the provisioning_status to be ACTIVE. - openstack loadbalancer listener create --protocol HTTP --protocol-port 80 --name listener1 lb1 - openstack loadbalancer show lb1 # Wait for the provisioning_status to be ACTIVE. - openstack loadbalancer pool create --lb-algorithm ROUND_ROBIN --listener listener1 --protocol HTTP --name pool1 - openstack loadbalancer show lb1 # Wait for the provisioning_status to be ACTIVE. - openstack loadbalancer healthmonitor create --delay 5 --timeout 2 --max-retries 1 --type HTTP pool1 - openstack loadbalancer show lb1 # Wait for the provisioning_status to be ACTIVE. - openstack loadbalancer member create --subnet-id private-subnet --address --protocol-port 80 pool1 - openstack loadbalancer show lb1 # Wait for the provisioning_status to be ACTIVE. 
- openstack loadbalancer member create --subnet-id private-subnet --address --protocol-port 80 pool1 - -Please note: The fields are the IP addresses of the nova -servers created in Phase 1. -Also note, using the API directly you can do all of the above commands in one -API call. - -Phase 3: Test your load balancer --------------------------------- - -:: - - openstack loadbalancer show lb1 # Note the vip_address - curl http:// - curl http:// - -This should show the "Welcome to " message from each member server. diff --git a/doc/source/guides/devstack-with-nested-kvm.rst b/doc/source/guides/devstack-with-nested-kvm.rst index 3732f06fd8..ba483e9ec9 100644 --- a/doc/source/guides/devstack-with-nested-kvm.rst +++ b/doc/source/guides/devstack-with-nested-kvm.rst @@ -1,3 +1,5 @@ +.. _kvm_nested_virt: + ======================================================= Configure DevStack with KVM-based Nested Virtualization ======================================================= diff --git a/doc/source/guides/devstack-with-octavia.rst b/doc/source/guides/devstack-with-octavia.rst new file mode 100644 index 0000000000..55939f0f12 --- /dev/null +++ b/doc/source/guides/devstack-with-octavia.rst @@ -0,0 +1,144 @@ +Devstack with Octavia Load Balancing +==================================== + +Starting with the OpenStack Pike release, Octavia is now a standalone service +providing load balancing services for OpenStack. + +This guide will show you how to create a devstack with `Octavia API`_ enabled. + +.. _Octavia API: https://docs.openstack.org/api-ref/load-balancer/v2/index.html + +Phase 1: Create DevStack + 2 nova instances +-------------------------------------------- + +First, set up a VM of your choice with at least 8 GB RAM and 16 GB disk space, +make sure it is updated. Install git and any other developer tools you find +useful. + +Install devstack:: + + git clone https://opendev.org/openstack/devstack + cd devstack/tools + sudo ./create-stack-user.sh + cd ../.. + sudo mv devstack /opt/stack + sudo chown -R stack.stack /opt/stack/devstack + +This will clone the current devstack code locally, then setup the "stack" +account that devstack services will run under. Finally, it will move devstack +into its default location in /opt/stack/devstack. + +Edit your ``/opt/stack/devstack/local.conf`` to look like:: + + [[local|localrc]] + # ===== BEGIN localrc ===== + DATABASE_PASSWORD=password + ADMIN_PASSWORD=password + SERVICE_PASSWORD=password + SERVICE_TOKEN=password + RABBIT_PASSWORD=password + GIT_BASE=https://opendev.org + # Optional settings: + # OCTAVIA_AMP_BASE_OS=centos + # OCTAVIA_AMP_DISTRIBUTION_RELEASE_ID=9-stream + # OCTAVIA_AMP_IMAGE_SIZE=3 + # OCTAVIA_LB_TOPOLOGY=ACTIVE_STANDBY + # OCTAVIA_ENABLE_AMPHORAV2_JOBBOARD=True + # LIBS_FROM_GIT+=octavia-lib, + # Enable Logging + LOGFILE=$DEST/logs/stack.sh.log + VERBOSE=True + LOG_COLOR=True + enable_service rabbit + enable_plugin neutron $GIT_BASE/openstack/neutron + # Octavia supports using QoS policies on the VIP port: + enable_service q-qos + enable_service placement-api placement-client + # Octavia services + enable_plugin octavia $GIT_BASE/openstack/octavia master + enable_plugin octavia-dashboard $GIT_BASE/openstack/octavia-dashboard + enable_plugin ovn-octavia-provider $GIT_BASE/openstack/ovn-octavia-provider + enable_plugin octavia-tempest-plugin $GIT_BASE/openstack/octavia-tempest-plugin + enable_service octavia o-api o-cw o-hm o-hk o-da + # If you are enabling barbican for TLS offload in Octavia, include it here. 
+ # enable_plugin barbican $GIT_BASE/openstack/barbican + # enable_service barbican + # Cinder (optional) + disable_service c-api c-vol c-sch + # Tempest + enable_service tempest + # ===== END localrc ===== + +.. note:: + For best performance it is highly recommended to use KVM + virtualization instead of QEMU. + Also make sure nested virtualization is enabled as documented in + :ref:`the respective guide `. + By adding ``LIBVIRT_CPU_MODE="host-passthrough"`` to your + ``local.conf`` you enable the guest VMs to make use of all features your + host's CPU provides. + +Run stack.sh and do some sanity checks:: + + sudo su - stack + cd /opt/stack/devstack + ./stack.sh + . ./openrc + + openstack network list # should show public and private networks + +Create two nova instances that we can use as test http servers:: + + # create nova instances on private network + openstack server create --image $(openstack image list | awk '/ cirros-.*-x86_64-.* / {print $2}') --flavor 1 --nic net-id=$(openstack network list | awk '/ private / {print $2}') node1 + openstack server create --image $(openstack image list | awk '/ cirros-.*-x86_64-.* / {print $2}') --flavor 1 --nic net-id=$(openstack network list | awk '/ private / {print $2}') node2 + openstack server list # should show the nova instances just created + + # add secgroup rules to allow ssh etc.. + openstack security group rule create default --protocol icmp + openstack security group rule create default --protocol tcp --dst-port 22:22 + openstack security group rule create default --protocol tcp --dst-port 80:80 + +Set up a simple web server on each of these instances. One possibility is to use +the `Golang test server`_ that is used by the Octavia project for CI testing +as well. +Copy the binary to your instances and start it as shown below +(username 'cirros', password 'gocubsgo'):: + + INST_IP= + scp -O test_server.bin cirros@${INST_IP}: + ssh -f cirros@${INST_IP} ./test_server.bin -id ${INST_IP} + +When started this way the test server will respond to HTTP requests with +its own IP. + +Phase 2: Create your load balancer +---------------------------------- + +Create your load balancer:: + + openstack loadbalancer create --wait --name lb1 --vip-subnet-id private-subnet + openstack loadbalancer listener create --wait --protocol HTTP --protocol-port 80 --name listener1 lb1 + openstack loadbalancer pool create --wait --lb-algorithm ROUND_ROBIN --listener listener1 --protocol HTTP --name pool1 + openstack loadbalancer healthmonitor create --wait --delay 5 --timeout 2 --max-retries 1 --type HTTP pool1 + openstack loadbalancer member create --wait --subnet-id private-subnet --address --protocol-port 80 pool1 + openstack loadbalancer member create --wait --subnet-id private-subnet --address --protocol-port 80 pool1 + +Please note: The fields are the IP addresses of the nova +servers created in Phase 1. +Also note, using the API directly you can do all of the above commands in one +API call. + +Phase 3: Test your load balancer +-------------------------------- + +:: + + openstack loadbalancer show lb1 # Note the vip_address + curl http:// + curl http:// + +This should show the "Welcome to " message from each member server. + + +.. 
_Golang test server: https://opendev.org/openstack/octavia-tempest-plugin/src/branch/master/octavia_tempest_plugin/contrib/test_server From 0d5c8d6643d5f532ec4b0e9f4a588d604db51dba Mon Sep 17 00:00:00 2001 From: Martin Kopec Date: Wed, 28 Sep 2022 02:13:58 +0200 Subject: [PATCH 263/574] Update DEVSTACK_SERIES to 2023.1 stable/zed branch has been created now and current master is for 2023.1 Antelope. Change-Id: I6186d01b1bf8548425500cc9feee6ab494a3db03 --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index b3130e5f7f..a05d1e5553 100644 --- a/stackrc +++ b/stackrc @@ -243,7 +243,7 @@ REQUIREMENTS_DIR=${REQUIREMENTS_DIR:-$DEST/requirements} # Setting the variable to 'ALL' will activate the download for all # libraries. -DEVSTACK_SERIES="zed" +DEVSTACK_SERIES="2023.1" ############## # From 9ece457b7b704d1218f8746829b7950b70e0a406 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Wed, 24 Aug 2022 14:43:00 +1000 Subject: [PATCH 264/574] Update to Fedora 36 Update the Fedora job to the latest release nodes Depends-On: https://review.opendev.org/c/openstack/devstack/+/860634 Change-Id: If2d7f99e3665a2e3df4cf763efc64dd381f02350 --- .zuul.yaml | 2 +- files/rpms/swift | 2 +- lib/apache | 2 +- stack.sh | 4 ++-- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 6ad7148449..441a9cf1e9 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -82,7 +82,7 @@ name: devstack-single-node-fedora-latest nodes: - name: controller - label: fedora-35 + label: fedora-36 groups: - name: tempest nodes: diff --git a/files/rpms/swift b/files/rpms/swift index 7d906aa926..49a1833dc4 100644 --- a/files/rpms/swift +++ b/files/rpms/swift @@ -4,4 +4,4 @@ memcached rsync-daemon sqlite xfsprogs -xinetd # not:f35,rhel9 +xinetd # not:f36,rhel9 diff --git a/lib/apache b/lib/apache index 94f3cfc95a..705776c55b 100644 --- a/lib/apache +++ b/lib/apache @@ -95,7 +95,7 @@ function install_apache_uwsgi { # didn't fix Python 3.10 compatibility before release. Should be # fixed in uwsgi 4.9.0; can remove this when packages available # or we drop this release - elif is_fedora && ! [[ $DISTRO =~ f35 ]]; then + elif is_fedora && ! [[ $DISTRO =~ f36 ]]; then # Note httpd comes with mod_proxy_uwsgi and it is loaded by # default; the mod_proxy_uwsgi package actually conflicts now. # See: diff --git a/stack.sh b/stack.sh index c99189e6dc..cc90fca576 100755 --- a/stack.sh +++ b/stack.sh @@ -12,7 +12,7 @@ # a multi-node developer install. # To keep this script simple we assume you are running on a recent **Ubuntu** -# (Bionic or newer), **Fedora** (F24 or newer), or **CentOS/RHEL** +# (Bionic or newer), **Fedora** (F36 or newer), or **CentOS/RHEL** # (7 or newer) machine. (It may work on other platforms but support for those # platforms is left to those who added them to DevStack.) It should work in # a VM or physical server. Additionally, we maintain a list of ``deb`` and @@ -229,7 +229,7 @@ write_devstack_version # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -SUPPORTED_DISTROS="bullseye|focal|jammy|f35|opensuse-15.2|opensuse-tumbleweed|rhel8|rhel9" +SUPPORTED_DISTROS="bullseye|focal|jammy|f36|opensuse-15.2|opensuse-tumbleweed|rhel8|rhel9" if [[ ! 
${DISTRO} =~ $SUPPORTED_DISTROS ]]; then echo "WARNING: this script has not been tested on $DISTRO" From e3bc6b5f571a5b291617ee5227c153002ef8d9c3 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Tue, 24 Sep 2019 12:44:16 +1000 Subject: [PATCH 265/574] get_or_create_domain: simplify with "--or-show" argument Similar to other functions, this uses "--or-show" to avoid double calls. Co-Authored-By: Jens Harbott Change-Id: I548f9acd812687838e04b705f86f3b70d2b10caf --- functions-common | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/functions-common b/functions-common index 92a6678de0..ed44186804 100644 --- a/functions-common +++ b/functions-common @@ -875,14 +875,9 @@ function policy_add { # Usage: get_or_create_domain function get_or_create_domain { local domain_id - # Gets domain id domain_id=$( - # Gets domain id - openstack --os-cloud devstack-system-admin domain show $1 \ - -f value -c id 2>/dev/null || - # Creates new domain openstack --os-cloud devstack-system-admin domain create $1 \ - --description "$2" \ + --description "$2" --or-show \ -f value -c id ) echo $domain_id From e69b78df6fc48a1e70c180d3878164e416adbbdd Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Tue, 24 Sep 2019 12:51:25 +1000 Subject: [PATCH 266/574] Simplify role addtion helper functions Because adding the role is idempotent, we can save doing the initial check for role assignment. Also simplify the output matching by using osc's filters where appropriate. Co-Authored-By: Jens Harbott Change-Id: If2a661cc565a43a7821b8f0a10edd97de08eb911 --- functions-common | 65 ++++++++++++++++++------------------------------ 1 file changed, 24 insertions(+), 41 deletions(-) diff --git a/functions-common b/functions-common index ed44186804..e9984fd65e 100644 --- a/functions-common +++ b/functions-common @@ -966,29 +966,22 @@ function _get_domain_args { # Usage: get_or_add_user_project_role [ ] function get_or_add_user_project_role { local user_role_id + local domain_args domain_args=$(_get_domain_args $4 $5) - # Gets user role id + # Note this is idempotent so we are safe across multiple + # duplicate calls. + openstack --os-cloud devstack-system-admin role add $1 \ + --user $2 \ + --project $3 \ + $domain_args user_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \ --role $1 \ --user $2 \ --project $3 \ $domain_args \ - | grep '^|\s[a-f0-9]\+' | get_field 1) - if [[ -z "$user_role_id" ]]; then - # Adds role to user and get it - openstack --os-cloud devstack-system-admin role add $1 \ - --user $2 \ - --project $3 \ - $domain_args - user_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \ - --role $1 \ - --user $2 \ - --project $3 \ - $domain_args \ - | grep '^|\s[a-f0-9]\+' | get_field 1) - fi + -c Role -f value) echo $user_role_id } @@ -996,23 +989,18 @@ function get_or_add_user_project_role { # Usage: get_or_add_user_domain_role function get_or_add_user_domain_role { local user_role_id - # Gets user role id + + # Note this is idempotent so we are safe across multiple + # duplicate calls. 
+ openstack --os-cloud devstack-system-admin role add $1 \ + --user $2 \ + --domain $3 user_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \ --role $1 \ --user $2 \ --domain $3 \ - | grep '^|\s[a-f0-9]\+' | get_field 1) - if [[ -z "$user_role_id" ]]; then - # Adds role to user and get it - openstack --os-cloud devstack-system-admin role add $1 \ - --user $2 \ - --domain $3 - user_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \ - --role $1 \ - --user $2 \ - --domain $3 \ - | grep '^|\s[a-f0-9]\+' | get_field 1) - fi + -c Role -f value) + echo $user_role_id } @@ -1051,23 +1039,18 @@ function get_or_add_user_system_role { # Usage: get_or_add_group_project_role function get_or_add_group_project_role { local group_role_id - # Gets group role id + + # Note this is idempotent so we are safe across multiple + # duplicate calls. + openstack role add $1 \ + --group $2 \ + --project $3 group_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \ --role $1 \ --group $2 \ --project $3 \ - -f value) - if [[ -z "$group_role_id" ]]; then - # Adds role to group and get it - openstack --os-cloud devstack-system-admin role add $1 \ - --group $2 \ - --project $3 - group_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \ - --role $1 \ - --group $2 \ - --project $3 \ - -f value) - fi + -f value -c Role) + echo $group_role_id } From 2e6756640c8e85cb924f9dfcd968aad303b481b3 Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Thu, 6 Oct 2022 17:24:57 +0200 Subject: [PATCH 267/574] Re-enable horizon in jammy-based jobs The issue that Horizon had with python3.10 has been fixed some time ago, so we can stop disabling it for those jobs. Also stop including roles from devstack-gate which we no longer need. Change-Id: Ia5d0b31561adc5051acd96fcaab183e60c3c2f99 --- .zuul.yaml | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 6ad7148449..99b48dae45 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -335,7 +335,6 @@ required-projects: - opendev.org/openstack/devstack roles: - - zuul: opendev.org/openstack/devstack-gate - zuul: opendev.org/openstack/openstack-zuul-jobs vars: devstack_localrc: @@ -676,9 +675,6 @@ timeout: 9000 vars: configure_swap_size: 4096 - devstack_services: - # Horizon doesn't like py310 - horizon: false - job: name: devstack-platform-ubuntu-jammy-ovn-source @@ -706,8 +702,6 @@ Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch Q_ML2_TENANT_NETWORK_TYPE: vxlan devstack_services: - # Horizon doesn't like py310 - horizon: false # Disable OVN services ovn-northd: false ovn-controller: false @@ -752,10 +746,6 @@ voting: false vars: configure_swap_size: 4096 - # Python 3.10 dependency issues; see - # https://bugs.launchpad.net/horizon/+bug/1960204 - devstack_services: - horizon: false - job: name: devstack-platform-fedora-latest-virt-preview From 7d1ba835c38839a62cee94dc281773b62c554932 Mon Sep 17 00:00:00 2001 From: Masayuki Igawa Date: Tue, 11 Oct 2022 12:35:18 +0900 Subject: [PATCH 268/574] [Doc] Fix tox command option to run smoke tests This commit fixes the tox command option to run the smoke tests. The original arguments fail with the error[1], and `-efull` and `tempest.scenario.test_network_basic_ops` are not for the smoke tests. [1] $ tox -efull tempest.scenario.test_network_basic_ops ... 
tempest run: error: unrecognized arguments: tempest.scenario.test_network_basic_ops Change-Id: I9c3dd9fb4f64bf856c5cab88a2aeaae355c84a65 --- doc/source/configuration.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index 757b4001d9..3191ae824b 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -636,7 +636,7 @@ tests can be run as follows: :: $ cd /opt/stack/tempest - $ tox -efull tempest.scenario.test_network_basic_ops + $ tox -e smoke By default tempest is downloaded and the config file is generated, but the tempest package is not installed in the system's global site-packages (the From 781fbf47b557d92bcb71e60c535f6249e729637d Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Tue, 11 Oct 2022 15:41:02 +0200 Subject: [PATCH 269/574] docs: Add warnings about password selection Some services fail when using special characters in passwords, add some warnings to our docs. Closes-Bug: 1744985 Change-Id: I601149e2e7362507b38f01719f7197385a27e0a8 --- doc/source/configuration.rst | 3 +++ doc/source/guides/single-machine.rst | 3 +++ doc/source/index.rst | 5 ++++- 3 files changed, 10 insertions(+), 1 deletion(-) diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index 757b4001d9..d59c1edafd 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -181,6 +181,9 @@ values that most often need to be set. If the ``*_PASSWORD`` variables are not set here you will be prompted to enter values for them by ``stack.sh``. +.. warning:: Only use alphanumeric characters in your passwords, as some + services fail to work when using special characters. + The network ranges must not overlap with any networks in use on the host. Overlap is not uncommon as RFC-1918 'private' ranges are commonly used for both the local networking and Nova's fixed and floating ranges. diff --git a/doc/source/guides/single-machine.rst b/doc/source/guides/single-machine.rst index 0529e30f08..a4385b5b4b 100644 --- a/doc/source/guides/single-machine.rst +++ b/doc/source/guides/single-machine.rst @@ -106,6 +106,9 @@ do the following: - Set the service password. This is used by the OpenStack services (Nova, Glance, etc) to authenticate with Keystone. +.. warning:: Only use alphanumeric characters in your passwords, as some + services fail to work when using special characters. + ``local.conf`` should look something like this: .. code-block:: ini diff --git a/doc/source/index.rst b/doc/source/index.rst index 0434d68838..ba7ea42943 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -101,7 +101,10 @@ devstack git repo. This is the minimum required config to get started with DevStack. .. note:: There is a sample :download:`local.conf ` file - under the *samples* directory in the devstack repository. + under the *samples* directory in the devstack repository. + +.. warning:: Only use alphanumeric characters in your passwords, as some + services fail to work when using special characters. 
Start the install ----------------- From 358987f065af05d166539982c282e2f587b5c952 Mon Sep 17 00:00:00 2001 From: Adrian Fusco Arnejo Date: Wed, 31 Aug 2022 19:38:49 +0200 Subject: [PATCH 270/574] Adding devstack support for Rocky Linux 9 Adding job and nodeset to run tempest-full-py3 in Rocky Linux 9 instance Change-Id: I6fb390bfeec436b50a3ddc18d154bbce3f3b1975 --- .zuul.yaml | 20 ++++++++++++++++++++ doc/source/index.rst | 2 +- functions-common | 7 ++++++- 3 files changed, 27 insertions(+), 2 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index c29cb31f31..8e6f8633ff 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -106,6 +106,16 @@ nodes: - controller +- nodeset: + name: devstack-single-node-rockylinux-9 + nodes: + - name: controller + label: rockylinux-9 + groups: + - name: tempest + nodes: + - controller + - nodeset: name: openstack-two-node nodes: @@ -676,6 +686,15 @@ vars: configure_swap_size: 4096 +- job: + name: devstack-platform-rocky-blue-onyx + parent: tempest-full-py3 + description: Rocky Linux 9 Blue Onyx platform test + nodeset: devstack-single-node-rockylinux-9 + timeout: 9000 + vars: + configure_swap_size: 4096 + - job: name: devstack-platform-ubuntu-jammy parent: tempest-full-py3 @@ -852,6 +871,7 @@ - devstack-platform-fedora-latest - devstack-platform-centos-9-stream - devstack-platform-debian-bullseye + - devstack-platform-rocky-blue-onyx - devstack-platform-ubuntu-jammy - devstack-platform-ubuntu-jammy-ovn-source - devstack-platform-ubuntu-jammy-ovs diff --git a/doc/source/index.rst b/doc/source/index.rst index 0434d68838..626d5e1a85 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -38,7 +38,7 @@ Install Linux Start with a clean and minimal install of a Linux system. DevStack attempts to support the two latest LTS releases of Ubuntu, the -latest/current Fedora version, CentOS/RHEL 8 and OpenSUSE. +latest/current Fedora version, CentOS/RHEL/Rocky Linux 9 and OpenSUSE. If you do not have a preference, Ubuntu 20.04 (Focal Fossa) is the most tested, and will probably go the smoothest. diff --git a/functions-common b/functions-common index 92a6678de0..e27518f559 100644 --- a/functions-common +++ b/functions-common @@ -418,6 +418,9 @@ function GetOSVersion { os_RELEASE=${VERSION_ID} os_CODENAME="n/a" os_VENDOR=$(echo $NAME | tr -d '[:space:]') + elif [[ "${ID}${VERSION}" =~ "rocky9" ]]; then + os_VENDOR="Rocky" + os_RELEASE=${VERSION_ID} else _ensure_lsb_release @@ -466,6 +469,7 @@ function GetDistro { "$os_VENDOR" =~ (AlmaLinux) || \ "$os_VENDOR" =~ (Scientific) || \ "$os_VENDOR" =~ (OracleServer) || \ + "$os_VENDOR" =~ (Rocky) || \ "$os_VENDOR" =~ (Virtuozzo) ]]; then # Drop the . release as we assume it's compatible # XXX re-evaluate when we get RHEL10 @@ -513,7 +517,7 @@ function is_oraclelinux { # Determine if current distribution is a Fedora-based distribution -# (Fedora, RHEL, CentOS, etc). +# (Fedora, RHEL, CentOS, Rocky, etc). 
# is_fedora function is_fedora { if [[ -z "$os_VENDOR" ]]; then @@ -523,6 +527,7 @@ function is_fedora { [ "$os_VENDOR" = "Fedora" ] || [ "$os_VENDOR" = "Red Hat" ] || \ [ "$os_VENDOR" = "RedHatEnterpriseServer" ] || \ [ "$os_VENDOR" = "RedHatEnterprise" ] || \ + [ "$os_VENDOR" = "Rocky" ] || \ [ "$os_VENDOR" = "CentOS" ] || [ "$os_VENDOR" = "CentOSStream" ] || \ [ "$os_VENDOR" = "AlmaLinux" ] || \ [ "$os_VENDOR" = "OracleServer" ] || [ "$os_VENDOR" = "Virtuozzo" ] From 71c99655479174750bcedfe458328328a1596766 Mon Sep 17 00:00:00 2001 From: Brian Haley Date: Wed, 19 Oct 2022 14:08:43 -0400 Subject: [PATCH 271/574] Use separate OVS and OVN directories If stack.sh is run on a system that already has OVN packages installed, it could fail to find its DB sockets. This is because the 'ln -s' will place the symlink inside of /var/run/ovn instead of using a single directory as intended. Change the code in neutron_plugins/ovn_agent to not make the symlink and instead use separate directories for OVS and OVN. Closes-bug: #1980421 Change-Id: Ic28a93bdc3dfe4a6159234baeabd0064db452b07 --- lib/neutron_plugins/ovn_agent | 43 ++++++++++++++++------------------- 1 file changed, 20 insertions(+), 23 deletions(-) diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index 8eb2993b94..e64224cbaa 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -244,11 +244,12 @@ function _run_process { local cmd="$2" local stop_cmd="$3" local group=$4 - local user=${5:-$STACK_USER} + local user=$5 + local rundir=${6:-$OVS_RUNDIR} local systemd_service="devstack@$service.service" local unit_file="$SYSTEMD_DIR/$systemd_service" - local environment="OVN_RUNDIR=$OVS_RUNDIR OVN_DBDIR=$OVN_DATADIR OVN_LOGDIR=$LOGDIR OVS_RUNDIR=$OVS_RUNDIR OVS_DBDIR=$OVS_DATADIR OVS_LOGDIR=$LOGDIR" + local environment="OVN_RUNDIR=$OVN_RUNDIR OVN_DBDIR=$OVN_DATADIR OVN_LOGDIR=$LOGDIR OVS_RUNDIR=$OVS_RUNDIR OVS_DBDIR=$OVS_DATADIR OVS_LOGDIR=$LOGDIR" echo "Starting $service executed command": $cmd @@ -264,14 +265,14 @@ function _run_process { _start_process $systemd_service - local testcmd="test -e $OVS_RUNDIR/$service.pid" + local testcmd="test -e $rundir/$service.pid" test_with_retry "$testcmd" "$service did not start" $SERVICE_TIMEOUT 1 local service_ctl_file - service_ctl_file=$(ls $OVS_RUNDIR | grep $service | grep ctl) + service_ctl_file=$(ls $rundir | grep $service | grep ctl) if [ -z "$service_ctl_file" ]; then die $LINENO "ctl file for service $service is not present." fi - sudo ovs-appctl -t $OVS_RUNDIR/$service_ctl_file vlog/set console:off syslog:info file:info + sudo ovs-appctl -t $rundir/$service_ctl_file vlog/set console:off syslog:info file:info } function clone_repository { @@ -370,10 +371,6 @@ function install_ovn { sudo mkdir -p $OVS_RUNDIR sudo chown $(whoami) $OVS_RUNDIR - # NOTE(lucasagomes): To keep things simpler, let's reuse the same - # RUNDIR for both OVS and OVN. 
This way we avoid having to specify the - # --db option in the ovn-{n,s}bctl commands while playing with DevStack - sudo ln -s $OVS_RUNDIR $OVN_RUNDIR if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then # If OVS is already installed, remove it, because we're about to @@ -616,12 +613,12 @@ function _start_ovs { dbcmd+=" --remote=db:hardware_vtep,Global,managers $OVS_DATADIR/vtep.db" fi dbcmd+=" $OVS_DATADIR/conf.db" - _run_process ovsdb-server "$dbcmd" "" "$STACK_GROUP" "root" + _run_process ovsdb-server "$dbcmd" "" "$STACK_GROUP" "root" "$OVS_RUNDIR" # Note: ovn-controller will create and configure br-int once it is started. # So, no need to create it now because nothing depends on that bridge here. local ovscmd="$OVS_SBINDIR/ovs-vswitchd --log-file --pidfile --detach" - _run_process ovs-vswitchd "$ovscmd" "" "$STACK_GROUP" "root" + _run_process ovs-vswitchd "$ovscmd" "" "$STACK_GROUP" "root" "$OVS_RUNDIR" else _start_process "$OVSDB_SERVER_SERVICE" _start_process "$OVS_VSWITCHD_SERVICE" @@ -660,7 +657,7 @@ function _start_ovs { enable_service ovs-vtep local vtepcmd="$OVS_SCRIPTDIR/ovs-vtep --log-file --pidfile --detach br-v" - _run_process ovs-vtep "$vtepcmd" "" "$STACK_GROUP" "root" + _run_process ovs-vtep "$vtepcmd" "" "$STACK_GROUP" "root" "$OVS_RUNDIR" vtep-ctl set-manager tcp:$HOST_IP:6640 fi @@ -704,26 +701,26 @@ function start_ovn { local cmd="/bin/bash $SCRIPTDIR/ovn-ctl --no-monitor start_northd" local stop_cmd="/bin/bash $SCRIPTDIR/ovn-ctl stop_northd" - _run_process ovn-northd "$cmd" "$stop_cmd" "$STACK_GROUP" "root" + _run_process ovn-northd "$cmd" "$stop_cmd" "$STACK_GROUP" "root" "$OVN_RUNDIR" else _start_process "$OVN_NORTHD_SERVICE" fi # Wait for the service to be ready # Check for socket and db files for both OVN NB and SB - wait_for_sock_file $OVS_RUNDIR/ovnnb_db.sock - wait_for_sock_file $OVS_RUNDIR/ovnsb_db.sock + wait_for_sock_file $OVN_RUNDIR/ovnnb_db.sock + wait_for_sock_file $OVN_RUNDIR/ovnsb_db.sock wait_for_db_file $OVN_DATADIR/ovnnb_db.db wait_for_db_file $OVN_DATADIR/ovnsb_db.db if is_service_enabled tls-proxy; then - sudo ovn-nbctl --db=unix:$OVS_RUNDIR/ovnnb_db.sock set-ssl $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/ca-chain.pem - sudo ovn-sbctl --db=unix:$OVS_RUNDIR/ovnsb_db.sock set-ssl $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/ca-chain.pem + sudo ovn-nbctl --db=unix:$OVN_RUNDIR/ovnnb_db.sock set-ssl $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/ca-chain.pem + sudo ovn-sbctl --db=unix:$OVN_RUNDIR/ovnsb_db.sock set-ssl $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/ca-chain.pem fi - sudo ovn-nbctl --db=unix:$OVS_RUNDIR/ovnnb_db.sock set-connection p${OVN_PROTO}:6641:$SERVICE_LISTEN_ADDRESS -- set connection . inactivity_probe=60000 - sudo ovn-sbctl --db=unix:$OVS_RUNDIR/ovnsb_db.sock set-connection p${OVN_PROTO}:6642:$SERVICE_LISTEN_ADDRESS -- set connection . inactivity_probe=60000 - sudo ovs-appctl -t $OVS_RUNDIR/ovnnb_db.ctl vlog/set console:off syslog:$OVN_DBS_LOG_LEVEL file:$OVN_DBS_LOG_LEVEL - sudo ovs-appctl -t $OVS_RUNDIR/ovnsb_db.ctl vlog/set console:off syslog:$OVN_DBS_LOG_LEVEL file:$OVN_DBS_LOG_LEVEL + sudo ovn-nbctl --db=unix:$OVN_RUNDIR/ovnnb_db.sock set-connection p${OVN_PROTO}:6641:$SERVICE_LISTEN_ADDRESS -- set connection . 
inactivity_probe=60000 + sudo ovn-sbctl --db=unix:$OVN_RUNDIR/ovnsb_db.sock set-connection p${OVN_PROTO}:6642:$SERVICE_LISTEN_ADDRESS -- set connection . inactivity_probe=60000 + sudo ovs-appctl -t $OVN_RUNDIR/ovnnb_db.ctl vlog/set console:off syslog:$OVN_DBS_LOG_LEVEL file:$OVN_DBS_LOG_LEVEL + sudo ovs-appctl -t $OVN_RUNDIR/ovnsb_db.ctl vlog/set console:off syslog:$OVN_DBS_LOG_LEVEL file:$OVN_DBS_LOG_LEVEL fi if is_service_enabled ovn-controller ; then @@ -731,7 +728,7 @@ function start_ovn { local cmd="/bin/bash $SCRIPTDIR/ovn-ctl --no-monitor start_controller" local stop_cmd="/bin/bash $SCRIPTDIR/ovn-ctl stop_controller" - _run_process ovn-controller "$cmd" "$stop_cmd" "$STACK_GROUP" "root" + _run_process ovn-controller "$cmd" "$stop_cmd" "$STACK_GROUP" "root" "$OVN_RUNDIR" else _start_process "$OVN_CONTROLLER_SERVICE" fi @@ -740,7 +737,7 @@ function start_ovn { if is_service_enabled ovn-controller-vtep ; then if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then local cmd="$OVS_BINDIR/ovn-controller-vtep --log-file --pidfile --detach --ovnsb-db=$OVN_SB_REMOTE" - _run_process ovn-controller-vtep "$cmd" "" "$STACK_GROUP" "root" + _run_process ovn-controller-vtep "$cmd" "" "$STACK_GROUP" "root" "$OVN_RUNDIR" else _start_process "$OVN_CONTROLLER_VTEP_SERVICE" fi From 5e7afb779c469f593a1628e8f63c66989b7e2c49 Mon Sep 17 00:00:00 2001 From: Slawek Kaplonski Date: Mon, 24 Oct 2022 12:17:48 +0200 Subject: [PATCH 272/574] Run dmesg command with sudo It seems that setting "sysctl kernel.dmesg_restrict" was changed in Ubuntu 22.04 (Jammy) to "1" and because of that running "dmesg" command requires now root privileges. Closes-bug: #1994023 Change-Id: I2adc76e3025fadf994bab2e2e1fd608e688874fc --- lib/neutron_plugins/ovs_source | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/neutron_plugins/ovs_source b/lib/neutron_plugins/ovs_source index 164d574c42..ea71e60e68 100644 --- a/lib/neutron_plugins/ovs_source +++ b/lib/neutron_plugins/ovs_source @@ -33,9 +33,9 @@ function load_module { local fatal=$2 if [ "$(trueorfalse True fatal)" == "True" ]; then - sudo modprobe $module || (dmesg && die $LINENO "FAILED TO LOAD $module") + sudo modprobe $module || (sudo dmesg && die $LINENO "FAILED TO LOAD $module") else - sudo modprobe $module || (echo "FAILED TO LOAD $module" && dmesg) + sudo modprobe $module || (echo "FAILED TO LOAD $module" && sudo dmesg) fi } @@ -103,7 +103,7 @@ function prepare_for_ovs_compilation { function load_ovs_kernel_modules { load_module openvswitch load_module vport-geneve False - dmesg | tail + sudo dmesg | tail } # reload_ovs_kernel_modules() - reload openvswitch kernel module From 47a429777ce71e4d69e1894f173cf87e731b3a6e Mon Sep 17 00:00:00 2001 From: Martin Kopec Date: Fri, 4 Nov 2022 14:31:03 +0100 Subject: [PATCH 273/574] Extend single-core-review for non-functional changes Adding a second exception for single-core-review in Devstack repository - changes which do not affect core functionality, like f.e. job cleanups, can be reviewed by a single core. 
Change-Id: Idb6cefa510fdbfed41379eb410f4884852d1177f --- doc/source/contributor/contributing.rst | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/doc/source/contributor/contributing.rst b/doc/source/contributor/contributing.rst index 4de238fbf8..8b5a85b3df 100644 --- a/doc/source/contributor/contributing.rst +++ b/doc/source/contributor/contributing.rst @@ -42,8 +42,9 @@ Getting Your Patch Merged ~~~~~~~~~~~~~~~~~~~~~~~~~ All changes proposed to the Devstack require two ``Code-Review +2`` votes from Devstack core reviewers before one of the core reviewers can approve the patch -by giving ``Workflow +1`` vote. One exception is for patches to unblock the gate -which can be approved by single core reviewers. +by giving ``Workflow +1`` vote. There are 2 exceptions, approving patches to +unblock the gate and patches that do not relate to the Devstack's core logic, +like for example old job cleanups, can be approved by single core reviewers. Project Team Lead Duties ~~~~~~~~~~~~~~~~~~~~~~~~ From d1c2bf5e7c739bc5a7eeac602b477edb9f6630c2 Mon Sep 17 00:00:00 2001 From: Rodolfo Alonso Hernandez Date: Wed, 2 Nov 2022 16:43:41 +0100 Subject: [PATCH 274/574] Add new service "file_tracker" This new service periodically tracks the file open in the system. Closes-Bug: #1995502 Change-Id: I02e097fef07655ff571af9f35bf258b2ed975098 --- .zuul.yaml | 4 ++++ doc/source/debugging.rst | 6 +++++ lib/dstat | 6 +++++ tools/file_tracker.sh | 47 ++++++++++++++++++++++++++++++++++++++++ 4 files changed, 63 insertions(+) create mode 100755 tools/file_tracker.sh diff --git a/.zuul.yaml b/.zuul.yaml index 6dbb5bacbe..ce38760203 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -469,6 +469,7 @@ dstat: false etcd3: true memory_tracker: true + file_tracker: true mysql: true rabbit: true group-vars: @@ -477,6 +478,7 @@ # Shared services dstat: false memory_tracker: true + file_tracker: true devstack_localrc: # Multinode specific settings HOST_IP: "{{ hostvars[inventory_hostname]['nodepool']['private_ipv4'] }}" @@ -544,6 +546,7 @@ dstat: false etcd3: true memory_tracker: true + file_tracker: true mysql: true rabbit: true tls-proxy: true @@ -593,6 +596,7 @@ # Shared services dstat: false memory_tracker: true + file_tracker: true tls-proxy: true # Nova services n-cpu: true diff --git a/doc/source/debugging.rst b/doc/source/debugging.rst index fd0d9cdf74..3ca0ad94b4 100644 --- a/doc/source/debugging.rst +++ b/doc/source/debugging.rst @@ -20,6 +20,12 @@ provides consumption output when available memory is seen to be falling (i.e. processes are consuming memory). It also provides output showing locked (unswappable) memory. +file_tracker +------------ + +The ``file_tracker`` service periodically monitors the number of +open files in the system. 
+ tcpdump ------- diff --git a/lib/dstat b/lib/dstat index eb03ae0fb2..870c901d2a 100644 --- a/lib/dstat +++ b/lib/dstat @@ -40,12 +40,18 @@ function start_dstat { if is_service_enabled peakmem_tracker; then die $LINENO "The peakmem_tracker service has been removed, use memory_tracker instead" fi + + # To enable file_tracker add: + # enable_service file_tracker + # to your localrc + run_process file_tracker "$TOP_DIR/tools/file_tracker.sh" } # stop_dstat() stop dstat process function stop_dstat { stop_process dstat stop_process memory_tracker + stop_process file_tracker } # Restore xtrace diff --git a/tools/file_tracker.sh b/tools/file_tracker.sh new file mode 100755 index 0000000000..9c31b30a56 --- /dev/null +++ b/tools/file_tracker.sh @@ -0,0 +1,47 @@ +#!/bin/bash +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +set -o errexit + +# time to sleep between checks +SLEEP_TIME=20 + +function tracker { + echo "Number of open files | Number of open files not in use | Maximum number of files allowed to be opened" + while true; do + cat /proc/sys/fs/file-nr + sleep $SLEEP_TIME + done +} + +function usage { + echo "Usage: $0 [-x] [-s N]" 1>&2 + exit 1 +} + +while getopts ":s:x" opt; do + case $opt in + s) + SLEEP_TIME=$OPTARG + ;; + x) + set -o xtrace + ;; + *) + usage + ;; + esac +done + +tracker From a4680766515ed9317b71cfb39cd0d75dc04f3d9c Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Wed, 9 Nov 2022 10:11:46 -0800 Subject: [PATCH 275/574] Make debian-bullseye job non-voting As noted in the QA meeting this week, this job is failing due to something that seems outside of our control: https://meetings.opendev.org/meetings/qa/2022/qa.2022-11-08-15.00.log.html Make it non-voting until that is resolved. Change-Id: Ia571d1dab45eb1bbb8665373d416515d3c95fb14 --- .zuul.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.zuul.yaml b/.zuul.yaml index 6dbb5bacbe..1923444601 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -674,6 +674,8 @@ description: Debian Bullseye platform test nodeset: devstack-single-node-debian-bullseye timeout: 9000 + # TODO(danms) n-v until the known issue is resolved + voting: false vars: configure_swap_size: 4096 From 97b2a51d6beee4fd58b93027d823d6fd90f5c11f Mon Sep 17 00:00:00 2001 From: Clark Boylan Date: Wed, 9 Nov 2022 11:58:37 -0800 Subject: [PATCH 276/574] Fix dbcounter install on Debian Bullseye The dbcounter install on Debian Bullseye is broken in a really fun way. The problem is that we end up mixing pypi openssl and distro cryptography under pip and those two versions of libraries are not compatible. The reason this happens is that debian's pip package debundles the pip deps. This splits them out into /usr/share/python-wheels and it will prefer distro versions of libraries over pypi installed versions of libraries. But if a pypi version is installed and a distro version is not then the pypi version is used. If the pypi version of library A does not work with distro version of library B then debundled pip breaks. This has happened with crypytography and pyOpenSSL. 
This happens because urllib3 (a debundled pip dep) appears to use pyopenssl conditionally. Novnc depends on python3-cryptography, and openstack depends on cryptogrpahy from pypi ensuring we get both a distro and a pypi version installed. However, pyOpenSSL is only pulled in from pypi via openstack deps. This leaves debundled urllib3 attempting to use pypi pyOpenSSL with distro cryptography and that combo isn't valid due to an interface change. To fix this we install python3-openssl ensuring that debundled pip will use distro pyOpenSSL with distro cryptography making everything happy again. But we only do this when we install novnc as novnc is what pulls in distro cryptography in the first place. We can't simply install python3-openssl on all debuntu platforms because this breaks Ubuntu Focal in the other direction. On Ubuntu focal distro pip uses distro pyOpenSSL when no pypi pyOpenSSl is installed (prior to keystone install) and is not compatible with pypi cryptography. Honestly, this whole intersection between distro and pypi installs of cryptography and pyOpenSSL could probably be made cleaner. One option would be for us to always install the constraints version of both packages from pypi and the distro pacakges very early in the devstack run. But that seems far more complicated so I'm not attempting that here. Change-Id: I0fc6a8e66e365ac49c6c7ceb4c71c68714b9f541 --- lib/nova | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/lib/nova b/lib/nova index 6de1d3382f..5c619bb762 100644 --- a/lib/nova +++ b/lib/nova @@ -885,8 +885,23 @@ function install_nova { # a websockets/html5 or flash powered VNC console for vm instances NOVNC_FROM_PACKAGE=$(trueorfalse False NOVNC_FROM_PACKAGE) if [ "$NOVNC_FROM_PACKAGE" = "True" ]; then + # Installing novnc on Debian bullseye breaks the global pip + # install. This happens because novnc pulls in distro cryptography + # which will be prefered by distro pip, but if anything has + # installed pyOpenSSL from pypi (keystone) that is not compatible + # with distro cryptography. Fix this by installing + # python3-openssl (pyOpenSSL) from the distro which pip will prefer + # on Debian. Ubuntu has inverse problems so we only do this for + # Debian. + local novnc_packages + novnc_packages="novnc" + GetOSVersion + if [[ "$os_VENDOR" = "Debian" ]] ; then + novnc_packages="$novnc_packages python3-openssl" + fi + NOVNC_WEB_DIR=/usr/share/novnc - install_package novnc + install_package $novnc_packages else NOVNC_WEB_DIR=$DEST/novnc git_clone $NOVNC_REPO $NOVNC_WEB_DIR $NOVNC_BRANCH From 857f4993f35fbdc83771b9632d3525766de194a1 Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Wed, 19 Oct 2022 20:15:42 -0500 Subject: [PATCH 277/574] Add RBAC scope and new defaults setting support for Nova & Tempest Nova is ready with the scope and new defaults as per the new RBAC design. Adding devstack flag to enable the scope checks and new defaults enforcement in nova side. Change-Id: I305ea626a4b622c5534d523f4b619832f9d35f8d --- lib/nova | 10 ++++++++++ lib/tempest | 4 ++++ 2 files changed, 14 insertions(+) diff --git a/lib/nova b/lib/nova index 8e8ea8a175..63c6a86a66 100644 --- a/lib/nova +++ b/lib/nova @@ -97,6 +97,12 @@ NOVA_SERVICE_LISTEN_ADDRESS=${NOVA_SERVICE_LISTEN_ADDRESS:-$(ipv6_unquote $SERVI METADATA_SERVICE_PORT=${METADATA_SERVICE_PORT:-8775} NOVA_ENABLE_CACHE=${NOVA_ENABLE_CACHE:-True} +# Flag to set the oslo_policy.enforce_scope and oslo_policy.enforce_new_defaults. 
+# This is used to switch the compute API policies enable the scope and new defaults. +# By Default, these flag are False. +# For more detail: https://docs.openstack.org/oslo.policy/latest/configuration/index.html#oslo_policy.enforce_scope +NOVA_ENFORCE_SCOPE=$(trueorfalse False NOVA_ENFORCE_SCOPE) + if [[ $SERVICE_IP_VERSION == 6 ]]; then NOVA_MY_IP="$HOST_IPV6" else @@ -481,6 +487,10 @@ function create_nova_conf { NOVA_ENABLED_APIS=$(echo $NOVA_ENABLED_APIS | sed "s/,metadata//") fi iniset $NOVA_CONF DEFAULT enabled_apis "$NOVA_ENABLED_APIS" + if [[ "$NOVA_ENFORCE_SCOPE" == "True" || "$ENFORCE_SCOPE" == "True" ]]; then + iniset $NOVA_CONF oslo_policy enforce_new_defaults True + iniset $NOVA_CONF oslo_policy enforce_scope True + fi if is_service_enabled tls-proxy && [ "$NOVA_USE_MOD_WSGI" == "False" ]; then # Set the service port for a proxy to take the original iniset $NOVA_CONF DEFAULT osapi_compute_listen_port "$NOVA_SERVICE_PORT_INT" diff --git a/lib/tempest b/lib/tempest index 87a2244784..b232f24540 100644 --- a/lib/tempest +++ b/lib/tempest @@ -674,6 +674,10 @@ function configure_tempest { iniset $TEMPEST_CONFIG auth admin_project_name '' fi + if [[ "$NOVA_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then + iniset $TEMPEST_CONFIG enforce_scope nova true + fi + if [[ "$GLANCE_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then iniset $TEMPEST_CONFIG enforce_scope glance true fi From d00921a57bcd9b408817ac7feddfc49b49b9cea2 Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Mon, 14 Nov 2022 06:50:45 +0000 Subject: [PATCH 278/574] Revert "Make debian-bullseye job non-voting" This reverts commit a4680766515ed9317b71cfb39cd0d75dc04f3d9c. Reason for revert: Debian job got repaired Change-Id: I3ef969f6e373de103d26c9282cab94cea7ae87e5 --- .zuul.yaml | 2 -- 1 file changed, 2 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 1923444601..6dbb5bacbe 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -674,8 +674,6 @@ description: Debian Bullseye platform test nodeset: devstack-single-node-debian-bullseye timeout: 9000 - # TODO(danms) n-v until the known issue is resolved - voting: false vars: configure_swap_size: 4096 From 8d299efa4b6346ccfc3c6fcf9cf011b3c884bebc Mon Sep 17 00:00:00 2001 From: "Dr. 
Jens Harbott" Date: Sun, 9 Oct 2022 11:00:07 +0200 Subject: [PATCH 279/574] Switch devstack nodeset to Ubuntu 22.04 (jammy) Depends-On: https://review.opendev.org/c/openstack/devstack-plugin-ceph/+/864948 Change-Id: I26b4784a4d772abbf8572f6273bda37f2fec5336 --- .zuul.yaml | 54 +++++++++++++++++++++++++++++++++++++++--------------- 1 file changed, 39 insertions(+), 15 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 56acb37a03..76a70dc85c 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -168,6 +168,36 @@ nodes: - compute1 +- nodeset: + name: openstack-two-node-jammy + nodes: + - name: controller + label: ubuntu-jammy + - name: compute1 + label: ubuntu-jammy + groups: + # Node where tests are executed and test results collected + - name: tempest + nodes: + - controller + # Nodes running the compute service + - name: compute + nodes: + - controller + - compute1 + # Nodes that are not the controller + - name: subnode + nodes: + - compute1 + # Switch node for multinode networking setup + - name: switch + nodes: + - controller + # Peer nodes for multinode networking setup + - name: peers + nodes: + - compute1 + - nodeset: name: openstack-two-node-focal nodes: @@ -455,7 +485,7 @@ description: | Minimal devstack base job, intended for use by jobs that need less than the normal minimum set of required-projects. - nodeset: openstack-single-node-focal + nodeset: openstack-single-node-jammy required-projects: - opendev.org/openstack/requirements vars: @@ -526,6 +556,7 @@ - opendev.org/openstack/swift timeout: 7200 vars: + configure_swap_size: 4096 devstack_localrc: # Common OpenStack services settings SWIFT_REPLICAS: 1 @@ -651,7 +682,7 @@ - job: name: devstack-multinode parent: devstack - nodeset: openstack-two-node-focal + nodeset: openstack-two-node-jammy description: | Simple multinode test to verify multinode functionality on devstack side. This is not meant to be used as a parent job. @@ -669,8 +700,6 @@ # TODO(kopecmartin) n-v until the following is resolved: # https://bugs.launchpad.net/neutron/+bug/1979047 voting: false - vars: - configure_swap_size: 4096 - job: name: devstack-platform-debian-bullseye @@ -693,13 +722,11 @@ configure_swap_size: 4096 - job: - name: devstack-platform-ubuntu-jammy + name: devstack-platform-ubuntu-focal parent: tempest-full-py3 - description: Ubuntu 22.04 LTS (jammy) platform test - nodeset: openstack-single-node-jammy + description: Ubuntu 20.04 LTS (focal) platform test + nodeset: openstack-single-node-focal timeout: 9000 - vars: - configure_swap_size: 4096 - job: name: devstack-platform-ubuntu-jammy-ovn-source @@ -769,8 +796,6 @@ description: Fedora latest platform test nodeset: devstack-single-node-fedora-latest voting: false - vars: - configure_swap_size: 4096 - job: name: devstack-platform-fedora-latest-virt-preview @@ -779,7 +804,6 @@ nodeset: devstack-single-node-fedora-latest voting: false vars: - configure_swap_size: 4096 devstack_localrc: ENABLE_FEDORA_VIRT_PREVIEW_REPO: true @@ -839,7 +863,7 @@ - job: name: devstack-unit-tests - nodeset: ubuntu-focal + nodeset: ubuntu-jammy description: | Runs unit tests on devstack project. 
@@ -860,7 +884,7 @@ - devstack-platform-centos-9-stream - devstack-platform-debian-bullseye - devstack-platform-rocky-blue-onyx - - devstack-platform-ubuntu-jammy + - devstack-platform-ubuntu-focal - devstack-platform-ubuntu-jammy-ovn-source - devstack-platform-ubuntu-jammy-ovs - devstack-multinode @@ -910,7 +934,7 @@ # https://bugs.launchpad.net/neutron/+bug/1979047 # - devstack-platform-centos-9-stream - devstack-platform-debian-bullseye - - devstack-platform-ubuntu-jammy + - devstack-platform-ubuntu-focal - devstack-enforce-scope - devstack-multinode - devstack-unit-tests From 818d1a225d54291d1da1f8011f92affb2998d0e9 Mon Sep 17 00:00:00 2001 From: Martin Kopec Date: Mon, 28 Nov 2022 11:19:45 +0100 Subject: [PATCH 280/574] [doc] Update Ubuntu to 22.04 This updates documentation to reflect the switch to Ubuntu 22.04 (jammy) in the CI: https://review.opendev.org/c/openstack/devstack/+/860795 Change-Id: I8bee430029dcc719629bd92451c2791571f8a30c --- doc/source/index.rst | 2 +- tools/install_pip.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/index.rst b/doc/source/index.rst index ba53c6d279..3f206f411e 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -40,7 +40,7 @@ Start with a clean and minimal install of a Linux system. DevStack attempts to support the two latest LTS releases of Ubuntu, the latest/current Fedora version, CentOS/RHEL/Rocky Linux 9 and OpenSUSE. -If you do not have a preference, Ubuntu 20.04 (Focal Fossa) is the +If you do not have a preference, Ubuntu 22.04 (Jammy) is the most tested, and will probably go the smoothest. Add Stack User (optional) diff --git a/tools/install_pip.sh b/tools/install_pip.sh index 7c5d4c6555..91b180c06f 100755 --- a/tools/install_pip.sh +++ b/tools/install_pip.sh @@ -139,7 +139,7 @@ if is_fedora && [[ ${DISTRO} == f* || ${DISTRO} == rhel9 ]]; then # recent enough anyway. This is included via rpms/general : # Simply fall through elif is_ubuntu; then - # pip on Ubuntu 20.04 is new enough, too + # pip on Ubuntu 20.04 and higher is new enough, too # drop setuptools from u-c sed -i -e '/setuptools/d' $REQUIREMENTS_DIR/upper-constraints.txt else From db3eff7dd27acdc973e8d189bda80d642be92f03 Mon Sep 17 00:00:00 2001 From: Takashi Kajinami Date: Wed, 30 Nov 2022 14:03:36 +0900 Subject: [PATCH 281/574] Stop setting [ovs_vif_ovs] ovsdb_interface The option was already deprecated in os-vif 2.2.0[1]. The override is no longer required since bug 1929446 was already resolved. [1] https://review.opendev.org/c/openstack/os-vif/+/744816 Related-Bug: #1929446 Change-Id: I5bc55723a178b32d947da2ac91d2f62aa8124990 --- lib/os-vif | 7 ------- 1 file changed, 7 deletions(-) diff --git a/lib/os-vif b/lib/os-vif index 865645c0d5..7c8bee3744 100644 --- a/lib/os-vif +++ b/lib/os-vif @@ -1,10 +1,5 @@ #!/bin/bash -# support vsctl or native. -# until bug #1929446 is resolved we override the os-vif default -# and fall back to the legacy "vsctl" driver. 
-OS_VIF_OVS_OVSDB_INTERFACE=${OS_VIF_OVS_OVSDB_INTERFACE:="vsctl"} - function is_ml2_ovs { if [[ "${Q_AGENT}" == "openvswitch" ]]; then echo "True" @@ -19,11 +14,9 @@ OS_VIF_OVS_ISOLATE_VIF=$(trueorfalse False OS_VIF_OVS_ISOLATE_VIF) function configure_os_vif { if [[ -e ${NOVA_CONF} ]]; then - iniset ${NOVA_CONF} os_vif_ovs ovsdb_interface ${OS_VIF_OVS_OVSDB_INTERFACE} iniset ${NOVA_CONF} os_vif_ovs isolate_vif ${OS_VIF_OVS_ISOLATE_VIF} fi if [[ -e ${NEUTRON_CONF} ]]; then - iniset ${NEUTRON_CONF} os_vif_ovs ovsdb_interface ${OS_VIF_OVS_OVSDB_INTERFACE} iniset ${NEUTRON_CONF} os_vif_ovs isolate_vif ${OS_VIF_OVS_ISOLATE_VIF} fi } From 16c2b389ed8efca70fa1e65395becdaea84f8b44 Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Wed, 30 Nov 2022 14:24:07 -0600 Subject: [PATCH 282/574] Add RBAC scope and new defaults setting support for placement Adding devstack flag to enable and test the Placement API policies scope and new defaults. Depends-On: https://review.opendev.org/c/openstack/tempest/+/866212 Change-Id: I6f56fc28f2c1e4cdde946deb2ae06afddf85ff0d --- lib/placement | 10 ++++++++++ lib/tempest | 4 ++++ 2 files changed, 14 insertions(+) diff --git a/lib/placement b/lib/placement index b7798669a1..bc22c564f4 100644 --- a/lib/placement +++ b/lib/placement @@ -48,6 +48,12 @@ fi PLACEMENT_SERVICE_PROTOCOL=${PLACEMENT_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} PLACEMENT_SERVICE_HOST=${PLACEMENT_SERVICE_HOST:-$SERVICE_HOST} +# Flag to set the oslo_policy.enforce_scope and oslo_policy.enforce_new_defaults. +# This is used to switch the Placement API policies scope and new defaults. +# By Default, these flag are False. +# For more detail: https://docs.openstack.org/oslo.policy/latest/configuration/index.html#oslo_policy.enforce_scope +PLACEMENT_ENFORCE_SCOPE=$(trueorfalse False PLACEMENT_ENFORCE_SCOPE) + # Functions # --------- @@ -111,6 +117,10 @@ function configure_placement { else _config_placement_apache_wsgi fi + if [[ "$PLACEMENT_ENFORCE_SCOPE" == "True" || "$ENFORCE_SCOPE" == "True" ]]; then + iniset $PLACEMENT_CONF oslo_policy enforce_new_defaults True + iniset $PLACEMENT_CONF oslo_policy enforce_scope True + fi } # create_placement_accounts() - Set up required placement accounts diff --git a/lib/tempest b/lib/tempest index b232f24540..5cd4d18439 100644 --- a/lib/tempest +++ b/lib/tempest @@ -678,6 +678,10 @@ function configure_tempest { iniset $TEMPEST_CONFIG enforce_scope nova true fi + if [[ "$PLACEMENT_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then + iniset $TEMPEST_CONFIG enforce_scope placement true + fi + if [[ "$GLANCE_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then iniset $TEMPEST_CONFIG enforce_scope glance true fi From 6440c6d7e69c6726c8d31ea225b90967c50528e8 Mon Sep 17 00:00:00 2001 From: wangxiyuan Date: Fri, 5 Aug 2022 14:18:13 +0800 Subject: [PATCH 283/574] Add openEuler 22.03 LTS support openEuler 20.03 LTS SP2 support was removed from devstack in last few months due to its python version is too old and the CI job always fail. And openEuler 20.03 LTS SP2 was out of maintainer in May 2022 by openEuler community. The newest LTS version was released in March 2022 called 22.03 LTS. This release will be maintained for at least 2 years. And the python version is 3.9 which works well for devstack. This Patch add the openEuler distro support back. And add the related CI job to make sure its works well. 
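As a hedged illustration of the restored support, not part of this commit message or its diff: the is_openeuler helper added to functions-common later in this patch lets DevStack code and plugins branch on the distro in the usual way. install_package is an existing DevStack function, and the package name is taken from this patch's stack.sh hunk purely as an example.

    # Minimal sketch, assuming DevStack's functions-common has been sourced.
    if is_openeuler; then
        # openEuler 22.03 pulls some dependencies from the OpenStack release
        # repo (see the stack.sh hunk in this patch); package name is
        # illustrative only.
        install_package openstack-release-wallaby
    fi
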
Change-Id: I99c99d08b4a44d3dc644bd2e56b5ae7f7ee44210 --- .zuul.yaml | 72 ++++++++++++++++++++++++++ doc/source/index.rst | 3 +- files/rpms/ceph | 2 +- files/rpms/general | 4 +- files/rpms/nova | 2 +- functions-common | 13 ++++- lib/apache | 2 +- lib/nova_plugins/functions-libvirt | 8 ++- roles/apache-logs-conf/tasks/main.yaml | 1 + stack.sh | 11 +++- 10 files changed, 109 insertions(+), 9 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 471ca100c8..8e20f6ed34 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -108,6 +108,16 @@ nodes: - controller +- nodeset: + name: devstack-single-node-openeuler-22.03 + nodes: + - name: controller + label: openEuler-22-03-LTS + groups: + - name: tempest + nodes: + - controller + - nodeset: name: openstack-two-node nodes: @@ -777,6 +787,62 @@ # Enable Neutron ML2/OVS services q-agt: true +- job: + name: devstack-platform-openEuler-22.03-ovn-source + parent: tempest-full-py3 + description: openEuler 22.03 LTS platform test (OVN) + nodeset: devstack-single-node-openeuler-22.03 + voting: false + timeout: 9000 + vars: + configure_swap_size: 4096 + devstack_localrc: + # NOTE(wxy): OVN package is not supported by openEuler yet. Build it + # from source instead. + OVN_BUILD_FROM_SOURCE: True + OVN_BRANCH: "v21.06.0" + OVS_BRANCH: "a4b04276ab5934d087669ff2d191a23931335c87" + OVS_SYSCONFDIR: "/usr/local/etc/openvswitch" + +- job: + name: devstack-platform-openEuler-22.03-ovs + parent: tempest-full-py3 + description: openEuler 22.03 LTS platform test (OVS) + nodeset: devstack-single-node-openeuler-22.03 + voting: false + timeout: 9000 + vars: + configure_swap_size: 8192 + devstack_localrc: + Q_AGENT: openvswitch + Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch + Q_ML2_TENANT_NETWORK_TYPE: vxlan + devstack_services: + # Disable OVN services + ovn-northd: false + ovn-controller: false + ovs-vswitchd: false + ovsdb-server: false + # Disable Neutron ML2/OVN services + q-ovn-metadata-agent: false + # Enable Neutron ML2/OVS services + q-agt: true + q-dhcp: true + q-l3: true + q-meta: true + q-metering: true + group-vars: + subnode: + devstack_services: + # Disable OVN services + ovn-controller: false + ovs-vswitchd: false + ovsdb-server: false + # Disable Neutron ML2/OVN services + q-ovn-metadata-agent: false + # Enable Neutron ML2/OVS services + q-agt: true + - job: name: devstack-no-tls-proxy parent: tempest-full-py3 @@ -885,6 +951,8 @@ - devstack-platform-ubuntu-focal - devstack-platform-ubuntu-jammy-ovn-source - devstack-platform-ubuntu-jammy-ovs + - devstack-platform-openEuler-22.03-ovn-source + - devstack-platform-openEuler-22.03-ovs - devstack-multinode - devstack-unit-tests - openstack-tox-bashate @@ -1017,3 +1085,7 @@ periodic: jobs: - devstack-no-tls-proxy + periodic-weekly: + jobs: + - devstack-platform-openEuler-22.03-ovn-source + - devstack-platform-openEuler-22.03-ovs diff --git a/doc/source/index.rst b/doc/source/index.rst index 3f206f411e..1e932f88a5 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -38,7 +38,8 @@ Install Linux Start with a clean and minimal install of a Linux system. DevStack attempts to support the two latest LTS releases of Ubuntu, the -latest/current Fedora version, CentOS/RHEL/Rocky Linux 9 and OpenSUSE. +latest/current Fedora version, CentOS/RHEL/Rocky Linux 9, OpenSUSE and +openEuler. If you do not have a preference, Ubuntu 22.04 (Jammy) is the most tested, and will probably go the smoothest. 
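The remaining hunks adjust per-distro package lists and helpers. As a hedged recap of why openEuler-22.03 must also be added to SUPPORTED_DISTROS: DevStack's distro guard in stack.sh warns on unlisted platforms and refuses to continue unless FORCE=yes is set. The SUPPORTED_DISTROS line below matches the stack.sh hunk later in this patch; the rest of the guard is paraphrased from stack.sh and the exact die message may differ.

    # Condensed sketch of stack.sh's distro guard with the new entry.
    SUPPORTED_DISTROS="bullseye|focal|jammy|f36|opensuse-15.2|opensuse-tumbleweed|rhel8|rhel9|openEuler-22.03"
    if [[ ! ${DISTRO} =~ $SUPPORTED_DISTROS ]]; then
        echo "WARNING: this script has not been tested on $DISTRO"
        if [[ "$FORCE" != "yes" ]]; then
            die $LINENO "If you wish to run this script anyway run with FORCE=yes"
        fi
    fi
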
diff --git a/files/rpms/ceph b/files/rpms/ceph index 33a55f80ea..19f158fd57 100644 --- a/files/rpms/ceph +++ b/files/rpms/ceph @@ -1,3 +1,3 @@ ceph # NOPRIME -redhat-lsb-core # not:rhel9 +redhat-lsb-core # not:rhel9,openEuler-22.03 xfsprogs diff --git a/files/rpms/general b/files/rpms/general index 7697513149..b6866de62d 100644 --- a/files/rpms/general +++ b/files/rpms/general @@ -26,9 +26,9 @@ pkgconfig postgresql-devel # psycopg2 psmisc python3-devel -python3-pip +python3-pip # not:openEuler-22.03 python3-systemd -redhat-rpm-config # missing dep for gcc hardening flags, see rhbz#1217376 +redhat-rpm-config # not:openEuler-22.03 missing dep for gcc hardening flags, see rhbz#1217376 tar tcpdump unzip diff --git a/files/rpms/nova b/files/rpms/nova index f2824ee2c4..e0f13b854a 100644 --- a/files/rpms/nova +++ b/files/rpms/nova @@ -4,7 +4,7 @@ ebtables genisoimage # not:rhel9 required for config_drive iptables iputils -kernel-modules +kernel-modules # not:openEuler-22.03 kpartx parted polkit diff --git a/functions-common b/functions-common index 0aee5d163e..4eed5d8407 100644 --- a/functions-common +++ b/functions-common @@ -399,7 +399,7 @@ function _ensure_lsb_release { elif [[ -x $(command -v zypper 2>/dev/null) ]]; then sudo zypper -n install lsb-release elif [[ -x $(command -v dnf 2>/dev/null) ]]; then - sudo dnf install -y redhat-lsb-core + sudo dnf install -y redhat-lsb-core || sudo dnf install -y openeuler-lsb else die $LINENO "Unable to find or auto-install lsb_release" fi @@ -474,6 +474,8 @@ function GetDistro { # Drop the . release as we assume it's compatible # XXX re-evaluate when we get RHEL10 DISTRO="rhel${os_RELEASE::1}" + elif [[ "$os_VENDOR" =~ (openEuler) ]]; then + DISTRO="openEuler-$os_RELEASE" else # We can't make a good choice here. Setting a sensible DISTRO # is part of the problem, but not the major issue -- we really @@ -525,6 +527,7 @@ function is_fedora { fi [ "$os_VENDOR" = "Fedora" ] || [ "$os_VENDOR" = "Red Hat" ] || \ + [ "$os_VENDOR" = "openEuler" ] || \ [ "$os_VENDOR" = "RedHatEnterpriseServer" ] || \ [ "$os_VENDOR" = "RedHatEnterprise" ] || \ [ "$os_VENDOR" = "Rocky" ] || \ @@ -575,6 +578,14 @@ function is_ubuntu { [ "$os_PACKAGE" = "deb" ] } +# Determine if current distribution is an openEuler distribution +# is_openeuler +function is_openeuler { + if [[ -z "$os_PACKAGE" ]]; then + GetOSVersion + fi + [ "$os_VENDOR" = "openEuler" ] +} # Git Functions # ============= diff --git a/lib/apache b/lib/apache index 705776c55b..dd8c9a0f06 100644 --- a/lib/apache +++ b/lib/apache @@ -95,7 +95,7 @@ function install_apache_uwsgi { # didn't fix Python 3.10 compatibility before release. Should be # fixed in uwsgi 4.9.0; can remove this when packages available # or we drop this release - elif is_fedora && ! [[ $DISTRO =~ f36 ]]; then + elif is_fedora && ! is_openeuler && ! [[ $DISTRO =~ f36 ]]; then # Note httpd comes with mod_proxy_uwsgi and it is loaded by # default; the mod_proxy_uwsgi package actually conflicts now. # See: diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt index 3e7d2801d6..c0e45ebb85 100644 --- a/lib/nova_plugins/functions-libvirt +++ b/lib/nova_plugins/functions-libvirt @@ -82,11 +82,17 @@ function install_libvirt { sudo dnf copr enable -y @virtmaint-sig/virt-preview fi + if is_openeuler; then + qemu_package=qemu + else + qemu_package=qemu-kvm + fi + # Note that in CentOS/RHEL this needs to come from the RDO # repositories (qemu-kvm-ev ... which provides this package) # as the base system version is too old. 
We should have # pre-installed these - install_package qemu-kvm + install_package $qemu_package install_package libvirt libvirt-devel python3-libvirt if is_arch "aarch64"; then diff --git a/roles/apache-logs-conf/tasks/main.yaml b/roles/apache-logs-conf/tasks/main.yaml index bd64574c9b..6b7ea37857 100644 --- a/roles/apache-logs-conf/tasks/main.yaml +++ b/roles/apache-logs-conf/tasks/main.yaml @@ -64,6 +64,7 @@ 'Debian': '/etc/apache2/sites-enabled/' 'Suse': '/etc/apache2/conf.d/' 'RedHat': '/etc/httpd/conf.d/' + 'openEuler': '/etc/httpd/conf.d/' - name: Discover configurations find: diff --git a/stack.sh b/stack.sh index cc90fca576..28576d1e14 100755 --- a/stack.sh +++ b/stack.sh @@ -229,7 +229,7 @@ write_devstack_version # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -SUPPORTED_DISTROS="bullseye|focal|jammy|f36|opensuse-15.2|opensuse-tumbleweed|rhel8|rhel9" +SUPPORTED_DISTROS="bullseye|focal|jammy|f36|opensuse-15.2|opensuse-tumbleweed|rhel8|rhel9|openEuler-22.03" if [[ ! ${DISTRO} =~ $SUPPORTED_DISTROS ]]; then echo "WARNING: this script has not been tested on $DISTRO" @@ -394,6 +394,15 @@ elif [[ $DISTRO == "rhel9" ]]; then sudo dnf config-manager --set-enabled crb # rabbitmq and other packages are provided by RDO repositories. _install_rdo +elif [[ $DISTRO == "openEuler-22.03" ]]; then + # There are some problem in openEuler. We should fix it first. Some required + # package/action runs before fixup script. So we can't fix there. + # + # 1. the hostname package is not installed by default + # 2. Some necessary packages are in openstack repo, for example liberasurecode-devel + # 3. python3-pip can be uninstalled by `get_pip.py` automaticly. + install_package hostname openstack-release-wallaby + uninstall_package python3-pip fi # Ensure python is installed From 0a40648b3884c374e314105c33c2a20c85ab2f7f Mon Sep 17 00:00:00 2001 From: Miguel Lavalle Date: Wed, 7 Dec 2022 16:51:28 -0600 Subject: [PATCH 284/574] Fix the db user for mariadb in ubuntu 22.04 In Ubuntu 22.04, mariadb version 10.6 is installed. Per [0] and [1] authentication management was changed in version 10.4. This change adapts the way the db user is created to the new rules in versions 10.4 and later. [0] https://mariadb.com/kb/en/authentication-from-mariadb-104/ [1] https://mariadb.org/authentication-in-mariadb-10-4/ Closes-Bug: #1999090 Change-Id: I77a699a9e191eb83628ad5d361282e66744b6e4a --- lib/databases/mysql | 29 +++++++++++++++++++++-------- 1 file changed, 21 insertions(+), 8 deletions(-) diff --git a/lib/databases/mysql b/lib/databases/mysql index b292da25bd..fbad44e36a 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -100,8 +100,13 @@ function configure_database_mysql { # Set the root password - only works the first time. For Ubuntu, we already # did that with debconf before installing the package, but we still try, - # because the package might have been installed already. - sudo mysqladmin -u root password $DATABASE_PASSWORD || true + # because the package might have been installed already. We don't do this + # for Ubuntu 22.04 (jammy) because the authorization model change in + # version 10.4 of mariadb. See + # https://mariadb.org/authentication-in-mariadb-10-4/ + if ! 
(is_ubuntu && [[ "$DISTRO" == "jammy" ]] && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]); then + sudo mysqladmin -u root password $DATABASE_PASSWORD || true + fi # In case of Mariadb, giving hostname in arguments causes permission # problems as it expects connection through socket @@ -115,13 +120,21 @@ function configure_database_mysql { # as root so it works only as sudo. To restore old "mysql like" behaviour, # we need to change auth plugin for root user if is_ubuntu && [[ "$DISTRO" != "bullseye" ]] && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]; then - sudo mysql $cmd_args -e "UPDATE mysql.user SET plugin='' WHERE user='$DATABASE_USER' AND host='localhost';" - sudo mysql $cmd_args -e "FLUSH PRIVILEGES;" + if [[ "$DISTRO" == "jammy" ]]; then + # For Ubuntu 22.04 (jammy) we follow the model outlined in + # https://mariadb.org/authentication-in-mariadb-10-4/ + sudo mysql -e "ALTER USER $DATABASE_USER@localhost IDENTIFIED VIA mysql_native_password USING PASSWORD('$DATABASE_PASSWORD');" + else + sudo mysql $cmd_args -e "UPDATE mysql.user SET plugin='' WHERE user='$DATABASE_USER' AND host='localhost';" + sudo mysql $cmd_args -e "FLUSH PRIVILEGES;" + fi + fi + if ! (is_ubuntu && [[ "$DISTRO" == "jammy" ]] && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]); then + # Create DB user if it does not already exist + sudo mysql $cmd_args -e "CREATE USER IF NOT EXISTS '$DATABASE_USER'@'%' identified by '$DATABASE_PASSWORD';" + # Update the DB to give user '$DATABASE_USER'@'%' full control of the all databases: + sudo mysql $cmd_args -e "GRANT ALL PRIVILEGES ON *.* TO '$DATABASE_USER'@'%';" fi - # Create DB user if it does not already exist - sudo mysql $cmd_args -e "CREATE USER IF NOT EXISTS '$DATABASE_USER'@'%' identified by '$DATABASE_PASSWORD';" - # Update the DB to give user '$DATABASE_USER'@'%' full control of the all databases: - sudo mysql $cmd_args -e "GRANT ALL PRIVILEGES ON *.* TO '$DATABASE_USER'@'%';" # Now update ``my.cnf`` for some local needs and restart the mysql service From 30acfc6d14bb42db822352426cc2d4e337717c72 Mon Sep 17 00:00:00 2001 From: Slawek Kaplonski Date: Thu, 24 Nov 2022 14:12:08 +0100 Subject: [PATCH 285/574] [neutron] Don't configure firewall_driver for core ML2 plugin In the past firewall_driver setting was configured for ML2 plugin because it was used in the neutron.agent.securitygroups_rpc.is_firewall_enabled() function but currently it's not needed anymore as there is other config option "enable_security_group" for that. Related-bug: #1996748 Change-Id: I9b09c6afb3f1f1c33d1bdfea52ba6f4c0d0cf2dc --- lib/neutron_plugins/ml2 | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2 index fa61f1ea30..46edacdc54 100644 --- a/lib/neutron_plugins/ml2 +++ b/lib/neutron_plugins/ml2 @@ -111,18 +111,7 @@ function neutron_plugin_configure_service { fi fi fi - # REVISIT(rkukura): Setting firewall_driver here for - # neutron.agent.securitygroups_rpc.is_firewall_enabled() which is - # used in the server, in case no L2 agent is configured on the - # server's node. If an L2 agent is configured, this will get - # overridden with the correct driver. The ml2 plugin should - # instead use its own config variable to indicate whether security - # groups is enabled, and that will need to be set here instead. 
-    if [[ "$Q_USE_SECGROUP" == "True" ]]; then
-        iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.not.a.real.FirewallDriver
-    else
-        iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.firewall.NoopFirewallDriver
-    fi
+    populate_ml2_config /$Q_PLUGIN_CONF_FILE securitygroup enable_security_group=$Q_USE_SECGROUP

     populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 mechanism_drivers=$Q_ML2_PLUGIN_MECHANISM_DRIVERS
     if [[ "$Q_ML2_PLUGIN_MECHANISM_DRIVERS" == *"linuxbridge"* ]]; then

From 9a1be7794bd3b1b06a89183a800f42f77cd1b1b9 Mon Sep 17 00:00:00 2001
From: Ghanshyam Mann
Date: Thu, 8 Dec 2022 20:24:46 -0600
Subject: [PATCH 286/574] Pin tox<4.0.0 for <=stable/zed branch testing

Tox 4.0.0 has some incompatible changes, especially being more strict
about allowlist_externals. Tempest recently changed allowlist_externals
not to be *[1], causing failures on jobs where lib/tempest fails to run
the tempest command in the virtual env.
----------
venv: commands[0]> tempest verify-config -uro /tmp/tmp.qH5KgJHTF4
venv: failed with tempest is not allowed, use allowlist_externals to allow it
------
We do not need to test/fix the <=stable/zed branches with tox 4.0.0,
and pinning them to the tox version that was compatible at the time
those stable branches were released is a better way.

This commit proposes:
1. Pinning tox<4.0.0 for <=stable/zed branch testing
2. Workaround to unblock the master gate by pinning it <4.0.0 but
we should make our testing compatible with tox 4.0.0 soon.

Depends-On: https://review.opendev.org/c/openstack/devstack/+/867066
Related-Bug: #1999183
[1] https://review.opendev.org/c/openstack/tempest/+/865314
devstack based jobs started failing to run the tempest command in the venv.

Change-Id: I9a138af94dedc0d8ce5a0d519d75779415d3c30b
---
 lib/tempest            | 7 ++++++-
 playbooks/tox/pre.yaml | 8 +++++++-
 2 files changed, 13 insertions(+), 2 deletions(-)

diff --git a/lib/tempest b/lib/tempest
index b232f24540..ec2949ac38 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -779,7 +779,12 @@ function configure_tempest {
 # install_tempest() - Collect source and prepare
 function install_tempest {
     git_clone $TEMPEST_REPO $TEMPEST_DIR $TEMPEST_BRANCH
-    pip_install 'tox!=2.8.0'
+    # NOTE(gmann): Pinning tox<4.0.0 for stable/zed and lower. Tox 4.0.0
+    # released after zed was released and has some incompatible changes
+    # and it is ok not to fix the issues caused by tox 4.0.0 in stable
+    # branches jobs. We can continue testing the stable/zed and lower
+    # branches with tox<4.0.0
+    pip_install 'tox!=2.8.0,<4.0.0'
     pushd $TEMPEST_DIR
     # NOTE(gmann): checkout the TEMPEST_BRANCH in case TEMPEST_BRANCH
     # is tag name not master. git_clone would not checkout tag because
diff --git a/playbooks/tox/pre.yaml b/playbooks/tox/pre.yaml
index d7e4670a80..68d5254251 100644
--- a/playbooks/tox/pre.yaml
+++ b/playbooks/tox/pre.yaml
@@ -5,4 +5,10 @@
       bindep_profile: test
       bindep_dir: "{{ zuul_work_dir }}"
     - test-setup
-    - ensure-tox
+    # NOTE(gmann): Pinning tox<4.0.0 for stable/zed and lower. Tox 4.0.0
+    # released after zed was released and has some incompatible changes
+    # and it is ok not to fix the issues caused by tox 4.0.0 in stable
+    # branches jobs. 
We can continue testing the stable/zed and lower + # branches with tox<4.0.0 + - role: ensure-tox + ensure_tox_version: "<4" From a52041cd3f067156e478e355f5712a60e12ce649 Mon Sep 17 00:00:00 2001 From: Slawek Kaplonski Date: Fri, 18 Nov 2022 11:39:56 +0100 Subject: [PATCH 287/574] Drop lib/neutron module Module lib/neutron was introduced long time ago as new module to deploy neutron. It was intended to replace old lib/neutron-legacy module. But since very long time it wasn't really finished and used by anyone and lib/neutron-legacy is defacto standard module used by everyone to deploy neutron with devstack. In [1] unfinished lib/neutron was deprecated and now it's time to remove it from the devstack code. This patch also renames old "lib/neutron-legacy" module to be "lib/neutron" now. Previously "old" lib/neutron-legacy module was accepting neutron services names wit "q-" prefix and "new" lib/neutron module was accepting services with "neutron-" prefix. Now, as there is only one module it accepts both prefixes. For historical reasons and to be consistent with old lib/neutron-legacy which was widely used everywhere, services will be named with "q-" prefix but both prefixes will be accepted to enable or disable services. This patch also moves _configure_neutron_service function to be called at the end of the "configure_neutron" after all agents and service plugins are already configured. [1] https://review.opendev.org/c/openstack/devstack/+/823653 Related-bug: #1996748 Change-Id: Ibf1c8b2ee6b6618f77cd8486e9c687993d7cb4a0 --- clean.sh | 1 - lib/neutron | 1438 ++++++++++++++-------- lib/neutron-legacy | 1097 +---------------- lib/neutron_plugins/README.md | 2 +- lib/neutron_plugins/bigswitch_floodlight | 2 +- lib/neutron_plugins/brocade | 2 +- lib/neutron_plugins/linuxbridge_agent | 2 +- lib/neutron_plugins/ml2 | 2 +- lib/neutron_plugins/openvswitch_agent | 2 +- lib/neutron_plugins/ovn_agent | 18 +- lib/neutron_plugins/services/l3 | 4 +- lib/neutron_plugins/services/metering | 2 +- lib/neutron_plugins/services/qos | 2 +- lib/tempest | 4 +- 14 files changed, 933 insertions(+), 1645 deletions(-) diff --git a/clean.sh b/clean.sh index 870dfd4313..6a31cc624a 100755 --- a/clean.sh +++ b/clean.sh @@ -50,7 +50,6 @@ source $TOP_DIR/lib/placement source $TOP_DIR/lib/cinder source $TOP_DIR/lib/swift source $TOP_DIR/lib/neutron -source $TOP_DIR/lib/neutron-legacy set -o xtrace diff --git a/lib/neutron b/lib/neutron index b3e3d72e8c..c8ee8c5e76 100644 --- a/lib/neutron +++ b/lib/neutron @@ -1,128 +1,311 @@ #!/bin/bash # # lib/neutron -# Install and start **Neutron** network services +# functions - functions specific to neutron # Dependencies: -# # ``functions`` file # ``DEST`` must be defined +# ``STACK_USER`` must be defined # ``stack.sh`` calls the entry points in this order: # -# - is_XXXX_enabled -# - install_XXXX -# - configure_XXXX -# - init_XXXX -# - start_XXXX -# - stop_XXXX -# - cleanup_XXXX +# - install_neutron_agent_packages +# - install_neutronclient +# - install_neutron +# - install_neutron_third_party +# - configure_neutron +# - init_neutron +# - configure_neutron_third_party +# - init_neutron_third_party +# - start_neutron_third_party +# - create_nova_conf_neutron +# - configure_neutron_after_post_config +# - start_neutron_service_and_check +# - check_neutron_third_party_integration +# - start_neutron_agents +# - create_neutron_initial_network +# +# ``unstack.sh`` calls the entry points in this order: +# +# - stop_neutron +# - stop_neutron_third_party +# - cleanup_neutron -# Save trace 
setting -XTRACE=$(set +o | grep xtrace) -set +o xtrace +# Functions in lib/neutron are classified into the following categories: +# +# - entry points (called from stack.sh or unstack.sh) +# - internal functions +# - neutron exercises +# - 3rd party programs -# Defaults + +# Neutron Networking +# ------------------ + +# Make sure that neutron is enabled in ``ENABLED_SERVICES``. If you want +# to run Neutron on this host, make sure that q-svc is also in +# ``ENABLED_SERVICES``. +# +# See "Neutron Network Configuration" below for additional variables +# that must be set in localrc for connectivity across hosts with +# Neutron. + +# Settings # -------- + +# Neutron Network Configuration +# ----------------------------- + +if is_service_enabled tls-proxy; then + Q_PROTOCOL="https" +fi + + # Set up default directories GITDIR["python-neutronclient"]=$DEST/python-neutronclient + +NEUTRON_DIR=$DEST/neutron +NEUTRON_FWAAS_DIR=$DEST/neutron-fwaas + +# Support entry points installation of console scripts +if [[ -d $NEUTRON_DIR/bin/neutron-server ]]; then + NEUTRON_BIN_DIR=$NEUTRON_DIR/bin +else + NEUTRON_BIN_DIR=$(get_python_exec_prefix) +fi + +NEUTRON_CONF_DIR=/etc/neutron +NEUTRON_CONF=$NEUTRON_CONF_DIR/neutron.conf +export NEUTRON_TEST_CONFIG_FILE=${NEUTRON_TEST_CONFIG_FILE:-"$NEUTRON_CONF_DIR/debug.ini"} + # NEUTRON_DEPLOY_MOD_WSGI defines how neutron is deployed, allowed values: # - False (default) : Run neutron under Eventlet # - True : Run neutron under uwsgi # TODO(annp): Switching to uwsgi in next cycle if things turn out to be stable # enough NEUTRON_DEPLOY_MOD_WSGI=$(trueorfalse False NEUTRON_DEPLOY_MOD_WSGI) -NEUTRON_AGENT=${NEUTRON_AGENT:-openvswitch} -NEUTRON_DIR=$DEST/neutron + +NEUTRON_UWSGI_CONF=$NEUTRON_CONF_DIR/neutron-api-uwsgi.ini # If NEUTRON_ENFORCE_SCOPE == True, it will set "enforce_scope" # and "enforce_new_defaults" to True in the Neutron's config to enforce usage # of the new RBAC policies and scopes. NEUTRON_ENFORCE_SCOPE=$(trueorfalse False NEUTRON_ENFORCE_SCOPE) -NEUTRON_DISTRIBUTED_ROUTING=$(trueorfalse False NEUTRON_DISTRIBUTED_ROUTING) -# Distributed Virtual Router (DVR) configuration -# Can be: -# - ``legacy`` - No DVR functionality -# - ``dvr_snat`` - Controller or single node DVR -# - ``dvr`` - Compute node in multi-node DVR -# - ``dvr_no_external`` - Compute node in multi-node DVR, no external network -# -# Default is 'dvr_snat' since it can handle both DVR and legacy routers -NEUTRON_DVR_MODE=${NEUTRON_DVR_MODE:-dvr_snat} - -NEUTRON_BIN_DIR=$(get_python_exec_prefix) -NEUTRON_DHCP_BINARY="neutron-dhcp-agent" - -NEUTRON_CONF_DIR=/etc/neutron -NEUTRON_CONF=$NEUTRON_CONF_DIR/neutron.conf -NEUTRON_META_CONF=$NEUTRON_CONF_DIR/metadata_agent.ini -NEUTRON_META_DATA_HOST=${NEUTRON_META_DATA_HOST:-$(ipv6_unquote $SERVICE_HOST)} - -NEUTRON_DHCP_CONF=$NEUTRON_CONF_DIR/dhcp_agent.ini -NEUTRON_L3_CONF=$NEUTRON_CONF_DIR/l3_agent.ini -NEUTRON_AGENT_CONF=$NEUTRON_CONF_DIR/ -NEUTRON_CREATE_INITIAL_NETWORKS=${NEUTRON_CREATE_INITIAL_NETWORKS:-True} +# Agent binaries. Note, binary paths for other agents are set in per-service +# scripts in lib/neutron_plugins/services/ +AGENT_DHCP_BINARY="$NEUTRON_BIN_DIR/neutron-dhcp-agent" +AGENT_L3_BINARY=${AGENT_L3_BINARY:-"$NEUTRON_BIN_DIR/neutron-l3-agent"} +AGENT_META_BINARY="$NEUTRON_BIN_DIR/neutron-metadata-agent" + +# Agent config files. 
Note, plugin-specific Q_PLUGIN_CONF_FILE is set and +# loaded from per-plugin scripts in lib/neutron_plugins/ +Q_DHCP_CONF_FILE=$NEUTRON_CONF_DIR/dhcp_agent.ini +# NOTE(slaweq): NEUTRON_DHCP_CONF is used e.g. in neutron repository, +# it was previously defined in the lib/neutron module which is now deleted. +NEUTRON_DHCP_CONF=$Q_DHCP_CONF_FILE +Q_L3_CONF_FILE=$NEUTRON_CONF_DIR/l3_agent.ini +# NOTE(slaweq): NEUTRON_L3_CONF is used e.g. in neutron repository, +# it was previously defined in the lib/neutron module which is now deleted. +NEUTRON_L3_CONF=$Q_L3_CONF_FILE +Q_META_CONF_FILE=$NEUTRON_CONF_DIR/metadata_agent.ini + +# Default name for Neutron database +Q_DB_NAME=${Q_DB_NAME:-neutron} +# Default Neutron Plugin +Q_PLUGIN=${Q_PLUGIN:-ml2} +# Default Neutron Port +Q_PORT=${Q_PORT:-9696} +# Default Neutron Internal Port when using TLS proxy +Q_PORT_INT=${Q_PORT_INT:-19696} +# Default Neutron Host +Q_HOST=${Q_HOST:-$SERVICE_HOST} +# Default protocol +Q_PROTOCOL=${Q_PROTOCOL:-$SERVICE_PROTOCOL} +# Default listen address +Q_LISTEN_ADDRESS=${Q_LISTEN_ADDRESS:-$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)} +# Default admin username +Q_ADMIN_USERNAME=${Q_ADMIN_USERNAME:-neutron} +# Default auth strategy +Q_AUTH_STRATEGY=${Q_AUTH_STRATEGY:-keystone} +# RHEL's support for namespaces requires using veths with ovs +Q_OVS_USE_VETH=${Q_OVS_USE_VETH:-False} +Q_USE_ROOTWRAP=${Q_USE_ROOTWRAP:-True} +Q_USE_ROOTWRAP_DAEMON=$(trueorfalse True Q_USE_ROOTWRAP_DAEMON) +# Meta data IP +Q_META_DATA_IP=${Q_META_DATA_IP:-$(ipv6_unquote $SERVICE_HOST)} +# Allow Overlapping IP among subnets +Q_ALLOW_OVERLAPPING_IP=${Q_ALLOW_OVERLAPPING_IP:-True} +Q_NOTIFY_NOVA_PORT_STATUS_CHANGES=${Q_NOTIFY_NOVA_PORT_STATUS_CHANGES:-True} +Q_NOTIFY_NOVA_PORT_DATA_CHANGES=${Q_NOTIFY_NOVA_PORT_DATA_CHANGES:-True} +VIF_PLUGGING_IS_FATAL=${VIF_PLUGGING_IS_FATAL:-True} +VIF_PLUGGING_TIMEOUT=${VIF_PLUGGING_TIMEOUT:-300} + +# Allow to skip stopping of OVN services +SKIP_STOP_OVN=${SKIP_STOP_OVN:-False} + +# The directory which contains files for Q_PLUGIN_EXTRA_CONF_FILES. +# /etc/neutron is assumed by many of devstack plugins. Do not change. +_Q_PLUGIN_EXTRA_CONF_PATH=/etc/neutron -NEUTRON_STATE_PATH=${NEUTRON_STATE_PATH:=$DATA_DIR/neutron} +# The name of the service in the endpoint URL +NEUTRON_ENDPOINT_SERVICE_NAME=${NEUTRON_ENDPOINT_SERVICE_NAME-"networking"} +if [[ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" && -z "$NEUTRON_ENDPOINT_SERVICE_NAME" ]]; then + NEUTRON_ENDPOINT_SERVICE_NAME="networking" +fi -NEUTRON_UWSGI_CONF=$NEUTRON_CONF_DIR/neutron-api-uwsgi.ini +# List of config file names in addition to the main plugin config file +# To add additional plugin config files, use ``neutron_server_config_add`` +# utility function. For example: +# +# ``neutron_server_config_add file1`` +# +# These config files are relative to ``/etc/neutron``. The above +# example would specify ``--config-file /etc/neutron/file1`` for +# neutron server. +declare -a -g Q_PLUGIN_EXTRA_CONF_FILES + +# same as Q_PLUGIN_EXTRA_CONF_FILES, but with absolute path. 
+declare -a -g _Q_PLUGIN_EXTRA_CONF_FILES_ABS + + +Q_RR_CONF_FILE=$NEUTRON_CONF_DIR/rootwrap.conf +if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then + Q_RR_COMMAND="sudo" +else + NEUTRON_ROOTWRAP=$(get_rootwrap_location neutron) + Q_RR_COMMAND="sudo $NEUTRON_ROOTWRAP $Q_RR_CONF_FILE" + if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then + Q_RR_DAEMON_COMMAND="sudo $NEUTRON_ROOTWRAP-daemon $Q_RR_CONF_FILE" + fi +fi -# By default, use the ML2 plugin -NEUTRON_CORE_PLUGIN=${NEUTRON_CORE_PLUGIN:-ml2} -NEUTRON_CORE_PLUGIN_CONF_FILENAME=${NEUTRON_CORE_PLUGIN_CONF_FILENAME:-ml2_conf.ini} -NEUTRON_CORE_PLUGIN_CONF_PATH=$NEUTRON_CONF_DIR/plugins/$NEUTRON_CORE_PLUGIN -NEUTRON_CORE_PLUGIN_CONF=$NEUTRON_CORE_PLUGIN_CONF_PATH/$NEUTRON_CORE_PLUGIN_CONF_FILENAME -NEUTRON_METERING_AGENT_CONF_FILENAME=${NEUTRON_METERING_AGENT_CONF_FILENAME:-metering_agent.ini} -NEUTRON_METERING_AGENT_CONF=$NEUTRON_CONF_DIR/$NEUTRON_METERING_AGENT_CONF_FILENAME +# Distributed Virtual Router (DVR) configuration +# Can be: +# - ``legacy`` - No DVR functionality +# - ``dvr_snat`` - Controller or single node DVR +# - ``dvr`` - Compute node in multi-node DVR +# - ``dvr_no_external`` - Compute node in multi-node DVR, no external network +# +Q_DVR_MODE=${Q_DVR_MODE:-legacy} +if [[ "$Q_DVR_MODE" != "legacy" ]]; then + Q_ML2_PLUGIN_MECHANISM_DRIVERS=openvswitch,l2population +fi -NEUTRON_AGENT_BINARY=${NEUTRON_AGENT_BINARY:-neutron-$NEUTRON_AGENT-agent} -NEUTRON_L3_BINARY=${NEUTRON_L3_BINARY:-neutron-l3-agent} -NEUTRON_META_BINARY=${NEUTRON_META_BINARY:-neutron-metadata-agent} -NEUTRON_METERING_BINARY=${NEUTRON_METERING_BINARY:-neutron-metering-agent} +# Provider Network Configurations +# -------------------------------- + +# The following variables control the Neutron ML2 plugins' allocation +# of tenant networks and availability of provider networks. If these +# are not configured in ``localrc``, tenant networks will be local to +# the host (with no remote connectivity), and no physical resources +# will be available for the allocation of provider networks. + +# To disable tunnels (GRE or VXLAN) for tenant networks, +# set to False in ``local.conf``. +# GRE tunnels are only supported by the openvswitch. +ENABLE_TENANT_TUNNELS=${ENABLE_TENANT_TUNNELS:-True} + +# If using GRE, VXLAN or GENEVE tunnels for tenant networks, +# specify the range of IDs from which tenant networks are +# allocated. Can be overridden in ``localrc`` if necessary. +TENANT_TUNNEL_RANGES=${TENANT_TUNNEL_RANGES:-1:1000} + +# To use VLANs for tenant networks, set to True in localrc. VLANs +# are supported by the ML2 plugins, requiring additional configuration +# described below. +ENABLE_TENANT_VLANS=${ENABLE_TENANT_VLANS:-False} + +# If using VLANs for tenant networks, set in ``localrc`` to specify +# the range of VLAN VIDs from which tenant networks are +# allocated. An external network switch must be configured to +# trunk these VLANs between hosts for multi-host connectivity. +# +# Example: ``TENANT_VLAN_RANGE=1000:1999`` +TENANT_VLAN_RANGE=${TENANT_VLAN_RANGE:-} + +# If using VLANs for tenant networks, or if using flat or VLAN +# provider networks, set in ``localrc`` to the name of the physical +# network, and also configure ``OVS_PHYSICAL_BRIDGE`` for the +# openvswitch agent or ``LB_PHYSICAL_INTERFACE`` for the linuxbridge +# agent, as described below. 
+# +# Example: ``PHYSICAL_NETWORK=default`` +PHYSICAL_NETWORK=${PHYSICAL_NETWORK:-public} + +# With the openvswitch agent, if using VLANs for tenant networks, +# or if using flat or VLAN provider networks, set in ``localrc`` to +# the name of the OVS bridge to use for the physical network. The +# bridge will be created if it does not already exist, but a +# physical interface must be manually added to the bridge as a +# port for external connectivity. +# +# Example: ``OVS_PHYSICAL_BRIDGE=br-eth1`` +OVS_PHYSICAL_BRIDGE=${OVS_PHYSICAL_BRIDGE:-br-ex} -# Public facing bits -if is_service_enabled tls-proxy; then - NEUTRON_SERVICE_PROTOCOL="https" +# With the linuxbridge agent, if using VLANs for tenant networks, +# or if using flat or VLAN provider networks, set in ``localrc`` to +# the name of the network interface to use for the physical +# network. +# +# Example: ``LB_PHYSICAL_INTERFACE=eth1`` +if [[ $Q_AGENT == "linuxbridge" && -z ${LB_PHYSICAL_INTERFACE} ]]; then + default_route_dev=$( (ip route; ip -6 route) | grep ^default | head -n 1 | awk '{print $5}') + die_if_not_set $LINENO default_route_dev "Failure retrieving default route device" + LB_PHYSICAL_INTERFACE=$default_route_dev fi -NEUTRON_SERVICE_HOST=${NEUTRON_SERVICE_HOST:-$SERVICE_HOST} -NEUTRON_SERVICE_PORT=${NEUTRON_SERVICE_PORT:-9696} -NEUTRON_SERVICE_PORT_INT=${NEUTRON_SERVICE_PORT_INT:-19696} -NEUTRON_SERVICE_PROTOCOL=${NEUTRON_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} -NEUTRON_AUTH_STRATEGY=${NEUTRON_AUTH_STRATEGY:-keystone} -NEUTRON_ROOTWRAP=$(get_rootwrap_location neutron) -NEUTRON_ROOTWRAP_CONF_FILE=$NEUTRON_CONF_DIR/rootwrap.conf -NEUTRON_ROOTWRAP_CMD="$NEUTRON_ROOTWRAP $NEUTRON_ROOTWRAP_CONF_FILE" -NEUTRON_ROOTWRAP_DAEMON_CMD="$NEUTRON_ROOTWRAP-daemon $NEUTRON_ROOTWRAP_CONF_FILE" +# With the openvswitch plugin, set to True in ``localrc`` to enable +# provider GRE tunnels when ``ENABLE_TENANT_TUNNELS`` is False. +# +# Example: ``OVS_ENABLE_TUNNELING=True`` +OVS_ENABLE_TUNNELING=${OVS_ENABLE_TUNNELING:-$ENABLE_TENANT_TUNNELS} + +# Use DHCP agent for providing metadata service in the case of +# without L3 agent (No Route Agent), set to True in localrc. +ENABLE_ISOLATED_METADATA=${ENABLE_ISOLATED_METADATA:-False} + +# Add a static route as dhcp option, so the request to 169.254.169.254 +# will be able to reach through a route(DHCP agent) +# This option require ENABLE_ISOLATED_METADATA = True +ENABLE_METADATA_NETWORK=${ENABLE_METADATA_NETWORK:-False} +# Neutron plugin specific functions +# --------------------------------- + +# Please refer to ``lib/neutron_plugins/README.md`` for details. +if [ -f $TOP_DIR/lib/neutron_plugins/$Q_PLUGIN ]; then + source $TOP_DIR/lib/neutron_plugins/$Q_PLUGIN +fi -# This is needed because _neutron_ovs_base_configure_l3_agent uses it to create -# an external network bridge -PUBLIC_BRIDGE=${PUBLIC_BRIDGE:-br-ex} -PUBLIC_BRIDGE_MTU=${PUBLIC_BRIDGE_MTU:-1500} +# Agent metering service plugin functions +# ------------------------------------------- -# Network type - default vxlan, however enables vlan based jobs to override -# using the legacy environment variable as well as a new variable in greater -# alignment with the naming scheme of this plugin. 
-NEUTRON_TENANT_NETWORK_TYPE=${NEUTRON_TENANT_NETWORK_TYPE:-vxlan} +# Hardcoding for 1 service plugin for now +source $TOP_DIR/lib/neutron_plugins/services/metering -NEUTRON_TENANT_VLAN_RANGE=${NEUTRON_TENANT_VLAN_RANGE:-${TENANT_VLAN_RANGE:-100:150}} +# L3 Service functions +source $TOP_DIR/lib/neutron_plugins/services/l3 -# Physical network for VLAN network usage. -NEUTRON_PHYSICAL_NETWORK=${NEUTRON_PHYSICAL_NETWORK:-} +# Additional Neutron service plugins +source $TOP_DIR/lib/neutron_plugins/services/placement +source $TOP_DIR/lib/neutron_plugins/services/trunk +source $TOP_DIR/lib/neutron_plugins/services/qos -# The name of the service in the endpoint URL -NEUTRON_ENDPOINT_SERVICE_NAME=${NEUTRON_ENDPOINT_SERVICE_NAME-"networking"} -if [[ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" && -z "$NEUTRON_ENDPOINT_SERVICE_NAME" ]]; then - NEUTRON_ENDPOINT_SERVICE_NAME="networking" +# Use security group or not +if has_neutron_plugin_security_group; then + Q_USE_SECGROUP=${Q_USE_SECGROUP:-True} +else + Q_USE_SECGROUP=False fi +# Save trace setting +_XTRACE_NEUTRON=$(set +o | grep xtrace) +set +o xtrace -# Additional neutron api config files -declare -a -g _NEUTRON_SERVER_EXTRA_CONF_FILES_ABS # Functions # --------- @@ -136,310 +319,194 @@ function is_neutron_enabled { } # Test if any Neutron services are enabled -# is_neutron_enabled +# TODO(slaweq): this is not really needed now and we should remove it as soon +# as it will not be called from any other Devstack plugins, like e.g. Neutron +# plugin function is_neutron_legacy_enabled { - # first we need to remove all "neutron-" from DISABLED_SERVICES list - disabled_services_copy=$(echo $DISABLED_SERVICES | sed 's/neutron-//g') - [[ ,${disabled_services_copy} =~ ,"neutron" ]] && return 1 - [[ ,${ENABLED_SERVICES} =~ ,"q-" ]] && return 0 - return 1 + return 0 } -if is_neutron_legacy_enabled; then - source $TOP_DIR/lib/neutron-legacy -fi - -# cleanup_neutron() - Remove residual data files, anything left over from previous -# runs that a clean run would need to clean up -function cleanup_neutron_new { - deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!" - source $TOP_DIR/lib/neutron_plugins/${NEUTRON_AGENT}_agent - if is_neutron_ovs_base_plugin; then - neutron_ovs_base_cleanup +function _determine_config_server { + if [[ "$Q_PLUGIN_EXTRA_CONF_PATH" != '' ]]; then + if [[ "$Q_PLUGIN_EXTRA_CONF_PATH" = "$_Q_PLUGIN_EXTRA_CONF_PATH" ]]; then + deprecated "Q_PLUGIN_EXTRA_CONF_PATH is deprecated" + else + die $LINENO "Q_PLUGIN_EXTRA_CONF_PATH is deprecated" + fi fi - - if [[ $NEUTRON_AGENT == "linuxbridge" ]]; then - neutron_lb_cleanup + if [[ ${#Q_PLUGIN_EXTRA_CONF_FILES[@]} > 0 ]]; then + deprecated "Q_PLUGIN_EXTRA_CONF_FILES is deprecated. Use neutron_server_config_add instead." 
fi - # delete all namespaces created by neutron - for ns in $(sudo ip netns list | grep -o -E '(qdhcp|qrouter|qlbaas|fip|snat)-[0-9a-f-]*'); do - sudo ip netns delete ${ns} + for cfg_file in ${Q_PLUGIN_EXTRA_CONF_FILES[@]}; do + _Q_PLUGIN_EXTRA_CONF_FILES_ABS+=($_Q_PLUGIN_EXTRA_CONF_PATH/$cfg_file) done -} -# configure_root_helper_options() - Configure agent rootwrap helper options -function configure_root_helper_options { - local conffile=$1 - iniset $conffile agent root_helper "sudo $NEUTRON_ROOTWRAP_CMD" - iniset $conffile agent root_helper_daemon "sudo $NEUTRON_ROOTWRAP_DAEMON_CMD" + local cfg_file + local opts="--config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE" + for cfg_file in ${_Q_PLUGIN_EXTRA_CONF_FILES_ABS[@]}; do + opts+=" --config-file $cfg_file" + done + echo "$opts" } -# configure_neutron() - Set config files, create data dirs, etc -function configure_neutron_new { - deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!" - sudo install -d -o $STACK_USER $NEUTRON_CONF_DIR - - (cd $NEUTRON_DIR && exec ./tools/generate_config_file_samples.sh) - - cp $NEUTRON_DIR/etc/neutron.conf.sample $NEUTRON_CONF - - configure_neutron_rootwrap - - mkdir -p $NEUTRON_CORE_PLUGIN_CONF_PATH +function _determine_config_l3 { + local opts="--config-file $NEUTRON_CONF --config-file $Q_L3_CONF_FILE" + echo "$opts" +} - # NOTE(yamamoto): A decomposed plugin should prepare the config file in - # its devstack plugin. - if [ -f $NEUTRON_DIR/etc/neutron/plugins/$NEUTRON_CORE_PLUGIN/$NEUTRON_CORE_PLUGIN_CONF_FILENAME.sample ]; then - cp $NEUTRON_DIR/etc/neutron/plugins/$NEUTRON_CORE_PLUGIN/$NEUTRON_CORE_PLUGIN_CONF_FILENAME.sample $NEUTRON_CORE_PLUGIN_CONF +# For services and agents that require it, dynamically construct a list of +# --config-file arguments that are passed to the binary. +function determine_config_files { + local opts="" + case "$1" in + "neutron-server") opts="$(_determine_config_server)" ;; + "neutron-l3-agent") opts="$(_determine_config_l3)" ;; + esac + if [ -z "$opts" ] ; then + die $LINENO "Could not determine config files for $1." fi + echo "$opts" +} - iniset $NEUTRON_CONF database connection `database_connection_url neutron` - iniset $NEUTRON_CONF DEFAULT state_path $NEUTRON_STATE_PATH - iniset $NEUTRON_CONF oslo_concurrency lock_path $NEUTRON_STATE_PATH/lock - iniset $NEUTRON_CONF DEFAULT use_syslog $SYSLOG - - iniset $NEUTRON_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - +# configure_neutron() +# Set common config for all neutron server and agents. +function configure_neutron { + _configure_neutron_common iniset_rpc_backend neutron $NEUTRON_CONF - # Neutron API server & Neutron plugin - if is_service_enabled neutron-api; then - local policy_file=$NEUTRON_CONF_DIR/policy.json - # Allow neutron user to administer neutron to match neutron account - # NOTE(amotoki): This is required for nova works correctly with neutron. 
- if [ -f $NEUTRON_DIR/etc/policy.json ]; then - cp $NEUTRON_DIR/etc/policy.json $policy_file - sed -i 's/"context_is_admin": "role:admin"/"context_is_admin": "role:admin or user_name:neutron"/g' $policy_file - else - echo '{"context_is_admin": "role:admin or user_name:neutron"}' > $policy_file - fi - - cp $NEUTRON_DIR/etc/api-paste.ini $NEUTRON_CONF_DIR/api-paste.ini - - iniset $NEUTRON_CONF DEFAULT core_plugin $NEUTRON_CORE_PLUGIN - - iniset $NEUTRON_CONF DEFAULT policy_file $policy_file - iniset $NEUTRON_CONF DEFAULT router_distributed $NEUTRON_DISTRIBUTED_ROUTING - - iniset $NEUTRON_CONF DEFAULT auth_strategy $NEUTRON_AUTH_STRATEGY - configure_keystone_authtoken_middleware $NEUTRON_CONF neutron - configure_keystone_authtoken_middleware $NEUTRON_CONF nova nova - - # Configure tenant network type - iniset $NEUTRON_CORE_PLUGIN_CONF ml2 tenant_network_types $NEUTRON_TENANT_NETWORK_TYPE - - local mech_drivers="openvswitch" - if [[ "$NEUTRON_DISTRIBUTED_ROUTING" = "True" ]]; then - mech_drivers+=",l2population" - else - mech_drivers+=",linuxbridge" - fi - if [[ "$mech_drivers" == *"linuxbridge"* ]]; then - iniset $NEUTRON_CONF experimental linuxbridge True - fi - - iniset $NEUTRON_CORE_PLUGIN_CONF ml2 mechanism_drivers $mech_drivers - iniset $NEUTRON_CORE_PLUGIN_CONF ml2 overlay_ip_version $TUNNEL_IP_VERSION - - iniset $NEUTRON_CORE_PLUGIN_CONF ml2_type_vxlan vni_ranges 1001:2000 - iniset $NEUTRON_CORE_PLUGIN_CONF ml2_type_flat flat_networks $PUBLIC_NETWORK_NAME - if [[ "$NEUTRON_TENANT_NETWORK_TYPE" =~ "vlan" ]] && [[ "$NEUTRON_PHYSICAL_NETWORK" != "" ]]; then - iniset $NEUTRON_CORE_PLUGIN_CONF ml2_type_vlan network_vlan_ranges ${NEUTRON_PHYSICAL_NETWORK}:${NEUTRON_TENANT_VLAN_RANGE} - fi - if [[ "$NEUTRON_PORT_SECURITY" = "True" ]]; then - neutron_ml2_extension_driver_add port_security - fi - configure_rbac_policies - fi - - # Neutron OVS or LB agent - if is_service_enabled neutron-agent; then - iniset $NEUTRON_CORE_PLUGIN_CONF agent tunnel_types vxlan - iniset $NEUTRON_CORE_PLUGIN_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - configure_root_helper_options $NEUTRON_CORE_PLUGIN_CONF - - # Configure the neutron agent - if [[ $NEUTRON_AGENT == "linuxbridge" ]]; then - iniset $NEUTRON_CORE_PLUGIN_CONF securitygroup firewall_driver iptables - iniset $NEUTRON_CORE_PLUGIN_CONF vxlan local_ip $TUNNEL_ENDPOINT_IP - elif [[ $NEUTRON_AGENT == "openvswitch" ]]; then - iniset $NEUTRON_CORE_PLUGIN_CONF securitygroup firewall_driver openvswitch - iniset $NEUTRON_CORE_PLUGIN_CONF ovs local_ip $TUNNEL_ENDPOINT_IP - - if [[ "$NEUTRON_DISTRIBUTED_ROUTING" = "True" ]]; then - iniset $NEUTRON_CORE_PLUGIN_CONF agent l2_population True - iniset $NEUTRON_CORE_PLUGIN_CONF agent enable_distributed_routing True - iniset $NEUTRON_CORE_PLUGIN_CONF agent arp_responder True - fi - fi - - if ! 
running_in_container; then - enable_kernel_bridge_firewall - fi + if is_service_enabled q-metering neutron-metering; then + _configure_neutron_metering fi - - # DHCP Agent - if is_service_enabled neutron-dhcp; then - cp $NEUTRON_DIR/etc/dhcp_agent.ini.sample $NEUTRON_DHCP_CONF - - iniset $NEUTRON_DHCP_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - # make it so we have working DNS from guests - iniset $NEUTRON_DHCP_CONF DEFAULT dnsmasq_local_resolv True - - configure_root_helper_options $NEUTRON_DHCP_CONF - iniset $NEUTRON_DHCP_CONF DEFAULT interface_driver $NEUTRON_AGENT - neutron_plugin_configure_dhcp_agent $NEUTRON_DHCP_CONF + if is_service_enabled q-agt neutron-agent; then + _configure_neutron_plugin_agent + fi + if is_service_enabled q-dhcp neutron-dhcp; then + _configure_neutron_dhcp_agent + fi + if is_service_enabled q-l3 neutron-l3; then + _configure_neutron_l3_agent + fi + if is_service_enabled q-meta neutron-metadata-agent; then + _configure_neutron_metadata_agent fi - if is_service_enabled neutron-l3; then - cp $NEUTRON_DIR/etc/l3_agent.ini.sample $NEUTRON_L3_CONF - iniset $NEUTRON_L3_CONF DEFAULT interface_driver $NEUTRON_AGENT - neutron_service_plugin_class_add router - configure_root_helper_options $NEUTRON_L3_CONF - iniset $NEUTRON_L3_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - neutron_plugin_configure_l3_agent $NEUTRON_L3_CONF + if [[ "$Q_DVR_MODE" != "legacy" ]]; then + _configure_dvr + fi + if is_service_enabled ceilometer; then + _configure_neutron_ceilometer_notifications + fi - # Configure the neutron agent to serve external network ports - if [[ $NEUTRON_AGENT == "linuxbridge" ]]; then - iniset $NEUTRON_CORE_PLUGIN_CONF linux_bridge bridge_mappings "$PUBLIC_NETWORK_NAME:$PUBLIC_BRIDGE" - else - iniset $NEUTRON_CORE_PLUGIN_CONF ovs bridge_mappings "$PUBLIC_NETWORK_NAME:$PUBLIC_BRIDGE" - fi + if [[ $Q_AGENT == "ovn" ]]; then + configure_ovn + configure_ovn_plugin + fi - if [[ "$NEUTRON_DISTRIBUTED_ROUTING" = "True" ]]; then - iniset $NEUTRON_L3_CONF DEFAULT agent_mode $NEUTRON_DVR_MODE + # Configure Neutron's advanced services + if is_service_enabled q-placement neutron-placement; then + configure_placement_extension + fi + if is_service_enabled q-trunk neutron-trunk; then + configure_trunk_extension + fi + if is_service_enabled q-qos neutron-qos; then + configure_qos + if is_service_enabled q-l3 neutron-l3; then + configure_l3_agent_extension_fip_qos + configure_l3_agent_extension_gateway_ip_qos fi fi - # Metadata - if is_service_enabled neutron-metadata-agent; then - cp $NEUTRON_DIR/etc/metadata_agent.ini.sample $NEUTRON_META_CONF - - iniset $NEUTRON_META_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - iniset $NEUTRON_META_CONF DEFAULT nova_metadata_host $NEUTRON_META_DATA_HOST - iniset $NEUTRON_META_CONF DEFAULT metadata_workers $API_WORKERS - # TODO(ihrachys) do we really need to set rootwrap for metadata agent? 
- configure_root_helper_options $NEUTRON_META_CONF - - # TODO(dtroyer): remove the v2.0 hard code below - iniset $NEUTRON_META_CONF DEFAULT auth_url $KEYSTONE_SERVICE_URI - configure_keystone_authtoken_middleware $NEUTRON_META_CONF neutron DEFAULT + # Finally configure Neutron server and core plugin + if is_service_enabled q-agt neutron-agent q-svc neutron-api; then + _configure_neutron_service fi - # Format logging - setup_logging $NEUTRON_CONF + iniset $NEUTRON_CONF DEFAULT api_workers "$API_WORKERS" + # devstack is not a tool for running uber scale OpenStack + # clouds, therefore running without a dedicated RPC worker + # for state reports is more than adequate. + iniset $NEUTRON_CONF DEFAULT rpc_state_report_workers 0 - if is_service_enabled tls-proxy && [ "$NEUTRON_DEPLOY_MOD_WSGI" == "False" ]; then - # Set the service port for a proxy to take the original - iniset $NEUTRON_CONF DEFAULT bind_port "$NEUTRON_SERVICE_PORT_INT" - iniset $NEUTRON_CONF oslo_middleware enable_proxy_headers_parsing True - fi - - # Metering - if is_service_enabled neutron-metering; then - cp $NEUTRON_DIR/etc/metering_agent.ini.sample $NEUTRON_METERING_AGENT_CONF - neutron_service_plugin_class_add metering + if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then + write_uwsgi_config "$NEUTRON_UWSGI_CONF" "$NEUTRON_BIN_DIR/neutron-api" "/networking" fi } -# configure_neutron_rootwrap() - configure Neutron's rootwrap -function configure_neutron_rootwrap { - # Deploy new rootwrap filters files (owned by root). - # Wipe any existing rootwrap.d files first - if [[ -d $NEUTRON_CONF_DIR/rootwrap.d ]]; then - sudo rm -rf $NEUTRON_CONF_DIR/rootwrap.d +function configure_neutron_nova { + create_nova_conf_neutron $NOVA_CONF + if [[ "${CELLSV2_SETUP}" == "superconductor" ]]; then + for i in $(seq 1 $NOVA_NUM_CELLS); do + local conf + conf=$(conductor_conf $i) + create_nova_conf_neutron $conf + done fi +} - # Deploy filters to /etc/neutron/rootwrap.d - sudo install -d -o root -g root -m 755 $NEUTRON_CONF_DIR/rootwrap.d - sudo install -o root -g root -m 644 $NEUTRON_DIR/etc/neutron/rootwrap.d/*.filters $NEUTRON_CONF_DIR/rootwrap.d - - # Set up ``rootwrap.conf``, pointing to ``$NEUTRON_CONF_DIR/rootwrap.d`` - sudo install -o root -g root -m 644 $NEUTRON_DIR/etc/rootwrap.conf $NEUTRON_CONF_DIR - sudo sed -e "s:^filters_path=.*$:filters_path=$NEUTRON_CONF_DIR/rootwrap.d:" -i $NEUTRON_CONF_DIR/rootwrap.conf - - # Set up the rootwrap sudoers for Neutron - tempfile=`mktemp` - echo "$STACK_USER ALL=(root) NOPASSWD: $NEUTRON_ROOTWRAP_CMD *" >$tempfile - echo "$STACK_USER ALL=(root) NOPASSWD: $NEUTRON_ROOTWRAP_DAEMON_CMD" >>$tempfile - chmod 0440 $tempfile - sudo chown root:root $tempfile - sudo mv $tempfile /etc/sudoers.d/neutron-rootwrap -} - -# Make Neutron-required changes to nova.conf -# Takes a single optional argument which is the config file to update, -# if not passed $NOVA_CONF is used. -function configure_neutron_nova_new { - deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!" 
+function create_nova_conf_neutron { local conf=${1:-$NOVA_CONF} iniset $conf neutron auth_type "password" iniset $conf neutron auth_url "$KEYSTONE_SERVICE_URI" - iniset $conf neutron username neutron + iniset $conf neutron username "$Q_ADMIN_USERNAME" iniset $conf neutron password "$SERVICE_PASSWORD" - iniset $conf neutron user_domain_name "Default" - iniset $conf neutron project_name "$SERVICE_TENANT_NAME" - iniset $conf neutron project_domain_name "Default" - iniset $conf neutron auth_strategy $NEUTRON_AUTH_STRATEGY + iniset $conf neutron user_domain_name "$SERVICE_DOMAIN_NAME" + iniset $conf neutron project_name "$SERVICE_PROJECT_NAME" + iniset $conf neutron project_domain_name "$SERVICE_DOMAIN_NAME" + iniset $conf neutron auth_strategy "$Q_AUTH_STRATEGY" iniset $conf neutron region_name "$REGION_NAME" # optionally set options in nova_conf neutron_plugin_create_nova_conf $conf - if is_service_enabled neutron-metadata-agent; then + if is_service_enabled q-meta neutron-metadata-agent; then iniset $conf neutron service_metadata_proxy "True" fi + iniset $conf DEFAULT vif_plugging_is_fatal "$VIF_PLUGGING_IS_FATAL" + iniset $conf DEFAULT vif_plugging_timeout "$VIF_PLUGGING_TIMEOUT" } +# create_neutron_accounts() - Set up common required neutron accounts + # Tenant User Roles # ------------------------------------------------------------------ # service neutron admin # if enabled -# create_neutron_accounts() - Create required service accounts -function create_neutron_accounts_new { - deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!" +# Migrated from keystone_data.sh +function create_neutron_accounts { local neutron_url - if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then - neutron_url=$NEUTRON_SERVICE_PROTOCOL://$NEUTRON_SERVICE_HOST/ + neutron_url=$Q_PROTOCOL://$SERVICE_HOST/ else - neutron_url=$NEUTRON_SERVICE_PROTOCOL://$NEUTRON_SERVICE_HOST:$NEUTRON_SERVICE_PORT/ + neutron_url=$Q_PROTOCOL://$SERVICE_HOST:$Q_PORT/ fi if [ ! -z "$NEUTRON_ENDPOINT_SERVICE_NAME" ]; then neutron_url=$neutron_url$NEUTRON_ENDPOINT_SERVICE_NAME fi - - if [[ "$ENABLED_SERVICES" =~ "neutron-api" ]]; then + if is_service_enabled q-svc neutron-api; then create_service_user "neutron" - neutron_service=$(get_or_create_service "neutron" \ - "network" "Neutron Service") - get_or_create_endpoint $neutron_service \ + get_or_create_service "neutron" "network" "Neutron Service" + get_or_create_endpoint \ + "network" \ "$REGION_NAME" "$neutron_url" fi } # init_neutron() - Initialize databases, etc. -function init_neutron_new { - - deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!" - recreate_database neutron - +function init_neutron { + recreate_database $Q_DB_NAME time_start "dbsync" # Run Neutron db migrations - $NEUTRON_BIN_DIR/neutron-db-manage upgrade heads + $NEUTRON_BIN_DIR/neutron-db-manage --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE upgrade head time_stop "dbsync" } # install_neutron() - Collect source and prepare -function install_neutron_new { - deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!" - git_clone $NEUTRON_REPO $NEUTRON_DIR $NEUTRON_BRANCH - setup_develop $NEUTRON_DIR - +function install_neutron { # Install neutron-lib from git so we make sure we're testing # the latest code. 
if use_library_from_git "neutron-lib"; then @@ -447,17 +514,12 @@ function install_neutron_new { setup_dev_lib "neutron-lib" fi - # L3 service requires radvd - if is_service_enabled neutron-l3; then - install_package radvd - fi + git_clone $NEUTRON_REPO $NEUTRON_DIR $NEUTRON_BRANCH + setup_develop $NEUTRON_DIR - if is_service_enabled neutron-agent neutron-dhcp neutron-l3; then - #TODO(sc68cal) - kind of ugly - source $TOP_DIR/lib/neutron_plugins/${NEUTRON_AGENT}_agent - neutron_plugin_install_agent_packages + if [[ $Q_AGENT == "ovn" ]]; then + install_ovn fi - } # install_neutronclient() - Collect source and prepare @@ -469,152 +531,256 @@ function install_neutronclient { fi } -# start_neutron_api() - Start the API process ahead of other things -function start_neutron_api { - local service_port=$NEUTRON_SERVICE_PORT - local service_protocol=$NEUTRON_SERVICE_PROTOCOL +# install_neutron_agent_packages() - Collect source and prepare +function install_neutron_agent_packages { + # radvd doesn't come with the OS. Install it if the l3 service is enabled. + if is_service_enabled q-l3 neutron-l3; then + install_package radvd + fi + # install packages that are specific to plugin agent(s) + if is_service_enabled q-agt neutron-agent q-dhcp neutron-dhcp q-l3 neutron-l3; then + neutron_plugin_install_agent_packages + fi +} + +# Finish neutron configuration +function configure_neutron_after_post_config { + if [[ $Q_SERVICE_PLUGIN_CLASSES != '' ]]; then + iniset $NEUTRON_CONF DEFAULT service_plugins $Q_SERVICE_PLUGIN_CLASSES + fi + configure_rbac_policies +} + +# configure_rbac_policies() - Configure Neutron to enforce new RBAC +# policies and scopes if NEUTRON_ENFORCE_SCOPE == True +function configure_rbac_policies { + if [[ "$NEUTRON_ENFORCE_SCOPE" == "True" || "$ENFORCE_SCOPE" == True ]]; then + iniset $NEUTRON_CONF oslo_policy enforce_new_defaults True + iniset $NEUTRON_CONF oslo_policy enforce_scope True + else + iniset $NEUTRON_CONF oslo_policy enforce_new_defaults False + iniset $NEUTRON_CONF oslo_policy enforce_scope False + fi +} + +# Start running OVN processes +function start_ovn_services { + if [[ $Q_AGENT == "ovn" ]]; then + init_ovn + start_ovn + if [[ "$OVN_L3_CREATE_PUBLIC_NETWORK" == "True" ]]; then + if [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" != "True" ]]; then + echo "OVN_L3_CREATE_PUBLIC_NETWORK=True is being ignored " + echo "because NEUTRON_CREATE_INITIAL_NETWORKS is set to False" + else + create_public_bridge + fi + fi + fi +} + +# Start running processes +function start_neutron_service_and_check { + local service_port=$Q_PORT + local service_protocol=$Q_PROTOCOL + local cfg_file_options local neutron_url + + cfg_file_options="$(determine_config_files neutron-server)" + if is_service_enabled tls-proxy; then - service_port=$NEUTRON_SERVICE_PORT_INT + service_port=$Q_PORT_INT service_protocol="http" fi - - local opts="" - opts+=" --config-file $NEUTRON_CONF" - opts+=" --config-file $NEUTRON_CORE_PLUGIN_CONF" - local cfg_file - for cfg_file in ${_NEUTRON_SERVER_EXTRA_CONF_FILES_ABS[@]}; do - opts+=" --config-file $cfg_file" - done - + # Start the Neutron service if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then + enable_service neutron-api run_process neutron-api "$(which uwsgi) --procname-prefix neutron-api --ini $NEUTRON_UWSGI_CONF" - neutron_url=$service_protocol://$NEUTRON_SERVICE_HOST/ + neutron_url=$Q_PROTOCOL://$Q_HOST/ enable_service neutron-rpc-server - run_process neutron-rpc-server "$NEUTRON_BIN_DIR/neutron-rpc-server $opts" + run_process neutron-rpc-server 
"$NEUTRON_BIN_DIR/neutron-rpc-server $cfg_file_options" else - # Start the Neutron service - # TODO(sc68cal) Stop hard coding this - run_process neutron-api "$NEUTRON_BIN_DIR/neutron-server $opts" - neutron_url=$service_protocol://$NEUTRON_SERVICE_HOST:$service_port/ + run_process q-svc "$NEUTRON_BIN_DIR/neutron-server $cfg_file_options" + neutron_url=$service_protocol://$Q_HOST:$service_port/ # Start proxy if enabled if is_service_enabled tls-proxy; then - start_tls_proxy neutron '*' $NEUTRON_SERVICE_PORT $NEUTRON_SERVICE_HOST $NEUTRON_SERVICE_PORT_INT + start_tls_proxy neutron '*' $Q_PORT $Q_HOST $Q_PORT_INT fi fi if [ ! -z "$NEUTRON_ENDPOINT_SERVICE_NAME" ]; then neutron_url=$neutron_url$NEUTRON_ENDPOINT_SERVICE_NAME fi + echo "Waiting for Neutron to start..." - if ! wait_for_service $SERVICE_TIMEOUT $neutron_url; then - die $LINENO "neutron-api did not start" - fi + local testcmd="wget ${ssl_ca} --no-proxy -q -O- $neutron_url" + test_with_retry "$testcmd" "Neutron did not start" $SERVICE_TIMEOUT } -# start_neutron() - Start running processes -function start_neutron_new { - deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!" - # Start up the neutron agents if enabled - # TODO(sc68cal) Make this pluggable so different DevStack plugins for different Neutron plugins - # can resolve the $NEUTRON_AGENT_BINARY - if is_service_enabled neutron-agent; then - # TODO(ihrachys) stop loading ml2_conf.ini into agents, instead load agent specific files - run_process neutron-agent "$NEUTRON_BIN_DIR/$NEUTRON_AGENT_BINARY --config-file $NEUTRON_CONF --config-file $NEUTRON_CORE_PLUGIN_CONF" - fi - if is_service_enabled neutron-dhcp; then - neutron_plugin_configure_dhcp_agent $NEUTRON_DHCP_CONF - run_process neutron-dhcp "$NEUTRON_BIN_DIR/$NEUTRON_DHCP_BINARY --config-file $NEUTRON_CONF --config-file $NEUTRON_DHCP_CONF" - fi - if is_service_enabled neutron-l3; then - run_process neutron-l3 "$NEUTRON_BIN_DIR/$NEUTRON_L3_BINARY --config-file $NEUTRON_CONF --config-file $NEUTRON_L3_CONF" - fi - if is_service_enabled neutron-api && [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" == "True" ]]; then - # XXX(sc68cal) - Here's where plugins can wire up their own networks instead - # of the code in lib/neutron_plugins/services/l3 - if type -p neutron_plugin_create_initial_networks > /dev/null; then - neutron_plugin_create_initial_networks - else - # XXX(sc68cal) Load up the built in Neutron networking code and build a topology - source $TOP_DIR/lib/neutron_plugins/services/l3 - # Create the networks using servic - create_neutron_initial_network +function start_neutron { + start_l2_agent "$@" + start_other_agents "$@" +} + +# Control of the l2 agent is separated out to make it easier to test partial +# upgrades (everything upgraded except the L2 agent) +function start_l2_agent { + run_process q-agt "$AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE" + + if is_provider_network && [[ $Q_AGENT == "openvswitch" ]]; then + sudo ovs-vsctl --no-wait -- --may-exist add-port $OVS_PHYSICAL_BRIDGE $PUBLIC_INTERFACE + sudo ip link set $OVS_PHYSICAL_BRIDGE up + sudo ip link set br-int up + sudo ip link set $PUBLIC_INTERFACE up + if is_ironic_hardware; then + for IP in $(ip addr show dev $PUBLIC_INTERFACE | grep ' inet ' | awk '{print $2}'); do + sudo ip addr del $IP dev $PUBLIC_INTERFACE + sudo ip addr add $IP dev $OVS_PHYSICAL_BRIDGE + done + sudo ip route replace $FIXED_RANGE via $NETWORK_GATEWAY dev $OVS_PHYSICAL_BRIDGE fi fi - if is_service_enabled neutron-metadata-agent; then 
- run_process neutron-metadata-agent "$NEUTRON_BIN_DIR/$NEUTRON_META_BINARY --config-file $NEUTRON_CONF --config-file $NEUTRON_META_CONF" - fi +} - if is_service_enabled neutron-metering; then - run_process neutron-metering "$NEUTRON_BIN_DIR/$NEUTRON_METERING_BINARY --config-file $NEUTRON_CONF --config-file $NEUTRON_METERING_AGENT_CONF" - fi +function start_other_agents { + run_process q-dhcp "$AGENT_DHCP_BINARY --config-file $NEUTRON_CONF --config-file $Q_DHCP_CONF_FILE" + + run_process q-l3 "$AGENT_L3_BINARY $(determine_config_files neutron-l3-agent)" + + run_process q-meta "$AGENT_META_BINARY --config-file $NEUTRON_CONF --config-file $Q_META_CONF_FILE" + run_process q-metering "$AGENT_METERING_BINARY --config-file $NEUTRON_CONF --config-file $METERING_AGENT_CONF_FILENAME" } -# stop_neutron() - Stop running processes -function stop_neutron_new { - deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!" - for serv in neutron-api neutron-agent neutron-l3; do - stop_process $serv - done +# Start running processes, including screen +function start_neutron_agents { + # NOTE(slaweq): it's now just a wrapper for start_neutron function + start_neutron "$@" +} - if is_service_enabled neutron-rpc-server; then - stop_process neutron-rpc-server - fi +function stop_l2_agent { + stop_process q-agt +} - if is_service_enabled neutron-dhcp; then - stop_process neutron-dhcp +# stop_other() - Stop running processes +function stop_other { + if is_service_enabled q-dhcp neutron-dhcp; then + stop_process q-dhcp pid=$(ps aux | awk '/[d]nsmasq.+interface=(tap|ns-)/ { print $2 }') [ ! -z "$pid" ] && sudo kill -9 $pid fi - if is_service_enabled neutron-metadata-agent; then - stop_process neutron-metadata-agent + if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then + stop_process neutron-rpc-server + stop_process neutron-api + else + stop_process q-svc fi -} -# neutron_service_plugin_class_add() - add service plugin class -function neutron_service_plugin_class_add_new { - deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!" - local service_plugin_class=$1 - local plugins="" + if is_service_enabled q-l3 neutron-l3; then + sudo pkill -f "radvd -C $DATA_DIR/neutron/ra" + stop_process q-l3 + fi - plugins=$(iniget $NEUTRON_CONF DEFAULT service_plugins) - if [ $plugins ]; then - plugins+="," + if is_service_enabled q-meta neutron-metadata-agent; then + stop_process q-meta + fi + + if is_service_enabled q-metering neutron-metering; then + neutron_metering_stop + fi + + if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then + sudo pkill -9 -f $NEUTRON_ROOTWRAP-daemon || : fi - plugins+="${service_plugin_class}" - iniset $NEUTRON_CONF DEFAULT service_plugins $plugins } -function _neutron_ml2_extension_driver_add { - local driver=$1 - local drivers="" +# stop_neutron() - Stop running processes (non-screen) +function stop_neutron { + stop_other + stop_l2_agent - drivers=$(iniget $NEUTRON_CORE_PLUGIN_CONF ml2 extension_drivers) - if [ $drivers ]; then - drivers+="," + if [[ $Q_AGENT == "ovn" && $SKIP_STOP_OVN != "True" ]]; then + stop_ovn fi - drivers+="${driver}" - iniset $NEUTRON_CORE_PLUGIN_CONF ml2 extension_drivers $drivers } -function neutron_server_config_add_new { - deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!" - _NEUTRON_SERVER_EXTRA_CONF_FILES_ABS+=($1) +# _move_neutron_addresses_route() - Move the primary IP to the OVS bridge +# on startup, or back to the public interface on cleanup. 
If no IP is +# configured on the interface, just add it as a port to the OVS bridge. +function _move_neutron_addresses_route { + local from_intf=$1 + local to_intf=$2 + local add_ovs_port=$3 + local del_ovs_port=$4 + local af=$5 + + if [[ -n "$from_intf" && -n "$to_intf" ]]; then + # Remove the primary IP address from $from_intf and add it to $to_intf, + # along with the default route, if it exists. Also, when called + # on configure we will also add $from_intf as a port on $to_intf, + # assuming it is an OVS bridge. + + local IP_REPLACE="" + local IP_DEL="" + local IP_UP="" + local DEFAULT_ROUTE_GW + DEFAULT_ROUTE_GW=$(ip -f $af r | awk "/default.+$from_intf\s/ { print \$3; exit }") + local ADD_OVS_PORT="" + local DEL_OVS_PORT="" + local ARP_CMD="" + + IP_BRD=$(ip -f $af a s dev $from_intf scope global primary | grep inet | awk '{ print $2, $3, $4; exit }') + + if [ "$DEFAULT_ROUTE_GW" != "" ]; then + ADD_DEFAULT_ROUTE="sudo ip -f $af r replace default via $DEFAULT_ROUTE_GW dev $to_intf" + fi + + if [[ "$add_ovs_port" == "True" ]]; then + ADD_OVS_PORT="sudo ovs-vsctl --may-exist add-port $to_intf $from_intf" + fi + + if [[ "$del_ovs_port" == "True" ]]; then + DEL_OVS_PORT="sudo ovs-vsctl --if-exists del-port $from_intf $to_intf" + fi + + if [[ "$IP_BRD" != "" ]]; then + IP_DEL="sudo ip addr del $IP_BRD dev $from_intf" + IP_REPLACE="sudo ip addr replace $IP_BRD dev $to_intf" + IP_UP="sudo ip link set $to_intf up" + if [[ "$af" == "inet" ]]; then + IP=$(echo $IP_BRD | awk '{ print $1; exit }' | grep -o -E '(.*)/' | cut -d "/" -f1) + ARP_CMD="sudo arping -A -c 3 -w 5 -I $to_intf $IP " + fi + fi + + # The add/del OVS port calls have to happen either before or + # after the address is moved in order to not leave it orphaned. + $DEL_OVS_PORT; $IP_DEL; $IP_REPLACE; $IP_UP; $ADD_OVS_PORT; $ADD_DEFAULT_ROUTE; $ARP_CMD + fi } -# neutron_deploy_rootwrap_filters() - deploy rootwrap filters -function neutron_deploy_rootwrap_filters_new { - deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!" - local srcdir=$1 - sudo install -d -o root -g root -m 755 $NEUTRON_CONF_DIR/rootwrap.d - sudo install -o root -g root -m 644 $srcdir/etc/neutron/rootwrap.d/*.filters $NEUTRON_CONF_DIR/rootwrap.d +# _configure_public_network_connectivity() - Configures connectivity to the +# external network using $PUBLIC_INTERFACE or NAT on the single interface +# machines +function _configure_public_network_connectivity { + # If we've given a PUBLIC_INTERFACE to take over, then we assume + # that we can own the whole thing, and privot it into the OVS + # bridge. If we are not, we're probably on a single interface + # machine, and we just setup NAT so that fixed guests can get out. + if [[ -n "$PUBLIC_INTERFACE" ]]; then + _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" True False "inet" + + if [[ $(ip -f inet6 a s dev "$PUBLIC_INTERFACE" | grep -c 'global') != 0 ]]; then + _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" False False "inet6" + fi + else + for d in $default_v4_route_devs; do + sudo iptables -t nat -A POSTROUTING -o $d -s $FLOATING_RANGE -j MASQUERADE + done + fi } -# Dispatch functions -# These are needed for compatibility between the old and new implementations -# where there are function name overlaps. These will be removed when -# neutron-legacy is removed. -# TODO(sc68cal) Remove when neutron-legacy is no more. 
+# cleanup_neutron() - Remove residual data files, anything left over from previous +# runs that a clean run would need to clean up function cleanup_neutron { if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then stop_process neutron-api @@ -623,153 +789,367 @@ function cleanup_neutron { sudo rm -f $(apache_site_config_for neutron-api) fi - if is_neutron_legacy_enabled; then - # Call back to old function - cleanup_mutnauq "$@" - else - cleanup_neutron_new "$@" + if [[ -n "$OVS_PHYSICAL_BRIDGE" ]]; then + _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False True "inet" + + if [[ $(ip -f inet6 a s dev "$OVS_PHYSICAL_BRIDGE" | grep -c 'global') != 0 ]]; then + # ip(8) wants the prefix length when deleting + local v6_gateway + v6_gateway=$(ip -6 a s dev $OVS_PHYSICAL_BRIDGE | grep $IPV6_PUBLIC_NETWORK_GATEWAY | awk '{ print $2 }') + sudo ip -6 addr del $v6_gateway dev $OVS_PHYSICAL_BRIDGE + _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False False "inet6" + fi + + if is_provider_network && is_ironic_hardware; then + for IP in $(ip addr show dev $OVS_PHYSICAL_BRIDGE | grep ' inet ' | awk '{print $2}'); do + sudo ip addr del $IP dev $OVS_PHYSICAL_BRIDGE + sudo ip addr add $IP dev $PUBLIC_INTERFACE + done + sudo route del -net $FIXED_RANGE gw $NETWORK_GATEWAY dev $OVS_PHYSICAL_BRIDGE + fi fi -} -function configure_neutron { - if is_neutron_legacy_enabled; then - # Call back to old function - configure_mutnauq "$@" - else - configure_neutron_new "$@" + if is_neutron_ovs_base_plugin; then + neutron_ovs_base_cleanup fi - if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then - write_uwsgi_config "$NEUTRON_UWSGI_CONF" "$NEUTRON_BIN_DIR/neutron-api" "/networking" + if [[ $Q_AGENT == "linuxbridge" ]]; then + neutron_lb_cleanup + fi + + # delete all namespaces created by neutron + for ns in $(sudo ip netns list | grep -o -E '(qdhcp|qrouter|fip|snat)-[0-9a-f-]*'); do + sudo ip netns delete ${ns} + done + + if [[ $Q_AGENT == "ovn" ]]; then + cleanup_ovn fi } -# configure_rbac_policies() - Configure Neutron to enforce new RBAC -# policies and scopes if NEUTRON_ENFORCE_SCOPE == True -function configure_rbac_policies { - if [[ "$NEUTRON_ENFORCE_SCOPE" == "True" || "$ENFORCE_SCOPE" == "True" ]]; then - iniset $NEUTRON_CONF oslo_policy enforce_new_defaults True - iniset $NEUTRON_CONF oslo_policy enforce_scope True + +function _create_neutron_conf_dir { + # Put config files in ``NEUTRON_CONF_DIR`` for everyone to find + sudo install -d -o $STACK_USER $NEUTRON_CONF_DIR +} + +# _configure_neutron_common() +# Set common config for all neutron server and agents. +# This MUST be called before other ``_configure_neutron_*`` functions. +function _configure_neutron_common { + _create_neutron_conf_dir + + # Uses oslo config generator to generate core sample configuration files + (cd $NEUTRON_DIR && exec ./tools/generate_config_file_samples.sh) + + cp $NEUTRON_DIR/etc/neutron.conf.sample $NEUTRON_CONF + + Q_POLICY_FILE=$NEUTRON_CONF_DIR/policy.json + + # allow neutron user to administer neutron to match neutron account + # NOTE(amotoki): This is required for nova works correctly with neutron. 
+ if [ -f $NEUTRON_DIR/etc/policy.json ]; then + cp $NEUTRON_DIR/etc/policy.json $Q_POLICY_FILE + sed -i 's/"context_is_admin": "role:admin"/"context_is_admin": "role:admin or user_name:neutron"/g' $Q_POLICY_FILE else - iniset $NEUTRON_CONF oslo_policy enforce_new_defaults False - iniset $NEUTRON_CONF oslo_policy enforce_scope False + echo '{"context_is_admin": "role:admin or user_name:neutron"}' > $Q_POLICY_FILE + fi + + # Set plugin-specific variables ``Q_DB_NAME``, ``Q_PLUGIN_CLASS``. + # For main plugin config file, set ``Q_PLUGIN_CONF_PATH``, ``Q_PLUGIN_CONF_FILENAME``. + neutron_plugin_configure_common + + if [[ "$Q_PLUGIN_CONF_PATH" == '' || "$Q_PLUGIN_CONF_FILENAME" == '' || "$Q_PLUGIN_CLASS" == '' ]]; then + die $LINENO "Neutron plugin not set.. exiting" + fi + + # If needed, move config file from ``$NEUTRON_DIR/etc/neutron`` to ``NEUTRON_CONF_DIR`` + mkdir -p /$Q_PLUGIN_CONF_PATH + Q_PLUGIN_CONF_FILE=$Q_PLUGIN_CONF_PATH/$Q_PLUGIN_CONF_FILENAME + # NOTE(slaweq): NEUTRON_CORE_PLUGIN_CONF is used e.g. in neutron repository, + # it was previously defined in the lib/neutron module which is now deleted. + NEUTRON_CORE_PLUGIN_CONF=$Q_PLUGIN_CONF_FILE + # NOTE(hichihara): Some neutron vendor plugins were already decomposed and + # there is no config file in Neutron tree. They should prepare the file in each plugin. + if [ -f "$NEUTRON_DIR/$Q_PLUGIN_CONF_FILE.sample" ]; then + cp "$NEUTRON_DIR/$Q_PLUGIN_CONF_FILE.sample" /$Q_PLUGIN_CONF_FILE + elif [ -f $NEUTRON_DIR/$Q_PLUGIN_CONF_FILE ]; then + cp $NEUTRON_DIR/$Q_PLUGIN_CONF_FILE /$Q_PLUGIN_CONF_FILE + fi + + iniset $NEUTRON_CONF database connection `database_connection_url $Q_DB_NAME` + iniset $NEUTRON_CONF DEFAULT state_path $DATA_DIR/neutron + iniset $NEUTRON_CONF DEFAULT use_syslog $SYSLOG + iniset $NEUTRON_CONF DEFAULT bind_host $Q_LISTEN_ADDRESS + iniset $NEUTRON_CONF oslo_concurrency lock_path $DATA_DIR/neutron/lock + + # NOTE(freerunner): Need to adjust Region Name for nova in multiregion installation + iniset $NEUTRON_CONF nova region_name $REGION_NAME + + if [ "$VIRT_DRIVER" = 'fake' ]; then + # Disable arbitrary limits + iniset $NEUTRON_CONF quotas quota_network -1 + iniset $NEUTRON_CONF quotas quota_subnet -1 + iniset $NEUTRON_CONF quotas quota_port -1 + iniset $NEUTRON_CONF quotas quota_security_group -1 + iniset $NEUTRON_CONF quotas quota_security_group_rule -1 + fi + + # Format logging + setup_logging $NEUTRON_CONF + + if is_service_enabled tls-proxy && [ "$NEUTRON_DEPLOY_MOD_WSGI" == "False" ]; then + # Set the service port for a proxy to take the original + iniset $NEUTRON_CONF DEFAULT bind_port "$Q_PORT_INT" + iniset $NEUTRON_CONF oslo_middleware enable_proxy_headers_parsing True fi + + _neutron_setup_rootwrap } +function _configure_neutron_dhcp_agent { -function configure_neutron_nova { - if is_neutron_legacy_enabled; then - # Call back to old function - create_nova_conf_neutron $NOVA_CONF - if [[ "${CELLSV2_SETUP}" == "superconductor" ]]; then - for i in $(seq 1 $NOVA_NUM_CELLS); do - local conf - conf=$(conductor_conf $i) - create_nova_conf_neutron $conf - done - fi - else - configure_neutron_nova_new $NOVA_CONF - if [[ "${CELLSV2_SETUP}" == "superconductor" ]]; then - for i in $(seq 1 $NOVA_NUM_CELLS); do - local conf - conf=$(conductor_conf $i) - configure_neutron_nova_new $conf - done + cp $NEUTRON_DIR/etc/dhcp_agent.ini.sample $Q_DHCP_CONF_FILE + + iniset $Q_DHCP_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL + # make it so we have working DNS from guests + iniset $Q_DHCP_CONF_FILE DEFAULT dnsmasq_local_resolv 
True + configure_root_helper_options $Q_DHCP_CONF_FILE + + if ! is_service_enabled q-l3 neutron-l3; then + if [[ "$ENABLE_ISOLATED_METADATA" = "True" ]]; then + iniset $Q_DHCP_CONF_FILE DEFAULT enable_isolated_metadata $ENABLE_ISOLATED_METADATA + iniset $Q_DHCP_CONF_FILE DEFAULT enable_metadata_network $ENABLE_METADATA_NETWORK + else + if [[ "$ENABLE_METADATA_NETWORK" = "True" ]]; then + die "$LINENO" "Enable isolated metadata is a must for metadata network" + fi fi fi + + _neutron_setup_interface_driver $Q_DHCP_CONF_FILE + + neutron_plugin_configure_dhcp_agent $Q_DHCP_CONF_FILE } -function create_neutron_accounts { - if is_neutron_legacy_enabled; then - # Call back to old function - create_mutnauq_accounts "$@" - else - create_neutron_accounts_new "$@" - fi + +function _configure_neutron_metadata_agent { + cp $NEUTRON_DIR/etc/metadata_agent.ini.sample $Q_META_CONF_FILE + + iniset $Q_META_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL + iniset $Q_META_CONF_FILE DEFAULT nova_metadata_host $Q_META_DATA_IP + iniset $Q_META_CONF_FILE DEFAULT metadata_workers $API_WORKERS + configure_root_helper_options $Q_META_CONF_FILE } -function init_neutron { - if is_neutron_legacy_enabled; then - # Call back to old function - init_mutnauq "$@" - else - init_neutron_new "$@" - fi +function _configure_neutron_ceilometer_notifications { + iniset $NEUTRON_CONF oslo_messaging_notifications driver messagingv2 } -function install_neutron { - if is_neutron_legacy_enabled; then - # Call back to old function - install_mutnauq "$@" - else - install_neutron_new "$@" +function _configure_neutron_metering { + neutron_agent_metering_configure_common + neutron_agent_metering_configure_agent +} + +function _configure_dvr { + iniset $NEUTRON_CONF DEFAULT router_distributed True + iniset $Q_L3_CONF_FILE DEFAULT agent_mode $Q_DVR_MODE +} + + +# _configure_neutron_plugin_agent() - Set config files for neutron plugin agent +# It is called when q-agt is enabled. +function _configure_neutron_plugin_agent { + # Specify the default root helper prior to agent configuration to + # ensure that an agent's configuration can override the default + configure_root_helper_options /$Q_PLUGIN_CONF_FILE + iniset $NEUTRON_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL + + # Configure agent for plugin + neutron_plugin_configure_plugin_agent +} + +function _replace_api_paste_composite { + local sep + sep=$(echo -ne "\x01") + # Replace it + $sudo sed -i -e "s/\/\: neutronversions_composite/\/"${NEUTRON_ENDPOINT_SERVICE_NAME}"\/\: neutronversions_composite/" "$Q_API_PASTE_FILE" + $sudo sed -i -e "s/\/healthcheck\: healthcheck/\/"${NEUTRON_ENDPOINT_SERVICE_NAME}"\/healthcheck\: healthcheck/" "$Q_API_PASTE_FILE" + $sudo sed -i -e "s/\/v2.0\: neutronapi_v2_0/\/"${NEUTRON_ENDPOINT_SERVICE_NAME}"\/v2.0\: neutronapi_v2_0/" "$Q_API_PASTE_FILE" +} + +# _configure_neutron_service() - Set config files for neutron service +# It is called when q-svc is enabled. 
+function _configure_neutron_service { + Q_API_PASTE_FILE=$NEUTRON_CONF_DIR/api-paste.ini + cp $NEUTRON_DIR/etc/api-paste.ini $Q_API_PASTE_FILE + + if [[ -n "$NEUTRON_ENDPOINT_SERVICE_NAME" ]]; then + _replace_api_paste_composite fi + + # Update either configuration file with plugin + iniset $NEUTRON_CONF DEFAULT core_plugin $Q_PLUGIN_CLASS + + iniset $NEUTRON_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL + iniset $NEUTRON_CONF oslo_policy policy_file $Q_POLICY_FILE + + iniset $NEUTRON_CONF DEFAULT auth_strategy $Q_AUTH_STRATEGY + configure_keystone_authtoken_middleware $NEUTRON_CONF $Q_ADMIN_USERNAME + + # Configuration for neutron notifications to nova. + iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_status_changes $Q_NOTIFY_NOVA_PORT_STATUS_CHANGES + iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_data_changes $Q_NOTIFY_NOVA_PORT_DATA_CHANGES + + configure_keystone_authtoken_middleware $NEUTRON_CONF nova nova + + # Configuration for placement client + configure_keystone_authtoken_middleware $NEUTRON_CONF placement placement + + # Configure plugin + neutron_plugin_configure_service } +# Utility Functions +#------------------ + +# neutron_service_plugin_class_add() - add service plugin class function neutron_service_plugin_class_add { - if is_neutron_legacy_enabled; then - # Call back to old function - _neutron_service_plugin_class_add "$@" - else - neutron_service_plugin_class_add_new "$@" + local service_plugin_class=$1 + if [[ $Q_SERVICE_PLUGIN_CLASSES == '' ]]; then + Q_SERVICE_PLUGIN_CLASSES=$service_plugin_class + elif [[ ! ,${Q_SERVICE_PLUGIN_CLASSES}, =~ ,${service_plugin_class}, ]]; then + Q_SERVICE_PLUGIN_CLASSES="$Q_SERVICE_PLUGIN_CLASSES,$service_plugin_class" fi } +# neutron_ml2_extension_driver_add() - add ML2 extension driver function neutron_ml2_extension_driver_add { - if is_neutron_legacy_enabled; then - # Call back to old function - _neutron_ml2_extension_driver_add_old "$@" - else - _neutron_ml2_extension_driver_add "$@" + local extension=$1 + if [[ $Q_ML2_PLUGIN_EXT_DRIVERS == '' ]]; then + Q_ML2_PLUGIN_EXT_DRIVERS=$extension + elif [[ ! ,${Q_ML2_PLUGIN_EXT_DRIVERS}, =~ ,${extension}, ]]; then + Q_ML2_PLUGIN_EXT_DRIVERS="$Q_ML2_PLUGIN_EXT_DRIVERS,$extension" fi } -function install_neutron_agent_packages { - if is_neutron_legacy_enabled; then - # Call back to old function - install_neutron_agent_packages_mutnauq "$@" - else - : - fi +# neutron_server_config_add() - add server config file +function neutron_server_config_add { + _Q_PLUGIN_EXTRA_CONF_FILES_ABS+=($1) } -function neutron_server_config_add { - if is_neutron_legacy_enabled; then - # Call back to old function - mutnauq_server_config_add "$@" - else - neutron_server_config_add_new "$@" +# neutron_deploy_rootwrap_filters() - deploy rootwrap filters to $Q_CONF_ROOTWRAP_D (owned by root). 
+function neutron_deploy_rootwrap_filters { + if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then + return fi + local srcdir=$1 + sudo install -d -o root -m 755 $Q_CONF_ROOTWRAP_D + sudo install -o root -m 644 $srcdir/etc/neutron/rootwrap.d/* $Q_CONF_ROOTWRAP_D/ } -function start_neutron { - if is_neutron_legacy_enabled; then - # Call back to old function - start_mutnauq_l2_agent "$@" - start_mutnauq_other_agents "$@" +# _neutron_setup_rootwrap() - configure Neutron's rootwrap +function _neutron_setup_rootwrap { + if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then + return + fi + # Wipe any existing ``rootwrap.d`` files first + Q_CONF_ROOTWRAP_D=$NEUTRON_CONF_DIR/rootwrap.d + if [[ -d $Q_CONF_ROOTWRAP_D ]]; then + sudo rm -rf $Q_CONF_ROOTWRAP_D + fi + + neutron_deploy_rootwrap_filters $NEUTRON_DIR + + # Set up ``rootwrap.conf``, pointing to ``$NEUTRON_CONF_DIR/rootwrap.d`` + # location moved in newer versions, prefer new location + if test -r $NEUTRON_DIR/etc/neutron/rootwrap.conf; then + sudo install -o root -g root -m 644 $NEUTRON_DIR/etc/neutron/rootwrap.conf $Q_RR_CONF_FILE else - start_neutron_new "$@" + sudo install -o root -g root -m 644 $NEUTRON_DIR/etc/rootwrap.conf $Q_RR_CONF_FILE fi + sudo sed -e "s:^filters_path=.*$:filters_path=$Q_CONF_ROOTWRAP_D:" -i $Q_RR_CONF_FILE + sudo sed -e 's:^exec_dirs=\(.*\)$:exec_dirs=\1,/usr/local/bin:' -i $Q_RR_CONF_FILE + + # Specify ``rootwrap.conf`` as first parameter to neutron-rootwrap + ROOTWRAP_SUDOER_CMD="$NEUTRON_ROOTWRAP $Q_RR_CONF_FILE *" + ROOTWRAP_DAEMON_SUDOER_CMD="$NEUTRON_ROOTWRAP-daemon $Q_RR_CONF_FILE" + + # Set up the rootwrap sudoers for neutron + TEMPFILE=`mktemp` + echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_SUDOER_CMD" >$TEMPFILE + echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_DAEMON_SUDOER_CMD" >>$TEMPFILE + chmod 0440 $TEMPFILE + sudo chown root:root $TEMPFILE + sudo mv $TEMPFILE /etc/sudoers.d/neutron-rootwrap + + # Update the root_helper + configure_root_helper_options $NEUTRON_CONF } -function stop_neutron { - if is_neutron_legacy_enabled; then - # Call back to old function - stop_mutnauq "$@" - else - stop_neutron_new "$@" +function configure_root_helper_options { + local conffile=$1 + iniset $conffile agent root_helper "$Q_RR_COMMAND" + if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then + iniset $conffile agent root_helper_daemon "$Q_RR_DAEMON_COMMAND" fi } -function neutron_deploy_rootwrap_filters { - if is_neutron_legacy_enabled; then - # Call back to old function - _neutron_deploy_rootwrap_filters "$@" - else - neutron_deploy_rootwrap_filters_new "$@" +function _neutron_setup_interface_driver { + + # ovs_use_veth needs to be set before the plugin configuration + # occurs to allow plugins to override the setting. 
+ iniset $1 DEFAULT ovs_use_veth $Q_OVS_USE_VETH + + neutron_plugin_setup_interface_driver $1 +} +# Functions for Neutron Exercises +#-------------------------------- + +function delete_probe { + local from_net="$1" + net_id=`_get_net_id $from_net` + probe_id=`neutron-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-list -c id -c network_id | grep $net_id | awk '{print $2}'` + neutron-debug --os-tenant-name admin --os-username admin probe-delete $probe_id +} + +function _get_net_id { + openstack --os-cloud devstack-admin --os-region-name="$REGION_NAME" --os-project-name admin --os-username admin --os-password $ADMIN_PASSWORD network list | grep $1 | awk '{print $2}' +} + +function _get_probe_cmd_prefix { + local from_net="$1" + net_id=`_get_net_id $from_net` + probe_id=`neutron-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-list -c id -c network_id | grep $net_id | awk '{print $2}' | head -n 1` + echo "$Q_RR_COMMAND ip netns exec qprobe-$probe_id" +} + +# ssh check +function _ssh_check_neutron { + local from_net=$1 + local key_file=$2 + local ip=$3 + local user=$4 + local timeout_sec=$5 + local probe_cmd = "" + probe_cmd=`_get_probe_cmd_prefix $from_net` + local testcmd="$probe_cmd ssh -o StrictHostKeyChecking=no -i $key_file ${user}@$ip echo success" + test_with_retry "$testcmd" "server $ip didn't become ssh-able" $timeout_sec +} + +function plugin_agent_add_l2_agent_extension { + local l2_agent_extension=$1 + if [[ -z "$L2_AGENT_EXTENSIONS" ]]; then + L2_AGENT_EXTENSIONS=$l2_agent_extension + elif [[ ! ,${L2_AGENT_EXTENSIONS}, =~ ,${l2_agent_extension}, ]]; then + L2_AGENT_EXTENSIONS+=",$l2_agent_extension" fi } # Restore xtrace -$XTRACE +$_XTRACE_NEUTRON + +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: diff --git a/lib/neutron-legacy b/lib/neutron-legacy index baf67f209e..e90400fec1 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -1,1097 +1,6 @@ #!/bin/bash -# -# lib/neutron -# functions - functions specific to neutron -# Dependencies: -# ``functions`` file -# ``DEST`` must be defined -# ``STACK_USER`` must be defined +# TODO(slaweq): remove that file when other projects, like e.g. Grenade will +# be using lib/neutron -# ``stack.sh`` calls the entry points in this order: -# -# - install_neutron_agent_packages -# - install_neutronclient -# - install_neutron -# - install_neutron_third_party -# - configure_neutron -# - init_neutron -# - configure_neutron_third_party -# - init_neutron_third_party -# - start_neutron_third_party -# - create_nova_conf_neutron -# - configure_neutron_after_post_config -# - start_neutron_service_and_check -# - check_neutron_third_party_integration -# - start_neutron_agents -# - create_neutron_initial_network -# -# ``unstack.sh`` calls the entry points in this order: -# -# - stop_neutron -# - stop_neutron_third_party -# - cleanup_neutron - -# Functions in lib/neutron are classified into the following categories: -# -# - entry points (called from stack.sh or unstack.sh) -# - internal functions -# - neutron exercises -# - 3rd party programs - - -# Neutron Networking -# ------------------ - -# Make sure that neutron is enabled in ``ENABLED_SERVICES``. If you want -# to run Neutron on this host, make sure that q-svc is also in -# ``ENABLED_SERVICES``. -# -# See "Neutron Network Configuration" below for additional variables -# that must be set in localrc for connectivity across hosts with -# Neutron. 
- -# Settings -# -------- - - -# Neutron Network Configuration -# ----------------------------- - -if is_service_enabled tls-proxy; then - Q_PROTOCOL="https" -fi - - -# Set up default directories -GITDIR["python-neutronclient"]=$DEST/python-neutronclient - - -NEUTRON_DIR=$DEST/neutron -NEUTRON_FWAAS_DIR=$DEST/neutron-fwaas - -# Support entry points installation of console scripts -if [[ -d $NEUTRON_DIR/bin/neutron-server ]]; then - NEUTRON_BIN_DIR=$NEUTRON_DIR/bin -else - NEUTRON_BIN_DIR=$(get_python_exec_prefix) -fi - -NEUTRON_CONF_DIR=/etc/neutron -NEUTRON_CONF=$NEUTRON_CONF_DIR/neutron.conf -export NEUTRON_TEST_CONFIG_FILE=${NEUTRON_TEST_CONFIG_FILE:-"$NEUTRON_CONF_DIR/debug.ini"} - -# NEUTRON_DEPLOY_MOD_WSGI defines how neutron is deployed, allowed values: -# - False (default) : Run neutron under Eventlet -# - True : Run neutron under uwsgi -# TODO(annp): Switching to uwsgi in next cycle if things turn out to be stable -# enough -NEUTRON_DEPLOY_MOD_WSGI=$(trueorfalse False NEUTRON_DEPLOY_MOD_WSGI) - -NEUTRON_UWSGI_CONF=$NEUTRON_CONF_DIR/neutron-api-uwsgi.ini - -# If NEUTRON_ENFORCE_SCOPE == True, it will set "enforce_scope" -# and "enforce_new_defaults" to True in the Neutron's config to enforce usage -# of the new RBAC policies and scopes. -NEUTRON_ENFORCE_SCOPE=$(trueorfalse False NEUTRON_ENFORCE_SCOPE) - -# Agent binaries. Note, binary paths for other agents are set in per-service -# scripts in lib/neutron_plugins/services/ -AGENT_DHCP_BINARY="$NEUTRON_BIN_DIR/neutron-dhcp-agent" -AGENT_L3_BINARY=${AGENT_L3_BINARY:-"$NEUTRON_BIN_DIR/neutron-l3-agent"} -AGENT_META_BINARY="$NEUTRON_BIN_DIR/neutron-metadata-agent" - -# Agent config files. Note, plugin-specific Q_PLUGIN_CONF_FILE is set and -# loaded from per-plugin scripts in lib/neutron_plugins/ -Q_DHCP_CONF_FILE=$NEUTRON_CONF_DIR/dhcp_agent.ini -Q_L3_CONF_FILE=$NEUTRON_CONF_DIR/l3_agent.ini -Q_META_CONF_FILE=$NEUTRON_CONF_DIR/metadata_agent.ini - -# Default name for Neutron database -Q_DB_NAME=${Q_DB_NAME:-neutron} -# Default Neutron Plugin -Q_PLUGIN=${Q_PLUGIN:-ml2} -# Default Neutron Port -Q_PORT=${Q_PORT:-9696} -# Default Neutron Internal Port when using TLS proxy -Q_PORT_INT=${Q_PORT_INT:-19696} -# Default Neutron Host -Q_HOST=${Q_HOST:-$SERVICE_HOST} -# Default protocol -Q_PROTOCOL=${Q_PROTOCOL:-$SERVICE_PROTOCOL} -# Default listen address -Q_LISTEN_ADDRESS=${Q_LISTEN_ADDRESS:-$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)} -# Default admin username -Q_ADMIN_USERNAME=${Q_ADMIN_USERNAME:-neutron} -# Default auth strategy -Q_AUTH_STRATEGY=${Q_AUTH_STRATEGY:-keystone} -# RHEL's support for namespaces requires using veths with ovs -Q_OVS_USE_VETH=${Q_OVS_USE_VETH:-False} -Q_USE_ROOTWRAP=${Q_USE_ROOTWRAP:-True} -Q_USE_ROOTWRAP_DAEMON=$(trueorfalse True Q_USE_ROOTWRAP_DAEMON) -# Meta data IP -Q_META_DATA_IP=${Q_META_DATA_IP:-$(ipv6_unquote $SERVICE_HOST)} -# Allow Overlapping IP among subnets -Q_ALLOW_OVERLAPPING_IP=${Q_ALLOW_OVERLAPPING_IP:-True} -Q_NOTIFY_NOVA_PORT_STATUS_CHANGES=${Q_NOTIFY_NOVA_PORT_STATUS_CHANGES:-True} -Q_NOTIFY_NOVA_PORT_DATA_CHANGES=${Q_NOTIFY_NOVA_PORT_DATA_CHANGES:-True} -VIF_PLUGGING_IS_FATAL=${VIF_PLUGGING_IS_FATAL:-True} -VIF_PLUGGING_TIMEOUT=${VIF_PLUGGING_TIMEOUT:-300} - -# Allow to skip stopping of OVN services -SKIP_STOP_OVN=${SKIP_STOP_OVN:-False} - -# The directory which contains files for Q_PLUGIN_EXTRA_CONF_FILES. -# /etc/neutron is assumed by many of devstack plugins. Do not change. 
-_Q_PLUGIN_EXTRA_CONF_PATH=/etc/neutron - -# The name of the service in the endpoint URL -NEUTRON_ENDPOINT_SERVICE_NAME=${NEUTRON_ENDPOINT_SERVICE_NAME-"networking"} -if [[ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" && -z "$NEUTRON_ENDPOINT_SERVICE_NAME" ]]; then - NEUTRON_ENDPOINT_SERVICE_NAME="networking" -fi - -# List of config file names in addition to the main plugin config file -# To add additional plugin config files, use ``neutron_server_config_add`` -# utility function. For example: -# -# ``neutron_server_config_add file1`` -# -# These config files are relative to ``/etc/neutron``. The above -# example would specify ``--config-file /etc/neutron/file1`` for -# neutron server. -declare -a -g Q_PLUGIN_EXTRA_CONF_FILES - -# same as Q_PLUGIN_EXTRA_CONF_FILES, but with absolute path. -declare -a -g _Q_PLUGIN_EXTRA_CONF_FILES_ABS - - -Q_RR_CONF_FILE=$NEUTRON_CONF_DIR/rootwrap.conf -if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then - Q_RR_COMMAND="sudo" -else - NEUTRON_ROOTWRAP=$(get_rootwrap_location neutron) - Q_RR_COMMAND="sudo $NEUTRON_ROOTWRAP $Q_RR_CONF_FILE" - if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then - Q_RR_DAEMON_COMMAND="sudo $NEUTRON_ROOTWRAP-daemon $Q_RR_CONF_FILE" - fi -fi - - -# Distributed Virtual Router (DVR) configuration -# Can be: -# - ``legacy`` - No DVR functionality -# - ``dvr_snat`` - Controller or single node DVR -# - ``dvr`` - Compute node in multi-node DVR -# -Q_DVR_MODE=${Q_DVR_MODE:-legacy} -if [[ "$Q_DVR_MODE" != "legacy" ]]; then - Q_ML2_PLUGIN_MECHANISM_DRIVERS=openvswitch,l2population -fi - -# Provider Network Configurations -# -------------------------------- - -# The following variables control the Neutron ML2 plugins' allocation -# of tenant networks and availability of provider networks. If these -# are not configured in ``localrc``, tenant networks will be local to -# the host (with no remote connectivity), and no physical resources -# will be available for the allocation of provider networks. - -# To disable tunnels (GRE or VXLAN) for tenant networks, -# set to False in ``local.conf``. -# GRE tunnels are only supported by the openvswitch. -ENABLE_TENANT_TUNNELS=${ENABLE_TENANT_TUNNELS:-True} - -# If using GRE, VXLAN or GENEVE tunnels for tenant networks, -# specify the range of IDs from which tenant networks are -# allocated. Can be overridden in ``localrc`` if necessary. -TENANT_TUNNEL_RANGES=${TENANT_TUNNEL_RANGES:-1:1000} - -# To use VLANs for tenant networks, set to True in localrc. VLANs -# are supported by the ML2 plugins, requiring additional configuration -# described below. -ENABLE_TENANT_VLANS=${ENABLE_TENANT_VLANS:-False} - -# If using VLANs for tenant networks, set in ``localrc`` to specify -# the range of VLAN VIDs from which tenant networks are -# allocated. An external network switch must be configured to -# trunk these VLANs between hosts for multi-host connectivity. -# -# Example: ``TENANT_VLAN_RANGE=1000:1999`` -TENANT_VLAN_RANGE=${TENANT_VLAN_RANGE:-} - -# If using VLANs for tenant networks, or if using flat or VLAN -# provider networks, set in ``localrc`` to the name of the physical -# network, and also configure ``OVS_PHYSICAL_BRIDGE`` for the -# openvswitch agent or ``LB_PHYSICAL_INTERFACE`` for the linuxbridge -# agent, as described below. -# -# Example: ``PHYSICAL_NETWORK=default`` -PHYSICAL_NETWORK=${PHYSICAL_NETWORK:-public} - -# With the openvswitch agent, if using VLANs for tenant networks, -# or if using flat or VLAN provider networks, set in ``localrc`` to -# the name of the OVS bridge to use for the physical network. 
The -# bridge will be created if it does not already exist, but a -# physical interface must be manually added to the bridge as a -# port for external connectivity. -# -# Example: ``OVS_PHYSICAL_BRIDGE=br-eth1`` -OVS_PHYSICAL_BRIDGE=${OVS_PHYSICAL_BRIDGE:-br-ex} - -# With the linuxbridge agent, if using VLANs for tenant networks, -# or if using flat or VLAN provider networks, set in ``localrc`` to -# the name of the network interface to use for the physical -# network. -# -# Example: ``LB_PHYSICAL_INTERFACE=eth1`` -if [[ $Q_AGENT == "linuxbridge" && -z ${LB_PHYSICAL_INTERFACE} ]]; then - default_route_dev=$( (ip route; ip -6 route) | grep ^default | head -n 1 | awk '{print $5}') - die_if_not_set $LINENO default_route_dev "Failure retrieving default route device" - LB_PHYSICAL_INTERFACE=$default_route_dev -fi - -# With the openvswitch plugin, set to True in ``localrc`` to enable -# provider GRE tunnels when ``ENABLE_TENANT_TUNNELS`` is False. -# -# Example: ``OVS_ENABLE_TUNNELING=True`` -OVS_ENABLE_TUNNELING=${OVS_ENABLE_TUNNELING:-$ENABLE_TENANT_TUNNELS} - -# Use DHCP agent for providing metadata service in the case of -# without L3 agent (No Route Agent), set to True in localrc. -ENABLE_ISOLATED_METADATA=${ENABLE_ISOLATED_METADATA:-False} - -# Add a static route as dhcp option, so the request to 169.254.169.254 -# will be able to reach through a route(DHCP agent) -# This option require ENABLE_ISOLATED_METADATA = True -ENABLE_METADATA_NETWORK=${ENABLE_METADATA_NETWORK:-False} -# Neutron plugin specific functions -# --------------------------------- - -# Please refer to ``lib/neutron_plugins/README.md`` for details. -if [ -f $TOP_DIR/lib/neutron_plugins/$Q_PLUGIN ]; then - source $TOP_DIR/lib/neutron_plugins/$Q_PLUGIN -fi - -# Agent metering service plugin functions -# ------------------------------------------- - -# Hardcoding for 1 service plugin for now -source $TOP_DIR/lib/neutron_plugins/services/metering - -# L3 Service functions -source $TOP_DIR/lib/neutron_plugins/services/l3 - -# Additional Neutron service plugins -source $TOP_DIR/lib/neutron_plugins/services/placement -source $TOP_DIR/lib/neutron_plugins/services/trunk -source $TOP_DIR/lib/neutron_plugins/services/qos - -# Use security group or not -if has_neutron_plugin_security_group; then - Q_USE_SECGROUP=${Q_USE_SECGROUP:-True} -else - Q_USE_SECGROUP=False -fi - -# Save trace setting -_XTRACE_NEUTRON=$(set +o | grep xtrace) -set +o xtrace - - -# Functions -# --------- - -function _determine_config_server { - if [[ "$Q_PLUGIN_EXTRA_CONF_PATH" != '' ]]; then - if [[ "$Q_PLUGIN_EXTRA_CONF_PATH" = "$_Q_PLUGIN_EXTRA_CONF_PATH" ]]; then - deprecated "Q_PLUGIN_EXTRA_CONF_PATH is deprecated" - else - die $LINENO "Q_PLUGIN_EXTRA_CONF_PATH is deprecated" - fi - fi - if [[ ${#Q_PLUGIN_EXTRA_CONF_FILES[@]} > 0 ]]; then - deprecated "Q_PLUGIN_EXTRA_CONF_FILES is deprecated. Use neutron_server_config_add instead." - fi - for cfg_file in ${Q_PLUGIN_EXTRA_CONF_FILES[@]}; do - _Q_PLUGIN_EXTRA_CONF_FILES_ABS+=($_Q_PLUGIN_EXTRA_CONF_PATH/$cfg_file) - done - - local cfg_file - local opts="--config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE" - for cfg_file in ${_Q_PLUGIN_EXTRA_CONF_FILES_ABS[@]}; do - opts+=" --config-file $cfg_file" - done - echo "$opts" -} - -function _determine_config_l3 { - local opts="--config-file $NEUTRON_CONF --config-file $Q_L3_CONF_FILE" - echo "$opts" -} - -# For services and agents that require it, dynamically construct a list of -# --config-file arguments that are passed to the binary. 
-function determine_config_files { - local opts="" - case "$1" in - "neutron-server") opts="$(_determine_config_server)" ;; - "neutron-l3-agent") opts="$(_determine_config_l3)" ;; - esac - if [ -z "$opts" ] ; then - die $LINENO "Could not determine config files for $1." - fi - echo "$opts" -} - -# configure_mutnauq() -# Set common config for all neutron server and agents. -function configure_mutnauq { - _configure_neutron_common - iniset_rpc_backend neutron $NEUTRON_CONF - - if is_service_enabled q-metering; then - _configure_neutron_metering - fi - if is_service_enabled q-agt q-svc; then - _configure_neutron_service - fi - if is_service_enabled q-agt; then - _configure_neutron_plugin_agent - fi - if is_service_enabled q-dhcp; then - _configure_neutron_dhcp_agent - fi - if is_service_enabled q-l3; then - _configure_neutron_l3_agent - fi - if is_service_enabled q-meta; then - _configure_neutron_metadata_agent - fi - - if [[ "$Q_DVR_MODE" != "legacy" ]]; then - _configure_dvr - fi - if is_service_enabled ceilometer; then - _configure_neutron_ceilometer_notifications - fi - - if [[ $Q_AGENT == "ovn" ]]; then - configure_ovn - configure_ovn_plugin - fi - - # Configure Neutron's advanced services - if is_service_enabled q-placement neutron-placement; then - configure_placement_extension - fi - if is_service_enabled q-trunk neutron-trunk; then - configure_trunk_extension - fi - if is_service_enabled q-qos neutron-qos; then - configure_qos - if is_service_enabled q-l3 neutron-l3; then - configure_l3_agent_extension_fip_qos - configure_l3_agent_extension_gateway_ip_qos - fi - fi - - iniset $NEUTRON_CONF DEFAULT api_workers "$API_WORKERS" - # devstack is not a tool for running uber scale OpenStack - # clouds, therefore running without a dedicated RPC worker - # for state reports is more than adequate. - iniset $NEUTRON_CONF DEFAULT rpc_state_report_workers 0 -} - -function create_nova_conf_neutron { - local conf=${1:-$NOVA_CONF} - iniset $conf neutron auth_type "password" - iniset $conf neutron auth_url "$KEYSTONE_SERVICE_URI" - iniset $conf neutron username "$Q_ADMIN_USERNAME" - iniset $conf neutron password "$SERVICE_PASSWORD" - iniset $conf neutron user_domain_name "$SERVICE_DOMAIN_NAME" - iniset $conf neutron project_name "$SERVICE_PROJECT_NAME" - iniset $conf neutron project_domain_name "$SERVICE_DOMAIN_NAME" - iniset $conf neutron auth_strategy "$Q_AUTH_STRATEGY" - iniset $conf neutron region_name "$REGION_NAME" - - # optionally set options in nova_conf - neutron_plugin_create_nova_conf $conf - - if is_service_enabled q-meta; then - iniset $conf neutron service_metadata_proxy "True" - fi - - iniset $conf DEFAULT vif_plugging_is_fatal "$VIF_PLUGGING_IS_FATAL" - iniset $conf DEFAULT vif_plugging_timeout "$VIF_PLUGGING_TIMEOUT" -} - -# create_mutnauq_accounts() - Set up common required neutron accounts - -# Tenant User Roles -# ------------------------------------------------------------------ -# service neutron admin # if enabled - -# Migrated from keystone_data.sh -function create_mutnauq_accounts { - local neutron_url - if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then - neutron_url=$Q_PROTOCOL://$SERVICE_HOST/ - else - neutron_url=$Q_PROTOCOL://$SERVICE_HOST:$Q_PORT/ - fi - if [ ! 
-z "$NEUTRON_ENDPOINT_SERVICE_NAME" ]; then - neutron_url=$neutron_url$NEUTRON_ENDPOINT_SERVICE_NAME - fi - - if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then - - create_service_user "neutron" - - get_or_create_service "neutron" "network" "Neutron Service" - get_or_create_endpoint \ - "network" \ - "$REGION_NAME" "$neutron_url" - fi -} - -# init_mutnauq() - Initialize databases, etc. -function init_mutnauq { - recreate_database $Q_DB_NAME - time_start "dbsync" - # Run Neutron db migrations - $NEUTRON_BIN_DIR/neutron-db-manage --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE upgrade head - time_stop "dbsync" -} - -# install_mutnauq() - Collect source and prepare -function install_mutnauq { - # Install neutron-lib from git so we make sure we're testing - # the latest code. - if use_library_from_git "neutron-lib"; then - git_clone_by_name "neutron-lib" - setup_dev_lib "neutron-lib" - fi - - git_clone $NEUTRON_REPO $NEUTRON_DIR $NEUTRON_BRANCH - setup_develop $NEUTRON_DIR - - if [[ $Q_AGENT == "ovn" ]]; then - install_ovn - fi -} - -# install_neutron_agent_packages() - Collect source and prepare -function install_neutron_agent_packages_mutnauq { - # radvd doesn't come with the OS. Install it if the l3 service is enabled. - if is_service_enabled q-l3; then - install_package radvd - fi - # install packages that are specific to plugin agent(s) - if is_service_enabled q-agt q-dhcp q-l3; then - neutron_plugin_install_agent_packages - fi -} - -# Finish neutron configuration -function configure_neutron_after_post_config { - if [[ $Q_SERVICE_PLUGIN_CLASSES != '' ]]; then - iniset $NEUTRON_CONF DEFAULT service_plugins $Q_SERVICE_PLUGIN_CLASSES - fi - configure_rbac_policies -} - -# configure_rbac_policies() - Configure Neutron to enforce new RBAC -# policies and scopes if NEUTRON_ENFORCE_SCOPE == True -function configure_rbac_policies { - if [[ "$NEUTRON_ENFORCE_SCOPE" == "True" || "$ENFORCE_SCOPE" == True ]]; then - iniset $NEUTRON_CONF oslo_policy enforce_new_defaults True - iniset $NEUTRON_CONF oslo_policy enforce_scope True - else - iniset $NEUTRON_CONF oslo_policy enforce_new_defaults False - iniset $NEUTRON_CONF oslo_policy enforce_scope False - fi -} - -# Start running OVN processes -function start_ovn_services { - if [[ $Q_AGENT == "ovn" ]]; then - init_ovn - start_ovn - if [[ "$OVN_L3_CREATE_PUBLIC_NETWORK" == "True" ]]; then - if [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" != "True" ]]; then - echo "OVN_L3_CREATE_PUBLIC_NETWORK=True is being ignored " - echo "because NEUTRON_CREATE_INITIAL_NETWORKS is set to False" - else - create_public_bridge - fi - fi - fi -} - -# Start running processes -function start_neutron_service_and_check { - local service_port=$Q_PORT - local service_protocol=$Q_PROTOCOL - local cfg_file_options - local neutron_url - - cfg_file_options="$(determine_config_files neutron-server)" - - if is_service_enabled tls-proxy; then - service_port=$Q_PORT_INT - service_protocol="http" - fi - # Start the Neutron service - if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then - enable_service neutron-api - run_process neutron-api "$(which uwsgi) --procname-prefix neutron-api --ini $NEUTRON_UWSGI_CONF" - neutron_url=$Q_PROTOCOL://$Q_HOST/ - enable_service neutron-rpc-server - run_process neutron-rpc-server "$NEUTRON_BIN_DIR/neutron-rpc-server $cfg_file_options" - else - run_process q-svc "$NEUTRON_BIN_DIR/neutron-server $cfg_file_options" - neutron_url=$service_protocol://$Q_HOST:$service_port/ - # Start proxy if enabled - if is_service_enabled tls-proxy; then - start_tls_proxy 
neutron '*' $Q_PORT $Q_HOST $Q_PORT_INT - fi - fi - if [ ! -z "$NEUTRON_ENDPOINT_SERVICE_NAME" ]; then - neutron_url=$neutron_url$NEUTRON_ENDPOINT_SERVICE_NAME - fi - echo "Waiting for Neutron to start..." - - local testcmd="wget ${ssl_ca} --no-proxy -q -O- $neutron_url" - test_with_retry "$testcmd" "Neutron did not start" $SERVICE_TIMEOUT -} - -# Control of the l2 agent is separated out to make it easier to test partial -# upgrades (everything upgraded except the L2 agent) -function start_mutnauq_l2_agent { - run_process q-agt "$AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE" - - if is_provider_network && [[ $Q_AGENT == "openvswitch" ]]; then - sudo ovs-vsctl --no-wait -- --may-exist add-port $OVS_PHYSICAL_BRIDGE $PUBLIC_INTERFACE - sudo ip link set $OVS_PHYSICAL_BRIDGE up - sudo ip link set br-int up - sudo ip link set $PUBLIC_INTERFACE up - if is_ironic_hardware; then - for IP in $(ip addr show dev $PUBLIC_INTERFACE | grep ' inet ' | awk '{print $2}'); do - sudo ip addr del $IP dev $PUBLIC_INTERFACE - sudo ip addr add $IP dev $OVS_PHYSICAL_BRIDGE - done - sudo ip route replace $FIXED_RANGE via $NETWORK_GATEWAY dev $OVS_PHYSICAL_BRIDGE - fi - fi -} - -function start_mutnauq_other_agents { - run_process q-dhcp "$AGENT_DHCP_BINARY --config-file $NEUTRON_CONF --config-file $Q_DHCP_CONF_FILE" - - run_process q-l3 "$AGENT_L3_BINARY $(determine_config_files neutron-l3-agent)" - - run_process q-meta "$AGENT_META_BINARY --config-file $NEUTRON_CONF --config-file $Q_META_CONF_FILE" - run_process q-metering "$AGENT_METERING_BINARY --config-file $NEUTRON_CONF --config-file $METERING_AGENT_CONF_FILENAME" -} - -# Start running processes, including screen -function start_neutron_agents { - # Start up the neutron agents if enabled - start_mutnauq_l2_agent - start_mutnauq_other_agents -} - -function stop_mutnauq_l2_agent { - stop_process q-agt -} - -# stop_mutnauq_other() - Stop running processes -function stop_mutnauq_other { - if is_service_enabled q-dhcp; then - stop_process q-dhcp - pid=$(ps aux | awk '/[d]nsmasq.+interface=(tap|ns-)/ { print $2 }') - [ ! -z "$pid" ] && sudo kill -9 $pid - fi - - if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then - stop_process neutron-rpc-server - stop_process neutron-api - else - stop_process q-svc - fi - - if is_service_enabled q-l3; then - sudo pkill -f "radvd -C $DATA_DIR/neutron/ra" - stop_process q-l3 - fi - - if is_service_enabled q-meta; then - stop_process q-meta - fi - - if is_service_enabled q-metering; then - neutron_metering_stop - fi - - if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then - sudo pkill -9 -f $NEUTRON_ROOTWRAP-daemon || : - fi -} - -# stop_neutron() - Stop running processes (non-screen) -function stop_mutnauq { - stop_mutnauq_other - stop_mutnauq_l2_agent - - if [[ $Q_AGENT == "ovn" && $SKIP_STOP_OVN != "True" ]]; then - stop_ovn - fi -} - -# _move_neutron_addresses_route() - Move the primary IP to the OVS bridge -# on startup, or back to the public interface on cleanup. If no IP is -# configured on the interface, just add it as a port to the OVS bridge. -function _move_neutron_addresses_route { - local from_intf=$1 - local to_intf=$2 - local add_ovs_port=$3 - local del_ovs_port=$4 - local af=$5 - - if [[ -n "$from_intf" && -n "$to_intf" ]]; then - # Remove the primary IP address from $from_intf and add it to $to_intf, - # along with the default route, if it exists. Also, when called - # on configure we will also add $from_intf as a port on $to_intf, - # assuming it is an OVS bridge. 
- - local IP_REPLACE="" - local IP_DEL="" - local IP_UP="" - local DEFAULT_ROUTE_GW - DEFAULT_ROUTE_GW=$(ip -f $af r | awk "/default.+$from_intf\s/ { print \$3; exit }") - local ADD_OVS_PORT="" - local DEL_OVS_PORT="" - local ARP_CMD="" - - IP_BRD=$(ip -f $af a s dev $from_intf scope global primary | grep inet | awk '{ print $2, $3, $4; exit }') - - if [ "$DEFAULT_ROUTE_GW" != "" ]; then - ADD_DEFAULT_ROUTE="sudo ip -f $af r replace default via $DEFAULT_ROUTE_GW dev $to_intf" - fi - - if [[ "$add_ovs_port" == "True" ]]; then - ADD_OVS_PORT="sudo ovs-vsctl --may-exist add-port $to_intf $from_intf" - fi - - if [[ "$del_ovs_port" == "True" ]]; then - DEL_OVS_PORT="sudo ovs-vsctl --if-exists del-port $from_intf $to_intf" - fi - - if [[ "$IP_BRD" != "" ]]; then - IP_DEL="sudo ip addr del $IP_BRD dev $from_intf" - IP_REPLACE="sudo ip addr replace $IP_BRD dev $to_intf" - IP_UP="sudo ip link set $to_intf up" - if [[ "$af" == "inet" ]]; then - IP=$(echo $IP_BRD | awk '{ print $1; exit }' | grep -o -E '(.*)/' | cut -d "/" -f1) - ARP_CMD="sudo arping -A -c 3 -w 5 -I $to_intf $IP " - fi - fi - - # The add/del OVS port calls have to happen either before or - # after the address is moved in order to not leave it orphaned. - $DEL_OVS_PORT; $IP_DEL; $IP_REPLACE; $IP_UP; $ADD_OVS_PORT; $ADD_DEFAULT_ROUTE; $ARP_CMD - fi -} - -# _configure_public_network_connectivity() - Configures connectivity to the -# external network using $PUBLIC_INTERFACE or NAT on the single interface -# machines -function _configure_public_network_connectivity { - # If we've given a PUBLIC_INTERFACE to take over, then we assume - # that we can own the whole thing, and privot it into the OVS - # bridge. If we are not, we're probably on a single interface - # machine, and we just setup NAT so that fixed guests can get out. 
- if [[ -n "$PUBLIC_INTERFACE" ]]; then - _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" True False "inet" - - if [[ $(ip -f inet6 a s dev "$PUBLIC_INTERFACE" | grep -c 'global') != 0 ]]; then - _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" False False "inet6" - fi - else - for d in $default_v4_route_devs; do - sudo iptables -t nat -A POSTROUTING -o $d -s $FLOATING_RANGE -j MASQUERADE - done - fi -} - -# cleanup_mutnauq() - Remove residual data files, anything left over from previous -# runs that a clean run would need to clean up -function cleanup_mutnauq { - - if [[ -n "$OVS_PHYSICAL_BRIDGE" ]]; then - _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False True "inet" - - if [[ $(ip -f inet6 a s dev "$OVS_PHYSICAL_BRIDGE" | grep -c 'global') != 0 ]]; then - # ip(8) wants the prefix length when deleting - local v6_gateway - v6_gateway=$(ip -6 a s dev $OVS_PHYSICAL_BRIDGE | grep $IPV6_PUBLIC_NETWORK_GATEWAY | awk '{ print $2 }') - sudo ip -6 addr del $v6_gateway dev $OVS_PHYSICAL_BRIDGE - _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False False "inet6" - fi - - if is_provider_network && is_ironic_hardware; then - for IP in $(ip addr show dev $OVS_PHYSICAL_BRIDGE | grep ' inet ' | awk '{print $2}'); do - sudo ip addr del $IP dev $OVS_PHYSICAL_BRIDGE - sudo ip addr add $IP dev $PUBLIC_INTERFACE - done - sudo route del -net $FIXED_RANGE gw $NETWORK_GATEWAY dev $OVS_PHYSICAL_BRIDGE - fi - fi - - if is_neutron_ovs_base_plugin; then - neutron_ovs_base_cleanup - fi - - if [[ $Q_AGENT == "linuxbridge" ]]; then - neutron_lb_cleanup - fi - - # delete all namespaces created by neutron - for ns in $(sudo ip netns list | grep -o -E '(qdhcp|qrouter|fip|snat)-[0-9a-f-]*'); do - sudo ip netns delete ${ns} - done - - if [[ $Q_AGENT == "ovn" ]]; then - cleanup_ovn - fi -} - - -function _create_neutron_conf_dir { - # Put config files in ``NEUTRON_CONF_DIR`` for everyone to find - sudo install -d -o $STACK_USER $NEUTRON_CONF_DIR -} - -# _configure_neutron_common() -# Set common config for all neutron server and agents. -# This MUST be called before other ``_configure_neutron_*`` functions. -function _configure_neutron_common { - _create_neutron_conf_dir - - # Uses oslo config generator to generate core sample configuration files - (cd $NEUTRON_DIR && exec ./tools/generate_config_file_samples.sh) - - cp $NEUTRON_DIR/etc/neutron.conf.sample $NEUTRON_CONF - - Q_POLICY_FILE=$NEUTRON_CONF_DIR/policy.json - - # allow neutron user to administer neutron to match neutron account - # NOTE(amotoki): This is required for nova works correctly with neutron. - if [ -f $NEUTRON_DIR/etc/policy.json ]; then - cp $NEUTRON_DIR/etc/policy.json $Q_POLICY_FILE - sed -i 's/"context_is_admin": "role:admin"/"context_is_admin": "role:admin or user_name:neutron"/g' $Q_POLICY_FILE - else - echo '{"context_is_admin": "role:admin or user_name:neutron"}' > $Q_POLICY_FILE - fi - - # Set plugin-specific variables ``Q_DB_NAME``, ``Q_PLUGIN_CLASS``. - # For main plugin config file, set ``Q_PLUGIN_CONF_PATH``, ``Q_PLUGIN_CONF_FILENAME``. - neutron_plugin_configure_common - - if [[ "$Q_PLUGIN_CONF_PATH" == '' || "$Q_PLUGIN_CONF_FILENAME" == '' || "$Q_PLUGIN_CLASS" == '' ]]; then - die $LINENO "Neutron plugin not set.. 
exiting" - fi - - # If needed, move config file from ``$NEUTRON_DIR/etc/neutron`` to ``NEUTRON_CONF_DIR`` - mkdir -p /$Q_PLUGIN_CONF_PATH - Q_PLUGIN_CONF_FILE=$Q_PLUGIN_CONF_PATH/$Q_PLUGIN_CONF_FILENAME - # NOTE(hichihara): Some neutron vendor plugins were already decomposed and - # there is no config file in Neutron tree. They should prepare the file in each plugin. - if [ -f "$NEUTRON_DIR/$Q_PLUGIN_CONF_FILE.sample" ]; then - cp "$NEUTRON_DIR/$Q_PLUGIN_CONF_FILE.sample" /$Q_PLUGIN_CONF_FILE - elif [ -f $NEUTRON_DIR/$Q_PLUGIN_CONF_FILE ]; then - cp $NEUTRON_DIR/$Q_PLUGIN_CONF_FILE /$Q_PLUGIN_CONF_FILE - fi - - iniset $NEUTRON_CONF database connection `database_connection_url $Q_DB_NAME` - iniset $NEUTRON_CONF DEFAULT state_path $DATA_DIR/neutron - iniset $NEUTRON_CONF DEFAULT use_syslog $SYSLOG - iniset $NEUTRON_CONF DEFAULT bind_host $Q_LISTEN_ADDRESS - iniset $NEUTRON_CONF oslo_concurrency lock_path $DATA_DIR/neutron/lock - - # NOTE(freerunner): Need to adjust Region Name for nova in multiregion installation - iniset $NEUTRON_CONF nova region_name $REGION_NAME - - if [ "$VIRT_DRIVER" = 'fake' ]; then - # Disable arbitrary limits - iniset $NEUTRON_CONF quotas quota_network -1 - iniset $NEUTRON_CONF quotas quota_subnet -1 - iniset $NEUTRON_CONF quotas quota_port -1 - iniset $NEUTRON_CONF quotas quota_security_group -1 - iniset $NEUTRON_CONF quotas quota_security_group_rule -1 - fi - - # Format logging - setup_logging $NEUTRON_CONF - - if is_service_enabled tls-proxy && [ "$NEUTRON_DEPLOY_MOD_WSGI" == "False" ]; then - # Set the service port for a proxy to take the original - iniset $NEUTRON_CONF DEFAULT bind_port "$Q_PORT_INT" - iniset $NEUTRON_CONF oslo_middleware enable_proxy_headers_parsing True - fi - - _neutron_setup_rootwrap -} - -function _configure_neutron_dhcp_agent { - - cp $NEUTRON_DIR/etc/dhcp_agent.ini.sample $Q_DHCP_CONF_FILE - - iniset $Q_DHCP_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - # make it so we have working DNS from guests - iniset $Q_DHCP_CONF_FILE DEFAULT dnsmasq_local_resolv True - iniset $Q_DHCP_CONF_FILE AGENT root_helper "$Q_RR_COMMAND" - if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then - iniset $Q_DHCP_CONF_FILE AGENT root_helper_daemon "$Q_RR_DAEMON_COMMAND" - fi - - if ! 
is_service_enabled q-l3; then - if [[ "$ENABLE_ISOLATED_METADATA" = "True" ]]; then - iniset $Q_DHCP_CONF_FILE DEFAULT enable_isolated_metadata $ENABLE_ISOLATED_METADATA - iniset $Q_DHCP_CONF_FILE DEFAULT enable_metadata_network $ENABLE_METADATA_NETWORK - else - if [[ "$ENABLE_METADATA_NETWORK" = "True" ]]; then - die "$LINENO" "Enable isolated metadata is a must for metadata network" - fi - fi - fi - - _neutron_setup_interface_driver $Q_DHCP_CONF_FILE - - neutron_plugin_configure_dhcp_agent $Q_DHCP_CONF_FILE -} - - -function _configure_neutron_metadata_agent { - cp $NEUTRON_DIR/etc/metadata_agent.ini.sample $Q_META_CONF_FILE - - iniset $Q_META_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - iniset $Q_META_CONF_FILE DEFAULT nova_metadata_host $Q_META_DATA_IP - iniset $Q_META_CONF_FILE DEFAULT metadata_workers $API_WORKERS - iniset $Q_META_CONF_FILE AGENT root_helper "$Q_RR_COMMAND" - if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then - iniset $Q_META_CONF_FILE AGENT root_helper_daemon "$Q_RR_DAEMON_COMMAND" - fi -} - -function _configure_neutron_ceilometer_notifications { - iniset $NEUTRON_CONF oslo_messaging_notifications driver messagingv2 -} - -function _configure_neutron_metering { - neutron_agent_metering_configure_common - neutron_agent_metering_configure_agent -} - -function _configure_dvr { - iniset $NEUTRON_CONF DEFAULT router_distributed True - iniset $Q_L3_CONF_FILE DEFAULT agent_mode $Q_DVR_MODE -} - - -# _configure_neutron_plugin_agent() - Set config files for neutron plugin agent -# It is called when q-agt is enabled. -function _configure_neutron_plugin_agent { - # Specify the default root helper prior to agent configuration to - # ensure that an agent's configuration can override the default - iniset /$Q_PLUGIN_CONF_FILE agent root_helper "$Q_RR_COMMAND" - if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then - iniset /$Q_PLUGIN_CONF_FILE agent root_helper_daemon "$Q_RR_DAEMON_COMMAND" - fi - iniset $NEUTRON_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - - # Configure agent for plugin - neutron_plugin_configure_plugin_agent -} - -function _replace_api_paste_composite { - local sep - sep=$(echo -ne "\x01") - # Replace it - $sudo sed -i -e "s/\/\: neutronversions_composite/\/"${NEUTRON_ENDPOINT_SERVICE_NAME}"\/\: neutronversions_composite/" "$Q_API_PASTE_FILE" - $sudo sed -i -e "s/\/healthcheck\: healthcheck/\/"${NEUTRON_ENDPOINT_SERVICE_NAME}"\/healthcheck\: healthcheck/" "$Q_API_PASTE_FILE" - $sudo sed -i -e "s/\/v2.0\: neutronapi_v2_0/\/"${NEUTRON_ENDPOINT_SERVICE_NAME}"\/v2.0\: neutronapi_v2_0/" "$Q_API_PASTE_FILE" -} - -# _configure_neutron_service() - Set config files for neutron service -# It is called when q-svc is enabled. -function _configure_neutron_service { - Q_API_PASTE_FILE=$NEUTRON_CONF_DIR/api-paste.ini - cp $NEUTRON_DIR/etc/api-paste.ini $Q_API_PASTE_FILE - - if [[ -n "$NEUTRON_ENDPOINT_SERVICE_NAME" ]]; then - _replace_api_paste_composite - fi - - # Update either configuration file with plugin - iniset $NEUTRON_CONF DEFAULT core_plugin $Q_PLUGIN_CLASS - - iniset $NEUTRON_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - iniset $NEUTRON_CONF oslo_policy policy_file $Q_POLICY_FILE - - iniset $NEUTRON_CONF DEFAULT auth_strategy $Q_AUTH_STRATEGY - configure_keystone_authtoken_middleware $NEUTRON_CONF $Q_ADMIN_USERNAME - - # Configuration for neutron notifications to nova. 
- iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_status_changes $Q_NOTIFY_NOVA_PORT_STATUS_CHANGES - iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_data_changes $Q_NOTIFY_NOVA_PORT_DATA_CHANGES - - configure_keystone_authtoken_middleware $NEUTRON_CONF nova nova - - # Configuration for placement client - configure_keystone_authtoken_middleware $NEUTRON_CONF placement placement - - # Configure plugin - neutron_plugin_configure_service -} - -# Utility Functions -#------------------ - -# _neutron_service_plugin_class_add() - add service plugin class -function _neutron_service_plugin_class_add { - local service_plugin_class=$1 - if [[ $Q_SERVICE_PLUGIN_CLASSES == '' ]]; then - Q_SERVICE_PLUGIN_CLASSES=$service_plugin_class - elif [[ ! ,${Q_SERVICE_PLUGIN_CLASSES}, =~ ,${service_plugin_class}, ]]; then - Q_SERVICE_PLUGIN_CLASSES="$Q_SERVICE_PLUGIN_CLASSES,$service_plugin_class" - fi -} - -# _neutron_ml2_extension_driver_add_old() - add ML2 extension driver -function _neutron_ml2_extension_driver_add_old { - local extension=$1 - if [[ $Q_ML2_PLUGIN_EXT_DRIVERS == '' ]]; then - Q_ML2_PLUGIN_EXT_DRIVERS=$extension - elif [[ ! ,${Q_ML2_PLUGIN_EXT_DRIVERS}, =~ ,${extension}, ]]; then - Q_ML2_PLUGIN_EXT_DRIVERS="$Q_ML2_PLUGIN_EXT_DRIVERS,$extension" - fi -} - -# mutnauq_server_config_add() - add server config file -function mutnauq_server_config_add { - _Q_PLUGIN_EXTRA_CONF_FILES_ABS+=($1) -} - -# _neutron_deploy_rootwrap_filters() - deploy rootwrap filters to $Q_CONF_ROOTWRAP_D (owned by root). -function _neutron_deploy_rootwrap_filters { - if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then - return - fi - local srcdir=$1 - sudo install -d -o root -m 755 $Q_CONF_ROOTWRAP_D - sudo install -o root -m 644 $srcdir/etc/neutron/rootwrap.d/* $Q_CONF_ROOTWRAP_D/ -} - -# _neutron_setup_rootwrap() - configure Neutron's rootwrap -function _neutron_setup_rootwrap { - if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then - return - fi - # Wipe any existing ``rootwrap.d`` files first - Q_CONF_ROOTWRAP_D=$NEUTRON_CONF_DIR/rootwrap.d - if [[ -d $Q_CONF_ROOTWRAP_D ]]; then - sudo rm -rf $Q_CONF_ROOTWRAP_D - fi - - _neutron_deploy_rootwrap_filters $NEUTRON_DIR - - # Set up ``rootwrap.conf``, pointing to ``$NEUTRON_CONF_DIR/rootwrap.d`` - # location moved in newer versions, prefer new location - if test -r $NEUTRON_DIR/etc/neutron/rootwrap.conf; then - sudo install -o root -g root -m 644 $NEUTRON_DIR/etc/neutron/rootwrap.conf $Q_RR_CONF_FILE - else - sudo install -o root -g root -m 644 $NEUTRON_DIR/etc/rootwrap.conf $Q_RR_CONF_FILE - fi - sudo sed -e "s:^filters_path=.*$:filters_path=$Q_CONF_ROOTWRAP_D:" -i $Q_RR_CONF_FILE - sudo sed -e 's:^exec_dirs=\(.*\)$:exec_dirs=\1,/usr/local/bin:' -i $Q_RR_CONF_FILE - - # Specify ``rootwrap.conf`` as first parameter to neutron-rootwrap - ROOTWRAP_SUDOER_CMD="$NEUTRON_ROOTWRAP $Q_RR_CONF_FILE *" - ROOTWRAP_DAEMON_SUDOER_CMD="$NEUTRON_ROOTWRAP-daemon $Q_RR_CONF_FILE" - - # Set up the rootwrap sudoers for neutron - TEMPFILE=`mktemp` - echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_SUDOER_CMD" >$TEMPFILE - echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_DAEMON_SUDOER_CMD" >>$TEMPFILE - chmod 0440 $TEMPFILE - sudo chown root:root $TEMPFILE - sudo mv $TEMPFILE /etc/sudoers.d/neutron-rootwrap - - # Update the root_helper - iniset $NEUTRON_CONF agent root_helper "$Q_RR_COMMAND" - if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then - iniset $NEUTRON_CONF agent root_helper_daemon "$Q_RR_DAEMON_COMMAND" - fi -} - -function _neutron_setup_interface_driver { - - # ovs_use_veth needs to be set before the 
plugin configuration - # occurs to allow plugins to override the setting. - iniset $1 DEFAULT ovs_use_veth $Q_OVS_USE_VETH - - neutron_plugin_setup_interface_driver $1 -} -# Functions for Neutron Exercises -#-------------------------------- - -function delete_probe { - local from_net="$1" - net_id=`_get_net_id $from_net` - probe_id=`neutron-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-list -c id -c network_id | grep $net_id | awk '{print $2}'` - neutron-debug --os-tenant-name admin --os-username admin probe-delete $probe_id -} - -function _get_net_id { - openstack --os-cloud devstack-admin --os-region-name="$REGION_NAME" --os-project-name admin --os-username admin --os-password $ADMIN_PASSWORD network list | grep $1 | awk '{print $2}' -} - -function _get_probe_cmd_prefix { - local from_net="$1" - net_id=`_get_net_id $from_net` - probe_id=`neutron-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-list -c id -c network_id | grep $net_id | awk '{print $2}' | head -n 1` - echo "$Q_RR_COMMAND ip netns exec qprobe-$probe_id" -} - -# ssh check -function _ssh_check_neutron { - local from_net=$1 - local key_file=$2 - local ip=$3 - local user=$4 - local timeout_sec=$5 - local probe_cmd = "" - probe_cmd=`_get_probe_cmd_prefix $from_net` - local testcmd="$probe_cmd ssh -o StrictHostKeyChecking=no -i $key_file ${user}@$ip echo success" - test_with_retry "$testcmd" "server $ip didn't become ssh-able" $timeout_sec -} - -function plugin_agent_add_l2_agent_extension { - local l2_agent_extension=$1 - if [[ -z "$L2_AGENT_EXTENSIONS" ]]; then - L2_AGENT_EXTENSIONS=$l2_agent_extension - elif [[ ! ,${L2_AGENT_EXTENSIONS}, =~ ,${l2_agent_extension}, ]]; then - L2_AGENT_EXTENSIONS+=",$l2_agent_extension" - fi -} - -# Restore xtrace -$_XTRACE_NEUTRON - -# Tell emacs to use shell-script-mode -## Local variables: -## mode: shell-script -## End: +source $TOP_DIR/lib/neutron diff --git a/lib/neutron_plugins/README.md b/lib/neutron_plugins/README.md index ed40886fda..728aaee85f 100644 --- a/lib/neutron_plugins/README.md +++ b/lib/neutron_plugins/README.md @@ -13,7 +13,7 @@ Plugin specific configuration variables should be in this file. 
functions --------- -``lib/neutron-legacy`` calls the following functions when the ``$Q_PLUGIN`` is enabled +``lib/neutron`` calls the following functions when the ``$Q_PLUGIN`` is enabled * ``neutron_plugin_create_nova_conf`` : optionally set options in nova_conf diff --git a/lib/neutron_plugins/bigswitch_floodlight b/lib/neutron_plugins/bigswitch_floodlight index d3f5bd5752..84ca7ec42c 100644 --- a/lib/neutron_plugins/bigswitch_floodlight +++ b/lib/neutron_plugins/bigswitch_floodlight @@ -67,7 +67,7 @@ function has_neutron_plugin_security_group { } function neutron_plugin_check_adv_test_requirements { - is_service_enabled q-agt && is_service_enabled q-dhcp && return 0 + is_service_enabled q-agt neutron-agent && is_service_enabled q-dhcp neutron-dhcp && return 0 } # Restore xtrace diff --git a/lib/neutron_plugins/brocade b/lib/neutron_plugins/brocade index 310b72e5ad..96400634af 100644 --- a/lib/neutron_plugins/brocade +++ b/lib/neutron_plugins/brocade @@ -72,7 +72,7 @@ function has_neutron_plugin_security_group { } function neutron_plugin_check_adv_test_requirements { - is_service_enabled q-agt && is_service_enabled q-dhcp && return 0 + is_service_enabled q-agt neutron-agent && is_service_enabled q-dhcp neutron-dhcp && return 0 } # Restore xtrace diff --git a/lib/neutron_plugins/linuxbridge_agent b/lib/neutron_plugins/linuxbridge_agent index bdeaf0f3c6..a392bd0baf 100644 --- a/lib/neutron_plugins/linuxbridge_agent +++ b/lib/neutron_plugins/linuxbridge_agent @@ -97,7 +97,7 @@ function neutron_plugin_setup_interface_driver { } function neutron_plugin_check_adv_test_requirements { - is_service_enabled q-agt && is_service_enabled q-dhcp && return 0 + is_service_enabled q-agt neutron-agent && is_service_enabled q-dhcp neutron-dhcp && return 0 } # Restore xtrace diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2 index 46edacdc54..c2e78c65cc 100644 --- a/lib/neutron_plugins/ml2 +++ b/lib/neutron_plugins/ml2 @@ -67,7 +67,7 @@ function neutron_plugin_configure_common { Q_PLUGIN_CLASS="ml2" # The ML2 plugin delegates L3 routing/NAT functionality to # the L3 service plugin which must therefore be specified. - _neutron_service_plugin_class_add $ML2_L3_PLUGIN + neutron_service_plugin_class_add $ML2_L3_PLUGIN } function neutron_plugin_configure_service { diff --git a/lib/neutron_plugins/openvswitch_agent b/lib/neutron_plugins/openvswitch_agent index 7fed8bf853..6e79984e9b 100644 --- a/lib/neutron_plugins/openvswitch_agent +++ b/lib/neutron_plugins/openvswitch_agent @@ -68,7 +68,7 @@ function neutron_plugin_setup_interface_driver { } function neutron_plugin_check_adv_test_requirements { - is_service_enabled q-agt && is_service_enabled q-dhcp && return 0 + is_service_enabled q-agt neutron-agent && is_service_enabled q-dhcp neutron-dhcp && return 0 } # Restore xtrace diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index e64224cbaa..dc8129553c 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -348,7 +348,7 @@ function compile_ovn { # OVN service sanity check function ovn_sanity_check { - if is_service_enabled q-agt neutron-agt; then + if is_service_enabled q-agt neutron-agent; then die $LINENO "The q-agt/neutron-agt service must be disabled with OVN." elif is_service_enabled q-l3 neutron-l3; then die $LINENO "The q-l3/neutron-l3 service must be disabled with OVN." 
@@ -461,7 +461,7 @@ function filter_network_api_extensions { function configure_ovn_plugin { echo "Configuring Neutron for OVN" - if is_service_enabled q-svc ; then + if is_service_enabled q-svc neutron-api; then filter_network_api_extensions populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_type_geneve max_header_size=$OVN_GENEVE_OVERHEAD populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_nb_connection="$OVN_NB_REMOTE" @@ -485,7 +485,7 @@ function configure_ovn_plugin { inicomment /$Q_PLUGIN_CONF_FILE network_log local_output_log_base="$Q_LOG_DRIVER_LOG_BASE" fi - if is_service_enabled q-ovn-metadata-agent; then + if is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent; then populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_metadata_enabled=True else populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_metadata_enabled=False @@ -506,7 +506,7 @@ function configure_ovn_plugin { fi if is_service_enabled n-api-meta ; then - if is_service_enabled q-ovn-metadata-agent ; then + if is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent; then iniset $NOVA_CONF neutron service_metadata_proxy True fi fi @@ -539,7 +539,7 @@ function configure_ovn { fi # Metadata - if is_service_enabled q-ovn-metadata-agent && is_service_enabled ovn-controller; then + if is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent && is_service_enabled ovn-controller; then sudo install -d -o $STACK_USER $NEUTRON_CONF_DIR mkdir -p $NEUTRON_DIR/etc/neutron/plugins/ml2 @@ -551,7 +551,7 @@ function configure_ovn { iniset $OVN_META_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL iniset $OVN_META_CONF DEFAULT nova_metadata_host $OVN_META_DATA_HOST iniset $OVN_META_CONF DEFAULT metadata_workers $API_WORKERS - iniset $OVN_META_CONF DEFAULT state_path $NEUTRON_STATE_PATH + iniset $OVN_META_CONF DEFAULT state_path $DATA_DIR/neutron iniset $OVN_META_CONF ovs ovsdb_connection tcp:$OVSDB_SERVER_LOCAL_HOST:6640 iniset $OVN_META_CONF ovn ovn_sb_connection $OVN_SB_REMOTE if is_service_enabled tls-proxy; then @@ -680,7 +680,7 @@ function _start_ovn_services { if is_service_enabled ovs-vtep ; then _start_process "devstack@ovs-vtep.service" fi - if is_service_enabled q-ovn-metadata-agent; then + if is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent ; then _start_process "devstack@q-ovn-metadata-agent.service" fi } @@ -743,7 +743,7 @@ function start_ovn { fi fi - if is_service_enabled q-ovn-metadata-agent; then + if is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent; then run_process q-ovn-metadata-agent "$NEUTRON_OVN_BIN_DIR/$NEUTRON_OVN_METADATA_BINARY --config-file $OVN_META_CONF" # Format logging setup_logging $OVN_META_CONF @@ -767,7 +767,7 @@ function _stop_process { } function stop_ovn { - if is_service_enabled q-ovn-metadata-agent; then + if is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent; then sudo pkill -9 -f haproxy || : _stop_process "devstack@q-ovn-metadata-agent.service" fi diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3 index 3dffc33d37..2bf884a8c4 100644 --- a/lib/neutron_plugins/services/l3 +++ b/lib/neutron_plugins/services/l3 @@ -323,7 +323,7 @@ function _neutron_configure_router_v4 { openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" router set --external-gateway $EXT_NET_ID $ROUTER_ID # This logic is specific to using OVN or the l3-agent for layer 3 - if ([[ $Q_AGENT == "ovn" ]] && [[ "$OVN_L3_CREATE_PUBLIC_NETWORK" == "True" ]] && is_service_enabled q-svc neutron-server) || is_service_enabled q-l3 
neutron-l3; then + if ([[ $Q_AGENT == "ovn" ]] && [[ "$OVN_L3_CREATE_PUBLIC_NETWORK" == "True" ]] && is_service_enabled q-svc neutron-api) || is_service_enabled q-l3 neutron-l3; then # Configure and enable public bridge local ext_gw_interface="none" if is_neutron_ovs_base_plugin; then @@ -372,7 +372,7 @@ function _neutron_configure_router_v6 { fi # This logic is specific to using OVN or the l3-agent for layer 3 - if ([[ $Q_AGENT == "ovn" ]] && [[ "$OVN_L3_CREATE_PUBLIC_NETWORK" == "True" ]] && is_service_enabled q-svc neutron-server) || is_service_enabled q-l3 neutron-l3; then + if ([[ $Q_AGENT == "ovn" ]] && [[ "$OVN_L3_CREATE_PUBLIC_NETWORK" == "True" ]] && is_service_enabled q-svc neutron-api) || is_service_enabled q-l3 neutron-l3; then # if the Linux host considers itself to be a router then it will # ignore all router advertisements # Ensure IPv6 RAs are accepted on interfaces with a default route. diff --git a/lib/neutron_plugins/services/metering b/lib/neutron_plugins/services/metering index 5b32468d21..757a562ee6 100644 --- a/lib/neutron_plugins/services/metering +++ b/lib/neutron_plugins/services/metering @@ -12,7 +12,7 @@ AGENT_METERING_BINARY="$NEUTRON_BIN_DIR/neutron-metering-agent" METERING_PLUGIN="neutron.services.metering.metering_plugin.MeteringPlugin" function neutron_agent_metering_configure_common { - _neutron_service_plugin_class_add $METERING_PLUGIN + neutron_service_plugin_class_add $METERING_PLUGIN } function neutron_agent_metering_configure_agent { diff --git a/lib/neutron_plugins/services/qos b/lib/neutron_plugins/services/qos index af9eb3d5b4..c11c315586 100644 --- a/lib/neutron_plugins/services/qos +++ b/lib/neutron_plugins/services/qos @@ -6,7 +6,7 @@ function configure_qos_service_plugin { function configure_qos_core_plugin { - configure_qos_$NEUTRON_CORE_PLUGIN + configure_qos_$Q_PLUGIN } diff --git a/lib/tempest b/lib/tempest index eaad6d255e..44a9b6f29f 100644 --- a/lib/tempest +++ b/lib/tempest @@ -737,12 +737,12 @@ function configure_tempest { # Neutron API Extensions # disable metering if we didn't enable the service - if ! is_service_enabled q-metering; then + if ! is_service_enabled q-metering neutron-metering; then DISABLE_NETWORK_API_EXTENSIONS+=", metering" fi # disable l3_agent_scheduler if we didn't enable L3 agent - if ! is_service_enabled q-l3; then + if ! is_service_enabled q-l3 neutron-l3; then DISABLE_NETWORK_API_EXTENSIONS+=", l3_agent_scheduler" fi From aa47cb34ae25b66e46a216e1c9b7b668615b520b Mon Sep 17 00:00:00 2001 From: Rajat Dhasmana Date: Tue, 27 Dec 2022 06:11:07 +0000 Subject: [PATCH 288/574] Add config options for cinder nfs backend Currently the cinder nfs backend leaves out few options in a multi backend deployment. It works in single nfs backend deployment as devstack-plugin-nfs correctly configures all options[1]. We can clearly see the difference between what devstack-plugin-nfs configures[1] and what devstack nfs configures[2]. Following options are missing which are added by this patch. 
* nas_host
* nas_share_path
* nas_secure_file_operations
* nas_secure_file_permissions
* nfs_snapshot_support

[1] https://github.com/openstack/devstack-plugin-nfs/blob/dd12367f90fc86d42bfebe8a0ebb694dc0308810/devstack/plugin.sh#L60-L68
[2] https://github.com/openstack/devstack/blob/a52041cd3f067156e478e355f5712a60e12ce649/lib/cinder_backends/nfs#L32-L34

Change-Id: I03cad66abb3c6f2ae1d5cf943ac952a30961f783
---
 lib/cinder_backends/nfs | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/lib/cinder_backends/nfs b/lib/cinder_backends/nfs
index 89a37a1f02..f3fcbeff19 100644
--- a/lib/cinder_backends/nfs
+++ b/lib/cinder_backends/nfs
@@ -32,6 +32,15 @@ function configure_cinder_backend_nfs {
     iniset $CINDER_CONF $be_name volume_backend_name $be_name
     iniset $CINDER_CONF $be_name volume_driver "cinder.volume.drivers.nfs.NfsDriver"
     iniset $CINDER_CONF $be_name nfs_shares_config "$CINDER_CONF_DIR/nfs-shares-$be_name.conf"
+    iniset $CINDER_CONF $be_name nas_host localhost
+    iniset $CINDER_CONF $be_name nas_share_path ${NFS_EXPORT_DIR}
+    iniset $CINDER_CONF $be_name nas_secure_file_operations \
+        ${NFS_SECURE_FILE_OPERATIONS}
+    iniset $CINDER_CONF $be_name nas_secure_file_permissions \
+        ${NFS_SECURE_FILE_PERMISSIONS}
+
+    # NFS snapshot support is currently opt-in only.
+    iniset $CINDER_CONF $be_name nfs_snapshot_support True

     echo "$CINDER_NFS_SERVERPATH" | tee "$CINDER_CONF_DIR/nfs-shares-$be_name.conf"
 }

From 69d71cfdf9c24d48fbea366714f4595cbd120723 Mon Sep 17 00:00:00 2001
From: Ghanshyam Mann
Date: Tue, 10 Jan 2023 20:13:47 -0600
Subject: [PATCH 289/574] Option to disable the scope & new defaults enforcement

In this release cycle, a few services are enabling enforce scope and
the new policy defaults by default. Example from Nova:

- https://review.opendev.org/c/openstack/nova/+/866218

Until the new defaults are enabled by default in a release, we should
keep testing the old defaults in the existing jobs, and we can add new
jobs that test the new defaults.

To do that, devstack provides a way to keep scope checks and the new
defaults disabled by default; they can be enabled by setting the
enforce_scope variable to true. Once any service releases with the new
defaults enabled by default, we can switch the behavior, enable the
scope/new defaults by default, and a single job can disable them to
keep testing the old defaults until the service removes them entirely.
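
As an illustrative sketch of how a job or user would opt in (the
variable names come from this change; the exact local.conf layout is
an assumption of this example, not part of the patch):

    # local.conf (hypothetical example)
    [[local|localrc]]
    # enable scope checks and new policy defaults for every service ...
    ENFORCE_SCOPE=True
    # ... or for a single service only, e.g. Nova
    NOVA_ENFORCE_SCOPE=True

Leaving both unset keeps the old defaults, which is what the existing
jobs continue to test.
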
Change-Id: I5c2ec3e1667172a75e06458f16cf3d57947b2c53 --- lib/cinder | 3 +++ lib/glance | 4 ++++ lib/keystone | 7 ++++++- lib/nova | 3 +++ lib/placement | 3 +++ 5 files changed, 19 insertions(+), 1 deletion(-) diff --git a/lib/cinder b/lib/cinder index bf2fe50e08..2424f928d1 100644 --- a/lib/cinder +++ b/lib/cinder @@ -411,6 +411,9 @@ function configure_cinder { if [[ "$CINDER_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then iniset $CINDER_CONF oslo_policy enforce_scope true iniset $CINDER_CONF oslo_policy enforce_new_defaults true + else + iniset $CINDER_CONF oslo_policy enforce_scope false + iniset $CINDER_CONF oslo_policy enforce_new_defaults false fi } diff --git a/lib/glance b/lib/glance index ba98f4133e..041acafc92 100644 --- a/lib/glance +++ b/lib/glance @@ -436,6 +436,10 @@ function configure_glance { iniset $GLANCE_API_CONF oslo_policy enforce_scope true iniset $GLANCE_API_CONF oslo_policy enforce_new_defaults true iniset $GLANCE_API_CONF DEFAULT enforce_secure_rbac true + else + iniset $GLANCE_API_CONF oslo_policy enforce_scope false + iniset $GLANCE_API_CONF oslo_policy enforce_new_defaults false + iniset $GLANCE_API_CONF DEFAULT enforce_secure_rbac false fi } diff --git a/lib/keystone b/lib/keystone index 80a136f78d..6cb4aac46a 100644 --- a/lib/keystone +++ b/lib/keystone @@ -265,10 +265,15 @@ function configure_keystone { iniset $KEYSTONE_CONF security_compliance lockout_duration $KEYSTONE_LOCKOUT_DURATION iniset $KEYSTONE_CONF security_compliance unique_last_password_count $KEYSTONE_UNIQUE_LAST_PASSWORD_COUNT fi + + iniset $KEYSTONE_CONF oslo_policy policy_file policy.yaml + if [[ "$KEYSTONE_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then iniset $KEYSTONE_CONF oslo_policy enforce_scope true iniset $KEYSTONE_CONF oslo_policy enforce_new_defaults true - iniset $KEYSTONE_CONF oslo_policy policy_file policy.yaml + else + iniset $KEYSTONE_CONF oslo_policy enforce_scope false + iniset $KEYSTONE_CONF oslo_policy enforce_new_defaults false fi } diff --git a/lib/nova b/lib/nova index 14eb8fc3da..3aa6b9e3b3 100644 --- a/lib/nova +++ b/lib/nova @@ -490,6 +490,9 @@ function create_nova_conf { if [[ "$NOVA_ENFORCE_SCOPE" == "True" || "$ENFORCE_SCOPE" == "True" ]]; then iniset $NOVA_CONF oslo_policy enforce_new_defaults True iniset $NOVA_CONF oslo_policy enforce_scope True + else + iniset $NOVA_CONF oslo_policy enforce_new_defaults False + iniset $NOVA_CONF oslo_policy enforce_scope False fi if is_service_enabled tls-proxy && [ "$NOVA_USE_MOD_WSGI" == "False" ]; then # Set the service port for a proxy to take the original diff --git a/lib/placement b/lib/placement index bc22c564f4..c6bf99f868 100644 --- a/lib/placement +++ b/lib/placement @@ -120,6 +120,9 @@ function configure_placement { if [[ "$PLACEMENT_ENFORCE_SCOPE" == "True" || "$ENFORCE_SCOPE" == "True" ]]; then iniset $PLACEMENT_CONF oslo_policy enforce_new_defaults True iniset $PLACEMENT_CONF oslo_policy enforce_scope True + else + iniset $PLACEMENT_CONF oslo_policy enforce_new_defaults False + iniset $PLACEMENT_CONF oslo_policy enforce_scope False fi } From 7fecba2f135f16204050b627bb850a87aa597bad Mon Sep 17 00:00:00 2001 From: yatinkarel Date: Thu, 12 Jan 2023 17:31:36 +0530 Subject: [PATCH 290/574] [OVN] Ensure socket files are absent in init_ovn Just like we remove db files let's also remove socket files when initializing ovn. Those will reappear once service fully restarts along with db files. Without it we see random issue as described in the below bug. 
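
For context, a minimal sketch of the resulting cleanup in init_ovn
(the database lines already exist; the socket line is the one this
change adds, and the rest of the function is elided):

    # wipe stale OVN databases and control sockets; both are recreated
    # when the OVN services are started again
    sudo rm -f $OVN_DATADIR/*.db
    sudo rm -f $OVN_DATADIR/.*.db.~lock~
    sudo rm -f $OVN_RUNDIR/*.sock
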
Closes-Bug: #2002629 Change-Id: I726a9cac9c805d017273aa79e844724f0d00cdf0 --- lib/neutron_plugins/ovn_agent | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index dc8129553c..f27777867d 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -587,6 +587,7 @@ function init_ovn { rm -f $OVS_DATADIR/.*.db.~lock~ sudo rm -f $OVN_DATADIR/*.db sudo rm -f $OVN_DATADIR/.*.db.~lock~ + sudo rm -f $OVN_RUNDIR/*.sock } function _start_ovs { From 5a6f0bbd4c3c3006a50e9e70c81f31ea1fa409c6 Mon Sep 17 00:00:00 2001 From: Rodolfo Alonso Hernandez Date: Sat, 21 Jan 2023 20:21:26 +0100 Subject: [PATCH 291/574] Remove the neutron bash completion installation The python-neutronclient CLI code is going to be removed from this repository. Change-Id: I39b3a43a7742481ec6d9501d5459bf0837ba0122 Related-Bug: #2003861 --- lib/neutron | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/neutron b/lib/neutron index c8ee8c5e76..9fb09ab9bb 100644 --- a/lib/neutron +++ b/lib/neutron @@ -527,7 +527,6 @@ function install_neutronclient { if use_library_from_git "python-neutronclient"; then git_clone_by_name "python-neutronclient" setup_dev_lib "python-neutronclient" - sudo install -D -m 0644 -o $STACK_USER {${GITDIR["python-neutronclient"]}/tools/,/etc/bash_completion.d/}neutron.bash_completion fi } From 91efe177b170c3874989affc73842dc4ffbe062d Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Thu, 29 Sep 2022 08:38:24 +0200 Subject: [PATCH 292/574] Bump cirros version to 0.6.1 Cirros has made a fresh release, let us use it. Switch the download URLs to https and drop an old example that no longer is available. Depends-On: https://review.opendev.org/c/openstack/tempest/+/871271 Change-Id: I1d391b871fc9bfa825db30db9434922226b94d8a --- doc/source/guides/nova.rst | 2 +- stackrc | 15 +++++++-------- 2 files changed, 8 insertions(+), 9 deletions(-) diff --git a/doc/source/guides/nova.rst b/doc/source/guides/nova.rst index 5b427972c4..d0fb274c13 100644 --- a/doc/source/guides/nova.rst +++ b/doc/source/guides/nova.rst @@ -122,7 +122,7 @@ when creating the server, for example: .. code-block:: shell $ openstack --os-compute-api-version 2.37 server create --flavor cirros256 \ - --image cirros-0.3.5-x86_64-disk --nic none --wait test-server + --image cirros-0.6.1-x86_64-disk --nic none --wait test-server .. note:: ``--os-compute-api-version`` greater than or equal to 2.37 is required to use ``--nic=none``. diff --git a/stackrc b/stackrc index b3130e5f7f..a71d843362 100644 --- a/stackrc +++ b/stackrc @@ -657,20 +657,19 @@ esac # If the file ends in .tar.gz, uncompress the tarball and and select the first # .img file inside it as the image. If present, use "*-vmlinuz*" as the kernel # and "*-initrd*" as the ramdisk -# example: http://cloud-images.ubuntu.com/releases/precise/release/ubuntu-12.04-server-cloudimg-amd64.tar.gz +# example: https://cloud-images.ubuntu.com/releases/jammy/release/ubuntu-22.04-server-cloudimg-amd64.tar.gz # * disk image (*.img,*.img.gz) # if file ends in .img, then it will be uploaded and registered as a to # glance as a disk image. If it ends in .gz, it is uncompressed first. 
# example: -# http://cloud-images.ubuntu.com/releases/precise/release/ubuntu-12.04-server-cloudimg-armel-disk1.img -# http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs.img.gz +# https://cloud-images.ubuntu.com/releases/jammy/release/ubuntu-22.04-server-cloudimg-amd64.img +# https://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs.img.gz # * OpenVZ image: # OpenVZ uses its own format of image, and does not support UEC style images -#IMAGE_URLS="http://smoser.brickies.net/ubuntu/ttylinux-uec/ttylinux-uec-amd64-11.2_2.6.35-15_1.tar.gz" # old ttylinux-uec image -#IMAGE_URLS="http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img" # cirros full disk image +#IMAGE_URLS="https://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img" # cirros full disk image -CIRROS_VERSION=${CIRROS_VERSION:-"0.5.2"} +CIRROS_VERSION=${CIRROS_VERSION:-"0.6.1"} CIRROS_ARCH=${CIRROS_ARCH:-$(uname -m)} # Set default image based on ``VIRT_DRIVER`` and ``LIBVIRT_TYPE``, either of @@ -687,11 +686,11 @@ if [[ "$DOWNLOAD_DEFAULT_IMAGES" == "True" ]]; then lxc) # the cirros root disk in the uec tarball is empty, so it will not work for lxc DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs} DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_FILE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs.img.gz} - IMAGE_URLS+="http://download.cirros-cloud.net/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";; + IMAGE_URLS+="https://download.cirros-cloud.net/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";; *) # otherwise, use the qcow image DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk} DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_FILE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img} - IMAGE_URLS+="http://download.cirros-cloud.net/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";; + IMAGE_URLS+="https://download.cirros-cloud.net/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";; esac ;; vsphere) From 71c3c40c269a50303247855319d1d3a5d30f6773 Mon Sep 17 00:00:00 2001 From: Bence Romsics Date: Wed, 21 Dec 2022 13:50:54 +0100 Subject: [PATCH 293/574] 'sudo pkill -f' should not match the sudo process pkill already takes care that it does not kill itself, however the same problem may happen with 'sudo pkill -f' killing sudo. Use one of the usual regex tricks to avoid that. 
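
A short illustration of the trick, using haproxy as in the change
below (pkill -f matches patterns against full command lines):

    # The parent's command line is "sudo pkill -9 -f haproxy", which the
    # pattern "haproxy" also matches, so sudo itself may get killed.
    sudo pkill -9 -f haproxy || :

    # "[h]aproxy" still matches "haproxy" in other processes' command
    # lines, but it does not match the literal string "[h]aproxy" in our
    # own sudo/pkill command line, so the parent sudo is left alone.
    sudo pkill -9 -f "[h]aproxy" || :
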
Change-Id: Ic6a94f516cbc509a2d77699494aa7bcaecf96ebc
Closes-Bug: #1999395
---
 lib/neutron                   | 4 +++-
 lib/neutron_plugins/ovn_agent | 4 +++-
 2 files changed, 6 insertions(+), 2 deletions(-)

diff --git a/lib/neutron b/lib/neutron
index c8ee8c5e76..0d6a148c8e 100644
--- a/lib/neutron
+++ b/lib/neutron
@@ -689,7 +689,9 @@ function stop_other {
     fi

     if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then
-        sudo pkill -9 -f $NEUTRON_ROOTWRAP-daemon || :
+        # pkill takes care not to kill itself, but it may kill its parent
+        # sudo unless we use the "ps | grep [f]oo" trick
+        sudo pkill -9 -f "$NEUTRON_ROOTWRAP-[d]aemon" || :
     fi
 }

diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent
index f27777867d..34903924b3 100644
--- a/lib/neutron_plugins/ovn_agent
+++ b/lib/neutron_plugins/ovn_agent
@@ -769,7 +769,9 @@ function _stop_process {

 function stop_ovn {
     if is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent; then
-        sudo pkill -9 -f haproxy || :
+        # pkill takes care not to kill itself, but it may kill its parent
+        # sudo unless we use the "ps | grep [f]oo" trick
+        sudo pkill -9 -f "[h]aproxy" || :
         _stop_process "devstack@q-ovn-metadata-agent.service"
     fi
     if is_service_enabled ovn-controller-vtep ; then

From 7fe998109bda8cdd5cb5ba4a0e02c6c83cb0566d Mon Sep 17 00:00:00 2001
From: Ghanshyam Mann
Date: Thu, 26 Jan 2023 22:28:07 -0600
Subject: [PATCH 294/574] Fix setting the tempest virtual env constraints env var

Devstack sets the env vars TOX_CONSTRAINTS_FILE/UPPER_CONSTRAINTS_FILE,
which are used to apply constraints during the Tempest virtual env
installation [1]. Those env vars are set to a non-master constraints
file when we need to use non-master constraints, but when we need the
master constraints we do not set/reset them to point at the master
constraints.

This creates an issue when running the grenade job, where we run
Tempest on the old devstack as well as on the new devstack. When
Tempest is installed on the old devstack, the old Tempest is used and
it sets these env vars to the stable branch constraints (this is the
case when the old devstack, whose stable branch is in the EM phase,
uses the old Tempest rather than master Tempest); all good so far. The
problem comes when, in the same grenade run, upgrade-tempest installs
the master Tempest (when the new devstack branch is in the 'supported'
phase and therefore uses master Tempest), which is supposed to use the
master constraints. The TOX_CONSTRAINTS_FILE/UPPER_CONSTRAINTS_FILE
env vars set by the old Tempest are still in effect, and the resulting
constraints mismatch makes it fail.

This happened when we tried to pin stable/wallaby to Tempest 29.0.0

- https://review.opendev.org/c/openstack/devstack/+/871782

and the stable/xena grenade job failed (stable/xena uses master
Tempest and is supposed to use master constraints)

- https://zuul.opendev.org/t/openstack/build/fb7b2a8b562c42bab4c741819f5e9732/log/controller/logs/grenade.sh_log.txt#16641

We should set/reset those constraint env vars to the master
constraints whenever the configuration tells devstack to use the
master constraints.
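
In shell terms the fix boils down to explicitly re-exporting both
variables whenever the master constraints are selected (the URL is the
one used by the change below):

    export UPPER_CONSTRAINTS_FILE=https://releases.openstack.org/constraints/upper/master
    export TOX_CONSTRAINTS_FILE=https://releases.openstack.org/constraints/upper/master
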
[1] https://github.com/openstack/devstack/blob/71c3c40c269a50303247855319d1d3a5d30f6773/lib/tempest#L124 Closes-Bug: #2003993 Change-Id: I5e938139b47f443a4c358415d0d4dcf6549cd085 --- lib/tempest | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/lib/tempest b/lib/tempest index 44a9b6f29f..c3d3e9ac30 100644 --- a/lib/tempest +++ b/lib/tempest @@ -128,6 +128,13 @@ function set_tempest_venv_constraints { (cd $REQUIREMENTS_DIR && git show master:upper-constraints.txt 2>/dev/null || git show origin/master:upper-constraints.txt) > $tmp_c + # NOTE(gmann): we need to set the below env var pointing to master + # constraints even that is what default in tox.ini. Otherwise it can + # create the issue for grenade run where old and new devstack can have + # different tempest (old and master) to install. For detail problem, + # refer to the https://bugs.launchpad.net/devstack/+bug/2003993 + export UPPER_CONSTRAINTS_FILE=https://releases.openstack.org/constraints/upper/master + export TOX_CONSTRAINTS_FILE=https://releases.openstack.org/constraints/upper/master else echo "Using $TEMPEST_VENV_UPPER_CONSTRAINTS constraints in Tempest virtual env." cat $TEMPEST_VENV_UPPER_CONSTRAINTS > $tmp_c From a84b2091cf2c84eb4b81e542233bf446440e02b1 Mon Sep 17 00:00:00 2001 From: elajkat Date: Wed, 17 Nov 2021 11:52:56 +0100 Subject: [PATCH 295/574] Rehome functions to enable Neutron's segments integration Those functions were part of the neutron devstack plugin but we discussed it during last PTG [1] and decided to move to the Devstack repo as plugins which are used by e.g. CI jobs which are defined outside of the neutron repository. Placement integration is used e.g. in the tempest-slow job which is defined in tempest and used by many different OpenStack projects. [1] https://etherpad.opendev.org/p/neutron-yoga-ptg#L142 Change-Id: I2c26063896ab2679cffd01227a40a3283caa3b17 --- lib/neutron | 5 +++++ lib/neutron_plugins/services/segments | 10 ++++++++++ 2 files changed, 15 insertions(+) create mode 100644 lib/neutron_plugins/services/segments diff --git a/lib/neutron b/lib/neutron index 8708bf43ca..368a1b9c55 100644 --- a/lib/neutron +++ b/lib/neutron @@ -294,6 +294,7 @@ source $TOP_DIR/lib/neutron_plugins/services/l3 source $TOP_DIR/lib/neutron_plugins/services/placement source $TOP_DIR/lib/neutron_plugins/services/trunk source $TOP_DIR/lib/neutron_plugins/services/qos +source $TOP_DIR/lib/neutron_plugins/services/segments # Use security group or not if has_neutron_plugin_security_group; then @@ -416,6 +417,10 @@ function configure_neutron { configure_l3_agent_extension_gateway_ip_qos fi fi + if is_service_enabled neutron-segments; then + configure_placement_neutron + configure_segments_extension + fi # Finally configure Neutron server and core plugin if is_service_enabled q-agt neutron-agent q-svc neutron-api; then diff --git a/lib/neutron_plugins/services/segments b/lib/neutron_plugins/services/segments new file mode 100644 index 0000000000..08936bae49 --- /dev/null +++ b/lib/neutron_plugins/services/segments @@ -0,0 +1,10 @@ +#!/bin/bash + +function configure_segments_service_plugin { + neutron_service_plugin_class_add segments +} + +function configure_segments_extension { + configure_segments_service_plugin +} + From 48af5d4b1bf5332c879ee52fb4686874b212697f Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Tue, 14 Feb 2023 17:11:24 +0100 Subject: [PATCH 296/574] Make rockylinux job non-voting It is currently failing, let's unblock the CI until we have a fix. 
Change-Id: I7f072ceef57c302eb6ce20e108043d2390e9f481
---
 .zuul.yaml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/.zuul.yaml b/.zuul.yaml
index 8e20f6ed34..30e53976a5 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -726,6 +726,7 @@
     description: Rocky Linux 9 Blue Onyx platform test
     nodeset: devstack-single-node-rockylinux-9
     timeout: 9000
+    voting: false
     vars:
       configure_swap_size: 4096

From fcc525f4fc5022a4b1f4e3d961e1b27cfbfa9d71 Mon Sep 17 00:00:00 2001
From: Gregory Thiemonge
Date: Thu, 16 Feb 2023 10:26:32 +0100
Subject: [PATCH 297/574] Fix rockylinux and make it voting

Some rockylinux deployments have the curl-minimal package installed
by default (the latest GenericCloud image still has the curl package),
which triggers an error when devstack wants to install the curl
package.

Fix this issue by swapping curl-minimal with curl before installing
base packages.

Change-Id: I969e8dc22e7d11c9917a843d9245f33a04fe197d
---
 .zuul.yaml | 1 -
 stack.sh   | 7 +++++++
 2 files changed, 7 insertions(+), 1 deletion(-)

diff --git a/.zuul.yaml b/.zuul.yaml
index 30e53976a5..8e20f6ed34 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -726,7 +726,6 @@
     description: Rocky Linux 9 Blue Onyx platform test
     nodeset: devstack-single-node-rockylinux-9
     timeout: 9000
-    voting: false
     vars:
       configure_swap_size: 4096

diff --git a/stack.sh b/stack.sh
index 28576d1e14..ccd2d16baa 100755
--- a/stack.sh
+++ b/stack.sh
@@ -394,6 +394,13 @@ elif [[ $DISTRO == "rhel9" ]]; then
     sudo dnf config-manager --set-enabled crb
     # rabbitmq and other packages are provided by RDO repositories.
     _install_rdo
+
+    # Some distributions (Rocky Linux 9) provide curl-minimal instead of curl,
+    # it triggers a conflict when devstack wants to install "curl".
+    # Swap curl-minimal with curl.
+    if is_package_installed curl-minimal; then
+        sudo dnf swap -y curl-minimal curl
+    fi
 elif [[ $DISTRO == "openEuler-22.03" ]]; then
     # There are some problem in openEuler. We should fix it first. Some required
     # package/action runs before fixup script. So we can't fix there.

From ec07b343d25e9964db57ef9c3e2a89deeb5ac56e Mon Sep 17 00:00:00 2001
From: Martin Kopec
Date: Tue, 24 Jan 2023 17:38:45 +0100
Subject: [PATCH 298/574] Remove support for opensuse

We haven't been testing the distro for a while in CI; e.g. in Tempest,
the jobs on opensuse15 haven't been executed for a year now.
Therefore the patch removes opensuse support from devstack.
Closes-Bug: #2002900 Change-Id: I0f5e4c644e2d14d1b8bb5bc0096d1469febe5fcc --- doc/source/index.rst | 3 +- doc/source/plugins.rst | 3 -- files/rpms-suse/baremetal | 1 - files/rpms-suse/ceph | 3 -- files/rpms-suse/cinder | 3 -- files/rpms-suse/dstat | 1 - files/rpms-suse/general | 34 --------------------- files/rpms-suse/horizon | 2 -- files/rpms-suse/keystone | 4 --- files/rpms-suse/ldap | 3 -- files/rpms-suse/n-api | 1 - files/rpms-suse/n-cpu | 10 ------ files/rpms-suse/neutron-agent | 1 - files/rpms-suse/neutron-common | 12 -------- files/rpms-suse/neutron-l3 | 2 -- files/rpms-suse/nova | 21 ------------- files/rpms-suse/openvswitch | 3 -- files/rpms-suse/os-brick | 2 -- files/rpms-suse/q-agt | 1 - files/rpms-suse/q-l3 | 1 - files/rpms-suse/swift | 6 ---- functions-common | 47 ----------------------------- inc/python | 4 --- lib/apache | 25 +++------------ lib/cinder | 9 ++---- lib/databases/mysql | 15 ++------- lib/databases/postgresql | 9 ++---- lib/glance | 4 --- lib/horizon | 2 +- lib/ldap | 14 --------- lib/lvm | 4 +-- lib/neutron_plugins/ovs_base | 13 -------- lib/neutron_plugins/ovs_source | 6 ---- lib/nova | 6 ++-- lib/nova_plugins/functions-libvirt | 6 ++-- lib/nova_plugins/hypervisor-libvirt | 3 -- lib/rpc_backend | 15 +-------- lib/swift | 3 -- lib/tls | 12 -------- stack.sh | 2 +- tests/test_package_ordering.sh | 2 +- tools/fixup_stuff.sh | 40 ------------------------ tools/install_prereqs.sh | 2 -- 43 files changed, 25 insertions(+), 335 deletions(-) delete mode 100644 files/rpms-suse/baremetal delete mode 100644 files/rpms-suse/ceph delete mode 100644 files/rpms-suse/cinder delete mode 100644 files/rpms-suse/dstat delete mode 100644 files/rpms-suse/general delete mode 100644 files/rpms-suse/horizon delete mode 100644 files/rpms-suse/keystone delete mode 100644 files/rpms-suse/ldap delete mode 100644 files/rpms-suse/n-api delete mode 100644 files/rpms-suse/n-cpu delete mode 100644 files/rpms-suse/neutron-agent delete mode 100644 files/rpms-suse/neutron-common delete mode 100644 files/rpms-suse/neutron-l3 delete mode 100644 files/rpms-suse/nova delete mode 100644 files/rpms-suse/openvswitch delete mode 100644 files/rpms-suse/os-brick delete mode 120000 files/rpms-suse/q-agt delete mode 120000 files/rpms-suse/q-l3 delete mode 100644 files/rpms-suse/swift diff --git a/doc/source/index.rst b/doc/source/index.rst index 1e932f88a5..ccd0fef330 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -38,8 +38,7 @@ Install Linux Start with a clean and minimal install of a Linux system. DevStack attempts to support the two latest LTS releases of Ubuntu, the -latest/current Fedora version, CentOS/RHEL/Rocky Linux 9, OpenSUSE and -openEuler. +latest/current Fedora version, CentOS/RHEL/Rocky Linux 9 and openEuler. If you do not have a preference, Ubuntu 22.04 (Jammy) is the most tested, and will probably go the smoothest. diff --git a/doc/source/plugins.rst b/doc/source/plugins.rst index 62dd15bfb1..dd75b5a22d 100644 --- a/doc/source/plugins.rst +++ b/doc/source/plugins.rst @@ -243,9 +243,6 @@ locations in the top-level of the plugin repository: - ``./devstack/files/rpms/$plugin_name`` - Packages to install when running on Red Hat, Fedora, or CentOS. -- ``./devstack/files/rpms-suse/$plugin_name`` - Packages to install when - running on SUSE Linux or openSUSE. - Although there a no plans to remove this method of installing packages, plugins should consider it deprecated for ``bindep`` support described below. 
diff --git a/files/rpms-suse/baremetal b/files/rpms-suse/baremetal deleted file mode 100644 index 61f73eeae3..0000000000 --- a/files/rpms-suse/baremetal +++ /dev/null @@ -1 +0,0 @@ -dnsmasq diff --git a/files/rpms-suse/ceph b/files/rpms-suse/ceph deleted file mode 100644 index 8c4955df90..0000000000 --- a/files/rpms-suse/ceph +++ /dev/null @@ -1,3 +0,0 @@ -ceph # NOPRIME -lsb -xfsprogs diff --git a/files/rpms-suse/cinder b/files/rpms-suse/cinder deleted file mode 100644 index b39cc79a27..0000000000 --- a/files/rpms-suse/cinder +++ /dev/null @@ -1,3 +0,0 @@ -lvm2 -qemu-tools -tgt # NOPRIME diff --git a/files/rpms-suse/dstat b/files/rpms-suse/dstat deleted file mode 100644 index 2b643b8b1b..0000000000 --- a/files/rpms-suse/dstat +++ /dev/null @@ -1 +0,0 @@ -dstat diff --git a/files/rpms-suse/general b/files/rpms-suse/general deleted file mode 100644 index f63611025c..0000000000 --- a/files/rpms-suse/general +++ /dev/null @@ -1,34 +0,0 @@ -apache2 -apache2-devel -bc -ca-certificates-mozilla -curl -gawk -gcc -gcc-c++ -git-core -graphviz # docs -iputils -libffi-devel # pyOpenSSL -libjpeg8-devel # Pillow 3.0.0 -libopenssl-devel # to rebuild pyOpenSSL if needed -libxslt-devel # lxml -lsof # useful when debugging -make -net-tools -openssh -openssl -pcre-devel # python-pcre -postgresql-devel # psycopg2 -psmisc -python3-systemd -python-cmd2 # dist:opensuse-12.3 -python-devel # pyOpenSSL -python-xml -tar -tcpdump -unzip -util-linux -wget -which -zlib-devel diff --git a/files/rpms-suse/horizon b/files/rpms-suse/horizon deleted file mode 100644 index 753ea76e04..0000000000 --- a/files/rpms-suse/horizon +++ /dev/null @@ -1,2 +0,0 @@ -apache2-mod_wsgi # NOPRIME -apache2 # NOPRIME diff --git a/files/rpms-suse/keystone b/files/rpms-suse/keystone deleted file mode 100644 index 66cfc23423..0000000000 --- a/files/rpms-suse/keystone +++ /dev/null @@ -1,4 +0,0 @@ -cyrus-sasl-devel -memcached -openldap2-devel -sqlite3 diff --git a/files/rpms-suse/ldap b/files/rpms-suse/ldap deleted file mode 100644 index 46d26f0796..0000000000 --- a/files/rpms-suse/ldap +++ /dev/null @@ -1,3 +0,0 @@ -openldap2 -openldap2-client -python-ldap diff --git a/files/rpms-suse/n-api b/files/rpms-suse/n-api deleted file mode 100644 index 0f08daace3..0000000000 --- a/files/rpms-suse/n-api +++ /dev/null @@ -1 +0,0 @@ -python-dateutil diff --git a/files/rpms-suse/n-cpu b/files/rpms-suse/n-cpu deleted file mode 100644 index 9c724cb9d8..0000000000 --- a/files/rpms-suse/n-cpu +++ /dev/null @@ -1,10 +0,0 @@ -cdrkit-cdrtools-compat # dist:sle12 -cryptsetup -dosfstools -libosinfo -lvm2 -mkisofs # not:sle12 -open-iscsi -sg3_utils -# Stuff for diablo volumes -sysfsutils diff --git a/files/rpms-suse/neutron-agent b/files/rpms-suse/neutron-agent deleted file mode 100644 index ea8819e884..0000000000 --- a/files/rpms-suse/neutron-agent +++ /dev/null @@ -1 +0,0 @@ -ipset diff --git a/files/rpms-suse/neutron-common b/files/rpms-suse/neutron-common deleted file mode 100644 index e3799a9353..0000000000 --- a/files/rpms-suse/neutron-common +++ /dev/null @@ -1,12 +0,0 @@ -acl -dnsmasq -dnsmasq-utils # dist:opensuse-12.3,opensuse-13.1 -ebtables -haproxy # to serve as metadata proxy inside router/dhcp namespaces -iptables -iputils -rabbitmq-server # NOPRIME -radvd # NOPRIME -sqlite3 -sudo -vlan diff --git a/files/rpms-suse/neutron-l3 b/files/rpms-suse/neutron-l3 deleted file mode 100644 index a7a190c063..0000000000 --- a/files/rpms-suse/neutron-l3 +++ /dev/null @@ -1,2 +0,0 @@ -conntrack-tools -keepalived diff --git a/files/rpms-suse/nova 
b/files/rpms-suse/nova deleted file mode 100644 index 082b9aca22..0000000000 --- a/files/rpms-suse/nova +++ /dev/null @@ -1,21 +0,0 @@ -cdrkit-cdrtools-compat # dist:sle12 -conntrack-tools -curl -ebtables -iptables -iputils -kpartx -kvm # NOPRIME -libvirt # NOPRIME -libvirt-python # NOPRIME -# mkisofs is required for config_drive -mkisofs # not:sle12 -parted -polkit -# qemu as fallback if kvm cannot be used -qemu # NOPRIME -rabbitmq-server # NOPRIME -socat -sqlite3 -sudo -vlan diff --git a/files/rpms-suse/openvswitch b/files/rpms-suse/openvswitch deleted file mode 100644 index 53f8bb22cf..0000000000 --- a/files/rpms-suse/openvswitch +++ /dev/null @@ -1,3 +0,0 @@ - -openvswitch -openvswitch-switch diff --git a/files/rpms-suse/os-brick b/files/rpms-suse/os-brick deleted file mode 100644 index 67b33a9861..0000000000 --- a/files/rpms-suse/os-brick +++ /dev/null @@ -1,2 +0,0 @@ -lsscsi -open-iscsi diff --git a/files/rpms-suse/q-agt b/files/rpms-suse/q-agt deleted file mode 120000 index 99fe353094..0000000000 --- a/files/rpms-suse/q-agt +++ /dev/null @@ -1 +0,0 @@ -neutron-agent \ No newline at end of file diff --git a/files/rpms-suse/q-l3 b/files/rpms-suse/q-l3 deleted file mode 120000 index 0a5ca2a45f..0000000000 --- a/files/rpms-suse/q-l3 +++ /dev/null @@ -1 +0,0 @@ -neutron-l3 \ No newline at end of file diff --git a/files/rpms-suse/swift b/files/rpms-suse/swift deleted file mode 100644 index 3663b98545..0000000000 --- a/files/rpms-suse/swift +++ /dev/null @@ -1,6 +0,0 @@ -curl -liberasurecode-devel -memcached -sqlite3 -xfsprogs -xinetd diff --git a/functions-common b/functions-common index 4eed5d8407..c7a1c6e0bf 100644 --- a/functions-common +++ b/functions-common @@ -454,16 +454,6 @@ function GetDistro { elif [[ "$os_VENDOR" =~ (Fedora) ]]; then # For Fedora, just use 'f' and the release DISTRO="f$os_RELEASE" - elif is_opensuse; then - DISTRO="opensuse-$os_RELEASE" - # Tumbleweed uses "n/a" as a codename, and the release is a datestring - # like 20180218, so not very useful. Leap however uses a release - # with a "dot", so for example 15.0 - [ "$os_CODENAME" = "n/a" -a "$os_RELEASE" = "${os_RELEASE/\./}" ] && \ - DISTRO="opensuse-tumbleweed" - elif is_suse_linux_enterprise; then - # just use major release - DISTRO="sle${os_RELEASE%.*}" elif [[ "$os_VENDOR" =~ (Red.*Hat) || \ "$os_VENDOR" =~ (CentOS) || \ "$os_VENDOR" =~ (AlmaLinux) || \ @@ -537,37 +527,6 @@ function is_fedora { } -# Determine if current distribution is a SUSE-based distribution -# (openSUSE, SLE). 
-# is_suse -function is_suse { - is_opensuse || is_suse_linux_enterprise -} - - -# Determine if current distribution is an openSUSE distribution -# is_opensuse -function is_opensuse { - if [[ -z "$os_VENDOR" ]]; then - GetOSVersion - fi - - [[ "$os_VENDOR" =~ (openSUSE) ]] -} - - -# Determine if current distribution is a SUSE Linux Enterprise (SLE) -# distribution -# is_suse_linux_enterprise -function is_suse_linux_enterprise { - if [[ -z "$os_VENDOR" ]]; then - GetOSVersion - fi - - [[ "$os_VENDOR" =~ (^SUSE) ]] -} - - # Determine if current distribution is an Ubuntu-based distribution # It will also detect non-Ubuntu but Debian-based distros # is_ubuntu @@ -1168,8 +1127,6 @@ function _get_package_dir { pkg_dir=$base_dir/debs elif is_fedora; then pkg_dir=$base_dir/rpms - elif is_suse; then - pkg_dir=$base_dir/rpms-suse else exit_distro_not_supported "list of packages" fi @@ -1444,8 +1401,6 @@ function real_install_package { apt_get install "$@" elif is_fedora; then yum_install "$@" - elif is_suse; then - zypper_install "$@" else exit_distro_not_supported "installing packages" fi @@ -1487,8 +1442,6 @@ function uninstall_package { apt_get purge "$@" elif is_fedora; then sudo dnf remove -y "$@" ||: - elif is_suse; then - sudo zypper remove -y "$@" ||: else exit_distro_not_supported "uninstalling packages" fi diff --git a/inc/python b/inc/python index 3eb3efe80e..a24f4e910a 100644 --- a/inc/python +++ b/inc/python @@ -7,7 +7,6 @@ # External functions used: # - GetOSVersion # - is_fedora -# - is_suse # - safe_chown # Save trace setting @@ -62,7 +61,6 @@ function get_python_exec_prefix { $xtrace local PYTHON_PATH=/usr/local/bin - is_suse && PYTHON_PATH=/usr/bin echo $PYTHON_PATH } @@ -462,8 +460,6 @@ function install_python { function install_python3 { if is_ubuntu; then apt_get install python${PYTHON3_VERSION} python${PYTHON3_VERSION}-dev - elif is_suse; then - install_package python3-devel python3-dbm elif is_fedora; then if [ "$os_VENDOR" = "Fedora" ]; then install_package python${PYTHON3_VERSION//.} diff --git a/lib/apache b/lib/apache index dd8c9a0f06..4d68b49767 100644 --- a/lib/apache +++ b/lib/apache @@ -44,10 +44,6 @@ elif is_fedora; then APACHE_NAME=httpd APACHE_CONF_DIR=${APACHE_CONF_DIR:-/etc/$APACHE_NAME/conf.d} APACHE_SETTINGS_DIR=${APACHE_SETTINGS_DIR:-/etc/$APACHE_NAME/conf.d} -elif is_suse; then - APACHE_NAME=apache2 - APACHE_CONF_DIR=${APACHE_CONF_DIR:-/etc/$APACHE_NAME/vhosts.d} - APACHE_SETTINGS_DIR=${APACHE_SETTINGS_DIR:-/etc/$APACHE_NAME/conf.d} fi APACHE_LOG_DIR="/var/log/${APACHE_NAME}" @@ -65,11 +61,6 @@ function enable_apache_mod { sudo a2enmod $mod restart_apache_server fi - elif is_suse; then - if ! a2enmod -q $mod ; then - sudo a2enmod $mod - restart_apache_server - fi elif is_fedora; then # pass true @@ -104,10 +95,6 @@ function install_apache_uwsgi { # Thus there is nothing else to do after this install install_package uwsgi \ uwsgi-plugin-python3 - elif [[ $os_VENDOR =~ openSUSE ]]; then - install_package uwsgi \ - uwsgi-python3 \ - apache2-mod_uwsgi else # Compile uwsgi from source. 
local dir @@ -125,7 +112,7 @@ function install_apache_uwsgi { sudo rm -rf $dir fi - if is_ubuntu || is_suse ; then + if is_ubuntu; then # we've got to enable proxy and proxy_uwsgi for this to work sudo a2enmod proxy sudo a2enmod proxy_uwsgi @@ -155,8 +142,6 @@ function install_apache_wsgi { sudo sed -i '/mod_mpm_prefork.so/s/^/#/g' /etc/httpd/conf.modules.d/00-mpm.conf sudo sed -i '/mod_mpm_event.so/s/^/#/g' /etc/httpd/conf.modules.d/00-mpm.conf sudo sed -i '/mod_mpm_worker.so/s/^#//g' /etc/httpd/conf.modules.d/00-mpm.conf - elif is_suse; then - install_package apache2 apache2-mod_wsgi else exit_distro_not_supported "apache wsgi installation" fi @@ -171,7 +156,7 @@ function install_apache_wsgi { # recognise it. a2ensite and a2dissite ignore the .conf suffix used as parameter. The default sites' # files are 000-default.conf and default-ssl.conf. # -# On Fedora and openSUSE, any file in /etc/httpd/conf.d/ whose name ends with .conf is enabled. +# On Fedora, any file in /etc/httpd/conf.d/ whose name ends with .conf is enabled. # # On RHEL and CentOS, things should hopefully work as in Fedora. # @@ -187,7 +172,7 @@ function apache_site_config_for { if is_ubuntu; then # Ubuntu 14.04 - Apache 2.4 echo $APACHE_CONF_DIR/${site}.conf - elif is_fedora || is_suse; then + elif is_fedora; then # fedora conf.d is only imported if it ends with .conf so this is approx the same local enabled_site_file="$APACHE_CONF_DIR/${site}.conf" if [ -f $enabled_site_file ]; then @@ -205,7 +190,7 @@ function enable_apache_site { enable_apache_mod version if is_ubuntu; then sudo a2ensite ${site} - elif is_fedora || is_suse; then + elif is_fedora; then local enabled_site_file="$APACHE_CONF_DIR/${site}.conf" # Do nothing if site already enabled or no site config exists if [[ -f ${enabled_site_file}.disabled ]] && [[ ! -f ${enabled_site_file} ]]; then @@ -219,7 +204,7 @@ function disable_apache_site { local site=$@ if is_ubuntu; then sudo a2dissite ${site} || true - elif is_fedora || is_suse; then + elif is_fedora; then local enabled_site_file="$APACHE_CONF_DIR/${site}.conf" # Do nothing if no site config exists if [[ -f ${enabled_site_file} ]]; then diff --git a/lib/cinder b/lib/cinder index 2424f928d1..602e8dad0e 100644 --- a/lib/cinder +++ b/lib/cinder @@ -117,8 +117,8 @@ else fi -# EL and SUSE should only use lioadm -if is_fedora || is_suse; then +# EL should only use lioadm +if is_fedora; then if [[ ${CINDER_TARGET_HELPER} != "lioadm" && ${CINDER_TARGET_HELPER} != 'nvmet' ]]; then die "lioadm and nvmet are the only valid Cinder target_helper config on this platform" fi @@ -595,11 +595,6 @@ function start_cinder { _configure_tgt_for_config_d if is_ubuntu; then sudo service tgt restart - elif is_suse; then - # NOTE(dmllr): workaround restart bug - # https://bugzilla.suse.com/show_bug.cgi?id=934642 - stop_service tgtd - start_service tgtd else restart_service tgtd fi diff --git a/lib/databases/mysql b/lib/databases/mysql index fbad44e36a..ed8006e7db 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -20,12 +20,6 @@ if [[ -z "$MYSQL_SERVICE_NAME" ]]; then MYSQL_SERVICE_NAME=mysql if is_fedora && ! is_oraclelinux; then MYSQL_SERVICE_NAME=mariadb - elif is_suse && systemctl list-unit-files | grep -q 'mariadb\.service'; then - # Older mariadb packages on SLES 12 provided mysql.service. The - # newer ones on SLES 12 and 15 use mariadb.service; they also - # provide a mysql.service symlink for backwards-compatibility, but - # let's not rely on that. 
- MYSQL_SERVICE_NAME=mariadb elif [[ "$DISTRO" == "bullseye" ]]; then MYSQL_SERVICE_NAME=mariadb fi @@ -54,7 +48,7 @@ function cleanup_database_mysql { elif is_oraclelinux; then uninstall_package mysql-community-server sudo rm -rf /var/lib/mysql - elif is_suse || is_fedora; then + elif is_fedora; then uninstall_package mariadb-server sudo rm -rf /var/lib/mysql else @@ -74,7 +68,7 @@ function configure_database_mysql { if is_ubuntu; then my_conf=/etc/mysql/my.cnf - elif is_suse || is_oraclelinux; then + elif is_oraclelinux; then my_conf=/etc/my.cnf elif is_fedora; then my_conf=/etc/my.cnf @@ -90,7 +84,7 @@ function configure_database_mysql { iniset -sudo $my_conf mysqld bind-address "$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)" # (Re)Start mysql-server - if is_fedora || is_suse; then + if is_fedora; then # service is not started by default start_service $MYSQL_SERVICE_NAME elif is_ubuntu; then @@ -212,9 +206,6 @@ EOF elif is_fedora; then install_package mariadb-server mariadb-devel mariadb sudo systemctl enable $MYSQL_SERVICE_NAME - elif is_suse; then - install_package mariadb-server - sudo systemctl enable $MYSQL_SERVICE_NAME elif is_ubuntu; then install_package $MYSQL_SERVICE_NAME-server else diff --git a/lib/databases/postgresql b/lib/databases/postgresql index 4f0a5a0a4c..b21418b75e 100644 --- a/lib/databases/postgresql +++ b/lib/databases/postgresql @@ -32,7 +32,7 @@ function cleanup_database_postgresql { # Get ruthless with mysql apt_get purge -y postgresql* return - elif is_fedora || is_suse; then + elif is_fedora; then uninstall_package postgresql-server else return @@ -66,11 +66,6 @@ function configure_database_postgresql { pg_dir=`find /etc/postgresql -name pg_hba.conf|xargs dirname` pg_hba=$pg_dir/pg_hba.conf pg_conf=$pg_dir/postgresql.conf - elif is_suse; then - pg_hba=/var/lib/pgsql/data/pg_hba.conf - pg_conf=/var/lib/pgsql/data/postgresql.conf - # initdb is called when postgresql is first started - sudo [ -e $pg_hba ] || start_service postgresql else exit_distro_not_supported "postgresql configuration" fi @@ -107,7 +102,7 @@ EOF if [[ "$INSTALL_DATABASE_SERVER_PACKAGES" == "True" ]]; then if is_ubuntu; then install_package postgresql - elif is_fedora || is_suse; then + elif is_fedora; then install_package postgresql-server if is_fedora; then sudo systemctl enable postgresql diff --git a/lib/glance b/lib/glance index 041acafc92..5aeae16c61 100644 --- a/lib/glance +++ b/lib/glance @@ -47,10 +47,6 @@ USE_CINDER_FOR_GLANCE=$(trueorfalse False USE_CINDER_FOR_GLANCE) # from CINDER_ENABLED_BACKENDS GLANCE_CINDER_DEFAULT_BACKEND=${GLANCE_CINDER_DEFAULT_BACKEND:-lvmdriver-1} GLANCE_STORE_ROOTWRAP_BASE_DIR=/usr/local/etc/glance -# NOTE (abhishekk): For opensuse data files are stored in different directory -if is_opensuse; then - GLANCE_STORE_ROOTWRAP_BASE_DIR=/usr/etc/glance -fi # When Cinder is used as a glance store, you can optionally configure cinder to # optimize bootable volume creation by allowing volumes to be cloned directly # in the backend instead of transferring data via Glance. 
To use this feature, diff --git a/lib/horizon b/lib/horizon index b2bf7bcb49..f76f9e557d 100644 --- a/lib/horizon +++ b/lib/horizon @@ -129,7 +129,7 @@ function configure_horizon { if is_ubuntu; then disable_apache_site 000-default sudo touch $horizon_conf - elif is_fedora || is_suse; then + elif is_fedora; then : # nothing to do else exit_distro_not_supported "horizon apache configuration" diff --git a/lib/ldap b/lib/ldap index ea5faa1fe9..b0195db258 100644 --- a/lib/ldap +++ b/lib/ldap @@ -39,13 +39,6 @@ elif is_fedora; then LDAP_OLCDB_NUMBER=2 LDAP_OLCDB_TYPE=hdb LDAP_ROOTPW_COMMAND=add -elif is_suse; then - # SUSE has slappasswd in /usr/sbin/ - PATH=$PATH:/usr/sbin/ - LDAP_OLCDB_NUMBER=1 - LDAP_OLCDB_TYPE=hdb - LDAP_ROOTPW_COMMAND=add - LDAP_SERVICE_NAME=ldap fi @@ -76,8 +69,6 @@ function cleanup_ldap { sudo rm -rf /etc/ldap/ldap.conf /var/lib/ldap elif is_fedora; then sudo rm -rf /etc/openldap /var/lib/ldap - elif is_suse; then - sudo rm -rf /var/lib/ldap fi } @@ -126,11 +117,6 @@ function install_ldap { configure_ldap elif is_fedora; then start_ldap - elif is_suse; then - _ldap_varsubst $FILES/ldap/suse-base-config.ldif.in >$tmp_ldap_dir/suse-base-config.ldif - sudo slapadd -F /etc/openldap/slapd.d/ -bcn=config -l $tmp_ldap_dir/suse-base-config.ldif - sudo sed -i '/^OPENLDAP_START_LDAPI=/s/"no"/"yes"/g' /etc/sysconfig/openldap - start_ldap fi echo "LDAP_PASSWORD is $LDAP_PASSWORD" diff --git a/lib/lvm b/lib/lvm index 57ffb967c3..57d2cd4e62 100644 --- a/lib/lvm +++ b/lib/lvm @@ -129,8 +129,8 @@ function init_lvm_volume_group { local vg=$1 local size=$2 - # Start the tgtd service on Fedora and SUSE if tgtadm is used - if is_fedora || is_suse && [[ "$CINDER_TARGET_HELPER" = "tgtadm" ]]; then + # Start the tgtd service on Fedora if tgtadm is used + if is_fedora; then start_service tgtd fi diff --git a/lib/neutron_plugins/ovs_base b/lib/neutron_plugins/ovs_base index cc41a8cd46..adabc56412 100644 --- a/lib/neutron_plugins/ovs_base +++ b/lib/neutron_plugins/ovs_base @@ -80,19 +80,6 @@ function _neutron_ovs_base_install_agent_packages { elif is_fedora; then restart_service openvswitch sudo systemctl enable openvswitch - elif is_suse; then - if [[ $DISTRO == "sle12" ]] && vercmp "$os_RELEASE" "<" "12.2" ; then - restart_service openvswitch-switch - else - # workaround for https://bugzilla.suse.com/show_bug.cgi?id=1085971 - if [[ $DISTRO =~ "tumbleweed" ]]; then - sudo sed -i -e "s,^OVS_USER_ID=.*,OVS_USER_ID='root:root'," /etc/sysconfig/openvswitch - fi - restart_service openvswitch || { - journalctl -xe || : - systemctl status openvswitch - } - fi fi fi } diff --git a/lib/neutron_plugins/ovs_source b/lib/neutron_plugins/ovs_source index ea71e60e68..288eb1d69b 100644 --- a/lib/neutron_plugins/ovs_source +++ b/lib/neutron_plugins/ovs_source @@ -182,12 +182,6 @@ function action_openvswitch { ${action}_service openvswitch-switch elif is_fedora; then ${action}_service openvswitch - elif is_suse; then - if [[ $DISTRO == "sle12" ]] && [[ $os_RELEASE -lt 12.2 ]]; then - ${action}_service openvswitch-switch - else - ${action}_service openvswitch - fi fi } diff --git a/lib/nova b/lib/nova index 3aa6b9e3b3..f34e823074 100644 --- a/lib/nova +++ b/lib/nova @@ -448,8 +448,8 @@ function create_nova_conf { iniset $NOVA_CONF key_manager backend nova.keymgr.conf_key_mgr.ConfKeyManager - if is_fedora || is_suse; then - # nova defaults to /usr/local/bin, but fedora and suse pip like to + if is_fedora; then + # nova defaults to /usr/local/bin, but fedora pip like to # install things in /usr/bin iniset 
$NOVA_CONF DEFAULT bindir "/usr/bin" fi @@ -523,7 +523,7 @@ function create_nova_conf { # nova defaults to genisoimage but only mkisofs is available for 15.0+ # rhel provides mkisofs symlink to genisoimage or xorriso appropiately - if is_suse || is_fedora; then + if is_fedora; then iniset $NOVA_CONF DEFAULT mkisofs_cmd /usr/bin/mkisofs fi diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt index c0e45ebb85..799230603c 100644 --- a/lib/nova_plugins/functions-libvirt +++ b/lib/nova_plugins/functions-libvirt @@ -74,7 +74,7 @@ function install_libvirt { install_package qemu-efi fi #pip_install_gr - elif is_fedora || is_suse; then + elif is_fedora; then # Optionally enable the virt-preview repo when on Fedora if [[ $DISTRO =~ f[0-9][0-9] ]] && [[ ${ENABLE_FEDORA_VIRT_PREVIEW_REPO} == "True" ]]; then @@ -121,8 +121,8 @@ cgroup_device_acl = [ EOF fi - if is_fedora || is_suse; then - # Starting with fedora 18 and opensuse-12.3 enable stack-user to + if is_fedora; then + # Starting with fedora 18 enable stack-user to # virsh -c qemu:///system by creating a policy-kit rule for # stack-user using the new Javascript syntax rules_dir=/etc/polkit-1/rules.d diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt index c1cd132548..87c3d3addc 100644 --- a/lib/nova_plugins/hypervisor-libvirt +++ b/lib/nova_plugins/hypervisor-libvirt @@ -114,9 +114,6 @@ function install_nova_hypervisor { sudo dpkg-statoverride --add --update $STAT_OVERRIDE fi done - elif is_suse; then - # Workaround for missing dependencies in python-libguestfs - install_package python-libguestfs guestfs-data augeas augeas-lenses elif is_fedora; then install_package python3-libguestfs fi diff --git a/lib/rpc_backend b/lib/rpc_backend index 743b4ae170..bbb41499be 100644 --- a/lib/rpc_backend +++ b/lib/rpc_backend @@ -52,20 +52,7 @@ function install_rpc_backend { if is_service_enabled rabbit; then # Install rabbitmq-server install_package rabbitmq-server - if is_suse; then - install_package rabbitmq-server-plugins - # the default systemd socket activation only listens on the loopback interface - # which causes rabbitmq to try to start its own epmd - sudo mkdir -p /etc/systemd/system/epmd.socket.d - cat </dev/null -[Socket] -ListenStream= -ListenStream=[::]:4369 -EOF - sudo systemctl daemon-reload - sudo systemctl restart epmd.socket epmd.service - fi - if is_fedora || is_suse; then + if is_fedora; then # NOTE(jangutter): If rabbitmq is not running (as in a fresh # install) then rabbit_setuser triggers epmd@0.0.0.0.socket with # socket activation. 
This fails the first time and does not get diff --git a/lib/swift b/lib/swift index 251c4625b5..1ebf073318 100644 --- a/lib/swift +++ b/lib/swift @@ -547,9 +547,6 @@ function configure_swift { local swift_log_dir=${SWIFT_DATA_DIR}/logs sudo rm -rf ${swift_log_dir} local swift_log_group=adm - if is_suse; then - swift_log_group=root - fi sudo install -d -o ${STACK_USER} -g ${swift_log_group} ${swift_log_dir}/hourly if [[ $SYSLOG != "False" ]]; then diff --git a/lib/tls b/lib/tls index b8758cd6d3..e0c7500b20 100644 --- a/lib/tls +++ b/lib/tls @@ -212,9 +212,6 @@ function init_CA { if is_fedora; then sudo cp $INT_CA_DIR/ca-chain.pem /usr/share/pki/ca-trust-source/anchors/devstack-chain.pem sudo update-ca-trust - elif is_suse; then - sudo cp $INT_CA_DIR/ca-chain.pem /usr/share/pki/trust/anchors/devstack-chain.pem - sudo update-ca-certificates elif is_ubuntu; then sudo cp $INT_CA_DIR/ca-chain.pem /usr/local/share/ca-certificates/devstack-int.crt sudo cp $ROOT_CA_DIR/cacert.pem /usr/local/share/ca-certificates/devstack-root.crt @@ -376,9 +373,6 @@ function fix_system_ca_bundle_path { elif is_ubuntu; then sudo rm -f $capath sudo ln -s /etc/ssl/certs/ca-certificates.crt $capath - elif is_suse; then - sudo rm -f $capath - sudo ln -s /etc/ssl/ca-bundle.pem $capath else echo "Don't know how to set the CA bundle, expect the install to fail." fi @@ -441,9 +435,6 @@ function enable_mod_ssl { if is_ubuntu; then sudo a2enmod ssl - elif is_suse; then - sudo a2enmod ssl - sudo a2enflag SSL elif is_fedora; then # Fedora enables mod_ssl by default : @@ -560,9 +551,6 @@ $listen_string CustomLog $APACHE_LOG_DIR/tls-proxy_access.log combined EOF - if is_suse ; then - sudo a2enflag SSL - fi for mod in headers ssl proxy proxy_http; do enable_apache_mod $mod done diff --git a/stack.sh b/stack.sh index 28576d1e14..8d450aadc1 100755 --- a/stack.sh +++ b/stack.sh @@ -229,7 +229,7 @@ write_devstack_version # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -SUPPORTED_DISTROS="bullseye|focal|jammy|f36|opensuse-15.2|opensuse-tumbleweed|rhel8|rhel9|openEuler-22.03" +SUPPORTED_DISTROS="bullseye|focal|jammy|f36|rhel8|rhel9|openEuler-22.03" if [[ ! ${DISTRO} =~ $SUPPORTED_DISTROS ]]; then echo "WARNING: this script has not been tested on $DISTRO" diff --git a/tests/test_package_ordering.sh b/tests/test_package_ordering.sh index bfc2a1954f..f221c821a0 100755 --- a/tests/test_package_ordering.sh +++ b/tests/test_package_ordering.sh @@ -8,7 +8,7 @@ TOP=$(cd $(dirname "$0")/.. && pwd) source $TOP/tests/unittest.sh export LC_ALL=en_US.UTF-8 -PKG_FILES=$(find $TOP/files/debs $TOP/files/rpms $TOP/files/rpms-suse -type f) +PKG_FILES=$(find $TOP/files/debs $TOP/files/rpms -type f) TMPDIR=$(mktemp -d) diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index daa1bc6301..0ec426b601 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -90,45 +90,6 @@ function fixup_fedora { fi } -function fixup_suse { - if ! is_suse; then - return - fi - - # Deactivate and disable apparmor profiles in openSUSE and SLE - # distros to avoid issues with haproxy and dnsmasq. 
In newer - # releases, systemctl stop apparmor is actually a no-op, so we - # have to use aa-teardown to make sure we've deactivated the - # profiles: - # - # https://www.suse.com/releasenotes/x86_64/SUSE-SLES/15/#fate-325343 - # https://gitlab.com/apparmor/apparmor/merge_requests/81 - # https://build.opensuse.org/package/view_file/openSUSE:Leap:15.2/apparmor/apparmor.service?expand=1 - if sudo systemctl is-active -q apparmor; then - sudo systemctl stop apparmor - fi - if [ -x /usr/sbin/aa-teardown ]; then - sudo /usr/sbin/aa-teardown - fi - if sudo systemctl is-enabled -q apparmor; then - sudo systemctl disable apparmor - fi - - # Since pip10, pip will refuse to uninstall files from packages - # that were created with distutils (rather than more modern - # setuptools). This is because it technically doesn't have a - # manifest of what to remove. However, in most cases, simply - # overwriting works. So this hacks around those packages that - # have been dragged in by some other system dependency - sudo rm -rf /usr/lib/python3.6/site-packages/ply-*.egg-info - sudo rm -rf /usr/lib/python3.6/site-packages/six-*.egg-info - - # Ensure trusted CA certificates are up to date - # See https://bugzilla.suse.com/show_bug.cgi?id=1154871 - # May be removed once a new opensuse-15 image is available in nodepool - sudo zypper up -y p11-kit ca-certificates-mozilla -} - function fixup_ovn_centos { if [[ $os_VENDOR != "CentOS" ]]; then return @@ -156,5 +117,4 @@ function fixup_ubuntu { function fixup_all { fixup_ubuntu fixup_fedora - fixup_suse } diff --git a/tools/install_prereqs.sh b/tools/install_prereqs.sh index a7c03d26cd..f2d57c8451 100755 --- a/tools/install_prereqs.sh +++ b/tools/install_prereqs.sh @@ -74,8 +74,6 @@ install_package $PACKAGES if [[ -n "$SYSLOG" && "$SYSLOG" != "False" ]]; then if is_ubuntu || is_fedora; then install_package rsyslog-relp - elif is_suse; then - install_package rsyslog-module-relp else exit_distro_not_supported "rsyslog-relp installation" fi From 7567359755a105e7278bbf97541332f28228b87d Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Mon, 13 Feb 2023 14:41:40 +0000 Subject: [PATCH 299/574] Try to reduce mysql memory usage These are a few tweaks I applied to my own memory-constrained cloud instances that seemed to help. I have lower performance requirements so this may make things worse and not better, but it's worth seeing what the impact is. I'll admit to not knowing the full impact of these as they're mostly collected from various tutorials on lowering memory usage. Enable this for now on devstack-multinode Change-Id: I7b223391d3de01e3e81b02076debd01d9d2f097c --- .zuul.yaml | 3 +++ lib/databases/mysql | 10 ++++++++++ stackrc | 5 +++++ 3 files changed, 18 insertions(+) diff --git a/.zuul.yaml b/.zuul.yaml index 8e20f6ed34..fa7f180797 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -696,6 +696,9 @@ description: | Simple multinode test to verify multinode functionality on devstack side. This is not meant to be used as a parent job. 
+ vars: + devstack_localrc: + MYSQL_REDUCE_MEMORY: true # NOTE(ianw) Platform tests have traditionally been non-voting because # we often have to rush things through devstack to stabilise the gate, diff --git a/lib/databases/mysql b/lib/databases/mysql index fbad44e36a..e805b3e73f 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -176,6 +176,16 @@ function configure_database_mysql { count INT, PRIMARY KEY (db, op)) ENGINE MEMORY" stats fi + if [[ "$MYSQL_REDUCE_MEMORY" == "True" ]]; then + iniset -sudo $my_conf mysqld read_buffer_size 64K + iniset -sudo $my_conf mysqld innodb_buffer_pool_size 16M + iniset -sudo $my_conf mysqld thread_stack 192K + iniset -sudo $my_conf mysqld thread_cache_size 8 + iniset -sudo $my_conf mysqld tmp_table_size 8M + iniset -sudo $my_conf mysqld sort_buffer_size 8M + iniset -sudo $my_conf mysqld max_allowed_packet 8M + fi + restart_service $MYSQL_SERVICE_NAME } diff --git a/stackrc b/stackrc index a05d1e5553..442e9a0351 100644 --- a/stackrc +++ b/stackrc @@ -201,6 +201,11 @@ DATABASE_QUERY_LOGGING=$(trueorfalse False DATABASE_QUERY_LOGGING) # performance_schema that are of interest to us MYSQL_GATHER_PERFORMANCE=$(trueorfalse True MYSQL_GATHER_PERFORMANCE) +# This can be used to reduce the amount of memory mysqld uses while running. +# These are unscientifically determined, and could reduce performance or +# cause other issues. +MYSQL_REDUCE_MEMORY=$(trueorfalse False MYSQL_REDUCE_MEMORY) + # Set a timeout for git operations. If git is still running when the # timeout expires, the command will be retried up to 3 times. This is # in the format for timeout(1); From 37d11d00e56ec6ff402a13a28ec308c13291a937 Mon Sep 17 00:00:00 2001 From: Ghanshyam Date: Tue, 21 Feb 2023 21:41:40 +0000 Subject: [PATCH 300/574] Revert "Bump cirros version to 0.6.1" This reverts commit 91efe177b170c3874989affc73842dc4ffbe062d. Reason for revert: it broke tempest-slow job https://9afe3d390e4175b60a80-89b1085289883615a17bd93ef47f6ca9.ssl.cf5.rackcdn.com/871018/13/gate/tempest-slow-py3/d139ae1/testr_results.html Change-Id: Ib74e51a780d3e8101f4147db9d24eebea4980fb1 --- doc/source/guides/nova.rst | 2 +- stackrc | 15 ++++++++------- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/doc/source/guides/nova.rst b/doc/source/guides/nova.rst index d0fb274c13..5b427972c4 100644 --- a/doc/source/guides/nova.rst +++ b/doc/source/guides/nova.rst @@ -122,7 +122,7 @@ when creating the server, for example: .. code-block:: shell $ openstack --os-compute-api-version 2.37 server create --flavor cirros256 \ - --image cirros-0.6.1-x86_64-disk --nic none --wait test-server + --image cirros-0.3.5-x86_64-disk --nic none --wait test-server .. note:: ``--os-compute-api-version`` greater than or equal to 2.37 is required to use ``--nic=none``. diff --git a/stackrc b/stackrc index a71d843362..b3130e5f7f 100644 --- a/stackrc +++ b/stackrc @@ -657,19 +657,20 @@ esac # If the file ends in .tar.gz, uncompress the tarball and and select the first # .img file inside it as the image. If present, use "*-vmlinuz*" as the kernel # and "*-initrd*" as the ramdisk -# example: https://cloud-images.ubuntu.com/releases/jammy/release/ubuntu-22.04-server-cloudimg-amd64.tar.gz +# example: http://cloud-images.ubuntu.com/releases/precise/release/ubuntu-12.04-server-cloudimg-amd64.tar.gz # * disk image (*.img,*.img.gz) # if file ends in .img, then it will be uploaded and registered as a to # glance as a disk image. If it ends in .gz, it is uncompressed first. 
# example: -# https://cloud-images.ubuntu.com/releases/jammy/release/ubuntu-22.04-server-cloudimg-amd64.img -# https://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs.img.gz +# http://cloud-images.ubuntu.com/releases/precise/release/ubuntu-12.04-server-cloudimg-armel-disk1.img +# http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs.img.gz # * OpenVZ image: # OpenVZ uses its own format of image, and does not support UEC style images -#IMAGE_URLS="https://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img" # cirros full disk image +#IMAGE_URLS="http://smoser.brickies.net/ubuntu/ttylinux-uec/ttylinux-uec-amd64-11.2_2.6.35-15_1.tar.gz" # old ttylinux-uec image +#IMAGE_URLS="http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img" # cirros full disk image -CIRROS_VERSION=${CIRROS_VERSION:-"0.6.1"} +CIRROS_VERSION=${CIRROS_VERSION:-"0.5.2"} CIRROS_ARCH=${CIRROS_ARCH:-$(uname -m)} # Set default image based on ``VIRT_DRIVER`` and ``LIBVIRT_TYPE``, either of @@ -686,11 +687,11 @@ if [[ "$DOWNLOAD_DEFAULT_IMAGES" == "True" ]]; then lxc) # the cirros root disk in the uec tarball is empty, so it will not work for lxc DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs} DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_FILE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs.img.gz} - IMAGE_URLS+="https://download.cirros-cloud.net/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";; + IMAGE_URLS+="http://download.cirros-cloud.net/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";; *) # otherwise, use the qcow image DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk} DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_FILE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img} - IMAGE_URLS+="https://download.cirros-cloud.net/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";; + IMAGE_URLS+="http://download.cirros-cloud.net/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";; esac ;; vsphere) From 0572d73f8561f3304f897bf9ee2f63c406cc21b7 Mon Sep 17 00:00:00 2001 From: Nobuhiro MIKI Date: Wed, 22 Feb 2023 10:38:49 +0900 Subject: [PATCH 301/574] Disable memory_tracker and file_tracker in unstask.sh properly stop_dstat() calls stop_process() for dstat, memory_tracker and file_tracker respectively. Inside stop_process(), a check for the existence of the service is performed by is_service_enabled(). So even if we apply this seemingly dangerous commit, is_service_enabled() is respected, so it's safe. Closes-Bug: #1998990 Change-Id: Ica58cdb1d60c4c796f582d82ed2cde0be94b1a7e Signed-off-by: Nobuhiro MIKI --- unstack.sh | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/unstack.sh b/unstack.sh index a36af3fb59..33b069b6a3 100755 --- a/unstack.sh +++ b/unstack.sh @@ -168,9 +168,7 @@ if is_service_enabled etcd3; then cleanup_etcd3 fi -if is_service_enabled dstat; then - stop_dstat -fi +stop_dstat # NOTE: Cinder automatically installs the lvm2 package, independently of the # enabled backends. So if Cinder is enabled, and installed successfully we are From 03c3fd763e301077ecfa0a2d3428a091bedd691d Mon Sep 17 00:00:00 2001 From: Yamato Tanaka Date: Fri, 10 Feb 2023 19:44:20 +0900 Subject: [PATCH 302/574] Support RHEL 9 This patch includes changes required to run devstack on RHEL 9. 
- en_US.utf8 is provided by glibc-langpack-en - iptables command is provided by iptables-nft - Use /etc/os-release to identify the distro in RHEL 9 as it doesn't provide lsb_release command. - CRB repository name is different from CentOS 9 Change-Id: I8f6d9263b24f9c2cf82e09258e2d14d7766ad337 --- files/rpms/general | 2 ++ functions-common | 5 +++-- stack.sh | 3 +++ 3 files changed, 8 insertions(+), 2 deletions(-) diff --git a/files/rpms/general b/files/rpms/general index b6866de62d..8a5755cc37 100644 --- a/files/rpms/general +++ b/files/rpms/general @@ -6,9 +6,11 @@ gcc gcc-c++ gettext # used for compiling message catalogs git-core +glibc-langpack-en # dist:rhel9 graphviz # needed only for docs httpd httpd-devel +iptables-nft # dist:rhel9 iptables-services java-1.8.0-openjdk-headless libffi-devel diff --git a/functions-common b/functions-common index 4eed5d8407..3e07a49e22 100644 --- a/functions-common +++ b/functions-common @@ -412,9 +412,9 @@ function _ensure_lsb_release { # - os_VENDOR # - os_PACKAGE function GetOSVersion { - # CentOS Stream 9 does not provide lsb_release + # CentOS Stream 9 and RHEL 9 do not provide lsb_release source /etc/os-release - if [[ "${ID}${VERSION}" == "centos9" ]]; then + if [[ "${ID}${VERSION}" == "centos9" ]] || [[ "${ID}${VERSION}" =~ "rhel9" ]]; then os_RELEASE=${VERSION_ID} os_CODENAME="n/a" os_VENDOR=$(echo $NAME | tr -d '[:space:]') @@ -530,6 +530,7 @@ function is_fedora { [ "$os_VENDOR" = "openEuler" ] || \ [ "$os_VENDOR" = "RedHatEnterpriseServer" ] || \ [ "$os_VENDOR" = "RedHatEnterprise" ] || \ + [ "$os_VENDOR" = "RedHatEnterpriseLinux" ] || \ [ "$os_VENDOR" = "Rocky" ] || \ [ "$os_VENDOR" = "CentOS" ] || [ "$os_VENDOR" = "CentOSStream" ] || \ [ "$os_VENDOR" = "AlmaLinux" ] || \ diff --git a/stack.sh b/stack.sh index ccd2d16baa..6e7b11a0bc 100755 --- a/stack.sh +++ b/stack.sh @@ -391,7 +391,10 @@ if [[ $DISTRO == "rhel8" ]]; then # Patch: https://github.com/rpm-software-management/dnf/pull/1448 echo "[]" | sudo tee /var/cache/dnf/expired_repos.json elif [[ $DISTRO == "rhel9" ]]; then + # for CentOS Stream 9 repository sudo dnf config-manager --set-enabled crb + # for RHEL 9 repository + sudo dnf config-manager --set-enabled codeready-builder-for-rhel-9-x86_64-rpms # rabbitmq and other packages are provided by RDO repositories. _install_rdo From f834f9adaf9c228ff4ec6a5e24e6d4cf3ca6a992 Mon Sep 17 00:00:00 2001 From: Michael Johnson Date: Mon, 6 Mar 2023 18:47:03 +0000 Subject: [PATCH 303/574] Fix NotImplementedError in dbcounter on SQLA 2.x This patch fixes a NotImplementedError raised in the dbcounter plugin when using SQLAlchemy 2.x. The plugin signature has changed and now requires an "update_url" method as part of the plugin[1]. This patch also updates the do_incr() explicit SQL string to use a TextClause and the new requirement for named bound parameters[2]. 
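For a quick, illustrative sanity check that the plugin keeps accumulating counters after this update, the rows it writes can be inspected directly (this assumes the default "stats" database and "queries" table that devstack creates for dbcounter):

    # show the busiest (db, op) pairs recorded by dbcounter
    sudo mysql stats -e "SELECT db, op, count FROM queries ORDER BY count DESC LIMIT 10;"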
Closes-Bug: #2009521 [1] https://docs.sqlalchemy.org/en/20/changelog/migration_14.html#changes-to-createengineplugin [2] https://docs.sqlalchemy.org/en/20/changelog/migration_20.html#execute-method-more-strict-execution-options-are-more-prominent Change-Id: Ie5484597057a3306757cc46b657446ad61ac2098 --- ...ementedError-on-SQLAlchemy-2-21bb6dcdf3ce4225.yaml | 5 +++++ tools/dbcounter/dbcounter.py | 11 +++++++---- 2 files changed, 12 insertions(+), 4 deletions(-) create mode 100644 releasenotes/notes/Fix-dbcounter-NotImplementedError-on-SQLAlchemy-2-21bb6dcdf3ce4225.yaml diff --git a/releasenotes/notes/Fix-dbcounter-NotImplementedError-on-SQLAlchemy-2-21bb6dcdf3ce4225.yaml b/releasenotes/notes/Fix-dbcounter-NotImplementedError-on-SQLAlchemy-2-21bb6dcdf3ce4225.yaml new file mode 100644 index 0000000000..f815e14ccb --- /dev/null +++ b/releasenotes/notes/Fix-dbcounter-NotImplementedError-on-SQLAlchemy-2-21bb6dcdf3ce4225.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixes a NotImplementedError when using the dbcounter SQLAlchemy plugin on + SQLAlchemy 2.x. diff --git a/tools/dbcounter/dbcounter.py b/tools/dbcounter/dbcounter.py index 5057f0f393..0ed7bb813a 100644 --- a/tools/dbcounter/dbcounter.py +++ b/tools/dbcounter/dbcounter.py @@ -40,6 +40,9 @@ def __init__(self, url, kwargs): self.queue = queue.Queue() self.thread = None + def update_url(self, url): + return url.difference_update_query(["dbcounter"]) + def engine_created(self, engine): """Hook the engine creation process. @@ -77,12 +80,12 @@ def _log_event(self, conn, cursor, statement, parameters, context, def do_incr(self, db, op, count): """Increment the counter for (db,op) by count.""" - query = ('INSERT INTO queries (db, op, count) ' - ' VALUES (%s, %s, %s) ' - ' ON DUPLICATE KEY UPDATE count=count+%s') + query = sqlalchemy.text('INSERT INTO queries (db, op, count) ' + ' VALUES (:db, :op, :count) ' + ' ON DUPLICATE KEY UPDATE count=count+:count') try: with self.engine.begin() as conn: - r = conn.execute(query, (db, op, count, count)) + r = conn.execute(query, {'db': db, 'op': op, 'count': count}) except Exception as e: LOG.error('Failed to account for access to database %r: %s', db, e) From 07a7293721736e1184ae7dc22da33b2ce7babf61 Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Fri, 10 Mar 2023 20:30:53 -0600 Subject: [PATCH 304/574] Update DEVSTACK_SERIES to 2023.2 stable/2023.1 branch has been created now and current master is for 2023.2. Change-Id: Ibd499ac35a38a5c1818c1df6009c5273ef3e90f7 --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index 442e9a0351..b7ce238366 100644 --- a/stackrc +++ b/stackrc @@ -248,7 +248,7 @@ REQUIREMENTS_DIR=${REQUIREMENTS_DIR:-$DEST/requirements} # Setting the variable to 'ALL' will activate the download for all # libraries. -DEVSTACK_SERIES="2023.1" +DEVSTACK_SERIES="2023.2" ############## # From 1898a683be78622445e48f1f071cf7188ab19450 Mon Sep 17 00:00:00 2001 From: Rajat Dhasmana Date: Tue, 14 Mar 2023 05:35:33 +0000 Subject: [PATCH 305/574] Create multiattach volume type for tempest Creating multiattach volume is a non-admin operation but creating multiattach volume type is an admin operation. Previously cinder allowed creating multiattach volumes without a volume type but that support is being removed with[1]. The change requires updating tempest tests[2] but some tempest tests are non-admin, which require admin priviledges to create the multiattach volume type. 
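For context, the manual admin step being automated here is roughly the following, using the standard Cinder multiattach extra spec (shown only as an illustration; in devstack the actual type name comes from VOLUME_TYPE_MULTIATTACH):

    # admin-only: create a volume type whose volumes may be attached to several instances
    openstack volume type create --property multiattach="<is> True" multiattach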
Based on the last discussion with tempest team[3], the proposed solution is to create a multiattach volume type in devstack, if ENABLE_VOLUME_MULTIATTACH is True, and use it in tempest tests. Similar to how admins create multiattach volume types for non-admin users. This patch creates a multiattach volume type if ENABLE_VOLUME_MULTIATTACH is True. Also we set the multiattach type name as a tempest config option 'volume_type_multiattach'. [1] https://review.opendev.org/c/openstack/cinder/+/874865 [2] https://review.opendev.org/c/openstack/tempest/+/875372 [3] https://meetings.opendev.org/irclogs/%23openstack-cinder/%23openstack-cinder.2023-03-13.log.html#t2023-03-13T18:47:56 Change-Id: Icd3690565bf7b27898cd206641e612da3993703d --- lib/cinder | 34 +++++++++++++++++++++++----------- lib/tempest | 4 ++++ 2 files changed, 27 insertions(+), 11 deletions(-) diff --git a/lib/cinder b/lib/cinder index 2424f928d1..c50a205f20 100644 --- a/lib/cinder +++ b/lib/cinder @@ -95,6 +95,7 @@ CINDER_ENABLED_BACKENDS=${CINDER_ENABLED_BACKENDS:-lvm:lvmdriver-1} CINDER_VOLUME_CLEAR=${CINDER_VOLUME_CLEAR:-${CINDER_VOLUME_CLEAR_DEFAULT:-zero}} CINDER_VOLUME_CLEAR=$(echo ${CINDER_VOLUME_CLEAR} | tr '[:upper:]' '[:lower:]') +VOLUME_TYPE_MULTIATTACH=${VOLUME_TYPE_MULTIATTACH:-multiattach} if [[ -n "$CINDER_ISCSI_HELPER" ]]; then if [[ -z "$CINDER_TARGET_HELPER" ]]; then @@ -649,6 +650,23 @@ function stop_cinder { stop_process c-vol } +function create_one_type { + type_name=$1 + property_key=$2 + property_value=$3 + # NOTE (e0ne): openstack client doesn't work with cinder in noauth mode + if is_service_enabled keystone; then + openstack --os-region-name="$REGION_NAME" volume type create --property $property_key="$property_value" $type_name + else + # TODO (e0ne): use openstack client once it will support cinder in noauth mode: + # https://bugs.launchpad.net/python-cinderclient/+bug/1755279 + local cinder_url + cinder_url=$CINDER_SERVICE_PROTOCOL://$SERVICE_HOST:$CINDER_SERVICE_PORT/v3 + OS_USER_ID=$OS_USERNAME OS_PROJECT_ID=$OS_PROJECT_NAME cinder --os-auth-type noauth --os-endpoint=$cinder_url type-create $type_name + OS_USER_ID=$OS_USERNAME OS_PROJECT_ID=$OS_PROJECT_NAME cinder --os-auth-type noauth --os-endpoint=$cinder_url type-key $type_name set $property_key="$property_value" + fi +} + # create_volume_types() - Create Cinder's configured volume types function create_volume_types { # Create volume types @@ -656,19 +674,13 @@ function create_volume_types { local be be_name for be in ${CINDER_ENABLED_BACKENDS//,/ }; do be_name=${be##*:} - # NOTE (e0ne): openstack client doesn't work with cinder in noauth mode - if is_service_enabled keystone; then - openstack --os-region-name="$REGION_NAME" volume type create --property volume_backend_name="${be_name}" ${be_name} - else - # TODO (e0ne): use openstack client once it will support cinder in noauth mode: - # https://bugs.launchpad.net/python-cinderclient/+bug/1755279 - local cinder_url - cinder_url=$CINDER_SERVICE_PROTOCOL://$SERVICE_HOST:$CINDER_SERVICE_PORT/v3 - OS_USER_ID=$OS_USERNAME OS_PROJECT_ID=$OS_PROJECT_NAME cinder --os-auth-type noauth --os-endpoint=$cinder_url type-create ${be_name} - OS_USER_ID=$OS_USERNAME OS_PROJECT_ID=$OS_PROJECT_NAME cinder --os-auth-type noauth --os-endpoint=$cinder_url type-key ${be_name} set volume_backend_name=${be_name} - fi + create_one_type $be_name "volume_backend_name" $be_name done + if [[ $ENABLE_VOLUME_MULTIATTACH == "True" ]]; then + create_one_type $VOLUME_TYPE_MULTIATTACH $VOLUME_TYPE_MULTIATTACH " True" + fi + # 
Increase quota for the service project if glance is using cinder, # since it's likely to occasionally go above the default 10 in parallel # test execution. diff --git a/lib/tempest b/lib/tempest index c3d3e9ac30..7da9f17052 100644 --- a/lib/tempest +++ b/lib/tempest @@ -604,6 +604,10 @@ function configure_tempest { iniset $TEMPEST_CONFIG volume storage_protocol "$TEMPEST_STORAGE_PROTOCOL" fi + if [[ $ENABLE_VOLUME_MULTIATTACH == "True" ]]; then + iniset $TEMPEST_CONFIG volume volume_type_multiattach $VOLUME_TYPE_MULTIATTACH + fi + # Placement Features # Set the microversion range for placement. # Setting [None, latest] range of microversion which allow Tempest to run all microversions tests. From 80c3ffe154fd79e03d8c4258b500b77a26efa008 Mon Sep 17 00:00:00 2001 From: Sean Mooney Date: Mon, 27 Mar 2023 20:56:20 +0000 Subject: [PATCH 306/574] Fix reboot on fedora like nodes This change enables httpd in systemd so that it starts after a reboot and updates how selinux is disabled to use /etc/selinux/config in addtion to setenforce. Change-Id: I5ea8693c0b967937483bd921b1d9984ea14bc723 --- lib/apache | 2 ++ tools/fixup_stuff.sh | 6 ++++++ 2 files changed, 8 insertions(+) diff --git a/lib/apache b/lib/apache index dd8c9a0f06..771a7d7ec0 100644 --- a/lib/apache +++ b/lib/apache @@ -150,6 +150,8 @@ function install_apache_wsgi { elif is_fedora; then sudo rm -f /etc/httpd/conf.d/000-* install_package httpd python3-mod_wsgi + # rpm distros dont enable httpd by default so enable it to support reboots. + sudo systemctl enable httpd # For consistency with Ubuntu, switch to the worker mpm, as # the default is event sudo sed -i '/mod_mpm_prefork.so/s/^/#/g' /etc/httpd/conf.modules.d/00-mpm.conf diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index daa1bc6301..fef47263de 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -36,6 +36,12 @@ function fixup_fedora { # Disable selinux to avoid configuring to allow Apache access # to Horizon files (LP#1175444) if selinuxenabled; then + #persit selinux config across reboots + cat << EOF | sudo tee /etc/selinux/config +SELINUX=permissive +SELINUXTYPE=targeted +EOF + # then disable at runtime sudo setenforce 0 fi From fa42b3ca7bbac7746644693241ea1dd58a4939f0 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Fri, 14 Apr 2023 02:16:59 +0000 Subject: [PATCH 307/574] Updated from generate-devstack-plugins-list Change-Id: I84015f860155e5c8ec3bcf54353d91405a13e549 --- doc/source/plugin-registry.rst | 1 - 1 file changed, 1 deletion(-) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 2e8e8f53d7..b244ca5dd8 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -88,7 +88,6 @@ openstack/openstacksdk `https://opendev.org/openstack/openstac openstack/osprofiler `https://opendev.org/openstack/osprofiler `__ openstack/oswin-tempest-plugin `https://opendev.org/openstack/oswin-tempest-plugin `__ openstack/ovn-octavia-provider `https://opendev.org/openstack/ovn-octavia-provider `__ -openstack/patrole `https://opendev.org/openstack/patrole `__ openstack/rally-openstack `https://opendev.org/openstack/rally-openstack `__ openstack/sahara `https://opendev.org/openstack/sahara `__ openstack/sahara-dashboard `https://opendev.org/openstack/sahara-dashboard `__ From 42517968ff7bdced07c5bc08b6cb2b8d10d246cc Mon Sep 17 00:00:00 2001 From: yatinkarel Date: Fri, 14 Apr 2023 19:06:03 +0530 Subject: [PATCH 308/574] [ovs] Reload ovs kernel module always Irrespective of build_modules is True or False 
reload ovs modules always. If ovs is installed from package before(like with multi-node-bridge role), then installing ovs from source requires openvswitch kernel module to be reloaded. The issue was not seen before jammy as there module was reloaded when build_modules was set to True. Closes-Bug: #2015364 Change-Id: I1785b49b2ef72ca1f817f504d5ea56021410c052 --- lib/neutron_plugins/ovs_source | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/lib/neutron_plugins/ovs_source b/lib/neutron_plugins/ovs_source index ea71e60e68..d0ca75334e 100644 --- a/lib/neutron_plugins/ovs_source +++ b/lib/neutron_plugins/ovs_source @@ -164,10 +164,8 @@ function compile_ovs { sudo make install if [[ "$build_modules" == "True" ]]; then sudo make INSTALL_MOD_DIR=kernel/net/openvswitch modules_install - reload_ovs_kernel_modules - else - load_ovs_kernel_modules fi + reload_ovs_kernel_modules cd $_pwd } From 15b2e429685fc753759ef8f3773ac559424e028f Mon Sep 17 00:00:00 2001 From: Ade Lee Date: Tue, 24 Jan 2023 14:44:13 +0100 Subject: [PATCH 309/574] Modify devstack-base to allow for fips devstack-base is changed to descend from openstack-multinode-fips which is defined in project-config. This allows jobs to execute the enable_fips playbook to enable FIPS mode on the node, but only if they opt-in by setting enable_fips to True. Otherwise, this is a no-op. Change-Id: I5631281662dbd18056ffba291290ed0978ab937e --- .zuul.yaml | 2 +- functions-common | 5 +++++ lib/databases/mysql | 11 ++++++++++- 3 files changed, 16 insertions(+), 2 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index fa7f180797..37625f3d11 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -370,7 +370,7 @@ - job: name: devstack-base - parent: multinode + parent: openstack-multinode-fips abstract: true description: | Base abstract Devstack job. diff --git a/functions-common b/functions-common index 4eed5d8407..844fffac37 100644 --- a/functions-common +++ b/functions-common @@ -2545,6 +2545,11 @@ function clean_pyc_files { fi } +function is_fips_enabled { + fips=`cat /proc/sys/crypto/fips_enabled` + [ "$fips" == "1" ] +} + # Restore xtrace $_XTRACE_FUNCTIONS_COMMON diff --git a/lib/databases/mysql b/lib/databases/mysql index e805b3e73f..bc6ce3d5c2 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -69,7 +69,7 @@ function recreate_database_mysql { } function configure_database_mysql { - local my_conf mysql slow_log + local my_conf mysql slow_log my_client_conf echo_summary "Configuring and starting MySQL" if is_ubuntu; then @@ -86,6 +86,15 @@ function configure_database_mysql { exit_distro_not_supported "mysql configuration" fi + # Set fips mode on + if is_ubuntu; then + if is_fips_enabled; then + my_client_conf=/etc/mysql/mysql.conf.d/mysql.cnf + iniset -sudo $my_client_conf mysql ssl-fips-mode "on" + iniset -sudo $my_conf mysqld ssl-fips-mode "on" + fi + fi + # Change bind-address from localhost (127.0.0.1) to any (::) iniset -sudo $my_conf mysqld bind-address "$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)" From 991a2794a3d6424f3b25cde471342846f9876470 Mon Sep 17 00:00:00 2001 From: yatinkarel Date: Mon, 17 Apr 2023 13:00:21 +0530 Subject: [PATCH 310/574] Fix name for neutron tempest uwsgi job This was renamed long back in [1]. 
[1] https://review.opendev.org/c/openstack/neutron/+/797051 Change-Id: If11e975fd890f55f99efc2c7d8122256ff831ad8 --- .zuul.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index fa7f180797..98f3353319 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -1047,7 +1047,7 @@ # * neutron-functional-with-uwsgi: maintained by neutron for functional # test. Next cycle we can remove this one if things turn out to be # stable engouh with uwsgi. - # * neutron-tempest-with-uwsgi: maintained by neutron for tempest test. + # * neutron-ovn-tempest-with-uwsgi: maintained by neutron for tempest test. # Next cycle we can remove this if everything run out stable enough. # * nova-multi-cell: maintained by nova and currently non-voting in the # check queue for nova changes but relies on devstack configuration @@ -1062,7 +1062,7 @@ - nova-next - neutron-fullstack-with-uwsgi - neutron-functional-with-uwsgi - - neutron-tempest-with-uwsgi + - neutron-ovn-tempest-with-uwsgi - devstack-plugin-ceph-tempest-py3: irrelevant-files: - ^.*\.rst$ From e8915786e1e007742f47fee507b1b6288b6cedae Mon Sep 17 00:00:00 2001 From: Ihar Hrachyshka Date: Wed, 19 Apr 2023 16:57:44 -0400 Subject: [PATCH 311/574] git: support git checkout for a commit hash git_clone assumes a branch or a tag is passed as the last argument, and it fails when a commit hash is passed, as in: timeout -s SIGINT 0 git clone https://github.com/ovn-org/ovn.git /opt/stack/ovn --branch 36e3ab9b47e93af0599a818e9d6b2930e49473f0 Cloning into '/opt/stack/ovn'... fatal: Remote branch 36e3ab9b47e93af0599a818e9d6b2930e49473f0 not found in upstream origin Change-Id: Id1328d7cba418fa7c227ae9db4fe83c09fd06035 --- functions-common | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/functions-common b/functions-common index c7a1c6e0bf..d8b15024a8 100644 --- a/functions-common +++ b/functions-common @@ -609,8 +609,9 @@ function git_clone { echo "the project to the \$PROJECTS variable in the job definition." die $LINENO "ERROR_ON_CLONE is set to True so cloning not allowed in this configuration" fi - # '--branch' can also take tags - git_timed clone $git_clone_flags $git_remote $git_dest --branch $git_ref + git_timed clone $git_clone_flags $git_remote $git_dest + cd $git_dest + git checkout $git_ref elif [[ "$RECLONE" = "True" ]]; then # if it does exist then simulate what clone does if asked to RECLONE cd $git_dest From b8f228620f6ad038ab8f31db861580f5e664a280 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ren=C3=A9=20Ribaud?= Date: Mon, 24 Apr 2023 14:22:01 +0200 Subject: [PATCH 312/574] Add manila service to configuration Manila is a service in OpenStack that enables shared filesystems. The modifications add the manila section in nova configuration files enabling the virtiofs feature. Implements: blueprint libvirt-virtiofs-attach-manila-shares Change-Id: Ia17c7a136cbe83efa1ef4e302d1c404034a50cda --- lib/nova | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/lib/nova b/lib/nova index f34e823074..f5f002dd10 100644 --- a/lib/nova +++ b/lib/nova @@ -507,6 +507,10 @@ function create_nova_conf { configure_cinder_access fi + if is_service_enabled manila; then + configure_manila_access + fi + if [ -n "$NOVA_STATE_PATH" ]; then iniset $NOVA_CONF DEFAULT state_path "$NOVA_STATE_PATH" iniset $NOVA_CONF oslo_concurrency lock_path "$NOVA_STATE_PATH" @@ -652,6 +656,18 @@ function configure_cinder_access { fi } +# Configure access to manila. 
+function configure_manila_access { + iniset $NOVA_CONF manila os_region_name "$REGION_NAME" + iniset $NOVA_CONF manila auth_type "password" + iniset $NOVA_CONF manila auth_url "$KEYSTONE_SERVICE_URI" + iniset $NOVA_CONF manila username nova + iniset $NOVA_CONF manila password "$SERVICE_PASSWORD" + iniset $NOVA_CONF manila user_domain_name "$SERVICE_DOMAIN_NAME" + iniset $NOVA_CONF manila project_name "$SERVICE_TENANT_NAME" + iniset $NOVA_CONF manila project_domain_name "$SERVICE_DOMAIN_NAME" +} + function configure_console_compute { # If we are running multiple cells (and thus multiple console proxies) on a # single host, we offset the ports to avoid collisions. We need to From 6764eab2644b2f76769e4492ab136ff65763d2ef Mon Sep 17 00:00:00 2001 From: Brian Haley Date: Fri, 12 May 2023 16:34:08 -0400 Subject: [PATCH 313/574] Remove usage of neutron-debug since it has been removed The neutron-debug command was deprecated and finally removed, so tools/ping_neutron.sh can no longer rely on it to create a probe namespace. Instead, just try and use any namespace with the network ID in it, since it's either the DHCP (ML2/OVS) or Metadata (OVN) namespace, which should work just as well. As this code is rarely (never?) used, this best-effort attempt is good enough. Change-Id: I98c992a2a774ef1fb22cee2e90ee342ab2d537ac Depends-on: https://review.opendev.org/c/openstack/neutron/+/883081 --- lib/neutron | 18 ------------------ tools/ping_neutron.sh | 16 +++++++++++----- 2 files changed, 11 insertions(+), 23 deletions(-) diff --git a/lib/neutron b/lib/neutron index 368a1b9c55..a6de7222db 100644 --- a/lib/neutron +++ b/lib/neutron @@ -1112,24 +1112,6 @@ function _neutron_setup_interface_driver { # Functions for Neutron Exercises #-------------------------------- -function delete_probe { - local from_net="$1" - net_id=`_get_net_id $from_net` - probe_id=`neutron-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-list -c id -c network_id | grep $net_id | awk '{print $2}'` - neutron-debug --os-tenant-name admin --os-username admin probe-delete $probe_id -} - -function _get_net_id { - openstack --os-cloud devstack-admin --os-region-name="$REGION_NAME" --os-project-name admin --os-username admin --os-password $ADMIN_PASSWORD network list | grep $1 | awk '{print $2}' -} - -function _get_probe_cmd_prefix { - local from_net="$1" - net_id=`_get_net_id $from_net` - probe_id=`neutron-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-list -c id -c network_id | grep $net_id | awk '{print $2}' | head -n 1` - echo "$Q_RR_COMMAND ip netns exec qprobe-$probe_id" -} - # ssh check function _ssh_check_neutron { local from_net=$1 diff --git a/tools/ping_neutron.sh b/tools/ping_neutron.sh index 73fe3f3bdf..ab8e8dfca8 100755 --- a/tools/ping_neutron.sh +++ b/tools/ping_neutron.sh @@ -30,7 +30,8 @@ ping_neutron.sh [ping args] This provides a wrapper to ping neutron guests that are on isolated tenant networks that the caller can't normally reach. It does so by -creating a network namespace probe. +using either the DHCP or Metadata network namespace to support both +ML2/OVS and OVN. It takes arguments like ping, except the first arg must be the network name. @@ -44,6 +45,12 @@ EOF exit 1 } +# BUG: with duplicate network names, this fails pretty hard since it +# will just pick the first match. 
+function _get_net_id { + openstack --os-cloud devstack-admin --os-region-name="$REGION_NAME" --os-project-name admin --os-username admin --os-password $ADMIN_PASSWORD network list | grep $1 | head -n 1 | awk '{print $2}' +} + NET_NAME=$1 if [[ -z "$NET_NAME" ]]; then @@ -53,12 +60,11 @@ fi REMAINING_ARGS="${@:2}" -# BUG: with duplicate network names, this fails pretty hard. -NET_ID=$(openstack network show -f value -c id "$NET_NAME") -PROBE_ID=$(neutron-debug probe-list -c id -c network_id | grep "$NET_ID" | awk '{print $2}' | head -n 1) +NET_ID=`_get_net_id $NET_NAME` +NET_NS=$(ip netns list | grep "$NET_ID" | head -n 1) # This runs a command inside the specific netns -NET_NS_CMD="ip netns exec qprobe-$PROBE_ID" +NET_NS_CMD="ip netns exec $NET_NS" PING_CMD="sudo $NET_NS_CMD ping $REMAINING_ARGS" echo "Running $PING_CMD" From bfa43975bca48bb021fb266a206885c5b09f5f45 Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Thu, 18 May 2023 12:54:19 -0500 Subject: [PATCH 314/574] Enable NOVA_ENFORCE_SCOPE to True by default Nova antelope release has enabled the RBAC new defaults by default - https://review.opendev.org/c/openstack/nova/+/866218 With the latest release of Nova have new defaults enable, we should test the same by default in devstack. This change make NOVA_ENFORCE_SCOPE flag to True by default so that every job will run with Nova new defaults. As old defaults are still supported (in deprecated way), we will keep NOVA_ENFORCE_SCOPE flag so that we can have a single job can disable the new defaults and continue testing the old defaults. Change-Id: Id56819f03c19a5b7fe30adf799ecd3b8aeb67695 --- lib/nova | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/nova b/lib/nova index f34e823074..21067f302b 100644 --- a/lib/nova +++ b/lib/nova @@ -98,10 +98,10 @@ METADATA_SERVICE_PORT=${METADATA_SERVICE_PORT:-8775} NOVA_ENABLE_CACHE=${NOVA_ENABLE_CACHE:-True} # Flag to set the oslo_policy.enforce_scope and oslo_policy.enforce_new_defaults. -# This is used to switch the compute API policies enable the scope and new defaults. -# By Default, these flag are False. +# This is used to disable the compute API policies scope and new defaults. +# By Default, it is True. # For more detail: https://docs.openstack.org/oslo.policy/latest/configuration/index.html#oslo_policy.enforce_scope -NOVA_ENFORCE_SCOPE=$(trueorfalse False NOVA_ENFORCE_SCOPE) +NOVA_ENFORCE_SCOPE=$(trueorfalse True NOVA_ENFORCE_SCOPE) if [[ $SERVICE_IP_VERSION == 6 ]]; then NOVA_MY_IP="$HOST_IPV6" From cb1ec1834de0b1eaddb02b7847b21d1d617efb6e Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Thu, 18 May 2023 19:58:41 -0500 Subject: [PATCH 315/574] Enable GLANCE_ENFORCE_SCOPE to True by default Glance antelope release has enabled the RBAC new defaults by default - https://review.opendev.org/c/openstack/glance/+/872522 With the latest release of Glance have new defaults enable, we should test the same by default in devstack. This change make GLANCE_ENFORCE_SCOPE flag to True by default so that every job will run with Glance new defaults. As old defaults are still supported (in deprecated way), we will keep GLANCE_ENFORCE_SCOPE flag so that we can have a single job can disable the new defaults and continue testing the old defaults. 
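A job that still needs to exercise the deprecated defaults can opt back out through the existing flags, for example with a localrc snippet like:

    # keep testing the old policy defaults (deprecated)
    GLANCE_ENFORCE_SCOPE=False
    NOVA_ENFORCE_SCOPE=False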
Depends-On: https://review.opendev.org/c/openstack/tempest/+/883701 Change-Id: Idde6f3cb766597575ca822f21b4bb3a465e5e753 --- lib/glance | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/glance b/lib/glance index 5aeae16c61..430d94d3a4 100644 --- a/lib/glance +++ b/lib/glance @@ -95,10 +95,10 @@ GLANCE_USE_IMPORT_WORKFLOW=$(trueorfalse False GLANCE_USE_IMPORT_WORKFLOW) GLANCE_ENABLE_QUOTAS=$(trueorfalse True GLANCE_ENABLE_QUOTAS) # Flag to set the oslo_policy.enforce_scope. This is used to switch -# the Image API policies to start checking the scope of token. By Default, -# this flag is False. +# This is used to disable the Image API policies scope and new defaults. +# By Default, it is True. # For more detail: https://docs.openstack.org/oslo.policy/latest/configuration/index.html#oslo_policy.enforce_scope -GLANCE_ENFORCE_SCOPE=$(trueorfalse False GLANCE_ENFORCE_SCOPE) +GLANCE_ENFORCE_SCOPE=$(trueorfalse True GLANCE_ENFORCE_SCOPE) GLANCE_CONF_DIR=${GLANCE_CONF_DIR:-/etc/glance} GLANCE_METADEF_DIR=$GLANCE_CONF_DIR/metadefs From a2943894031e5b1c7662512d54ffb75a3cd3ca9d Mon Sep 17 00:00:00 2001 From: Martin Kopec Date: Tue, 25 Apr 2023 21:50:31 +0200 Subject: [PATCH 316/574] Set dhcp_client based on cirros version This change allows us to bump the default cirros version in devstack. Since cirros version 0.6.0 dhcpcd is the default dhcp client. The older cirros images used udhcpc client (the only available client at that time) which is also the default client in Tempest. This patch makes devstack configure dhcpcd client in tempest.conf if cirros >= 0.6.0 is going to be used in scenario tests. The commit also introduces a new SCENARIO_IMAGE_TYPE option. It is now a trigger for cirros specific settings, later it might be used for any other image's settings. Closes-Bug: #2007973 Change-Id: I2738c3b1d302c6656ce2c209671ea954fbc1b05b --- lib/tempest | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/lib/tempest b/lib/tempest index 7da9f17052..9fa989a2f6 100644 --- a/lib/tempest +++ b/lib/tempest @@ -517,8 +517,19 @@ function configure_tempest { # Scenario SCENARIO_IMAGE_DIR=${SCENARIO_IMAGE_DIR:-$FILES} SCENARIO_IMAGE_FILE=$DEFAULT_IMAGE_FILE_NAME + SCENARIO_IMAGE_TYPE=${SCENARIO_IMAGE_TYPE:-cirros} iniset $TEMPEST_CONFIG scenario img_file $SCENARIO_IMAGE_DIR/$SCENARIO_IMAGE_FILE + # since version 0.6.0 cirros uses dhcpcd dhcp client by default, however, cirros, prior to the + # version 0.6.0, used udhcpc (the only available client at that time) which is also tempest's default + if [[ "$SCENARIO_IMAGE_TYPE" == "cirros" ]]; then + # the image is a cirros image + # use dhcpcd client when version greater or equal 0.6.0 + if [[ $(echo $CIRROS_VERSION | tr -d '.') -ge 060 ]]; then + iniset $TEMPEST_CONFIG scenario dhcp_client dhcpcd + fi + fi + # If using provider networking, use the physical network for validation rather than private TEMPEST_SSH_NETWORK_NAME=$PRIVATE_NETWORK_NAME if is_provider_network; then From b5f4b1148a3f646a82a759f1dde3da1f74eb803c Mon Sep 17 00:00:00 2001 From: Martin Kopec Date: Tue, 25 Apr 2023 20:01:42 +0000 Subject: [PATCH 317/574] Revert "Revert "Bump cirros version to 0.6.1"" This reverts commit 37d11d00e56ec6ff402a13a28ec308c13291a937. Reason for revert: reverting this revert as the issue caused by the original patch (before the first revert) is fixed by: https://review.opendev.org/c/openstack/devstack/+/881504 Therefore we can proceed with the cirros version bump. 
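Deployments or jobs that still depend on the previous image can pin it locally, e.g. with a localrc override such as:

    # fall back to the prior default cirros image
    CIRROS_VERSION=0.5.2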
Change-Id: I43e2b04a0142c19fb1a79da5a33cc444149e18f1 --- doc/source/guides/nova.rst | 2 +- stackrc | 15 +++++++-------- 2 files changed, 8 insertions(+), 9 deletions(-) diff --git a/doc/source/guides/nova.rst b/doc/source/guides/nova.rst index 5b427972c4..d0fb274c13 100644 --- a/doc/source/guides/nova.rst +++ b/doc/source/guides/nova.rst @@ -122,7 +122,7 @@ when creating the server, for example: .. code-block:: shell $ openstack --os-compute-api-version 2.37 server create --flavor cirros256 \ - --image cirros-0.3.5-x86_64-disk --nic none --wait test-server + --image cirros-0.6.1-x86_64-disk --nic none --wait test-server .. note:: ``--os-compute-api-version`` greater than or equal to 2.37 is required to use ``--nic=none``. diff --git a/stackrc b/stackrc index b7ce238366..a17d88ecbd 100644 --- a/stackrc +++ b/stackrc @@ -662,20 +662,19 @@ esac # If the file ends in .tar.gz, uncompress the tarball and and select the first # .img file inside it as the image. If present, use "*-vmlinuz*" as the kernel # and "*-initrd*" as the ramdisk -# example: http://cloud-images.ubuntu.com/releases/precise/release/ubuntu-12.04-server-cloudimg-amd64.tar.gz +# example: https://cloud-images.ubuntu.com/releases/jammy/release/ubuntu-22.04-server-cloudimg-amd64.tar.gz # * disk image (*.img,*.img.gz) # if file ends in .img, then it will be uploaded and registered as a to # glance as a disk image. If it ends in .gz, it is uncompressed first. # example: -# http://cloud-images.ubuntu.com/releases/precise/release/ubuntu-12.04-server-cloudimg-armel-disk1.img -# http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs.img.gz +# https://cloud-images.ubuntu.com/releases/jammy/release/ubuntu-22.04-server-cloudimg-amd64.img +# https://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs.img.gz # * OpenVZ image: # OpenVZ uses its own format of image, and does not support UEC style images -#IMAGE_URLS="http://smoser.brickies.net/ubuntu/ttylinux-uec/ttylinux-uec-amd64-11.2_2.6.35-15_1.tar.gz" # old ttylinux-uec image -#IMAGE_URLS="http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img" # cirros full disk image +#IMAGE_URLS="https://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img" # cirros full disk image -CIRROS_VERSION=${CIRROS_VERSION:-"0.5.2"} +CIRROS_VERSION=${CIRROS_VERSION:-"0.6.1"} CIRROS_ARCH=${CIRROS_ARCH:-$(uname -m)} # Set default image based on ``VIRT_DRIVER`` and ``LIBVIRT_TYPE``, either of @@ -692,11 +691,11 @@ if [[ "$DOWNLOAD_DEFAULT_IMAGES" == "True" ]]; then lxc) # the cirros root disk in the uec tarball is empty, so it will not work for lxc DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs} DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_FILE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs.img.gz} - IMAGE_URLS+="http://download.cirros-cloud.net/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";; + IMAGE_URLS+="https://download.cirros-cloud.net/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";; *) # otherwise, use the qcow image DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk} DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_FILE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img} - IMAGE_URLS+="http://download.cirros-cloud.net/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";; + IMAGE_URLS+="https://download.cirros-cloud.net/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";; esac ;; 
vsphere) From 814e659e32a919ea68c29451753aa49c993ce5ed Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Mon, 22 May 2023 10:25:38 -0700 Subject: [PATCH 318/574] Default MYSQL_REDUCE_MEMORY=True We have lots of evidence that this is a net benefit, so enable it by default instead of everyone having to opt-in. Change-Id: I66fa1799ff5177c3667630a89e15c072a8bf975a --- .zuul.yaml | 3 --- stackrc | 2 +- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 316e89ae32..9cad5d4084 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -696,9 +696,6 @@ description: | Simple multinode test to verify multinode functionality on devstack side. This is not meant to be used as a parent job. - vars: - devstack_localrc: - MYSQL_REDUCE_MEMORY: true # NOTE(ianw) Platform tests have traditionally been non-voting because # we often have to rush things through devstack to stabilise the gate, diff --git a/stackrc b/stackrc index b7ce238366..672679e0b2 100644 --- a/stackrc +++ b/stackrc @@ -204,7 +204,7 @@ MYSQL_GATHER_PERFORMANCE=$(trueorfalse True MYSQL_GATHER_PERFORMANCE) # This can be used to reduce the amount of memory mysqld uses while running. # These are unscientifically determined, and could reduce performance or # cause other issues. -MYSQL_REDUCE_MEMORY=$(trueorfalse False MYSQL_REDUCE_MEMORY) +MYSQL_REDUCE_MEMORY=$(trueorfalse True MYSQL_REDUCE_MEMORY) # Set a timeout for git operations. If git is still running when the # timeout expires, the command will be retried up to 3 times. This is From a37b6abc8ecab1a32593aecdf8f74d54f3c4adb1 Mon Sep 17 00:00:00 2001 From: Martin Kopec Date: Fri, 26 May 2023 13:46:42 +0200 Subject: [PATCH 319/574] Resolve distutils deprecation warning The distutils package is deprecated and slated for removal in Python 3.12. Let's use shutil.which which is also recomended by PEP 632: https://peps.python.org/pep-0632/#migration-advice Closes-Bug: #2009229 Change-Id: Ibb2a9731449e765c4a56952a9f02679e9618778b --- tools/worlddump.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tools/worlddump.py b/tools/worlddump.py index e2921737db..aadd33b634 100755 --- a/tools/worlddump.py +++ b/tools/worlddump.py @@ -19,7 +19,6 @@ import argparse import datetime -from distutils import spawn import fnmatch import io import os @@ -76,7 +75,7 @@ def _dump_cmd(cmd): def _find_cmd(cmd): - if not spawn.find_executable(cmd): + if not shutil.which(cmd): print("*** %s not found: skipping" % cmd) return False return True From b2ad00cb66bd38ec6179d3bd1bf41556b966dc8c Mon Sep 17 00:00:00 2001 From: Alfredo Moralejo Date: Wed, 24 May 2023 21:03:28 +0200 Subject: [PATCH 320/574] Use RDO official CloudSIG mirrors for C9S deployments Instead of using RDO Trunk repo server, CentOS official mirrors provide a most reliable infrastructure and supports EMS which is required when enabling FIPS in C9S. In order to install the rdo-release rpm from repo.fedoraproject.org, which does not support EMS, I'm using a workaround to wget, which works with non-EMS servers because it uses gnutls instead of openssl, and install it locally with rpm. This is also consistent to CentOS 8 implementatioin. 
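A simple, illustrative way to sanity-check the switch is to confirm the CloudSIG repositories show up as enabled once the release package is installed:

    # verify the RDO/CloudSIG repositories are active
    sudo dnf repolist enabled | grep -i -e rdo -e openstack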
Closes-Bug: #2020661 Closes-Bug: #2020434 Change-Id: Icd99f467d47aaafaaf3ee8f2a3c4da08842cb672 --- stack.sh | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index be3c9dda11..1d32ed8156 100755 --- a/stack.sh +++ b/stack.sh @@ -311,7 +311,22 @@ function _install_rdo { sudo dnf -y install https://rdoproject.org/repos/openstack-${rdo_release}/rdo-release-${rdo_release}.el8.rpm fi elif [[ $DISTRO == "rhel9" ]]; then - sudo curl -L -o /etc/yum.repos.d/delorean-deps.repo http://trunk.rdoproject.org/centos9-master/delorean-deps.repo + install_package wget + # We need to download rdo-release package using wget as installing with dnf from repo.fedoraproject.org fails in + # FIPS enabled systems after https://bugzilla.redhat.com/show_bug.cgi?id=2157951 + # Until we can pull rdo-release from a server which supports EMS, this workaround is doing wget, which does + # not relies on openssl but on gnutls, and then install it locally using rpm + TEMPRDODIR=$(mktemp -d) + if [[ "$TARGET_BRANCH" == "master" ]]; then + # rdo-release.el9.rpm points to latest RDO release, use that for master + wget -P $TEMPRDODIR https://rdoproject.org/repos/rdo-release.el9.rpm + else + # For stable branches use corresponding release rpm + rdo_release=$(echo $TARGET_BRANCH | sed "s|stable/||g") + wget -P $TEMPRDODIR https://rdoproject.org/repos/openstack-${rdo_release}/rdo-release-${rdo_release}.el9.rpm + fi + sudo rpm -ivh $TEMPRDODIR/rdo-release*rpm + rm -rf $TEMPRDODIR fi sudo dnf -y update } From a13201646d7ca50d92c44b73ba3f20bbf0f3f1d3 Mon Sep 17 00:00:00 2001 From: Brian Haley Date: Tue, 30 May 2023 13:31:05 -0400 Subject: [PATCH 321/574] Install systemd-coredump on Debian-based distros On Debian-based distros, the 'coredumpctl' command is provided by the systemd-coredump package, which is not installed by default. On failure, when "post" commands are executed this error is seen: controller | /bin/bash: line 1: coredumpctl: command not found Install it along with other libvirt packages to avoid the error. On Fedora distros it is in the systemd package, so the problem is not seen since it is always installed. Change-Id: I6012bd3240d68736a5db8ae49dc32098a086f320 --- lib/nova_plugins/functions-libvirt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt index 799230603c..ba2e98e304 100644 --- a/lib/nova_plugins/functions-libvirt +++ b/lib/nova_plugins/functions-libvirt @@ -69,7 +69,7 @@ function install_libvirt { $REQUIREMENTS_DIR/upper-constraints.txt -- libvirt-python if is_ubuntu; then - install_package qemu-system libvirt-clients libvirt-daemon-system libvirt-dev python3-libvirt + install_package qemu-system libvirt-clients libvirt-daemon-system libvirt-dev python3-libvirt systemd-coredump if is_arch "aarch64"; then install_package qemu-efi fi From fbc1865dc4e5b84ebafaf1d30cffc582ae3f0c0f Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Wed, 7 Jun 2023 15:19:37 +0200 Subject: [PATCH 322/574] Drop Fedora support Fedora 36 is EOL, also opendev is dropping support for Fedora images completely since interest in running jobs on that platform is no longer existing. CentOS 9 Stream has evolved as replacement platform for new features. Only drop the Zuul configuration and the tag in stack.sh for now plus update some docs. Cleanup of the deployment code will be done in a second step. 
Change-Id: Ica483fde27346e3939b5fc0d7e0a6dfeae0e8d1e --- .zuul.yaml | 33 --------------------------------- README.rst | 8 ++++---- doc/source/index.rst | 6 +++--- doc/source/overview.rst | 5 ++--- stack.sh | 4 ++-- 5 files changed, 11 insertions(+), 45 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 9cad5d4084..a7be67153b 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -78,16 +78,6 @@ nodes: - controller -- nodeset: - name: devstack-single-node-fedora-latest - nodes: - - name: controller - label: fedora-36 - groups: - - name: tempest - nodes: - - controller - - nodeset: name: devstack-single-node-debian-bullseye nodes: @@ -854,23 +844,6 @@ devstack_services: tls-proxy: false -- job: - name: devstack-platform-fedora-latest - parent: tempest-full-py3 - description: Fedora latest platform test - nodeset: devstack-single-node-fedora-latest - voting: false - -- job: - name: devstack-platform-fedora-latest-virt-preview - parent: tempest-full-py3 - description: Fedora latest platform test using the virt-preview repo. - nodeset: devstack-single-node-fedora-latest - voting: false - vars: - devstack_localrc: - ENABLE_FEDORA_VIRT_PREVIEW_REPO: true - - job: name: devstack-tox-base parent: devstack @@ -944,7 +917,6 @@ - devstack - devstack-ipv6 - devstack-enforce-scope - - devstack-platform-fedora-latest - devstack-platform-centos-9-stream - devstack-platform-debian-bullseye - devstack-platform-rocky-blue-onyx @@ -1048,10 +1020,6 @@ # Next cycle we can remove this if everything run out stable enough. # * nova-multi-cell: maintained by nova and currently non-voting in the # check queue for nova changes but relies on devstack configuration - # * devstack-platform-fedora-latest-virt-preview: Maintained by lyarwood - # for Nova to allow early testing of the latest versions of Libvirt and - # QEMU. Should only graduate out of experimental if it ever moves into - # the check queue for Nova. experimental: jobs: @@ -1080,7 +1048,6 @@ irrelevant-files: - ^.*\.rst$ - ^doc/.*$ - - devstack-platform-fedora-latest-virt-preview - devstack-no-tls-proxy periodic: jobs: diff --git a/README.rst b/README.rst index f3a585a926..86b85da956 100644 --- a/README.rst +++ b/README.rst @@ -4,7 +4,7 @@ from git source trees. Goals ===== -* To quickly build dev OpenStack environments in a clean Ubuntu or Fedora +* To quickly build dev OpenStack environments in a clean Ubuntu or RockyLinux environment * To describe working configurations of OpenStack (which code branches work together? what do config files look like for those branches?) @@ -28,9 +28,9 @@ Versions The DevStack master branch generally points to trunk versions of OpenStack components. For older, stable versions, look for branches named stable/[release] in the DevStack repo. 
For example, you can do the -following to create a Pike OpenStack cloud:: +following to create a Zed OpenStack cloud:: - git checkout stable/pike + git checkout stable/zed ./stack.sh You can also pick specific OpenStack project releases by setting the appropriate @@ -55,7 +55,7 @@ When the script finishes executing, you should be able to access OpenStack endpoints, like so: * Horizon: http://myhost/ -* Keystone: http://myhost/identity/v2.0/ +* Keystone: http://myhost/identity/v3/ We also provide an environment file that you can use to interact with your cloud via CLI:: diff --git a/doc/source/index.rst b/doc/source/index.rst index ccd0fef330..a5a11e251b 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -37,8 +37,8 @@ Install Linux ------------- Start with a clean and minimal install of a Linux system. DevStack -attempts to support the two latest LTS releases of Ubuntu, the -latest/current Fedora version, CentOS/RHEL/Rocky Linux 9 and openEuler. +attempts to support the two latest LTS releases of Ubuntu, +Rocky Linux 9 and openEuler. If you do not have a preference, Ubuntu 22.04 (Jammy) is the most tested, and will probably go the smoothest. @@ -113,7 +113,7 @@ Start the install $ ./stack.sh -This will take a 15 - 20 minutes, largely depending on the speed of +This will take 15 - 30 minutes, largely depending on the speed of your internet connection. Many git trees and packages will be installed during this process. diff --git a/doc/source/overview.rst b/doc/source/overview.rst index a609333289..4384081769 100644 --- a/doc/source/overview.rst +++ b/doc/source/overview.rst @@ -23,13 +23,12 @@ strategy to include the latest Ubuntu release and the latest RHEL release.* - Ubuntu: current LTS release plus current development release -- Fedora: current release plus previous release -- RHEL/CentOS: current major release +- RHEL/CentOS/RockyLinux: current major release - Other OS platforms may continue to be included but the maintenance of those platforms shall not be assumed simply due to their presence. Having a listed point-of-contact for each additional OS will greatly increase its chance of being well-maintained. -- Patches for Ubuntu and/or Fedora will not be held up due to +- Patches for Ubuntu and/or RockyLinux will not be held up due to side-effects on other OS platforms. Databases diff --git a/stack.sh b/stack.sh index 1d32ed8156..e9617eee78 100755 --- a/stack.sh +++ b/stack.sh @@ -12,7 +12,7 @@ # a multi-node developer install. # To keep this script simple we assume you are running on a recent **Ubuntu** -# (Bionic or newer), **Fedora** (F36 or newer), or **CentOS/RHEL** +# (Bionic or newer) or **CentOS/RHEL/RockyLinux** # (7 or newer) machine. (It may work on other platforms but support for those # platforms is left to those who added them to DevStack.) It should work in # a VM or physical server. Additionally, we maintain a list of ``deb`` and @@ -229,7 +229,7 @@ write_devstack_version # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -SUPPORTED_DISTROS="bullseye|focal|jammy|f36|rhel8|rhel9|openEuler-22.03" +SUPPORTED_DISTROS="bullseye|focal|jammy|rhel8|rhel9|openEuler-22.03" if [[ ! 
${DISTRO} =~ $SUPPORTED_DISTROS ]]; then echo "WARNING: this script has not been tested on $DISTRO" From f1c5442becad6fcdfb16676e8bc99835d4a75b22 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Sat, 10 Jun 2023 03:07:59 +0000 Subject: [PATCH 323/574] Updated from generate-devstack-plugins-list Change-Id: Icc3aa69d7bbfa217676402682454cd4b37fb6c29 --- doc/source/plugin-registry.rst | 4 ---- 1 file changed, 4 deletions(-) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index b244ca5dd8..ec502ea252 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -69,10 +69,6 @@ openstack/networking-bagpipe `https://opendev.org/openstack/networki openstack/networking-baremetal `https://opendev.org/openstack/networking-baremetal `__ openstack/networking-bgpvpn `https://opendev.org/openstack/networking-bgpvpn `__ openstack/networking-generic-switch `https://opendev.org/openstack/networking-generic-switch `__ -openstack/networking-hyperv `https://opendev.org/openstack/networking-hyperv `__ -openstack/networking-odl `https://opendev.org/openstack/networking-odl `__ -openstack/networking-powervm `https://opendev.org/openstack/networking-powervm `__ -openstack/networking-sfc `https://opendev.org/openstack/networking-sfc `__ openstack/neutron `https://opendev.org/openstack/neutron `__ openstack/neutron-dynamic-routing `https://opendev.org/openstack/neutron-dynamic-routing `__ openstack/neutron-fwaas `https://opendev.org/openstack/neutron-fwaas `__ From 39228451b6542ff63f288affbda13897089eb16d Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Thu, 15 Jun 2023 10:46:51 +0200 Subject: [PATCH 324/574] Bump default cirros version to 0.6.2 Cirros has made a new release, including a newer kernel that should fix some issues when using nested virtualization. Related-Bug: 2023559 Change-Id: I63469371b13801094a3ee1baae6e343999fbefa5 --- doc/source/guides/nova.rst | 2 +- stackrc | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/guides/nova.rst b/doc/source/guides/nova.rst index d0fb274c13..705d427e68 100644 --- a/doc/source/guides/nova.rst +++ b/doc/source/guides/nova.rst @@ -122,7 +122,7 @@ when creating the server, for example: .. code-block:: shell $ openstack --os-compute-api-version 2.37 server create --flavor cirros256 \ - --image cirros-0.6.1-x86_64-disk --nic none --wait test-server + --image cirros-0.6.2-x86_64-disk --nic none --wait test-server .. note:: ``--os-compute-api-version`` greater than or equal to 2.37 is required to use ``--nic=none``. diff --git a/stackrc b/stackrc index 8820c621e5..7160d0a390 100644 --- a/stackrc +++ b/stackrc @@ -674,7 +674,7 @@ esac #IMAGE_URLS="https://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img" # cirros full disk image -CIRROS_VERSION=${CIRROS_VERSION:-"0.6.1"} +CIRROS_VERSION=${CIRROS_VERSION:-"0.6.2"} CIRROS_ARCH=${CIRROS_ARCH:-$(uname -m)} # Set default image based on ``VIRT_DRIVER`` and ``LIBVIRT_TYPE``, either of From 7288df34f8513caf6f3985c75855feb572f6b004 Mon Sep 17 00:00:00 2001 From: yatinkarel Date: Fri, 16 Jun 2023 14:25:33 +0530 Subject: [PATCH 325/574] Add 10 second buffer for uwsgi service stop Default for systemd TimeoutStopSec is 90 seconds and that is same for default graceful shutdown of uwsgi service(WORKER_TIMEOUT). Due to the Related-Bug graceful stop attempt fails and there is no room for force shutdown. 
This patch reduces default for WORKER_TIMEOUT by 10 seconds so there is a buffer to force stop the service. Closes-Bug: #2020643 Related-Bug: #2015065 Change-Id: I6aacac94f9697088338b3d2f99d8eaa22c2be67b --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index 8820c621e5..7465f54112 100644 --- a/stackrc +++ b/stackrc @@ -804,7 +804,7 @@ NOVA_READY_TIMEOUT=${NOVA_READY_TIMEOUT:-$SERVICE_TIMEOUT} SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT=${SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT:-5} # Service graceful shutdown timeout -WORKER_TIMEOUT=${WORKER_TIMEOUT:-90} +WORKER_TIMEOUT=${WORKER_TIMEOUT:-80} # Common Configuration # -------------------- From ad029c0e8b66d81889c80d4a68b4654dd169fecf Mon Sep 17 00:00:00 2001 From: Sean Mooney Date: Mon, 26 Jun 2023 10:57:49 +0100 Subject: [PATCH 326/574] The AZ filter is deprecated and planned for removal this cycle To facilitate that this change removes it form the default filter list. By default nova has used placement for AZs so this filter has not been requried since xena. Change-Id: Ie5e216dd8c2a7ecf43cc6954ec4f73d4d67b5b3b --- lib/nova | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/nova b/lib/nova index 21067f302b..abf4eee465 100644 --- a/lib/nova +++ b/lib/nova @@ -115,7 +115,7 @@ FORCE_CONFIG_DRIVE=${FORCE_CONFIG_DRIVE:-"False"} # The following NOVA_FILTERS contains SameHostFilter and DifferentHostFilter with # the default filters. -NOVA_FILTERS="AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,SameHostFilter,DifferentHostFilter" +NOVA_FILTERS="ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,SameHostFilter,DifferentHostFilter" QEMU_CONF=/etc/libvirt/qemu.conf From 4a1b2808af68ab50a15a9c16bfe217fac50bf309 Mon Sep 17 00:00:00 2001 From: "Dr. 
Jens Harbott" Date: Mon, 26 Jun 2023 12:23:58 +0200 Subject: [PATCH 327/574] Remove unused file This was forgotten in [0] [0] I20501fec140998b91c9ddfd84b7b10168624430a Change-Id: Iacd86e3953f573a0fc38dc4898aafefccb3a9a79 --- files/dnsmasq-for-baremetal-from-nova-network.conf | 3 --- 1 file changed, 3 deletions(-) delete mode 100644 files/dnsmasq-for-baremetal-from-nova-network.conf diff --git a/files/dnsmasq-for-baremetal-from-nova-network.conf b/files/dnsmasq-for-baremetal-from-nova-network.conf deleted file mode 100644 index 66a375190e..0000000000 --- a/files/dnsmasq-for-baremetal-from-nova-network.conf +++ /dev/null @@ -1,3 +0,0 @@ -enable-tftp -tftp-root=/tftpboot -dhcp-boot=pxelinux.0 From 27568ea33460b9ea4635a7d0a0bb06d32654150b Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Tue, 27 Jun 2023 02:19:53 +0000 Subject: [PATCH 328/574] Updated from generate-devstack-plugins-list Change-Id: I6fd6a718ce39d849342b30970ca39477ce285374 --- doc/source/plugin-registry.rst | 3 +++ 1 file changed, 3 insertions(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index ec502ea252..f54fca92e6 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -69,6 +69,9 @@ openstack/networking-bagpipe `https://opendev.org/openstack/networki openstack/networking-baremetal `https://opendev.org/openstack/networking-baremetal `__ openstack/networking-bgpvpn `https://opendev.org/openstack/networking-bgpvpn `__ openstack/networking-generic-switch `https://opendev.org/openstack/networking-generic-switch `__ +openstack/networking-hyperv `https://opendev.org/openstack/networking-hyperv `__ +openstack/networking-powervm `https://opendev.org/openstack/networking-powervm `__ +openstack/networking-sfc `https://opendev.org/openstack/networking-sfc `__ openstack/neutron `https://opendev.org/openstack/neutron `__ openstack/neutron-dynamic-routing `https://opendev.org/openstack/neutron-dynamic-routing `__ openstack/neutron-fwaas `https://opendev.org/openstack/neutron-fwaas `__ From e32715b2515fdae523a3d113a881f0a57fff9410 Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Thu, 22 Jun 2023 21:10:31 -0500 Subject: [PATCH 329/574] Set two different image in tempest irespective of DEFAULT_IMAGE_NAME In current logic to set two different image in Tempest in config option image_ref and image_ref_alt, we consider if DEFAULT_IMAGE_NAME is found in glance then set the same image in tempest for those two config option. This means even we have two different image available in glance, still we set same image in image_ref as well as image_ref_alt and all the rebuild tests are rebuilt on the same image. I could not find any reason why we set same image if DEFAULT_IMAGE_NAME exist, below are the original change added this logic - https://review.opendev.org/c/openstack/devstack/+/17553 We had a requirement of test to run on two different images - https://review.opendev.org/c/openstack/tempest/+/831018 and for that we need to set DEFAULT_IMAGE_NAME to non exist image name but that broke the Ironic which was reply on the valid name in DEFAULT_IMAGE_NAME - https://review.opendev.org/c/openstack/ironic/+/886790 As we do not have any reason not to set two different image if DEFAULT_IMAGE_NAME is set, I am removing the condition of DEFAULT_IMAGE_NAME from lib/tempest logic and always set two different images if they are available. 
Depends-On: https://review.opendev.org/c/openstack/tempest/+/886796 Change-Id: I9d215f48d4440f2fa6dcc0d222a10896caf01215 --- lib/tempest | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/lib/tempest b/lib/tempest index 9fa989a2f6..4f72a6d174 100644 --- a/lib/tempest +++ b/lib/tempest @@ -149,11 +149,10 @@ function set_tempest_venv_constraints { # ramdisk and kernel images. Takes 3 arguments, an array and two # variables. The array will contain the list of active image UUIDs; # if an image with ``DEFAULT_IMAGE_NAME`` is found, its UUID will be -# set as the value of *both* other parameters. +# set as the value img_id ($2) parameters. function get_active_images { declare -n img_array=$1 declare -n img_id=$2 - declare -n img_id_alt=$3 # start with a fresh array in case we are called multiple times img_array=() @@ -161,7 +160,6 @@ function get_active_images { while read -r IMAGE_NAME IMAGE_UUID; do if [ "$IMAGE_NAME" = "$DEFAULT_IMAGE_NAME" ]; then img_id="$IMAGE_UUID" - img_id_alt="$IMAGE_UUID" fi img_array+=($IMAGE_UUID) done < <(openstack --os-cloud devstack-admin image list --property status=active | awk -F'|' '!/^(+--)|ID|aki|ari/ { print $3,$2 }') @@ -170,13 +168,12 @@ function get_active_images { function poll_glance_images { declare -n image_array=$1 declare -n image_id=$2 - declare -n image_id_alt=$3 local -i poll_count poll_count=$TEMPEST_GLANCE_IMPORT_POLL_LIMIT while (( poll_count-- > 0 )) ; do sleep $TEMPEST_GLANCE_IMPORT_POLL_INTERVAL - get_active_images image_array image_id image_id_alt + get_active_images image_array image_id if (( ${#image_array[*]} >= $TEMPEST_GLANCE_IMAGE_COUNT )) ; then return fi @@ -228,7 +225,7 @@ function configure_tempest { declare -a images if is_service_enabled glance; then - get_active_images images image_uuid image_uuid_alt + get_active_images images image_uuid if (( ${#images[*]} < $TEMPEST_GLANCE_IMAGE_COUNT )); then # Glance image import is asynchronous and may be configured @@ -236,7 +233,7 @@ function configure_tempest { # it's possible that this code is being executed before the # import has completed and there may be no active images yet. if [[ "$GLANCE_USE_IMPORT_WORKFLOW" == "True" ]]; then - poll_glance_images images image_uuid image_uuid_alt + poll_glance_images images image_uuid if (( ${#images[*]} < $TEMPEST_GLANCE_IMAGE_COUNT )); then echo "Only found ${#images[*]} image(s), was looking for $TEMPEST_GLANCE_IMAGE_COUNT" exit 1 @@ -258,7 +255,15 @@ function configure_tempest { *) if [ -z "$image_uuid" ]; then image_uuid=${images[0]} - image_uuid_alt=${images[1]} + if [ -z "$image_uuid_alt" ]; then + image_uuid_alt=${images[1]} + fi + elif [ -z "$image_uuid_alt" ]; then + for image in $images; do + if [[ "$image" != "$image_uuid" ]]; then + image_uuid_alt=$image + fi + done fi ;; esac From 58c80b2424623096e4a1f7a901f424be0ce6cb3f Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Tue, 27 Jun 2023 12:16:32 -0700 Subject: [PATCH 330/574] nova: Bump timeout-per-gb for BFV rebuild ops This increases the timeout we use to wait for cinder to perform a volume reimage. Since devstack is often running on a single machine with non-production IO performance, we should bump this limit to avoid hitting it before the rebuild completes. Change-Id: Ie2663b951acb0c1a65597a39e032948764e6ae6a --- lib/nova | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/nova b/lib/nova index 21067f302b..e60b71c36d 100644 --- a/lib/nova +++ b/lib/nova @@ -1036,6 +1036,10 @@ function start_nova_compute { # by the compute process. 
configure_console_compute + # Set rebuild timeout longer for BFV instances because we likely have + # slower disk than expected. Default is 20s/GB + iniset $NOVA_CPU_CONF DEFAULT reimage_timeout_per_gb 60 + # Configure the OVSDB connection for os-vif if [ -n "$OVSDB_SERVER_LOCAL_HOST" ]; then iniset $NOVA_CPU_CONF os_vif_ovs ovsdb_connection "tcp:$OVSDB_SERVER_LOCAL_HOST:6640" From 931b45defd07991890707b434638166800ec948a Mon Sep 17 00:00:00 2001 From: yatinkarel Date: Wed, 19 Jul 2023 12:15:52 +0530 Subject: [PATCH 331/574] Handle more than 1 image while configuring tempest [1] caused a regression causing failures when more than 1 images are setup. Fixing it by correctly using the array variable. Also add a break in the for loop once if condition is met. [1] https://review.opendev.org/c/openstack/devstack/+/886795 Closes-Bug: #2028123 Change-Id: I4f13c1239312bbcca8c65e875d65d03702161c18 --- lib/tempest | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index 4f72a6d174..4ba101f218 100644 --- a/lib/tempest +++ b/lib/tempest @@ -259,9 +259,10 @@ function configure_tempest { image_uuid_alt=${images[1]} fi elif [ -z "$image_uuid_alt" ]; then - for image in $images; do + for image in ${images[@]}; do if [[ "$image" != "$image_uuid" ]]; then image_uuid_alt=$image + break fi done fi From e261bd809e81c01c153cdcdb50be47ed3c89c46a Mon Sep 17 00:00:00 2001 From: Brian Haley Date: Wed, 19 Jul 2023 16:04:12 -0400 Subject: [PATCH 332/574] Always set image_uuid_alt in configure_tempest() When there is only a single image, configure_tempest() needs to always set image_uuid_alt the same as image_uuid, else it will fail trying to determine the size of the flavor to use for it later in the function. Introduced by [0], and subsequent change did not fix it. [0] https://review.opendev.org/c/openstack/devstack/+/886795 Change-Id: Ibfe99ff732570dbd415772c5625f43e35b68c871 Related-bug: #2028123 --- lib/tempest | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index 4ba101f218..2f62f6ea62 100644 --- a/lib/tempest +++ b/lib/tempest @@ -249,8 +249,8 @@ function configure_tempest { 1) if [ -z "$image_uuid" ]; then image_uuid=${images[0]} - image_uuid_alt=${images[0]} fi + image_uuid_alt=$image_uuid ;; *) if [ -z "$image_uuid" ]; then From 770352beb05f63fb9192ad488b6b8344fd57c985 Mon Sep 17 00:00:00 2001 From: jskunda Date: Tue, 18 Jul 2023 09:32:05 +0200 Subject: [PATCH 333/574] git: git checkout for a commit hash combinated with depth argument This patch: https://review.opendev.org/c/openstack/devstack/+/882299 provides functionality, that commit hash can be passed as last arugment, however when GIT_DEPTH is set, it fails, as in: timeout -s SIGINT 0 git clone https://github.com/ovn-org/ovn.git ./ovn --depth 1 --branch 03b95a4566a15f7544f4cdf35629dacede4dcf55 fatal: Remote branch 03b95a4566a15f7544f4cdf35629dacede4dcf55 not found in upstream origin Closes-Bug: #2023020 Change-Id: I748354964a133e028e12458cc9014d6d014cbdb9 --- functions-common | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/functions-common b/functions-common index 63144d6ed7..a668d55b8d 100644 --- a/functions-common +++ b/functions-common @@ -609,9 +609,10 @@ function git_clone { echo "the project to the \$PROJECTS variable in the job definition." 
die $LINENO "ERROR_ON_CLONE is set to True so cloning not allowed in this configuration" fi - git_timed clone $git_clone_flags $git_remote $git_dest + git_timed clone --no-checkout $git_clone_flags $git_remote $git_dest cd $git_dest - git checkout $git_ref + git_timed fetch $git_clone_flags origin $git_ref + git_timed checkout FETCH_HEAD elif [[ "$RECLONE" = "True" ]]; then # if it does exist then simulate what clone does if asked to RECLONE cd $git_dest From d115bfd72a61f23bba0eb5d2d82c2ad94eac15e2 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Mon, 31 Jul 2023 07:04:34 -0700 Subject: [PATCH 334/574] Reduce the flush frequency of dbcounter plugin This relaxes the limits for dbcounter to make it flush stats to the database less often. Currently every thirty seconds or 100 hits, we write a stats line to the database. In some services (like keystone) this can trigger more than one write per second because of the massive number of SELECT calls that service makes. This removes the hit limit and decreases the mandatory flush interval to once a minute. Hopefully this will manifest as lower load on the database triggered by what would be readonly operations. Change-Id: I43a58532c0541075a2d36408abc50a41f7994bda --- tools/dbcounter/dbcounter.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/tools/dbcounter/dbcounter.py b/tools/dbcounter/dbcounter.py index 0ed7bb813a..86e5529c97 100644 --- a/tools/dbcounter/dbcounter.py +++ b/tools/dbcounter/dbcounter.py @@ -96,20 +96,18 @@ def stat_writer(self): This reads "hists" from from a queue fed by _log_event() and writes (db,op)+=count stats to the database after ten seconds of no activity to avoid triggering a write for every SELECT - call. Write no less often than every thirty seconds and/or 100 - pending hits to avoid being starved by constant activity. + call. Write no less often than every sixty seconds to avoid being + starved by constant activity. """ LOG.debug('[%i] Writer thread running' % os.getpid()) while True: to_write = {} - total = 0 last = time.time() - while time.time() - last < 30 and total < 100: + while time.time() - last < 60: try: item = self.queue.get(timeout=10) to_write.setdefault(item, 0) to_write[item] += 1 - total += 1 except queue.Empty: break From 7a2021dfa01368a69e1e43785419ac68b62a9b5f Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Wed, 7 Jun 2023 15:30:02 +0200 Subject: [PATCH 335/574] Add rocky job to gate It was made voting some time ago, but we missed also running it in gate. With that RHEL platform test in place, we can keep c9s permanently non-voting, which is better suited to match its instability. 
Change-Id: I6712ac6dc64e4fe2203b2a5f6a381f6d2150ba0f --- .zuul.yaml | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index a7be67153b..948a9af8ca 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -697,8 +697,6 @@ description: CentOS 9 Stream platform test nodeset: devstack-single-node-centos-9-stream timeout: 9000 - # TODO(kopecmartin) n-v until the following is resolved: - # https://bugs.launchpad.net/neutron/+bug/1979047 voting: false - job: @@ -968,11 +966,9 @@ jobs: - devstack - devstack-ipv6 - # TODO(kopecmartin) n-v until the following is resolved: - # https://bugs.launchpad.net/neutron/+bug/1979047 - # - devstack-platform-centos-9-stream - devstack-platform-debian-bullseye - devstack-platform-ubuntu-focal + - devstack-platform-rocky-blue-onyx - devstack-enforce-scope - devstack-multinode - devstack-unit-tests From a40f9cb91fbedddec89f0ffd6c7dd4b3828a232e Mon Sep 17 00:00:00 2001 From: Clark Boylan Date: Wed, 4 Apr 2018 14:02:30 -0700 Subject: [PATCH 336/574] Add option to install everything in global venvs Since we are python3 only for openstack we create a single python3 virtualenv to install all the packages into. This gives us the benefits of installing into a virtualenv while still ensuring coinstallability. This is a major change and will likely break many things. There are several reasons for this. The change that started this effort was pip stopped uninstalling packages which used distutils to generate their package installation. Many distro packages do this which meant that pip installed packages and distro packages could not coexist in the global install space. More recently git has made pip installing repos as root more difficult due to file ownership concerns. Currently the switch to the global venv is optional, but if we go down this path we should very quickly remove the old global installation method as it has only caused us problems. Major hurdles we have to get over are convincing rootwrap to trust binaries in the virtualenvs (so you'll notice we update rootwrap configs). Some distros still have issues, keep them using the old setup for now. Depends-On: https://review.opendev.org/c/openstack/grenade/+/880266 Co-Authored-By: Dr. 
Jens Harbott Change-Id: If9bc7ba45522189d03f19b86cb681bb150ee2f25 --- .zuul.yaml | 8 ++++++++ files/apache-horizon.template | 1 + functions-common | 5 +++++ inc/python | 34 +++++++++++++++++++++++++++++++--- inc/rootwrap | 5 +++++ lib/glance | 3 +++ lib/horizon | 6 ++++++ lib/tls | 7 +++++-- stack.sh | 12 ++++++++++++ stackrc | 8 ++++++++ tools/install_prereqs.sh | 2 ++ tools/memory_tracker.sh | 7 ++++++- 12 files changed, 92 insertions(+), 6 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index a7be67153b..803db3a3fa 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -700,6 +700,9 @@ # TODO(kopecmartin) n-v until the following is resolved: # https://bugs.launchpad.net/neutron/+bug/1979047 voting: false + vars: + devstack_localrc: + GLOBAL_VENV: false - job: name: devstack-platform-debian-bullseye @@ -709,6 +712,9 @@ timeout: 9000 vars: configure_swap_size: 4096 + devstack_localrc: + # TODO(frickler): drop this once wheel build is fixed + MYSQL_GATHER_PERFORMANCE: false - job: name: devstack-platform-rocky-blue-onyx @@ -718,6 +724,8 @@ timeout: 9000 vars: configure_swap_size: 4096 + devstack_localrc: + GLOBAL_VENV: false - job: name: devstack-platform-ubuntu-focal diff --git a/files/apache-horizon.template b/files/apache-horizon.template index efcfc0360b..da7a7d26c3 100644 --- a/files/apache-horizon.template +++ b/files/apache-horizon.template @@ -39,4 +39,5 @@ CustomLog /var/log/%APACHE_NAME%/horizon_access.log combined +%WSGIPYTHONHOME% WSGISocketPrefix /var/run/%APACHE_NAME% diff --git a/functions-common b/functions-common index 5e1aa34279..f752271976 100644 --- a/functions-common +++ b/functions-common @@ -1522,6 +1522,7 @@ function write_user_unit_file { mkdir -p $SYSTEMD_DIR iniset -sudo $unitfile "Unit" "Description" "Devstack $service" + iniset -sudo $unitfile "Service" "Environment" "\"PATH=$PATH\"" iniset -sudo $unitfile "Service" "User" "$user" iniset -sudo $unitfile "Service" "ExecStart" "$command" iniset -sudo $unitfile "Service" "KillMode" "process" @@ -1549,6 +1550,7 @@ function write_uwsgi_user_unit_file { mkdir -p $SYSTEMD_DIR iniset -sudo $unitfile "Unit" "Description" "Devstack $service" + iniset -sudo $unitfile "Service" "Environment" "\"PATH=$PATH\"" iniset -sudo $unitfile "Service" "SyslogIdentifier" "$service" iniset -sudo $unitfile "Service" "User" "$user" iniset -sudo $unitfile "Service" "ExecStart" "$command" @@ -1614,6 +1616,9 @@ function _run_under_systemd { fi local env_vars="$5" if [[ "$command" =~ "uwsgi" ]] ; then + if [[ "$GLOBAL_VENV" == "True" ]] ; then + cmd="$cmd --venv $DEVSTACK_VENV" + fi write_uwsgi_user_unit_file $systemd_service "$cmd" "$group" "$user" "$env_vars" else write_user_unit_file $systemd_service "$cmd" "$group" "$user" "$env_vars" diff --git a/inc/python b/inc/python index a24f4e910a..cc6e01fede 100644 --- a/inc/python +++ b/inc/python @@ -32,6 +32,23 @@ function join_extras { # Python Functions # ================ +# Setup the global devstack virtualenvs and the associated environment +# updates. +function setup_devstack_virtualenv { + # We run devstack out of a global virtualenv. + if [[ ! -d $DEVSTACK_VENV ]] ; then + # Using system site packages to enable nova to use libguestfs. + # This package is currently installed via the distro and not + # available on pypi. 
+ python$PYTHON3_VERSION -m venv --system-site-packages $DEVSTACK_VENV + pip_install -U pip + fi + if [[ ":$PATH:" != *":$DEVSTACK_VENV/bin:"* ]] ; then + export PATH="$DEVSTACK_VENV/bin:$PATH" + export PYTHON="$DEVSTACK_VENV/bin/python3" + fi +} + # Get the path to the pip command. # get_pip_command function get_pip_command { @@ -60,8 +77,11 @@ function get_python_exec_prefix { fi $xtrace - local PYTHON_PATH=/usr/local/bin - echo $PYTHON_PATH + if [[ "$GLOBAL_VENV" == "True" ]] ; then + echo "$DEVSTACK_VENV/bin" + else + echo "/usr/local/bin" + fi } # Wrapper for ``pip install`` that only installs versions of libraries @@ -166,6 +186,14 @@ function pip_install { if [[ -n ${PIP_VIRTUAL_ENV:=} && -d ${PIP_VIRTUAL_ENV} ]]; then local cmd_pip=$PIP_VIRTUAL_ENV/bin/pip local sudo_pip="env" + elif [[ "${GLOBAL_VENV}" == "True" && -d ${DEVSTACK_VENV} ]] ; then + # We have to check that the DEVSTACK_VENV exists because early + # devstack boostrapping needs to operate in a system context + # too bootstrap pip. Once pip is bootstrapped we create the + # global venv and can start to use it. + local cmd_pip=$DEVSTACK_VENV/bin/pip + local sudo_pip="env" + echo "Using python $PYTHON3_VERSION to install $package_dir" else local cmd_pip="python$PYTHON3_VERSION -m pip" # See @@ -439,7 +467,7 @@ function setup_package { pip_install $flags "$project_dir$extras" # ensure that further actions can do things like setup.py sdist - if [[ "$flags" == "-e" ]]; then + if [[ "$flags" == "-e" && "$GLOBAL_VENV" == "False" ]]; then safe_chown -R $STACK_USER $1/*.egg-info fi } diff --git a/inc/rootwrap b/inc/rootwrap index 2a6e4b648f..4c65440a4e 100644 --- a/inc/rootwrap +++ b/inc/rootwrap @@ -60,6 +60,11 @@ function configure_rootwrap { sudo install -o root -g root -m 644 $rootwrap_conf_src_dir/rootwrap.conf /etc/${project}/rootwrap.conf sudo sed -e "s:^filters_path=.*$:filters_path=/etc/${project}/rootwrap.d:" -i /etc/${project}/rootwrap.conf + # Rely on $PATH set by devstack to determine what is safe to execute + # by rootwrap rather than use explicit whitelist of paths in + # rootwrap.conf + sudo sed -e 's/^exec_dirs=.*/#&/' -i /etc/${project}/rootwrap.conf + # Set up the rootwrap sudoers local tempfile tempfile=$(mktemp) diff --git a/lib/glance b/lib/glance index 430d94d3a4..e64f00027e 100644 --- a/lib/glance +++ b/lib/glance @@ -47,6 +47,9 @@ USE_CINDER_FOR_GLANCE=$(trueorfalse False USE_CINDER_FOR_GLANCE) # from CINDER_ENABLED_BACKENDS GLANCE_CINDER_DEFAULT_BACKEND=${GLANCE_CINDER_DEFAULT_BACKEND:-lvmdriver-1} GLANCE_STORE_ROOTWRAP_BASE_DIR=/usr/local/etc/glance +if [[ "$GLOBAL_VENV" == "True" ]] ; then + GLANCE_STORE_ROOTWRAP_BASE_DIR=${DEVSTACK_VENV}/etc/glance +fi # When Cinder is used as a glance store, you can optionally configure cinder to # optimize bootable volume creation by allowing volumes to be cloned directly # in the backend instead of transferring data via Glance. 
To use this feature, diff --git a/lib/horizon b/lib/horizon index f76f9e557d..611329d619 100644 --- a/lib/horizon +++ b/lib/horizon @@ -115,6 +115,11 @@ function configure_horizon { local horizon_conf horizon_conf=$(apache_site_config_for horizon) + local wsgi_venv_config="" + if [[ "$GLOBAL_VENV" == "True" ]] ; then + wsgi_venv_config="WSGIPythonHome $DEVSTACK_VENV" + fi + # Configure apache to run horizon # Set up the django horizon application to serve via apache/wsgi sudo sh -c "sed -e \" @@ -124,6 +129,7 @@ function configure_horizon { s,%APACHE_NAME%,$APACHE_NAME,g; s,%DEST%,$DEST,g; s,%WEBROOT%,$HORIZON_APACHE_ROOT,g; + s,%WSGIPYTHONHOME%,$wsgi_venv_config,g; \" $FILES/apache-horizon.template >$horizon_conf" if is_ubuntu; then diff --git a/lib/tls b/lib/tls index a1e162d2e2..d35e9e2cee 100644 --- a/lib/tls +++ b/lib/tls @@ -364,8 +364,11 @@ function deploy_int_CA { function fix_system_ca_bundle_path { if is_service_enabled tls-proxy; then local capath - capath=$(python3 -c $'try:\n from requests import certs\n print (certs.where())\nexcept ImportError: pass') - + if [[ "$GLOBAL_VENV" == "True" ]] ; then + capath=$($DEVSTACK_VENV/bin/python3 -c $'try:\n from requests import certs\n print (certs.where())\nexcept ImportError: pass') + else + capath=$(python3 -c $'try:\n from requests import certs\n print (certs.where())\nexcept ImportError: pass') + fi if [[ ! $capath == "" && ! $capath =~ ^/etc/.* && ! -L $capath ]]; then if is_fedora; then sudo rm -f $capath diff --git a/stack.sh b/stack.sh index ad88eab9d5..c8f7c9d79e 100755 --- a/stack.sh +++ b/stack.sh @@ -1,5 +1,6 @@ #!/usr/bin/env bash + # ``stack.sh`` is an opinionated OpenStack developer installation. It # installs and configures various combinations of **Cinder**, **Glance**, # **Horizon**, **Keystone**, **Nova**, **Neutron**, and **Swift** @@ -824,6 +825,17 @@ fi source $TOP_DIR/tools/fixup_stuff.sh fixup_all +if [[ "$GLOBAL_VENV" == "True" ]] ; then + # TODO(frickler): find a better solution for this + sudo ln -sf /opt/stack/data/venv/bin/privsep-helper /usr/local/bin + sudo ln -sf /opt/stack/data/venv/bin/cinder-rtstool /usr/local/bin + sudo ln -sf /opt/stack/data/venv/bin/openstack /usr/local/bin + sudo ln -sf /opt/stack/data/venv/bin/tox /usr/local/bin + sudo ln -sf /opt/stack/data/venv/bin/nova-manage /usr/local/bin + + setup_devstack_virtualenv +fi + # Install subunit for the subunit output stream pip_install -U os-testr diff --git a/stackrc b/stackrc index dcc0ce45e0..0d1880cec9 100644 --- a/stackrc +++ b/stackrc @@ -183,6 +183,14 @@ IDENTITY_API_VERSION=3 # each services ${SERVICE}_ENFORCE_SCOPE variables ENFORCE_SCOPE=$(trueorfalse False ENFORCE_SCOPE) +# Devstack supports the use of a global virtualenv. These variables enable +# and disable this functionality as well as set the path to the virtualenv. +# Note that the DATA_DIR is selected because grenade testing uses a shared +# DATA_DIR but different DEST dirs and we don't want two sets of venvs, +# instead we want one global set. +GLOBAL_VENV=$(trueorfalse True GLOBAL_VENV) +DEVSTACK_VENV=${DEVSTACK_VENV:-$DATA_DIR/venv} + # Enable use of Python virtual environments. Individual project use of # venvs are controlled by the PROJECT_VENV array; every project with # an entry in the array will be installed into the named venv. 
diff --git a/tools/install_prereqs.sh b/tools/install_prereqs.sh index f2d57c8451..bb470b2927 100755 --- a/tools/install_prereqs.sh +++ b/tools/install_prereqs.sh @@ -79,6 +79,8 @@ if [[ -n "$SYSLOG" && "$SYSLOG" != "False" ]]; then fi fi +# TODO(clarkb) remove these once we are switched to global venv by default +export PYTHON=$(which python${PYTHON3_VERSION} 2>/dev/null || which python3 2>/dev/null) # Mark end of run # --------------- diff --git a/tools/memory_tracker.sh b/tools/memory_tracker.sh index 6c36534f01..2f404c26fb 100755 --- a/tools/memory_tracker.sh +++ b/tools/memory_tracker.sh @@ -14,7 +14,12 @@ set -o errexit -PYTHON=${PYTHON:-python3} +# TODO(frickler): make this use stackrc variables +if [ -x /opt/stack/data/venv/bin/python ]; then + PYTHON=/opt/stack/data/venv/bin/python +else + PYTHON=${PYTHON:-python3} +fi # time to sleep between checks SLEEP_TIME=20 From 0b79f6f7690773701a37921f626782e528fa9c36 Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Tue, 4 Jul 2023 07:18:01 +0200 Subject: [PATCH 337/574] Add debian-bookworm job Change-Id: Id5e54775e2be38a75db0bd1f55d1d3b5ae7ef71f --- .zuul.yaml | 24 ++++++++++++++++++++++++ lib/databases/mysql | 5 +++-- stack.sh | 2 +- 3 files changed, 28 insertions(+), 3 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 803db3a3fa..9cc95b607e 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -78,6 +78,16 @@ nodes: - controller +- nodeset: + name: devstack-single-node-debian-bookworm + nodes: + - name: controller + label: debian-bookworm + groups: + - name: tempest + nodes: + - controller + - nodeset: name: devstack-single-node-debian-bullseye nodes: @@ -704,6 +714,19 @@ devstack_localrc: GLOBAL_VENV: false +- job: + name: devstack-platform-debian-bookworm + parent: tempest-full-py3 + description: Debian Bookworm platform test + nodeset: devstack-single-node-debian-bookworm + timeout: 9000 + voting: false + vars: + configure_swap_size: 4096 + devstack_localrc: + # TODO(frickler): drop this once wheel build is fixed + MYSQL_GATHER_PERFORMANCE: false + - job: name: devstack-platform-debian-bullseye parent: tempest-full-py3 @@ -926,6 +949,7 @@ - devstack-ipv6 - devstack-enforce-scope - devstack-platform-centos-9-stream + - devstack-platform-debian-bookworm - devstack-platform-debian-bullseye - devstack-platform-rocky-blue-onyx - devstack-platform-ubuntu-focal diff --git a/lib/databases/mysql b/lib/databases/mysql index 27d1ec600f..e069e128e9 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -20,7 +20,7 @@ if [[ -z "$MYSQL_SERVICE_NAME" ]]; then MYSQL_SERVICE_NAME=mysql if is_fedora && ! is_oraclelinux; then MYSQL_SERVICE_NAME=mariadb - elif [[ "$DISTRO" == "bullseye" ]]; then + elif [[ "$DISTRO" =~ bookworm|bullseye ]]; then MYSQL_SERVICE_NAME=mariadb fi fi @@ -122,7 +122,8 @@ function configure_database_mysql { # In mariadb e.g. on Ubuntu socket plugin is used for authentication # as root so it works only as sudo. To restore old "mysql like" behaviour, # we need to change auth plugin for root user - if is_ubuntu && [[ "$DISTRO" != "bullseye" ]] && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]; then + # TODO(frickler): simplify this logic + if is_ubuntu && [[ ! 
"$DISTRO" =~ bookworm|bullseye ]] && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]; then if [[ "$DISTRO" == "jammy" ]]; then # For Ubuntu 22.04 (jammy) we follow the model outlined in # https://mariadb.org/authentication-in-mariadb-10-4/ diff --git a/stack.sh b/stack.sh index c8f7c9d79e..0434001b7a 100755 --- a/stack.sh +++ b/stack.sh @@ -230,7 +230,7 @@ write_devstack_version # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -SUPPORTED_DISTROS="bullseye|focal|jammy|rhel8|rhel9|openEuler-22.03" +SUPPORTED_DISTROS="bookworm|bullseye|focal|jammy|rhel8|rhel9|openEuler-22.03" if [[ ! ${DISTRO} =~ $SUPPORTED_DISTROS ]]; then echo "WARNING: this script has not been tested on $DISTRO" From 113689ee4694de20c019735fdace447225aa18f7 Mon Sep 17 00:00:00 2001 From: yatinkarel Date: Wed, 2 Aug 2023 12:58:45 +0530 Subject: [PATCH 338/574] Woraround systemd issue on CentOS 9-stream systemd-252-16.el9 introduced a regression where libvirtd process exits after 120s of inactivity. Add a workaround to unset 120s timeout for libvirtd, the workaround can be removed once the fix is available in systemd rpm. Related-Bug: #2029335 Change-Id: Id6db6c17518b54d5fef7c381c509066a569aff6d --- tools/fixup_stuff.sh | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index faea44f1e0..80a83bb128 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -94,6 +94,11 @@ EOF if [[ $os_VENDOR == "CentOSStream" && $os_RELEASE -eq 8 ]]; then sudo sysctl -w net.ipv4.ping_group_range='0 2147483647' fi + # TODO(ykarel): Workaround for systemd issue, remove once fix is + # included in systemd rpm https://bugs.launchpad.net/devstack/+bug/2029335 + if [[ $os_VENDOR == "CentOSStream" && $os_RELEASE -eq 9 ]]; then + echo 'LIBVIRTD_ARGS=""' | sudo tee /etc/sysconfig/libvirtd + fi } function fixup_ovn_centos { From 3832ff52b4445324b58a5da123ef4e3880df1591 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Thu, 3 Aug 2023 09:16:55 -0700 Subject: [PATCH 339/574] Add SERVICE_REPORT_INTERVAL knob Heavily-loaded workers in CI consistently fail to complete the service checkin task, which is configured for every ten seconds in nova and cinder. This generates additional load on the database server as well as consumes a threadpool worker. If we're not making the deadline, there's really no point in having it be so high. Further, since the workers must remain up for all the tempest tests we're running against them, there's really no benefit to a fast-fail detection. This sets the report_interval to 120s for nova and cinder, and sets service_down_time to 6x that value, which is consistent with the default scale. Depends-On: https://review.opendev.org/c/openstack/tempest/+/890448 Change-Id: Idd7aa1daf354256b143a3778f161cfc72b318ea5 --- lib/cinder | 8 ++++++++ lib/nova | 8 ++++++++ 2 files changed, 16 insertions(+) diff --git a/lib/cinder b/lib/cinder index e37eff4019..f8682d5a71 100644 --- a/lib/cinder +++ b/lib/cinder @@ -76,6 +76,11 @@ CINDER_SERVICE_PORT_INT=${CINDER_SERVICE_PORT_INT:-18776} CINDER_SERVICE_PROTOCOL=${CINDER_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} CINDER_SERVICE_LISTEN_ADDRESS=${CINDER_SERVICE_LISTEN_ADDRESS:-$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)} +# We do not need to report service status every 10s for devstack-like +# deployments. In the gate this generates extra work for the services and the +# database which are already taxed. 
+CINDER_SERVICE_REPORT_INTERVAL=120 + # What type of LVM device should Cinder use for LVM backend # Defaults to auto, which will do thin provisioning if it's a fresh # volume group, otherwise it will do thick. The other valid choices are @@ -325,6 +330,9 @@ function configure_cinder { # details and example failures. iniset $CINDER_CONF DEFAULT rpc_response_timeout 120 + iniset $CINDER_CONF DEFAULT report_interval $CINDER_SERVICE_REPORT_INTERVAL + iniset $CINDER_CONF DEFAULT service_down_time $(($CINDER_SERVICE_REPORT_INTERVAL * 6)) + if is_service_enabled c-vol && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then local enabled_backends="" local default_name="" diff --git a/lib/nova b/lib/nova index 888a2e2b25..905788f28f 100644 --- a/lib/nova +++ b/lib/nova @@ -75,6 +75,11 @@ NOVA_API_PASTE_INI=${NOVA_API_PASTE_INI:-$NOVA_CONF_DIR/api-paste.ini} # mean "use uwsgi" because we'll be always using uwsgi. NOVA_USE_MOD_WSGI=${NOVA_USE_MOD_WSGI:-True} +# We do not need to report service status every 10s for devstack-like +# deployments. In the gate this generates extra work for the services and the +# database which are already taxed. +NOVA_SERVICE_REPORT_INTERVAL=120 + if is_service_enabled tls-proxy; then NOVA_SERVICE_PROTOCOL="https" fi @@ -448,6 +453,9 @@ function create_nova_conf { iniset $NOVA_CONF key_manager backend nova.keymgr.conf_key_mgr.ConfKeyManager + iniset $NOVA_CONF DEFAULT report_interval $NOVA_SERVICE_REPORT_INTERVAL + iniset $NOVA_CONF DEFAULT service_down_time $(($NOVA_SERVICE_REPORT_INTERVAL * 6)) + if is_fedora; then # nova defaults to /usr/local/bin, but fedora pip like to # install things in /usr/bin From c3b0b9034e6b35187a125283e55056ae90cbbc4a Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Fri, 4 Aug 2023 06:41:30 -0700 Subject: [PATCH 340/574] Disable waiting forever for connpool workers This will cause apache to no longer wait forever for a connection pool member to become available before returning 503 to the client. This may help us determine if some of the timeouts we see when talking to the services come from an overloaded apache. 
Change-Id: Ibc19fc9a53e2330f9aca45f5a10a59c576cb22e6 --- lib/apache | 6 +++--- lib/tls | 4 +++- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/lib/apache b/lib/apache index 76eae9c057..cf7215bef2 100644 --- a/lib/apache +++ b/lib/apache @@ -290,7 +290,7 @@ function write_uwsgi_config { apache_conf=$(apache_site_config_for $name) iniset "$file" uwsgi socket "$socket" iniset "$file" uwsgi chmod-socket 666 - echo "ProxyPass \"${url}\" \"unix:${socket}|uwsgi://uwsgi-uds-${name}\" retry=0 " | sudo tee -a $apache_conf + echo "ProxyPass \"${url}\" \"unix:${socket}|uwsgi://uwsgi-uds-${name}\" retry=0 acquire=1 " | sudo tee -a $apache_conf enable_apache_site $name restart_apache_server fi @@ -351,7 +351,7 @@ function write_local_uwsgi_http_config { apache_conf=$(apache_site_config_for $name) echo "KeepAlive Off" | sudo tee $apache_conf echo "SetEnv proxy-sendchunked 1" | sudo tee -a $apache_conf - echo "ProxyPass \"${url}\" \"http://$APACHE_LOCAL_HOST:$port\" retry=0 " | sudo tee -a $apache_conf + echo "ProxyPass \"${url}\" \"http://$APACHE_LOCAL_HOST:$port\" retry=0 acquire=1 " | sudo tee -a $apache_conf enable_apache_site $name restart_apache_server } @@ -370,7 +370,7 @@ function write_local_proxy_http_config { echo "KeepAlive Off" | sudo tee $apache_conf echo "SetEnv proxy-sendchunked 1" | sudo tee -a $apache_conf - echo "ProxyPass \"${loc}\" \"$url\" retry=0 " | sudo tee -a $apache_conf + echo "ProxyPass \"${loc}\" \"$url\" retry=0 acquire=1 " | sudo tee -a $apache_conf enable_apache_site $name restart_apache_server } diff --git a/lib/tls b/lib/tls index a1e162d2e2..48e5929a2b 100644 --- a/lib/tls +++ b/lib/tls @@ -541,9 +541,11 @@ $listen_string # Avoid races (at the cost of performance) to re-use a pooled connection # where the connection is closed (bug 1807518). + # Set acquire=1 to disable waiting for connection pool members so that + # we can determine when apache is overloaded (returns 503). SetEnv proxy-initial-not-pooled - ProxyPass http://$b_host:$b_port/ retry=0 nocanon + ProxyPass http://$b_host:$b_port/ retry=0 nocanon acquire=1 ProxyPassReverse http://$b_host:$b_port/ ErrorLog $APACHE_LOG_DIR/tls-proxy_error.log From 0da88c4af096ab95ccf438960433bb113278181e Mon Sep 17 00:00:00 2001 From: Alfredo Moralejo Date: Mon, 7 Aug 2023 14:13:41 +0200 Subject: [PATCH 341/574] Remove wget + rpm workaround to manage repos install in CentOS RDO has moved rdo-release packages to a new infra which supports EMS so we do not need to wget it and install it using local rpm install. This partially reverts [1]. 
[1] https://review.opendev.org/c/openstack/devstack/+/884277/ Change-Id: I189d0c3da0e7b017e2568022c14e6c8fb28251f1 --- stack.sh | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/stack.sh b/stack.sh index ad88eab9d5..b03c3cda1f 100755 --- a/stack.sh +++ b/stack.sh @@ -311,22 +311,14 @@ function _install_rdo { sudo dnf -y install https://rdoproject.org/repos/openstack-${rdo_release}/rdo-release-${rdo_release}.el8.rpm fi elif [[ $DISTRO == "rhel9" ]]; then - install_package wget - # We need to download rdo-release package using wget as installing with dnf from repo.fedoraproject.org fails in - # FIPS enabled systems after https://bugzilla.redhat.com/show_bug.cgi?id=2157951 - # Until we can pull rdo-release from a server which supports EMS, this workaround is doing wget, which does - # not relies on openssl but on gnutls, and then install it locally using rpm - TEMPRDODIR=$(mktemp -d) if [[ "$TARGET_BRANCH" == "master" ]]; then # rdo-release.el9.rpm points to latest RDO release, use that for master - wget -P $TEMPRDODIR https://rdoproject.org/repos/rdo-release.el9.rpm + sudo dnf -y install https://rdoproject.org/repos/rdo-release.el9.rpm else # For stable branches use corresponding release rpm rdo_release=$(echo $TARGET_BRANCH | sed "s|stable/||g") - wget -P $TEMPRDODIR https://rdoproject.org/repos/openstack-${rdo_release}/rdo-release-${rdo_release}.el9.rpm + sudo dnf -y install https://rdoproject.org/repos/openstack-${rdo_release}/rdo-release-${rdo_release}.el9.rpm fi - sudo rpm -ivh $TEMPRDODIR/rdo-release*rpm - rm -rf $TEMPRDODIR fi sudo dnf -y update } From 4363b0bd84aad8984ee148b3b4868b311e5d855b Mon Sep 17 00:00:00 2001 From: Brian Haley Date: Tue, 8 Aug 2023 08:38:00 -0400 Subject: [PATCH 342/574] Fix $LOGDIR owner to be stack.stack I have seen this failure in the gate a few times: [ERROR] /opt/stack/devstack/functions-common:2334 Neutron did not start /opt/stack/devstack/functions-common: line 310: /opt/stack/logs/error.log: Permission denied So whatever was trying to be written to error.log never happened. Change to be like other directories in this file and make the $LOGDIR owner stack.stack. Change-Id: I673011aba10c8d03234100503ccc5876e75baff2 --- stack.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index ad88eab9d5..e0caafafa6 100755 --- a/stack.sh +++ b/stack.sh @@ -349,7 +349,9 @@ fi # Destination path for devstack logs if [[ -n ${LOGDIR:-} ]]; then - mkdir -p $LOGDIR + sudo mkdir -p $LOGDIR + safe_chown -R $STACK_USER $LOGDIR + safe_chmod 0755 $LOGDIR fi # Destination path for service data From 26b5eddeaaeb3e142d483c12d9a501fdc6abaf10 Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Fri, 11 Aug 2023 21:51:05 +0200 Subject: [PATCH 343/574] GLOBAL_VENV: add nova to linked binaries This is being used in some nova jobs, so we need to add it. Also order the list of linked binaries to allow easier maintenance. 
Change-Id: Ief012f7842d6e14380c9575740d1856bc1f2355e --- stack.sh | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/stack.sh b/stack.sh index c8f7c9d79e..641c3c3b5a 100755 --- a/stack.sh +++ b/stack.sh @@ -827,11 +827,12 @@ fixup_all if [[ "$GLOBAL_VENV" == "True" ]] ; then # TODO(frickler): find a better solution for this - sudo ln -sf /opt/stack/data/venv/bin/privsep-helper /usr/local/bin sudo ln -sf /opt/stack/data/venv/bin/cinder-rtstool /usr/local/bin + sudo ln -sf /opt/stack/data/venv/bin/nova /usr/local/bin + sudo ln -sf /opt/stack/data/venv/bin/nova-manage /usr/local/bin sudo ln -sf /opt/stack/data/venv/bin/openstack /usr/local/bin + sudo ln -sf /opt/stack/data/venv/bin/privsep-helper /usr/local/bin sudo ln -sf /opt/stack/data/venv/bin/tox /usr/local/bin - sudo ln -sf /opt/stack/data/venv/bin/nova-manage /usr/local/bin setup_devstack_virtualenv fi From 4c45bec6ebb965202d8d7d7832c093f47ecc2910 Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Sat, 12 Aug 2023 11:35:08 +0200 Subject: [PATCH 344/574] GLOBAL_VENV: add more binaries glance and rally binaries are also needed. Also make sure the cinder-rtstool is only called when cinder is actually enabled. Change-Id: I18113eabf2fa83e36bace276883775303f6a1e9a --- lib/lvm | 20 +++++++++++--------- stack.sh | 2 ++ 2 files changed, 13 insertions(+), 9 deletions(-) diff --git a/lib/lvm b/lib/lvm index 57d2cd4e62..162c491f22 100644 --- a/lib/lvm +++ b/lib/lvm @@ -137,15 +137,17 @@ function init_lvm_volume_group { # Start with a clean volume group _create_lvm_volume_group $vg $size - # Remove iscsi targets - if [ "$CINDER_TARGET_HELPER" = "lioadm" ]; then - sudo cinder-rtstool get-targets | sudo xargs -rn 1 cinder-rtstool delete - elif [ "$CINDER_TARGET_HELPER" = "tgtadm" ]; then - sudo tgtadm --op show --mode target | awk '/Target/ {print $3}' | sudo xargs -r -n1 tgt-admin --delete - elif [ "$CINDER_TARGET_HELPER" = "nvmet" ]; then - # If we don't disconnect everything vgremove will block - sudo nvme disconnect-all - sudo nvmetcli clear + if is_service_enabled cinder; then + # Remove iscsi targets + if [ "$CINDER_TARGET_HELPER" = "lioadm" ]; then + sudo cinder-rtstool get-targets | sudo xargs -rn 1 cinder-rtstool delete + elif [ "$CINDER_TARGET_HELPER" = "tgtadm" ]; then + sudo tgtadm --op show --mode target | awk '/Target/ {print $3}' | sudo xargs -r -n1 tgt-admin --delete + elif [ "$CINDER_TARGET_HELPER" = "nvmet" ]; then + # If we don't disconnect everything vgremove will block + sudo nvme disconnect-all + sudo nvmetcli clear + fi fi _clean_lvm_volume_group $vg } diff --git a/stack.sh b/stack.sh index 94d586e812..d8b70a2b39 100755 --- a/stack.sh +++ b/stack.sh @@ -828,10 +828,12 @@ fixup_all if [[ "$GLOBAL_VENV" == "True" ]] ; then # TODO(frickler): find a better solution for this sudo ln -sf /opt/stack/data/venv/bin/cinder-rtstool /usr/local/bin + sudo ln -sf /opt/stack/data/venv/bin/glance /usr/local/bin sudo ln -sf /opt/stack/data/venv/bin/nova /usr/local/bin sudo ln -sf /opt/stack/data/venv/bin/nova-manage /usr/local/bin sudo ln -sf /opt/stack/data/venv/bin/openstack /usr/local/bin sudo ln -sf /opt/stack/data/venv/bin/privsep-helper /usr/local/bin + sudo ln -sf /opt/stack/data/venv/bin/rally /usr/local/bin sudo ln -sf /opt/stack/data/venv/bin/tox /usr/local/bin setup_devstack_virtualenv From 08b434e5b06a0f28a1779159df494d27db95704c Mon Sep 17 00:00:00 2001 From: Ghanshyam Date: Mon, 14 Aug 2023 17:05:45 +0000 Subject: [PATCH 345/574] Revert "GLOBAL_VENV: add nova to linked binaries" This reverts commit 
26b5eddeaaeb3e142d483c12d9a501fdc6abaf10. Reason for revert: nova changed to use osc - https://review.opendev.org/c/openstack/nova/+/891247/2 Resolving conflict due to - https://review.opendev.org/c/openstack/devstack/+/891248 Change-Id: I69e179a90a241946b3f426a41c38ae72a66ba6dc --- stack.sh | 1 - 1 file changed, 1 deletion(-) diff --git a/stack.sh b/stack.sh index d8b70a2b39..a8f46bfeb9 100755 --- a/stack.sh +++ b/stack.sh @@ -829,7 +829,6 @@ if [[ "$GLOBAL_VENV" == "True" ]] ; then # TODO(frickler): find a better solution for this sudo ln -sf /opt/stack/data/venv/bin/cinder-rtstool /usr/local/bin sudo ln -sf /opt/stack/data/venv/bin/glance /usr/local/bin - sudo ln -sf /opt/stack/data/venv/bin/nova /usr/local/bin sudo ln -sf /opt/stack/data/venv/bin/nova-manage /usr/local/bin sudo ln -sf /opt/stack/data/venv/bin/openstack /usr/local/bin sudo ln -sf /opt/stack/data/venv/bin/privsep-helper /usr/local/bin From 220004fb5c529d84e2e8d909db71cf17a00c0815 Mon Sep 17 00:00:00 2001 From: Artom Lifshitz Date: Wed, 16 Aug 2023 14:08:15 -0400 Subject: [PATCH 346/574] Allow others to override NOVA_SERVICE_REPORT_INTERVAL While the patch where this was first introduced and set to 120 [1] is sensible for the vast majority of jobs, it's conceivable that some jobs might want a different value. Specifically, the whitebox-tempest-plugin changes configurations and restarts Nova services, and to do so it waits for the service status to update in the API before continuing with the tests. With the report interval set to 120 and the down time threshold set to 720, the service would continue showing 'up' in the API long after it was actually down, causing the wait to time out. Whitebox is a low-traffic project with only a couple of devstack jobs that run tempest tests sequentially (concurrency=1). Its CI is also pretty stable. It seems legitimate for it to keep the old default values of report_interval and service_down_time. This patch keeps the 120 default for NOVA_SERVICE_REPORT_INTERVAL, but makes it configurable by individual jobs. Since the original patch also introduced CINDER_SERVICE_REPORT_INTERVAL as a constant, make that configurable as well. [1] https://review.opendev.org/c/openstack/devstack/+/890439 Needed-by: https://review.opendev.org/c/openstack/whitebox-tempest-plugin/+/891612 Change-Id: I64fa2059537ea072a38fb4900d3c7d2d8f0ce429 --- lib/cinder | 2 +- lib/nova | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/cinder b/lib/cinder index f8682d5a71..768a069a12 100644 --- a/lib/cinder +++ b/lib/cinder @@ -79,7 +79,7 @@ CINDER_SERVICE_LISTEN_ADDRESS=${CINDER_SERVICE_LISTEN_ADDRESS:-$(ipv6_unquote $S # We do not need to report service status every 10s for devstack-like # deployments. In the gate this generates extra work for the services and the # database which are already taxed. -CINDER_SERVICE_REPORT_INTERVAL=120 +CINDER_SERVICE_REPORT_INTERVAL=${CINDER_SERVICE_REPORT_INTERVAL:-120} # What type of LVM device should Cinder use for LVM backend # Defaults to auto, which will do thin provisioning if it's a fresh diff --git a/lib/nova b/lib/nova index 905788f28f..da3118f4cd 100644 --- a/lib/nova +++ b/lib/nova @@ -78,7 +78,7 @@ NOVA_USE_MOD_WSGI=${NOVA_USE_MOD_WSGI:-True} # We do not need to report service status every 10s for devstack-like # deployments. In the gate this generates extra work for the services and the # database which are already taxed. 
-NOVA_SERVICE_REPORT_INTERVAL=120 +NOVA_SERVICE_REPORT_INTERVAL=${NOVA_SERVICE_REPORT_INTERVAL:-120} if is_service_enabled tls-proxy; then NOVA_SERVICE_PROTOCOL="https" From 7c4a955c52ead024ef50f448b3894b5ef362508d Mon Sep 17 00:00:00 2001 From: yatin Date: Mon, 21 Aug 2023 06:28:30 +0000 Subject: [PATCH 347/574] Revert "Woraround systemd issue on CentOS 9-stream" This reverts commit 113689ee4694de20c019735fdace447225aa18f7. Reason for revert: systemd-252-17.el9 which includes the fix is now available in CentOS 9-stream repos. Change-Id: I6fe19838a75a30fd5d2434c03b7f403f1c7e4b50 --- tools/fixup_stuff.sh | 5 ----- 1 file changed, 5 deletions(-) diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index 80a83bb128..faea44f1e0 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -94,11 +94,6 @@ EOF if [[ $os_VENDOR == "CentOSStream" && $os_RELEASE -eq 8 ]]; then sudo sysctl -w net.ipv4.ping_group_range='0 2147483647' fi - # TODO(ykarel): Workaround for systemd issue, remove once fix is - # included in systemd rpm https://bugs.launchpad.net/devstack/+bug/2029335 - if [[ $os_VENDOR == "CentOSStream" && $os_RELEASE -eq 9 ]]; then - echo 'LIBVIRTD_ARGS=""' | sudo tee /etc/sysconfig/libvirtd - fi } function fixup_ovn_centos { From 3a7a3cd8c5a5ac3f1655d6ff17974f8623fb3330 Mon Sep 17 00:00:00 2001 From: Jan Gutter Date: Mon, 14 Aug 2023 21:02:04 +0100 Subject: [PATCH 348/574] Update etcd version to 3.4.27 * etcd 3.3 is no longer maintained. * etcd 3.4 removes deprecated interfaces, and clients may need updated configs. * The cinder backend coordination URL needs to explicitly specify the version, until tooz can be updated https://review.opendev.org/c/openstack/tooz/+/891355 * etcd only supports in-place upgrades between minor versions, so any jobs testing upgrades could fail if they skip from 3.2 directly to 3.4 Change-Id: Ifcecdffa17a3a2b1075aa503978c44545c4a2a3c --- lib/cinder | 4 +++- stackrc | 10 +++++----- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/lib/cinder b/lib/cinder index f8682d5a71..dad17980bc 100644 --- a/lib/cinder +++ b/lib/cinder @@ -414,7 +414,9 @@ function configure_cinder { if [[ ! 
-z "$CINDER_COORDINATION_URL" ]]; then iniset $CINDER_CONF coordination backend_url "$CINDER_COORDINATION_URL" elif is_service_enabled etcd3; then - iniset $CINDER_CONF coordination backend_url "etcd3+http://${SERVICE_HOST}:$ETCD_PORT" + # NOTE(jan.gutter): api_version can revert to default once tooz is + # updated with the etcd v3.4 defaults + iniset $CINDER_CONF coordination backend_url "etcd3+http://${SERVICE_HOST}:$ETCD_PORT?api_version=v3" fi if [[ "$CINDER_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then diff --git a/stackrc b/stackrc index 0d1880cec9..2d25e379fd 100644 --- a/stackrc +++ b/stackrc @@ -728,11 +728,11 @@ fi EXTRA_CACHE_URLS="" # etcd3 defaults -ETCD_VERSION=${ETCD_VERSION:-v3.3.12} -ETCD_SHA256_AMD64=${ETCD_SHA256_AMD64:-"dc5d82df095dae0a2970e4d870b6929590689dd707ae3d33e7b86da0f7f211b6"} -ETCD_SHA256_ARM64=${ETCD_SHA256_ARM64:-"170b848ac1a071fe7d495d404a868a2c0090750b2944f8a260ef1c6125b2b4f4"} -ETCD_SHA256_PPC64=${ETCD_SHA256_PPC64:-"77f807b1b51abbf51e020bb05bdb8ce088cb58260fcd22749ea32eee710463d3"} -# etcd v3.2.x doesn't have anything for s390x +ETCD_VERSION=${ETCD_VERSION:-v3.4.27} +ETCD_SHA256_AMD64=${ETCD_SHA256_AMD64:-"a32d21e006252dbc3405b0645ba8468021ed41376974b573285927bf39b39eb9"} +ETCD_SHA256_ARM64=${ETCD_SHA256_ARM64:-"ed7e257c225b9b9545fac22246b97f4074a4b5109676e92dbaebfb9315b69cc0"} +ETCD_SHA256_PPC64=${ETCD_SHA256_PPC64:-"eb8825e0bc2cbaf9e55947f5ee373ebc9ca43b6a2ea5ced3b992c81855fff37e"} +# etcd v3.2.x and later doesn't have anything for s390x ETCD_SHA256_S390X=${ETCD_SHA256_S390X:-""} # Make sure etcd3 downloads the correct architecture if is_arch "x86_64"; then From 7cd3a8eebe1830f94c02bb6ec010c0365f6ab6f1 Mon Sep 17 00:00:00 2001 From: Martin Kopec Date: Tue, 22 Aug 2023 20:40:20 +0200 Subject: [PATCH 349/574] Set GLOBAL_VENV to false for centos and rocky As a temporary workaround, let's set the GLOBAL_VENV to false specifically for centos 9 stream and rocky distros where we encountered issues after changing the default value of GLOBAL_VENV to True in Devstack: https://review.opendev.org/c/openstack/devstack/+/558930 Related-Bug: #2031639 Change-Id: I708b5a81c32b0bd650dcd63a51e16346863a6fc0 --- .zuul.yaml | 5 ----- stackrc | 11 ++++++++++- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index e65dc5b7cf..8b60fc9936 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -708,9 +708,6 @@ nodeset: devstack-single-node-centos-9-stream timeout: 9000 voting: false - vars: - devstack_localrc: - GLOBAL_VENV: false - job: name: devstack-platform-debian-bookworm @@ -745,8 +742,6 @@ timeout: 9000 vars: configure_swap_size: 4096 - devstack_localrc: - GLOBAL_VENV: false - job: name: devstack-platform-ubuntu-focal diff --git a/stackrc b/stackrc index 0d1880cec9..bd4e2f17a2 100644 --- a/stackrc +++ b/stackrc @@ -188,9 +188,18 @@ ENFORCE_SCOPE=$(trueorfalse False ENFORCE_SCOPE) # Note that the DATA_DIR is selected because grenade testing uses a shared # DATA_DIR but different DEST dirs and we don't want two sets of venvs, # instead we want one global set. -GLOBAL_VENV=$(trueorfalse True GLOBAL_VENV) DEVSTACK_VENV=${DEVSTACK_VENV:-$DATA_DIR/venv} +# NOTE(kopecmartin): remove this once this is fixed +# https://bugs.launchpad.net/devstack/+bug/2031639 +# This couldn't go to fixup_stuff as that's called after projects +# (e.g. 
certain paths) are set taking GLOBAL_VENV into account +if [[ "$os_VENDOR" =~ (CentOSStream|Rocky) ]]; then + GLOBAL_VENV=$(trueorfalse False GLOBAL_VENV) +else + GLOBAL_VENV=$(trueorfalse True GLOBAL_VENV) +fi + # Enable use of Python virtual environments. Individual project use of # venvs are controlled by the PROJECT_VENV array; every project with # an entry in the array will be installed into the named venv. From 5a51aa524c1f955a4650099c344756acc6c6b507 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Wed, 23 Aug 2023 10:43:32 -0700 Subject: [PATCH 350/574] Fix glance-remote with global venv The base systemd unit file setup now writes an Environment= line to the file for the venv. The glance-remote code was setting that to point at the alternate config location, using iniset which was clobbering the venv one. Switch to iniadd to fix. Also, we need to explicitly put the --venv flag into the command since we write our unit file ourselves. This probably needs a cleanup at this point, but since the glance gate is blocked, do this for now. Change-Id: I2bd33de45c41b18ed7d4270a7301b1e322134987 --- lib/glance | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/lib/glance b/lib/glance index e64f00027e..3cf8230f41 100644 --- a/lib/glance +++ b/lib/glance @@ -584,9 +584,10 @@ function start_glance_remote_clone { write_uwsgi_user_unit_file devstack@g-api-r.service "$(which uwsgi) \ --procname-prefix \ glance-api-remote \ - --ini $glance_remote_uwsgi" \ + --ini $glance_remote_uwsgi \ + --venv $DEVSTACK_VENV" \ "" "$STACK_USER" - iniset -sudo ${SYSTEMD_DIR}/devstack@g-api-r.service \ + iniadd -sudo ${SYSTEMD_DIR}/devstack@g-api-r.service \ "Service" "Environment" \ "OS_GLANCE_CONFIG_DIR=$glance_remote_conf_dir" From ef53db76d029382dd8b3566224e51351b9d36280 Mon Sep 17 00:00:00 2001 From: melanie witt Date: Thu, 24 Aug 2023 00:42:19 +0000 Subject: [PATCH 351/574] Fix configuration of LVM global_filter As far as I could tell, the global_filter config added in change I5d5c48e188cbb9b4208096736807f082bce524e8 wasn't actually making it into the lvm.conf. Given the volume (or rather LVM volume) related issues we've been seeing in the gate recently, we can give this a try to see if the global_filter setting has any positive effect. This also adds the contents of /etc/lvm/* to the logs collected by the jobs, so that we can see the LVM config. Change-Id: I2b39acd352669231d16b5cb2e151f290648355c0 --- .zuul.yaml | 1 + lib/lvm | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.zuul.yaml b/.zuul.yaml index 8b60fc9936..46e1e45e39 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -430,6 +430,7 @@ /var/log/mysql: logs /var/log/libvirt: logs /etc/libvirt: logs + /etc/lvm: logs /etc/sudoers: logs /etc/sudoers.d: logs '{{ stage_dir }}/iptables.txt': logs diff --git a/lib/lvm b/lib/lvm index 162c491f22..b7e84d9505 100644 --- a/lib/lvm +++ b/lib/lvm @@ -200,7 +200,7 @@ function set_lvm_filter { filter_string=$filter_string$filter_suffix clean_lvm_filter - sudo sed -i "/# global_filter = \[*\]/a\ $global_filter$filter_string" /etc/lvm/lvm.conf + sudo sed -i "/# global_filter = \[.*\]/a\ $filter_string" /etc/lvm/lvm.conf echo_summary "set lvm.conf device global_filter to: $filter_string" } From ffc1b76f64341e18b5a6e60783f1e33297623f99 Mon Sep 17 00:00:00 2001 From: yatinkarel Date: Mon, 28 Aug 2023 10:52:26 +0530 Subject: [PATCH 352/574] [neutron] Rely on PATH env set by devstack This was missed as part of [1], neutron sets exec_dirs in rootwrap.conf differently so that also needs to be fixed. 
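As a concrete illustration of that point, the new sed in the hunk below simply comments out the whitelist so oslo.rootwrap falls back to $PATH; run against a rootwrap config (file location assumed here, the real target is whatever $Q_RR_CONF_FILE points at) it behaves like:

    sudo sed -e 's/^exec_dirs=.*/#&/' -i /etc/neutron/rootwrap.conf   # path is an example
    grep '^#exec_dirs' /etc/neutron/rootwrap.conf                     # whitelist is now commented out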
Without it neutron openvswitch jobs relying on neutron-keepalived-state-change scripts were failing when deployed with GLOBAL_VENV=True as binaries no longer found at /usr/local/bin. [1] https://review.opendev.org/c/openstack/devstack/+/558930 Closes-Bug: #2031415 Change-Id: I9aa56bff02594f253381ffe47a70949079f4c240 --- lib/neutron | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/neutron b/lib/neutron index a6de7222db..e90ada8929 100644 --- a/lib/neutron +++ b/lib/neutron @@ -1075,7 +1075,10 @@ function _neutron_setup_rootwrap { sudo install -o root -g root -m 644 $NEUTRON_DIR/etc/rootwrap.conf $Q_RR_CONF_FILE fi sudo sed -e "s:^filters_path=.*$:filters_path=$Q_CONF_ROOTWRAP_D:" -i $Q_RR_CONF_FILE - sudo sed -e 's:^exec_dirs=\(.*\)$:exec_dirs=\1,/usr/local/bin:' -i $Q_RR_CONF_FILE + # Rely on $PATH set by devstack to determine what is safe to execute + # by rootwrap rather than use explicit whitelist of paths in + # rootwrap.conf + sudo sed -e 's/^exec_dirs=.*/#&/' -i $Q_RR_CONF_FILE # Specify ``rootwrap.conf`` as first parameter to neutron-rootwrap ROOTWRAP_SUDOER_CMD="$NEUTRON_ROOTWRAP $Q_RR_CONF_FILE *" From 427a4e1a9b7f20a8be0ad5091f2229945ce711a8 Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Wed, 7 Jun 2023 15:26:07 +0200 Subject: [PATCH 353/574] Drop focal platform job and support This was dropped in tempest, too[0], and we want to focus on getting and keeping the jammy job stable. Still retaining the nodeset definitions until we are sure they are not needed in other projects. [0] https://review.opendev.org/c/openstack/tempest/+/884952 Change-Id: Iafb5a939a650b763935d8b7ce7069ac4c6d9a95b --- .zuul.yaml | 9 --------- stack.sh | 2 +- 2 files changed, 1 insertion(+), 10 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 46e1e45e39..356acec479 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -744,13 +744,6 @@ vars: configure_swap_size: 4096 -- job: - name: devstack-platform-ubuntu-focal - parent: tempest-full-py3 - description: Ubuntu 20.04 LTS (focal) platform test - nodeset: openstack-single-node-focal - timeout: 9000 - - job: name: devstack-platform-ubuntu-jammy-ovn-source parent: devstack-platform-ubuntu-jammy @@ -946,7 +939,6 @@ - devstack-platform-debian-bookworm - devstack-platform-debian-bullseye - devstack-platform-rocky-blue-onyx - - devstack-platform-ubuntu-focal - devstack-platform-ubuntu-jammy-ovn-source - devstack-platform-ubuntu-jammy-ovs - devstack-platform-openEuler-22.03-ovn-source @@ -995,7 +987,6 @@ - devstack - devstack-ipv6 - devstack-platform-debian-bullseye - - devstack-platform-ubuntu-focal - devstack-platform-rocky-blue-onyx - devstack-enforce-scope - devstack-multinode diff --git a/stack.sh b/stack.sh index a8f46bfeb9..c8810cd2f0 100755 --- a/stack.sh +++ b/stack.sh @@ -230,7 +230,7 @@ write_devstack_version # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -SUPPORTED_DISTROS="bookworm|bullseye|focal|jammy|rhel8|rhel9|openEuler-22.03" +SUPPORTED_DISTROS="bookworm|bullseye|jammy|rhel8|rhel9|openEuler-22.03" if [[ ! ${DISTRO} =~ $SUPPORTED_DISTROS ]]; then echo "WARNING: this script has not been tested on $DISTRO" From 16ac21f0da4f1b83963c4beb876f8494d9594b7a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Harald=20Jens=C3=A5s?= Date: Thu, 31 Aug 2023 15:06:52 +0200 Subject: [PATCH 354/574] Add OVN_BRIDGE_MAPPINGS - support extra bridge Add's the OVN_BRIDGE_MAPPINGS variable to ovn_agent. 
Uses the same format as OVS_BRIDGE_MAPPINGS and defaults to "$PHYSICAL_NETWORK:$PUBLIC_BRIDGE". This enables use of providernet for the public network and setting up additional bridges, for example one for baremetal. Example: Q_USE_PROVIDER_NETWORKING="True" OVS_PHYSICAL_BRIDGE="brbm" PHYSICAL_NETWORK="mynetwork" PUBLIC_PHYSICAL_NETWORK="public" PUBLIC_BRIDGE="br-ex" OVN_BRIDGE_MAPPINGS="public:br-ex,mynetwork:brbm" Change-Id: I37317251bbe95d64de06d6232c2d472a98c0ee4d --- lib/neutron | 5 +++++ lib/neutron_plugins/ovn_agent | 2 +- lib/neutron_plugins/services/l3 | 8 +++++++- 3 files changed, 13 insertions(+), 2 deletions(-) diff --git a/lib/neutron b/lib/neutron index e90ada8929..ca9b788b2e 100644 --- a/lib/neutron +++ b/lib/neutron @@ -303,6 +303,11 @@ else Q_USE_SECGROUP=False fi +# OVN_BRIDGE_MAPPINGS - ovn-bridge-mappings +# NOTE(hjensas): Initialize after sourcing neutron_plugins/services/l3 +# which initialize PUBLIC_BRIDGE. +OVN_BRIDGE_MAPPINGS=${OVN_BRIDGE_MAPPINGS:-$PHYSICAL_NETWORK:$PUBLIC_BRIDGE} + # Save trace setting _XTRACE_NEUTRON=$(set +o | grep xtrace) set +o xtrace diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index 3526ccd354..c51b708130 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -288,7 +288,7 @@ function clone_repository { function create_public_bridge { # Create the public bridge that OVN will use sudo ovs-vsctl --may-exist add-br $PUBLIC_BRIDGE -- set bridge $PUBLIC_BRIDGE protocols=OpenFlow13,OpenFlow15 - sudo ovs-vsctl set open . external-ids:ovn-bridge-mappings=$PHYSICAL_NETWORK:$PUBLIC_BRIDGE + sudo ovs-vsctl set open . external-ids:ovn-bridge-mappings=${OVN_BRIDGE_MAPPINGS} _configure_public_network_connectivity } diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3 index 2bf884a8c4..c6d4663114 100644 --- a/lib/neutron_plugins/services/l3 +++ b/lib/neutron_plugins/services/l3 @@ -47,7 +47,8 @@ Q_L3_ROUTER_PER_TENANT=${Q_L3_ROUTER_PER_TENANT:-True} # used for the network. In case of ofagent, you should add the # corresponding entry to your OFAGENT_PHYSICAL_INTERFACE_MAPPINGS. # For openvswitch agent, you should add the corresponding entry to -# your OVS_BRIDGE_MAPPINGS. +# your OVS_BRIDGE_MAPPINGS and for OVN add the corresponding entry +# to your OVN_BRIDGE_MAPPINGS. # # eg. (ofagent) # Q_USE_PROVIDERNET_FOR_PUBLIC=True @@ -60,6 +61,11 @@ Q_L3_ROUTER_PER_TENANT=${Q_L3_ROUTER_PER_TENANT:-True} # PUBLIC_PHYSICAL_NETWORK=public # OVS_BRIDGE_MAPPINGS=public:br-ex # +# eg. (ovn agent) +# Q_USER_PROVIDERNET_FOR_PUBLIC=True +# PUBLIC_PHYSICAL_NETWORK=public +# OVN_BRIDGE_MAPPINGS=public:br-ex +# # The provider-network-type defaults to flat, however, the values # PUBLIC_PROVIDERNET_TYPE and PUBLIC_PROVIDERNET_SEGMENTATION_ID could # be set to specify the parameters for an alternate network type. From a389128dba4ce7d7051b86f3ac7db4164d24b95f Mon Sep 17 00:00:00 2001 From: Lucas Alvares Gomes Date: Tue, 18 Jul 2023 16:31:28 +0100 Subject: [PATCH 355/574] OVN: Let ironic manage the OVN startup in its case. In order for Ironic to perform full testing with devstack, it uses virtual machines attached to an OVS bridge network to simulate bare metal machines. This worked great for OVS because often OVS was already running on the nodes due to the package, and we could just apply configuration and be done with it when Ironic's devstack plugin was applying initial configuration and setting up the test environment.
With OVN, and the requirement of a specific co-installed OVS version, Ironic has discovered that we cannot perform this same configuration without having already started OVN during the initial system setup. Which is fine, but we can't initialize and start OVN twice. It just doesn't work. The original form of this patch was proposed by lucasgnomes in order to validate that we, did, indeed, need to do this to enable Ironic to successfully test an OVN based configuration, and is now being revised to handle that case automatically when Ironic is the selected virt plugin. Co-Authored-By: Julia Kreger Change-Id: Ifbfdaaa97fdbe75ede49dc47235e92a8035d1de6 --- lib/neutron | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/lib/neutron b/lib/neutron index e90ada8929..5407f8a7b8 100644 --- a/lib/neutron +++ b/lib/neutron @@ -570,8 +570,15 @@ function configure_rbac_policies { # Start running OVN processes function start_ovn_services { if [[ $Q_AGENT == "ovn" ]]; then - init_ovn - start_ovn + if [ "$VIRT_DRIVER" != 'ironic' ]; then + # NOTE(TheJulia): Ironic's devstack plugin needs to perform + # additional networking configuration to setup a working test + # environment with test virtual machines to emulate baremetal, + # which requires OVN to be up and running earlier to complete + # that base configuration. + init_ovn + start_ovn + fi if [[ "$OVN_L3_CREATE_PUBLIC_NETWORK" == "True" ]]; then if [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" != "True" ]]; then echo "OVN_L3_CREATE_PUBLIC_NETWORK=True is being ignored " From e1297193dcb93acc1f7b89f5fe91babbcc6dda49 Mon Sep 17 00:00:00 2001 From: Jay Faulkner Date: Sun, 10 Sep 2023 16:24:38 -0700 Subject: [PATCH 356/574] [nova][ironic] Support configuring 1 shard on n-cpu Allows for testing of basic sharding configuration. Change-Id: Idfb2bd1822898d95af8643d69d97d9a76b4d64cc Needed-By: https://review.opendev.org/c/openstack/ironic/+/894460 --- functions-common | 6 ++++++ lib/nova_plugins/hypervisor-ironic | 4 ++++ 2 files changed, 10 insertions(+) diff --git a/functions-common b/functions-common index f752271976..c57c4cc054 100644 --- a/functions-common +++ b/functions-common @@ -1114,6 +1114,12 @@ function is_ironic_enforce_scope { return 1 } +function is_ironic_sharded { + # todo(JayF): Support >1 shard with multiple n-cpu instances for each + is_service_enabled ironic && [[ "$IRONIC_SHARDS" == "1" ]] && return 0 + return 1 +} + # Package Functions # ================= diff --git a/lib/nova_plugins/hypervisor-ironic b/lib/nova_plugins/hypervisor-ironic index f058e9bb53..9a39c798a8 100644 --- a/lib/nova_plugins/hypervisor-ironic +++ b/lib/nova_plugins/hypervisor-ironic @@ -53,6 +53,10 @@ function configure_nova_hypervisor { iniset $NOVA_CONF ironic project_domain_id default iniset $NOVA_CONF ironic project_name demo fi + if is_ironic_sharded; then + iniset $NOVA_CONF ironic shard $IRONIC_SHARD_1_NAME + fi + iniset $NOVA_CONF ironic user_domain_id default iniset $NOVA_CONF ironic region_name $REGION_NAME From 290a02d1f80b4de1bdeaaddaef7f59402a767d02 Mon Sep 17 00:00:00 2001 From: Martin Kopec Date: Tue, 12 Sep 2023 17:32:03 +0200 Subject: [PATCH 357/574] Remove openeuler job from periodic and check queue The openeuler job running version 22.03 fails due to old libvirt. Nova requires version 7.0.0 or greater. 
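A quick way to confirm whether a platform clears that bar is to check the installed libvirt version directly, for example (a sketch that assumes the libvirt daemon and python bindings are present):

    libvirtd --version
    python3 -c 'import libvirt; print(libvirt.getVersion())'   # 7000000 or higher means >= 7.0.0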
Related-Bug: #2035224 Change-Id: I4ad6151c3d8555de059c9228253d287aecf9f953 --- .zuul.yaml | 6 ------ 1 file changed, 6 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 356acec479..5a7edd6c93 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -941,8 +941,6 @@ - devstack-platform-rocky-blue-onyx - devstack-platform-ubuntu-jammy-ovn-source - devstack-platform-ubuntu-jammy-ovs - - devstack-platform-openEuler-22.03-ovn-source - - devstack-platform-openEuler-22.03-ovs - devstack-multinode - devstack-unit-tests - openstack-tox-bashate @@ -1067,7 +1065,3 @@ periodic: jobs: - devstack-no-tls-proxy - periodic-weekly: - jobs: - - devstack-platform-openEuler-22.03-ovn-source - - devstack-platform-openEuler-22.03-ovs From d3953db76641e825565390acc6f68501777c0f53 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Tue, 19 Sep 2023 02:15:19 +0000 Subject: [PATCH 358/574] Updated from generate-devstack-plugins-list Change-Id: I18a47f5d604bbb83173151fb0b129deee2fcbe62 --- doc/source/plugin-registry.rst | 2 -- 1 file changed, 2 deletions(-) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index f54fca92e6..03c7469c8f 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -69,7 +69,6 @@ openstack/networking-bagpipe `https://opendev.org/openstack/networki openstack/networking-baremetal `https://opendev.org/openstack/networking-baremetal `__ openstack/networking-bgpvpn `https://opendev.org/openstack/networking-bgpvpn `__ openstack/networking-generic-switch `https://opendev.org/openstack/networking-generic-switch `__ -openstack/networking-hyperv `https://opendev.org/openstack/networking-hyperv `__ openstack/networking-powervm `https://opendev.org/openstack/networking-powervm `__ openstack/networking-sfc `https://opendev.org/openstack/networking-sfc `__ openstack/neutron `https://opendev.org/openstack/neutron `__ @@ -85,7 +84,6 @@ openstack/octavia-dashboard `https://opendev.org/openstack/octavia- openstack/octavia-tempest-plugin `https://opendev.org/openstack/octavia-tempest-plugin `__ openstack/openstacksdk `https://opendev.org/openstack/openstacksdk `__ openstack/osprofiler `https://opendev.org/openstack/osprofiler `__ -openstack/oswin-tempest-plugin `https://opendev.org/openstack/oswin-tempest-plugin `__ openstack/ovn-octavia-provider `https://opendev.org/openstack/ovn-octavia-provider `__ openstack/rally-openstack `https://opendev.org/openstack/rally-openstack `__ openstack/sahara `https://opendev.org/openstack/sahara `__ From f73d3127832798db8d7830d1456bdedd1a6a6903 Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Wed, 20 Sep 2023 07:04:37 +0200 Subject: [PATCH 359/574] CI: Make bookworm platform job voting It has been very stable for some time and it is going to be a major platform for the next cycle. Signed-off-by: Dr. 
Jens Harbott Change-Id: Id2df9514b41eda0798179157282a8486b1e9ae23 --- .zuul.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.zuul.yaml b/.zuul.yaml index 5a7edd6c93..1d1e3c9807 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -716,7 +716,6 @@ description: Debian Bookworm platform test nodeset: devstack-single-node-debian-bookworm timeout: 9000 - voting: false vars: configure_swap_size: 4096 devstack_localrc: @@ -984,6 +983,7 @@ jobs: - devstack - devstack-ipv6 + - devstack-platform-debian-bookworm - devstack-platform-debian-bullseye - devstack-platform-rocky-blue-onyx - devstack-enforce-scope From 5441b3df6e534101e66f8187ac9ff2bba2533fb5 Mon Sep 17 00:00:00 2001 From: Jake Yip Date: Sat, 10 Jun 2023 00:17:53 +1000 Subject: [PATCH 360/574] Use OS_CLOUD in sample local.sh local.sh, if present, will be executed at the end of stack.sh. The sample file here is meant to be copied to devstack root if desired. Unfortunately, due to Change I86ffa9cd52454f1c1c72d29b3a0e0caa3e44b829 changing to use OS_CLOUD in stack.sh, sourcing openrc here will cause both OS_CLOUD and traditional OS_* env vars to be set, which causes a conflict. Change-Id: Id80b46acab7d600ad7394ab5bc1984304825a672 --- samples/local.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/local.sh b/samples/local.sh index a1c5c8143b..7e6ae70ad4 100755 --- a/samples/local.sh +++ b/samples/local.sh @@ -31,7 +31,7 @@ if is_service_enabled nova; then # ``demo``) # Get OpenStack user auth - source $TOP_DIR/openrc + export OS_CLOUD=devstack # Add first keypair found in localhost:$HOME/.ssh for i in $HOME/.ssh/id_rsa.pub $HOME/.ssh/id_dsa.pub; do From 25cd7eb67286ba39060d05b3f3f9e785d125195a Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Thu, 21 Sep 2023 07:12:15 -0700 Subject: [PATCH 361/574] Fix g-api-r for non-global venv This makes the glance-api-remote setup honor the GLOBAL_VENV flag, and not pass the --venv stuff to uwsgi if it is disabled. This should fix the glance-multistore-cinder-import-fips job. Change-Id: I2005da5ced027d273e1f25f47b644fecafffc6c1 --- lib/glance | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/lib/glance b/lib/glance index 3cf8230f41..796ebdb68d 100644 --- a/lib/glance +++ b/lib/glance @@ -543,7 +543,7 @@ function glance_remote_conf { # start_glance_remote_clone() - Clone the regular glance api worker function start_glance_remote_clone { local glance_remote_conf_dir glance_remote_port remote_data - local glance_remote_uwsgi + local glance_remote_uwsgi venv glance_remote_conf_dir="$(glance_remote_conf "")" glance_remote_port=$(get_random_port) @@ -581,11 +581,14 @@ function start_glance_remote_clone { # We need to create the systemd service for the clone, but then # change it to include an Environment line to point the WSGI app # at the alternate config directory. 
+ if [[ "$GLOBAL_VENV" == True ]]; then + venv="--venv $DEVSTACK_VENV" + fi write_uwsgi_user_unit_file devstack@g-api-r.service "$(which uwsgi) \ --procname-prefix \ glance-api-remote \ --ini $glance_remote_uwsgi \ - --venv $DEVSTACK_VENV" \ + $venv" \ "" "$STACK_USER" iniadd -sudo ${SYSTEMD_DIR}/devstack@g-api-r.service \ "Service" "Environment" \ "OS_GLANCE_CONFIG_DIR=$glance_remote_conf_dir" From 3d37d13ee7aacd5594b351e324d8780e6d64d61b Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Thu, 28 Sep 2023 11:36:07 -0700 Subject: [PATCH 362/574] Update DEVSTACK_SERIES to 2024.1 stable/2023.2 branch has been created now and current master is for 2024.1 Change-Id: I67eee1ba721a1ad99b3503312acc2f94a52c5552 --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index ff30d37721..464e935839 100644 --- a/stackrc +++ b/stackrc @@ -265,7 +265,7 @@ REQUIREMENTS_DIR=${REQUIREMENTS_DIR:-$DEST/requirements} # Setting the variable to 'ALL' will activate the download for all # libraries. -DEVSTACK_SERIES="2023.2" +DEVSTACK_SERIES="2024.1" ############## # From 8c25a8586122d5f00bdcec9b6c4826309891ba62 Mon Sep 17 00:00:00 2001 From: Lukas Piwowarski Date: Thu, 5 Oct 2023 08:11:05 +0000 Subject: [PATCH 363/574] Add support volume backup_driver config option The depends-on patch adds a new backup_driver option to tempest. The goal of this change is to be able to do a proper cleanup of containers when swift is used as a backup driver. This change makes sure that the new option is properly set to "swift" when Swift is used as the driver. Depends-On: https://review.opendev.org/c/openstack/tempest/+/896011/13 Change-Id: I76e7fd712ee352051f8aa2f2912a29abad9ad017 --- lib/tempest | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/tempest b/lib/tempest index 2f62f6ea62..7b5fde170e 100644 --- a/lib/tempest +++ b/lib/tempest @@ -29,6 +29,7 @@ # - ``DEFAULT_INSTANCE_USER`` # - ``DEFAULT_INSTANCE_ALT_USER`` # - ``CINDER_ENABLED_BACKENDS`` +# - ``CINDER_BACKUP_DRIVER`` # - ``NOVA_ALLOW_DUPLICATE_NETWORKS`` # # ``stack.sh`` calls the entry points in this order: @@ -571,6 +572,9 @@ function configure_tempest { TEMPEST_VOLUME_REVERT_TO_SNAPSHOT=${TEMPEST_VOLUME_REVERT_TO_SNAPSHOT:-True} fi iniset $TEMPEST_CONFIG volume-feature-enabled volume_revert $(trueorfalse False TEMPEST_VOLUME_REVERT_TO_SNAPSHOT) + if [[ "$CINDER_BACKUP_DRIVER" == *"swift"* ]]; then + iniset $TEMPEST_CONFIG volume backup_driver swift + fi local tempest_volume_min_microversion=${TEMPEST_VOLUME_MIN_MICROVERSION:-None} local tempest_volume_max_microversion=${TEMPEST_VOLUME_MAX_MICROVERSION:-"latest"} if [ "$tempest_volume_min_microversion" == "None" ]; then From ca4d5132e63752878620c4e4f374d98d433b3f52 Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Tue, 10 Oct 2023 09:22:16 +0200 Subject: [PATCH 364/574] zuul: Drop neutron-linuxbridge-tempest job Neutron has deprecated linuxbridge support and is only doing reduced testing for the neutron-linuxbridge-tempest job, so we no longer need to run it in devstack, even less gate on it. Signed-off-by: Dr.
Jens Harbott Change-Id: Ie1a8f978efe7fc9b037cf6a6b70b67d539d76fd6 --- .zuul.yaml | 8 -------- 1 file changed, 8 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 1d1e3c9807..6ee8177a6d 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -953,10 +953,6 @@ irrelevant-files: - ^.*\.rst$ - ^doc/.*$ - - neutron-linuxbridge-tempest: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ - neutron-ovn-tempest-ovs-release: voting: false irrelevant-files: @@ -994,10 +990,6 @@ irrelevant-files: - ^.*\.rst$ - ^doc/.*$ - - neutron-linuxbridge-tempest: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ - ironic-tempest-bios-ipmi-direct-tinyipa - swift-dsvm-functional - grenade: From 72cf4e60060d8024a9fb79c845babc621f35dd2f Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Thu, 12 Oct 2023 11:08:30 -0700 Subject: [PATCH 365/574] Allow forcing nova compute_id Developers that need to stack and re-stack non-AIO compute-only environments will want to be able to keep the compute node uuid the same across runs. This mimics the behavior of a deployment tool that pre-creates the uuids, so it matches pretty well. Default to the current behavior of create-on-start, but allow forcing it ahead of time to something specific. Change-Id: Icab0b783e2233cad9a93c04758a5bccac0832203 --- lib/nova | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/lib/nova b/lib/nova index da3118f4cd..b04f94beef 100644 --- a/lib/nova +++ b/lib/nova @@ -58,6 +58,14 @@ NOVA_METADATA_UWSGI=$NOVA_BIN_DIR/nova-metadata-wsgi NOVA_UWSGI_CONF=$NOVA_CONF_DIR/nova-api-uwsgi.ini NOVA_METADATA_UWSGI_CONF=$NOVA_CONF_DIR/nova-metadata-uwsgi.ini +# Allow forcing the stable compute uuid to something specific. This would be +# done by deployment tools that pre-allocate the UUIDs, but it is also handy +# for developers that need to re-stack a compute-only deployment multiple +# times. Since the DB is non-local and not erased on an unstack, making it +# stay the same each time is what developers want. Set to a uuid here or +# leave it blank for default allocate-on-start behavior. +NOVA_CPU_UUID="" + # The total number of cells we expect. Must be greater than one and doesn't # count cell0. NOVA_NUM_CELLS=${NOVA_NUM_CELLS:-1} @@ -1058,6 +1066,10 @@ function start_nova_compute { iniset $NOVA_CPU_CONF workarounds libvirt_disable_apic True fi + if [[ "$NOVA_CPU_UUID" ]]; then + echo -n $NOVA_CPU_UUID > $NOVA_CONF_DIR/compute_id + fi + if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then # The group **$LIBVIRT_GROUP** is added to the current user in this script. 
# ``sg`` is used in run_process to execute nova-compute as a member of the From eb9b08a8833884b7c7c5b55813d7621715fe7adf Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Sat, 14 Oct 2023 02:26:11 +0000 Subject: [PATCH 366/574] Updated from generate-devstack-plugins-list Change-Id: Ieecc17159ac36b65124598c36fc92b77c2a75399 --- doc/source/plugin-registry.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 03c7469c8f..b2e733337a 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -78,6 +78,7 @@ openstack/neutron-fwaas-dashboard `https://opendev.org/openstack/neutron- openstack/neutron-tempest-plugin `https://opendev.org/openstack/neutron-tempest-plugin `__ openstack/neutron-vpnaas `https://opendev.org/openstack/neutron-vpnaas `__ openstack/neutron-vpnaas-dashboard `https://opendev.org/openstack/neutron-vpnaas-dashboard `__ +openstack/nova `https://opendev.org/openstack/nova `__ openstack/nova-powervm `https://opendev.org/openstack/nova-powervm `__ openstack/octavia `https://opendev.org/openstack/octavia `__ openstack/octavia-dashboard `https://opendev.org/openstack/octavia-dashboard `__ From d2acd60870c63b486d4802cc3af0fdb27bd506c7 Mon Sep 17 00:00:00 2001 From: Takashi Kajinami Date: Mon, 16 Oct 2023 15:02:08 +0900 Subject: [PATCH 367/574] Horizon: Install pymemcached ... so that we can use PyMemcacheCache backend. The MemcachedCache backend, which has been used previously, has been removed in recent Django, and we are switching the default backend in [1]. [1] https://review.opendev.org/c/openstack/horizon/+/891828 Change-Id: Ie1da8970628e34c41721198cdada8c7bb3b26ec0 --- lib/horizon | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/horizon b/lib/horizon index 611329d619..6f753f546f 100644 --- a/lib/horizon +++ b/lib/horizon @@ -169,6 +169,10 @@ function install_horizon { # Apache installation, because we mark it NOPRIME install_apache_wsgi + # Install the memcache library so that horizon can use memcached as its + # cache backend + pip_install_gr pymemcache + git_clone $HORIZON_REPO $HORIZON_DIR $HORIZON_BRANCH } From cace4044316befbbef9bcb7af2003f3045350830 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Mon, 23 Oct 2023 11:21:24 -0700 Subject: [PATCH 368/574] Fix performance stats gathering for global VENV Change-Id: I113c571ffddb241b29b1394e181ed0145b3c1e04 --- roles/capture-performance-data/tasks/main.yaml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/roles/capture-performance-data/tasks/main.yaml b/roles/capture-performance-data/tasks/main.yaml index f9bb0f7851..51a11b60bc 100644 --- a/roles/capture-performance-data/tasks/main.yaml +++ b/roles/capture-performance-data/tasks/main.yaml @@ -3,7 +3,9 @@ executable: /bin/bash cmd: | source {{ devstack_conf_dir }}/stackrc - python3 {{ devstack_conf_dir }}/tools/get-stats.py \ + source {{ devstack_conf_dir }}/inc/python + setup_devstack_virtualenv + $PYTHON {{ devstack_conf_dir }}/tools/get-stats.py \ --db-user="$DATABASE_USER" \ --db-pass="$DATABASE_PASSWORD" \ --db-host="$DATABASE_HOST" \ From 29e73a215557b2d20d0d9611e0d5317e08cf9538 Mon Sep 17 00:00:00 2001 From: "Dr. 
Jens Harbott" Date: Tue, 24 Oct 2023 06:18:22 +0200 Subject: [PATCH 369/574] Enable performance collection on Debian Change-Id: I84f1432262138cc9ff0942e1a2b2abe7447afe34 --- .zuul.yaml | 6 ------ 1 file changed, 6 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 6ee8177a6d..12bef3bff1 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -718,9 +718,6 @@ timeout: 9000 vars: configure_swap_size: 4096 - devstack_localrc: - # TODO(frickler): drop this once wheel build is fixed - MYSQL_GATHER_PERFORMANCE: false - job: name: devstack-platform-debian-bullseye @@ -730,9 +727,6 @@ timeout: 9000 vars: configure_swap_size: 4096 - devstack_localrc: - # TODO(frickler): drop this once wheel build is fixed - MYSQL_GATHER_PERFORMANCE: false - job: name: devstack-platform-rocky-blue-onyx From bacb8400942b2ed6b724bdd3d28797896e1054c6 Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Wed, 25 Oct 2023 12:52:28 -0700 Subject: [PATCH 370/574] Enable NEUTRON_ENFORCE_SCOPE to True by default Neutron bobcat release has enabled the RBAC new defaults by default. With the latest release of Neutron have new defaults enable, we should configure the same by default in devstack. This change make NEUTRON_ENFORCE_SCOPE flag to True by default so that every job will run with Neutron new defaults. As old defaults are still supported (in deprecated way), we will keep this flag so that we can have one job disable it and test the old defaults. Change-Id: I3361d33885b2e3af7cad0141f9b799b2723ee8a1 --- lib/neutron | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/lib/neutron b/lib/neutron index 808043cebe..3628bfc25e 100644 --- a/lib/neutron +++ b/lib/neutron @@ -92,8 +92,9 @@ NEUTRON_UWSGI_CONF=$NEUTRON_CONF_DIR/neutron-api-uwsgi.ini # If NEUTRON_ENFORCE_SCOPE == True, it will set "enforce_scope" # and "enforce_new_defaults" to True in the Neutron's config to enforce usage -# of the new RBAC policies and scopes. -NEUTRON_ENFORCE_SCOPE=$(trueorfalse False NEUTRON_ENFORCE_SCOPE) +# of the new RBAC policies and scopes. Set it to False if you do not +# want to run Neutron with new RBAC. +NEUTRON_ENFORCE_SCOPE=$(trueorfalse True NEUTRON_ENFORCE_SCOPE) # Agent binaries. Note, binary paths for other agents are set in per-service # scripts in lib/neutron_plugins/services/ From 67630d4c52aef5ddcb15cff4f3b6594d447e8992 Mon Sep 17 00:00:00 2001 From: Artem Goncharov Date: Sun, 18 Jun 2023 14:46:06 +0200 Subject: [PATCH 371/574] Enable keystone token caching by OSC SDK uses python keyring library to enable token caching. Normally this is requiring a proper desktop (interactive) session, but there are some backend plugins working in non-interactive mode. Store cache in an unencrypted file on FS (this is not worse than storing passwords in plaintext). 
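On the client side the change boils down to two small pieces of configuration, sketched here as shell commands (the paths match the ones the patch below writes to):

    mkdir -p ~/.config/python_keyring
    printf '[backend]\ndefault-keyring=keyrings.alt.file.PlaintextKeyring\n' \
        > ~/.config/python_keyring/keyringrc.cfg
    # plus enabling "cache: {auth: true}" at the top level of ~/.config/openstack/clouds.yaml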
Change-Id: I42d698f15db5918443073fff8f27b926126d1d0f --- functions-common | 10 +++++++++- lib/libraries | 4 ++++ tools/update_clouds_yaml.py | 27 ++++++++++++++++++++++----- 3 files changed, 35 insertions(+), 6 deletions(-) diff --git a/functions-common b/functions-common index c57c4cc054..03d7c96417 100644 --- a/functions-common +++ b/functions-common @@ -1047,6 +1047,8 @@ function get_or_create_service { --description="$3" \ -f value -c id ) + # Drop cached token to invalidate catalog info in the token + remove_token_cache echo $service_id } @@ -1064,7 +1066,6 @@ function _get_or_create_endpoint_with_interface { endpoint_id=$(openstack --os-cloud devstack-system-admin endpoint create \ $1 $2 $3 --region $4 -f value -c id) fi - echo $endpoint_id } @@ -1088,6 +1089,8 @@ function get_or_create_endpoint { if [[ -n "$5" ]]; then _get_or_create_endpoint_with_interface $1 internal $5 $2 fi + # Drop cached token to invalidate catalog info in the token + remove_token_cache # return the public id to indicate success, and this is the endpoint most likely wanted echo $public_id } @@ -2517,6 +2520,11 @@ function is_fips_enabled { [ "$fips" == "1" ] } +function remove_token_cache { + # Remove Keyring cache file + rm ~/.local/share/python_keyring/keyring_pass.cfg +} + # Restore xtrace $_XTRACE_FUNCTIONS_COMMON diff --git a/lib/libraries b/lib/libraries index 9ea32304fc..146434e2b9 100755 --- a/lib/libraries +++ b/lib/libraries @@ -138,6 +138,10 @@ function install_libs { # doesn't pull in etcd3. pip_install etcd3 pip_install etcd3gw + + # Add libraries required for token caching by OpenStackSDK/CLI + pip_install keyring + pip_install keyrings.alt } # Restore xtrace diff --git a/tools/update_clouds_yaml.py b/tools/update_clouds_yaml.py index 74dcdb2a07..918988245b 100755 --- a/tools/update_clouds_yaml.py +++ b/tools/update_clouds_yaml.py @@ -30,7 +30,9 @@ def __init__(self, args): self._clouds_path = os.path.expanduser( '~/.config/openstack/clouds.yaml') self._create_directory = True - self._clouds = {} + self._keyringrc_path = os.path.expanduser( + '~/.config/python_keyring/keyringrc.cfg') + self._config = {} self._cloud = args.os_cloud self._cloud_data = { @@ -65,14 +67,17 @@ def run(self): def _read_clouds(self): try: with open(self._clouds_path) as clouds_file: - self._clouds = yaml.safe_load(clouds_file) + self._config = yaml.safe_load(clouds_file) except IOError: # The user doesn't have a clouds.yaml file. print("The user clouds.yaml file didn't exist.") - self._clouds = {} + if "cache" not in self._config: + # Enable auth (and only auth) caching. Currently caching into the + # file on FS is configured in `_write_clouds` function. + self._config["cache"] = {"auth": True} def _update_clouds(self): - self._clouds.setdefault('clouds', {})[self._cloud] = self._cloud_data + self._config.setdefault('clouds', {})[self._cloud] = self._cloud_data def _write_clouds(self): @@ -81,7 +86,19 @@ def _write_clouds(self): os.makedirs(clouds_dir) with open(self._clouds_path, 'w') as clouds_file: - yaml.dump(self._clouds, clouds_file, default_flow_style=False) + yaml.dump(self._config, clouds_file, default_flow_style=False) + + # Enable keyring token caching + keyringrc_dir = os.path.dirname(self._keyringrc_path) + os.makedirs(keyringrc_dir, exist_ok=True) + + # Configure auth caching into the file on FS. We do not bother of any + # expiration since SDK is smart enough to reauth once the token becomes + # invalid. 
+ with open(self._keyringrc_path, 'w') as keyringrc_file: + keyringrc_file.write("[backend]\n") + keyringrc_file.write( + "default-keyring=keyrings.alt.file.PlaintextKeyring\n") def main(): From 5123700ea6fe25164bd51e967ce85aaefb5c364c Mon Sep 17 00:00:00 2001 From: Sean Mooney Date: Thu, 19 Oct 2023 17:04:56 +0000 Subject: [PATCH 372/574] ignore dbcounter sub dirs Currently, if you run devstack with the dbcounter service enabled, the created subdirs show up in git status. This change just adds them to .gitignore. Change-Id: Iee48eb4e12ac22734c8a2c1dcbe0b92a0a387eaa --- .gitignore | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.gitignore b/.gitignore index 8fe56ad6ab..ad153f4a07 100644 --- a/.gitignore +++ b/.gitignore @@ -38,3 +38,5 @@ stack-screenrc userrc_early AUTHORS ChangeLog +tools/dbcounter/build/ +tools/dbcounter/dbcounter.egg-info/ From 0f402b8327cc3e501df93c735c1b049361ed3dbb Mon Sep 17 00:00:00 2001 From: tzing Date: Mon, 6 Nov 2023 02:24:14 +0000 Subject: [PATCH 373/574] Fix openEuler support openEuler 22.03 LTS support was removed from devstack in the last few months because its libvirt version is too old and the CI job always fails. This patch adds a yum repository for libvirt 7.2.0, and adds the related CI job to make sure it works well. Change-Id: Ic507f165cfa117451283360854c4776a968bbb10 --- .zuul.yaml | 2 ++ stack.sh | 6 +++++- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/.zuul.yaml b/.zuul.yaml index 12bef3bff1..75930112ca 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -934,6 +934,8 @@ - devstack-platform-rocky-blue-onyx - devstack-platform-ubuntu-jammy-ovn-source - devstack-platform-ubuntu-jammy-ovs + - devstack-platform-openEuler-22.03-ovn-source + - devstack-platform-openEuler-22.03-ovs - devstack-multinode - devstack-unit-tests - openstack-tox-bashate diff --git a/stack.sh b/stack.sh index 530fda48aa..dce15ac01c 100755 --- a/stack.sh +++ b/stack.sh @@ -421,8 +421,12 @@ elif [[ $DISTRO == "openEuler-22.03" ]]; then # 1. the hostname package is not installed by default # 2. Some necessary packages are in openstack repo, for example liberasurecode-devel # 3. python3-pip can be uninstalled by `get_pip.py` automaticly. # 4. Ensure wget installation before use - install_package hostname openstack-release-wallaby + install_package hostname openstack-release-wallaby wget uninstall_package python3-pip + + # Add yum repository for libvirt7.X + sudo wget https://eur.openeuler.openatom.cn/coprs/g/sig-openstack/Libvirt-7.X/repo/openeuler-22.03_LTS/group_sig-openstack-Libvirt-7.X-openeuler-22.03_LTS.repo -O /etc/yum.repos.d/libvirt7.2.0.repo fi # Ensure python is installed From e7c12616e27ad2987c2dead1e1a413aaa2c632ee Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Tue, 14 Nov 2023 16:27:56 +0100 Subject: [PATCH 374/574] Add periodic-weekly pipeline with platform jobs Originally we only had the openeuler jobs there, but the other platforms could also do with some regular testing.
Change-Id: I93526a4c592d85acd4debf72eb59e306ab8e6382 --- .zuul.yaml | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/.zuul.yaml b/.zuul.yaml index 75930112ca..47466cb3eb 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -1053,3 +1053,13 @@ periodic: jobs: - devstack-no-tls-proxy + periodic-weekly: + jobs: + - devstack-platform-centos-9-stream + - devstack-platform-debian-bookworm + - devstack-platform-debian-bullseye + - devstack-platform-rocky-blue-onyx + - devstack-platform-ubuntu-jammy-ovn-source + - devstack-platform-ubuntu-jammy-ovs + - devstack-platform-openEuler-22.03-ovn-source + - devstack-platform-openEuler-22.03-ovs From 82c30cd82ee00012d21bee94dad2bcbc2c047f78 Mon Sep 17 00:00:00 2001 From: yatin Date: Wed, 15 Nov 2023 12:44:50 +0000 Subject: [PATCH 375/574] Revert "Enable keystone token caching by OSC" This reverts commit 67630d4c52aef5ddcb15cff4f3b6594d447e8992. Reason for revert: Seeing random failures across jobs as sometimes 'keyring_pass.cfg' gets duplicated keys and that makes executions of any openstackclient command to fail until the file is removed. This should be handled before re enabling the token caching again. Change-Id: I3d2fe53a2e7552ac6304c30aa2fe5be33d77df53 Related-Bug: #2042943 --- functions-common | 10 +--------- lib/libraries | 4 ---- tools/update_clouds_yaml.py | 27 +++++---------------------- 3 files changed, 6 insertions(+), 35 deletions(-) diff --git a/functions-common b/functions-common index 03d7c96417..c57c4cc054 100644 --- a/functions-common +++ b/functions-common @@ -1047,8 +1047,6 @@ function get_or_create_service { --description="$3" \ -f value -c id ) - # Drop cached token to invalidate catalog info in the token - remove_token_cache echo $service_id } @@ -1066,6 +1064,7 @@ function _get_or_create_endpoint_with_interface { endpoint_id=$(openstack --os-cloud devstack-system-admin endpoint create \ $1 $2 $3 --region $4 -f value -c id) fi + echo $endpoint_id } @@ -1089,8 +1088,6 @@ function get_or_create_endpoint { if [[ -n "$5" ]]; then _get_or_create_endpoint_with_interface $1 internal $5 $2 fi - # Drop cached token to invalidate catalog info in the token - remove_token_cache # return the public id to indicate success, and this is the endpoint most likely wanted echo $public_id } @@ -2520,11 +2517,6 @@ function is_fips_enabled { [ "$fips" == "1" ] } -function remove_token_cache { - # Remove Keyring cache file - rm ~/.local/share/python_keyring/keyring_pass.cfg -} - # Restore xtrace $_XTRACE_FUNCTIONS_COMMON diff --git a/lib/libraries b/lib/libraries index 146434e2b9..9ea32304fc 100755 --- a/lib/libraries +++ b/lib/libraries @@ -138,10 +138,6 @@ function install_libs { # doesn't pull in etcd3. 
pip_install etcd3 pip_install etcd3gw - - # Add libraries required for token caching by OpenStackSDK/CLI - pip_install keyring - pip_install keyrings.alt } # Restore xtrace diff --git a/tools/update_clouds_yaml.py b/tools/update_clouds_yaml.py index 918988245b..74dcdb2a07 100755 --- a/tools/update_clouds_yaml.py +++ b/tools/update_clouds_yaml.py @@ -30,9 +30,7 @@ def __init__(self, args): self._clouds_path = os.path.expanduser( '~/.config/openstack/clouds.yaml') self._create_directory = True - self._keyringrc_path = os.path.expanduser( - '~/.config/python_keyring/keyringrc.cfg') - self._config = {} + self._clouds = {} self._cloud = args.os_cloud self._cloud_data = { @@ -67,17 +65,14 @@ def run(self): def _read_clouds(self): try: with open(self._clouds_path) as clouds_file: - self._config = yaml.safe_load(clouds_file) + self._clouds = yaml.safe_load(clouds_file) except IOError: # The user doesn't have a clouds.yaml file. print("The user clouds.yaml file didn't exist.") - if "cache" not in self._config: - # Enable auth (and only auth) caching. Currently caching into the - # file on FS is configured in `_write_clouds` function. - self._config["cache"] = {"auth": True} + self._clouds = {} def _update_clouds(self): - self._config.setdefault('clouds', {})[self._cloud] = self._cloud_data + self._clouds.setdefault('clouds', {})[self._cloud] = self._cloud_data def _write_clouds(self): @@ -86,19 +81,7 @@ def _write_clouds(self): os.makedirs(clouds_dir) with open(self._clouds_path, 'w') as clouds_file: - yaml.dump(self._config, clouds_file, default_flow_style=False) - - # Enable keyring token caching - keyringrc_dir = os.path.dirname(self._keyringrc_path) - os.makedirs(keyringrc_dir, exist_ok=True) - - # Configure auth caching into the file on FS. We do not bother of any - # expiration since SDK is smart enough to reauth once the token becomes - # invalid. - with open(self._keyringrc_path, 'w') as keyringrc_file: - keyringrc_file.write("[backend]\n") - keyringrc_file.write( - "default-keyring=keyrings.alt.file.PlaintextKeyring\n") + yaml.dump(self._clouds, clouds_file, default_flow_style=False) def main(): From bb0c273697bf54dd569ad38e459cd161b62f96cb Mon Sep 17 00:00:00 2001 From: elajkat Date: Thu, 16 Nov 2023 11:30:04 +0100 Subject: [PATCH 376/574] Option for SQLAlchemy and alembic git source Change-Id: If7ff0075834a1e9cee01713676166e56b797debd Closes-Bug: #2042941 --- lib/neutron | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/lib/neutron b/lib/neutron index 3628bfc25e..bc77f161d7 100644 --- a/lib/neutron +++ b/lib/neutron @@ -158,6 +158,14 @@ if [[ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" && -z "$NEUTRON_ENDPOINT_SERVICE_NAME NEUTRON_ENDPOINT_SERVICE_NAME="networking" fi +# Source install libraries +ALEMBIC_REPO=${ALEMBIC_REPO:-https://github.com/sqlalchemy/alembic.git} +ALEMBIC_DIR=${ALEMBIC_DIR:-$DEST/alembic} +ALEMBIC_BRANCH=${ALEMBIC_BRANCH:-main} +SQLALCHEMY_REPO=${SQLALCHEMY_REPO:-https://github.com/sqlalchemy/sqlalchemy.git} +SQLALCHEMY_DIR=${SQLALCHEMY_DIR:-$DEST/sqlalchemy} +SQLALCHEMY_BRANCH=${SQLALCHEMY_BRANCH:-main} + # List of config file names in addition to the main plugin config file # To add additional plugin config files, use ``neutron_server_config_add`` # utility function. 
For example: @@ -525,6 +533,17 @@ function install_neutron { setup_dev_lib "neutron-lib" fi + # Install SQLAlchemy and alembic from git when these are required + # see https://bugs.launchpad.net/neutron/+bug/2042941 + if use_library_from_git "sqlalchemy"; then + git_clone $SQLALCHEMY_REPO $SQLALCHEMY_DIR $SQLALCHEMY_BRANCH + setup_develop $SQLALCHEMY_DIR + fi + if use_library_from_git "alembic"; then + git_clone $ALEMBIC_REPO $ALEMBIC_DIR $ALEMBIC_BRANCH + setup_develop $ALEMBIC_DIR + fi + git_clone $NEUTRON_REPO $NEUTRON_DIR $NEUTRON_BRANCH setup_develop $NEUTRON_DIR From 2211c778db0e18702c7177f7750571cba3697509 Mon Sep 17 00:00:00 2001 From: Abhishek Kekane Date: Wed, 22 Nov 2023 06:21:55 +0000 Subject: [PATCH 377/574] Allow devstack to set cache driver for glance Added new devstack variable `GLANCE_CACHE_DRIVER` default to `sqlite` to set the cache driver for glance service. Related blueprint centralized-cache-db Change-Id: I76d064590356e2d65bfc6a3f57d1bdaeeb83a74a --- lib/glance | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/glance b/lib/glance index 796ebdb68d..4ff9a34ca8 100644 --- a/lib/glance +++ b/lib/glance @@ -75,6 +75,7 @@ GLANCE_MULTIPLE_FILE_STORES=${GLANCE_MULTIPLE_FILE_STORES:-fast} GLANCE_DEFAULT_BACKEND=${GLANCE_DEFAULT_BACKEND:-fast} GLANCE_CACHE_DIR=${GLANCE_CACHE_DIR:=$DATA_DIR/glance/cache} +GLANCE_CACHE_DRIVER=${GLANCE_CACHE_DRIVER:-sqlite} # Full Glance functionality requires running in standalone mode. If we are # not in uwsgi mode, then we are standalone, otherwise allow separate control. @@ -329,6 +330,7 @@ function configure_glance { iniset $GLANCE_API_CONF database connection $dburl iniset $GLANCE_API_CONF DEFAULT use_syslog $SYSLOG iniset $GLANCE_API_CONF DEFAULT image_cache_dir $GLANCE_CACHE_DIR/ + iniset $GLANCE_API_CONF DEFAULT image_cache_driver $GLANCE_CACHE_DRIVER iniset $GLANCE_API_CONF oslo_concurrency lock_path $GLANCE_LOCK_DIR iniset $GLANCE_API_CONF paste_deploy flavor keystone+cachemanagement configure_keystone_authtoken_middleware $GLANCE_API_CONF glance @@ -392,6 +394,7 @@ function configure_glance { iniset $GLANCE_CACHE_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL iniset $GLANCE_CACHE_CONF DEFAULT use_syslog $SYSLOG iniset $GLANCE_CACHE_CONF DEFAULT image_cache_dir $GLANCE_CACHE_DIR/ + iniset $GLANCE_CACHE_CONF DEFAULT image_cache_driver $GLANCE_CACHE_DRIVER iniset $GLANCE_CACHE_CONF DEFAULT auth_url $KEYSTONE_SERVICE_URI iniset $GLANCE_CACHE_CONF DEFAULT admin_tenant_name $SERVICE_PROJECT_NAME iniset $GLANCE_CACHE_CONF DEFAULT admin_user glance From 2e14add0fdbc749f40caf075e42221d85ff2f27e Mon Sep 17 00:00:00 2001 From: Eric Harney Date: Wed, 29 Nov 2023 09:22:10 -0500 Subject: [PATCH 378/574] Add cinder-manage to /usr/local/bin/ This is useful in a dev environment. 
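With that symlink in place the usual maintenance commands work directly from the developer's shell, for example:

    cinder-manage db version
    cinder-manage service list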
Change-Id: I247eb4aea23a906d0e667ec6c5ac79f932bdca24 --- stack.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/stack.sh b/stack.sh index 530fda48aa..4d649f6cec 100755 --- a/stack.sh +++ b/stack.sh @@ -821,6 +821,7 @@ fixup_all if [[ "$GLOBAL_VENV" == "True" ]] ; then # TODO(frickler): find a better solution for this + sudo ln -sf /opt/stack/data/venv/bin/cinder-manage /usr/local/bin sudo ln -sf /opt/stack/data/venv/bin/cinder-rtstool /usr/local/bin sudo ln -sf /opt/stack/data/venv/bin/glance /usr/local/bin sudo ln -sf /opt/stack/data/venv/bin/nova-manage /usr/local/bin From d126330efebb98b7fe8ce74d8da333e13782576d Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Wed, 6 Dec 2023 09:58:18 +0000 Subject: [PATCH 379/574] lib/apache: Rename variable This is a little more meaningful, IMO. Change-Id: Ib9d3fdc54b1cdbd822c2a4eca0a3310ca3f6324c Signed-off-by: Stephen Finucane --- lib/apache | 84 +++++++++++++++++++++++++++--------------------------- 1 file changed, 42 insertions(+), 42 deletions(-) diff --git a/lib/apache b/lib/apache index cf7215bef2..9017e0a38a 100644 --- a/lib/apache +++ b/lib/apache @@ -238,7 +238,7 @@ function restart_apache_server { } function write_uwsgi_config { - local file=$1 + local conf=$1 local wsgi=$2 local url=$3 local http=$4 @@ -258,38 +258,38 @@ function write_uwsgi_config { local socket="$socket_dir/${name}.socket" # always cleanup given that we are using iniset here - rm -rf $file - iniset "$file" uwsgi wsgi-file "$wsgi" - iniset "$file" uwsgi processes $API_WORKERS + rm -rf $conf + iniset "$conf" uwsgi wsgi-file "$wsgi" + iniset "$conf" uwsgi processes $API_WORKERS # This is running standalone - iniset "$file" uwsgi master true + iniset "$conf" uwsgi master true # Set die-on-term & exit-on-reload so that uwsgi shuts down - iniset "$file" uwsgi die-on-term true - iniset "$file" uwsgi exit-on-reload false + iniset "$conf" uwsgi die-on-term true + iniset "$conf" uwsgi exit-on-reload false # Set worker-reload-mercy so that worker will not exit till the time # configured after graceful shutdown - iniset "$file" uwsgi worker-reload-mercy $WORKER_TIMEOUT - iniset "$file" uwsgi enable-threads true - iniset "$file" uwsgi plugins http,python3 + iniset "$conf" uwsgi worker-reload-mercy $WORKER_TIMEOUT + iniset "$conf" uwsgi enable-threads true + iniset "$conf" uwsgi plugins http,python3 # uwsgi recommends this to prevent thundering herd on accept. - iniset "$file" uwsgi thunder-lock true + iniset "$conf" uwsgi thunder-lock true # Set hook to trigger graceful shutdown on SIGTERM - iniset "$file" uwsgi hook-master-start "unix_signal:15 gracefully_kill_them_all" + iniset "$conf" uwsgi hook-master-start "unix_signal:15 gracefully_kill_them_all" # Override the default size for headers from the 4k default. - iniset "$file" uwsgi buffer-size 65535 + iniset "$conf" uwsgi buffer-size 65535 # Make sure the client doesn't try to re-use the connection. - iniset "$file" uwsgi add-header "Connection: close" + iniset "$conf" uwsgi add-header "Connection: close" # This ensures that file descriptors aren't shared between processes. 
- iniset "$file" uwsgi lazy-apps true + iniset "$conf" uwsgi lazy-apps true # If we said bind directly to http, then do that and don't start the apache proxy if [[ -n "$http" ]]; then - iniset "$file" uwsgi http $http + iniset "$conf" uwsgi http $http else local apache_conf="" apache_conf=$(apache_site_config_for $name) - iniset "$file" uwsgi socket "$socket" - iniset "$file" uwsgi chmod-socket 666 + iniset "$conf" uwsgi socket "$socket" + iniset "$conf" uwsgi chmod-socket 666 echo "ProxyPass \"${url}\" \"unix:${socket}|uwsgi://uwsgi-uds-${name}\" retry=0 acquire=1 " | sudo tee -a $apache_conf enable_apache_site $name restart_apache_server @@ -303,7 +303,7 @@ function write_uwsgi_config { # but that involves having apache buffer the request before sending it to # uwsgi. function write_local_uwsgi_http_config { - local file=$1 + local conf=$1 local wsgi=$2 local url=$3 name=$(basename $wsgi) @@ -312,38 +312,38 @@ function write_local_uwsgi_http_config { # a private view of it on some platforms. # always cleanup given that we are using iniset here - rm -rf $file - iniset "$file" uwsgi wsgi-file "$wsgi" + rm -rf $conf + iniset "$conf" uwsgi wsgi-file "$wsgi" port=$(get_random_port) - iniset "$file" uwsgi http-socket "$APACHE_LOCAL_HOST:$port" - iniset "$file" uwsgi processes $API_WORKERS + iniset "$conf" uwsgi http-socket "$APACHE_LOCAL_HOST:$port" + iniset "$conf" uwsgi processes $API_WORKERS # This is running standalone - iniset "$file" uwsgi master true + iniset "$conf" uwsgi master true # Set die-on-term & exit-on-reload so that uwsgi shuts down - iniset "$file" uwsgi die-on-term true - iniset "$file" uwsgi exit-on-reload false - iniset "$file" uwsgi enable-threads true - iniset "$file" uwsgi plugins http,python3 + iniset "$conf" uwsgi die-on-term true + iniset "$conf" uwsgi exit-on-reload false + iniset "$conf" uwsgi enable-threads true + iniset "$conf" uwsgi plugins http,python3 # uwsgi recommends this to prevent thundering herd on accept. - iniset "$file" uwsgi thunder-lock true + iniset "$conf" uwsgi thunder-lock true # Set hook to trigger graceful shutdown on SIGTERM - iniset "$file" uwsgi hook-master-start "unix_signal:15 gracefully_kill_them_all" + iniset "$conf" uwsgi hook-master-start "unix_signal:15 gracefully_kill_them_all" # Set worker-reload-mercy so that worker will not exit till the time # configured after graceful shutdown - iniset "$file" uwsgi worker-reload-mercy $WORKER_TIMEOUT + iniset "$conf" uwsgi worker-reload-mercy $WORKER_TIMEOUT # Override the default size for headers from the 4k default. - iniset "$file" uwsgi buffer-size 65535 + iniset "$conf" uwsgi buffer-size 65535 # Make sure the client doesn't try to re-use the connection. - iniset "$file" uwsgi add-header "Connection: close" + iniset "$conf" uwsgi add-header "Connection: close" # This ensures that file descriptors aren't shared between processes. 
- iniset "$file" uwsgi lazy-apps true - iniset "$file" uwsgi chmod-socket 666 - iniset "$file" uwsgi http-raw-body true - iniset "$file" uwsgi http-chunked-input true - iniset "$file" uwsgi http-auto-chunked true - iniset "$file" uwsgi http-keepalive false + iniset "$conf" uwsgi lazy-apps true + iniset "$conf" uwsgi chmod-socket 666 + iniset "$conf" uwsgi http-raw-body true + iniset "$conf" uwsgi http-chunked-input true + iniset "$conf" uwsgi http-auto-chunked true + iniset "$conf" uwsgi http-keepalive false # Increase socket timeout for slow chunked uploads - iniset "$file" uwsgi socket-timeout 30 + iniset "$conf" uwsgi socket-timeout 30 enable_apache_mod proxy enable_apache_mod proxy_http @@ -376,12 +376,12 @@ function write_local_proxy_http_config { } function remove_uwsgi_config { - local file=$1 + local conf=$1 local wsgi=$2 local name="" name=$(basename $wsgi) - rm -rf $file + rm -rf $conf disable_apache_site $name } From 6b0f055b4ed407f8a190f768d0e654235ac015dd Mon Sep 17 00:00:00 2001 From: Yadnesh Kulkarni Date: Thu, 23 Nov 2023 11:59:49 +0530 Subject: [PATCH 380/574] Make multiple attempts to download image Downloading an image can fail due to network issues, so let's retry 5 times before giving up. We have seen issues in CI due to network issues as described below and in the Related-Bug:- Often times fetching Fedora image in FIPS jobs fails due to "GnuTLS: One of the involved algorithms has insufficient security level." This occurs when request to pull image is redirected to a mirror that's incompatible with FIPS enabled system. Making multiple attempts to download images could provide better chance of pulling images from different mirrors and avoid failure of the job. This will also save a few rechecks. Related-Bug: #2045725 Change-Id: I7163aea4d121cb27620e4f2a083a543abfc286bf --- functions | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/functions b/functions index 7ada0feba7..01e1d259ad 100644 --- a/functions +++ b/functions @@ -133,17 +133,28 @@ function upload_image { local image image_fname image_name + local max_attempts=5 + # Create a directory for the downloaded image tarballs. mkdir -p $FILES/images image_fname=`basename "$image_url"` if [[ $image_url != file* ]]; then # Downloads the image (uec ami+akistyle), then extracts it. if [[ ! -f $FILES/$image_fname || "$(stat -c "%s" $FILES/$image_fname)" = "0" ]]; then - wget --progress=dot:giga -c $image_url -O $FILES/$image_fname - if [[ $? -ne 0 ]]; then - echo "Not found: $image_url" - return - fi + for attempt in `seq $max_attempts`; do + local rc=0 + wget --progress=dot:giga -c $image_url -O $FILES/$image_fname || rc=$? + if [[ $rc -ne 0 ]]; then + if [[ "$attempt" -eq "$max_attempts" ]]; then + echo "Not found: $image_url" + return + fi + echo "Download failed, retrying in $attempt second, attempt: $attempt" + sleep $attempt + else + break + fi + done fi image="$FILES/${image_fname}" else From 5e98509eaad724bb68d1a457bd690a387c51a114 Mon Sep 17 00:00:00 2001 From: Rajat Dhasmana Date: Tue, 12 Dec 2023 12:40:58 +0000 Subject: [PATCH 381/574] Increase timeout for reimage operation Looking at the recent failures in the tempest-integrated-compute job, the reimage operation seems to be taking longer than our expected time of 60 seconds (which was increased because of a similar failure in the past, default is 20 seconds). The main culprit for this failure is the image conversion from qcow2 to raw which is taking ~159 seconds. 
Dec 05 13:29:59.709129 np0035951188 cinder-volume[77000]: DEBUG oslo_concurrency.processutils [req-5113eccb-05ba-486a-8130-a58898c8ad35 req-0edf972a-109a-465f-a771-ceb87ecbda3e tempest-ServerActionsV293TestJSON-1780705112 None] CMD "sudo cinder-rootwrap /etc/cinder/rootwrap.conf qemu-img convert -O raw -t none -f qcow2 /opt/stack/data/cinder/conversion/image_download_dbe01f18-1c90-4536-a09a-b49f0811c7a0_copod3cm /dev/mapper/stack--volumes--lvmdriver--1-volume--073a98e8--3c89--4734--9ae5--59af25f8914a" returned: 0 in 159.272s {{(pid=77000) execute /opt/stack/data/venv/lib/python3.10/site-packages/oslo_concurrency/processutils.py:422}}

The most recent run took ~165 seconds on the cinder side, but the operation
failed early because the nova side timed out after 60 seconds and deleted
the volume.

To be on the safe side, 180 seconds per GB seems a sane value for the
operation to complete, which is what this patch configures.

Closes-Bug: 2046252
Change-Id: I8a9628216038f6d363cab5dd8177274c9cfc17c2
---
 lib/nova | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lib/nova b/lib/nova
index b04f94beef..17c90dfe26 100644
--- a/lib/nova
+++ b/lib/nova
@@ -1054,7 +1054,7 @@ function start_nova_compute {

     # Set rebuild timeout longer for BFV instances because we likely have
     # slower disk than expected. Default is 20s/GB
-    iniset $NOVA_CPU_CONF DEFAULT reimage_timeout_per_gb 60
+    iniset $NOVA_CPU_CONF DEFAULT reimage_timeout_per_gb 180

     # Configure the OVSDB connection for os-vif
     if [ -n "$OVSDB_SERVER_LOCAL_HOST" ]; then

From 6fc0e74aa7369ed1503e2d0f12d7543d4835212e Mon Sep 17 00:00:00 2001
From: Fabian Wiesel
Date: Tue, 19 Dec 2023 11:24:02 +0100
Subject: [PATCH 382/574] Fix spelling of `ADITIONAL_VENV_PACKAGES`

This preserves `ADITIONAL_VENV_PACKAGES` as an input for backwards
compatibility, but takes `ADDITIONAL_VENV_PACKAGES` with priority.

Fixes spelling in comment.

Related-Bug: #2046936
Change-Id: I84151d8f71b12da134e8fb9dbf3ae30f2a171fe2
---
 stackrc | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/stackrc b/stackrc
index 464e935839..6cbadf9915 100644
--- a/stackrc
+++ b/stackrc
@@ -207,8 +207,9 @@ fi
 USE_VENV=$(trueorfalse False USE_VENV)

 # Add packages that need to be installed into a venv but are not in any
-# requirmenets files here, in a comma-separated list
-ADDITIONAL_VENV_PACKAGES=${ADITIONAL_VENV_PACKAGES:-""}
+# requirements files here, in a comma-separated list.
+# Currently only used when USE_VENV is true (individual project venvs) +ADDITIONAL_VENV_PACKAGES=${ADDITIONAL_VENV_PACKAGES:-""}} # This can be used to turn database query logging on and off # (currently only implemented for MySQL backend) From 7699ce2d5c465f86f2aace7af6b150dceb0e6e1c Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Mon, 25 Dec 2023 03:14:20 +0000 Subject: [PATCH 383/574] Updated from generate-devstack-plugins-list Change-Id: Ie5cbd87269a10d6abdf1d24f7e6224d9aac3bf5d --- doc/source/plugin-registry.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index b2e733337a..f70041162b 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -85,6 +85,7 @@ openstack/octavia-dashboard `https://opendev.org/openstack/octavia- openstack/octavia-tempest-plugin `https://opendev.org/openstack/octavia-tempest-plugin `__ openstack/openstacksdk `https://opendev.org/openstack/openstacksdk `__ openstack/osprofiler `https://opendev.org/openstack/osprofiler `__ +openstack/ovn-bgp-agent `https://opendev.org/openstack/ovn-bgp-agent `__ openstack/ovn-octavia-provider `https://opendev.org/openstack/ovn-octavia-provider `__ openstack/rally-openstack `https://opendev.org/openstack/rally-openstack `__ openstack/sahara `https://opendev.org/openstack/sahara `__ @@ -185,6 +186,7 @@ x/trio2o `https://opendev.org/x/trio2o `__ x/vmware-nsx `https://opendev.org/x/vmware-nsx `__ x/vmware-vspc `https://opendev.org/x/vmware-vspc `__ +x/whitebox-neutron-tempest-plugin `https://opendev.org/x/whitebox-neutron-tempest-plugin `__ ======================================== === From a2da805f8107703e5f6738399ce5f5e358190fdc Mon Sep 17 00:00:00 2001 From: Fabian Wiesel Date: Mon, 8 Jan 2024 10:18:28 +0100 Subject: [PATCH 384/574] Fixup of 'Fix spelling of `ADITIONAL_VENV_PACKAGES`' Introduced a dangling } in the environment variable. This removes it. Change-Id: If9413dc1751399e5b9c9a0094772394252e5a81c --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index 6cbadf9915..59ba9074f2 100644 --- a/stackrc +++ b/stackrc @@ -209,7 +209,7 @@ USE_VENV=$(trueorfalse False USE_VENV) # Add packages that need to be installed into a venv but are not in any # requirements files here, in a comma-separated list. # Currently only used when USE_VENV is true (individual project venvs) -ADDITIONAL_VENV_PACKAGES=${ADDITIONAL_VENV_PACKAGES:-""}} +ADDITIONAL_VENV_PACKAGES=${ADDITIONAL_VENV_PACKAGES:-""} # This can be used to turn database query logging on and off # (currently only implemented for MySQL backend) From 6091df25a39c9b17883d86ccb091bf2b9c39aa15 Mon Sep 17 00:00:00 2001 From: Rodolfo Alonso Hernandez Date: Wed, 20 Dec 2023 23:06:18 +0000 Subject: [PATCH 385/574] [OVN] Add support for the Neutron OVN agent service The Neutron OVN agent is a service that could run in any node. The functionality will depend on the extensions configured. This new agent is meant to be the replacement for the Neutron OVN metadata agent once the "metadata" extension is implemented in this service [1]. 
[1]https://review.opendev.org/c/openstack/neutron/+/898238 Related-Bug: #2017871 Change-Id: I8f82f0047e89aac122a67f59db84f03e1a6bf519 --- lib/neutron_plugins/ovn_agent | 62 ++++++++++++++++++++++++++++------- 1 file changed, 50 insertions(+), 12 deletions(-) diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index c51b708130..e646258651 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -91,9 +91,14 @@ OVN_GENEVE_OVERHEAD=${OVN_GENEVE_OVERHEAD:-38} # http://www.openvswitch.org/support/dist-docs/ovs-appctl.8.txt OVN_DBS_LOG_LEVEL=${OVN_DBS_LOG_LEVEL:-info} +# OVN metadata agent configuration OVN_META_CONF=$NEUTRON_CONF_DIR/neutron_ovn_metadata_agent.ini OVN_META_DATA_HOST=${OVN_META_DATA_HOST:-$(ipv6_unquote $SERVICE_HOST)} +# OVN agent configuration +OVN_AGENT_CONF=$NEUTRON_CONF_DIR/plugins/ml2/ovn_agent.ini +OVN_AGENT_EXTENSIONS=${OVN_AGENT_EXTENSIONS:-} + # If True (default) the node will be considered a gateway node. ENABLE_CHASSIS_AS_GW=$(trueorfalse True ENABLE_CHASSIS_AS_GW) OVN_L3_CREATE_PUBLIC_NETWORK=$(trueorfalse True OVN_L3_CREATE_PUBLIC_NETWORK) @@ -132,6 +137,7 @@ OVN_RUNDIR=$OVS_PREFIX/var/run/ovn NEUTRON_OVN_BIN_DIR=$(get_python_exec_prefix) NEUTRON_OVN_METADATA_BINARY="neutron-ovn-metadata-agent" +NEUTRON_OVN_AGENT_BINARY="neutron-ovn-agent" STACK_GROUP="$( id --group --name "$STACK_USER" )" @@ -487,6 +493,8 @@ function configure_ovn_plugin { if is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent; then populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_metadata_enabled=True + elif is_service_enabled q-ovn-agent neutron-ovn-agent && [[ "$OVN_AGENT_EXTENSIONS" =~ 'metadata' ]]; then + populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_metadata_enabled=True else populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_metadata_enabled=False fi @@ -508,6 +516,8 @@ function configure_ovn_plugin { if is_service_enabled n-api-meta ; then if is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent; then iniset $NOVA_CONF neutron service_metadata_proxy True + elif is_service_enabled q-ovn-agent neutron-ovn-agent && [[ "$OVN_AGENT_EXTENSIONS" =~ 'metadata' ]]; then + iniset $NOVA_CONF neutron service_metadata_proxy True fi fi } @@ -539,29 +549,42 @@ function configure_ovn { fi # Metadata - if is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent && is_service_enabled ovn-controller; then + local sample_file="" + local config_file="" + if is_service_enabled q-ovn-agent neutron-ovn-agent && [[ "$OVN_AGENT_EXTENSIONS" =~ 'metadata' ]] && is_service_enabled ovn-controller; then + sample_file=$NEUTRON_DIR/etc/neutron/plugins/ml2/ovn_agent.ini.sample + config_file=$OVN_AGENT_CONF + elif is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent && is_service_enabled ovn-controller; then + sample_file=$NEUTRON_DIR/etc/neutron_ovn_metadata_agent.ini.sample + config_file=$OVN_META_CONF + fi + if [ -n ${config_file} ]; then sudo install -d -o $STACK_USER $NEUTRON_CONF_DIR mkdir -p $NEUTRON_DIR/etc/neutron/plugins/ml2 (cd $NEUTRON_DIR && exec ./tools/generate_config_file_samples.sh) - cp $NEUTRON_DIR/etc/neutron_ovn_metadata_agent.ini.sample $OVN_META_CONF - configure_root_helper_options $OVN_META_CONF + cp $sample_file $config_file + configure_root_helper_options $config_file - iniset $OVN_META_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - iniset $OVN_META_CONF DEFAULT nova_metadata_host $OVN_META_DATA_HOST - iniset $OVN_META_CONF DEFAULT metadata_workers $API_WORKERS - iniset $OVN_META_CONF DEFAULT 
state_path $DATA_DIR/neutron - iniset $OVN_META_CONF ovs ovsdb_connection tcp:$OVSDB_SERVER_LOCAL_HOST:6640 - iniset $OVN_META_CONF ovn ovn_sb_connection $OVN_SB_REMOTE + iniset $config_file DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL + iniset $config_file DEFAULT nova_metadata_host $OVN_META_DATA_HOST + iniset $config_file DEFAULT metadata_workers $API_WORKERS + iniset $config_file DEFAULT state_path $DATA_DIR/neutron + iniset $config_file ovs ovsdb_connection tcp:$OVSDB_SERVER_LOCAL_HOST:6640 + iniset $config_file ovn ovn_sb_connection $OVN_SB_REMOTE if is_service_enabled tls-proxy; then - iniset $OVN_META_CONF ovn \ + iniset $config_file ovn \ ovn_sb_ca_cert $INT_CA_DIR/ca-chain.pem - iniset $OVN_META_CONF ovn \ + iniset $config_file ovn \ ovn_sb_certificate $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt - iniset $OVN_META_CONF ovn \ + iniset $config_file ovn \ ovn_sb_private_key $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key fi + if [[ $config_file == $OVN_AGENT_CONF ]]; then + iniset $config_file agent extensions $OVN_AGENT_EXTENSIONS + iniset $config_file ovn ovn_nb_connection $OVN_NB_REMOTE + fi fi } @@ -684,6 +707,9 @@ function _start_ovn_services { if is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent ; then _start_process "devstack@q-ovn-metadata-agent.service" fi + if is_service_enabled q-ovn-agent neutron-ovn-agent ; then + _start_process "devstack@q-ovn-agent.service" + fi } # start_ovn() - Start running processes, including screen @@ -750,6 +776,12 @@ function start_ovn { setup_logging $OVN_META_CONF fi + if is_service_enabled q-ovn-agent neutron-ovn-agent; then + run_process q-ovn-agent "$NEUTRON_OVN_BIN_DIR/$NEUTRON_OVN_AGENT_BINARY --config-file $OVN_AGENT_CONF" + # Format logging + setup_logging $OVN_AGENT_CONF + fi + _start_ovn_services } @@ -774,6 +806,12 @@ function stop_ovn { sudo pkill -9 -f "[h]aproxy" || : _stop_process "devstack@q-ovn-metadata-agent.service" fi + if is_service_enabled q-ovn-agent neutron-ovn-agent; then + # pkill takes care not to kill itself, but it may kill its parent + # sudo unless we use the "ps | grep [f]oo" trick + sudo pkill -9 -f "[h]aproxy" || : + _stop_process "devstack@q-ovn-agent.service" + fi if is_service_enabled ovn-controller-vtep ; then _stop_process "$OVN_CONTROLLER_VTEP_SERVICE" fi From 224fe1b09adb3adcdd02d680a46eeed5b271f7e4 Mon Sep 17 00:00:00 2001 From: Sean Mooney Date: Mon, 7 Aug 2023 19:30:31 +0000 Subject: [PATCH 386/574] add support for zswap and ksmtuned This change add a new lib/host-mem file and moves the existing ksm support to a new configure_ksm function. Additional support for ksmtuned is added with a new flag "ENABLE_KSMTUNED" which defaults to true. This change also adds support for zswap. zswap is disabled by default. When enabled on ubuntu lz4 will be used as the default compressor and z3fold as the zpool. On non debian distros the compressor and zpool are not set. The default values should result in very low overhead although the zstd compressor may provide better overall performance in ci or with slow io due to the higher compression ratio. Additionally memory and network sysctl tunings are optionally applied to defer writes, prefer swapping and optimise tcp connection startup and keepalive. The sysctl tunings are disabled by default The base devstack job has been modifed to enable zram and sysctl tuning. Both ksm and zswap are wrapped by a tune_host function which is now called very early in devstack to ensure they are configured before any memory/network intensive operations are executed. 
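As a rough sketch of the equivalent opt-in for a local (non-CI) run, the
same switches introduced here can go straight into local.conf (zswap and
the sysctl tuning default to off, while KSM and ksmtuned default to on):

    [[local|localrc]]
    ENABLE_ZSWAP=True
    ENABLE_SYSCTL_MEM_TUNING=True
    ENABLE_SYSCTL_NET_TUNING=True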
The ci jobs do not enable this functionality by default. To use this functionaltiy define ENABLE_SYSCTL_MEM_TUNING: true ENABLE_SYSCTL_NET_TUNING: true ENABLE_ZSWAP: true in the devstack_localrc section of the job vars. Change-Id: Ia5202d5a9903492a4c18b50ea8d12bd91cc9f135 --- functions-common | 21 +++++++++++ lib/host | 98 ++++++++++++++++++++++++++++++++++++++++++++++++ stack.sh | 22 +++-------- stackrc | 9 ----- 4 files changed, 125 insertions(+), 25 deletions(-) create mode 100644 lib/host diff --git a/functions-common b/functions-common index c57c4cc054..5238dff30a 100644 --- a/functions-common +++ b/functions-common @@ -236,6 +236,27 @@ function trueorfalse { $xtrace } +# bool_to_int +# +# Convert True|False to int 1 or 0 +# This function can be used to convert the output of trueorfalse +# to an int follow c conventions where false is 0 and 1 it true. +function bool_to_int { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + if [ -z $1 ]; then + die $LINENO "Bool value required" + fi + if [[ $1 == "True" ]] ; then + echo '1' + else + echo '0' + fi + $xtrace +} + + function isset { [[ -v "$1" ]] } diff --git a/lib/host b/lib/host new file mode 100644 index 0000000000..95c5b9bbcb --- /dev/null +++ b/lib/host @@ -0,0 +1,98 @@ +#!/bin/bash + +# Kernel Samepage Merging (KSM) +# ----------------------------- + +# Processes that mark their memory as mergeable can share identical memory +# pages if KSM is enabled. This is particularly useful for nova + libvirt +# backends but any other setup that marks its memory as mergeable can take +# advantage. The drawback is there is higher cpu load; however, we tend to +# be memory bound not cpu bound so enable KSM by default but allow people +# to opt out if the CPU time is more important to them. +ENABLE_KSM=$(trueorfalse True ENABLE_KSM) +ENABLE_KSMTUNED=$(trueorfalse True ENABLE_KSMTUNED) +function configure_ksm { + if [[ $ENABLE_KSMTUNED == "True" ]] ; then + install_package "ksmtuned" + fi + if [[ -f /sys/kernel/mm/ksm/run ]] ; then + echo $(bool_to_int ENABLE_KSM) | sudo tee /sys/kernel/mm/ksm/run + fi +} + +# Compressed swap (ZSWAP) +#------------------------ + +# as noted in the kernel docs https://docs.kernel.org/admin-guide/mm/zswap.html +# Zswap is a lightweight compressed cache for swap pages. +# It takes pages that are in the process of being swapped out and attempts +# to compress them into a dynamically allocated RAM-based memory pool. +# zswap basically trades CPU cycles for potentially reduced swap I/O. +# This trade-off can also result in a significant performance improvement +# if reads from the compressed cache are faster than reads from a swap device. + +ENABLE_ZSWAP=$(trueorfalse False ENABLE_ZSWAP) +# lz4 is very fast although it does not have the best compression +# zstd has much better compression but more latency +ZSWAP_COMPRESSOR=${ZSWAP_COMPRESSOR:="lz4"} +ZSWAP_ZPOOL=${ZSWAP_ZPOOL:="z3fold"} +function configure_zswap { + if [[ $ENABLE_KSMTUNED == "True" ]] ; then + # Centos 9 stream seems to only support enabling but not run time + # tuning so dont try to choose better default on centos + if is_ubuntu; then + echo ${ZSWAP_COMPRESSOR} | sudo tee /sys/module/zswap/parameters/compressor + echo ${ZSWAP_ZPOOL} | sudo tee /sys/module/zswap/parameters/zpool + fi + echo 1 | sudo tee /sys/module/zswap/parameters/enabled + # print curent zswap kernel config + sudo grep -R . 
/sys/module/zswap/parameters || /bin/true + fi +} + +ENABLE_SYSCTL_MEM_TUNING=$(trueorfalse False ENABLE_SYSCTL_MEM_TUNING) +function configure_sysctl_mem_parmaters { + if [[ $ENABLE_SYSCTL_MEM_TUNING == "True" ]] ; then + # defer write when memory is available + sudo sysctl -w vm.dirty_ratio=60 + sudo sysctl -w vm.dirty_background_ratio=10 + sudo sysctl -w vm.vfs_cache_pressure=50 + # assume swap is compressed so on new kernels + # give it equal priority as page cache which is + # uncompressed. on kernels < 5.8 the max is 100 + # not 200 so it will strongly prefer swapping. + sudo sysctl -w vm.swappiness=100 + sudo grep -R . /proc/sys/vm/ || /bin/true + fi +} + +function configure_host_mem { + configure_zswap + configure_ksm + configure_sysctl_mem_parmaters +} + +ENABLE_SYSCTL_NET_TUNING=$(trueorfalse False ENABLE_SYSCTL_NET_TUNING) +function configure_sysctl_net_parmaters { + if [[ $ENABLE_SYSCTL_NET_TUNING == "True" ]] ; then + # detect dead TCP connections after 120 seconds + sudo sysctl -w net.ipv4.tcp_keepalive_time=60 + sudo sysctl -w net.ipv4.tcp_keepalive_intvl=10 + sudo sysctl -w net.ipv4.tcp_keepalive_probes=6 + # reudce network latency for new connections + sudo sysctl -w net.ipv4.tcp_fastopen=3 + # print tcp options + sudo grep -R . /proc/sys/net/ipv4/tcp* || /bin/true + # disable qos by default + sudo sysctl -w net.core.default_qdisc=pfifo_fast + fi +} + +function configure_host_net { + configure_sysctl_net_parmaters +} + +function tune_host { + configure_host_mem + configure_host_net +} \ No newline at end of file diff --git a/stack.sh b/stack.sh index dce15ac01c..a816efda22 100755 --- a/stack.sh +++ b/stack.sh @@ -611,6 +611,12 @@ rm -f $SSL_BUNDLE_FILE source $TOP_DIR/lib/database source $TOP_DIR/lib/rpc_backend +# load host tuning functions and defaults +source $TOP_DIR/lib/host +# tune host memory early to ensure zswap/ksm are configured before +# doing memory intensive operation like cloning repos or unpacking packages. +tune_host + # Configure Projects # ================== @@ -1079,22 +1085,6 @@ fi # Save configuration values save_stackenv $LINENO -# Kernel Samepage Merging (KSM) -# ----------------------------- - -# Processes that mark their memory as mergeable can share identical memory -# pages if KSM is enabled. This is particularly useful for nova + libvirt -# backends but any other setup that marks its memory as mergeable can take -# advantage. The drawback is there is higher cpu load; however, we tend to -# be memory bound not cpu bound so enable KSM by default but allow people -# to opt out if the CPU time is more important to them. - -if [[ $ENABLE_KSM == "True" ]] ; then - if [[ -f /sys/kernel/mm/ksm/run ]] ; then - sudo sh -c "echo 1 > /sys/kernel/mm/ksm/run" - fi -fi - # Start Services # ============== diff --git a/stackrc b/stackrc index 59ba9074f2..097913a4e9 100644 --- a/stackrc +++ b/stackrc @@ -121,15 +121,6 @@ else SYSTEMCTL="sudo systemctl" fi - -# Whether or not to enable Kernel Samepage Merging (KSM) if available. -# This allows programs that mark their memory as mergeable to share -# memory pages if they are identical. This is particularly useful with -# libvirt backends. This reduces memory usage at the cost of CPU overhead -# to scan memory. We default to enabling it because we tend to be more -# memory constrained than CPU bound. 
-ENABLE_KSM=$(trueorfalse True ENABLE_KSM) - # Passwords generated by interactive devstack runs if [[ -r $RC_DIR/.localrc.password ]]; then source $RC_DIR/.localrc.password From 5c1736b78256f5da86a91c4489f43f8ba1bce224 Mon Sep 17 00:00:00 2001 From: Sean Mooney Date: Wed, 24 Jan 2024 10:53:12 +0000 Subject: [PATCH 387/574] fix zswap enable flag zswap should only be enabled if ENABLE_ZSWAP is true. The if condition was checking ENABLE_KSMTUNED. That is now fixed. Change-Id: I76ba139de69fb1710bcb96cc9f638260463e2032 --- lib/host | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/host b/lib/host index 95c5b9bbcb..2fa22e24ea 100644 --- a/lib/host +++ b/lib/host @@ -37,7 +37,7 @@ ENABLE_ZSWAP=$(trueorfalse False ENABLE_ZSWAP) ZSWAP_COMPRESSOR=${ZSWAP_COMPRESSOR:="lz4"} ZSWAP_ZPOOL=${ZSWAP_ZPOOL:="z3fold"} function configure_zswap { - if [[ $ENABLE_KSMTUNED == "True" ]] ; then + if [[ $ENABLE_ZSWAP == "True" ]] ; then # Centos 9 stream seems to only support enabling but not run time # tuning so dont try to choose better default on centos if is_ubuntu; then From b485549efc9851bfb2cabd1fce40cf39c403c24e Mon Sep 17 00:00:00 2001 From: Takashi Kajinami Date: Sat, 27 Jan 2024 18:58:11 +0900 Subject: [PATCH 388/574] Uncap bashate The bashate tool has been very stable for a while and we rarely expect changes which may break existing scripts. This removes the current capping to avoid updating the upper limit when when a new release is created in bashate. Change-Id: Iae94811aebf58b491d6b2b2773db88ac50fdd737 --- tox.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index ec764abc87..26cd68c031 100644 --- a/tox.ini +++ b/tox.ini @@ -12,7 +12,7 @@ basepython = python3 # against devstack, just set BASHATE_INSTALL_PATH=/path/... to your # modified bashate tree deps = - {env:BASHATE_INSTALL_PATH:bashate==2.0.0} + {env:BASHATE_INSTALL_PATH:bashate} allowlist_externals = bash commands = bash -c "find {toxinidir} \ -not \( -type d -name .?\* -prune \) \ From d251d12d71ebca758e8584204a0ba14d3c6bab6c Mon Sep 17 00:00:00 2001 From: Abhishek Kekane Date: Mon, 29 Jan 2024 18:20:06 +0000 Subject: [PATCH 389/574] Make `centralized_db` driver as default cache driver Making newly introduced `centralized_db` driver as default cache driver for glance so that it can be tested in available CI jobs. New cache driver `centralized_db` needs `worker_self_reference_url` in glance-api.conf file otherwise glance api service will fail to start. Related blueprint centralized-cache-db Depends-On: https://review.opendev.org/c/openstack/glance/+/899871 Change-Id: I75267988b1c80ac9daa5843ce8462bbac49ffe27 --- lib/glance | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/glance b/lib/glance index 4ff9a34ca8..e4bfc8f5a3 100644 --- a/lib/glance +++ b/lib/glance @@ -75,7 +75,7 @@ GLANCE_MULTIPLE_FILE_STORES=${GLANCE_MULTIPLE_FILE_STORES:-fast} GLANCE_DEFAULT_BACKEND=${GLANCE_DEFAULT_BACKEND:-fast} GLANCE_CACHE_DIR=${GLANCE_CACHE_DIR:=$DATA_DIR/glance/cache} -GLANCE_CACHE_DRIVER=${GLANCE_CACHE_DRIVER:-sqlite} +GLANCE_CACHE_DRIVER=${GLANCE_CACHE_DRIVER:-centralized_db} # Full Glance functionality requires running in standalone mode. If we are # not in uwsgi mode, then we are standalone, otherwise allow separate control. 
@@ -432,6 +432,7 @@ function configure_glance {
         iniset $GLANCE_API_CONF DEFAULT bind_host $GLANCE_SERVICE_LISTEN_ADDRESS
         iniset $GLANCE_API_CONF DEFAULT bind_port $GLANCE_SERVICE_PORT_INT
         iniset $GLANCE_API_CONF DEFAULT workers "$API_WORKERS"
+        iniset $GLANCE_API_CONF DEFAULT worker_self_reference_url $GLANCE_URL
     fi

     if [[ "$GLANCE_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then

From 4ddd456dd3e71bcdf9a02a12dd5914b82ec48e91 Mon Sep 17 00:00:00 2001
From: Slawek Kaplonski
Date: Fri, 9 Feb 2024 14:11:44 +0100
Subject: [PATCH 390/574] Add support for the pyproject.toml file in setup with constraints

In _setup_package_with_constraints_edit, the package name was always
discovered from the setup.cfg file. But some projects now implement
PEP-621 (see [1] for SQLAlchemy, for example), so setup.cfg alone is no
longer enough.
This patch additionally parses the pyproject.toml file if the name is
not found in the setup.cfg file.

[1] https://github.com/sqlalchemy/sqlalchemy/commit/a8dbf8763a8fa2ca53cc01033f06681a421bf60b

Closes-Bug: #2052509
Change-Id: Iee9262079d09a8bd22cd05a8f17950a41a0d1f9d
---
 inc/python | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/inc/python b/inc/python
index cc6e01fede..43b06eb520 100644
--- a/inc/python
+++ b/inc/python
@@ -405,6 +405,9 @@ function _setup_package_with_constraints_edit {
         # source we are about to do.
         local name
         name=$(awk '/^name.*=/ {print $3}' $project_dir/setup.cfg)
+        if [ -z $name ]; then
+            name=$(awk '/^name =/ {gsub(/"/, "", $3); print $3}' $project_dir/pyproject.toml)
+        fi
         $REQUIREMENTS_DIR/.venv/bin/edit-constraints \
             $REQUIREMENTS_DIR/upper-constraints.txt -- $name
     fi

From 402b7e89b60035b39b40e8886dee82487c54de97 Mon Sep 17 00:00:00 2001
From: "Dr. Jens Harbott"
Date: Fri, 23 Feb 2024 11:46:03 +0100
Subject: [PATCH 391/574] Drop nodesets with ubuntu-xenial

The ubuntu-xenial labels are going to disappear from opendev as that
image is EOL and will be deleted. Clean up our zuul config.

Update some example references as well.
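For reference, on a stock Ubuntu 22.04 node the distro detection now lines
up with the updated comment examples, roughly:

    os_VENDOR=Ubuntu
    os_RELEASE=22.04
    os_PACKAGE=deb
    os_CODENAME=jammy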
Change-Id: Id04110f7c871caa1739ff2b62e9796be4fb9aa00 --- .zuul.yaml | 80 ------------------------------------------------ functions-common | 4 +-- 2 files changed, 2 insertions(+), 82 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 47466cb3eb..13b4633e13 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -1,13 +1,3 @@ -- nodeset: - name: openstack-single-node - nodes: - - name: controller - label: ubuntu-xenial - groups: - - name: tempest - nodes: - - controller - - nodeset: name: openstack-single-node-jammy nodes: @@ -38,16 +28,6 @@ nodes: - controller -- nodeset: - name: openstack-single-node-xenial - nodes: - - name: controller - label: ubuntu-xenial - groups: - - name: tempest - nodes: - - controller - - nodeset: name: devstack-single-node-centos-7 nodes: @@ -118,36 +98,6 @@ nodes: - controller -- nodeset: - name: openstack-two-node - nodes: - - name: controller - label: ubuntu-xenial - - name: compute1 - label: ubuntu-xenial - groups: - # Node where tests are executed and test results collected - - name: tempest - nodes: - - controller - # Nodes running the compute service - - name: compute - nodes: - - controller - - compute1 - # Nodes that are not the controller - - name: subnode - nodes: - - compute1 - # Switch node for multinode networking setup - - name: switch - nodes: - - controller - # Peer nodes for multinode networking setup - - name: peers - nodes: - - compute1 - - nodeset: name: openstack-two-node-centos-9-stream nodes: @@ -268,36 +218,6 @@ nodes: - compute1 -- nodeset: - name: openstack-two-node-xenial - nodes: - - name: controller - label: ubuntu-xenial - - name: compute1 - label: ubuntu-xenial - groups: - # Node where tests are executed and test results collected - - name: tempest - nodes: - - controller - # Nodes running the compute service - - name: compute - nodes: - - controller - - compute1 - # Nodes that are not the controller - - name: subnode - nodes: - - compute1 - # Switch node for multinode networking setup - - name: switch - nodes: - - controller - # Peer nodes for multinode networking setup - - name: peers - nodes: - - compute1 - - nodeset: name: openstack-three-node-focal nodes: diff --git a/functions-common b/functions-common index 5238dff30a..8ea6df7c1d 100644 --- a/functions-common +++ b/functions-common @@ -401,9 +401,9 @@ function warn { # such as "install_package" further abstract things in better ways. # # ``os_VENDOR`` - vendor name: ``Ubuntu``, ``Fedora``, etc -# ``os_RELEASE`` - major release: ``16.04`` (Ubuntu), ``23`` (Fedora) +# ``os_RELEASE`` - major release: ``22.04`` (Ubuntu), ``23`` (Fedora) # ``os_PACKAGE`` - package type: ``deb`` or ``rpm`` -# ``os_CODENAME`` - vendor's codename for release: ``xenial`` +# ``os_CODENAME`` - vendor's codename for release: ``jammy`` declare -g os_VENDOR os_RELEASE os_PACKAGE os_CODENAME From 50c791c0ae3bd75335c89312a5595f0ad2864945 Mon Sep 17 00:00:00 2001 From: Takashi Kajinami Date: Thu, 29 Feb 2024 17:02:55 +0900 Subject: [PATCH 392/574] Drop unused environments for TripleO and heat agents TripleO was already retired. These environments are not actually used by heat jobs. 
Change-Id: I63b7413a1575a620f9d2cbd56e93be78816639e0 --- stackrc | 22 ---------------------- 1 file changed, 22 deletions(-) diff --git a/stackrc b/stackrc index 464e935839..966e5ed69d 100644 --- a/stackrc +++ b/stackrc @@ -588,28 +588,6 @@ GITREPO["os-ken"]=${OS_KEN_REPO:-${GIT_BASE}/openstack/os-ken.git} GITBRANCH["os-ken"]=${OS_KEN_BRANCH:-$TARGET_BRANCH} GITDIR["os-ken"]=$DEST/os-ken -################## -# -# TripleO / Heat Agent Components -# -################## - -# run-parts script required by os-refresh-config -DIB_UTILS_REPO=${DIB_UTILS_REPO:-${GIT_BASE}/openstack/dib-utils.git} -DIB_UTILS_BRANCH=${DIB_UTILS_BRANCH:-$BRANCHLESS_TARGET_BRANCH} - -# os-apply-config configuration template tool -OAC_REPO=${OAC_REPO:-${GIT_BASE}/openstack/os-apply-config.git} -OAC_BRANCH=${OAC_BRANCH:-$TRAILING_TARGET_BRANCH} - -# os-collect-config configuration agent -OCC_REPO=${OCC_REPO:-${GIT_BASE}/openstack/os-collect-config.git} -OCC_BRANCH=${OCC_BRANCH:-$TRAILING_TARGET_BRANCH} - -# os-refresh-config configuration run-parts tool -ORC_REPO=${ORC_REPO:-${GIT_BASE}/openstack/os-refresh-config.git} -ORC_BRANCH=${ORC_BRANCH:-$TRAILING_TARGET_BRANCH} - ################# # From af57c0b778bb13a9b5ffd784fc456a21614e67b1 Mon Sep 17 00:00:00 2001 From: Jeremy Stanley Date: Mon, 4 Mar 2024 18:24:24 +0000 Subject: [PATCH 393/574] Drop the devstack-single-node-centos-7 nodeset OpenDev is preparing to remove centos-7 nodes on March 15[*]. This change drops one nodeset definition which is the last remaining reference on DevStack's master branch. [*] https://lists.openstack.org/archives/list/openstack-discuss@lists.openstack.org/message/A2YIY5L7MVYSQMTVZU3L3OM7GLVVZPLK/ Change-Id: Icd487e1012263a9b0bc13b529d31ff2025108adf --- .zuul.yaml | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 13b4633e13..8bc082364c 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -28,16 +28,6 @@ nodes: - controller -- nodeset: - name: devstack-single-node-centos-7 - nodes: - - name: controller - label: centos-7 - groups: - - name: tempest - nodes: - - controller - - nodeset: name: devstack-single-node-centos-9-stream nodes: From 1fe7707cf04852d024b64f695e40568696851b15 Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Tue, 5 Mar 2024 08:30:19 -0800 Subject: [PATCH 394/574] Ignore 500 status code in generate plugin script Due to various reasons, this script may encounter the 500 status code from some repo (x/fuel-plugin-onos in current case[1]) If that happen then it return failure status code to the propose-updates job and fail that job - https://zuul.openstack.org/builds?job_name=propose-updates&project=openstack%2Fdevstack&skip=0 It is better not to raise the 500 error in this script and just ignore those repo to process further to detect the plugin. 
[1] https://zuul.openstack.org/build/dba0aa41d145472397916dfcd13948de/log/job-output.txt#2442 Change-Id: Ibca0a2aac404161340e8fc00170018eecf5c8326 --- tools/generate-devstack-plugins-list.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tools/generate-devstack-plugins-list.py b/tools/generate-devstack-plugins-list.py index 1cacd06bf8..bc28515a26 100644 --- a/tools/generate-devstack-plugins-list.py +++ b/tools/generate-devstack-plugins-list.py @@ -73,8 +73,11 @@ def has_devstack_plugin(session, proj): s = requests.Session() # sometimes gitea gives us a 500 error; retry sanely # https://stackoverflow.com/a/35636367 +# We need to disable raise_on_status because if any repo endup with 500 then +# propose-updates job which run this script will fail. retries = Retry(total=3, backoff_factor=1, - status_forcelist=[ 500 ]) + status_forcelist=[ 500 ], + raise_on_status=False) s.mount('https://', HTTPAdapter(max_retries=retries)) found_plugins = filter(functools.partial(has_devstack_plugin, s), projects) From 5e837d1f0d9078c58bc634474a1adf311bc2b491 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Wed, 6 Mar 2024 03:13:36 +0000 Subject: [PATCH 395/574] Updated from generate-devstack-plugins-list Change-Id: Ic99b518ddf1045893991accaa089f44d0d4f4b0d --- doc/source/plugin-registry.rst | 4 ---- 1 file changed, 4 deletions(-) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index f70041162b..2d2a92c4a9 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -28,8 +28,6 @@ openstack/aodh `https://opendev.org/openstack/aodh `__ openstack/blazar `https://opendev.org/openstack/blazar `__ openstack/ceilometer `https://opendev.org/openstack/ceilometer `__ -openstack/ceilometer-powervm `https://opendev.org/openstack/ceilometer-powervm `__ -openstack/cinderlib `https://opendev.org/openstack/cinderlib `__ openstack/cloudkitty `https://opendev.org/openstack/cloudkitty `__ openstack/cyborg `https://opendev.org/openstack/cyborg `__ openstack/designate `https://opendev.org/openstack/designate `__ @@ -69,7 +67,6 @@ openstack/networking-bagpipe `https://opendev.org/openstack/networki openstack/networking-baremetal `https://opendev.org/openstack/networking-baremetal `__ openstack/networking-bgpvpn `https://opendev.org/openstack/networking-bgpvpn `__ openstack/networking-generic-switch `https://opendev.org/openstack/networking-generic-switch `__ -openstack/networking-powervm `https://opendev.org/openstack/networking-powervm `__ openstack/networking-sfc `https://opendev.org/openstack/networking-sfc `__ openstack/neutron `https://opendev.org/openstack/neutron `__ openstack/neutron-dynamic-routing `https://opendev.org/openstack/neutron-dynamic-routing `__ @@ -79,7 +76,6 @@ openstack/neutron-tempest-plugin `https://opendev.org/openstack/neutron- openstack/neutron-vpnaas `https://opendev.org/openstack/neutron-vpnaas `__ openstack/neutron-vpnaas-dashboard `https://opendev.org/openstack/neutron-vpnaas-dashboard `__ openstack/nova `https://opendev.org/openstack/nova `__ -openstack/nova-powervm `https://opendev.org/openstack/nova-powervm `__ openstack/octavia `https://opendev.org/openstack/octavia `__ openstack/octavia-dashboard `https://opendev.org/openstack/octavia-dashboard `__ openstack/octavia-tempest-plugin `https://opendev.org/openstack/octavia-tempest-plugin `__ From 5f5255bc011ef885f254c659009662870499de5a Mon Sep 17 00:00:00 2001 From: huicoffee <784657156@qq.com> Date: Fri, 15 Mar 2024 17:15:33 +0800 Subject: [PATCH 396/574] 
Remove Glance uWSGI config in clean.sh Updated clean.sh to remove Glance's Apache uWSGI config files in APACHE_CONF_DIR, including /etc/apache2/sites-enabled/ on Ubuntu. Test Plan: - Run clean.sh. - Confirm Glance uWSGI configs are removed from APACHE_CONF_DIR. Closes-Bug: #2057999 Change-Id: I44475b8e084c4b20d7b7cb7f28574f797dbda7a2 --- lib/glance | 1 + lib/host | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/glance b/lib/glance index e4bfc8f5a3..8ee842625f 100644 --- a/lib/glance +++ b/lib/glance @@ -168,6 +168,7 @@ function cleanup_glance { # Cleanup reserved stores directories sudo rm -rf $GLANCE_STAGING_DIR $GLANCE_TASKS_DIR fi + remove_uwsgi_config "$GLANCE_UWSGI_CONF" "$GLANCE_UWSGI" } # Set multiple cinder store related config options for each of the cinder store diff --git a/lib/host b/lib/host index 2fa22e24ea..a812c39612 100644 --- a/lib/host +++ b/lib/host @@ -95,4 +95,4 @@ function configure_host_net { function tune_host { configure_host_mem configure_host_net -} \ No newline at end of file +} From e1b7cc0ef8db3f7363cd478effa8f7292b61b9bf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Douglas=20Mendiz=C3=A1bal?= Date: Mon, 25 Mar 2024 12:09:04 -0400 Subject: [PATCH 397/574] Do not configure system-scope admin for keystone This patch removes a couple of tempest.conf settings that are being overwrriten when Keystone is set to enforce scope. These settings are already being set by the keystone devstack plugin [1] and do not need to be overwritten here. Keystone is changing the default admin credentials to be project-admin instead of system-admin to address some failing tests in services that require project-scoped admin for their admin APIs. [2] These overrides are preventing that change from taking effect. [1] https://opendev.org/openstack/keystone/src/branch/stable/2024.1/devstack/lib/scope.sh#L24-L25 [2] https://review.opendev.org/c/openstack/keystone/+/913999 Change-Id: I48edbcbaa993f2d1f35160c415986d21a15a4999 --- lib/tempest | 2 -- 1 file changed, 2 deletions(-) diff --git a/lib/tempest b/lib/tempest index 7b5fde170e..6bd203e6f4 100644 --- a/lib/tempest +++ b/lib/tempest @@ -702,8 +702,6 @@ function configure_tempest { # test can be run with scoped token. if [[ "$KEYSTONE_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then iniset $TEMPEST_CONFIG enforce_scope keystone true - iniset $TEMPEST_CONFIG auth admin_system 'all' - iniset $TEMPEST_CONFIG auth admin_project_name '' fi if [[ "$NOVA_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then From 99a96288eb14e66723d85e6ca24ff51babac7ec8 Mon Sep 17 00:00:00 2001 From: Martin Kopec Date: Thu, 28 Mar 2024 23:38:19 +0100 Subject: [PATCH 398/574] Update DEVSTACK_SERIES to 2024.2 stable/2024.1 branch has been created now and current master is for 2024.2. Change-Id: I4af9e87318ef9cbfede7df7c23872a1a7e38c820 --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index 4e49b461d4..de81f01f38 100644 --- a/stackrc +++ b/stackrc @@ -257,7 +257,7 @@ REQUIREMENTS_DIR=${REQUIREMENTS_DIR:-$DEST/requirements} # Setting the variable to 'ALL' will activate the download for all # libraries. 
-DEVSTACK_SERIES="2024.1" +DEVSTACK_SERIES="2024.2" ############## # From c336b873421c954921ee26c22bd9bfe65b330d0c Mon Sep 17 00:00:00 2001 From: Jaromir Wysoglad Date: Wed, 27 Mar 2024 11:36:26 +0100 Subject: [PATCH 399/574] Fix neutron empty string check The variable should be in quotes for the check to work Testing the behavior in bash: current behavior: $ config_file="" $ if [ -n ${config_file} ]; then echo a; fi a $ config_file="abc" $ if [ -n ${config_file} ]; then echo a; fi a behavior with quotes: $ config_file="" $ if [ -n "$config_file" ]; then echo a; fi $ config_file="abc" $ if [ -n "$config_file" ]; then echo a; fi a Change-Id: Iba956d9d4f43b925848174a632aabe58999be74b --- lib/neutron_plugins/ovn_agent | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index e646258651..699bd54f4e 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -558,7 +558,7 @@ function configure_ovn { sample_file=$NEUTRON_DIR/etc/neutron_ovn_metadata_agent.ini.sample config_file=$OVN_META_CONF fi - if [ -n ${config_file} ]; then + if [ -n "$config_file" ]; then sudo install -d -o $STACK_USER $NEUTRON_CONF_DIR mkdir -p $NEUTRON_DIR/etc/neutron/plugins/ml2 From b6613b1e71fb6a0efb63ec9346bd2e67131657e0 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Wed, 6 Dec 2023 10:22:30 +0000 Subject: [PATCH 400/574] lib/apache: Use module paths instead of WSGI scripts pbr's 'wsgi_scripts' entrypoint functionality is not long for this world so we need to start working towards an alternative. We could start packaging our own WSGI scripts in DevStack but using module paths seems like a better option, particularly when it's supported by other WSGI servers like gunicorn. Currently only nova is migrated. We should switch additional projects as they migrate and eventually remove the support for WSGI scripts entirely. Change-Id: I057dc635c01e54740ee04dfe7b39ef83db5dc180 Signed-off-by: Stephen Finucane Depends-on: https://review.opendev.org/c/openstack/nova/+/902687/ --- lib/apache | 33 ++++++++++++++++++++++++++++----- lib/nova | 8 ++++---- 2 files changed, 32 insertions(+), 9 deletions(-) diff --git a/lib/apache b/lib/apache index 9017e0a38a..a314b76fb7 100644 --- a/lib/apache +++ b/lib/apache @@ -237,13 +237,17 @@ function restart_apache_server { restart_service $APACHE_NAME } +# write_uwsgi_config() - Create a new uWSGI config file function write_uwsgi_config { local conf=$1 local wsgi=$2 local url=$3 local http=$4 - local name="" - name=$(basename $wsgi) + local name=$5 + + if [ -z "$name" ]; then + name=$(basename $wsgi) + fi # create a home for the sockets; note don't use /tmp -- apache has # a private view of it on some platforms. @@ -259,7 +263,15 @@ function write_uwsgi_config { # always cleanup given that we are using iniset here rm -rf $conf - iniset "$conf" uwsgi wsgi-file "$wsgi" + # Set either the module path or wsgi script path depending on what we've + # been given. 
Note that the regex isn't exhaustive - neither Python modules + # nor Python variables can start with a number - but it's "good enough" + if [[ "$wsgi" =~ ^[a-zA-Z0-9_.]+:[a-zA-Z0-9_]+$ ]]; then + iniset "$conf" uwsgi module "$wsgi" + else + deprecated 'Configuring uWSGI with a WSGI file is deprecated, use module paths instead' + iniset "$conf" uwsgi wsgi-file "$wsgi" + fi iniset "$conf" uwsgi processes $API_WORKERS # This is running standalone iniset "$conf" uwsgi master true @@ -306,14 +318,25 @@ function write_local_uwsgi_http_config { local conf=$1 local wsgi=$2 local url=$3 - name=$(basename $wsgi) + local name=$4 + + if [ -z "$name" ]; then + name=$(basename $wsgi) + fi # create a home for the sockets; note don't use /tmp -- apache has # a private view of it on some platforms. # always cleanup given that we are using iniset here rm -rf $conf - iniset "$conf" uwsgi wsgi-file "$wsgi" + # Set either the module path or wsgi script path depending on what we've + # been given + if [[ "$wsgi" =~ ^[a-zA-Z0-9_.]+:[a-zA-Z0-9_]+$ ]]; then + iniset "$conf" uwsgi module "$wsgi" + else + deprecated 'Configuring uWSGI with a WSGI file is deprecated, use module paths instead' + iniset "$conf" uwsgi wsgi-file "$wsgi" + fi port=$(get_random_port) iniset "$conf" uwsgi http-socket "$APACHE_LOCAL_HOST:$port" iniset "$conf" uwsgi processes $API_WORKERS diff --git a/lib/nova b/lib/nova index 17c90dfe26..a261fac8f6 100644 --- a/lib/nova +++ b/lib/nova @@ -53,8 +53,8 @@ NOVA_COND_CONF=$NOVA_CONF_DIR/nova.conf NOVA_CPU_CONF=$NOVA_CONF_DIR/nova-cpu.conf NOVA_FAKE_CONF=$NOVA_CONF_DIR/nova-fake.conf NOVA_API_DB=${NOVA_API_DB:-nova_api} -NOVA_UWSGI=$NOVA_BIN_DIR/nova-api-wsgi -NOVA_METADATA_UWSGI=$NOVA_BIN_DIR/nova-metadata-wsgi +NOVA_UWSGI=nova.wsgi.osapi_compute:application +NOVA_METADATA_UWSGI=nova.wsgi.metadata:application NOVA_UWSGI_CONF=$NOVA_CONF_DIR/nova-api-uwsgi.ini NOVA_METADATA_UWSGI_CONF=$NOVA_CONF_DIR/nova-metadata-uwsgi.ini @@ -549,11 +549,11 @@ function create_nova_conf { iniset $NOVA_CONF upgrade_levels compute "auto" if is_service_enabled n-api; then - write_uwsgi_config "$NOVA_UWSGI_CONF" "$NOVA_UWSGI" "/compute" + write_uwsgi_config "$NOVA_UWSGI_CONF" "$NOVA_UWSGI" "/compute" "" "nova-api" fi if is_service_enabled n-api-meta; then - write_uwsgi_config "$NOVA_METADATA_UWSGI_CONF" "$NOVA_METADATA_UWSGI" "" "$SERVICE_LISTEN_ADDRESS:${METADATA_SERVICE_PORT}" + write_uwsgi_config "$NOVA_METADATA_UWSGI_CONF" "$NOVA_METADATA_UWSGI" "" "$SERVICE_LISTEN_ADDRESS:${METADATA_SERVICE_PORT}" "nova-metadata" fi if is_service_enabled ceilometer; then From 9be4ceeaa10f6ed92291e77ec52794acfb67c147 Mon Sep 17 00:00:00 2001 From: Brian Haley Date: Tue, 23 Apr 2024 15:37:37 -0400 Subject: [PATCH 401/574] Fix datetime.utcnow() deprecation warning Running stack.sh on a python 3.12 system generates this warning from worlddump.py: DeprecationWarning: datetime.datetime.utcnow() is deprecated Use datetime.now(timezone.utc) instead, which should be backwards-compatible with older python versions. 
TrivialFix Change-Id: I11fe60f6b04842412045c6cb97f493f7fef66e1a --- tools/worlddump.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/worlddump.py b/tools/worlddump.py index aadd33b634..edbfa268db 100755 --- a/tools/worlddump.py +++ b/tools/worlddump.py @@ -51,7 +51,7 @@ def get_options(): def filename(dirname, name=""): - now = datetime.datetime.utcnow() + now = datetime.datetime.now(datetime.timezone.utc) fmt = "worlddump-%Y-%m-%d-%H%M%S" if name: fmt += "-" + name From aee9b0ff9e68f9306d9a55bca5304366fb85e91b Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Thu, 9 May 2024 10:29:43 -0700 Subject: [PATCH 402/574] Make rocky 9 job non-voting This job is currently failing with mirror or repo issues. Change-Id: Ie0f862f933cd99cc9fe698d5a178b952e6e93ac4 --- .zuul.yaml | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/.zuul.yaml b/.zuul.yaml index 8bc082364c..294dd48f4d 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -644,6 +644,11 @@ description: Rocky Linux 9 Blue Onyx platform test nodeset: devstack-single-node-rockylinux-9 timeout: 9000 + # NOTE(danms): This has been failing lately with some repository metadata + # errors. We're marking this as non-voting until it appears to have + # stabilized: + # https://zuul.openstack.org/builds?job_name=devstack-platform-rocky-blue-onyx&skip=0 + voting: false vars: configure_swap_size: 4096 @@ -887,7 +892,9 @@ - devstack-ipv6 - devstack-platform-debian-bookworm - devstack-platform-debian-bullseye - - devstack-platform-rocky-blue-onyx + # NOTE(danms): Disabled due to instability, see comment in the job + # definition above. + # - devstack-platform-rocky-blue-onyx - devstack-enforce-scope - devstack-multinode - devstack-unit-tests From 769adbd69daf89f05c96d877519efc81a25fd3c1 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Wed, 8 May 2024 16:58:46 +0000 Subject: [PATCH 403/574] Upload images with --file instead of stdin This is more likely how people will actually upload their images, but it also prevents the "osc as a service" feature from working because stdin isn't proxied (of course). So just convert our uses of "image create" to use --file instead of stdin. Change-Id: I7205eb0100ba7406650ed609cf517cba2c8d30aa --- functions | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/functions b/functions index 01e1d259ad..f81e8f0a08 100644 --- a/functions +++ b/functions @@ -118,7 +118,7 @@ function _upload_image { useimport="--import" fi - openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name" --public --container-format "$container" --disk-format "$disk" $useimport $properties < "${image}" + openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name" --public --container-format "$container" --disk-format "$disk" $useimport $properties --file $(readlink -f "${image}") } # Retrieve an image from a URL and upload into Glance. @@ -425,10 +425,10 @@ function upload_image { # kernel for use when uploading the root filesystem. 
local kernel_id="" ramdisk_id=""; if [ -n "$kernel" ]; then - kernel_id=$(openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name-kernel" $(_image_properties_to_arg $img_property) --public --container-format aki --disk-format aki < "$kernel" -f value -c id) + kernel_id=$(openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name-kernel" $(_image_properties_to_arg $img_property) --public --container-format aki --disk-format aki --file $(readlink -f "$kernel") -f value -c id) fi if [ -n "$ramdisk" ]; then - ramdisk_id=$(openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name-ramdisk" $(_image_properties_to_arg $img_property) --public --container-format ari --disk-format ari < "$ramdisk" -f value -c id) + ramdisk_id=$(openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name-ramdisk" $(_image_properties_to_arg $img_property) --public --container-format ari --disk-format ari --file $(readlink -f "$ramdisk") -f value -c id) fi _upload_image "${image_name%.img}" ami ami "$image" ${kernel_id:+ kernel_id=$kernel_id} ${ramdisk_id:+ ramdisk_id=$ramdisk_id} $img_property fi From c80b9f4fc16997631696100a8e468d907a177f7d Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Mon, 13 May 2024 11:04:45 +0200 Subject: [PATCH 404/574] Drop reno devstack doesn't do releases, so there should be no release notes, either. Drop the one that was accidentally created to avoid confusion. Change-Id: I75a295e50c36925a0137a5458444fb48bd5d9f8a --- ...NotImplementedError-on-SQLAlchemy-2-21bb6dcdf3ce4225.yaml | 5 ----- 1 file changed, 5 deletions(-) delete mode 100644 releasenotes/notes/Fix-dbcounter-NotImplementedError-on-SQLAlchemy-2-21bb6dcdf3ce4225.yaml diff --git a/releasenotes/notes/Fix-dbcounter-NotImplementedError-on-SQLAlchemy-2-21bb6dcdf3ce4225.yaml b/releasenotes/notes/Fix-dbcounter-NotImplementedError-on-SQLAlchemy-2-21bb6dcdf3ce4225.yaml deleted file mode 100644 index f815e14ccb..0000000000 --- a/releasenotes/notes/Fix-dbcounter-NotImplementedError-on-SQLAlchemy-2-21bb6dcdf3ce4225.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - | - Fixes a NotImplementedError when using the dbcounter SQLAlchemy plugin on - SQLAlchemy 2.x. From d5182ce3fcf5caf8f7dca84217b2c3cb70993df7 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Fri, 19 Apr 2024 12:27:14 +0100 Subject: [PATCH 405/574] lib/apache: Pass name, not path, to remove_uwsgi_config We'd like to move from configuring uWSGI with '.wsgi' files to configuring with module paths. Do this for all in-tree services and log a deprecation warning for anyone still passing a path. Note that since 'basepath foo' returns 'foo', this is effectively a no-op for the services being converted here. 
Change-Id: Ia1ad5ff160a9821ceab97ff1c24bc48cd4bf1d6f Signed-off-by: Stephen Finucane --- lib/apache | 6 ++++++ lib/cinder | 2 +- lib/glance | 2 +- lib/keystone | 2 +- lib/neutron | 2 +- lib/nova | 4 ++-- lib/placement | 2 +- 7 files changed, 13 insertions(+), 7 deletions(-) diff --git a/lib/apache b/lib/apache index a314b76fb7..48438da6a1 100644 --- a/lib/apache +++ b/lib/apache @@ -402,8 +402,14 @@ function remove_uwsgi_config { local conf=$1 local wsgi=$2 local name="" + # TODO(stephenfin): Remove this call when everyone is using module path + # configuration instead of file path configuration name=$(basename $wsgi) + if [[ "$wsgi" = /* ]]; then + deprecated "Passing a wsgi script to remove_uwsgi_config is deprecated, pass an application name instead" + fi + rm -rf $conf disable_apache_site $name } diff --git a/lib/cinder b/lib/cinder index f7824eb6f4..ae898e9522 100644 --- a/lib/cinder +++ b/lib/cinder @@ -275,7 +275,7 @@ function cleanup_cinder { fi stop_process "c-api" - remove_uwsgi_config "$CINDER_UWSGI_CONF" "$CINDER_UWSGI" + remove_uwsgi_config "$CINDER_UWSGI_CONF" "cinder-wsgi" } # configure_cinder() - Set config files, create data dirs, etc diff --git a/lib/glance b/lib/glance index 8ee842625f..274687112e 100644 --- a/lib/glance +++ b/lib/glance @@ -168,7 +168,7 @@ function cleanup_glance { # Cleanup reserved stores directories sudo rm -rf $GLANCE_STAGING_DIR $GLANCE_TASKS_DIR fi - remove_uwsgi_config "$GLANCE_UWSGI_CONF" "$GLANCE_UWSGI" + remove_uwsgi_config "$GLANCE_UWSGI_CONF" "glance-wsgi-api" } # Set multiple cinder store related config options for each of the cinder store diff --git a/lib/keystone b/lib/keystone index 6cb4aac46a..7d6b05fd41 100644 --- a/lib/keystone +++ b/lib/keystone @@ -150,7 +150,7 @@ function cleanup_keystone { sudo rm -f $(apache_site_config_for keystone) else stop_process "keystone" - remove_uwsgi_config "$KEYSTONE_PUBLIC_UWSGI_CONF" "$KEYSTONE_PUBLIC_UWSGI" + remove_uwsgi_config "$KEYSTONE_PUBLIC_UWSGI_CONF" "keystone-wsgi-public" sudo rm -f $(apache_site_config_for keystone-wsgi-public) fi } diff --git a/lib/neutron b/lib/neutron index bc77f161d7..ed854fdd66 100644 --- a/lib/neutron +++ b/lib/neutron @@ -823,7 +823,7 @@ function cleanup_neutron { if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then stop_process neutron-api stop_process neutron-rpc-server - remove_uwsgi_config "$NEUTRON_UWSGI_CONF" "$NEUTRON_BIN_DIR/neutron-api" + remove_uwsgi_config "$NEUTRON_UWSGI_CONF" "neutron-api" sudo rm -f $(apache_site_config_for neutron-api) fi diff --git a/lib/nova b/lib/nova index a261fac8f6..ee3f29eebf 100644 --- a/lib/nova +++ b/lib/nova @@ -248,8 +248,8 @@ function cleanup_nova { stop_process "n-api" stop_process "n-api-meta" - remove_uwsgi_config "$NOVA_UWSGI_CONF" "$NOVA_UWSGI" - remove_uwsgi_config "$NOVA_METADATA_UWSGI_CONF" "$NOVA_METADATA_UWSGI" + remove_uwsgi_config "$NOVA_UWSGI_CONF" "nova-api" + remove_uwsgi_config "$NOVA_METADATA_UWSGI_CONF" "nova-metadata" if [[ "$NOVA_BACKEND" == "LVM" ]]; then clean_lvm_volume_group $DEFAULT_VOLUME_GROUP_NAME diff --git a/lib/placement b/lib/placement index c6bf99f868..63fdfb6c1a 100644 --- a/lib/placement +++ b/lib/placement @@ -68,7 +68,7 @@ function is_placement_enabled { # runs that a clean run would need to clean up function cleanup_placement { sudo rm -f $(apache_site_config_for placement-api) - remove_uwsgi_config "$PLACEMENT_UWSGI_CONF" "$PLACEMENT_UWSGI" + remove_uwsgi_config "$PLACEMENT_UWSGI_CONF" "placement-api" } # _config_placement_apache_wsgi() - Set WSGI config files From 
a6f3901a4bf81f3fe9f6132629bc552e179dd8c9 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Wed, 6 Dec 2023 17:20:37 +0000 Subject: [PATCH 406/574] lib/apache: Reshuffle lines Make it a little more obvious what the difference between the two helper functions is. Change-Id: I07ec34ecfcd2b7925485145c4b4bf68eda385a32 Signed-off-by: Stephen Finucane --- lib/apache | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/apache b/lib/apache index 48438da6a1..1420f76ff2 100644 --- a/lib/apache +++ b/lib/apache @@ -345,15 +345,15 @@ function write_local_uwsgi_http_config { # Set die-on-term & exit-on-reload so that uwsgi shuts down iniset "$conf" uwsgi die-on-term true iniset "$conf" uwsgi exit-on-reload false + # Set worker-reload-mercy so that worker will not exit till the time + # configured after graceful shutdown + iniset "$conf" uwsgi worker-reload-mercy $WORKER_TIMEOUT iniset "$conf" uwsgi enable-threads true iniset "$conf" uwsgi plugins http,python3 # uwsgi recommends this to prevent thundering herd on accept. iniset "$conf" uwsgi thunder-lock true # Set hook to trigger graceful shutdown on SIGTERM iniset "$conf" uwsgi hook-master-start "unix_signal:15 gracefully_kill_them_all" - # Set worker-reload-mercy so that worker will not exit till the time - # configured after graceful shutdown - iniset "$conf" uwsgi worker-reload-mercy $WORKER_TIMEOUT # Override the default size for headers from the 4k default. iniset "$conf" uwsgi buffer-size 65535 # Make sure the client doesn't try to re-use the connection. From 9a97326c3f3b04728cf4484df37ce7260f6367af Mon Sep 17 00:00:00 2001 From: Ben Nemec Date: Mon, 12 Aug 2019 20:10:49 +0000 Subject: [PATCH 407/574] Use OSCaaS to speed up devstack runs OpenStackClient has a significant amount of startup overhead, which adds a non-trivial amount of time to each devstack run because it makes a lot of OSC calls. This change uses the OSC service from [0] to run a persistent process that handles openstack calls. This removes most of the startup overhead and in my local testing removes about three minutes per devstack run. Currently this is implemented as an opt-in feature. There are likely a lot of edge cases in projects that use a devstack plugin so turning it on universally is going to require boiling the ocean. I think getting this in and enabled for some of the major projects should give us a lot of the benefit without the enormous effort of making it 100% compatible across all of OpenStack. Depends-On: https://review.opendev.org/c/openstack/nova/+/918689 Depends-On: https://review.opendev.org/c/openstack/ironic/+/918690 Change-Id: I28e6159944746abe2d320369249b87f1c4b9e24e 0: http://lists.openstack.org/pipermail/openstack-dev/2016-April/092546.html --- files/openstack-cli-server/openstack | 119 ++++++++++++++++++ .../openstack-cli-server/openstack-cli-server | 118 +++++++++++++++++ functions-common | 5 + stack.sh | 3 + unstack.sh | 4 + 5 files changed, 249 insertions(+) create mode 100755 files/openstack-cli-server/openstack create mode 100755 files/openstack-cli-server/openstack-cli-server diff --git a/files/openstack-cli-server/openstack b/files/openstack-cli-server/openstack new file mode 100755 index 0000000000..ef05f1b841 --- /dev/null +++ b/files/openstack-cli-server/openstack @@ -0,0 +1,119 @@ +#!/usr/bin/env python3 +# Copyright 2016 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import socket +import sys +import os +import os.path +import json + +server_address = "/tmp/openstack.sock" + +sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + +try: + sock.connect(server_address) +except socket.error as msg: + print(msg, file=sys.stderr) + sys.exit(1) + + +def send(sock, doc): + jdoc = json.dumps(doc) + sock.send(b'%d\n' % len(jdoc)) + sock.sendall(jdoc.encode('utf-8')) + +def recv(sock): + length_str = b'' + + char = sock.recv(1) + if len(char) == 0: + print("Unexpected end of file", file=sys.stderr) + sys.exit(1) + + while char != b'\n': + length_str += char + char = sock.recv(1) + if len(char) == 0: + print("Unexpected end of file", file=sys.stderr) + sys.exit(1) + + total = int(length_str) + + # use a memoryview to receive the data chunk by chunk efficiently + jdoc = memoryview(bytearray(total)) + next_offset = 0 + while total - next_offset > 0: + recv_size = sock.recv_into(jdoc[next_offset:], total - next_offset) + next_offset += recv_size + try: + doc = json.loads(jdoc.tobytes()) + except (TypeError, ValueError) as e: + raise Exception('Data received was not in JSON format') + return doc + +try: + env = {} + passenv = ["CINDER_VERSION", + "OS_AUTH_URL", + "OS_IDENTITY_API_VERSION", + "OS_NO_CACHE", + "OS_PASSWORD", + "OS_PROJECT_NAME", + "OS_REGION_NAME", + "OS_TENANT_NAME", + "OS_USERNAME", + "OS_VOLUME_API_VERSION", + "OS_CLOUD"] + for name in passenv: + if name in os.environ: + env[name] = os.environ[name] + + cmd = { + "app": os.path.basename(sys.argv[0]), + "env": env, + "argv": sys.argv[1:] + } + try: + image_idx = sys.argv.index('image') + create_idx = sys.argv.index('create') + missing_file = image_idx < create_idx and \ + not any(x.startswith('--file') for x in sys.argv) + except ValueError: + missing_file = False + + if missing_file: + # This means we were called with an image create command, but were + # not provided a --file option. That likely means we're being passed + # the image data to stdin, which won't work because we do not proxy + # stdin to the server. So, we just reject the operation and ask the + # caller to provide the file with --file instead. + # We've already connected to the server, we need to send it some dummy + # data so it doesn't wait forever. + send(sock, {}) + print('Image create without --file is not allowed in server mode', + file=sys.stderr) + sys.exit(1) + else: + send(sock, cmd) + + doc = recv(sock) + if doc["stdout"] != b'': + print(doc["stdout"], end='') + if doc["stderr"] != b'': + print(doc["stderr"], file=sys.stderr) + sys.exit(doc["status"]) +finally: + sock.close() diff --git a/files/openstack-cli-server/openstack-cli-server b/files/openstack-cli-server/openstack-cli-server new file mode 100755 index 0000000000..f3d2747e52 --- /dev/null +++ b/files/openstack-cli-server/openstack-cli-server @@ -0,0 +1,118 @@ +#!/usr/bin/env python3 +# Copyright 2016 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import socket +import sys +import os +import json + +from openstackclient import shell as osc_shell +from io import StringIO + +server_address = "/tmp/openstack.sock" + +try: + os.unlink(server_address) +except OSError: + if os.path.exists(server_address): + raise + +sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) +print('starting up on %s' % server_address, file=sys.stderr) +sock.bind(server_address) + +# Listen for incoming connections +sock.listen(1) + +def send(sock, doc): + jdoc = json.dumps(doc) + sock.send(b'%d\n' % len(jdoc)) + sock.sendall(jdoc.encode('utf-8')) + +def recv(sock): + length_str = b'' + char = sock.recv(1) + while char != b'\n': + length_str += char + char = sock.recv(1) + + total = int(length_str) + + # use a memoryview to receive the data chunk by chunk efficiently + jdoc = memoryview(bytearray(total)) + next_offset = 0 + while total - next_offset > 0: + recv_size = sock.recv_into(jdoc[next_offset:], total - next_offset) + next_offset += recv_size + try: + doc = json.loads(jdoc.tobytes()) + except (TypeError, ValueError) as e: + raise Exception('Data received was not in JSON format') + return doc + +while True: + csock, client_address = sock.accept() + try: + doc = recv(csock) + + print("%s %s" % (doc["app"], doc["argv"]), file=sys.stderr) + oldenv = {} + for name in doc["env"].keys(): + oldenv[name] = os.environ.get(name, None) + os.environ[name] = doc["env"][name] + + try: + old_stdout = sys.stdout + old_stderr = sys.stderr + my_stdout = sys.stdout = StringIO() + my_stderr = sys.stderr = StringIO() + + class Exit(BaseException): + def __init__(self, status): + self.status = status + + def noexit(stat): + raise Exit(stat) + + sys.exit = noexit + + if doc["app"] == "openstack": + sh = osc_shell.OpenStackShell() + ret = sh.run(doc["argv"]) + else: + print("Unknown application %s" % doc["app"], file=sys.stderr) + ret = 1 + except Exit as e: + ret = e.status + finally: + sys.stdout = old_stdout + sys.stderr = old_stderr + + for name in oldenv.keys(): + if oldenv[name] is None: + del os.environ[name] + else: + os.environ[name] = oldenv[name] + + send(csock, { + "stdout": my_stdout.getvalue(), + "stderr": my_stderr.getvalue(), + "status": ret, + }) + + except BaseException as e: + print(e, file=sys.stderr) + finally: + csock.close() diff --git a/functions-common b/functions-common index 8ea6df7c1d..84d281b21e 100644 --- a/functions-common +++ b/functions-common @@ -2438,6 +2438,11 @@ function time_stop { _TIME_TOTAL[$name]=$(($total + $elapsed_time)) } +function install_openstack_cli_server { + export PATH=$TOP_DIR/files/openstack-cli-server:$PATH + run_process openstack-cli-server "$PYTHON $TOP_DIR/files/openstack-cli-server/openstack-cli-server" +} + function oscwrap { local xtrace xtrace=$(set +o | grep xtrace) diff --git a/stack.sh b/stack.sh index c6652e5c6a..0c36e1034e 100755 --- a/stack.sh +++ b/stack.sh @@ -1022,6 +1022,9 @@ if use_library_from_git "python-openstackclient"; then setup_dev_lib "python-openstackclient" else pip_install_gr python-openstackclient + if is_service_enabled openstack-cli-server; then + install_openstack_cli_server + 
fi fi # Installs alias for osc so that we can collect timing for all diff --git a/unstack.sh b/unstack.sh index 33b069b6a3..1b2d8dd62a 100755 --- a/unstack.sh +++ b/unstack.sh @@ -168,6 +168,10 @@ if is_service_enabled etcd3; then cleanup_etcd3 fi +if is_service_enabled openstack-cli-server; then + stop_service devstack@openstack-cli-server +fi + stop_dstat # NOTE: Cinder automatically installs the lvm2 package, independently of the From 6971ccc49ad97216d97be46c70c241c5473aff92 Mon Sep 17 00:00:00 2001 From: MinhNLH2 Date: Wed, 1 May 2024 21:29:15 +0700 Subject: [PATCH 408/574] Display backup dashboard on Horizon when c-bak is enabled Currently, when enabling c-bak service, the backup tab will not be shown on Horizon by default. This patch tells Horizon to display backup dashboard when c-bak is enabled. Closes-Bug: 2064496 Change-Id: I06295706e985bac58de2878c6d24c51f3267c205 Signed-off-by: MinhNLH2 --- lib/horizon | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/horizon b/lib/horizon index 6f753f546f..7c0d443aa6 100644 --- a/lib/horizon +++ b/lib/horizon @@ -109,6 +109,10 @@ function configure_horizon { _horizon_config_set $local_settings "" OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT "True" fi + if is_service_enabled c-bak; then + _horizon_config_set $local_settings OPENSTACK_CINDER_FEATURES enable_backup "True" + fi + # Create an empty directory that apache uses as docroot sudo mkdir -p $HORIZON_DIR/.blackhole From fadf63e4a962e4922cdf529c17231fbb49f91e89 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Thu, 16 May 2024 02:37:02 +0000 Subject: [PATCH 409/574] Updated from generate-devstack-plugins-list Change-Id: Ifa6db2e765f5f15a1d7421eef061377e55b58ec7 --- doc/source/plugin-registry.rst | 6 ------ 1 file changed, 6 deletions(-) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 2d2a92c4a9..21cf52c736 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -37,7 +37,6 @@ openstack/devstack-plugin-container `https://opendev.org/openstack/devstack openstack/devstack-plugin-kafka `https://opendev.org/openstack/devstack-plugin-kafka `__ openstack/devstack-plugin-nfs `https://opendev.org/openstack/devstack-plugin-nfs `__ openstack/devstack-plugin-open-cas `https://opendev.org/openstack/devstack-plugin-open-cas `__ -openstack/ec2-api `https://opendev.org/openstack/ec2-api `__ openstack/freezer `https://opendev.org/openstack/freezer `__ openstack/freezer-api `https://opendev.org/openstack/freezer-api `__ openstack/freezer-tempest-plugin `https://opendev.org/openstack/freezer-tempest-plugin `__ @@ -62,7 +61,6 @@ openstack/mistral `https://opendev.org/openstack/mistral openstack/monasca-api `https://opendev.org/openstack/monasca-api `__ openstack/monasca-events-api `https://opendev.org/openstack/monasca-events-api `__ openstack/monasca-tempest-plugin `https://opendev.org/openstack/monasca-tempest-plugin `__ -openstack/murano `https://opendev.org/openstack/murano `__ openstack/networking-bagpipe `https://opendev.org/openstack/networking-bagpipe `__ openstack/networking-baremetal `https://opendev.org/openstack/networking-baremetal `__ openstack/networking-bgpvpn `https://opendev.org/openstack/networking-bgpvpn `__ @@ -84,12 +82,8 @@ openstack/osprofiler `https://opendev.org/openstack/osprofil openstack/ovn-bgp-agent `https://opendev.org/openstack/ovn-bgp-agent `__ openstack/ovn-octavia-provider `https://opendev.org/openstack/ovn-octavia-provider `__ openstack/rally-openstack `https://opendev.org/openstack/rally-openstack 
`__ -openstack/sahara `https://opendev.org/openstack/sahara `__ -openstack/sahara-dashboard `https://opendev.org/openstack/sahara-dashboard `__ -openstack/senlin `https://opendev.org/openstack/senlin `__ openstack/shade `https://opendev.org/openstack/shade `__ openstack/skyline-apiserver `https://opendev.org/openstack/skyline-apiserver `__ -openstack/solum `https://opendev.org/openstack/solum `__ openstack/storlets `https://opendev.org/openstack/storlets `__ openstack/tacker `https://opendev.org/openstack/tacker `__ openstack/tap-as-a-service `https://opendev.org/openstack/tap-as-a-service `__ From 608489cd59b0d0f6f82937abb6a317489ac4d7a4 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Tue, 28 May 2024 13:27:14 +0100 Subject: [PATCH 410/574] openrc: Stop setting OS_TENANT_NAME MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit All clients - OSC included - use keystoneauth under the hood which hasn't required this in a very long time. Stop setting it and remove the warning. We also remove references to 'NOVA_*' variables that haven't been a thing since well before *I* started working on OpenStack 😅 Change-Id: I882081040215d8e32932ec5d03be34e467e4fbc2 Signed-off-by: Stephen Finucane --- openrc | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) diff --git a/openrc b/openrc index 6d488bb0ba..b72bedbea4 100644 --- a/openrc +++ b/openrc @@ -7,9 +7,6 @@ # Set OS_USERNAME to override the default user name 'demo' # Set ADMIN_PASSWORD to set the password for 'admin' and 'demo' -# NOTE: support for the old NOVA_* novaclient environment variables has -# been removed. - if [[ -n "$1" ]]; then OS_USERNAME=$1 fi @@ -36,22 +33,14 @@ fi source $RC_DIR/lib/tls # The OpenStack ecosystem has standardized the term **project** as the -# entity that owns resources. In some places **tenant** remains -# referenced, but in all cases this just means **project**. We will -# warn if we need to turn on legacy **tenant** support to have a -# working environment. +# entity that owns resources. export OS_PROJECT_NAME=${OS_PROJECT_NAME:-demo} -echo "WARNING: setting legacy OS_TENANT_NAME to support cli tools." -export OS_TENANT_NAME=$OS_PROJECT_NAME - # In addition to the owning entity (project), nova stores the entity performing # the action as the **user**. export OS_USERNAME=${OS_USERNAME:-demo} # With Keystone you pass the keystone password instead of an api key. -# Recent versions of novaclient use OS_PASSWORD instead of NOVA_API_KEYs -# or NOVA_PASSWORD. export OS_PASSWORD=${ADMIN_PASSWORD:-secret} # Region From 9fff87fbc7c972d18b9bf59847b61b0bbd8e4dd9 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Tue, 28 May 2024 13:33:32 +0100 Subject: [PATCH 411/574] openrc: Group auth-related options together Change-Id: I98f283b33c2350cc4388463571013896086b31fa Signed-off-by: Stephen Finucane --- openrc | 14 ++------------ 1 file changed, 2 insertions(+), 12 deletions(-) diff --git a/openrc b/openrc index b72bedbea4..e20a5a56b9 100644 --- a/openrc +++ b/openrc @@ -32,18 +32,11 @@ fi # Get some necessary configuration source $RC_DIR/lib/tls -# The OpenStack ecosystem has standardized the term **project** as the -# entity that owns resources. +# Minimal configuration +export OS_AUTH_TYPE=password export OS_PROJECT_NAME=${OS_PROJECT_NAME:-demo} - -# In addition to the owning entity (project), nova stores the entity performing -# the action as the **user**. 
export OS_USERNAME=${OS_USERNAME:-demo} - -# With Keystone you pass the keystone password instead of an api key. export OS_PASSWORD=${ADMIN_PASSWORD:-secret} - -# Region export OS_REGION_NAME=${REGION_NAME:-RegionOne} # Set the host API endpoint. This will default to HOST_IP if SERVICE_IP_VERSION @@ -65,9 +58,6 @@ fi # Identity API version export OS_IDENTITY_API_VERSION=3 -# Ask keystoneauth1 to use keystone -export OS_AUTH_TYPE=password - # Authenticating against an OpenStack cloud using Keystone returns a **Token** # and **Service Catalog**. The catalog contains the endpoints for all services # the user/project has access to - including nova, glance, keystone, swift, ... From 5412dbfe7b797149f1f68100de8003b1876398fe Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Tue, 28 May 2024 13:35:28 +0100 Subject: [PATCH 412/574] stackrc: Remove USE_PYTHON3 This is no longer necessary and any users of this should be updated to remove references. Change-Id: Ice5083d8897376fd2ed6bd509419526e15baaf12 Signed-off-by: Stephen Finucane --- stackrc | 4 ---- 1 file changed, 4 deletions(-) diff --git a/stackrc b/stackrc index de81f01f38..b37959712b 100644 --- a/stackrc +++ b/stackrc @@ -126,10 +126,6 @@ if [[ -r $RC_DIR/.localrc.password ]]; then source $RC_DIR/.localrc.password fi -# Control whether Python 3 should be used at all. -# TODO(frickler): Drop this when all consumers are fixed -export USE_PYTHON3=True - # Adding the specific version of Python 3 to this variable will install # the app using that version of the interpreter instead of just 3. _DEFAULT_PYTHON3_VERSION="$(_get_python_version python3)" From b500d80c7641583039188baf62c215676e3d81db Mon Sep 17 00:00:00 2001 From: Slawek Kaplonski Date: Fri, 14 Jun 2024 12:58:58 +0200 Subject: [PATCH 413/574] Fix deployment of the neutron with uwsgi After patch [1] deploying neutron with uwsgi was not working correctly due to the fact that there was different paths for the applications set in the api-paste.ini file. Instead of default ones like: /: neutronversions_composite /healthcheck: healthcheck /v2.0: neutronapi_v2_0 it was changing it to something like: /networking/: neutronversions_composite /networking/healthcheck: healthcheck /networking/v2.0: neutronapi_v2_0 where 'networking' can be configured to something else. This patch fixes deployment of neutron with uwsgi by not changing its api-paste.ini file when NEUTRON_DEPLOY_MOD_WSGI=True. [1] https://review.opendev.org/c/openstack/devstack/+/849145 Closes-bug: #2069418 Change-Id: I12b860d4d98442e2b5ac0c9fd854f1226633b518 --- lib/neutron | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron b/lib/neutron index 808043cebe..021ffeb11e 100644 --- a/lib/neutron +++ b/lib/neutron @@ -1002,7 +1002,7 @@ function _configure_neutron_service { Q_API_PASTE_FILE=$NEUTRON_CONF_DIR/api-paste.ini cp $NEUTRON_DIR/etc/api-paste.ini $Q_API_PASTE_FILE - if [[ -n "$NEUTRON_ENDPOINT_SERVICE_NAME" ]]; then + if [[ "$NEUTRON_DEPLOY_MOD_WSGI" == "False" && -n "$NEUTRON_ENDPOINT_SERVICE_NAME" ]]; then _replace_api_paste_composite fi From 4d69238383c45c862d588cfe1e0234e6a13a1220 Mon Sep 17 00:00:00 2001 From: yatinkarel Date: Fri, 21 Jun 2024 18:27:32 +0530 Subject: [PATCH 414/574] Fix rdo_release for unmaintained branches Only branches with stable/ as prefix were considered but now we have branches even with different prefix like unmaintained/, fix it to consider such cases by using a generic filter instead of assuming branch name starts with stable. 
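To make the behaviour difference concrete, here is a small standalone sketch (not part of the change) comparing the old sed-based stripping with the generic parameter expansion; the branch names are examples only, and in stack.sh the expansion is reached only for non-master branches, as the diff below shows.

    #!/usr/bin/env bash
    # Sketch only; example branch names, not an exhaustive list.
    for TARGET_BRANCH in stable/2024.1 unmaintained/zed master; do
        old=$(echo "$TARGET_BRANCH" | sed "s|stable/||g")  # strips only "stable/"
        new=${TARGET_BRANCH#*/}                            # strips any "<prefix>/"
        printf '%-18s old=%-18s new=%s\n' "$TARGET_BRANCH" "$old" "$new"
    done
    # "unmaintained/zed" now yields "zed" rather than the unmodified branch name.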
Change-Id: I967de13094ff6df46737a22d4e1758f9900dfbc9 --- stack.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/stack.sh b/stack.sh index 0c36e1034e..740682920c 100755 --- a/stack.sh +++ b/stack.sh @@ -307,8 +307,8 @@ function _install_rdo { # rdo-release.el8.rpm points to latest RDO release, use that for master sudo dnf -y install https://rdoproject.org/repos/rdo-release.el8.rpm else - # For stable branches use corresponding release rpm - rdo_release=$(echo $TARGET_BRANCH | sed "s|stable/||g") + # For stable/unmaintained branches use corresponding release rpm + rdo_release=${TARGET_BRANCH#*/} sudo dnf -y install https://rdoproject.org/repos/openstack-${rdo_release}/rdo-release-${rdo_release}.el8.rpm fi elif [[ $DISTRO == "rhel9" ]]; then @@ -316,8 +316,8 @@ function _install_rdo { # rdo-release.el9.rpm points to latest RDO release, use that for master sudo dnf -y install https://rdoproject.org/repos/rdo-release.el9.rpm else - # For stable branches use corresponding release rpm - rdo_release=$(echo $TARGET_BRANCH | sed "s|stable/||g") + # For stable/unmaintained branches use corresponding release rpm + rdo_release=${TARGET_BRANCH#*/} sudo dnf -y install https://rdoproject.org/repos/openstack-${rdo_release}/rdo-release-${rdo_release}.el9.rpm fi fi From 56368c271d5915af76e8e5d2b0bd873d09ba3a49 Mon Sep 17 00:00:00 2001 From: Rodolfo Alonso Hernandez Date: Mon, 17 Jun 2024 15:10:40 +0000 Subject: [PATCH 415/574] [Neutron] Add a new Neutron service: neutron-periodic-workers This new service is spawned when using Neutron WSGI module. This new service executes the plugin workers inside a wrapper executor class called ``AllServicesNeutronWorker``. The workers are executed as threads inside the process. Depends-On: https://review.opendev.org/c/openstack/neutron/+/922110 Related-Bug: #2069581 Change-Id: I6b76b7bcee1365c80f76231e0311406831f8ce41 --- lib/neutron | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/neutron b/lib/neutron index 8b65980e90..e0b5d5d68c 100644 --- a/lib/neutron +++ b/lib/neutron @@ -634,7 +634,9 @@ function start_neutron_service_and_check { run_process neutron-api "$(which uwsgi) --procname-prefix neutron-api --ini $NEUTRON_UWSGI_CONF" neutron_url=$Q_PROTOCOL://$Q_HOST/ enable_service neutron-rpc-server + enable_service neutron-periodic-workers run_process neutron-rpc-server "$NEUTRON_BIN_DIR/neutron-rpc-server $cfg_file_options" + run_process neutron-periodic-workers "$NEUTRON_BIN_DIR/neutron-periodic-workers $cfg_file_options" else run_process q-svc "$NEUTRON_BIN_DIR/neutron-server $cfg_file_options" neutron_url=$service_protocol://$Q_HOST:$service_port/ @@ -706,6 +708,7 @@ function stop_other { if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then stop_process neutron-rpc-server + stop_process neutron-periodic-workers stop_process neutron-api else stop_process q-svc @@ -823,6 +826,7 @@ function cleanup_neutron { if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then stop_process neutron-api stop_process neutron-rpc-server + stop_process neutron-periodic-workers remove_uwsgi_config "$NEUTRON_UWSGI_CONF" "neutron-api" sudo rm -f $(apache_site_config_for neutron-api) fi From 41d253a6f94c1646f2bd28ac373d6aaf8bfa6089 Mon Sep 17 00:00:00 2001 From: Sean Mooney Date: Thu, 20 Jun 2024 19:03:37 +0100 Subject: [PATCH 416/574] add ubuntu noble (24.04) support This change installs setuptools in the requirements and global venv to ensure that distutils is present This change also adds new single and two node nodeset for noble and a devstack platform job as 
nonvoting. Change-Id: Ie1f8ebc5db75d6913239c529ee923395a764e19c --- .zuul.yaml | 52 ++++++++++++++++++++++++++++++++++++++++++++++++++++ inc/python | 2 +- lib/infra | 2 +- stack.sh | 2 +- 4 files changed, 55 insertions(+), 3 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 294dd48f4d..50a34ae0b3 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -8,6 +8,16 @@ nodes: - controller +- nodeset: + name: openstack-single-node-noble + nodes: + - name: controller + label: ubuntu-noble + groups: + - name: tempest + nodes: + - controller + - nodeset: name: openstack-single-node-focal nodes: @@ -148,6 +158,36 @@ nodes: - compute1 +- nodeset: + name: openstack-two-node-noble + nodes: + - name: controller + label: ubuntu-noble + - name: compute1 + label: ubuntu-noble + groups: + # Node where tests are executed and test results collected + - name: tempest + nodes: + - controller + # Nodes running the compute service + - name: compute + nodes: + - controller + - compute1 + # Nodes that are not the controller + - name: subnode + nodes: + - compute1 + # Switch node for multinode networking setup + - name: switch + nodes: + - controller + # Peer nodes for multinode networking setup + - name: peers + nodes: + - compute1 + - nodeset: name: openstack-two-node-focal nodes: @@ -652,6 +692,17 @@ vars: configure_swap_size: 4096 + +- job: + name: devstack-platform-ubuntu-noble + parent: tempest-full-py3 + description: Ubuntu 24.04 LTS (noble) platform test + nodeset: openstack-single-node-noble + timeout: 9000 + voting: false + vars: + configure_swap_size: 8192 + - job: name: devstack-platform-ubuntu-jammy-ovn-source parent: devstack-platform-ubuntu-jammy @@ -849,6 +900,7 @@ - devstack-platform-rocky-blue-onyx - devstack-platform-ubuntu-jammy-ovn-source - devstack-platform-ubuntu-jammy-ovs + - devstack-platform-ubuntu-noble - devstack-platform-openEuler-22.03-ovn-source - devstack-platform-openEuler-22.03-ovs - devstack-multinode diff --git a/inc/python b/inc/python index 43b06eb520..2339afdd6d 100644 --- a/inc/python +++ b/inc/python @@ -41,7 +41,7 @@ function setup_devstack_virtualenv { # This package is currently installed via the distro and not # available on pypi. python$PYTHON3_VERSION -m venv --system-site-packages $DEVSTACK_VENV - pip_install -U pip + pip_install -U pip setuptools fi if [[ ":$PATH:" != *":$DEVSTACK_VENV/bin:"* ]] ; then export PATH="$DEVSTACK_VENV/bin:$PATH" diff --git a/lib/infra b/lib/infra index b983f2b739..2aad00354a 100644 --- a/lib/infra +++ b/lib/infra @@ -31,7 +31,7 @@ function install_infra { local PIP_VIRTUAL_ENV="$REQUIREMENTS_DIR/.venv" [ ! -d $PIP_VIRTUAL_ENV ] && ${VIRTUALENV_CMD} $PIP_VIRTUAL_ENV # We don't care about testing git pbr in the requirements venv. - PIP_VIRTUAL_ENV=$PIP_VIRTUAL_ENV pip_install -U pbr + PIP_VIRTUAL_ENV=$PIP_VIRTUAL_ENV pip_install -U pbr setuptools PIP_VIRTUAL_ENV=$PIP_VIRTUAL_ENV pip_install $REQUIREMENTS_DIR # Unset the PIP_VIRTUAL_ENV so that PBR does not end up trapped diff --git a/stack.sh b/stack.sh index 0c36e1034e..6ae324bb0c 100755 --- a/stack.sh +++ b/stack.sh @@ -230,7 +230,7 @@ write_devstack_version # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -SUPPORTED_DISTROS="bookworm|bullseye|jammy|rhel8|rhel9|openEuler-22.03" +SUPPORTED_DISTROS="bookworm|bullseye|jammy|noble|rhel8|rhel9|openEuler-22.03" if [[ ! 
${DISTRO} =~ $SUPPORTED_DISTROS ]]; then echo "WARNING: this script has not been tested on $DISTRO" From db305d2a4bb36c3d3a4ef4a108069cd77bca540e Mon Sep 17 00:00:00 2001 From: Sean Mooney Date: Mon, 24 Jun 2024 15:27:58 +0100 Subject: [PATCH 417/574] enable openstack-cli-server and other perfromace tunings This commit enabeles a number of performance optimizations to tune the host vms memory and io by leveraging zswap and other kernel parmaters to minimize the effect of io latency and memory pressure. The openstack-cli-server has been enabled in the nova ci for several months now and has proven to speed up devstack signifcantly, while this change does not enable it by default in devstack it does enable it by default in the ci jobs. simiarly the zswap and other tuning remain disabled by default in devstack but are enabled by default in the devstack job. This change limits the qemu tb_cache_size to 128MB form 1G, this requires libvirt 8.0.0 or newer. as bullseye and openeuler-22.03 do not meet that requirement they have been removed. libvirt 8.0.0 will be the new min version supported in nova in the 2025.1 release so the decions was made to drop supprot for older release now instead of doing it at the start of the 2025.1 cycle. debain coverage is still provided by the newer bookworm relase. openeuler-22.03 has been superseded by the openeuler-24.03 lts release. openeuler-24.03 is not currnetly aviable in ci but supprot could be readded if desired however that is out os scope of this change. Change-Id: Ib45ca08c7e3e833b14f7e6ec496ad2d2f7073f99 --- .zuul.yaml | 148 +++++++++++++++++++++++------------------------------ stack.sh | 2 +- 2 files changed, 64 insertions(+), 86 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 50a34ae0b3..06d76d0093 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -68,6 +68,9 @@ nodes: - controller +# Note(sean-k-mooney): this is still used by horizon for +# horizon-integration-tests, horizon-integration-pytest and +# horizon-ui-pytest, remove when horizon is updated. - nodeset: name: devstack-single-node-debian-bullseye nodes: @@ -88,16 +91,6 @@ nodes: - controller -- nodeset: - name: devstack-single-node-openeuler-22.03 - nodes: - - name: controller - label: openEuler-22-03-LTS - groups: - - name: tempest - nodes: - - controller - - nodeset: name: openstack-two-node-centos-9-stream nodes: @@ -463,6 +456,7 @@ file_tracker: true mysql: true rabbit: true + openstack-cli-server: true group-vars: subnode: devstack_services: @@ -470,6 +464,7 @@ dstat: false memory_tracker: true file_tracker: true + openstack-cli-server: true devstack_localrc: # Multinode specific settings HOST_IP: "{{ hostvars[inventory_hostname]['nodepool']['private_ipv4'] }}" @@ -517,7 +512,14 @@ - opendev.org/openstack/swift timeout: 7200 vars: - configure_swap_size: 4096 + # based on observation of the integrated gate + # tempest-integrated-compute was only using ~1.7GB of swap + # when zswap and the host turning are enabled that increase + # slightly to ~2GB. we are setting the swap size to 8GB to + # be safe and account for more complex scenarios. + # we should revisit this value after some time to see if we + # can reduce it. 
+ configure_swap_size: 8192 devstack_localrc: # Common OpenStack services settings SWIFT_REPLICAS: 1 @@ -526,11 +528,33 @@ DEBUG_LIBVIRT_COREDUMPS: true NOVA_VNC_ENABLED: true OVN_DBS_LOG_LEVEL: dbg + # tune the host to optimize memory usage and hide io latency + # these setting will configure the kernel to treat the host page + # cache and swap with equal priority, and prefer deferring writes + # changing the default swappiness, dirty_ratio and + # the vfs_cache_pressure + ENABLE_SYSCTL_MEM_TUNING: true + # the net tuning optimizes ipv4 tcp fast open and config the default + # qdisk policy to pfifo_fast which effectively disable all qos. + # this minimizes the cpu load of the host network stack + ENABLE_SYSCTL_NET_TUNING: true + # zswap allows the kernel to compress pages in memory before swapping + # them to disk. this can reduce the amount of swap used and improve + # performance. effectively this trades a small amount of cpu for an + # increase in swap performance by reducing the amount of data + # written to disk. the overall speedup is proportional to the + # compression ratio and the speed of the swap device. + ENABLE_ZSWAP: true devstack_local_conf: post-config: $NEUTRON_CONF: DEFAULT: global_physnet_mtu: '{{ external_bridge_mtu }}' + $NOVA_CPU_CONF: + libvirt: + # Use lower TB cache than default(1GiB), only applicable with + # libvirt>=8.0.0 + tb_cache_size: 128 devstack_services: # Core services enabled for this branch. # This list replaces the test-matrix. @@ -618,6 +642,30 @@ Q_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}" NOVA_VNC_ENABLED: true ENABLE_CHASSIS_AS_GW: false + # tune the host to optimize memory usage and hide io latency + # these setting will configure the kernel to treat the host page + # cache and swap with equal priority, and prefer deferring writes + # changing the default swappiness, dirty_ratio and + # the vfs_cache_pressure + ENABLE_SYSCTL_MEM_TUNING: true + # the net tuning optimizes ipv4 tcp fast open and config the default + # qdisk policy to pfifo_fast which effectively disable all qos. + # this minimizes the cpu load of the host network stack + ENABLE_SYSCTL_NET_TUNING: true + # zswap allows the kernel to compress pages in memory before swapping + # them to disk. this can reduce the amount of swap used and improve + # performance. effectivly this trades a small amount of cpu for an + # increase in swap performance by reducing the amount of data + # written to disk. the overall speedup is porportional to the + # compression ratio and the speed of the swap device. + ENABLE_ZSWAP: true + devstack_local_conf: + post-config: + $NOVA_CPU_CONF: + libvirt: + # Use lower TB cache than default(1GiB), only applicable with + # libvirt>=8.0.0 + tb_cache_size: 128 - job: name: devstack-ipv6 @@ -669,15 +717,6 @@ vars: configure_swap_size: 4096 -- job: - name: devstack-platform-debian-bullseye - parent: tempest-full-py3 - description: Debian Bullseye platform test - nodeset: devstack-single-node-debian-bullseye - timeout: 9000 - vars: - configure_swap_size: 4096 - - job: name: devstack-platform-rocky-blue-onyx parent: tempest-full-py3 @@ -754,62 +793,6 @@ # Enable Neutron ML2/OVS services q-agt: true -- job: - name: devstack-platform-openEuler-22.03-ovn-source - parent: tempest-full-py3 - description: openEuler 22.03 LTS platform test (OVN) - nodeset: devstack-single-node-openeuler-22.03 - voting: false - timeout: 9000 - vars: - configure_swap_size: 4096 - devstack_localrc: - # NOTE(wxy): OVN package is not supported by openEuler yet. 
Build it - # from source instead. - OVN_BUILD_FROM_SOURCE: True - OVN_BRANCH: "v21.06.0" - OVS_BRANCH: "a4b04276ab5934d087669ff2d191a23931335c87" - OVS_SYSCONFDIR: "/usr/local/etc/openvswitch" - -- job: - name: devstack-platform-openEuler-22.03-ovs - parent: tempest-full-py3 - description: openEuler 22.03 LTS platform test (OVS) - nodeset: devstack-single-node-openeuler-22.03 - voting: false - timeout: 9000 - vars: - configure_swap_size: 8192 - devstack_localrc: - Q_AGENT: openvswitch - Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch - Q_ML2_TENANT_NETWORK_TYPE: vxlan - devstack_services: - # Disable OVN services - ovn-northd: false - ovn-controller: false - ovs-vswitchd: false - ovsdb-server: false - # Disable Neutron ML2/OVN services - q-ovn-metadata-agent: false - # Enable Neutron ML2/OVS services - q-agt: true - q-dhcp: true - q-l3: true - q-meta: true - q-metering: true - group-vars: - subnode: - devstack_services: - # Disable OVN services - ovn-controller: false - ovs-vswitchd: false - ovsdb-server: false - # Disable Neutron ML2/OVN services - q-ovn-metadata-agent: false - # Enable Neutron ML2/OVS services - q-agt: true - - job: name: devstack-no-tls-proxy parent: tempest-full-py3 @@ -896,13 +879,10 @@ - devstack-enforce-scope - devstack-platform-centos-9-stream - devstack-platform-debian-bookworm - - devstack-platform-debian-bullseye - devstack-platform-rocky-blue-onyx - devstack-platform-ubuntu-jammy-ovn-source - devstack-platform-ubuntu-jammy-ovs - devstack-platform-ubuntu-noble - - devstack-platform-openEuler-22.03-ovn-source - - devstack-platform-openEuler-22.03-ovs - devstack-multinode - devstack-unit-tests - openstack-tox-bashate @@ -943,7 +923,6 @@ - devstack - devstack-ipv6 - devstack-platform-debian-bookworm - - devstack-platform-debian-bullseye # NOTE(danms): Disabled due to instability, see comment in the job # definition above. # - devstack-platform-rocky-blue-onyx @@ -978,7 +957,9 @@ # pruned. # # * nova-next: maintained by nova for unreleased/undefaulted - # things + # things, this job is not experimental but often is used to test + # things that are not yet production ready or to test what will be + # the new default after a deprecation period has ended. # * neutron-fullstack-with-uwsgi: maintained by neutron for fullstack test # when neutron-api is served by uwsgi, it's in exprimental for testing. # the next cycle we can remove this job if things turn out to be @@ -988,7 +969,7 @@ # stable engouh with uwsgi. # * neutron-ovn-tempest-with-uwsgi: maintained by neutron for tempest test. # Next cycle we can remove this if everything run out stable enough. 
- # * nova-multi-cell: maintained by nova and currently non-voting in the + # * nova-multi-cell: maintained by nova and now is voting in the # check queue for nova changes but relies on devstack configuration experimental: @@ -1026,9 +1007,6 @@ jobs: - devstack-platform-centos-9-stream - devstack-platform-debian-bookworm - - devstack-platform-debian-bullseye - devstack-platform-rocky-blue-onyx - devstack-platform-ubuntu-jammy-ovn-source - devstack-platform-ubuntu-jammy-ovs - - devstack-platform-openEuler-22.03-ovn-source - - devstack-platform-openEuler-22.03-ovs diff --git a/stack.sh b/stack.sh index 77548105a7..ab3f01cdbd 100755 --- a/stack.sh +++ b/stack.sh @@ -230,7 +230,7 @@ write_devstack_version # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -SUPPORTED_DISTROS="bookworm|bullseye|jammy|noble|rhel8|rhel9|openEuler-22.03" +SUPPORTED_DISTROS="bookworm|jammy|noble|rhel9" if [[ ! ${DISTRO} =~ $SUPPORTED_DISTROS ]]; then echo "WARNING: this script has not been tested on $DISTRO" From e825ba07a1b88ab0570053f92123aa451e4b2ec8 Mon Sep 17 00:00:00 2001 From: Dmitry Tantsur Date: Wed, 26 Jun 2024 18:10:43 +0200 Subject: [PATCH 418/574] Create parent directory in merge_config_file The code accounts for the config file not existing but it does not account for the parent directory missing. This is currently breaking any Ironic jobs that disable Nova. Change-Id: Ia5fcfe6c63f5cc40b11f7e1f3be244d7897f26f6 --- inc/meta-config | 1 + 1 file changed, 1 insertion(+) diff --git a/inc/meta-config b/inc/meta-config index be73b60800..b9d9649e4b 100644 --- a/inc/meta-config +++ b/inc/meta-config @@ -90,6 +90,7 @@ function merge_config_file { local real_configfile real_configfile=$(eval echo $configfile) if [ ! -f $real_configfile ]; then + mkdir -p $(dirname $real_configfile) || die $LINENO "could not create the directory of $real_configfile ($configfile)" touch $real_configfile || die $LINENO "could not create config file $real_configfile ($configfile)" fi From c707dd3fc2d601db5169508ed39e24dde89e9631 Mon Sep 17 00:00:00 2001 From: yatinkarel Date: Wed, 26 Apr 2023 14:59:25 +0000 Subject: [PATCH 419/574] [nova] Add flag to set libvirt tb_cache_size A config option is being added in nova with [1] in order to allow configuring lower tb-cache size for qemu guest VMs. This patch adds a flag in devstack so jobs can utilize it to set required tb-cache size. [1] https://review.opendev.org/c/openstack/nova/+/868419 Co-Authored-By: Sean Mooney Related: blueprint libvirt-tb-cache-size Change-Id: Ifde737eb5d87dfe860445097d1f2b0ce16b0de05 --- .zuul.yaml | 18 ++++++------------ lib/nova | 6 ++++++ 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 06d76d0093..3e6c42e68f 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -544,17 +544,15 @@ # increase in swap performance by reducing the amount of data # written to disk. the overall speedup is proportional to the # compression ratio and the speed of the swap device. + # NOTE: this option is ignored when not using nova with the libvirt + # virt driver. + NOVA_LIBVIRT_TB_CACHE_SIZE: 128 ENABLE_ZSWAP: true devstack_local_conf: post-config: $NEUTRON_CONF: DEFAULT: global_physnet_mtu: '{{ external_bridge_mtu }}' - $NOVA_CPU_CONF: - libvirt: - # Use lower TB cache than default(1GiB), only applicable with - # libvirt>=8.0.0 - tb_cache_size: 128 devstack_services: # Core services enabled for this branch. # This list replaces the test-matrix. 
@@ -659,13 +657,9 @@ # written to disk. the overall speedup is porportional to the # compression ratio and the speed of the swap device. ENABLE_ZSWAP: true - devstack_local_conf: - post-config: - $NOVA_CPU_CONF: - libvirt: - # Use lower TB cache than default(1GiB), only applicable with - # libvirt>=8.0.0 - tb_cache_size: 128 + # NOTE: this option is ignored when not using nova with the libvirt + # virt driver. + NOVA_LIBVIRT_TB_CACHE_SIZE: 128 - job: name: devstack-ipv6 diff --git a/lib/nova b/lib/nova index ee3f29eebf..7c6ffb2239 100644 --- a/lib/nova +++ b/lib/nova @@ -173,6 +173,9 @@ NOVA_SHUTDOWN_TIMEOUT=${NOVA_SHUTDOWN_TIMEOUT:-0} # Whether to use Keystone unified limits instead of legacy quota limits. NOVA_USE_UNIFIED_LIMITS=$(trueorfalse False NOVA_USE_UNIFIED_LIMITS) +# TB Cache Size in MiB for qemu guests +NOVA_LIBVIRT_TB_CACHE_SIZE=${NOVA_LIBVIRT_TB_CACHE_SIZE:-0} + # Functions # --------- @@ -1071,6 +1074,9 @@ function start_nova_compute { fi if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then + if [ ${NOVA_LIBVIRT_TB_CACHE_SIZE} -gt 0 ]; then + iniset $NOVA_CPU_CONF libvirt tb_cache_size ${NOVA_LIBVIRT_TB_CACHE_SIZE} + fi # The group **$LIBVIRT_GROUP** is added to the current user in this script. # ``sg`` is used in run_process to execute nova-compute as a member of the # **$LIBVIRT_GROUP** group. From 3a0c0b9ff4bb3568efc471e1bf98fc273e8bc767 Mon Sep 17 00:00:00 2001 From: Rodolfo Alonso Hernandez Date: Mon, 24 Jun 2024 11:09:34 +0000 Subject: [PATCH 420/574] [Neutron] Add a new Neutron service: neutron-ovn-maintenance-worker This new service is spawned when using Neutron WSGI module. This new service executes the OVN maintenance task that syncs the Neutron database and the OVN database. Depends-On: https://review.opendev.org/c/openstack/neutron/+/922074 Related-Bug: #1912359 Change-Id: I495459cd9e35e2e76ba7fc9611a589e1685814f5 --- lib/neutron | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/lib/neutron b/lib/neutron index e0b5d5d68c..a8cc953b0e 100644 --- a/lib/neutron +++ b/lib/neutron @@ -369,6 +369,24 @@ function _determine_config_l3 { echo "$opts" } +function _enable_ovn_maintenance { + if [[ $Q_AGENT == "ovn" ]]; then + enable_service neutron-ovn-maintenance-worker + fi +} + +function _run_ovn_maintenance { + if [[ $Q_AGENT == "ovn" ]]; then + run_process neutron-ovn-maintenance-worker "$NEUTRON_BIN_DIR/neutron-ovn-maintenance-worker $cfg_file_options" + fi +} + +function _stop_ovn_maintenance { + if [[ $Q_AGENT == "ovn" ]]; then + stop_process neutron-ovn-maintenance-worker + fi +} + # For services and agents that require it, dynamically construct a list of # --config-file arguments that are passed to the binary. 
function determine_config_files { @@ -635,8 +653,10 @@ function start_neutron_service_and_check { neutron_url=$Q_PROTOCOL://$Q_HOST/ enable_service neutron-rpc-server enable_service neutron-periodic-workers + _enable_ovn_maintenance run_process neutron-rpc-server "$NEUTRON_BIN_DIR/neutron-rpc-server $cfg_file_options" run_process neutron-periodic-workers "$NEUTRON_BIN_DIR/neutron-periodic-workers $cfg_file_options" + _run_ovn_maintenance else run_process q-svc "$NEUTRON_BIN_DIR/neutron-server $cfg_file_options" neutron_url=$service_protocol://$Q_HOST:$service_port/ @@ -710,6 +730,7 @@ function stop_other { stop_process neutron-rpc-server stop_process neutron-periodic-workers stop_process neutron-api + _stop_ovn_maintenance else stop_process q-svc fi @@ -827,6 +848,7 @@ function cleanup_neutron { stop_process neutron-api stop_process neutron-rpc-server stop_process neutron-periodic-workers + _stop_ovn_maintenance remove_uwsgi_config "$NEUTRON_UWSGI_CONF" "neutron-api" sudo rm -f $(apache_site_config_for neutron-api) fi From eb0ac1d217fe8a545f2e697d09fbb650efecb9ef Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Mon, 8 Jul 2024 18:02:25 +0200 Subject: [PATCH 421/574] Drop remainders of identity API v2.0 references keystone has dropped the v2.0 API in queens, time to drop all special casing for it. Change-Id: If628c4627f7c8b8c2ee9bca16ea6db693cf8526a --- files/openstack-cli-server/openstack | 1 - lib/tempest | 15 +-------------- openrc | 17 ++--------------- stackrc | 4 ---- 4 files changed, 3 insertions(+), 34 deletions(-) diff --git a/files/openstack-cli-server/openstack b/files/openstack-cli-server/openstack index ef05f1b841..47fbfc5e17 100755 --- a/files/openstack-cli-server/openstack +++ b/files/openstack-cli-server/openstack @@ -68,7 +68,6 @@ try: env = {} passenv = ["CINDER_VERSION", "OS_AUTH_URL", - "OS_IDENTITY_API_VERSION", "OS_NO_CACHE", "OS_PASSWORD", "OS_PROJECT_NAME", diff --git a/lib/tempest b/lib/tempest index 6bd203e6f4..7beaf21292 100644 --- a/lib/tempest +++ b/lib/tempest @@ -18,7 +18,7 @@ # - ``PUBLIC_NETWORK_NAME`` # - ``VIRT_DRIVER`` # - ``LIBVIRT_TYPE`` -# - ``KEYSTONE_SERVICE_URI``, ``KEYSTONE_SERVICE_URI_V3`` from lib/keystone +# - ``KEYSTONE_SERVICE_URI_V3`` from lib/keystone # # Optional Dependencies: # @@ -381,7 +381,6 @@ function configure_tempest { iniset $TEMPEST_CONFIG volume build_timeout $BUILD_TIMEOUT # Identity - iniset $TEMPEST_CONFIG identity uri "$KEYSTONE_SERVICE_URI/v2.0/" iniset $TEMPEST_CONFIG identity uri_v3 "$KEYSTONE_SERVICE_URI_V3" iniset $TEMPEST_CONFIG identity user_lockout_failure_attempts $KEYSTONE_LOCKOUT_FAILURE_ATTEMPTS iniset $TEMPEST_CONFIG identity user_lockout_duration $KEYSTONE_LOCKOUT_DURATION @@ -392,19 +391,7 @@ function configure_tempest { iniset $TEMPEST_CONFIG auth admin_project_name $admin_project_name iniset $TEMPEST_CONFIG auth admin_domain_name $admin_domain_name fi - if [ "$ENABLE_IDENTITY_V2" == "True" ]; then - # Run Identity API v2 tests ONLY if needed - iniset $TEMPEST_CONFIG identity-feature-enabled api_v2 True - else - # Skip Identity API v2 tests by default - iniset $TEMPEST_CONFIG identity-feature-enabled api_v2 False - fi iniset $TEMPEST_CONFIG identity auth_version ${TEMPEST_AUTH_VERSION:-v3} - if [[ "$TEMPEST_AUTH_VERSION" != "v2" ]]; then - # we're going to disable v2 admin unless we're using v2 by default. 
- iniset $TEMPEST_CONFIG identity-feature-enabled api_v2_admin False - fi - if is_service_enabled tls-proxy; then iniset $TEMPEST_CONFIG identity ca_certificates_file $SSL_BUNDLE_FILE fi diff --git a/openrc b/openrc index e20a5a56b9..5ec7634638 100644 --- a/openrc +++ b/openrc @@ -55,27 +55,14 @@ else GLANCE_HOST=${GLANCE_HOST:-$HOST_IP} fi -# Identity API version -export OS_IDENTITY_API_VERSION=3 - -# Authenticating against an OpenStack cloud using Keystone returns a **Token** -# and **Service Catalog**. The catalog contains the endpoints for all services -# the user/project has access to - including nova, glance, keystone, swift, ... -# We currently recommend using the version 3 *identity api*. -# - # If you don't have a working .stackenv, this is the backup position KEYSTONE_BACKUP=$SERVICE_PROTOCOL://$SERVICE_HOST:5000 KEYSTONE_SERVICE_URI=${KEYSTONE_SERVICE_URI:-$KEYSTONE_BACKUP} export OS_AUTH_URL=${OS_AUTH_URL:-$KEYSTONE_SERVICE_URI} -# Currently, in order to use openstackclient with Identity API v3, -# we need to set the domain which the user and project belong to. -if [ "$OS_IDENTITY_API_VERSION" = "3" ]; then - export OS_USER_DOMAIN_ID=${OS_USER_DOMAIN_ID:-"default"} - export OS_PROJECT_DOMAIN_ID=${OS_PROJECT_DOMAIN_ID:-"default"} -fi +export OS_USER_DOMAIN_ID=${OS_USER_DOMAIN_ID:-"default"} +export OS_PROJECT_DOMAIN_ID=${OS_PROJECT_DOMAIN_ID:-"default"} # Set OS_CACERT to a default CA certificate chain if it exists. if [[ ! -v OS_CACERT ]] ; then diff --git a/stackrc b/stackrc index b37959712b..0492c73d1f 100644 --- a/stackrc +++ b/stackrc @@ -162,10 +162,6 @@ else export PS4='+ $(short_source): ' fi -# Configure Identity API version -# TODO(frickler): Drop this when plugins no longer need it -IDENTITY_API_VERSION=3 - # Global option for enforcing scope. If enabled, ENFORCE_SCOPE overrides # each services ${SERVICE}_ENFORCE_SCOPE variables ENFORCE_SCOPE=$(trueorfalse False ENFORCE_SCOPE) From d714f7deaac8d56abe8b028385f5282d6c02d355 Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Tue, 9 Jul 2024 17:14:54 +0200 Subject: [PATCH 422/574] Add devstack-platform-ubuntu-noble to periodic Seems the platform is stable, let's add it to the periodic-weekly tests that we run. Change-Id: I185443c0fdb9e1248542a16fd877dc6b8ffd7683 --- .zuul.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.zuul.yaml b/.zuul.yaml index 3e6c42e68f..af7e74b57b 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -725,7 +725,6 @@ vars: configure_swap_size: 4096 - - job: name: devstack-platform-ubuntu-noble parent: tempest-full-py3 @@ -1004,3 +1003,4 @@ - devstack-platform-rocky-blue-onyx - devstack-platform-ubuntu-jammy-ovn-source - devstack-platform-ubuntu-jammy-ovs + - devstack-platform-ubuntu-noble From 696dbdf045cbf1c1525bb25c005ce767d1c9e9b8 Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Tue, 9 Jul 2024 16:36:37 +0200 Subject: [PATCH 423/574] Make nova only use the nova account Each service should only be using that service's user account within its configuration, in order to reduce the possible impact of credential leaks. Start with nova, other services will follow. 
Change-Id: I6b3fef5de05d5e0cc032b83a2ed834f1c997a048 --- lib/neutron | 2 +- lib/nova | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/neutron b/lib/neutron index a8cc953b0e..da21d46079 100644 --- a/lib/neutron +++ b/lib/neutron @@ -485,7 +485,7 @@ function create_nova_conf_neutron { local conf=${1:-$NOVA_CONF} iniset $conf neutron auth_type "password" iniset $conf neutron auth_url "$KEYSTONE_SERVICE_URI" - iniset $conf neutron username "$Q_ADMIN_USERNAME" + iniset $conf neutron username nova iniset $conf neutron password "$SERVICE_PASSWORD" iniset $conf neutron user_domain_name "$SERVICE_DOMAIN_NAME" iniset $conf neutron project_name "$SERVICE_PROJECT_NAME" diff --git a/lib/nova b/lib/nova index 7c6ffb2239..35c6893763 100644 --- a/lib/nova +++ b/lib/nova @@ -640,7 +640,7 @@ function configure_placement_nova_compute { local conf=${1:-$NOVA_CONF} iniset $conf placement auth_type "password" iniset $conf placement auth_url "$KEYSTONE_SERVICE_URI" - iniset $conf placement username placement + iniset $conf placement username nova iniset $conf placement password "$SERVICE_PASSWORD" iniset $conf placement user_domain_name "$SERVICE_DOMAIN_NAME" iniset $conf placement project_name "$SERVICE_TENANT_NAME" From 6df53719180c8d587e058a26ed3bb19562e55745 Mon Sep 17 00:00:00 2001 From: Sean Mooney Date: Fri, 12 Jul 2024 20:08:58 +0100 Subject: [PATCH 424/574] bump guest ram to prevent kernel panics one observation we had in down stream ci is sometimes the cirros 0.6.2 image appared to crash when using 128MB of ram. upstream we have been dealing with semi random kernel panics which are losely corralated with cinder volume usage. Recently we optimisted the devstack jobs by using zswap this has reduced memory pressure in the jobs. This patch increase the ram allocated to a flavor to see if we can afford that with the current conncurnace level in an attempt to reduce kernel panics. Two new parmaters are added to allow jobs or users to set the desired ram size. TEMPEST_FLAVOR_RAM=${TEMPEST_FLAVOR_RAM:-192} TEMPEST_FLAVOR_ALT_RAM=${TEMPEST_FLAVOR_ALT_RAM:-256} Change-Id: Ib6a2d5ab61a771d4f85bd2c2412052efadc77ac5 --- lib/tempest | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/lib/tempest b/lib/tempest index 7beaf21292..a14ed1af72 100644 --- a/lib/tempest +++ b/lib/tempest @@ -102,6 +102,9 @@ TEMPEST_USE_TEST_ACCOUNTS=$(trueorfalse False TEMPEST_USE_TEST_ACCOUNTS) # it will run tempest with TEMPEST_CONCURRENCY=${TEMPEST_CONCURRENCY:-$(nproc)} +TEMPEST_FLAVOR_RAM=${TEMPEST_FLAVOR_RAM:-192} +TEMPEST_FLAVOR_ALT_RAM=${TEMPEST_FLAVOR_ALT_RAM:-256} + # Functions # --------- @@ -295,13 +298,15 @@ function configure_tempest { if [[ ! ( $available_flavors =~ 'm1.nano' ) ]]; then # Determine the flavor disk size based on the image size. disk=$(image_size_in_gib $image_uuid) - openstack --os-cloud devstack-admin flavor create --id 42 --ram 128 --disk $disk --vcpus 1 --property hw_rng:allowed=True m1.nano + ram=${TEMPEST_FLAVOR_RAM} + openstack --os-cloud devstack-admin flavor create --id 42 --ram ${ram} --disk $disk --vcpus 1 --property hw_rng:allowed=True m1.nano fi flavor_ref=42 if [[ ! ( $available_flavors =~ 'm1.micro' ) ]]; then # Determine the alt flavor disk size based on the alt image size. 
disk=$(image_size_in_gib $image_uuid_alt) - openstack --os-cloud devstack-admin flavor create --id 84 --ram 192 --disk $disk --vcpus 1 --property hw_rng:allowed=True m1.micro + ram=${TEMPEST_FLAVOR_ALT_RAM} + openstack --os-cloud devstack-admin flavor create --id 84 --ram ${ram} --disk $disk --vcpus 1 --property hw_rng:allowed=True m1.micro fi flavor_ref_alt=84 else From aaaa03718bdc05df197708f9354e985936f96853 Mon Sep 17 00:00:00 2001 From: Rodolfo Alonso Hernandez Date: Fri, 19 Jul 2024 08:09:25 +0000 Subject: [PATCH 425/574] [Neutron] Do not execute RPC workers if "rpc_workers=0" When the Neutron WSGI module is used, an independent service called "neutron-rpc-server" is configured and executed. However it will fail if the number of RPC workers is configured to zero. In that case, the configuration and execution of this service should be skipped. If the service is explicitly disabled in the devstack configuration, it won't be executed neither. Closes-Bug: #2073572 Change-Id: Idd023a2a8f588152221f20a13ae24fbb7d1618a4 --- lib/neutron | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/lib/neutron b/lib/neutron index a8cc953b0e..474613926b 100644 --- a/lib/neutron +++ b/lib/neutron @@ -142,6 +142,7 @@ Q_META_DATA_IP=${Q_META_DATA_IP:-$(ipv6_unquote $SERVICE_HOST)} Q_ALLOW_OVERLAPPING_IP=${Q_ALLOW_OVERLAPPING_IP:-True} Q_NOTIFY_NOVA_PORT_STATUS_CHANGES=${Q_NOTIFY_NOVA_PORT_STATUS_CHANGES:-True} Q_NOTIFY_NOVA_PORT_DATA_CHANGES=${Q_NOTIFY_NOVA_PORT_DATA_CHANGES:-True} +_Q_RUN_RPC_SERVER=True VIF_PLUGGING_IS_FATAL=${VIF_PLUGGING_IS_FATAL:-True} VIF_PLUGGING_TIMEOUT=${VIF_PLUGGING_TIMEOUT:-300} @@ -464,6 +465,15 @@ function configure_neutron { # clouds, therefore running without a dedicated RPC worker # for state reports is more than adequate. iniset $NEUTRON_CONF DEFAULT rpc_state_report_workers 0 + # The default value of "rpc_workers" is None (not defined). If + # "rpc_workers" is explicitly set to 0, the RPC workers process should not + # be executed. NOTE: this service is only executed when WSGI is enabled + # (NEUTRON_DEPLOY_MOD_WSGI=True) for the Neutron server. + local rpc_workers + rpc_workers=$(iniget_multiline /etc/neutron/neutron.conf DEFAULT rpc_workers) + if ! 
is_service_enabled neutron-rpc-server || [ "$rpc_workers" -eq "0" ]; then + _Q_RUN_RPC_SERVER=False + fi if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then write_uwsgi_config "$NEUTRON_UWSGI_CONF" "$NEUTRON_BIN_DIR/neutron-api" "/networking" @@ -651,10 +661,14 @@ function start_neutron_service_and_check { enable_service neutron-api run_process neutron-api "$(which uwsgi) --procname-prefix neutron-api --ini $NEUTRON_UWSGI_CONF" neutron_url=$Q_PROTOCOL://$Q_HOST/ - enable_service neutron-rpc-server + if [[ "$_Q_RUN_RPC_SERVER" = True ]]; then + enable_service neutron-rpc-server + fi enable_service neutron-periodic-workers _enable_ovn_maintenance - run_process neutron-rpc-server "$NEUTRON_BIN_DIR/neutron-rpc-server $cfg_file_options" + if [[ "$_Q_RUN_RPC_SERVER" = True ]]; then + run_process neutron-rpc-server "$NEUTRON_BIN_DIR/neutron-rpc-server $cfg_file_options" + fi run_process neutron-periodic-workers "$NEUTRON_BIN_DIR/neutron-periodic-workers $cfg_file_options" _run_ovn_maintenance else From 13888a31d2bac9aa46adf72a154be4aa4fbcd790 Mon Sep 17 00:00:00 2001 From: Rodolfo Alonso Hernandez Date: Sat, 20 Jul 2024 15:50:30 +0000 Subject: [PATCH 426/574] [Neutron] neutron-rpc-server is not a configurable service The "neutron-rpc-server" is not a configurable service that can be enabled or disabled. This service is a dependant process of the "neutron-api-server" service that is spawned when the Neutron API uses the WSGI module. The execution of this child service will depend on: * The Neutron API service when running with the WSGI module. If the Neutron API uses the eventlet module, this service won't run (the RPC workers will be spawned by the eventlet server). * The "rpc_workers" configuration variable. If this variable is explicitly set to "0", the server must not run. Closes-Bug: #2073844 Related-Bug: #2073572 Change-Id: Ic019423ca033ded8609d82bb11841b975862ac14 --- lib/neutron | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron b/lib/neutron index 474613926b..69bcb86d4d 100644 --- a/lib/neutron +++ b/lib/neutron @@ -471,7 +471,7 @@ function configure_neutron { # (NEUTRON_DEPLOY_MOD_WSGI=True) for the Neutron server. local rpc_workers rpc_workers=$(iniget_multiline /etc/neutron/neutron.conf DEFAULT rpc_workers) - if ! 
is_service_enabled neutron-rpc-server || [ "$rpc_workers" -eq "0" ]; then + if [ "$rpc_workers" == "0" ]; then _Q_RUN_RPC_SERVER=False fi From 0cd876384a77d2144c3ebc51a0228433fdb7facb Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Fri, 19 Apr 2024 12:12:16 +0100 Subject: [PATCH 427/574] lib/neutron: Migrate neutron to WSGI module path Change-Id: Ie99ec3bf4198fa7cd7583d2dca648e1474f94aea Signed-off-by: Stephen Finucane Depends-on: https://review.opendev.org/c/openstack/neutron/+/916407 --- lib/neutron | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/neutron b/lib/neutron index a8cc953b0e..6336795f2e 100644 --- a/lib/neutron +++ b/lib/neutron @@ -88,6 +88,7 @@ export NEUTRON_TEST_CONFIG_FILE=${NEUTRON_TEST_CONFIG_FILE:-"$NEUTRON_CONF_DIR/d # enough NEUTRON_DEPLOY_MOD_WSGI=$(trueorfalse False NEUTRON_DEPLOY_MOD_WSGI) +NEUTRON_UWSGI=neutron.wsgi.api:application NEUTRON_UWSGI_CONF=$NEUTRON_CONF_DIR/neutron-api-uwsgi.ini # If NEUTRON_ENFORCE_SCOPE == True, it will set "enforce_scope" @@ -466,7 +467,7 @@ function configure_neutron { iniset $NEUTRON_CONF DEFAULT rpc_state_report_workers 0 if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then - write_uwsgi_config "$NEUTRON_UWSGI_CONF" "$NEUTRON_BIN_DIR/neutron-api" "/networking" + write_uwsgi_config "$NEUTRON_UWSGI_CONF" "$NEUTRON_UWSGI" "/networking" "" "neutron-api" fi } From 95697d84cb59dcbc53748ccdb472987cf61df1f4 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Tue, 23 Jul 2024 11:36:49 +0100 Subject: [PATCH 428/574] docs: Add a minimal Tempest guide This can be fleshed out more in the future, including with information about managing plugins, but this is a start. Change-Id: I1094d093b704e37370e3e434ebf3697954e99da3 Signed-off-by: Stephen Finucane --- doc/source/tempest.rst | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) create mode 100644 doc/source/tempest.rst diff --git a/doc/source/tempest.rst b/doc/source/tempest.rst new file mode 100644 index 0000000000..65dd5b16b2 --- /dev/null +++ b/doc/source/tempest.rst @@ -0,0 +1,25 @@ +======= +Tempest +======= + +`Tempest`_ is the OpenStack Integration test suite. It is installed by default +and is used to provide integration testing for many of the OpenStack services. +Just like DevStack itself, it is possible to extend Tempest with plugins. In +fact, many Tempest plugin packages also include DevStack plugin to do things +like pre-create required static resources. + +The `Tempest documentation `_ provides a thorough guide to using +Tempest. However, if you simply wish to run the standard set of Tempest tests +against an existing deployment, you can do the following: + +.. code-block:: shell + + cd /opt/stack/tempest + /opt/stack/data/venv/bin/tempest run ... + +The above assumes you have installed DevStack in the default location +(configured via the ``DEST`` configuration variable) and have enabled +virtualenv-based installation in the standard location (configured via the +``USE_VENV`` and ``VENV_DEST`` configuration variables, respectively). + +.. 
_Tempest: https://docs.openstack.org/tempest/latest/

From 6990b06cd321930f69907ba42ee744755f8029fe Mon Sep 17 00:00:00 2001
From: Riccardo Pittau
Date: Wed, 24 Jul 2024 18:01:51 +0200
Subject: [PATCH 429/574] Install simplejson in devstack venv

Workaround to avoid failure due to missing osc dependency removed in [1]

[1] https://review.opendev.org/c/openstack/python-openstackclient/+/920001

Change-Id: I3f7541e691717186b7c73f10ffabae6fc0c5c9f9
---
 inc/python | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/inc/python b/inc/python
index 2339afdd6d..1fd414773f 100644
--- a/inc/python
+++ b/inc/python
@@ -42,6 +42,9 @@ function setup_devstack_virtualenv {
         # available on pypi.
         python$PYTHON3_VERSION -m venv --system-site-packages $DEVSTACK_VENV
         pip_install -U pip setuptools
+        #NOTE(rpittau): workaround for simplejson removal in osc
+        # https://review.opendev.org/c/openstack/python-openstackclient/+/920001
+        pip_install -U simplejson
     fi
     if [[ ":$PATH:" != *":$DEVSTACK_VENV/bin:"* ]] ; then
         export PATH="$DEVSTACK_VENV/bin:$PATH"

From 8784a3027fc3154aa2f6482d0127e45070e60b5a Mon Sep 17 00:00:00 2001
From: karolinku
Date: Wed, 31 Jul 2024 12:34:00 +0200
Subject: [PATCH 430/574] Replacing usage of rdo-release rpm with
 centos-release-openstack

rpms following [1].

[1] https://issues.redhat.com/browse/RDO-311

Change-Id: I50951e077e73297d10b075677a440992d1e2fa91
---
 stack.sh | 19 +++++--------------
 1 file changed, 5 insertions(+), 14 deletions(-)

diff --git a/stack.sh b/stack.sh
index ab3f01cdbd..dcfd398c01 100755
--- a/stack.sh
+++ b/stack.sh
@@ -302,23 +302,14 @@ function _install_epel {
 }

 function _install_rdo {
-    if [[ $DISTRO == "rhel8" ]]; then
+    if [[ $DISTRO == "rhel9" ]]; then
+        rdo_release=${TARGET_BRANCH#*/}
         if [[ "$TARGET_BRANCH" == "master" ]]; then
-            # rdo-release.el8.rpm points to latest RDO release, use that for master
-            sudo dnf -y install https://rdoproject.org/repos/rdo-release.el8.rpm
+            # adding delorean-deps repo to provide current master rpms
+            sudo wget https://trunk.rdoproject.org/centos9-master/delorean-deps.repo -O /etc/yum.repos.d/delorean-deps.repo
         else
             # For stable/unmaintained branches use corresponding release rpm
-            rdo_release=${TARGET_BRANCH#*/}
-            sudo dnf -y install https://rdoproject.org/repos/openstack-${rdo_release}/rdo-release-${rdo_release}.el8.rpm
-        fi
-    elif [[ $DISTRO == "rhel9" ]]; then
-        if [[ "$TARGET_BRANCH" == "master" ]]; then
-            # rdo-release.el9.rpm points to latest RDO release, use that for master
-            sudo dnf -y install https://rdoproject.org/repos/rdo-release.el9.rpm
-        else
-            # For stable/unmaintained branches use corresponding release rpm
-            rdo_release=${TARGET_BRANCH#*/}
-            sudo dnf -y install https://rdoproject.org/repos/openstack-${rdo_release}/rdo-release-${rdo_release}.el9.rpm
+            sudo dnf -y install centos-release-openstack-${rdo_release}
         fi
     fi
     sudo dnf -y update

From 92b65a84cc8135316922a0f8b91420ed221f3269 Mon Sep 17 00:00:00 2001
From: elajkat
Date: Mon, 5 Aug 2024 11:39:06 +0200
Subject: [PATCH 431/574] Handle tags and branches for unmaintained also

Related-Bug: #2056276
Change-Id: Iaa34624d1d85cadf1b45bec780ef8d97dd054041
---
 roles/setup-devstack-source-dirs/tasks/main.yaml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/roles/setup-devstack-source-dirs/tasks/main.yaml b/roles/setup-devstack-source-dirs/tasks/main.yaml
index 294c29cd29..cb7c6e3af8 100644
--- a/roles/setup-devstack-source-dirs/tasks/main.yaml
+++ b/roles/setup-devstack-source-dirs/tasks/main.yaml
@@ -43,9 +43,9 @@
       base_branch={{ devstack_sources_branch }}
if git branch -a | grep "$base_branch" > /dev/null ; then
          git checkout $base_branch
-      elif [[ "$base_branch" == stable/* ]]; then
+      elif [[ "$base_branch" == stable/* ]] || [[ "$base_branch" == unmaintained/* ]]; then
          # Look for an eol tag for the stable branch.
-        eol_tag=${base_branch#stable/}-eol
+        eol_tag="${base_branch#*/}-eol"
          if git tag -l |grep $eol_tag >/dev/null; then
              git checkout $eol_tag
              git reset --hard $eol_tag

From 38dea33fe9a5e6bef39566295cc8d05fb1d88223 Mon Sep 17 00:00:00 2001
From: melanie witt
Date: Thu, 1 Aug 2024 23:41:43 +0000
Subject: [PATCH 432/574] oslo.log: Configure log color by $LOG_COLOR

Relatively recently oslo.log 6.1.0 was released; it contains change
I7966d4f4977b267f620946de4a5509f53b043652, which added an option to
enable color in logs that defaults to False.

This caused a change in behavior for DevStack such that viewing logs
with journalctl no longer showed different colors for different log
levels, which can make debugging more difficult when developing with
DevStack.

This adds oslo.log color configuration based on the existing
$LOG_COLOR DevStack variable for log color, which defaults to True for
interactive invocations.

Change-Id: If10aada573eb4360e81585d4fb7e5d97f15bc52b
---
 functions | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/functions b/functions
index f81e8f0a08..42d08d7c4a 100644
--- a/functions
+++ b/functions
@@ -694,6 +694,8 @@ function setup_colorized_logging {
     iniset $conf_file DEFAULT logging_default_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s"
     iniset $conf_file DEFAULT logging_debug_format_suffix "from (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d"
     iniset $conf_file DEFAULT logging_exception_prefix "%(color)s%(asctime)s.%(msecs)03d TRACE %(name)s %(instance)s"
+    # Enable or disable color for oslo.log
+    iniset $conf_file DEFAULT log_color $LOG_COLOR
 }

 function setup_systemd_logging {
@@ -715,6 +717,9 @@ function setup_systemd_logging {
     iniset $conf_file DEFAULT logging_context_format_string "%(color)s%(levelname)s %(name)s [%(global_request_id)s %(request_id)s %(project_name)s %(user_name)s%(color)s] %(instance)s%(color)s%(message)s"
     iniset $conf_file DEFAULT logging_default_format_string "%(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s"
     iniset $conf_file DEFAULT logging_exception_prefix "ERROR %(name)s %(instance)s"
+
+    # Enable or disable color for oslo.log
+    iniset $conf_file DEFAULT log_color $LOG_COLOR
 }

 function setup_standard_logging_identity {

From 79a812a69e4015f6c911aa54989970e35bfc241f Mon Sep 17 00:00:00 2001
From: Rodolfo Alonso Hernandez
Date: Wed, 31 Jul 2024 14:41:33 +0000
Subject: [PATCH 433/574] Move the check of "rpc_workers" after the post-config
 phase

The configuration variable can be checked in the Neutron configuration
during the post-config phase when the configuration files and sections
are merged together.
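
For example, with a local.conf fragment like the following (an
illustrative use of the standard post-config syntax, not part of this
change), the value below only becomes visible once that merge has
happened:

    [[post-config|$NEUTRON_CONF]]
    [DEFAULT]
    rpc_workers = 0
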
Closes-Bug: #2075342 Change-Id: Ic42463e2f72488a1b14ce49e4e435cb4a2c0c855 --- lib/neutron | 21 +++++++++------------ 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/lib/neutron b/lib/neutron index 2325188f94..bcef8a5042 100644 --- a/lib/neutron +++ b/lib/neutron @@ -143,7 +143,6 @@ Q_META_DATA_IP=${Q_META_DATA_IP:-$(ipv6_unquote $SERVICE_HOST)} Q_ALLOW_OVERLAPPING_IP=${Q_ALLOW_OVERLAPPING_IP:-True} Q_NOTIFY_NOVA_PORT_STATUS_CHANGES=${Q_NOTIFY_NOVA_PORT_STATUS_CHANGES:-True} Q_NOTIFY_NOVA_PORT_DATA_CHANGES=${Q_NOTIFY_NOVA_PORT_DATA_CHANGES:-True} -_Q_RUN_RPC_SERVER=True VIF_PLUGGING_IS_FATAL=${VIF_PLUGGING_IS_FATAL:-True} VIF_PLUGGING_TIMEOUT=${VIF_PLUGGING_TIMEOUT:-300} @@ -466,15 +465,6 @@ function configure_neutron { # clouds, therefore running without a dedicated RPC worker # for state reports is more than adequate. iniset $NEUTRON_CONF DEFAULT rpc_state_report_workers 0 - # The default value of "rpc_workers" is None (not defined). If - # "rpc_workers" is explicitly set to 0, the RPC workers process should not - # be executed. NOTE: this service is only executed when WSGI is enabled - # (NEUTRON_DEPLOY_MOD_WSGI=True) for the Neutron server. - local rpc_workers - rpc_workers=$(iniget_multiline /etc/neutron/neutron.conf DEFAULT rpc_workers) - if [ "$rpc_workers" == "0" ]; then - _Q_RUN_RPC_SERVER=False - fi if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then write_uwsgi_config "$NEUTRON_UWSGI_CONF" "$NEUTRON_UWSGI" "/networking" "" "neutron-api" @@ -657,17 +647,24 @@ function start_neutron_service_and_check { service_port=$Q_PORT_INT service_protocol="http" fi + # Start the Neutron service if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then + # The default value of "rpc_workers" is None (not defined). If + # "rpc_workers" is explicitly set to 0, the RPC workers process + # should not be executed. + local rpc_workers + rpc_workers=$(iniget_multiline $NEUTRON_CONF DEFAULT rpc_workers) + enable_service neutron-api run_process neutron-api "$(which uwsgi) --procname-prefix neutron-api --ini $NEUTRON_UWSGI_CONF" neutron_url=$Q_PROTOCOL://$Q_HOST/ - if [[ "$_Q_RUN_RPC_SERVER" = True ]]; then + if [ "$rpc_workers" != "0" ]; then enable_service neutron-rpc-server fi enable_service neutron-periodic-workers _enable_ovn_maintenance - if [[ "$_Q_RUN_RPC_SERVER" = True ]]; then + if [ "$rpc_workers" != "0" ]; then run_process neutron-rpc-server "$NEUTRON_BIN_DIR/neutron-rpc-server $cfg_file_options" fi run_process neutron-periodic-workers "$NEUTRON_BIN_DIR/neutron-periodic-workers $cfg_file_options" From 84ce1984b1f0639025af599b426019a4b140fcb4 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Wed, 31 Jul 2024 19:04:08 +0000 Subject: [PATCH 434/574] Add os-test-images support in lib/tempest This generates the test images in os-test-images and also configures tempest to know where it is (and if image conversion is enabled in glance). 
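
Roughly, the stack run now does the equivalent of the following sketch
(paths assume the default DEST of /opt/stack; the clone URL assumes the
default GIT_BASE):

    git clone https://opendev.org/openstack/os-test-images /opt/stack/os-test-images
    cd /opt/stack/os-test-images
    tox -egenerate
    # tempest.conf is then pointed at the generated manifest:
    #   [image]
    #   images_manifest_file = /opt/stack/os-test-images/images/manifest.yaml
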
Change-Id: Ib74002828a77838ab95d2322e92bdab68caac37c --- .zuul.yaml | 1 + lib/tempest | 17 +++++++++++++++++ 2 files changed, 18 insertions(+) diff --git a/.zuul.yaml b/.zuul.yaml index af7e74b57b..59a577e522 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -510,6 +510,7 @@ - opendev.org/openstack/nova - opendev.org/openstack/placement - opendev.org/openstack/swift + - opendev.org/openstack/os-test-images timeout: 7200 vars: # based on observation of the integrated gate diff --git a/lib/tempest b/lib/tempest index a14ed1af72..24c8271132 100644 --- a/lib/tempest +++ b/lib/tempest @@ -105,6 +105,10 @@ TEMPEST_CONCURRENCY=${TEMPEST_CONCURRENCY:-$(nproc)} TEMPEST_FLAVOR_RAM=${TEMPEST_FLAVOR_RAM:-192} TEMPEST_FLAVOR_ALT_RAM=${TEMPEST_FLAVOR_ALT_RAM:-256} +OSTESTIMAGES_REPO=${OSTESTIMAGES_REPO:-${GIT_BASE}/openstack/os-test-images.git} +OSTESTIMAGES_BRANCH=${OSTESTIMAGES_BRANCH:-$BRANCHLESS_TARGET_BRANCH} +OSTESTIMAGES_DIR=${DEST}/os-test-images + # Functions # --------- @@ -357,6 +361,19 @@ function configure_tempest { fi fi + if is_service_enabled glance; then + git_clone $OSTESTIMAGES_REPO $OSTESTIMAGES_DIR $OSTESTIMAGES_BRANCH + pushd $OSTESTIMAGES_DIR + tox -egenerate + popd + iniset $TEMPEST_CONFIG image images_manifest_file ${OSTESTIMAGES_DIR}/images/manifest.yaml + local image_conversion + image_conversion=$(iniget $GLANCE_IMAGE_IMPORT_CONF image_conversion output_format) + if [[ "$image_conversion" ]]; then + iniset $TEMPEST_CONFIG image-feature-enabled image_conversion True + fi + fi + iniset $TEMPEST_CONFIG network project_network_cidr $FIXED_RANGE ssh_connect_method=${TEMPEST_SSH_CONNECT_METHOD:-$ssh_connect_method} From 3b0d76c30bf63332f494e8aae18dc2f1feed28dd Mon Sep 17 00:00:00 2001 From: Eric Harney Date: Mon, 12 Aug 2024 17:01:12 +0000 Subject: [PATCH 435/574] Fix get_default_host_ip ipv6 address parsing This is another occurrence of the issue fixed in bug 1786259 with change I30bf655f which occurs when there are multiple IPv6 gateways present. Before this change: $ source openrc +++++functions-common:get_default_host_ip:776 ip -f inet6 addr show 100 Device "100" does not exist. This is because the ip route command returns: default proto ra metric 100 expires 1497sec pref medium nexthop via fe80::4e16:fc01:298c:98ed dev ens3 weight 1 nexthop via fe80::4e16:fc01:2983:88aa dev ens3 weight 1 Related-Bug: #1786259 Change-Id: I7729730df66a4dc7ee11df1d23b19b9c0794b575 --- functions-common | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/functions-common b/functions-common index 84d281b21e..e265256ccf 100644 --- a/functions-common +++ b/functions-common @@ -771,7 +771,7 @@ function get_default_host_ip { if [ -z "$host_ip" -o "$host_ip" == "dhcp" ]; then host_ip="" # Find the interface used for the default route - host_ip_iface=${host_ip_iface:-$(ip -f $af route | awk '/default/ {print $5}' | head -1)} + host_ip_iface=${host_ip_iface:-$(ip -f $af route list match default table all | grep via | awk '/default/ {print $5}' | head -1)} local host_ips host_ips=$(LC_ALL=C ip -f $af addr show ${host_ip_iface} | sed /temporary/d |awk /$af'/ {split($2,parts,"/"); print parts[1]}') local ip From d6e3d06001e7c4bb092cf9dc77188627bd2b9358 Mon Sep 17 00:00:00 2001 From: Rajat Dhasmana Date: Thu, 22 Feb 2024 00:11:15 +0530 Subject: [PATCH 436/574] Add config options for optimized upload volume When glance is using cinder as a backend, we can use optimized path for upload volume to image operation. 
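
As a rough illustration (using the new DevStack variables described
below, in a localrc-style fragment), a deployment would opt in with:

    CINDER_UPLOAD_OPTIMIZED=True
    CINDER_UPLOAD_INTERNAL_TENANT=True
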
The config options image_upload_use_cinder_backend and image_upload_use_internal_tenant are used to configure optimization in the upload volume to image workflow where we create a cinder volume in the internal service project and register the location in glance. Recently it was found that the glance location API workflow was broken[1] for the upload volume case and it wasn't detected because we are not testing it in our glance cinder job "cinder-for-glance-optimized". This patch adds the config option to test the optimized path. Note that the optimized upload functionality is only possible when glance uses cinder as it's backend since it uses clone volume functionality to clone the Image-Volume from the source volume. [1] https://bugs.launchpad.net/glance/+bug/2054575 Change-Id: I521ed04696a5a545b2a2923cf8008bd64add7782 --- lib/cinder | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/lib/cinder b/lib/cinder index ae898e9522..f80542a35f 100644 --- a/lib/cinder +++ b/lib/cinder @@ -183,6 +183,12 @@ fi # Environment variables to configure the image-volume cache CINDER_IMG_CACHE_ENABLED=${CINDER_IMG_CACHE_ENABLED:-True} +# Environment variables to configure the optimized volume upload +CINDER_UPLOAD_OPTIMIZED=${CINDER_UPLOAD_OPTIMIZED:-False} + +# Environment variables to configure the internal tenant during optimized volume upload +CINDER_UPLOAD_INTERNAL_TENANT=${CINDER_UPLOAD_INTERNAL_TENANT:-False} + # For limits, if left unset, it will use cinder defaults of 0 for unlimited CINDER_IMG_CACHE_SIZE_GB=${CINDER_IMG_CACHE_SIZE_GB:-} CINDER_IMG_CACHE_SIZE_COUNT=${CINDER_IMG_CACHE_SIZE_COUNT:-} @@ -192,6 +198,11 @@ CINDER_IMG_CACHE_SIZE_COUNT=${CINDER_IMG_CACHE_SIZE_COUNT:-} # enable the cache for all cinder backends. CINDER_CACHE_ENABLED_FOR_BACKENDS=${CINDER_CACHE_ENABLED_FOR_BACKENDS:-$CINDER_ENABLED_BACKENDS} +# Configure which cinder backends will have optimized volume upload, this takes the same +# form as the CINDER_ENABLED_BACKENDS config option. By default it will +# enable the cache for all cinder backends. +CINDER_UPLOAD_OPTIMIZED_BACKENDS=${CINDER_UPLOAD_OPTIMIZED_BACKENDS:-$CINDER_ENABLED_BACKENDS} + # Flag to set the oslo_policy.enforce_scope. This is used to switch # the Volume API policies to start checking the scope of token. by default, # this flag is False. @@ -353,6 +364,14 @@ function configure_cinder { iniset $CINDER_CONF DEFAULT default_volume_type ${default_name} fi configure_cinder_image_volume_cache + + # The upload optimization uses Cinder's clone volume functionality to + # clone the Image-Volume from source volume hence can only be + # performed when glance is using cinder as it's backend. + if [[ "$USE_CINDER_FOR_GLANCE" == "True" ]]; then + # Configure optimized volume upload + configure_cinder_volume_upload + fi fi if is_service_enabled c-bak && [[ -n "$CINDER_BACKUP_DRIVER" ]]; then @@ -729,6 +748,18 @@ function configure_cinder_image_volume_cache { done } +function configure_cinder_volume_upload { + # Expect UPLOAD_VOLUME_OPTIMIZED_FOR_BACKENDS to be a list of backends + # similar to CINDER_ENABLED_BACKENDS with NAME:TYPE where NAME will + # be the backend specific configuration stanza in cinder.conf. 
+ local be be_name + for be in ${CINDER_UPLOAD_OPTIMIZED_BACKENDS//,/ }; do + be_name=${be##*:} + + iniset $CINDER_CONF $be_name image_upload_use_cinder_backend $CINDER_UPLOAD_OPTIMIZED + iniset $CINDER_CONF $be_name image_upload_use_internal_tenant $CINDER_UPLOAD_INTERNAL_TENANT + done +} # Restore xtrace $_XTRACE_CINDER From 80c1605a1df9687c7d1d842b258a3d99ec2eda35 Mon Sep 17 00:00:00 2001 From: Rajat Dhasmana Date: Wed, 28 Feb 2024 13:08:12 +0530 Subject: [PATCH 437/574] Configure cinder service token Glance is implementing new location APIs, for which, cinder needs to pass service token to register a location in glance. This is required in the case when glance is using cinder as a backend and cinder tries to upload a volume in the optimized path. We are adding a new option, ``CINDER_USE_SERVICE_TOKEN`` that will configure the service user section in cinder.conf. By default, it is set to False. Change-Id: I0045539f1e31a6d26c4f31935c5ddfaaa7607a48 --- lib/cinder | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/lib/cinder b/lib/cinder index f80542a35f..0adca4f4ec 100644 --- a/lib/cinder +++ b/lib/cinder @@ -88,6 +88,10 @@ CINDER_SERVICE_REPORT_INTERVAL=${CINDER_SERVICE_REPORT_INTERVAL:-120} # thin provisioning. CINDER_LVM_TYPE=${CINDER_LVM_TYPE:-auto} +# ``CINDER_USE_SERVICE_TOKEN`` is a mode where service token is passed along with +# user token while communicating to external REST APIs like Glance. +CINDER_USE_SERVICE_TOKEN=$(trueorfalse True CINDER_USE_SERVICE_TOKEN) + # Default backends # The backend format is type:name where type is one of the supported backend # types (lvm, nfs, etc) and name is the identifier used in the Cinder @@ -445,6 +449,10 @@ function configure_cinder { iniset $CINDER_CONF oslo_policy enforce_scope false iniset $CINDER_CONF oslo_policy enforce_new_defaults false fi + + if [ "$CINDER_USE_SERVICE_TOKEN" == "True" ]; then + init_cinder_service_user_conf + fi } # create_cinder_accounts() - Set up common required cinder accounts @@ -761,6 +769,12 @@ function configure_cinder_volume_upload { done } +function init_cinder_service_user_conf { + configure_keystone_authtoken_middleware $CINDER_CONF cinder service_user + iniset $CINDER_CONF service_user send_service_user_token True + iniset $CINDER_CONF service_user auth_strategy keystone +} + # Restore xtrace $_XTRACE_CINDER From 1a336ef4aec1c908b139db3b67e766a437c2cbb9 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Mon, 12 Aug 2024 11:34:02 -0700 Subject: [PATCH 438/574] Trivial fixes from review of os-test-images This fixes some trivial things from the review where this support was added: https://review.opendev.org/c/openstack/devstack/+/925425 Change-Id: I990a3816f425a1b4c8680ec43d698e32eea2238b --- lib/tempest | 6 +----- stackrc | 3 +++ 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/lib/tempest b/lib/tempest index 24c8271132..310db2daa6 100644 --- a/lib/tempest +++ b/lib/tempest @@ -105,10 +105,6 @@ TEMPEST_CONCURRENCY=${TEMPEST_CONCURRENCY:-$(nproc)} TEMPEST_FLAVOR_RAM=${TEMPEST_FLAVOR_RAM:-192} TEMPEST_FLAVOR_ALT_RAM=${TEMPEST_FLAVOR_ALT_RAM:-256} -OSTESTIMAGES_REPO=${OSTESTIMAGES_REPO:-${GIT_BASE}/openstack/os-test-images.git} -OSTESTIMAGES_BRANCH=${OSTESTIMAGES_BRANCH:-$BRANCHLESS_TARGET_BRANCH} -OSTESTIMAGES_DIR=${DEST}/os-test-images - # Functions # --------- @@ -369,7 +365,7 @@ function configure_tempest { iniset $TEMPEST_CONFIG image images_manifest_file ${OSTESTIMAGES_DIR}/images/manifest.yaml local image_conversion image_conversion=$(iniget $GLANCE_IMAGE_IMPORT_CONF 
image_conversion output_format) - if [[ "$image_conversion" ]]; then + if [[ -n "$image_conversion" ]]; then iniset $TEMPEST_CONFIG image-feature-enabled image_conversion True fi fi diff --git a/stackrc b/stackrc index 0492c73d1f..0b3e1c61da 100644 --- a/stackrc +++ b/stackrc @@ -304,6 +304,9 @@ TEMPEST_REPO=${TEMPEST_REPO:-${GIT_BASE}/openstack/tempest.git} TEMPEST_BRANCH=${TEMPEST_BRANCH:-$BRANCHLESS_TARGET_BRANCH} TEMPEST_VENV_UPPER_CONSTRAINTS=${TEMPEST_VENV_UPPER_CONSTRAINTS:-master} +OSTESTIMAGES_REPO=${OSTESTIMAGES_REPO:-${GIT_BASE}/openstack/os-test-images.git} +OSTESTIMAGES_BRANCH=${OSTESTIMAGES_BRANCH:-$BRANCHLESS_TARGET_BRANCH} +OSTESTIMAGES_DIR=${DEST}/os-test-images ############## # From 9e1348f81b84e3bef93d6998606e09725c585b1a Mon Sep 17 00:00:00 2001 From: Takashi Kajinami Date: Tue, 20 Aug 2024 17:23:02 +0900 Subject: [PATCH 439/574] etcd: Replace deprecated --debug option ... to resolve the following warning. [WARNING] Deprecated '--debug' flag is set to true (use '--log-level=debug' instead Change-Id: Idb412cea64dfc42e3d1223b77f134804eeb7bd60 --- lib/etcd3 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/etcd3 b/lib/etcd3 index 4f3a7a4349..0d22de8c73 100644 --- a/lib/etcd3 +++ b/lib/etcd3 @@ -51,7 +51,7 @@ function start_etcd3 { fi cmd+=" --listen-client-urls http://$SERVICE_HOST:$ETCD_PORT" if [ "$ENABLE_DEBUG_LOG_LEVEL" == "True" ]; then - cmd+=" --debug" + cmd+=" --log-level=debug" fi local unitfile="$SYSTEMD_DIR/$ETCD_SYSTEMD_SERVICE" From 5ed2b7c6b2e2a5da50c3db9cda9e9b8e4ae4402f Mon Sep 17 00:00:00 2001 From: Sean Mooney Date: Fri, 30 Aug 2024 14:15:40 +0100 Subject: [PATCH 440/574] make devstack-platform-ubuntu-noble voting devstack-platform-ubuntu-noble was added in Ie1f8ebc5db75d6913239c529ee923395a764e19c and has been runnning for a little over 2 months in that time https://zuul.openstack.org/builds?job_name=devstack-platform-ubuntu-noble the job has been pretty stable so its time to make this voting in advance of it becoming required in the 2025.1 release. Change-Id: Iffd6ccf9603117d6720931e260afa2da13c26ec4 --- .zuul.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.zuul.yaml b/.zuul.yaml index 59a577e522..a1c251a398 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -732,7 +732,6 @@ description: Ubuntu 24.04 LTS (noble) platform test nodeset: openstack-single-node-noble timeout: 9000 - voting: false vars: configure_swap_size: 8192 @@ -917,6 +916,7 @@ - devstack - devstack-ipv6 - devstack-platform-debian-bookworm + - devstack-platform-ubuntu-noble # NOTE(danms): Disabled due to instability, see comment in the job # definition above. # - devstack-platform-rocky-blue-onyx From 0ff627286297a3957143577412884dc50ff8a57a Mon Sep 17 00:00:00 2001 From: yatinkarel Date: Mon, 2 Sep 2024 17:29:55 +0530 Subject: [PATCH 441/574] Run chown for egg-info only if the directory exists 9-stream jobs failing since [1] merged as these still use GLOBAL_VENV=False. egg-info directory is not created in project source directory when pyproject.toml is used in the project. pyproject.toml being added across projects[2] to support pip 23.1. 
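
For such projects the glob simply has nothing to match (nova is used
here purely as an illustration), so the unguarded chown would fail:

    $ ls /opt/stack/nova/*.egg-info
    ls: cannot access '/opt/stack/nova/*.egg-info': No such file or directory

The chown is therefore now guarded by a directory check.
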
[1] https://review.opendev.org/c/openstack/nova/+/899753 [2] https://review.opendev.org/q/topic:%22pip-23.1-support%22 Change-Id: I53954a37461aee5dd7f487d6bd205caef4408392 --- inc/python | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/inc/python b/inc/python index 1fd414773f..2083b74dc1 100644 --- a/inc/python +++ b/inc/python @@ -474,7 +474,10 @@ function setup_package { pip_install $flags "$project_dir$extras" # ensure that further actions can do things like setup.py sdist if [[ "$flags" == "-e" && "$GLOBAL_VENV" == "False" ]]; then - safe_chown -R $STACK_USER $1/*.egg-info + # egg-info is not created when project have pyproject.toml + if [ -d $1/*.egg-info ]; then + safe_chown -R $STACK_USER $1/*.egg-info + fi fi } From d7c3c0accc89e4e99915c24fa7c3bff2e90a715e Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Fri, 20 Sep 2024 10:56:04 +0100 Subject: [PATCH 442/574] lib/cinder: Remove 'volume3' endpoint This was needed when 'block-storage' pointed to the v2 API. This is no longer the case (and hasn't been for some time). This is unnecessary duplication now. Change-Id: I00cfb56d3e54d0162b1609f4bf58814e9000c103 Signed-off-by: Stephen Finucane Depends-on: https://review.opendev.org/c/openstack/tempest/+/930296 --- lib/cinder | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/lib/cinder b/lib/cinder index 0adca4f4ec..2f9955b1d3 100644 --- a/lib/cinder +++ b/lib/cinder @@ -483,23 +483,11 @@ function create_cinder_accounts { "block-storage" \ "$REGION_NAME" \ "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v3/\$(project_id)s" - - get_or_create_service "cinderv3" "volumev3" "Cinder Volume Service V3" - get_or_create_endpoint \ - "volumev3" \ - "$REGION_NAME" \ - "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v3/\$(project_id)s" else get_or_create_endpoint \ "block-storage" \ "$REGION_NAME" \ "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST/volume/v3/\$(project_id)s" - - get_or_create_service "cinderv3" "volumev3" "Cinder Volume Service V3" - get_or_create_endpoint \ - "volumev3" \ - "$REGION_NAME" \ - "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST/volume/v3/\$(project_id)s" fi configure_cinder_internal_tenant From 2d487d8c7b424a76eb484d09f09530e24b7207fb Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Fri, 20 Sep 2024 11:00:39 +0100 Subject: [PATCH 443/574] lib/cinder: Strip project_id from URL This is optional. There's no need to include it. Change-Id: I2e745865696dbb317f819ecb74f5b5df88a9ed76 Signed-off-by: Stephen Finucane --- lib/cinder | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/cinder b/lib/cinder index 2f9955b1d3..6da5d4579d 100644 --- a/lib/cinder +++ b/lib/cinder @@ -482,12 +482,12 @@ function create_cinder_accounts { get_or_create_endpoint \ "block-storage" \ "$REGION_NAME" \ - "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v3/\$(project_id)s" + "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v3" else get_or_create_endpoint \ "block-storage" \ "$REGION_NAME" \ - "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST/volume/v3/\$(project_id)s" + "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST/volume/v3" fi configure_cinder_internal_tenant From 9b4439038144f380c1d1c8a8e87ad76cd26b72b5 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Fri, 20 Sep 2024 11:03:15 +0100 Subject: [PATCH 444/574] lib/cinder: Align endpoint creation code Do this the same way we do it for Nova, to make for easier review. 
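
The registered URL itself is unchanged by this refactor; for example,
with the default uwsgi deployment (CINDER_USE_MOD_WSGI=True) and an
illustrative service host of 203.0.113.10, the block-storage endpoints
still point at:

    http://203.0.113.10/volume/v3
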
Change-Id: I31877705894a21570f130723e0a27ff38f945eea Signed-off-by: Stephen Finucane --- lib/cinder | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/lib/cinder b/lib/cinder index 6da5d4579d..dc284920e0 100644 --- a/lib/cinder +++ b/lib/cinder @@ -476,20 +476,19 @@ function create_cinder_accounts { create_service_user "cinder" $extra_role - # block-storage is the official service type - get_or_create_service "cinder" "block-storage" "Cinder Volume Service" - if [ "$CINDER_USE_MOD_WSGI" == "False" ]; then - get_or_create_endpoint \ - "block-storage" \ - "$REGION_NAME" \ - "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v3" + local cinder_api_url + if [[ "$CINDER_USE_MOD_WSGI" == "False" ]]; then + cinder_api_url="$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT" else - get_or_create_endpoint \ - "block-storage" \ - "$REGION_NAME" \ - "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST/volume/v3" + cinder_api_url="$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST/volume" fi + # block-storage is the official service type + get_or_create_service "cinder" "block-storage" "Cinder Volume Service" + get_or_create_endpoint \ + "block-storage" \ + "$REGION_NAME" \ + "$cinder_api_url/v3" configure_cinder_internal_tenant fi } From 03bc214525c7d7f9dfb6cb855025b70053839a72 Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Wed, 25 Sep 2024 12:03:40 -0700 Subject: [PATCH 445/574] Update DEVSTACK_SERIES to 2025.1 stable/2024.2 branch has been created now and current master is for 2025.1. Change-Id: If5c9de9ddfab1bff313c70cf2c40ce7fbe60473f --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index 0b3e1c61da..ab1f8a6ffd 100644 --- a/stackrc +++ b/stackrc @@ -249,7 +249,7 @@ REQUIREMENTS_DIR=${REQUIREMENTS_DIR:-$DEST/requirements} # Setting the variable to 'ALL' will activate the download for all # libraries. -DEVSTACK_SERIES="2024.2" +DEVSTACK_SERIES="2025.1" ############## # From fec589a1ce6b1dd29e27ed2d5aa088390a7dfa92 Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Thu, 26 Sep 2024 21:23:17 +0200 Subject: [PATCH 446/574] Bump cirros version to 0.6.3 This is the latest cirros release, featuring an updated kernel and some fixes and added features, let's use it. [0] https://github.com/cirros-dev/cirros/releases/tag/0.6.3 Change-Id: I2506fa713e0426789fa40a5f4f7fd4e963a158f0 --- doc/source/guides/nova.rst | 2 +- stackrc | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/doc/source/guides/nova.rst b/doc/source/guides/nova.rst index 705d427e68..6b8aabf8db 100644 --- a/doc/source/guides/nova.rst +++ b/doc/source/guides/nova.rst @@ -122,7 +122,7 @@ when creating the server, for example: .. code-block:: shell $ openstack --os-compute-api-version 2.37 server create --flavor cirros256 \ - --image cirros-0.6.2-x86_64-disk --nic none --wait test-server + --image cirros-0.6.3-x86_64-disk --nic none --wait test-server .. note:: ``--os-compute-api-version`` greater than or equal to 2.37 is required to use ``--nic=none``. 
diff --git a/stackrc b/stackrc index 0b3e1c61da..b9f86d1ae6 100644 --- a/stackrc +++ b/stackrc @@ -656,7 +656,7 @@ esac #IMAGE_URLS="https://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img" # cirros full disk image -CIRROS_VERSION=${CIRROS_VERSION:-"0.6.2"} +CIRROS_VERSION=${CIRROS_VERSION:-"0.6.3"} CIRROS_ARCH=${CIRROS_ARCH:-$(uname -m)} # Set default image based on ``VIRT_DRIVER`` and ``LIBVIRT_TYPE``, either of @@ -673,11 +673,11 @@ if [[ "$DOWNLOAD_DEFAULT_IMAGES" == "True" ]]; then lxc) # the cirros root disk in the uec tarball is empty, so it will not work for lxc DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs} DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_FILE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs.img.gz} - IMAGE_URLS+="https://download.cirros-cloud.net/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";; + IMAGE_URLS+="https://github.com/cirros-dev/cirros/releases/download/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";; *) # otherwise, use the qcow image DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk} DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_FILE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img} - IMAGE_URLS+="https://download.cirros-cloud.net/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";; + IMAGE_URLS+="https://github.com/cirros-dev/cirros/releases/download/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";; esac ;; vsphere) @@ -688,7 +688,7 @@ if [[ "$DOWNLOAD_DEFAULT_IMAGES" == "True" ]]; then # Use the same as the default for libvirt DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk} DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_FILE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img} - IMAGE_URLS+="http://download.cirros-cloud.net/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";; + IMAGE_URLS+="https://github.com/cirros-dev/cirros/releases/download/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";; esac DOWNLOAD_DEFAULT_IMAGES=False fi From 6512f0140c9a312f9455bfe420462c64635fd622 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Mon, 26 Aug 2024 12:19:06 +0100 Subject: [PATCH 447/574] doc: drop sphinxcontrib-nwdiag, sphinxcontrib-blockdiag usage sphinxcontrib-nwdiag does not appear to be maintained anymore [1] and there have been no releases in nearly 5 years. Statically generate the images and include them this way. We can revert this change if the maintainership issue resolves itself. sphinxcontrib-blockdiag has had activity more recently [2], but it's still been nearly 3 years. More importantly, we don't actually use it so there's no reason to keep it around. 
[1] https://pypi.org/project/sphinxcontrib-nwdiag/#history [1] https://pypi.org/project/sphinxcontrib-blockdiag/#history Change-Id: Ic5244c792acd01f8aec5ff626e53303c1738aa69 Signed-off-by: Stephen Finucane --- doc/requirements.txt | 4 -- .../assets/images/neutron-network-1.png | Bin 0 -> 10251 bytes .../assets/images/neutron-network-2.png | Bin 0 -> 11243 bytes .../assets/images/neutron-network-3.png | Bin 0 -> 13293 bytes doc/source/conf.py | 12 ++-- doc/source/guides/neutron.rst | 60 ++---------------- 6 files changed, 12 insertions(+), 64 deletions(-) create mode 100644 doc/source/assets/images/neutron-network-1.png create mode 100644 doc/source/assets/images/neutron-network-2.png create mode 100644 doc/source/assets/images/neutron-network-3.png diff --git a/doc/requirements.txt b/doc/requirements.txt index ffce3ff74c..7980b93ed7 100644 --- a/doc/requirements.txt +++ b/doc/requirements.txt @@ -4,8 +4,4 @@ Pygments docutils sphinx>=2.0.0,!=2.1.0 # BSD openstackdocstheme>=2.2.1 # Apache-2.0 -nwdiag -blockdiag -sphinxcontrib-blockdiag -sphinxcontrib-nwdiag zuul-sphinx>=0.2.0 diff --git a/doc/source/assets/images/neutron-network-1.png b/doc/source/assets/images/neutron-network-1.png new file mode 100644 index 0000000000000000000000000000000000000000..7730ca93f175651c1ec860f37ea6be33310e73e1 GIT binary patch literal 10251 zcmeHtc~Fyi*CmZ8pt5M&;sOMeMn9!N1wk|W~rH}si~LBQw6DK`91e{ z?>YCJ%e|A1wrZ=ltX5D^P_wr?`ip|XckdMx6i=wEf^VX(nAa#M>}ax`jL)jns8@T< z{r_oy9t7huP_xHhH`uc(>iHvqxQrj|+XY_nH>-XkjjG*@+ZgZ8?ybLp&obvuUgMfO zu*}vu9zc4fAb-w26_K;4ub*GHG+9X~Ph3zn9Jh=fJhl04HDl)m;j1){?GGj5*!A-d zt!J)0!*`>OZraX$V1_w8dwqO;UTnQVN}W(QpfmK5yLrv#pB+j{-JT4;$uD)fe!{%6 z5T8{?;C>9kjq*lT?jBc-9?YpUVHEAfv48Y*G5$t1?yhXTV|T72T5|E;qnKcp=)QPM z3ntr^+8l@*xow10h#u4v3`8~IIH;wLhMz+ZDW%5DJbqhE*#594-_3mi{(Q+4f6p^4 zy!V>x&ReB?Sex{mPc>ka72&g<^U!0rjmK|Vmo2=Tj;B%_?msp1$rV*->_f9Z^&3ZG|PqeMy?bnOSWmG$H0}z!oa=%xb@2izn z-mB=Y<;m_nx%eR5LQUpHpEK1W%SQ2K#uM>ur22Uu1z#sU;_i9LmCpjr3qR2LUu&Ek zo?C1-Hvh=;a@eW=7Oy+r)6g;<)Dsvz9cgji=i&jYck!N!+r)bP`+GJlAIZdY2H`wh zI%74n+*NztAg4B)+67Mm&_%@yg6KIlK@ zm7^>4W9Msmk_nQi5h0x_WSL1CTU%6yB^;?p)@~wcmt}amuxK5`TxVh~o0w}M59Xwo z__O-;!M)$Z7O7lL_Y69<0qc^^m<~l|pD@fokd~)qQ_CV@$@%l=Bm9X(Vrsz{yAthY z=C?7v3n80E$T$!OmMB8%@#21+UY&Va%rQ)(!DkUC*kjos|p{1qe7A9R5IU&R6$9I#Xmd2dwL#MwsmY0`nc$IU) zoJvCFxfcbna~>!%LIcgxw6W4n*0Qy=HOWOqE{%mx z)-TYN5fM`e+U)Npk^%7~O%}sqvApGHFv`vk6EFOZ-}MlA)hlKEoR-a%8Dz)KX(Jex2abK)h*9)(XL=glqw=g#5}Q9)jn69MQv(<4&`bH~B@3v70o3-i0{l#( zU&H{q0N))Y%;weEv1ZAA3yZX=HMA*-rj5AOFG?7pH6Z&|BK;=AZfFWPV3oh~evnIg zF%z}Sp-W*CC02S|B$EwKGS6{zLlS2OMXW+6bmY?PShAy|W3Ce{OFuzBw)x+5^-{(^ z`hViP-+N`mz;Y`yaPqnchJ4TebY*Ym31buZ*%;q6ef3q=uh{)t)HkW>2R0t7nB-1R zPajjsqnAB*OdTXoTv|3_e8VN8b(6gZrbAI%5;rz-Ly@_+HM`4yhh?|z=gOCIFa6!E97+zm2Nk#^zpXL0~A3?1o9ATLevDl%odek|ijkZG3av+LK zZu2T0{<1^D}@)I6vssA4ur!X{0kyxj3whBQ$&O*C0%bZrLdrgiLP zn=nGMgEX^4#)(XnZKX|bmD!T?K2VJq9g)d0SK6GbjAF!S^4Le@C{K#$bh;=M+3-PF zH$gQv!+d{d=v$c)`-oX`Qj&22Ypm(hr%%Udh=2>}Lv=gv&uPAI6(4JS>cE2NT_%NY{imAB;VcTnmloA;CX9Xs_azz^h8!ts336|-|a1B zCN3d$nMp}W7aR*&@!{Gs-n{sRmLyx|^nlaU=ec6hVKX6rNEjLLf@FR(9CHpbX@N$;A$tH!YF?nLhbCqM2AoH`JWFAZ)T@MJDS*EA$ zU^+TEkqJsO9s9B6M%Rw0U{&%2-uJLHw+dB#AF}Mqu*y~-3RfskGNMa$z zS$8zMpYH5zjH-?ro_mU}?|%xV&#mNKdifX-NFv8MQe;$=bjS=N%Pww~g^lm>)og&kWKF8{e2qEE9{37+GeRY^gYE&YM)26ufxD z^!`^BGV}#8U5gG?_Do726+m&n5kpB; zdu9z<&Cs4@#yRjfgwuOzdC&5RGNWSW;*LS;bexMHT3QHc$%JHU0@wuL>aH#s7`D=(mHM69)E}lu zkD-3GU+S<7jYM`!6;hqc^CK46SHokery5S+y8)~874}bQUFPlYCwnj+ac+|w@W>iJFb|8?GKNLnRb@c{5G(= znXH3Akktmrxw$Ab3KexdA|hh%mP9Y^$X-MHnHr(3PQ;3k?J47FCQqK^7T~!-xOQGY 
ziqrE`N&&u|nSMd%(LpF3@#eEYtD^&_A9>bjC+*B}Ec9-CW7#;w0c?gs=&yStRa|LL zlliHPd%|+$>?(;`u0T(DTWv|4@LU!}M75KZ!?@FK>7F%RcQ2RM$F8o^jrQQx-}fS& zvIi(ti?vWSo$@V1hm{SvkWr2VuSOhd`MA@VIbOS%)T?b~DT0ZS?~Vi0NQfa*JA#VgJ&ZDwnhIuDEx z8*~$oX8`Ez6d9$EXvNb!53%?FYPMu`-)2R6-e@~BSHg`VmhqjmHY=V!gMGD$^4e7U zyF*ArPzY$ur5s4x>f_($RcCs=52==DqwP}%B?~-R3ocPs!1Z(aVIrA@JaB-kr9Eom zo}p%YTiy1KdZE2fu0}Pn<8iTL3e9(UEHErejtsa&5`wGIqXClTSX=(uNdI;N`?te1 ze*Y-DzfG^oQe&!U8jzfFrg)6fOe+y6_dT%=_*iL^_D+iNQyRR%NU2g~%tK^4f^ZqN zTVxGYh|tLS!vP<+J*b{X-3jS+(u)5$oU<=@kF?$wJ+<^fuU~yhxUcbupK@uq)K>YX zO0?3yj{gfWxC=-&n`dz=#5t=#aKvu~n@??9<9YWzB5XzI0%*LC*{^?SH^bb~qC?+g zpStLI*h#Pd0pB~NZ7CG~w%Cu*h{6X{dQ>OspDEmBpSmkH#lXIOp7CO8r>a41YSv-h zJMr=H3s9ZU30%3VK%}2zA=saG>*)R%?XiPOF^xfLnp~(`ET!j3UhcYo>5-?4ihh?eJ>M{8JR$z1_8pK3CIF>bCk)Gh zI5=jQJ`niSR_vM3i1UYIaCU+iq&77Cbh+{>VGlt22EC{3YxCp-TZ|rOvsh1~5qOmX z?}^2F0<-K1pA`?h{N65fu&8KPanUYrWh=mqxmWVkKU58n0R-;G$W7%@!#k7oQmna^ zW6gL=JSl@5*Jl*!uc*3KtbKc(xeA=;da9yswN-TNp`Tf^IknV;Y{Nz^5m|9looh#& z#o*5ek*gaMO88V}t8`*!L6A&nKy#>GWAoxm0~6~^9_qi9Z9=Yoc@a0a!2kM6^s;qL z8PBUnz*L^kww@VX`=I;Sa|+j@hEcMn2kr2DYE@;-&ZwygAh}a_1&2;hToe-p&UL<= zKw)IIZOw(bplAsYe7-+da^G*pvMLhczv+YCcX~Ge-6OhE#_}XxjIma$lshf-=@Io`3w_?l%mpg3r?pbG_&%e{cO*-vQlpT&jVM>Vz_#`-L7%yRBIy z?>JsKus-kGtzuAX4&<)|Y6CR4GC5*~juySqh?-(H$Rv|8Awn{0i3;?>1nMIg=xbgoo|KgW@txYh2c>V< zt);3+SD#}9@_8k1JSLpRhslA)Nb^ea;zN49iWEP{6 zJ>YUp3^MJ6|S!T!BLGCIZGcE=T)0>yjqAxKT(&;>lua)K*4&td-z(4XnG{@0I z-a3JH5<(`sqz~S% zSjVK@Pm|C84^XHtYt?nVH@yhvd?6Fg^)1eWWaN`l;{>&3zN3@}& zDp%An8zH=*K!g(1zBE?y(O#EHEpK%Z-YcHb2{*@)s73rAqBM^ZCQ( zuHaCeT5lIzK(g^Yq5^1xMQ8f}146$yXW9a$;6bICyeg4-A9w&qkgbS zL%$5Yx~2^a@r;#aHPZ#q_BpbXUZJMHww!LxDAQaspiAzawgihnOaAO@+r!*chMLE8 zd`T-6>6AH#AUvs_2`FtIo8tS^AG!}A0pYv__Z#$}YpUk!Y&Grrk_2gOaWs;<+N9>K z%{Etb!?s>%`N5%}x3gtIrlsI_4}@HAXi)l*uE`&4LNm6_7)P4;mnfyi1^&aSEsomz zT4!89?^2jbQChFC?Y}&h)nZj@{#u?Ko)BYT4*y1czoN{=|N{8qI2GtT==mD-ocgz_3fh=jWE zDrd?(^YGgh)T{JqOvbjVnArk{EU%l$Fgh>>oR`AAdKq3Ggz7uYPxoYW(=s zo5n4^hm?Atc@^Nif}L0Ts$2|cU6-GojPX_Z7Bt{CXX&@EEi%H5n;advJ;h%+M9sjz zdO2BLVV$Dt|Jos<(5%OY|5(`D7nvlg>{(!53h%)ss7;;K=}G%TA>sj9^|gd6S{tC5 zZPnz3%1-7=7l(P5lY1jhj0-3CNrOJ!OS}&5w3s$|SzJ{W|`?PoTw5UOQCaN0o3VYw_I4i-j_C^;1~|7RK~Q znpk}}X5~!L@9%1<--K=<%i?qCb@(Ul1&akdSA4w_ue@nc+lVd4z5K+W$TXlmtFI`*{)h2ro6w% zo|FjCJzw(_#6#W<^BsfXB(Vnlidhz@?n3?8gRvh088vOP4eYyeaeW7_Tu`)J}IGc#26jNjSzr|Da|a2oHNf>E_H5LsFJ0uWxAn-uSyK% zew8Gi!1a>2j#7;UyuMLBa`Cnzr7RM~n3TkWvZDanxm+@tL z`p*w2$51uiQfHO4I2kQnB*_VuK_TrEjX#qCjet!9ZI*W8@pvGhLkAC17w82y9D` zegNHbOjjYOZA9X6;LCl8pY#adkg%YCbfFR7zat<<7~ejkaA6$jfx{vYb%FUZ$y# z#Y`cUw)6E>Fxueha=F_F2Jlkxx4KV`@9Wok7%8xE;@Q15-$wj9nBVU21?AZ-UH+}e z?at^O4p_tBK>iy<`x~`QJYD`G{xR)^-)7xM8tCPzNkrJzd3q`G;D=RW;Z|^U|ATc< z=^gWr0#gkvKkOcRm!yKZApF|RUb%n%adGo4EF|Rue$eYW5?ywB=6xgOEnwr%;TZF6EIwb&3zzwKP1l zsx0>8VkG1Ki>5kW`Pq|O-(KldsokIB$n-Afq{a0mMZOA3=^vTsxt2J~ihk$^_hPx5+D>Z%_*oLA+J zz`Zi3{35AhJ404JPdK6CUDfdvX75Z zOq+4ZrkZv)Z3ltgcn(=&^EPX}@A?k2bu?K)xavtxXYJ0Qme89Lutvf&GJCPnTNNLl zzwvH#_;T9TTVcJ6&XH}|2{+h4CpRXpp`TeRj^C?YmD)QKukgQJCI5#aRQwVopO4O( z``1(W>cQL6(X*=*>h4kax?$f}%RQt$sxWp1W%XN>i=yi5wpr8n&hE%(Cs!LN96HSo zT4!^GWkz6`2~rL4Xubz^;lT#GFDEUdzW&#ekQGU`Uv1!@@}EhRca&jUc|J6^zWCXwictAh)BEsOcarJw;`+-A;!~x)& z118AFSm3_HREEMm@7e359DaMnmrpd6s7nudnij{%~HYjO7sY|#qao3!R7&i*KKD4D@kYk==eno?TgNo z`xqlH_DcMrtBsc}(B49cbJI-0Xfvuo(%hSQSh435w*0N&B%y7i-=W6Zf4kxwDGd%J z-cVHaw+&G}>0=(+@gY!n!}r|R#6l*o*J0ul(x6E{OFn*#&qu&1$x?=bn5!zdAyFZvW z9uI|~_dh2w)xU~FdTUJ=`xjD1L@=jng1H0Oll6~5gMmDN2@lK}+RX?Wo+N;ZIIn-W zW)$GJ{xxr0Iv}602!XKPU9UO!q8Y>50O_rtLic{qBl8}$s;W8P9aqv?_Iyj?ak(;+ 
z&oF#I_Q|J#aBGD9v=gQ;T?tI?zpXaXQ%@_*vyU~$u&Eq^mqd60GauU%{HMw(EpxQLmv_$*UQjKkPlBt z*1d$@}U4CD5M6e5&!%iW_C91U$*VEegFGQPBTecKaD9gvrjx0CgdH@d`97|1*!DMW^;Ji%Ni-W8 z5>W7R@7=DjcY$-co%Xa__Co1yS5IMA+@1=;)=TbzS6e0sM<>_h^&F@l=s+=)d%IjU z?Y?}hKXbvf8wV$o1D^_2kXU2_qxG3D!wENfbe>w(>vhNoEbZlT2HGNiqANymDM=3TJ GZ~Z%CB(@^} literal 0 HcmV?d00001 diff --git a/doc/source/assets/images/neutron-network-2.png b/doc/source/assets/images/neutron-network-2.png new file mode 100644 index 0000000000000000000000000000000000000000..919935119de1655ca6c7ffb4e943a5f517b57fb0 GIT binary patch literal 11243 zcmeHtc~F!2wrx-poWOx2QHF?ggGXrwMNq^rj;&^C>LGD!eY2q+R}NLo--z}T&V z5+SYh0XraJkN^P^XohIh2sA=~7$TDlfiNZkLXx**pZls_)vLO7UcEowyLD2b%KH4i z{HDFvUTf{|?zhg4TQ=_Ah(I8=oH_mVcL>B9KlnSVz8a`kygdRQ^0M ze*&YiM(n@Lt|#2;xw&pbd2aHSoYzGBe6t$&Ld?49;2=iLy#Ncs<*%1IwjJ*4`y#E# zA06})Mf1e>+62|)=HQmBnYx5s{q!xn5Qw);sc01hvA}_-g80Iy9)Z}t1Lk^ngE@lO zq>4nG@L#hTvByGXJ>tO=#9GAZv;V&Bzt4t5Z?y~7U(rPzN%-%xrhhF?OTa@p$00di*f;cwKr@U7wru z&S9sA7oTXBuvC$nLC31}57Jle#Ae;7B4IydP45%jG&!_?eF^I9$)7%;g`*WNV-YAi zD|f414leXlb{}KS$CJBM44)=$rcM9&f{w!+`U&6tvgZ?fk;b~Z`x=G$F1O7~MVyQ< zyqid{9F7Y1*1lxj+=q5L$vne)8j=}^L;2Zl-lNC;w&aFX-#2{N$>ry|nUyv#>GXls zgX8vIiU3@_26#053SX$T((^-(kzL0sPFwX5hdGbkPu`ka5B6|?#kez z;)7j(gagUz9FM5D;D-0s<~bB!;mB7r6IzP@l)_=gn5^lWCePf-h1JJ zH|Rmi*7WM7&GtR+2*;UI+DRF|Ol57g6X(n%jImf%RWXEvPoKRDDj)8?diaA^I-j74 zTJciRH&khHuE6PJ=ij9y%)fOB!(4r^xH8i)BO91WjJC9|VN8Khh|BNzHc8i-jzYAbgdPIafw&{lt^t@4NGmb&BWOgRF4 zsSj-d%_!bTYtvJxRAJVn=kzkKM5on~k=O;^Op=7PI;crl>M}^dd`Li@j3Uq#GQvbw z(uAUQp;qYAV}Gs))~IcNZY-85WLa}By4*1%^0xX-eUI{q&84-LdAYeAMrouz>t!f%T#f;7#Ey!qAesk4=Zab7geXhgNHb3xa`w6rv{ zS{TY0y*%>JjIcV##)#%eTQ%F?y+c1aYc$gqRGue_J3gLEE}7PUTDjVVk#!NO)3$Ch z2*5~gU?lTqig`0Ww|kBtzDy8L(d3h~+?mNGmNWriC+4MNgCYbyTq382@17>f&z-J zo?ac5deuj(d4I~DCPY+|Iwx}F|Tv?gPLzzi^*7GUA%BQb4ofalAnk&ga~a`YDO=?ei7173(x5gI zg$-3+dp0w)#9Aq2tq>ju9-XNq?Q4zjbn71KvL-N|UOlHJ$iFgg(whBZU#4;>eUdLa zUOd&jFxHMqlCLh4oSmJ0(jD?z(Y5~R6<_T`M14anJtTW*BXND;=Jr)~el_4_%GQ2} z{{}yEoeYO8_-L2+O~sUYq@OSB{@Hh{J~mWpe3u!{X0x>|T5Q@E@R@;%NNsleLPA~P zux4uG+iORjX{=B7R2+n3wrI(xQv2Y_efDq}N;bJB2Ikc6?Be1ZWq6sQi$YQGD)ioG zH($#)`9+^SS>`HB`p&7ViBX&eDe+ZSYhGVylx2>y3q;gJz`>)_^skX-JJa$YxQYaf zsl6t2oDQUTl8i%;NrYIfzo$HqeF~R3U zj9bE1Ds`tMah1RFioX&KN9B;$S!nvw=NWup1+6dk_29TIjAAS%=b~G88L3PexN|WH z*CQh%J729OpLaX_Rcl0_nn&d(<~ehs-ZsA}ugaY!k>|cG;s^l8wq&-B2E&ehT3C)j zQ&4SV3wfgvv9G;M3q^6>km!qMiB&RXgtC&d?rbm!U`cCO(mGS-PMC^Xa>HvXH8yul z5T3L^R!n-66kL{!tGMWy2?3-HcdSoth%`t3A)H%M#6vv^Tb%8lo11IB+@4!E?oj6Q zIIwyue_MG`{LSey6ONmkQ_Z=u2{?hxa3*eUea(995BjPYLra>7e66 zvyEs18=8RO_3C{0)r#GbC#@hEyB&z4ZRbt1S_c!*nC!M7V~k<}qre14C!_>ma>Uc& z1Sw(SW`G59bH1aaV;20;U~;cbS_8TCL2bgEwt`!ETwci(y_$(4iJxtO;OOk^EL83! 
ziLI#Q#W6gsb7GR>;#SUBuMy9WkB_@~W-he7`qDqWkV-ud_``5KX3EG4gygYp_r<7< zw7kzltnD>WaX2W83}78gl6)6wasEBtyO<+*!9~7?Ioo$x+DrER-jIMsLDW19>z^M9 z@s;*RmszKrMVS(2)d;f~Hj*3Gsdy1@`EeN%v@gaMJ@Z>2kOeegyV1k95XB1*A3iJ}9v&`_KO$%GV&1jLm$Z$Ijm>h8$BW-iOiUQ- zsEAjN>MhuY!y=H_u8fFR4Ez;*`t^8)nV~P@u1AW?L4y=J*Rw9ef)43%CEKWdq1Unv zzyPKms~r^U&5=B+_Rh zL)Uhg_H`FF4K9SYxJR$LD3@`a^@Pdo3i)v?R?qHb4NWSd7^I-9vZpJ2hBocdGxW2) zlOZq-E!q|@`}3<0?b3BiGRu)rD;?PGE_a$w9f#%Rp53;$#_w9NrPVA2 zQ>l}jgu2Q6X`e+yM+l0`jf2;|(`sfE0}Jw!u!Vcg`L+@&HAsdIGDykiJ7dj}i(T1f zR*v~$*H?QLAKhViNfWtMf|zL z&-P`=-j5>v@4cT{^4838@{R(Ja_UOS7!HWdwU*fw0ULQKu(`dpY}#I+oD5Vf6b7on z!?~E%GM%H@%}7C5dy#xwih=YxvKz=wxk)P^GiEX>OOqE=kk;Tg8@DLXyU!XI+v)h+ zSbYm9<4IWU>gv)4@HZooNM?{00r4oBNh`XXklidoNKX+byg8^0mA%b+~+`?Tf)|3!bky0W45k9TO z-OWv8co}ESsF>34Ub=}ob!s#XM7=b$sISD8I^nz+HVNH5+ zhrP!R>>p(&7e%C@1NUB_PG&2K!3E%TVJoFTg2XP#?{W8RXw<9yuhayTQ*)x5qOhiy zn!j+^W-uq@W>fa?+&S?TqD)qNUtv1NZ|0mL6e6nDskA%~ZY;oeMs`KxDs${n<^avv zqGRI{($7q!)hIq*{GjN8bkYj|COEYoud_5J7WZb?Z zN4PqSD3|MxB&7XQD9_&T;gV`p&!^%U4(>!;*Bp!wD6y;^jbD`>At1yQ#PA;PQj*8i zO``zMoz^sgyz?RNjca+`X&E)9TDTPG`A$2nImVsMqAjTTu{M}%^q+qd;O^a}^TYhp zD-3Q*t9Wj~)|bW+T!njP;2wm!mz6@opobQ^OT$!+gQ=+7e1S zu|Y3V`crc%S`(o`*4glH-pGi_LHb0ZLbSNjC2btgA}8bjv&vT zg#Jq>LDJnpV6%$MY!OgVo{Yujj4khP@n9pR74Z$Hk@aEf=20#Ev_^9vi>+nP+}-{L zB`wgCe$N6>vGhQLdhewwrDC((?F4LB7mzh+|H0KN<|E7c<2(GOzUI;$ z>NT2Lv&=BSCCniE{nMQkf6{ReWijW_2+3*Co5BxjAC)4}q_qr)KH%o>WnLq(`q=ay z;^#?T4xNujzI*qM>5)0=590v24D{yMbLpix>&pfzRW4EWLE1MD`=w$JKD8yc1+ig4 zRUnr0ulQ@XGf9YJd+U35h16>_LQORT?sc_-I>~q^?KIgTWt9_lN#kqk*lv#z@qaik*=~^CapTe{yXqLN{7OE3 zc_mbEbVsFcboX`19p!;8Bxe~9Qui=~GuF+WpSV_(YpYglo;(JTYpT-nu*w{Shdh$13D%W%z>SMMSU#NM^Gq;cFW)ze2|7Pu5xxDYK0BweFA#XF! zP2m;`;<8B9(*i`j`oCnL1~V`?uI7U~|MXcs)t^^4*Qi``Q7+jarHz$+TlMq_qKcFH z^JyRb6)!}ts>*s7uk_L_28)K5aG8M+e(ZbT=IUdt7i2rJUbnVR3jeXi;K^;(UJDTO zPC#%#k0eAzi5nbIiHj~tv&pE{@)?3cs#tZztaQ4tK(T%UlBsxQA*|A`duug=&~&~}BDD_?6f#t1V6{nW#4c=t-ld+KDpQ%@ zVKA<|vP$U~iS@|}tS+KnMeoiq0`2c#?WF;+Nu^!@!9gt+)Z(~Lz%Zi6$14Fx++t&pvrQ{fUQ1-cE0drW}v6G%H_gt z>ecLg5FnxJdvjM@SgWIE3TZ0(Ze?flH65&%QqDN+ZKnWj>ja{%v?9W^LV2W{#}|*T zwLQNk-TNvwUA8!!0GZY!5Vb;90#^cpa#@*H7hX~6IFR4Iq`2uDcVzEfWbYNyjSxsxg3j@mN(kx;sWY?VVGsn-&-Tjaqt5f3C+MyQGS|y*^{H@MYe!=;7^MZ5jV-WxvW^H`?vMPB}Kh2@bmrR-#!|Gts8u;n%#2j)_5c7*Ldh%D5KuCVdbqmdr1KqIEo zPx$dSael6hkFjRs)lcrk7QqbzEr(t2P4{pm3LV6(y1Myyal^vN)Dq#omZc|qM=cV+ zLa3c0&OOi&eImRZ+ftv~%WX4NLFga&-!x4Y_MJZcGvAwu<5n$8oZRbP5qRDt%H$FE zI)yh$b5?8FK>gVJ<8|$0{)LkKpQ|UZ;WqMO(n3B>KxeDPP=RG=cx#{FWFUC>fN}Z{ zcoe(Owz4K^Kx*ScWdEj)wEbC-KQe%)h8>F;!2jN^zO!Do-LK@b&@fwQvu@eQp#&k! 
zLL{q2blsHDae3#AiR=TfaW#teq|NbrQ#w8b223)DlWPyu3~oy$&yed3dc$nwc+`R^ zyP8A`ktQN?w*%;2;~>@&3#)!a{NHI9d6WuVpb=4K(%Mh2%jy-7qABdNqfEtDbW%+k zb}d3O^D(Sv&G5o18Bu?!Z&}@AhT2IfbO^KJhfAl)1lDAtHGhmsVzthN-^(t%f1L|6 zWz7t)R<`5a4RMHinL3VpJ16O8$j!n02QjWlZ*8COjZZ6&%1xQY(;o4h1%48b{7$A3 z6^GM(vDJ=8$H_a!RO;VV)Y!(9Z7abgdW5DnilqVWU;jy$A9JZexVb4!jUl`60jzE zqn96;)>O7EYbQdgCSwrw1v}H4t0+!j4D%?Gh?x(|>b*e-F9|>V zMp2;;t8kb#kywcl<_~*`=!Nf;PiHMuJE$AH=S#VSs^*$C^<8s^d}aS3u4BnkL(P$A z!ADp?4%@W!HX%X%Jq+~xps!a`m_s7!F$DW19jHs-@il2963(N1$pp-?W73j+_uS5D zfn^Tx!^!gxm&WRcZs~n~WU+&BZ+tzGt7Hn0Z#B}(Z^}X+kHz-Tr!P7xTZ&Z;kv7M1 zU?VVYEgPiRq;#|_iLsVzrY0r?dWfo96OnPx0zDssWex-6j>T5)EMidxVzs6^ooLs~ z>A3P&bxZ_N$7E+(Cm26qq?8p4UR=!88W_!8`KUs#^PFsHJWeJLi0Nz>{IeQeh1J-N zEJ7+bK{y(5boD7MY1FDf4dK!vk&97fa{1EdT>w9h!|H|pN?jAcl4RbQGS*=2U-wR& zbe=2qs=HF&NgfKwVpj_W_JqfyIduxX7iMJVf&#FRw4;yuB4RcseJb>Q%aqj3$h3Jo zweYNU_Lb|4rmX@KLo`>u|GH1l3R5S@+cv{65Y*_7QIA&%ZzOlMy@_*u#v8gq_Mu!= ze|{8$Q4E0P(YVcqIfP)LQ&I*i8m}nNLd+<1wg7SRkXzfKsg-}?I zlgx$V{o`xY6%6AhX4{THet`7oUOt|DS8DSPaORjQUr^wF2cUReCFpwl%2F>l8YnS= zZf*`rZ7}V9M-Uh&DzqO*!0f3V0B=GdH+UE;r;k7R-^d!G=5AA z8_K2QprVN^{)R0AvJ6l5`IkOPXAd|0)mgyIKaSp{(X2i7#zj8jB8Qr+rfYXJZM{D0d)@k12Y7iek-Sk;kNyT7r5(cl)k=f{CWEi>{byzPyUw9O?v8_Uix;Sb|A_0 zjbo|0f*#HlNoTlYbpnflp%|RIu$0nz-vSCCSp5Fh-AL=)sD_u?Uwr)W?bJ^KLEJ^n z?*!P;8-jxNTA??F@8f)O{#Py6B3`prKmAqB-&^8BsGV^8_io**{~Yz0?EO&7`vCg# zSYm>PiUa-nIh;avV=ho@bJT$r56oXDUL4gH>0cuf$JeQBG`Fg|?kx2ltNnpoConAh zXjDu));Ga=j2JeOJKkDcC9qAanc}90+!htY$LIg`O9F~ne7_XbSeWQy#5V`7&8gugH)Z}Vb z4zH{fNj2gP{Sa!C44wCQ>w7f=Cxi!r!)`olyAhw&9~tryV^x7XNcR4!=IXwoB;T*TTKJK?ZV1m87D8l^ihfMqg;WVo0TcHzMX zI?nP!Q%%fO4kwY61nt0EbX;Si-ezK@wq3AreyyCL<3(8eM{)JZFjlzuw$7vznpYK1 z#M(CLd?^(?pqZ(xuYU(3;9yHHbl|tzkpn#!CK3=aV(K_-Kn)r!%;c%QSc2>CH1Y*pQ(eOywm<7&jFUZmCY1*0f8HJBQsb5qko=d!QM3*-t&h>LWC-?nQ=50 z%cPYvW9$MA(aP`e22agvP!Z6Mg1&ntjvSuuTz~A?h;l+Dhn%_|@iHuC9E>!EKE1hg ze_Ni_DxSK3h z=SjjjvQoadY9=CVW~F+Dio3{~@*TAB@vT|SF23=S!#SC2d19l~_8)UrV>|6)-V_8K zec5?eOMh5Yv0QqWR*_u8Oed;%4Km1VarhEPCS#C0#Ox1?a{Aj6%bw*ey zi?=w^7*oXIxUt9L9O@Cp*T}y#u4gdMCyl*x2U6VrS$-}Um9PJd2Q z-6CLOHqiW4wy>AR^GNcynqVLh#4<1tD2j0~5GJ(Ed=e&<9SrDRfqDAkN;Z6hM+nBC z&39nEwCzjvLH{bdPs$#2D&zqWfEJ`L{|n8)SN8=W;kLgk15@o+a#Zjsb*J1hJoJO8 z)K7o)g3X{UY2I2^M5Q)^Ar77rm!1JC5*NP}L(#Ou*u5v9fDeFhJ%&c3Q6#oDJocSi zcP)?;_$44zDIyzy%2^K@4bV^RC99?aEV>qWd!TeO?0y#b5(%Xqi`MLA|579*CiG^d zIsxJdrw9eXZl=0zB3IbLffmPZu-la_z=(+Xw@O71U1s35z(AUjf8V&X~!Q){l+OJN4=wuLOZ|( z`seq-0sxBj`Q4WCcEcTjQ2H@VpP{zBhw%VfnAbu7gYEb?`_kQjlJZYLgqexSBXZop z9&9=-{3j)G1S6+$a8~J)Kyx!J#XlT%X?&$0Lz-Bb++hB3YlWxeRJHU{?eArwyRo6* zFe5?1 z;7b2Dq)#iqGyM6Aa@DHeK4p`I4oGtTU3VT5x1}Ur$GWbax6hic7E_l$cG5+>6GDBo z)EkoMNF=Qw$x8m=+4dooQ0HAZCxbvK`ta*!QAMhLqq5$x{iU~;swudzCp9)-qL*vmVQR;#rN!+cA#CwAAPhPL};rJr>`AdDa(SY&LMWZRlT0`hUNR f`TxdSr!vLnS%)6&RKoxSe9qWAe_d?nck_P$>ZA$( literal 0 HcmV?d00001 diff --git a/doc/source/assets/images/neutron-network-3.png b/doc/source/assets/images/neutron-network-3.png new file mode 100644 index 0000000000000000000000000000000000000000..34f03ed5c9aa81863dda17be757d70daaa05735b GIT binary patch literal 13293 zcmeI3dpy*6|M$PxqPEa>poHv!y-Fk^r%~Fr`jynSNQ|k35-Ay{F=%ORskG~gGNH0d z>ohrz;|y7(iAjUOU`QH_nZcMD=6JuSc7Koix*yl?ci)fuas74Qm&Z6fX1?E<&*^=5 zJ>PFPjvYO)dgZ2-2!gCWbnu(+5JY_uzK>`vhiB4a-XtQ3b>*RN_I@AzkUQL&_T?1b zOcb5-(~HcPg}j{ZyISf9sBS8Vsd)fNe8XZ7(cGMaremN z#Fo>R+bFsUvtM_G?p0@8eb$`xXqoxqXsXWOz0viR>pC7xmGVko702DsGB$nkApL{~ zI&9#SmQhORC&B9k%v;*sK##fo5RIS8|(lkeTE}TGE?kQuL3KZ8?rq+aKa_xsP;^ zp>1lpnqMRzR`1u&{i!*v)iO0}&r=GK-NutfCyTBTzOjRPn&}n|D5N(n?v! 
ztGGS)y@y8+){u~YgNygOtbs&*KAK*+?*g^@r&JCqkNoyq1=h)<>4_Ann2c@Mw+&%qcq)Xl$G#&xw)PJ zH6+pm^I9`e^Jo+M(F$>9eaHhh6Jx}v`Ya0u6IgX>HS-wdj#Z#{M(^0Tb$H`W^+e5s zd<)I-C%d?vBb1T-oJj^Y+i`9KVq~h8+c==TAYCOuA0$)tdH%-Y0LKN=H5cn59b}6B zq{SfWr;7u|Z%*=BMB=rsPs?wo1_WgEeUf-&D^UfznkE(=<@4^Y{ryD#=QV<`m(sa#>kt~4~|6g zUbo0YeQo?%X2Y%JbrZW9`!y3ICYw`yqK7}tAisJKYrT^u*xGgyyZ!s4ZOanlt&?a+ z4J9*pt+BNgb40=eR!I9mdlQZG3JZxM~w@U!~s5l_{u8$Y^ljY|LwmPb&!G zu&}Eh2+x1FpZSLL{0p!?0WmqmdcAskv}OR5HN$fTLsK>4=EKCyE$nXLcF?3ooN=UB4$ zcFGhzOTPx^?4!n=Y{7VM@UB6pfoiEC4_Aydb!sind{4J|T*_JOK42{G7ZqAJ#0>LC zNeNOC#hK7)q`E~Q5OjpsZZ7dGDlaU2Qp!2+7Kcn)Zg-gZhHk^$)}2gQB2bpx2OlXZ zLC)wa`NTRsPs(B{Ws#BI@oplv(SpIjmiZ9{7Co4CGj_!OPmzVBMB34il;@%^yByk1 zCPR{I*`{L;bfGH7Z6x7<6IP8QmdNz)yT-m&s-Oui^i`j z81-K#E1QjF&AAQHb8kCBY;g@I&40wA*;Z|VJM>?r-MQ2CZC*DuFE3ATd~PwsW|Ny{ zQge)VMv=vgkA^WK*)~}f;5AtEq)ix$w#cM*huAvC%)M<*bHYm&4!B4r^Jw_Rv6S2d z(ab=QySqEr zU2)!Y`~86f2WUA?G4o?SVU5COM}C9AIN{xa_;Z!)JAzo1d8*Z+FjC%_QB- zJ^pfd_`GUQvCXKqlir`NMI?utf`SJ7e)ih#kkowoI78@7w=qe6{Vv9DbxO-pv=hqF z(Gh2rOH0#BZH)$(RlgSHf6Gnl@ks09owPK}ANoa3G=_JEJP$qNLK6r}MJ}6e=jU~w zx81DG9u9>Khhc*wPWv`*;M~r?)elHT%laaAK6r?N74&u#D7)w8<}Mh_>Jhd(Ncdph zm6Oj-498Am(d~tWH^I10T4Wv&EXmH>5}dO-1Jm;9Q+bgcZ5?OJcAqlekXKmv()L|@ z{smVLk5;1WB~d2Azxp9Xw;Us_!bnLmIbQpZn^VhuH|#HywYbPylrpkNSwxW(DZOx? zbHaD%oAYo=ih@H?gu%Itb26fYXh`Yl>FMme?#be>Z(`{-WrcQ=xvH^#0HMA&bP#~Wg^@joY)78Us_t~ zR(*%pI-Rn}Oo9XN>ERJU;$3gHls&hUQR|c6>AH%1C_*1TX)N9f%rB*7EM$lNL$F81 zAcHKzu&_$XoB?Gn;qL*pTdCo2$@`YE%c_MI*C>@0=G6UCUORPsd^{-4*d>0j>kkU} zvw`Y?bKpR^E4nz%r*~WoS_*>Aae#YZw@0cL-^Gx565e(2c9v?nb|#gaw<-A-&Y~GH z9v<+R02f{CS#-u=@c>5{`)ti@{auX8OjYxUL%A#s_Vr^c9ntk2cI3I+%97O$ zsbiOSJ32Txz;~?uX6>K=Zwo^H3e3_=FGX*tSSchX$eG;Wy@KxZHqNR`9vtx-8doUY zaHy7sP*RLyvO6zraHA|pwaV0TCzh{dXIsstRmZerh_8pPHWF{?>WR!jH8$iYtzerkCS_Ted~?!4F7#+=+-_rQvQL(IOb^S@c* zrw9WVe+bvRl^~8v5GUM#sBow;HFDPjHkfPyxP};mTYBKf;OhW=YGE#uBAZYu>ZD2u zgQDQ7oaptIrSmopgd?Uj_oGLTrlqC1^Uo-i`NPU`bLu6fB|MjdikB_s9Y{06SFV}W z8}2BN$zfQvdE6?y6g1_r>AEiFXs(w>FW#KN5su1aaf8)=SDgw8-9=8vWFIX^+oz?RFK5ddS<=RY;hbWh zc70B{c_!L8)=?lYMjOrV_s*c!l?s;PP=C95R%n(mZ>H=NuxT!9T^IQlOl6qjeHfXQ z{nQ6yf)ATQ@J^5|&e3s$2@oV=t%pcb)}<47}V^~FKUc_*T)mWLEik?6Zls^c4n zFIo<1?Ai2cv_j7St7jX#fEgZKdsV>ugjwnqg!-`Pd6||Na_?|gSJz0^(E>B>PQo}? 
z#!Rf(&U3~)%zW+A+jsBY<+Z07CoH_{NY_!VU9~lX|+mI-tB#>F^rQB1iD4rZ+Bp5wGM+nsm7IFe?yS(}#nlQH=mDIcx5KP^1AqpG)B7Qj^KA04BNPQGO+#q?isK~!e$~^ zHmq9m-O${>$A*W8OThAUz;npD&+Uxq>x!fK-}0yrj3cSLy@=OuD1s9OjwH>_QJX_dZq3Net+f2Q1z+n(6{l{DAL*f=**ngG$2u5v@|?d_I7wv0vK45j8&)D<~m zMpJKU*)n?dAPRfsH(gg4&j$aQAEt_`%XMA*x;!bT)(6>a%C52=O{y#PH^}i)MOUBh zKnNF+W{jiYdAaL-^{m?JeC)`LO`d%sN|dE;7uefK3_F`&E!E-PsHcuyj<%HMb_SYp zu}EpX5+d|z@)}+dcf)36^vs%B`v{_I0`9xX;2ruA)=D==M@ooSYfSA)^E+KMT03~T z*NwVg-#lv%DS*nks!kCkF_5mp8}ByMPGNKF<~XA4Yp<8R%fFBnd8l(D+9}7)G@T7= z<08p(kr>y>N!BysY9)6L6QzW(UzdX`u|sB`tyo-eHJIn<`)33U6#6X7|N7>o<#DkQ z5ewn9zIO&i4?X=jU<5|?(P_KT8r0z~~%V8HqKIjNI zzK$BXeNiY{mXno6cxO4B0E1zMQRF)HgGzKeal0n+?1*V&1-`i$!T`ho(pERpPB+r4 zckg3o30|R%GwSYM+yu~H2@2U#P-K--s_*h~Zf}-xCfjXyq?l9R2BM>i_?6adq3F<4 zz3byt5hi0|>1^43$ic+B;*rs@&t|8pN*+B#e@{11TU;Az&REgLl+xgI`w=x^Aow(6 zBszLwlzve!dB}ZxRc#eMG94vMbzNw45JCFWct`d6tI?L#m7Mu@e3}L_y7LF1SpVic z|KC!tyRU8B`o&ugdO>C?)~`85xAB|Q*yDN=uamg2%dLWMa`^_hn8p_%5xg^&0@U`< zRxHjoRaq|*e`o~!AU7Rt(IEK|srvqgu21)Axn%?h+wEPnmOdnP_3vbNj35!sgSZb# zh*jIxw*G9#HLu=j>`{W?yEP(>zYn>oD!*T~ybR<(MnI9u%)N)JISAqmo_JgHs(2Tb zMB>gxJC$%GpoBW>JhNw8BKY?tP`FefM8mRb(UsfTw)Dqsf46ZG?pKtY2{=m!|HAM1!z4^fPN17BD$fL*q6e5r!6>}V_ExTk!YL8MtUVp0-@A8WM zmqy(SUce5=7i%K&FI9_ls$_XlZ}2e&cErYwY=zT77VakM==h=;0R}UI$oGCAB#qY2 zj$JF_IE0vA+*EE(&(T1fO-D7*v_=S^D|F#C=1SqW_|#~M87_RPUaTH3Zj-85X5l1!(~n8HNR+0MAW$orSp7$ zqhMhDb{jomFQzxLB8K&Tt7E3KCDK%SznQ(a<`PDZHo7`2MUcpjRsS~td0Lc5<*Dyc z{|iT-13|ZmpoTbafhE*)?U#K|&_)&>ChGKJxNb?b2E2yn%h~ zNn0;9LZ;Sk(DAu-WxeauL7%m*uk$}FX2pq$ff-c_j&gR!vy*gCi*tNDAE#WbXUTak zOU*j*vuoW!v5RMBW;!5SKiB97*(^NT@#Knyk}y1H7*gQFEEZ!t>ggS;KC3qD@pT+k zMvg+E@KP>`0xlZ@Yrw5Jj~9$oFp#f+u7olV09%4&=4H9B_VOeFoUrU8rj{AhiLa^U zAXUu!IKoGTg$)<7?gx1H+2E*|{G5jm<0{+RS@k@_zw^Y$*0qA@dIiaHt~*5Y@wsRR z&^aK`0KZ(<4VHt3!T^+c{hPov`B=Mt(UY$|Lu*`AIksNvemx%iY<&u7@IqGqGTF@d zT}oQ=k~<4xe?tHGIKMZr-pyc$0|weMHXFC|LBno)`=#>`s#HS!Pn~|NY~+5c9pO(} zgm#It!>RNt26~(AOUlNN*X|E|tQ!(m?FYz)m5FzeuxJiT#hJTx?El1)V$fk^1bV+r-dq zh=E-3#swGU5=|+^qZDMUUfJ~EC08v1_q!m~cse>dDqw-PQ^SA$pmjPS$#z+>IP-@m zOI&D9-M#6yD)WJ?^LG3A1w9_8u8X9hKsAa-aLP-$X{>vliN^kM>*qf*9C!Eya&vy9Ud$IQ zEgDW8&Ei^|_oy-(DjoEQ*kIti*z2i-LSBCXs+Yd}r9&HRdUbG+0HlW=2r%v|JRq6~ zwFH(w3>3#HA@M3-O`we_{+(?e7dh3}gR|K&_t;AS46Dor`T|wrADqwcOS{=A4sHFN z?^OyY^#{38W}7To1(2k1GKxu;-%v>2)-hUX5_AYc6dkxq`X+@IzF$3ZYQ0 zR7VmW*UZMG`{A%X`zED#tVaI!ZbUd2b~3kZCE^Q&4P_Wy)4-|G7E09-qeMJr*E$5L zIq=WxOJ~;`_E+5A)*cA1mpPI^mt)vgOeQ8miRS@ccGUFh&*MH~;^XPkusUT?HRU{^ zh(}421MY5Lg_`Mh3wjpiJdXE1mM|=j8g@Z0CwvmO8?n3KUF&xEm4)G8G&1`5>W&|s z>o2&((rxDM20qsbbwXxeuULF`fH0x7WIF95e?$;BTH8; zEW}0__YGaFt9&YJ#LhWu?zD3*2wJpWZiP9woK>qQGQD))6tKlG$0?F z`a%A_{qBk)5CbZ_-KNigSQ(woEh^lZq=Wc=o%}i^F2t5c<87NXqd%e7J~cG0|8+P} zs^DE@OjS^3h7+k05pfgEgT>&S!e5$iL<^k|q`^M5_3RsLnu-vNH=e6F7dT65neiAC znPTcY<9Ys7*_~WI2gm%(RjXH;AQane>`M~0S~g@Z#i$`s-4i!Jx&S6{KYI%lhM|A) z*;}vyS8$Qa{9m$~`QwFVUXA;naoFoU!;ZetL@r%)JPQnEGl6fauR(>t^1s|^}xsp}L4){fd z=ntl@>w4GoXB*_3pCu}1lT*mDuaKt4kT$hL`2$oEi0z4(98%*V+OK}jJf6t(si}oJ zN=X$@4e3vBJ!|Dx;tAMS1%5(d6@Dq{ke)g|0~*h7&{six9`)5?F2)sS1XSH#SfFd9$R%&7YsFdH@nYdwxv z390vjrMU3c?h4Pa@DWW9^hbL7RgC&<2R`s%Og0X-tI_ zOBl}*&5<_sH!qWQVFM{7QX@}PFEVWt07&y2>z()+0D#YBzzg43>Ul!k#{lgF*32&I zomP>-QU(W-!?Fum4LH5idMt*C6ot}C!R080?O_7my@KN8F!NCj*`azhLn#LgDo{uKRN#gX@J&+*mk!|j6V#S^IZ ztKH)m#>=a{{*G2{puC}Y;i1Hx5V!DMAvI=rH7zi4J?Io}7iKtkDOd6njfQQGD ziayHO1a`=;b)V7P74O^?tYL~$LR3mzhWY2Ce?mzIXo_KS`<}WUJ=i+b?-z+vf#Xae zmjgTqur+Mn;4{h~`Ipd)fD3*dq9-Gw&fYV&xHpz{F7;ndjzmaPJzvxYH;A2cwz*2G zypXzgbKrB_e7@n>jzW574q%*9cLxBbLD1FqbvTaYe6yypY?0jF)FSNj36q~@`ENl_ zr=q{Zcm)qOFd{g*WVj(Z{H?p?M 
zDkOf8ha-;kuEa)%9iq8C9b>RF-{o=+H));@>f>NH`WNrh!fHM4C@)ql?%o)C>GR5fYmIk-k}m!) zlyr&VI$^Id9Q9!71iU9#m@?fM#o^{X{w1!I`&H>V+#ul)*mxv zRT<60!vpdwLZAT6Ysn)^_QH9?mk>d`kN7?QH2T>OMtzF=z~|ZE-c`B(hi-)a9|`;9 z&PLNUHHom&u_rM^t;Ic-Q)jp-z~{YzGCNuc41+39o17;@!p!or&H>xe&O$wRy2-;lZaHFi;Wxo z{t<`Eq~Gb%r(EeHvNp@@Vt^owAgRmt)7~^{QakfTKQblJ{w})mvwYokefl0#|k5i^&qF#LSX7x}d&lW9K z6a~)~ZsPGApN;-TzVyglh}{zFgK0F}-BjuusarRxpenLM4WHTuF)f)qa$sp`Jxjy! zE-ISzdp);3j4|ZF3=7K)?VY;T^=tTHe34~B0q@%;&6?+>%+rzf7qK2&9A6LYJjE!f zx0}PQo1tHw6n-_t%HCj@xNV4i?Y*0E<>t-Oi@9nAmS$mBaVfqSuPWzoeBi8FwXbNy zeCR1(KL!+q{FZnEZhOc`HJ=qfB=VVKD&OxI3K~WSx#)#fEG>CpBaRPW>6d(mVWv$) zz8cl=9W7593Md-z6?43}(z-Ot9qFJ>SYPNguiW+~p|1%`@;V^@jclY1t8`b3|K{1@ z=^_`nZ_nqi;>-X1`~R@jEfHPFOxy4}P)}v|Ovz_|wZu6BP9TZNG!rr=qh5{87a8%Vd zmZ~nF`JsnnqkkOs?#yA!B8h|k^kF>ryI)izZ|ot54kqhP1H^+;1^@x5_kJj!Kq(3$ zrU4Yup{)QcFHF^0kg=hu@t)bs zy+-U3#O;DYl&Z!9Md>uv-E+}#w!lmvU2fIq-1dI9@8Rkb7)@9F??m)}RFeJH%wF@G zmYCEJ5WzzNoS}IY_3i*%DE|Uq2d)E)&cvcqIyK&1lLX~ThuDfpR#MGv+hQB2OG1q> z(sX_ax&%=7;YJ_^mD$UDqAE^HjRYP&j5EhE(-$+pDVHFt`mYdc-J{8*- z4{FXcW8&@N;$j4pGN9`42y(&;SzZ->DRGZvGPaA7FLwTsFlnJqZwxhb?GLfr{b$`Y zM+Gl{Rl=U=`{Q3&W3~&iNKd9@@VDd-E;pzsEF5U+=G)%}L3*?Qcrg%)O2^%O*z7Rl zEjj}@ej~66=O3BDt%h8q-grmtyg zOi6X@Nu8f14(1<@SFrFNIOo@1T;q3tQIMADzbJ^8ih^jqz^5%ajKp$EP!`sVDeb$% zE0VD}Y=BiCfpVeqp%T=>fTa+7w})}<1SVx4jIwYELhH1?zny_6JI2V;VTaM|oABJr zP%(l$wx;BE(nx{!-wmkL-eFs*!Bo{#qDCHBJ6Gbc8OGwn_4&<&TJ8XnG5>e}TW2`i zxqcs@h*1l^KaPsyY@aRG(QH40H?$FA@AojqbZ4)TndHfX;-!L-ecTo`>kpY0=}L!B zALLg$O1;?&(;VdEBKASF`Sop;m)^`i*120{UYb8M4o>5aV>Hcv=kLcGKJ-hlR7c${ zBrC4)I5oyuWJQj|ftQj`q!f_vINEFC%05Am(f5C`(SM{2e{6NQtONREm)lwrpGj6? zrTx94%-iV-&OZ5YzMuUnRb1MKNAK#}hn>|(-q+_xt|Lz>LdMgY_A?u%xERuEIlVwq z-yzz(97KokH*&QxLoaZ4Oj{DZXNtPJ_5oSuC*8qpEl}kvmcn~g{;)x!GsT8##))=M z+#&Q?o{H$dhq^9tQ>;avU48Cc-(%x8L%+2{Ug7P9e`*C@d8Q|@MsJ}uEchn6Zt@)q z)pP34CdsesQ1yh6gl;*rF_y8UQAXtwH?gr4uSmX0Z|)#Y@LdV9sG8k*sFqwR74G{S zU<*NU?FZ?vivF)YD5RFRdeGhcuWnri>T=rnFX}>yzeeZgU1j2j8Sx0R#X2bg2armt zH7>4#HtH7=+a!uTvvK215})<(F)OEpH6$wMS}<86!ARhc-2lyvOiZn&Ozl=fCMh3_ zE~VqxGqO76$OvOUtxR4+6xbVc5cS7qiupJgi!uvhODyjOF5bNJ* z5j9e@QPx!&R8TJuOOe_Aj^hzfO2c-8toLh$qEYpD(Hu>&H=YF>`KP8BKB{F>&u?N; z6BE*`d&G4_X@ocRHD_pEI{5*-IKNgFdw4v!b~@}oKgQW4IdI&+;z~j!W-rBVyTich z*x3q$%0EKFuF~aU_2#EB)k?yTVona`a_`B~O?n{hNhi%s(WGy+9$#=7Ic&@6U8M-P zCVV2bx#{NuHKdBvOKtGlE3Ts^~(a(kmesdZ(r$9Ppp{ z`$8**zCX08RG6nSE2MV4Fk@6rS5fJ{^O`G7dY(HYO$7RNX(tW3+gzak0(!>+z%S^J z7kSH>D0gDutaMcOxs(%qTobvhufhaCal-`OFUmteM)%otmh2-ZLbp2(n%9haq4kS< zgJhDtHTm@)(NcSVHRad}9qiI!BypcLG`Cfl7lDX}*07fXr!d@HeM989w=5~lY%G44 zTY6{-RtIUyG2f6n2ijic452wORIDA<5z?V4OSRZgRrY0fRfdT}GS`kGs^by`L)3}z z1(rPzW7#eggTO- zlqZZ>{OpZAe6HGg(GAyKO}EIh&T75Qxe(c86Zj zfe_Jo=M z1oDB)jeePc8wj;D=2}!T4oPJ6uUh(8#SuO6gchu5n8R66)=?z5pX(9I?@6{C6~Btq zi5K%zC9{^FivPFt!V6XX8SKkMoha*)Eg8)sL)+*_W?#5kTEyKp5a=CQux-GU8GOZ? z`Z6+2g8g9*MWO^VQ;n|n3PPs3-e^O?i8sTA^)D=}OHi?h1C2riA-$JMjk1Y?;1+Wf(j-Ke_XGgo$E`l8 zj`BJV=uR9~V4d&TJ+?KUqlUx=7MNWI&>fbcvZ8;r Date: Thu, 22 Aug 2024 16:24:21 +0100 Subject: [PATCH 448/574] docs: Add SSH guide This is really easy win for people using DevStack for the first time. Change-Id: I8de2d4d115d34e9d87dd461016b5b894d3f000e7 Signed-off-by: Stephen Finucane --- doc/source/index.rst | 2 ++ doc/source/networking.rst | 40 ++++++++++++++++++++++++++++++++++++++- 2 files changed, 41 insertions(+), 1 deletion(-) diff --git a/doc/source/index.rst b/doc/source/index.rst index a5a11e251b..70871ef876 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -133,6 +133,8 @@ there. 
You can ``source openrc`` in your shell, and then use the ``openstack`` command line tool to manage your devstack. +You can :ref:`create a VM and SSH into it `. + You can ``cd /opt/stack/tempest`` and run tempest tests that have been configured to work with your devstack. diff --git a/doc/source/networking.rst b/doc/source/networking.rst index e65c7ef195..f7df4f2e46 100644 --- a/doc/source/networking.rst +++ b/doc/source/networking.rst @@ -68,7 +68,7 @@ Shared Guest Interface .. warning:: This is not a recommended configuration. Because of interactions - between ovs and bridging, if you reboot your box with active + between OVS and bridging, if you reboot your box with active networking you may lose network connectivity to your system. If you need your guests accessible on the network, but only have 1 @@ -114,3 +114,41 @@ For IPv6, ``FIXED_RANGE_V6`` will default to the first /64 of the value of ``FIXED_RANGE_V6`` will just use the value of that directly. ``SUBNETPOOL_PREFIX_V6`` will just default to the value of ``IPV6_ADDRS_SAFE_TO_USE`` directly. + +.. _ssh: + +SSH access to instances +======================= + +To validate connectivity, you can create an instance using the +``$PRIVATE_NETWORK_NAME`` network (default: ``private``), create a floating IP +using the ``$PUBLIC_NETWORK_NAME`` network (default: ``public``), and attach +this floating IP to the instance: + +.. code-block:: shell + + openstack keypair create --public-key ~/.ssh/id_rsa.pub test-keypair + openstack server create --network private --key-name test-keypair ... test-server + fip_id=$(openstack floating ip create public -f value -c id) + openstack server add floating ip test-server ${fip_id} + +Once done, ensure you have enabled SSH and ICMP (ping) access for the security +group used for the instance. You can either create a custom security group and +specify it when creating the instance or add it after creation, or you can +modify the ``default`` security group created by default for each project. +Let's do the latter: + +.. code-block:: shell + + openstack security group rule create --proto icmp --dst-port 0 default + openstack security group rule create --proto tcp --dst-port 22 default + +Finally, SSH into the instance. If you used the Cirros instance uploaded by +default, then you can run the following: + +.. code-block:: shell + + openstack server ssh test-server -- -l cirros + +This will connect using the ``cirros`` user and the keypair you configured when +creating the instance. From 14f60b951aa03ded3779057c7fef28f6c3ee15cc Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Mon, 26 Aug 2024 11:56:31 +0100 Subject: [PATCH 449/574] docs: Expand SSH guide Detail how one can SSH into guests running on a remote DevStack host. Change-Id: I9f988b1193d67859b129f05d08b32a23e50aee49 Signed-off-by: Stephen Finucane --- doc/source/networking.rst | 38 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/doc/source/networking.rst b/doc/source/networking.rst index f7df4f2e46..93332f07a5 100644 --- a/doc/source/networking.rst +++ b/doc/source/networking.rst @@ -152,3 +152,41 @@ default, then you can run the following: This will connect using the ``cirros`` user and the keypair you configured when creating the instance. + +Remote SSH access to instances +============================== + +You can also SSH to created instances on your DevStack host from other hosts. +This can be helpful if you are e.g. 
deploying DevStack in a VM on an existing +cloud and wish to do development on your local machine. To do this, you will +either need to configure the guest to be `locally accessible `__ or you will need to enable tunneling for the floating IP +address range of the ``$PUBLIC_NETWORK_NAME`` network (default: ``public``) +defined by ``$FLOATING_RANGE`` (default: ``172.24.4.0/24``). We're going to use +a useful utility called `shuttle`__ here, but there are many other ways to +accomplish this. + +First, ensure you have allowed SSH and HTTP(S) traffic to your DevStack host. +Allowing HTTP(S) traffic is necessary so you can use the OpenStack APIs +remotely. How you do this will depend on where your DevStack host is running. + +Once this is done, install ``sshuttle`` on your localhost: + +.. code-block:: bash + + sudo apt-get install sshuttle || yum install sshuttle + +Finally, start ``sshuttle`` using the floating IP address range. Assuming you +are using the default value for ``$FLOATING_RANGE``, you can do: + +.. code-block:: bash + + sshuttle -r username@devstack-host 172.24.4.0/24 + +(where ``username`` and ``devstack-host`` are the username and hostname of your +DevStack host). + +You should now be able to create an instance and SSH into it, using the +instructions provided :ref:`above `. + +.. __: https://github.com/sshuttle/sshuttle From 49933804c9d151ce4a220c188ba4301afa0bf98c Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Tue, 27 Aug 2024 12:08:23 +0100 Subject: [PATCH 450/574] docs: Expand SSH guide further smooney noted that using your DevStack host as a jump host is yet another reasonable option. Add this option also. Change-Id: I24887c254e131a8979653a7d17e64a708acf294a Signed-off-by: Stephen Finucane --- doc/source/networking.rst | 76 +++++++++++++++++++++++++++++++-------- 1 file changed, 61 insertions(+), 15 deletions(-) diff --git a/doc/source/networking.rst b/doc/source/networking.rst index 93332f07a5..05b4f34164 100644 --- a/doc/source/networking.rst +++ b/doc/source/networking.rst @@ -158,26 +158,66 @@ Remote SSH access to instances You can also SSH to created instances on your DevStack host from other hosts. This can be helpful if you are e.g. deploying DevStack in a VM on an existing -cloud and wish to do development on your local machine. To do this, you will -either need to configure the guest to be `locally accessible `__ or you will need to enable tunneling for the floating IP -address range of the ``$PUBLIC_NETWORK_NAME`` network (default: ``public``) -defined by ``$FLOATING_RANGE`` (default: ``172.24.4.0/24``). We're going to use -a useful utility called `shuttle`__ here, but there are many other ways to -accomplish this. +cloud and wish to do development on your local machine. There are a few ways to +do this. + +.. rubric:: Configure instances to be locally accessible + +The most obvious way is to configure guests to be locally accessible, as +described `above `__. This has the advantage of +requiring no further effort on the client. However, it is more involved and +requires either support from your cloud or some inadvisable workarounds. + +.. rubric:: Use your DevStack host as a jump host + +You can choose to use your DevStack host as a jump host. To SSH to a instance +this way, pass the standard ``-J`` option to the ``openstack ssh`` / ``ssh`` +command. For example: + +.. 
code-block:: + + openstack server ssh test-server -- -l cirros -J username@devstack-host + +(where ``test-server`` is name of an existing instance, as described +:ref:`previously `, and ``username`` and ``devstack-host`` are the +username and hostname of your DevStack host). + +This can also be configured via your ``~/.ssh/config`` file, making it rather +effortless. However, it only allows SSH access. If you want to access e.g. a +web application on the instance, you will need to configure an SSH tunnel and +forward select ports using the ``-L`` option. For example, to forward HTTP +traffic: + +.. code-block:: -First, ensure you have allowed SSH and HTTP(S) traffic to your DevStack host. -Allowing HTTP(S) traffic is necessary so you can use the OpenStack APIs -remotely. How you do this will depend on where your DevStack host is running. + openstack server ssh test-server -- -l cirros -L 8080:username@devstack-host:80 -Once this is done, install ``sshuttle`` on your localhost: +(where ``test-server`` is name of an existing instance, as described +:ref:`previously `, and ``username`` and ``devstack-host`` are the +username and hostname of your DevStack host). + +As you can imagine, this can quickly get out of hand, particularly for more +complex guest applications with multiple ports. + +.. rubric:: Use a proxy or VPN tool + +You can use a proxy or VPN tool to enable tunneling for the floating IP +address range of the ``$PUBLIC_NETWORK_NAME`` network (default: ``public``) +defined by ``$FLOATING_RANGE`` (default: ``172.24.4.0/24``). There are many +such tools available to do this. For example, we could use a useful utility +called `shuttle`__. To enable tunneling using ``shuttle``, first ensure you +have allowed SSH and HTTP(S) traffic to your DevStack host. Allowing HTTP(S) +traffic is necessary so you can use the OpenStack APIs remotely. How you do +this will depend on where your DevStack host is running. Once this is done, +install ``sshuttle`` on your localhost: .. code-block:: bash sudo apt-get install sshuttle || yum install sshuttle -Finally, start ``sshuttle`` using the floating IP address range. Assuming you -are using the default value for ``$FLOATING_RANGE``, you can do: +Finally, start ``sshuttle`` on your localhost using the floating IP address +range. For example, assuming you are using the default value for +``$FLOATING_RANGE``, you can do: .. code-block:: bash @@ -186,7 +226,13 @@ are using the default value for ``$FLOATING_RANGE``, you can do: (where ``username`` and ``devstack-host`` are the username and hostname of your DevStack host). -You should now be able to create an instance and SSH into it, using the -instructions provided :ref:`above `. +You should now be able to create an instance and SSH into it: + +.. code-block:: bash + + openstack server ssh test-server -- -l cirros + +(where ``test-server`` is name of an existing instance, as described +:ref:`previously `) .. 
__: https://github.com/sshuttle/sshuttle From 6a8f65b476883d5cccffbcad8650894850033231 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Wed, 21 Aug 2024 15:57:32 +0100 Subject: [PATCH 451/574] lib/swift: Consistently quota variables Change-Id: I6c3245a77cdc2849067568cfda5a838afda687e3 Signed-off-by: Stephen Finucane --- lib/swift | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/lib/swift b/lib/swift index 1ebf073318..3659624d5b 100644 --- a/lib/swift +++ b/lib/swift @@ -844,14 +844,14 @@ function stop_swift { function swift_configure_tempurls { # note we are using swift credentials! - openstack --os-cloud "" \ - --os-region-name $REGION_NAME \ - --os-auth-url $KEYSTONE_SERVICE_URI \ - --os-username=swift \ - --os-password=$SERVICE_PASSWORD \ - --os-user-domain-name=$SERVICE_DOMAIN_NAME \ - --os-project-name=$SERVICE_PROJECT_NAME \ - --os-project-domain-name=$SERVICE_DOMAIN_NAME \ + openstack --os-cloud="" \ + --os-region-name="$REGION_NAME" \ + --os-auth-url="$KEYSTONE_SERVICE_URI" \ + --os-username="swift" \ + --os-password="$SERVICE_PASSWORD" \ + --os-user-domain-name="$SERVICE_DOMAIN_NAME" \ + --os-project-name="$SERVICE_PROJECT_NAME" \ + --os-project-domain-name="$SERVICE_DOMAIN_NAME" \ object store account \ set --property "Temp-URL-Key=$SWIFT_TEMPURL_KEY" } From 31980e436b73db18297a295969069cf00bd43754 Mon Sep 17 00:00:00 2001 From: Jan Hartkopf Date: Mon, 10 Oct 2022 10:40:38 +0200 Subject: [PATCH 452/574] Configure option backup_ceph_max_snapshots in Cinder backup Sets the config option backup_ceph_max_snapshots for the Cinder Ceph backup driver to the specified value. Depends-On: https://review.opendev.org/c/openstack/cinder/+/810457 Signed-off-by: Jan Hartkopf Change-Id: I9e12e395288db1fe59490b4075bb2d933ccd4d78 --- lib/cinder_backups/ceph | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/cinder_backups/ceph b/lib/cinder_backups/ceph index 4b180490d7..ea9b44fe8c 100644 --- a/lib/cinder_backups/ceph +++ b/lib/cinder_backups/ceph @@ -19,6 +19,7 @@ set +o xtrace # Defaults # -------- +CINDER_BAK_CEPH_MAX_SNAPSHOTS=${CINDER_BAK_CEPH_MAX_SNAPSHOTS:-0} CINDER_BAK_CEPH_POOL=${CINDER_BAK_CEPH_POOL:-backups} CINDER_BAK_CEPH_POOL_PG=${CINDER_BAK_CEPH_POOL_PG:-8} CINDER_BAK_CEPH_POOL_PGP=${CINDER_BAK_CEPH_POOL_PGP:-8} @@ -38,6 +39,7 @@ function configure_cinder_backup_ceph { iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.ceph.CephBackupDriver" iniset $CINDER_CONF DEFAULT backup_ceph_conf "$CEPH_CONF_FILE" + iniset $CINDER_CONF DEFAULT backup_ceph_max_snapshots "$CINDER_BAK_CEPH_MAX_SNAPSHOTS" iniset $CINDER_CONF DEFAULT backup_ceph_pool "$CINDER_BAK_CEPH_POOL" iniset $CINDER_CONF DEFAULT backup_ceph_user "$CINDER_BAK_CEPH_USER" iniset $CINDER_CONF DEFAULT backup_ceph_stripe_unit 0 From f49d475bf2e186ef3d7800e2bb55c9c360e7ac95 Mon Sep 17 00:00:00 2001 From: Rodolfo Alonso Hernandez Date: Wed, 2 Oct 2024 09:36:55 +0000 Subject: [PATCH 453/574] Catch and print the postgresql initdb error The logs are stored, by default, in /var/lib/pgsql/initdb_postgresql.log. 
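For reference, the mechanism is just a plain bash EXIT trap armed around the
init step. A minimal standalone sketch (the log path is the packaged default
mentioned above and may differ on other distributions):

    # Print the initdb log if anything exits while the trap is armed.
    _exit_pg_init() {
        sudo cat /var/lib/pgsql/initdb_postgresql.log
    }

    trap _exit_pg_init EXIT
    sudo postgresql-setup initdb    # on failure the trap dumps the log
    trap - EXIT                     # disarm once initdb has succeeded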
Related-Bug: #2083482 Change-Id: I2c83e896819b20cd7a1ee8d8ee33354fb047a6d9 --- lib/databases/postgresql | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/lib/databases/postgresql b/lib/databases/postgresql index b21418b75e..2aa38ccf76 100644 --- a/lib/databases/postgresql +++ b/lib/databases/postgresql @@ -46,6 +46,10 @@ function recreate_database_postgresql { createdb -h $DATABASE_HOST -U$DATABASE_USER -l C -T template0 -E utf8 $db } +function _exit_pg_init { + sudo cat /var/lib/pgsql/initdb_postgresql.log +} + function configure_database_postgresql { local pg_conf pg_dir pg_hba check_role version echo_summary "Configuring and starting PostgreSQL" @@ -53,7 +57,9 @@ function configure_database_postgresql { pg_hba=/var/lib/pgsql/data/pg_hba.conf pg_conf=/var/lib/pgsql/data/postgresql.conf if ! sudo [ -e $pg_hba ]; then + trap _exit_pg_init EXIT sudo postgresql-setup initdb + trap - EXIT fi elif is_ubuntu; then version=`psql --version | cut -d ' ' -f3 | cut -d. -f1-2` From 803a7d44c49f6adbfa878ee520756869df1ffe8a Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Tue, 1 Oct 2024 07:42:34 -0700 Subject: [PATCH 454/574] Add image format enforcement toggle Related to blueprint glance-as-defender Needed-By: https://review.opendev.org/c/openstack/tempest/+/931028 Change-Id: I8b22ed85eefde399f2e472780106dd39e51a5700 --- lib/glance | 4 ++++ lib/tempest | 1 + 2 files changed, 5 insertions(+) diff --git a/lib/glance b/lib/glance index 274687112e..2eb4954f4a 100644 --- a/lib/glance +++ b/lib/glance @@ -104,6 +104,9 @@ GLANCE_ENABLE_QUOTAS=$(trueorfalse True GLANCE_ENABLE_QUOTAS) # For more detail: https://docs.openstack.org/oslo.policy/latest/configuration/index.html#oslo_policy.enforce_scope GLANCE_ENFORCE_SCOPE=$(trueorfalse True GLANCE_ENFORCE_SCOPE) +# Flag to disable image format inspection on upload +GLANCE_ENFORCE_IMAGE_FORMAT=$(trueorfalse True GLANCE_ENFORCE_IMAGE_FORMAT) + GLANCE_CONF_DIR=${GLANCE_CONF_DIR:-/etc/glance} GLANCE_METADEF_DIR=$GLANCE_CONF_DIR/metadefs GLANCE_API_CONF=$GLANCE_CONF_DIR/glance-api.conf @@ -343,6 +346,7 @@ function configure_glance { # Only use these if you know what you are doing! 
See OSSN-0065 iniset $GLANCE_API_CONF DEFAULT show_image_direct_url $GLANCE_SHOW_DIRECT_URL iniset $GLANCE_API_CONF DEFAULT show_multiple_locations $GLANCE_SHOW_MULTIPLE_LOCATIONS + iniset $GLANCE_API_CONF image_format require_image_format_match $GLANCE_ENFORCE_IMAGE_FORMAT # Configure glance_store configure_glance_store $USE_CINDER_FOR_GLANCE $GLANCE_ENABLE_MULTIPLE_STORES diff --git a/lib/tempest b/lib/tempest index 310db2daa6..eeeef67a8b 100644 --- a/lib/tempest +++ b/lib/tempest @@ -368,6 +368,7 @@ function configure_tempest { if [[ -n "$image_conversion" ]]; then iniset $TEMPEST_CONFIG image-feature-enabled image_conversion True fi + iniset $TEMPEST_CONFIG image-feature-enabled image_format_enforcement $GLANCE_ENFORCE_IMAGE_FORMAT fi iniset $TEMPEST_CONFIG network project_network_cidr $FIXED_RANGE From 9f9dccd608d5415e0de988a2aa0e74d049038e2a Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Mon, 7 Oct 2024 11:57:58 -0700 Subject: [PATCH 455/574] Switch devstack nodeset to Ubuntu 24.04 (Noble) Ref: https://governance.openstack.org/tc/goals/selected/migrate-ci-jobs-to-ubuntu-noble.html Change-Id: I7ac8f9443c386e56c4ca45a171e104f0b350bc7f --- .zuul.yaml | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index a1c251a398..6a6b686ac1 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -439,7 +439,7 @@ description: | Minimal devstack base job, intended for use by jobs that need less than the normal minimum set of required-projects. - nodeset: openstack-single-node-jammy + nodeset: openstack-single-node-noble required-projects: - opendev.org/openstack/requirements vars: @@ -686,7 +686,7 @@ - job: name: devstack-multinode parent: devstack - nodeset: openstack-two-node-jammy + nodeset: openstack-two-node-noble description: | Simple multinode test to verify multinode functionality on devstack side. This is not meant to be used as a parent job. @@ -727,18 +727,18 @@ configure_swap_size: 4096 - job: - name: devstack-platform-ubuntu-noble + name: devstack-platform-ubuntu-jammy parent: tempest-full-py3 - description: Ubuntu 24.04 LTS (noble) platform test - nodeset: openstack-single-node-noble + description: Ubuntu 22.04 LTS (Jammy) platform test + nodeset: openstack-single-node-jammy timeout: 9000 vars: configure_swap_size: 8192 - job: - name: devstack-platform-ubuntu-jammy-ovn-source - parent: devstack-platform-ubuntu-jammy - description: Ubuntu 22.04 LTS (jammy) platform test (OVN from source) + name: devstack-platform-ubuntu-noble-ovn-source + parent: devstack-platform-ubuntu-noble + description: Ubuntu 24.04 LTS (noble) platform test (OVN from source) voting: false vars: devstack_localrc: @@ -748,10 +748,10 @@ OVS_SYSCONFDIR: "/usr/local/etc/openvswitch" - job: - name: devstack-platform-ubuntu-jammy-ovs + name: devstack-platform-ubuntu-noble-ovs parent: tempest-full-py3 - description: Ubuntu 22.04 LTS (jammy) platform test (OVS) - nodeset: openstack-single-node-jammy + description: Ubuntu 24.04 LTS (noble) platform test (OVS) + nodeset: openstack-single-node-noble voting: false timeout: 9000 vars: @@ -853,7 +853,7 @@ - job: name: devstack-unit-tests - nodeset: ubuntu-jammy + nodeset: ubuntu-noble description: | Runs unit tests on devstack project. 
@@ -873,9 +873,9 @@ - devstack-platform-centos-9-stream - devstack-platform-debian-bookworm - devstack-platform-rocky-blue-onyx - - devstack-platform-ubuntu-jammy-ovn-source - - devstack-platform-ubuntu-jammy-ovs - - devstack-platform-ubuntu-noble + - devstack-platform-ubuntu-noble-ovn-source + - devstack-platform-ubuntu-noble-ovs + - devstack-platform-ubuntu-jammy - devstack-multinode - devstack-unit-tests - openstack-tox-bashate @@ -1002,6 +1002,6 @@ - devstack-platform-centos-9-stream - devstack-platform-debian-bookworm - devstack-platform-rocky-blue-onyx - - devstack-platform-ubuntu-jammy-ovn-source - - devstack-platform-ubuntu-jammy-ovs - - devstack-platform-ubuntu-noble + - devstack-platform-ubuntu-noble-ovn-source + - devstack-platform-ubuntu-noble-ovs + - devstack-platform-ubuntu-jammy From e8e3eb6dc9353a9df663bfdbb6448bb001ba6ee4 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Fri, 11 Oct 2024 15:20:17 +0100 Subject: [PATCH 456/574] doc: Update WSGI section Change-Id: Iaefe12e7fdeddb0fb6fe272cd4df3ce46470cc28 Signed-off-by: Stephen Finucane --- doc/source/configuration.rst | 28 +++++++++++----------------- stackrc | 2 +- 2 files changed, 12 insertions(+), 18 deletions(-) diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index a83b2de0df..f5f30c4d09 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -351,26 +351,21 @@ Example disabling RabbitMQ in ``local.conf``:: disable_service rabbit - Apache Frontend --------------- -The Apache web server can be enabled for wsgi services that support -being deployed under HTTPD + mod_wsgi. By default, services that -recommend running under HTTPD + mod_wsgi are deployed under Apache. To -use an alternative deployment strategy (e.g. eventlet) for services -that support an alternative to HTTPD + mod_wsgi set -``ENABLE_HTTPD_MOD_WSGI_SERVICES`` to ``False`` in your -``local.conf``. - -Each service that can be run under HTTPD + mod_wsgi also has an -override toggle available that can be set in your ``local.conf``. - -Keystone is run under Apache with ``mod_wsgi`` by default. +The Apache web server is enabled for services that support via WSGI. Today this +means HTTPD and uWSGI but historically this meant HTTPD + mod_wsgi. This +historical legacy is captured by the naming of many variables, which include +``MOD_WSGI`` rather than ``UWSGI``. -Example (Keystone):: - - KEYSTONE_USE_MOD_WSGI="True" +Some services support alternative deployment strategies (e.g. eventlet). You +can enable these ``ENABLE_HTTPD_MOD_WSGI_SERVICES`` to ``False`` in your +``local.conf``. In addition, each service that can be run under HTTPD + +mod_wsgi also has an override toggle available that can be set in your +``local.conf``. These are, however, slowly being removed as services have +adopted standardized deployment mechanisms and more generally moved away from +eventlet. Example (Nova):: @@ -388,7 +383,6 @@ Example (Cinder):: CINDER_USE_MOD_WSGI="True" - Libraries from Git ------------------ diff --git a/stackrc b/stackrc index ab1f8a6ffd..33c18ce0a8 100644 --- a/stackrc +++ b/stackrc @@ -85,7 +85,7 @@ fi # Global toggle for enabling services under mod_wsgi. If this is set to # ``True`` all services that use HTTPD + mod_wsgi as the preferred method of # deployment, will be deployed under Apache. If this is set to ``False`` all -# services will rely on the local toggle variable (e.g. ``KEYSTONE_USE_MOD_WSGI``) +# services will rely on the local toggle variable. 
ENABLE_HTTPD_MOD_WSGI_SERVICES=True # Set the default Nova APIs to enable From 851d82ccca40d61cbfd2319c38e0128bdaea247e Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Fri, 11 Oct 2024 15:37:57 +0100 Subject: [PATCH 457/574] stackrc: Die if attempting to use mod_wsgi We do not want to support this anymore. Change-Id: I8823e98809ed6b66c27dbcf21a00eea68ef403e8 Signed-off-by: Stephen Finucane --- stackrc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/stackrc b/stackrc index 33c18ce0a8..c5a0fd4d2f 100644 --- a/stackrc +++ b/stackrc @@ -223,6 +223,9 @@ GIT_TIMEOUT=${GIT_TIMEOUT:-0} # proxy uwsgi in front of it, or "mod_wsgi", which runs in # apache. mod_wsgi is deprecated, don't use it. WSGI_MODE=${WSGI_MODE:-"uwsgi"} +if [[ "$WSGI_MODE" != "uwsgi" ]]; then + die $LINENO "$WSGI_MODE is no longer a supported WSGI mode. Only uwsgi is valid." +fi # Repositories # ------------ From dda40363e6a78b5f3f018a962e5dff14e2c12cd9 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Fri, 11 Oct 2024 15:20:01 +0100 Subject: [PATCH 458/574] lib/nova: Remove NOVA_USE_MOD_WSGI We are in the process of migrating away from Eventlet. Nothing is setting this to a non-default value, meaning there is no reason to keep it around any more. Drop it. Change-Id: I036851810360539335502481955769b2308e3dcc Signed-off-by: Stephen Finucane --- doc/source/configuration.rst | 4 ---- lib/nova | 38 ++++-------------------------------- 2 files changed, 4 insertions(+), 38 deletions(-) diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index f5f30c4d09..6b8a80563d 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -367,10 +367,6 @@ mod_wsgi also has an override toggle available that can be set in your adopted standardized deployment mechanisms and more generally moved away from eventlet. -Example (Nova):: - - NOVA_USE_MOD_WSGI="True" - Example (Swift):: SWIFT_USE_MOD_WSGI="True" diff --git a/lib/nova b/lib/nova index 35c6893763..20e19da9a4 100644 --- a/lib/nova +++ b/lib/nova @@ -75,14 +75,6 @@ NOVA_CPU_CELL=${NOVA_CPU_CELL:-1} NOVA_API_PASTE_INI=${NOVA_API_PASTE_INI:-$NOVA_CONF_DIR/api-paste.ini} -# Toggle for deploying Nova-API under a wsgi server. We default to -# true to use UWSGI, but allow False so that fall back to the -# eventlet server can happen for grenade runs. -# NOTE(cdent): We can adjust to remove the eventlet-base api service -# after pike, at which time we can stop using NOVA_USE_MOD_WSGI to -# mean "use uwsgi" because we'll be always using uwsgi. -NOVA_USE_MOD_WSGI=${NOVA_USE_MOD_WSGI:-True} - # We do not need to report service status every 10s for devstack-like # deployments. In the gate this generates extra work for the services and the # database which are already taxed. 
@@ -393,11 +385,7 @@ function create_nova_accounts { create_service_user "nova" "admin" local nova_api_url - if [[ "$NOVA_USE_MOD_WSGI" == "False" ]]; then - nova_api_url="$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT" - else - nova_api_url="$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST/compute" - fi + nova_api_url="$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST/compute" get_or_create_service "nova_legacy" "compute_legacy" "Nova Compute Service (Legacy 2.0)" get_or_create_endpoint \ @@ -513,11 +501,6 @@ function create_nova_conf { iniset $NOVA_CONF oslo_policy enforce_new_defaults False iniset $NOVA_CONF oslo_policy enforce_scope False fi - if is_service_enabled tls-proxy && [ "$NOVA_USE_MOD_WSGI" == "False" ]; then - # Set the service port for a proxy to take the original - iniset $NOVA_CONF DEFAULT osapi_compute_listen_port "$NOVA_SERVICE_PORT_INT" - iniset $NOVA_CONF DEFAULT osapi_compute_link_prefix $NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT - fi configure_keystone_authtoken_middleware $NOVA_CONF nova fi @@ -998,17 +981,8 @@ function start_nova_api { local old_path=$PATH export PATH=$NOVA_BIN_DIR:$PATH - if [ "$NOVA_USE_MOD_WSGI" == "False" ]; then - run_process n-api "$NOVA_BIN_DIR/nova-api" - nova_url=$service_protocol://$SERVICE_HOST:$service_port - # Start proxy if tsl enabled - if is_service_enabled tls-proxy; then - start_tls_proxy nova '*' $NOVA_SERVICE_PORT $NOVA_SERVICE_HOST $NOVA_SERVICE_PORT_INT - fi - else - run_process "n-api" "$(which uwsgi) --procname-prefix nova-api --ini $NOVA_UWSGI_CONF" - nova_url=$service_protocol://$SERVICE_HOST/compute/v2.1/ - fi + run_process "n-api" "$(which uwsgi) --procname-prefix nova-api --ini $NOVA_UWSGI_CONF" + nova_url=$service_protocol://$SERVICE_HOST/compute/v2.1/ echo "Waiting for nova-api to start..." if ! wait_for_service $SERVICE_TIMEOUT $nova_url; then @@ -1114,11 +1088,7 @@ function start_nova_rest { local compute_cell_conf=$NOVA_CONF run_process n-sch "$NOVA_BIN_DIR/nova-scheduler --config-file $compute_cell_conf" - if [ "$NOVA_USE_MOD_WSGI" == "False" ]; then - run_process n-api-meta "$NOVA_BIN_DIR/nova-api-metadata --config-file $compute_cell_conf" - else - run_process n-api-meta "$(which uwsgi) --procname-prefix nova-api-meta --ini $NOVA_METADATA_UWSGI_CONF" - fi + run_process n-api-meta "$(which uwsgi) --procname-prefix nova-api-meta --ini $NOVA_METADATA_UWSGI_CONF" export PATH=$old_path } From 7e8d5efdf2e0d1ff784e8aee2838e4bc7942856e Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Fri, 11 Oct 2024 15:34:18 +0100 Subject: [PATCH 459/574] lib/cinder: Remove CINDER_USE_MOD_WSGI Like Nova, nothing is setting this to False nowadays so there's no reason to persist with it. 
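One quick way to back up that claim before deleting such a toggle is a
tree-wide search; a rough sketch only (the checkout path is just an example,
and codesearch.opendev.org covers out-of-tree plugins):

    # Expect no hits outside lib/cinder itself before removing the flag.
    git -C /opt/stack/devstack grep -n 'CINDER_USE_MOD_WSGI' || echo "no remaining users"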
Change-Id: I0e1550992dde81c601175ef04da771ce8ca1dd29 Signed-off-by: Stephen Finucane --- doc/source/configuration.rst | 4 ---- lib/cinder | 39 ++++++------------------------------ 2 files changed, 6 insertions(+), 37 deletions(-) diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index 6b8a80563d..9a1fd4e179 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -375,10 +375,6 @@ Example (Heat):: HEAT_USE_MOD_WSGI="True" -Example (Cinder):: - - CINDER_USE_MOD_WSGI="True" - Libraries from Git ------------------ diff --git a/lib/cinder b/lib/cinder index dc284920e0..259018e7ab 100644 --- a/lib/cinder +++ b/lib/cinder @@ -160,10 +160,6 @@ fi # Supported backup drivers are in lib/cinder_backups CINDER_BACKUP_DRIVER=${CINDER_BACKUP_DRIVER:-swift} -# Toggle for deploying Cinder under a wsgi server. Legacy mod_wsgi -# reference should be cleaned up to more accurately refer to uwsgi. -CINDER_USE_MOD_WSGI=${CINDER_USE_MOD_WSGI:-True} - # Source the enabled backends if is_service_enabled c-vol && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then for be in ${CINDER_ENABLED_BACKENDS//,/ }; do @@ -393,14 +389,8 @@ function configure_cinder { if is_service_enabled tls-proxy; then if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then # Set the service port for a proxy to take the original - if [ "$CINDER_USE_MOD_WSGI" == "True" ]; then - iniset $CINDER_CONF DEFAULT osapi_volume_listen_port $CINDER_SERVICE_PORT_INT - iniset $CINDER_CONF oslo_middleware enable_proxy_headers_parsing True - else - iniset $CINDER_CONF DEFAULT osapi_volume_listen_port $CINDER_SERVICE_PORT_INT - iniset $CINDER_CONF DEFAULT public_endpoint $CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT - iniset $CINDER_CONF DEFAULT osapi_volume_base_URL $CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT - fi + iniset $CINDER_CONF DEFAULT osapi_volume_listen_port $CINDER_SERVICE_PORT_INT + iniset $CINDER_CONF oslo_middleware enable_proxy_headers_parsing True fi fi @@ -411,7 +401,7 @@ function configure_cinder { iniset_rpc_backend cinder $CINDER_CONF # Format logging - setup_logging $CINDER_CONF $CINDER_USE_MOD_WSGI + setup_logging $CINDER_CONF if is_service_enabled c-api; then write_uwsgi_config "$CINDER_UWSGI_CONF" "$CINDER_UWSGI" "/volume" @@ -477,11 +467,7 @@ function create_cinder_accounts { create_service_user "cinder" $extra_role local cinder_api_url - if [[ "$CINDER_USE_MOD_WSGI" == "False" ]]; then - cinder_api_url="$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT" - else - cinder_api_url="$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST/volume" - fi + cinder_api_url="$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST/volume" # block-storage is the official service type get_or_create_service "cinder" "block-storage" "Cinder Volume Service" @@ -609,10 +595,6 @@ function start_cinder { local service_port=$CINDER_SERVICE_PORT local service_protocol=$CINDER_SERVICE_PROTOCOL local cinder_url - if is_service_enabled tls-proxy && [ "$CINDER_USE_MOD_WSGI" == "False" ]; then - service_port=$CINDER_SERVICE_PORT_INT - service_protocol="http" - fi if [ "$CINDER_TARGET_HELPER" = "tgtadm" ]; then if is_service_enabled c-vol; then # Delete any old stack.conf @@ -629,17 +611,8 @@ function start_cinder { fi if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then - if [ "$CINDER_USE_MOD_WSGI" == "False" ]; then - run_process c-api "$CINDER_BIN_DIR/cinder-api --config-file $CINDER_CONF" - cinder_url=$service_protocol://$SERVICE_HOST:$service_port - # Start proxy if tls enabled - if 
is_service_enabled tls-proxy; then - start_tls_proxy cinder '*' $CINDER_SERVICE_PORT $CINDER_SERVICE_HOST $CINDER_SERVICE_PORT_INT - fi - else - run_process "c-api" "$(which uwsgi) --procname-prefix cinder-api --ini $CINDER_UWSGI_CONF" - cinder_url=$service_protocol://$SERVICE_HOST/volume/v3 - fi + run_process "c-api" "$(which uwsgi) --procname-prefix cinder-api --ini $CINDER_UWSGI_CONF" + cinder_url=$service_protocol://$SERVICE_HOST/volume/v3 fi echo "Waiting for Cinder API to start..." From e1465f1d73ad146c820d047932af1410dbc18675 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Fri, 11 Oct 2024 15:38:42 +0100 Subject: [PATCH 460/574] lib/placement: Remove support for deploying with mod_wsgi Change-Id: If17deabc35d35dca1d94b0d15d258769f347b130 Signed-off-by: Stephen Finucane --- files/apache-placement-api.template | 27 ----------------- lib/placement | 47 ++--------------------------- 2 files changed, 3 insertions(+), 71 deletions(-) delete mode 100644 files/apache-placement-api.template diff --git a/files/apache-placement-api.template b/files/apache-placement-api.template deleted file mode 100644 index 011abb95fc..0000000000 --- a/files/apache-placement-api.template +++ /dev/null @@ -1,27 +0,0 @@ -# NOTE(sbauza): This virtualhost is only here because some directives can -# only be set by a virtualhost or server context, so that's why the port is not bound. -# TODO(sbauza): Find a better way to identify a free port that is not corresponding to an existing -# vhost. - - WSGIDaemonProcess placement-api processes=%APIWORKERS% threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV% - WSGIProcessGroup placement-api - WSGIScriptAlias / %PUBLICWSGI% - WSGIApplicationGroup %{GLOBAL} - WSGIPassAuthorization On - = 2.4> - ErrorLogFormat "%M" - - ErrorLog /var/log/%APACHE_NAME%/placement-api.log - %SSLENGINE% - %SSLCERTFILE% - %SSLKEYFILE% - - -Alias /placement %PUBLICWSGI% - - SetHandler wsgi-script - Options +ExecCGI - WSGIProcessGroup placement-api - WSGIApplicationGroup %{GLOBAL} - WSGIPassAuthorization On - diff --git a/lib/placement b/lib/placement index 63fdfb6c1a..6297ab24fe 100644 --- a/lib/placement +++ b/lib/placement @@ -71,32 +71,6 @@ function cleanup_placement { remove_uwsgi_config "$PLACEMENT_UWSGI_CONF" "placement-api" } -# _config_placement_apache_wsgi() - Set WSGI config files -function _config_placement_apache_wsgi { - local placement_api_apache_conf - local venv_path="" - local placement_bin_dir="" - placement_bin_dir=$(get_python_exec_prefix) - placement_api_apache_conf=$(apache_site_config_for placement-api) - - if [[ ${USE_VENV} = True ]]; then - venv_path="python-path=${PROJECT_VENV["placement"]}/lib/$(python_version)/site-packages" - placement_bin_dir=${PROJECT_VENV["placement"]}/bin - fi - - sudo cp $FILES/apache-placement-api.template $placement_api_apache_conf - sudo sed -e " - s|%APACHE_NAME%|$APACHE_NAME|g; - s|%PUBLICWSGI%|$placement_bin_dir/placement-api|g; - s|%SSLENGINE%|$placement_ssl|g; - s|%SSLCERTFILE%|$placement_certfile|g; - s|%SSLKEYFILE%|$placement_keyfile|g; - s|%USER%|$STACK_USER|g; - s|%VIRTUALENV%|$venv_path|g - s|%APIWORKERS%|$API_WORKERS|g - " -i $placement_api_apache_conf -} - # create_placement_conf() - Write config function create_placement_conf { rm -f $PLACEMENT_CONF @@ -112,11 +86,7 @@ function configure_placement { sudo install -d -o $STACK_USER $PLACEMENT_CONF_DIR create_placement_conf - if [[ "$WSGI_MODE" == "uwsgi" ]]; then - write_uwsgi_config "$PLACEMENT_UWSGI_CONF" "$PLACEMENT_UWSGI" "/placement" - else - 
_config_placement_apache_wsgi - fi + write_uwsgi_config "$PLACEMENT_UWSGI_CONF" "$PLACEMENT_UWSGI" "/placement" if [[ "$PLACEMENT_ENFORCE_SCOPE" == "True" || "$ENFORCE_SCOPE" == "True" ]]; then iniset $PLACEMENT_CONF oslo_policy enforce_new_defaults True iniset $PLACEMENT_CONF oslo_policy enforce_scope True @@ -147,7 +117,6 @@ function init_placement { # install_placement() - Collect source and prepare function install_placement { - install_apache_wsgi # Install the openstackclient placement client plugin for CLI pip_install_gr osc-placement git_clone $PLACEMENT_REPO $PLACEMENT_DIR $PLACEMENT_BRANCH @@ -156,12 +125,7 @@ function install_placement { # start_placement_api() - Start the API processes ahead of other things function start_placement_api { - if [[ "$WSGI_MODE" == "uwsgi" ]]; then - run_process "placement-api" "$(which uwsgi) --procname-prefix placement --ini $PLACEMENT_UWSGI_CONF" - else - enable_apache_site placement-api - restart_apache_server - fi + run_process "placement-api" "$(which uwsgi) --procname-prefix placement --ini $PLACEMENT_UWSGI_CONF" echo "Waiting for placement-api to start..." if ! wait_for_service $SERVICE_TIMEOUT $PLACEMENT_SERVICE_PROTOCOL://$PLACEMENT_SERVICE_HOST/placement; then @@ -175,12 +139,7 @@ function start_placement { # stop_placement() - Disable the api service and stop it. function stop_placement { - if [[ "$WSGI_MODE" == "uwsgi" ]]; then - stop_process "placement-api" - else - disable_apache_site placement-api - restart_apache_server - fi + stop_process "placement-api" } # Restore xtrace From 169be464c2ac1c5fc16396e1c320465c4ce6a658 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Fri, 11 Oct 2024 15:42:10 +0100 Subject: [PATCH 461/574] lib/keystone: Remove support for deploying with mod_wsgi Change-Id: I2409385a87ee7cc7869bba9711bf95ab5fe77dc7 Signed-off-by: Stephen Finucane --- lib/keystone | 46 ++++++---------------------------------------- 1 file changed, 6 insertions(+), 40 deletions(-) diff --git a/lib/keystone b/lib/keystone index 7d6b05fd41..8f4f4b1366 100644 --- a/lib/keystone +++ b/lib/keystone @@ -51,15 +51,6 @@ KEYSTONE_CONF=$KEYSTONE_CONF_DIR/keystone.conf KEYSTONE_PUBLIC_UWSGI_CONF=$KEYSTONE_CONF_DIR/keystone-uwsgi-public.ini KEYSTONE_PUBLIC_UWSGI=$KEYSTONE_BIN_DIR/keystone-wsgi-public -# KEYSTONE_DEPLOY defines how keystone is deployed, allowed values: -# - mod_wsgi : Run keystone under Apache HTTPd mod_wsgi -# - uwsgi : Run keystone under uwsgi -if [[ "$WSGI_MODE" == "uwsgi" ]]; then - KEYSTONE_DEPLOY=uwsgi -else - KEYSTONE_DEPLOY=mod_wsgi -fi - # Select the Identity backend driver KEYSTONE_IDENTITY_BACKEND=${KEYSTONE_IDENTITY_BACKEND:-sql} @@ -144,15 +135,9 @@ function is_keystone_enabled { # cleanup_keystone() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_keystone { - if [ "$KEYSTONE_DEPLOY" == "mod_wsgi" ]; then - # These files will be created if we are running WSGI_MODE="mod_wsgi" - disable_apache_site keystone - sudo rm -f $(apache_site_config_for keystone) - else - stop_process "keystone" - remove_uwsgi_config "$KEYSTONE_PUBLIC_UWSGI_CONF" "keystone-wsgi-public" - sudo rm -f $(apache_site_config_for keystone-wsgi-public) - fi + stop_process "keystone" + remove_uwsgi_config "$KEYSTONE_PUBLIC_UWSGI_CONF" "keystone-wsgi-public" + sudo rm -f $(apache_site_config_for keystone-wsgi-public) } # _config_keystone_apache_wsgi() - Set WSGI config files of Keystone @@ -241,12 +226,7 @@ function configure_keystone { iniset $KEYSTONE_CONF DEFAULT debug 
$ENABLE_DEBUG_LOG_LEVEL - if [ "$KEYSTONE_DEPLOY" == "mod_wsgi" ]; then - iniset $KEYSTONE_CONF DEFAULT logging_exception_prefix "%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s" - _config_keystone_apache_wsgi - else # uwsgi - write_uwsgi_config "$KEYSTONE_PUBLIC_UWSGI_CONF" "$KEYSTONE_PUBLIC_UWSGI" "/identity" - fi + write_uwsgi_config "$KEYSTONE_PUBLIC_UWSGI_CONF" "$KEYSTONE_PUBLIC_UWSGI" "/identity" iniset $KEYSTONE_CONF DEFAULT max_token_size 16384 @@ -543,10 +523,6 @@ function install_keystone { if is_service_enabled ldap; then setup_develop $KEYSTONE_DIR ldap fi - - if [ "$KEYSTONE_DEPLOY" == "mod_wsgi" ]; then - install_apache_wsgi - fi } # start_keystone() - Start running processes @@ -559,12 +535,7 @@ function start_keystone { auth_protocol="http" fi - if [ "$KEYSTONE_DEPLOY" == "mod_wsgi" ]; then - enable_apache_site keystone - restart_apache_server - else # uwsgi - run_process keystone "$(which uwsgi) --procname-prefix keystone --ini $KEYSTONE_PUBLIC_UWSGI_CONF" "" - fi + run_process keystone "$(which uwsgi) --procname-prefix keystone --ini $KEYSTONE_PUBLIC_UWSGI_CONF" "" echo "Waiting for keystone to start..." # Check that the keystone service is running. Even if the tls tunnel @@ -589,12 +560,7 @@ function start_keystone { # stop_keystone() - Stop running processes function stop_keystone { - if [ "$KEYSTONE_DEPLOY" == "mod_wsgi" ]; then - disable_apache_site keystone - restart_apache_server - else - stop_process keystone - fi + stop_process keystone } # bootstrap_keystone() - Initialize user, role and project From 29545a5109df51f9e98b715b16968090a3928ab7 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Wed, 16 Oct 2024 02:50:33 +0000 Subject: [PATCH 462/574] Updated from generate-devstack-plugins-list Change-Id: I374de22c7c916f9497c55bf404141776fd17f6c8 --- doc/source/plugin-registry.rst | 2 -- 1 file changed, 2 deletions(-) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 21cf52c736..8b9d1f2b96 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -48,9 +48,7 @@ openstack/ironic-inspector `https://opendev.org/openstack/ironic-i openstack/ironic-prometheus-exporter `https://opendev.org/openstack/ironic-prometheus-exporter `__ openstack/ironic-ui `https://opendev.org/openstack/ironic-ui `__ openstack/keystone `https://opendev.org/openstack/keystone `__ -openstack/kuryr-kubernetes `https://opendev.org/openstack/kuryr-kubernetes `__ openstack/kuryr-libnetwork `https://opendev.org/openstack/kuryr-libnetwork `__ -openstack/kuryr-tempest-plugin `https://opendev.org/openstack/kuryr-tempest-plugin `__ openstack/magnum `https://opendev.org/openstack/magnum `__ openstack/magnum-ui `https://opendev.org/openstack/magnum-ui `__ openstack/manila `https://opendev.org/openstack/manila `__ From 50b0b602279fc0eb8d2bb9cab1d235197df72ec6 Mon Sep 17 00:00:00 2001 From: Takashi Natsume Date: Sun, 29 Sep 2024 21:28:47 +0900 Subject: [PATCH 463/574] Replace deprecated datetime.utcnow() The datetime.utcnow() is deprecated in Python 3.12. Replace datetime.utcnow() with datetime.now(datetime.timezone.utc).replace(tzinfo=None). 
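A quick way to compare the two expressions from a shell (plain python3,
nothing DevStack-specific assumed; the first form still works on 3.12 but
emits a DeprecationWarning):

    # Both print a naive UTC timestamp in the outfilter's format.
    python3 -c 'import datetime; print(datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S.%f")[:-3])'
    python3 -c 'import datetime; print(datetime.datetime.now(datetime.timezone.utc).replace(tzinfo=None).strftime("%Y-%m-%d %H:%M:%S.%f")[:-3])'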
Change-Id: I9bf6f69d9e174d490bb4f3eaef3b364ddf97a954 Signed-off-by: Takashi Natsume --- tools/outfilter.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/outfilter.py b/tools/outfilter.py index e910f79ff2..55f9ee1487 100644 --- a/tools/outfilter.py +++ b/tools/outfilter.py @@ -76,7 +76,8 @@ def main(): # with zuulv3 native jobs and ansible capture it may become # clearer what to do if HAS_DATE.search(line) is None: - now = datetime.datetime.utcnow() + now = datetime.datetime.now(datetime.timezone.utc).replace( + tzinfo=None) ts_line = ("%s | %s" % ( now.strftime("%Y-%m-%d %H:%M:%S.%f")[:-3], line)) From 2e04d0fa20d2f6ceaf537423dad6b00d289b531c Mon Sep 17 00:00:00 2001 From: Takashi Kajinami Date: Thu, 24 Oct 2024 00:54:28 +0900 Subject: [PATCH 464/574] Globally skip devstack job for pre-commit config update pre-commit has been introduced to number of projects like oslo to run lint checks such as hacking. The pre-commit config file does not affect functionality, so devstack job is not needed when only the file is updated. Change-Id: I4294fe0c4df2c36c8575613b05a1f9c2eb745d18 --- .zuul.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.zuul.yaml b/.zuul.yaml index a1c251a398..b5477d1a8f 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -432,6 +432,8 @@ - ^releasenotes/.*$ # Translations - ^.*/locale/.*po$ + # pre-commit config + - ^.pre-commit-config.yaml$ - job: name: devstack-minimal From 2fdb729e04e8227ac5bfe619df20c1854bf255bc Mon Sep 17 00:00:00 2001 From: Jay Faulkner Date: Tue, 12 Nov 2024 15:02:15 -0800 Subject: [PATCH 465/574] Use venv module for PROJECT_VENV building Currently, if USE_VENV=True, PROJECT_VENVs are initialized using the tools/build_venv.sh script; this script depends on the virtualenv module, which is much less commonly available than the built-in venv module which we already use many places. This changes the build_venv.sh script to use `python -m venv` instead. Needed-By: https://review.opendev.org/c/openstack/ironic/+/930776 Change-Id: I89fa2c0c4261e715064e77a766d98a34299532b3 --- tools/build_venv.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/build_venv.sh b/tools/build_venv.sh index cfa39a82e0..a439163b5d 100755 --- a/tools/build_venv.sh +++ b/tools/build_venv.sh @@ -38,7 +38,7 @@ if [[ -z "$TOP_DIR" ]]; then fi # Build new venv -virtualenv $VENV_DEST +python$PYTHON3_VERSION -m venv --system-site-packages $VENV_DEST # Install modern pip PIP_VIRTUAL_ENV=$VENV_DEST pip_install -U pip From f41479f370e75c7ea7f17a62135f3af99b91c781 Mon Sep 17 00:00:00 2001 From: Joel Capitao Date: Mon, 30 Sep 2024 15:14:20 +0200 Subject: [PATCH 466/574] Pull RDO Trunk repos when CentOS Stream official RPM not available The RDO project releases centos-release-openstack-* RPM a couple of weeks after the OpenStack upstream project cut master branch. In order to fill the gap in the meantime, we have to pull the repos from RDO Trunk. 
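After stack.sh has run on a CentOS Stream host, a quick check of which source
actually got configured (standard dnf locations, no particular release name
assumed):

    # Official release RPM, if one was available...
    rpm -qa | grep -i centos-release-openstack
    # ...otherwise the RDO Trunk fallback repo file written by stack.sh:
    ls -l /etc/yum.repos.d/delorean-deps.repo 2>/dev/null
    dnf repolist enabled | grep -Ei 'openstack|delorean'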
Change-Id: If95a687f2d7579779129eeb689cfa46b92450dc5 --- stack.sh | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/stack.sh b/stack.sh index dcfd398c01..bfa0573f21 100755 --- a/stack.sh +++ b/stack.sh @@ -308,8 +308,11 @@ function _install_rdo { # adding delorean-deps repo to provide current master rpms sudo wget https://trunk.rdoproject.org/centos9-master/delorean-deps.repo -O /etc/yum.repos.d/delorean-deps.repo else - # For stable/unmaintained branches use corresponding release rpm - sudo dnf -y install centos-release-openstack-${rdo_release} + if sudo dnf provides centos-release-openstack-${rdo_release} >/dev/null 2>&1; then + sudo dnf -y install centos-release-openstack-${rdo_release} + else + sudo wget https://trunk.rdoproject.org/centos9-${rdo_release}/delorean-deps.repo -O /etc/yum.repos.d/delorean-deps.repo + fi fi fi sudo dnf -y update From c6c5e12f6b1b5f7dac3d9f942c5ab8135618112d Mon Sep 17 00:00:00 2001 From: Joel Capitao Date: Wed, 13 Nov 2024 10:33:28 +0100 Subject: [PATCH 467/574] Revert "Use SETUPTOOLS_USE_DISTUTILS=stdlib for global pip installs" This partially reverts commit 18b4251bf4f689923a19bf7fbc50d5c2ea422b21. Support for loading distutils from the standard library is now deprecated since v70.3.0 [1]. It was needed initially to ease the transition and can be removed now. [1] https://setuptools.pypa.io/en/latest/history.html#v70-3-0 Change-Id: Ib929219ae81b802a4632963ef71a258edf4deee5 --- inc/python | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/inc/python b/inc/python index 2083b74dc1..c94e5a4952 100644 --- a/inc/python +++ b/inc/python @@ -199,13 +199,7 @@ function pip_install { echo "Using python $PYTHON3_VERSION to install $package_dir" else local cmd_pip="python$PYTHON3_VERSION -m pip" - # See - # https://github.com/pypa/setuptools/issues/2232 - # http://lists.openstack.org/pipermail/openstack-discuss/2020-August/016905.html - # this makes setuptools >=50 use the platform distutils. - # We only want to do this on global pip installs, not if - # installing in a virtualenv - local sudo_pip="sudo -H LC_ALL=en_US.UTF-8 SETUPTOOLS_USE_DISTUTILS=stdlib " + local sudo_pip="sudo -H LC_ALL=en_US.UTF-8" echo "Using python $PYTHON3_VERSION to install $package_dir" fi From b8cd9d11730206eb81c08b6d181503068b93c1d5 Mon Sep 17 00:00:00 2001 From: Abhishek Kekane Date: Thu, 7 Nov 2024 08:27:13 +0000 Subject: [PATCH 468/574] Support for s3 backend of glance This commit introduces support for s3 backend for glance. You can enabled it in your deployment by adding below options in your local.conf file. For single store support: enable_service s3api s-proxy s-account s-container disable_service tls-proxy GLANCE_USE_S3 = True For multistore support: enable_service s3api s-proxy s-account s-container disable_service tls-proxy GLANCE_USE_S3 = True GLANCE_ENABLE_MULTIPLE_STORES: True NOTE: At the moment devstack does not support tls with s3, this support will be added soon. 
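Pulled together, the single-store case above amounts to a local.conf sketch
like the following (the multistore flag is shown commented out; values use
standard localrc assignment syntax):

    [[local|localrc]]
    enable_service s3api s-proxy s-account s-container
    disable_service tls-proxy
    GLANCE_USE_S3=True
    # For multistore support, additionally:
    # GLANCE_ENABLE_MULTIPLE_STORES=True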
Needed-By: https://review.opendev.org/c/openstack/glance/+/934311 Change-Id: Ic7264dc7faccb5e68c8df3b929eaa6d04149c6a2 --- lib/glance | 61 ++++++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 57 insertions(+), 4 deletions(-) diff --git a/lib/glance b/lib/glance index 2eb4954f4a..5c3643d008 100644 --- a/lib/glance +++ b/lib/glance @@ -41,6 +41,12 @@ else GLANCE_BIN_DIR=$(get_python_exec_prefix) fi +#S3 for Glance +GLANCE_USE_S3=$(trueorfalse False GLANCE_USE_S3) +GLANCE_S3_DEFAULT_BACKEND=${GLANCE_S3_DEFAULT_BACKEND:-s3_fast} +GLANCE_S3_BUCKET_ON_PUT=$(trueorfalse True GLANCE_S3_BUCKET_ON_PUT) +GLANCE_S3_BUCKET_NAME=${GLANCE_S3_BUCKET_NAME:-images} + # Cinder for Glance USE_CINDER_FOR_GLANCE=$(trueorfalse False USE_CINDER_FOR_GLANCE) # GLANCE_CINDER_DEFAULT_BACKEND should be one of the values @@ -174,6 +180,34 @@ function cleanup_glance { remove_uwsgi_config "$GLANCE_UWSGI_CONF" "glance-wsgi-api" } +# Set multiple s3 store related config options +# +function configure_multiple_s3_stores { + enabled_backends="${GLANCE_S3_DEFAULT_BACKEND}:s3" + + iniset $GLANCE_API_CONF DEFAULT enabled_backends ${enabled_backends} + iniset $GLANCE_API_CONF glance_store default_backend $GLANCE_S3_DEFAULT_BACKEND +} + +# Set common S3 store options to given config section +# +# Arguments: +# config_section +# +function set_common_s3_store_params { + local config_section="$1" + openstack ec2 credential create + iniset $GLANCE_API_CONF $config_section s3_store_host "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$S3_SERVICE_PORT" + iniset $GLANCE_API_CONF $config_section s3_store_access_key "$(openstack ec2 credential list -c Access -f value)" + iniset $GLANCE_API_CONF $config_section s3_store_secret_key "$(openstack ec2 credential list -c Secret -f value)" + iniset $GLANCE_API_CONF $config_section s3_store_create_bucket_on_put $GLANCE_S3_BUCKET_ON_PUT + iniset $GLANCE_API_CONF $config_section s3_store_bucket $GLANCE_S3_BUCKET_NAME + iniset $GLANCE_API_CONF $config_section s3_store_bucket_url_format "path" + if is_service_enabled tls-proxy; then + iniset $GLANCE_API_CONF $config_section s3_store_cacert $SSL_BUNDLE_FILE + fi +} + # Set multiple cinder store related config options for each of the cinder store # function configure_multiple_cinder_stores { @@ -258,7 +292,6 @@ function configure_glance_store { local be if [[ "$glance_enable_multiple_stores" == "False" ]]; then - # Configure traditional glance_store if [[ "$use_cinder_for_glance" == "True" ]]; then # set common glance_store parameters iniset $GLANCE_API_CONF glance_store stores "cinder,file,http" @@ -281,7 +314,7 @@ function configure_glance_store { if [[ "$use_cinder_for_glance" == "True" ]]; then # Configure multiple cinder stores for glance configure_multiple_cinder_stores - else + elif ! is_service_enabled s-proxy && [[ "$GLANCE_USE_S3" == "False" ]]; then # Configure multiple file stores for glance configure_multiple_file_stores fi @@ -360,8 +393,15 @@ function configure_glance { # No multiple stores for swift yet if [[ "$GLANCE_ENABLE_MULTIPLE_STORES" == "False" ]]; then - # Store the images in swift if enabled. - if is_service_enabled s-proxy; then + # Return if s3api is enabled for glance + if [[ "$GLANCE_USE_S3" == "True" ]]; then + if is_service_enabled s3api; then + # set common glance_store parameters + iniset $GLANCE_API_CONF glance_store stores "s3,file,http" + iniset $GLANCE_API_CONF glance_store default_store s3 + fi + elif is_service_enabled s-proxy; then + # Store the images in swift if enabled. 
iniset $GLANCE_API_CONF glance_store default_store swift iniset $GLANCE_API_CONF glance_store swift_store_create_container_on_put True @@ -379,6 +419,12 @@ function configure_glance { iniset $GLANCE_SWIFT_STORE_CONF ref1 auth_address $KEYSTONE_SERVICE_URI/v3 iniset $GLANCE_SWIFT_STORE_CONF ref1 auth_version 3 fi + else + if [[ "$GLANCE_USE_S3" == "True" ]]; then + if is_service_enabled s3api; then + configure_multiple_s3_stores + fi + fi fi # We need to tell glance what it's public endpoint is so that the version @@ -484,6 +530,13 @@ function create_glance_accounts { configure_glance_quotas fi + if is_service_enabled s3api && [[ "$GLANCE_USE_S3" == "True" ]]; then + if [[ "$GLANCE_ENABLE_MULTIPLE_STORES" == "False" ]]; then + set_common_s3_store_params glance_store + else + set_common_s3_store_params $GLANCE_S3_DEFAULT_BACKEND + fi + fi fi } From 3f1cd9b076b1338d42031e3801cb6316d169c51e Mon Sep 17 00:00:00 2001 From: Rodolfo Alonso Hernandez Date: Tue, 19 Nov 2024 10:50:05 +0000 Subject: [PATCH 469/574] Remove the Neutron WSGI experimental jobs Neutron is moving all the jobs to use the Neutron API WSGI module, thus this module is no longer an experimental configuration. Change-Id: Iaf708cd5e6ab414b262a0daecb3909ace2376f68 --- .zuul.yaml | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index b5477d1a8f..3632dc03e8 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -956,15 +956,6 @@ # things, this job is not experimental but often is used to test # things that are not yet production ready or to test what will be # the new default after a deprecation period has ended. - # * neutron-fullstack-with-uwsgi: maintained by neutron for fullstack test - # when neutron-api is served by uwsgi, it's in exprimental for testing. - # the next cycle we can remove this job if things turn out to be - # stable enough. - # * neutron-functional-with-uwsgi: maintained by neutron for functional - # test. Next cycle we can remove this one if things turn out to be - # stable engouh with uwsgi. - # * neutron-ovn-tempest-with-uwsgi: maintained by neutron for tempest test. - # Next cycle we can remove this if everything run out stable enough. # * nova-multi-cell: maintained by nova and now is voting in the # check queue for nova changes but relies on devstack configuration @@ -972,9 +963,6 @@ jobs: - nova-multi-cell - nova-next - - neutron-fullstack-with-uwsgi - - neutron-functional-with-uwsgi - - neutron-ovn-tempest-with-uwsgi - devstack-plugin-ceph-tempest-py3: irrelevant-files: - ^.*\.rst$ From df0bfe4d5c3519d5c4deb5033bec2217c90c9425 Mon Sep 17 00:00:00 2001 From: 0weng Date: Tue, 12 Nov 2024 11:44:13 -0800 Subject: [PATCH 470/574] Doc: Fix variable name in logging config example $LOGDIR --> $DEST Change-Id: I461422f48525edf4ecb388b2f962edb03795db50 --- doc/source/configuration.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index 9a1fd4e179..3cfba716ca 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -323,7 +323,7 @@ a file, keep service logs and disable color in the stored files. 
[[local|localrc]] DEST=/opt/stack/ - LOGFILE=$LOGDIR/stack.sh.log + LOGFILE=$DEST/stack.sh.log LOG_COLOR=False Database Backend From c1eeb773a85b0485b6329e325ac1685d8e3b2dc4 Mon Sep 17 00:00:00 2001 From: Artem Goncharov Date: Thu, 10 Oct 2024 15:26:52 +0200 Subject: [PATCH 471/574] lib/keystone: Migrate Keystone to WSGI module path Depends-on: https://review.opendev.org/c/openstack/keystone/+/932060 Change-Id: I10bea74fb0bce1888d324a61f23c25b8f7082e97 --- lib/keystone | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/keystone b/lib/keystone index 8f4f4b1366..76e2598ba3 100644 --- a/lib/keystone +++ b/lib/keystone @@ -49,7 +49,7 @@ fi KEYSTONE_CONF_DIR=${KEYSTONE_CONF_DIR:-/etc/keystone} KEYSTONE_CONF=$KEYSTONE_CONF_DIR/keystone.conf KEYSTONE_PUBLIC_UWSGI_CONF=$KEYSTONE_CONF_DIR/keystone-uwsgi-public.ini -KEYSTONE_PUBLIC_UWSGI=$KEYSTONE_BIN_DIR/keystone-wsgi-public +KEYSTONE_PUBLIC_UWSGI=keystone.wsgi.api:application # Select the Identity backend driver KEYSTONE_IDENTITY_BACKEND=${KEYSTONE_IDENTITY_BACKEND:-sql} @@ -226,7 +226,7 @@ function configure_keystone { iniset $KEYSTONE_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - write_uwsgi_config "$KEYSTONE_PUBLIC_UWSGI_CONF" "$KEYSTONE_PUBLIC_UWSGI" "/identity" + write_uwsgi_config "$KEYSTONE_PUBLIC_UWSGI_CONF" "$KEYSTONE_PUBLIC_UWSGI" "/identity" "" "keystone-api" iniset $KEYSTONE_CONF DEFAULT max_token_size 16384 From 6578d6ad27f04bdbfd1c30a13a7fc7ae47c2fb49 Mon Sep 17 00:00:00 2001 From: Takashi Kajinami Date: Sat, 23 Nov 2024 21:44:17 +0900 Subject: [PATCH 472/574] Remove unused python-etcd3 python-etcd3 can't be used since etcd3 driver was removed from tooz in tooz 5.0.0 [1]. [1] 6bc02cda5b452bbf4821621eafc031bd676f8a2f Change-Id: I30b895b4473e2676085e27969a43b91be9b1e413 --- lib/libraries | 5 ----- 1 file changed, 5 deletions(-) diff --git a/lib/libraries b/lib/libraries index 9ea32304fc..fa418785dd 100755 --- a/lib/libraries +++ b/lib/libraries @@ -131,12 +131,7 @@ function install_libs { # python client libraries we might need from git can go here _install_lib_from_source "python-barbicanclient" - # etcd (because tooz does not have a hard dependency on these) - # - # NOTE(sdague): this is currently a work around because tooz - # doesn't pull in etcd3. - pip_install etcd3 pip_install etcd3gw } From ef63c690f119e3d9a7890215ee8832da4f5fb4dc Mon Sep 17 00:00:00 2001 From: Takashi Kajinami Date: Sun, 24 Nov 2024 22:48:08 +0900 Subject: [PATCH 473/574] Drop redundant lib/oslo It was kept for compatibility in renaming which was merged long ago[1], and is no longer necessary. 
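A minimal sanity check that the remaining gateway client is still present and
importable (run it inside the Python environment DevStack installed into;
package and module names as used above):

    pip show etcd3gw
    python3 -c 'import etcd3gw; print("etcd3gw importable")'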
[1] 3ed99c0b27122ff00e2d236086ab16b0cc1887c1 Depends-on: https://review.opendev.org/c/openstack/grenade/+/936095 Change-Id: I6a66359c19d0385beafb4c5e57b6ec3cd6d9cc54 --- clean.sh | 2 +- lib/libraries | 2 +- lib/oslo | 11 ----------- 3 files changed, 2 insertions(+), 13 deletions(-) delete mode 100644 lib/oslo diff --git a/clean.sh b/clean.sh index 6a31cc624a..092f557a88 100755 --- a/clean.sh +++ b/clean.sh @@ -40,7 +40,7 @@ source $TOP_DIR/lib/rpc_backend source $TOP_DIR/lib/tls -source $TOP_DIR/lib/oslo +source $TOP_DIR/lib/libraries source $TOP_DIR/lib/lvm source $TOP_DIR/lib/horizon source $TOP_DIR/lib/keystone diff --git a/lib/libraries b/lib/libraries index 9ea32304fc..9d5d65532e 100755 --- a/lib/libraries +++ b/lib/libraries @@ -1,6 +1,6 @@ #!/bin/bash # -# lib/oslo +# lib/libraries # # Functions to install libraries from git # diff --git a/lib/oslo b/lib/oslo deleted file mode 100644 index 3ae64c8210..0000000000 --- a/lib/oslo +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash -# -# lib/oslo -# -# Functions to install **Oslo** libraries from git -# -# We need this to handle the fact that projects would like to use -# pre-released versions of oslo libraries. -# -# Included for compatibility with grenade, remove in Queens -source $TOP_DIR/lib/libraries From ec49b3e1672ef47d59509132e95f94d6be13abfe Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Wed, 27 Nov 2024 02:42:01 +0000 Subject: [PATCH 474/574] Updated from generate-devstack-plugins-list Change-Id: I344c3492159d53c68002b78ac3c385c1beca0e61 --- doc/source/plugin-registry.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 8b9d1f2b96..2984a5c15f 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -31,6 +31,7 @@ openstack/ceilometer `https://opendev.org/openstack/ceilomet openstack/cloudkitty `https://opendev.org/openstack/cloudkitty `__ openstack/cyborg `https://opendev.org/openstack/cyborg `__ openstack/designate `https://opendev.org/openstack/designate `__ +openstack/designate-tempest-plugin `https://opendev.org/openstack/designate-tempest-plugin `__ openstack/devstack-plugin-amqp1 `https://opendev.org/openstack/devstack-plugin-amqp1 `__ openstack/devstack-plugin-ceph `https://opendev.org/openstack/devstack-plugin-ceph `__ openstack/devstack-plugin-container `https://opendev.org/openstack/devstack-plugin-container `__ From 451236381d4f6af0072b60fc65743b55ee33ab95 Mon Sep 17 00:00:00 2001 From: Rodolfo Alonso Hernandez Date: Fri, 29 Nov 2024 07:39:37 +0000 Subject: [PATCH 475/574] Add start time (in seconds) to the WSGI configuration file This new variable "start-time" is initialized when the WSGI starts and is the timestamp in seconds. Related-Bug: #2083570 Change-Id: I1b984b93d1352683097c1417b22d64341a68f72a --- lib/apache | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/apache b/lib/apache index 1420f76ff2..1c034d3c7e 100644 --- a/lib/apache +++ b/lib/apache @@ -293,6 +293,8 @@ function write_uwsgi_config { iniset "$conf" uwsgi add-header "Connection: close" # This ensures that file descriptors aren't shared between processes. 
iniset "$conf" uwsgi lazy-apps true + # Starting time of the WSGi server + iniset "$conf" uwsgi start-time %t # If we said bind directly to http, then do that and don't start the apache proxy if [[ -n "$http" ]]; then @@ -367,6 +369,8 @@ function write_local_uwsgi_http_config { iniset "$conf" uwsgi http-keepalive false # Increase socket timeout for slow chunked uploads iniset "$conf" uwsgi socket-timeout 30 + # Starting time of the WSGi server + iniset "$conf" uwsgi start-time %t enable_apache_mod proxy enable_apache_mod proxy_http From 97ea68ec4611391de2e245a1def655cbebc7649d Mon Sep 17 00:00:00 2001 From: yatinkarel Date: Mon, 2 Dec 2024 17:46:42 +0530 Subject: [PATCH 476/574] Fix the db user for mariadb in ubuntu 24.04 It was fixed in past for ubuntu 22.04 with [1], removing the check for jammy so it applies to all ubuntu versions since jammy. The checks now only refer debian distros so those can be adjusted with new debian releases. [1] https://review.opendev.org/c/openstack/devstack/+/866944 Related-Bug: #1999090 Closes-Bug: #2090835 Change-Id: Iff843c5c3f9c081aa1cec6c399a6ed8c05e06abe --- lib/databases/mysql | 17 ++++++----------- 1 file changed, 6 insertions(+), 11 deletions(-) diff --git a/lib/databases/mysql b/lib/databases/mysql index e069e128e9..629014c1d8 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -104,10 +104,10 @@ function configure_database_mysql { # Set the root password - only works the first time. For Ubuntu, we already # did that with debconf before installing the package, but we still try, # because the package might have been installed already. We don't do this - # for Ubuntu 22.04 (jammy) because the authorization model change in + # for Ubuntu 22.04+ because the authorization model change in # version 10.4 of mariadb. See # https://mariadb.org/authentication-in-mariadb-10-4/ - if ! (is_ubuntu && [[ "$DISTRO" == "jammy" ]] && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]); then + if ! (is_ubuntu && [[ ! "$DISTRO" =~ bookworm|bullseye ]] && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]); then sudo mysqladmin -u root password $DATABASE_PASSWORD || true fi @@ -124,16 +124,11 @@ function configure_database_mysql { # we need to change auth plugin for root user # TODO(frickler): simplify this logic if is_ubuntu && [[ ! "$DISTRO" =~ bookworm|bullseye ]] && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]; then - if [[ "$DISTRO" == "jammy" ]]; then - # For Ubuntu 22.04 (jammy) we follow the model outlined in - # https://mariadb.org/authentication-in-mariadb-10-4/ - sudo mysql -e "ALTER USER $DATABASE_USER@localhost IDENTIFIED VIA mysql_native_password USING PASSWORD('$DATABASE_PASSWORD');" - else - sudo mysql $cmd_args -e "UPDATE mysql.user SET plugin='' WHERE user='$DATABASE_USER' AND host='localhost';" - sudo mysql $cmd_args -e "FLUSH PRIVILEGES;" - fi + # For Ubuntu 22.04+ we follow the model outlined in + # https://mariadb.org/authentication-in-mariadb-10-4/ + sudo mysql -e "ALTER USER $DATABASE_USER@localhost IDENTIFIED VIA mysql_native_password USING PASSWORD('$DATABASE_PASSWORD');" fi - if ! (is_ubuntu && [[ "$DISTRO" == "jammy" ]] && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]); then + if ! (is_ubuntu && [[ ! 
"$DISTRO" =~ bookworm|bullseye ]] && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]); then # Create DB user if it does not already exist sudo mysql $cmd_args -e "CREATE USER IF NOT EXISTS '$DATABASE_USER'@'%' identified by '$DATABASE_PASSWORD';" # Update the DB to give user '$DATABASE_USER'@'%' full control of the all databases: From 320c2bf42ae41d751c72d80a6c85b26f3f6951bd Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Tue, 3 Dec 2024 17:04:39 +0000 Subject: [PATCH 477/574] Install setuptools 'core' extra Under as-yet-unidentified conditions, we can end up with a version of packaging that is too old for the version of latest version of setuptools. This is a known issue and expected behavior and per [1] $subject is the preferred resolution. [1] https://github.com/pypa/setuptools/issues/4483#issuecomment-2237219597 Change-Id: I9232f3fae1598297e83c4ea37339896f7dcbd44f Signed-off-by: Stephen Finucane --- inc/python | 4 ++-- lib/infra | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/inc/python b/inc/python index c94e5a4952..857f1f2d06 100644 --- a/inc/python +++ b/inc/python @@ -40,8 +40,8 @@ function setup_devstack_virtualenv { # Using system site packages to enable nova to use libguestfs. # This package is currently installed via the distro and not # available on pypi. - python$PYTHON3_VERSION -m venv --system-site-packages $DEVSTACK_VENV - pip_install -U pip setuptools + $PYTHON -m venv --system-site-packages "${DEVSTACK_VENV}" + pip_install -U pip setuptools[core] #NOTE(rpittau): workaround for simplejson removal in osc # https://review.opendev.org/c/openstack/python-openstackclient/+/920001 pip_install -U simplejson diff --git a/lib/infra b/lib/infra index 2aad00354a..f4760c352c 100644 --- a/lib/infra +++ b/lib/infra @@ -31,7 +31,7 @@ function install_infra { local PIP_VIRTUAL_ENV="$REQUIREMENTS_DIR/.venv" [ ! -d $PIP_VIRTUAL_ENV ] && ${VIRTUALENV_CMD} $PIP_VIRTUAL_ENV # We don't care about testing git pbr in the requirements venv. - PIP_VIRTUAL_ENV=$PIP_VIRTUAL_ENV pip_install -U pbr setuptools + PIP_VIRTUAL_ENV=$PIP_VIRTUAL_ENV pip_install -U pbr setuptools[core] PIP_VIRTUAL_ENV=$PIP_VIRTUAL_ENV pip_install $REQUIREMENTS_DIR # Unset the PIP_VIRTUAL_ENV so that PBR does not end up trapped From 9486709dc5e6f156dc5beb051f1861ea362ae10c Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Tue, 3 Dec 2024 17:07:57 +0000 Subject: [PATCH 478/574] Revert "Install simplejson in devstack venv" This reverts commit 6990b06cd321930f69907ba42ee744755f8029fe. This should no longer be necessary as packages are requiring simplejson. Change-Id: I74b0f93457f91e7d53d54737d52f67075088faca Signed-off-by: Stephen Finucane --- inc/python | 3 --- 1 file changed, 3 deletions(-) diff --git a/inc/python b/inc/python index 857f1f2d06..bd58905e9e 100644 --- a/inc/python +++ b/inc/python @@ -42,9 +42,6 @@ function setup_devstack_virtualenv { # available on pypi. 
$PYTHON -m venv --system-site-packages "${DEVSTACK_VENV}" pip_install -U pip setuptools[core] - #NOTE(rpittau): workaround for simplejson removal in osc - # https://review.opendev.org/c/openstack/python-openstackclient/+/920001 - pip_install -U simplejson fi if [[ ":$PATH:" != *":$DEVSTACK_VENV/bin:"* ]] ; then export PATH="$DEVSTACK_VENV/bin:$PATH" From 5bf9d13f2737ca9c8a15b7d250a48ef8be935a05 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Mon, 9 Dec 2024 14:03:44 +0000 Subject: [PATCH 479/574] lib/placement: Migrate placement to WSGI module path Change-Id: If9e2cc9247d707a451ef394615e547515115f9e0 Signed-off-by: Stephen Finucane Depends-on: https://review.opendev.org/c/openstack/placement/+/919569 --- lib/placement | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/placement b/lib/placement index 6297ab24fe..03aaa0344b 100644 --- a/lib/placement +++ b/lib/placement @@ -37,7 +37,7 @@ if [[ ${USE_VENV} = True ]]; then else PLACEMENT_BIN_DIR=$(get_python_exec_prefix) fi -PLACEMENT_UWSGI=$PLACEMENT_BIN_DIR/placement-api +PLACEMENT_UWSGI=placement.wsgi.api:application PLACEMENT_UWSGI_CONF=$PLACEMENT_CONF_DIR/placement-uwsgi.ini if is_service_enabled tls-proxy; then @@ -86,7 +86,7 @@ function configure_placement { sudo install -d -o $STACK_USER $PLACEMENT_CONF_DIR create_placement_conf - write_uwsgi_config "$PLACEMENT_UWSGI_CONF" "$PLACEMENT_UWSGI" "/placement" + write_uwsgi_config "$PLACEMENT_UWSGI_CONF" "$PLACEMENT_UWSGI" "/placement" "" "placement-api" if [[ "$PLACEMENT_ENFORCE_SCOPE" == "True" || "$ENFORCE_SCOPE" == "True" ]]; then iniset $PLACEMENT_CONF oslo_policy enforce_new_defaults True iniset $PLACEMENT_CONF oslo_policy enforce_scope True From 05f7d302cfa2da73b2887afcde92ef65b1001194 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Thu, 7 Dec 2023 10:48:10 +0000 Subject: [PATCH 480/574] lib/cinder: Migrate cinder to WSGI module path Change-Id: I494dae51c65318299d4fe2ff5887c97ac2be3224 Signed-off-by: Stephen Finucane Depends-on: https://review.opendev.org/c/openstack/cinder/+/902876 --- lib/cinder | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/cinder b/lib/cinder index 259018e7ab..b557d4b10b 100644 --- a/lib/cinder +++ b/lib/cinder @@ -62,7 +62,7 @@ CINDER_STATE_PATH=${CINDER_STATE_PATH:=$DATA_DIR/cinder} CINDER_CONF_DIR=/etc/cinder CINDER_CONF=$CINDER_CONF_DIR/cinder.conf -CINDER_UWSGI=$CINDER_BIN_DIR/cinder-wsgi +CINDER_UWSGI=cinder.wsgi.api:application CINDER_UWSGI_CONF=$CINDER_CONF_DIR/cinder-api-uwsgi.ini CINDER_API_PASTE_INI=$CINDER_CONF_DIR/api-paste.ini @@ -404,7 +404,7 @@ function configure_cinder { setup_logging $CINDER_CONF if is_service_enabled c-api; then - write_uwsgi_config "$CINDER_UWSGI_CONF" "$CINDER_UWSGI" "/volume" + write_uwsgi_config "$CINDER_UWSGI_CONF" "$CINDER_UWSGI" "/volume" "" "cinder-api" fi if [[ -r $CINDER_PLUGINS/$CINDER_DRIVER ]]; then From b8cbcff693f3f1ddfa9c60c7c826629987a2d23e Mon Sep 17 00:00:00 2001 From: Takashi Kajinami Date: Mon, 25 Mar 2024 20:00:10 +0900 Subject: [PATCH 481/574] Drop removed glance-cache.conf options These were removed when glance-registry was removed[1]. 
[1] 30680961994b36ed12713c0f106b661535ce41c6 Change-Id: Iaa4a35fddcd4763e12e5140b71e4022421c476fc --- lib/glance | 4 ---- 1 file changed, 4 deletions(-) diff --git a/lib/glance b/lib/glance index 5c3643d008..6d6b158e74 100644 --- a/lib/glance +++ b/lib/glance @@ -446,10 +446,6 @@ function configure_glance { iniset $GLANCE_CACHE_CONF DEFAULT use_syslog $SYSLOG iniset $GLANCE_CACHE_CONF DEFAULT image_cache_dir $GLANCE_CACHE_DIR/ iniset $GLANCE_CACHE_CONF DEFAULT image_cache_driver $GLANCE_CACHE_DRIVER - iniset $GLANCE_CACHE_CONF DEFAULT auth_url $KEYSTONE_SERVICE_URI - iniset $GLANCE_CACHE_CONF DEFAULT admin_tenant_name $SERVICE_PROJECT_NAME - iniset $GLANCE_CACHE_CONF DEFAULT admin_user glance - iniset $GLANCE_CACHE_CONF DEFAULT admin_password $SERVICE_PASSWORD # Store specific confs iniset $GLANCE_CACHE_CONF glance_store filesystem_store_datadir $GLANCE_IMAGE_DIR/ From c9a4454450429491c34184d0ceb85eaba62cc525 Mon Sep 17 00:00:00 2001 From: Fernando Royo Date: Thu, 12 Dec 2024 11:01:36 +0100 Subject: [PATCH 482/574] Removing start_ovn_services call The function _start_ovn is responsible for starting the OVS/OVN services. However, its final action is a call to _start_ovn_services, which restarts all OVS/OVN services without any justified reason. This patch removes that call to avoid unnecessarily restarting all OVS/OVN services immediately after they have been started. Closes-Bug: #2091614 Change-Id: Ia791ecb734531fa933c570d687ac9224ed6b27e4 --- lib/neutron_plugins/ovn_agent | 26 -------------------------- 1 file changed, 26 deletions(-) diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index be3a9e78b2..b7633c8c17 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -704,30 +704,6 @@ function _start_ovs { fi } -function _start_ovn_services { - _start_process "$OVSDB_SERVER_SERVICE" - _start_process "$OVS_VSWITCHD_SERVICE" - - if is_service_enabled ovn-northd ; then - _start_process "$OVN_NORTHD_SERVICE" - fi - if is_service_enabled ovn-controller ; then - _start_process "$OVN_CONTROLLER_SERVICE" - fi - if is_service_enabled ovn-controller-vtep ; then - _start_process "$OVN_CONTROLLER_VTEP_SERVICE" - fi - if is_service_enabled ovs-vtep ; then - _start_process "devstack@ovs-vtep.service" - fi - if is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent ; then - _start_process "devstack@q-ovn-metadata-agent.service" - fi - if is_service_enabled q-ovn-agent neutron-ovn-agent ; then - _start_process "devstack@q-ovn-agent.service" - fi -} - # start_ovn() - Start running processes, including screen function start_ovn { echo "Starting OVN" @@ -797,8 +773,6 @@ function start_ovn { # Format logging setup_logging $OVN_AGENT_CONF fi - - _start_ovn_services } function _stop_ovs_dp { From bf04bf517b839fa495384f636b7f8d4f05c6fa0e Mon Sep 17 00:00:00 2001 From: yatinkarel Date: Fri, 27 Dec 2024 13:01:16 +0530 Subject: [PATCH 483/574] Switch to OVS/OVN LTS branches As discussed in Epoxy PTG[1] switching jobs to run with latest OVS/OVN LTS branches. Ubuntu noble and CentOS 9-stream also including these LTS versions. 
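As a rough illustration (not part of this change), a local.conf that opts into building both projects from source on these branches could look like the following; the variable names and values simply mirror the new defaults and the job settings below:

    [[local|localrc]]
    # Build OVS/OVN from source rather than using distro packages
    OVN_BUILD_FROM_SOURCE=True
    OVN_BRANCH=branch-24.03
    OVS_BRANCH=branch-3.3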
[1] https://etherpad.opendev.org/p/oct2024-ptg-neutron Change-Id: Iecb33628641cd33b6e46d09759e3180cc0bd55e9 --- .zuul.yaml | 4 ++-- lib/neutron_plugins/ovn_agent | 2 +- lib/neutron_plugins/ovs_source | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 2fbfa0417c..74ce39cdfa 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -745,8 +745,8 @@ vars: devstack_localrc: OVN_BUILD_FROM_SOURCE: True - OVN_BRANCH: "v21.06.0" - OVS_BRANCH: "a4b04276ab5934d087669ff2d191a23931335c87" + OVN_BRANCH: "branch-24.03" + OVS_BRANCH: "branch-3.3" OVS_SYSCONFDIR: "/usr/local/etc/openvswitch" - job: diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index be3a9e78b2..ad5c1f3003 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -28,7 +28,7 @@ source ${TOP_DIR}/lib/neutron_plugins/ovs_source OVN_REPO=${OVN_REPO:-https://github.com/ovn-org/ovn.git} OVN_REPO_NAME=$(basename ${OVN_REPO} | cut -f1 -d'.') OVN_REPO_NAME=${OVN_REPO_NAME:-ovn} -OVN_BRANCH=${OVN_BRANCH:-v20.06.1} +OVN_BRANCH=${OVN_BRANCH:-branch-24.03} # The commit removing OVN bits from the OVS tree, it is the commit that is not # present in OVN tree and is used to distinguish if OVN is part of OVS or not. # https://github.com/openvswitch/ovs/commit/05bf1dbb98b0635a51f75e268ef8aed27601401d diff --git a/lib/neutron_plugins/ovs_source b/lib/neutron_plugins/ovs_source index 75e7d7cb36..6b6f531a01 100644 --- a/lib/neutron_plugins/ovs_source +++ b/lib/neutron_plugins/ovs_source @@ -20,7 +20,7 @@ Q_BUILD_OVS_FROM_GIT=$(trueorfalse False Q_BUILD_OVS_FROM_GIT) OVS_REPO=${OVS_REPO:-https://github.com/openvswitch/ovs.git} OVS_REPO_NAME=$(basename ${OVS_REPO} | cut -f1 -d'.') OVS_REPO_NAME=${OVS_REPO_NAME:-ovs} -OVS_BRANCH=${OVS_BRANCH:-0047ca3a0290f1ef954f2c76b31477cf4b9755f5} +OVS_BRANCH=${OVS_BRANCH:-branch-3.3} # Functions From b609c80a36f7ac77c1eb7ec256e6808ab483440b Mon Sep 17 00:00:00 2001 From: Takashi Kajinami Date: Tue, 7 Jan 2025 10:51:03 +0900 Subject: [PATCH 484/574] doc: Use dnf instead of yum The yum command has been replaced by the dnf command in recent releases of Fedora-based distributions. Use the native command instead of the alias kept for compatibility. Change-Id: I0a1dfdaca91164eff2c25795f66976ec70356574 --- doc/source/guides/multinode-lab.rst | 2 +- doc/source/guides/single-machine.rst | 4 ++-- doc/source/guides/single-vm.rst | 4 ++-- doc/source/networking.rst | 2 +- doc/source/plugins.rst | 2 +- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/doc/source/guides/multinode-lab.rst b/doc/source/guides/multinode-lab.rst index 658422b0af..4b50b2c4ae 100644 --- a/doc/source/guides/multinode-lab.rst +++ b/doc/source/guides/multinode-lab.rst @@ -24,7 +24,7 @@ Install a couple of packages to bootstrap configuration: :: - apt-get install -y git sudo || yum install -y git sudo + apt-get install -y git sudo || dnf install -y git sudo Network Configuration --------------------- diff --git a/doc/source/guides/single-machine.rst b/doc/source/guides/single-machine.rst index a4385b5b4b..263fbb9d6f 100644 --- a/doc/source/guides/single-machine.rst +++ b/doc/source/guides/single-machine.rst @@ -62,7 +62,7 @@ to have sudo privileges: .. code-block:: console - $ apt-get install sudo -y || yum install -y sudo + $ apt-get install sudo -y || dnf install -y sudo $ echo "stack ALL=(ALL) NOPASSWD: ALL" | sudo tee /etc/sudoers.d/stack .. note:: On some systems you may need to use ``sudo visudo``. 
@@ -81,7 +81,7 @@ We'll grab the latest version of DevStack via https: .. code-block:: console - $ sudo apt-get install git -y || sudo yum install -y git + $ sudo apt-get install git -y || sudo dnf install -y git $ git clone https://opendev.org/openstack/devstack $ cd devstack diff --git a/doc/source/guides/single-vm.rst b/doc/source/guides/single-vm.rst index 7dac18b333..4272a4b180 100644 --- a/doc/source/guides/single-vm.rst +++ b/doc/source/guides/single-vm.rst @@ -56,8 +56,8 @@ passed as the user-data file when booting the VM. write_files: - content: | #!/bin/sh - DEBIAN_FRONTEND=noninteractive sudo apt-get -qqy update || sudo yum update -qy - DEBIAN_FRONTEND=noninteractive sudo apt-get install -qqy git || sudo yum install -qy git + DEBIAN_FRONTEND=noninteractive sudo apt-get -qqy update || sudo dnf update -qy + DEBIAN_FRONTEND=noninteractive sudo apt-get install -qqy git || sudo dnf install -qy git sudo chown stack:stack /home/stack cd /home/stack git clone https://opendev.org/openstack/devstack diff --git a/doc/source/networking.rst b/doc/source/networking.rst index 05b4f34164..10e1c3ff2c 100644 --- a/doc/source/networking.rst +++ b/doc/source/networking.rst @@ -213,7 +213,7 @@ install ``sshuttle`` on your localhost: .. code-block:: bash - sudo apt-get install sshuttle || yum install sshuttle + sudo apt-get install sshuttle || dnf install sshuttle Finally, start ``sshuttle`` on your localhost using the floating IP address range. For example, assuming you are using the default value for diff --git a/doc/source/plugins.rst b/doc/source/plugins.rst index dd75b5a22d..fe567e2277 100644 --- a/doc/source/plugins.rst +++ b/doc/source/plugins.rst @@ -232,7 +232,7 @@ an early phase of its execution. These packages may be defined in a plugin as files that contain new-line separated lists of packages required by the plugin -Supported packaging systems include apt and yum across multiple +Supported packaging systems include apt and dnf across multiple distributions. To enable a plugin to hook into this and install package dependencies, packages may be listed at the following locations in the top-level of the plugin repository: From 9a1cdbc3c809f785ad01a3bbdfef8f552eafce30 Mon Sep 17 00:00:00 2001 From: Rajat Dhasmana Date: Wed, 8 Jan 2025 18:43:40 +0530 Subject: [PATCH 485/574] Update glance image size limit The image size limit for glance using the unified limits is set to 1000MB (~1GB). This is pretty low given that a volume's minimum size is 1GB and when uploaded to glance fills out the whole limit. The limit issue can also be seen by a recent tempest change[1] where uploading two volumes failed[2] across various jobs due to this limit. We do have a config option in devstack ``GLANCE_LIMIT_IMAGE_SIZE_TOTAL`` but that will need to be configured for various jobs and a 2GB seems to be a sensible default which this patch configures. [1] https://review.opendev.org/c/openstack/tempest/+/938592 [2] Jan 07 23:05:33 devstack-ceph cinder-volume[909965]: ERROR oslo_messaging.rpc.server cinder.exception.ImageLimitExceeded: HTTP 413 Request Entity Too Large: The request returned a 413 Request Entity Too Large. 
This generally means that rate limiting or a quota threshold was breached.: The response body:: Project dfe8648c188d46409349eac2c449c0b4 is over a limit for [Resource image_size_total is over limit of 1000 due to current usage 1024 and delta 0] Change-Id: I533b7444e5f71275ea3d5c18914e306b1dbbc5cb --- lib/glance | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/glance b/lib/glance index 5c3643d008..9655cc4103 100644 --- a/lib/glance +++ b/lib/glance @@ -137,7 +137,7 @@ GLANCE_UWSGI=$GLANCE_BIN_DIR/glance-wsgi-api GLANCE_UWSGI_CONF=$GLANCE_CONF_DIR/glance-uwsgi.ini # Glance default limit for Devstack -GLANCE_LIMIT_IMAGE_SIZE_TOTAL=${GLANCE_LIMIT_IMAGE_SIZE_TOTAL:-1000} +GLANCE_LIMIT_IMAGE_SIZE_TOTAL=${GLANCE_LIMIT_IMAGE_SIZE_TOTAL:-2000} # If wsgi mode is uwsgi run glance under uwsgi, else default to eventlet # TODO(mtreinish): Remove the eventlet path here and in all the similar From ad698f0b8c658fcdddb11d5edae1e77f08b5d0b5 Mon Sep 17 00:00:00 2001 From: Jakub Libosvar Date: Wed, 8 Jan 2025 15:26:41 -0500 Subject: [PATCH 486/574] Introduce SERVICES_FOR_TEMPEST variable for localrc This variable overrides the `ENABLED_SERVICES` global variable only for the `configure_tempest()` function from `lib/tempest`. If the `SERVICES_FOR_TEMPEST` variable is not defined then `ENABLED_SERVICES` is used as it had used it before. This is useful for cases where Tempest is executed from a remote node. Change-Id: Ic62e48f2f1eb861ec64f51e03353868076cbcc04 --- doc/source/guides/multinode-lab.rst | 42 +++++++++++++++++++++++++++++ lib/tempest | 2 ++ 2 files changed, 44 insertions(+) diff --git a/doc/source/guides/multinode-lab.rst b/doc/source/guides/multinode-lab.rst index 658422b0af..e6b0b96eb4 100644 --- a/doc/source/guides/multinode-lab.rst +++ b/doc/source/guides/multinode-lab.rst @@ -210,6 +210,48 @@ only needs to be performed for subnodes. .. _Cells v2: https://docs.openstack.org/nova/latest/user/cells.html +Configure Tempest Node to run the Tempest tests +----------------------------------------------- + +If there is a need to execute Tempest tests against different Cluster +Controller node then it can be done by re-using the ``local.conf`` file from +the Cluster Controller node but with not enabled Controller services in +``ENABLED_SERVICES`` variable. This variable needs to contain only ``tempest`` +as a configured service. Then variable ``SERVICES_FOR_TEMPEST`` must be +configured to contain those services that were enabled on the Cluster +Controller node in the ``ENABLED_SERVICES`` variable. 
For example the +``local.conf`` file could look as follows: + +:: + + [[local|localrc]] + HOST_IP=192.168.42.12 # change this per compute node + FIXED_RANGE=10.4.128.0/20 + FLOATING_RANGE=192.168.42.128/25 + LOGFILE=/opt/stack/logs/stack.sh.log + ADMIN_PASSWORD=labstack + DATABASE_PASSWORD=supersecret + RABBIT_PASSWORD=supersecret + SERVICE_PASSWORD=supersecret + DATABASE_TYPE=mysql + SERVICE_HOST=192.168.42.11 + MYSQL_HOST=$SERVICE_HOST + RABBIT_HOST=$SERVICE_HOST + GLANCE_HOSTPORT=$SERVICE_HOST:9292 + NOVA_VNC_ENABLED=True + NOVNCPROXY_URL="http://$SERVICE_HOST:6080/vnc_lite.html" + VNCSERVER_LISTEN=$HOST_IP + VNCSERVER_PROXYCLIENT_ADDRESS=$VNCSERVER_LISTEN + ENABLED_SERVICES=tempest + SERVICES_FOR_TEMPEST=keystone,nova,neutron,glance + +Then just execute the devstack: + +:: + + ./stack.sh + + Cleaning Up After DevStack -------------------------- diff --git a/lib/tempest b/lib/tempest index eeeef67a8b..b8f9915a87 100644 --- a/lib/tempest +++ b/lib/tempest @@ -197,6 +197,8 @@ function configure_tempest { pip_install_gr testrepository fi + local ENABLED_SERVICES=${SERVICES_FOR_TEMPEST:=$ENABLED_SERVICES} + local image_lines local images local num_images From a976168235bd79c9a8c960aa4889fe9ab03570c0 Mon Sep 17 00:00:00 2001 From: Fernando Royo Date: Wed, 4 Dec 2024 16:44:52 +0100 Subject: [PATCH 487/574] Refactor readiness and custom config for ovn-nortd Initially, this patch ensured that the custom configuration and readiness checks were applied after every restart of the OVN North services. However, after removing the call that triggered the restarting of the OVN/OVS services in [1], this patch now serves as a refactor, separating the readiness checks and custom configuration into a dedicated function. [1] https://review.opendev.org/c/openstack/devstack/+/937606 Related-bug: #2091614 Related-bug: #2091019 Change-Id: Icba271292830204da94aa3353e93d52088d82eec --- lib/neutron_plugins/ovn_agent | 34 ++++++++++++++++++++-------------- 1 file changed, 20 insertions(+), 14 deletions(-) diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index b7633c8c17..8c5d82d3f0 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -704,6 +704,25 @@ function _start_ovs { fi } +function _wait_for_ovn_and_set_custom_config { + # Wait for the service to be ready + # Check for socket and db files for both OVN NB and SB + wait_for_sock_file $OVN_RUNDIR/ovnnb_db.sock + wait_for_sock_file $OVN_RUNDIR/ovnsb_db.sock + wait_for_db_file $OVN_DATADIR/ovnnb_db.db + wait_for_db_file $OVN_DATADIR/ovnsb_db.db + + if is_service_enabled tls-proxy; then + sudo ovn-nbctl --db=unix:$OVN_RUNDIR/ovnnb_db.sock set-ssl $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/ca-chain.pem + sudo ovn-sbctl --db=unix:$OVN_RUNDIR/ovnsb_db.sock set-ssl $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/ca-chain.pem + fi + + sudo ovn-nbctl --db=unix:$OVN_RUNDIR/ovnnb_db.sock set-connection p${OVN_PROTO}:6641:$SERVICE_LISTEN_ADDRESS -- set connection . inactivity_probe=60000 + sudo ovn-sbctl --db=unix:$OVN_RUNDIR/ovnsb_db.sock set-connection p${OVN_PROTO}:6642:$SERVICE_LISTEN_ADDRESS -- set connection . 
inactivity_probe=60000 + sudo ovs-appctl -t $OVN_RUNDIR/ovnnb_db.ctl vlog/set console:off syslog:$OVN_DBS_LOG_LEVEL file:$OVN_DBS_LOG_LEVEL + sudo ovs-appctl -t $OVN_RUNDIR/ovnsb_db.ctl vlog/set console:off syslog:$OVN_DBS_LOG_LEVEL file:$OVN_DBS_LOG_LEVEL +} + # start_ovn() - Start running processes, including screen function start_ovn { echo "Starting OVN" @@ -725,21 +744,8 @@ function start_ovn { _start_process "$OVN_NORTHD_SERVICE" fi - # Wait for the service to be ready - # Check for socket and db files for both OVN NB and SB - wait_for_sock_file $OVN_RUNDIR/ovnnb_db.sock - wait_for_sock_file $OVN_RUNDIR/ovnsb_db.sock - wait_for_db_file $OVN_DATADIR/ovnnb_db.db - wait_for_db_file $OVN_DATADIR/ovnsb_db.db + _wait_for_ovn_and_set_custom_config - if is_service_enabled tls-proxy; then - sudo ovn-nbctl --db=unix:$OVN_RUNDIR/ovnnb_db.sock set-ssl $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/ca-chain.pem - sudo ovn-sbctl --db=unix:$OVN_RUNDIR/ovnsb_db.sock set-ssl $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/ca-chain.pem - fi - sudo ovn-nbctl --db=unix:$OVN_RUNDIR/ovnnb_db.sock set-connection p${OVN_PROTO}:6641:$SERVICE_LISTEN_ADDRESS -- set connection . inactivity_probe=60000 - sudo ovn-sbctl --db=unix:$OVN_RUNDIR/ovnsb_db.sock set-connection p${OVN_PROTO}:6642:$SERVICE_LISTEN_ADDRESS -- set connection . inactivity_probe=60000 - sudo ovs-appctl -t $OVN_RUNDIR/ovnnb_db.ctl vlog/set console:off syslog:$OVN_DBS_LOG_LEVEL file:$OVN_DBS_LOG_LEVEL - sudo ovs-appctl -t $OVN_RUNDIR/ovnsb_db.ctl vlog/set console:off syslog:$OVN_DBS_LOG_LEVEL file:$OVN_DBS_LOG_LEVEL fi if is_service_enabled ovn-controller ; then From 7129f3a45e66060d19a250e31fd35156e45a8af8 Mon Sep 17 00:00:00 2001 From: Eric Harney Date: Fri, 10 Jan 2025 11:02:35 -0500 Subject: [PATCH 488/574] Quiet regex SyntaxWarning in mlock_report Use a raw string to avoid SyntaxWarnings being issued by this script. Change-Id: I81557158013aa36fe27235c461486dfbc37c9f27 --- tools/mlock_report.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/mlock_report.py b/tools/mlock_report.py index 1b081bbe6f..8cbda15895 100644 --- a/tools/mlock_report.py +++ b/tools/mlock_report.py @@ -6,7 +6,7 @@ LCK_SUMMARY_REGEX = re.compile( - "^VmLck:\s+(?P[\d]+)\s+kB", re.MULTILINE) + r"^VmLck:\s+(?P[\d]+)\s+kB", re.MULTILINE) def main(): From 497b4fdf97d8b4e5f1ea1130f4e145014bbb462c Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Fri, 18 Oct 2024 13:47:55 +0100 Subject: [PATCH 489/574] lib/glance: Migrate Glance to WSGI module path We also remove an out-of-date note from the called method. Change-Id: I7cc9fd6a568246342395388c31ae0a0918a2c79a Signed-off-by: Stephen Finucane Depends-on: https://review.opendev.org/c/openstack/glance/+/932701 --- lib/apache | 4 +--- lib/glance | 7 +++---- 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/lib/apache b/lib/apache index 1c034d3c7e..fc174f3901 100644 --- a/lib/apache +++ b/lib/apache @@ -313,9 +313,7 @@ function write_uwsgi_config { # For services using chunked encoding, the only services known to use this # currently are Glance and Swift, we need to use an http proxy instead of # mod_proxy_uwsgi because the chunked encoding gets dropped. See: -# https://github.com/unbit/uwsgi/issues/1540 You can workaround this on python2 -# but that involves having apache buffer the request before sending it to -# uwsgi. +# https://github.com/unbit/uwsgi/issues/1540. 
function write_local_uwsgi_http_config { local conf=$1 local wsgi=$2 diff --git a/lib/glance b/lib/glance index 9655cc4103..4e519102ec 100644 --- a/lib/glance +++ b/lib/glance @@ -133,7 +133,7 @@ GLANCE_SERVICE_PORT=${GLANCE_SERVICE_PORT:-9292} GLANCE_SERVICE_PORT_INT=${GLANCE_SERVICE_PORT_INT:-19292} GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$GLANCE_SERVICE_HOST:$GLANCE_SERVICE_PORT} GLANCE_SERVICE_PROTOCOL=${GLANCE_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} -GLANCE_UWSGI=$GLANCE_BIN_DIR/glance-wsgi-api +GLANCE_UWSGI=glance.wsgi.api:application GLANCE_UWSGI_CONF=$GLANCE_CONF_DIR/glance-uwsgi.ini # Glance default limit for Devstack @@ -472,12 +472,11 @@ function configure_glance { fi if [[ "$GLANCE_STANDALONE" == False ]]; then - write_local_uwsgi_http_config "$GLANCE_UWSGI_CONF" "$GLANCE_UWSGI" "/image" + write_local_uwsgi_http_config "$GLANCE_UWSGI_CONF" "$GLANCE_UWSGI" "/image" "glance-api" # Grab our uwsgi listen address and use that to fill out our # worker_self_reference_url config iniset $GLANCE_API_CONF DEFAULT worker_self_reference_url \ - $(awk '-F= ' '/^http-socket/ { print "http://"$2}' \ - $GLANCE_UWSGI_CONF) + $(awk '-F= ' '/^http-socket/ { print "http://"$2}' $GLANCE_UWSGI_CONF) else write_local_proxy_http_config glance "http://$GLANCE_SERVICE_HOST:$GLANCE_SERVICE_PORT_INT" "/image" iniset $GLANCE_API_CONF DEFAULT bind_host $GLANCE_SERVICE_LISTEN_ADDRESS From d84761e18676a04fc9d1b9e68dff9c573fdd3ba1 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Tue, 28 Jan 2025 02:20:15 +0000 Subject: [PATCH 490/574] Updated from generate-devstack-plugins-list Change-Id: Ic2239e12306226943c645b7c439d5636f8c3df0e --- doc/source/plugin-registry.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 2984a5c15f..f7873c962d 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -38,6 +38,7 @@ openstack/devstack-plugin-container `https://opendev.org/openstack/devstack openstack/devstack-plugin-kafka `https://opendev.org/openstack/devstack-plugin-kafka `__ openstack/devstack-plugin-nfs `https://opendev.org/openstack/devstack-plugin-nfs `__ openstack/devstack-plugin-open-cas `https://opendev.org/openstack/devstack-plugin-open-cas `__ +openstack/devstack-plugin-prometheus `https://opendev.org/openstack/devstack-plugin-prometheus `__ openstack/freezer `https://opendev.org/openstack/freezer `__ openstack/freezer-api `https://opendev.org/openstack/freezer-api `__ openstack/freezer-tempest-plugin `https://opendev.org/openstack/freezer-tempest-plugin `__ @@ -169,7 +170,6 @@ x/rsd-virt-for-nova `https://opendev.org/x/rsd-virt-for-nov x/scalpels `https://opendev.org/x/scalpels `__ x/slogging `https://opendev.org/x/slogging `__ x/stackube `https://opendev.org/x/stackube `__ -x/tap-as-a-service-dashboard `https://opendev.org/x/tap-as-a-service-dashboard `__ x/tatu `https://opendev.org/x/tatu `__ x/trio2o `https://opendev.org/x/trio2o `__ x/valet `https://opendev.org/x/valet `__ From a08a53de424e3ed8cad4cbaf566d0b08f8ad5199 Mon Sep 17 00:00:00 2001 From: Slawek Kaplonski Date: Thu, 30 Jan 2025 12:43:30 +0100 Subject: [PATCH 491/574] Remove leftover from the usage of the removed lib/neutron module In the patch [1] lib/neutron module was removed completely but it left call to the non existing currently function 'start_neutron_api' when the neutron-api service is enabled. 
Devstack should start neutron in the same way in case when q-svc or neutron-api service is enabled and this patch is removing that leftover call to the 'start_neutron_api' function and make it behave the same way for both service names. Additionally this patch adds service "neutron-api" to be checked when initial networks are going to be created. It is like that as just one of the services "q-svc" or "neutron-api" is enough to create those initial networks. [1] https://review.opendev.org/c/openstack/devstack/+/865014 Related-bug: #2096912 Change-Id: I1287af6a31f60b4e522f0ce3ea525e3336ffd8ba --- stack.sh | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/stack.sh b/stack.sh index bfa0573f21..b1c7df5d73 100755 --- a/stack.sh +++ b/stack.sh @@ -1307,10 +1307,7 @@ if is_service_enabled ovn-controller ovn-controller-vtep; then start_ovn_services fi -if is_service_enabled neutron-api; then - echo_summary "Starting Neutron" - start_neutron_api -elif is_service_enabled q-svc; then +if is_service_enabled q-svc neutron-api; then echo_summary "Starting Neutron" configure_neutron_after_post_config start_neutron_service_and_check @@ -1327,7 +1324,7 @@ if is_service_enabled neutron; then start_neutron fi # Once neutron agents are started setup initial network elements -if is_service_enabled q-svc && [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" == "True" ]]; then +if is_service_enabled q-svc neutron-api && [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" == "True" ]]; then echo_summary "Creating initial neutron network elements" # Here's where plugins can wire up their own networks instead # of the code in lib/neutron_plugins/services/l3 From 12abc726e68b547258978c7fbe3630d133f72943 Mon Sep 17 00:00:00 2001 From: Brian Haley Date: Mon, 10 Feb 2025 13:48:37 -0500 Subject: [PATCH 492/574] Remove Neutron Linux Bridge agent code Linux Bridge agent support was removed in the Neutron master branch in [0], let's remove any code here now as well since it will just fail. [0] https://review.opendev.org/c/openstack/neutron/+/927216 Change-Id: Idffa6579000322acfeb860189fb83a317d56bb4e --- doc/source/guides/neutron.rst | 40 +--------- doc/source/overview.rst | 2 +- lib/neutron | 19 +---- lib/neutron_plugins/linuxbridge_agent | 104 -------------------------- lib/neutron_plugins/ml2 | 3 - lib/neutron_plugins/services/l3 | 10 --- tools/worlddump.py | 1 - 7 files changed, 3 insertions(+), 176 deletions(-) delete mode 100644 lib/neutron_plugins/linuxbridge_agent diff --git a/doc/source/guides/neutron.rst b/doc/source/guides/neutron.rst index fb36b3ec5b..a7adeeff73 100644 --- a/doc/source/guides/neutron.rst +++ b/doc/source/guides/neutron.rst @@ -451,44 +451,6 @@ by default. If you want to remove all the extension drivers (even 'port_security'), set ``Q_ML2_PLUGIN_EXT_DRIVERS`` to blank. -Using Linux Bridge instead of Open vSwitch ------------------------------------------- - -The configuration for using the Linux Bridge ML2 driver is fairly -straight forward. The Linux Bridge configuration for DevStack is similar -to the :ref:`Open vSwitch based single interface ` -setup, with small modifications for the interface mappings. 
- - -:: - - [[local|localrc]] - HOST_IP=172.18.161.6 - SERVICE_HOST=172.18.161.6 - MYSQL_HOST=172.18.161.6 - RABBIT_HOST=172.18.161.6 - GLANCE_HOSTPORT=172.18.161.6:9292 - ADMIN_PASSWORD=secret - DATABASE_PASSWORD=secret - RABBIT_PASSWORD=secret - SERVICE_PASSWORD=secret - - ## Neutron options - Q_USE_SECGROUP=True - FLOATING_RANGE="172.18.161.0/24" - IPV4_ADDRS_SAFE_TO_USE="10.0.0.0/24" - Q_FLOATING_ALLOCATION_POOL=start=172.18.161.250,end=172.18.161.254 - PUBLIC_NETWORK_GATEWAY="172.18.161.1" - PUBLIC_INTERFACE=eth0 - - Q_USE_PROVIDERNET_FOR_PUBLIC=True - - # Linuxbridge Settings - Q_AGENT=linuxbridge - LB_PHYSICAL_INTERFACE=eth0 - PUBLIC_PHYSICAL_NETWORK=default - LB_INTERFACE_MAPPINGS=default:eth0 - Using MacVTap instead of Open vSwitch ------------------------------------------ @@ -556,7 +518,7 @@ the MacVTap mechanism driver: [[local|localrc]] ... - Q_ML2_PLUGIN_MECHANISM_DRIVERS=openvswitch,linuxbridge,macvtap + Q_ML2_PLUGIN_MECHANISM_DRIVERS=openvswitch,macvtap ... For the MacVTap compute node, use this local.conf: diff --git a/doc/source/overview.rst b/doc/source/overview.rst index 4384081769..81e58a341e 100644 --- a/doc/source/overview.rst +++ b/doc/source/overview.rst @@ -56,7 +56,7 @@ OpenStack Network ----------------- - Neutron: A basic configuration approximating the original FlatDHCP - mode using linuxbridge or OpenVSwitch. + mode using OpenVSwitch. Services -------- diff --git a/lib/neutron b/lib/neutron index bcef8a5042..69ff212991 100644 --- a/lib/neutron +++ b/lib/neutron @@ -241,8 +241,7 @@ TENANT_VLAN_RANGE=${TENANT_VLAN_RANGE:-} # If using VLANs for tenant networks, or if using flat or VLAN # provider networks, set in ``localrc`` to the name of the physical # network, and also configure ``OVS_PHYSICAL_BRIDGE`` for the -# openvswitch agent or ``LB_PHYSICAL_INTERFACE`` for the linuxbridge -# agent, as described below. +# openvswitch agent, as described below. # # Example: ``PHYSICAL_NETWORK=default`` PHYSICAL_NETWORK=${PHYSICAL_NETWORK:-public} @@ -257,18 +256,6 @@ PHYSICAL_NETWORK=${PHYSICAL_NETWORK:-public} # Example: ``OVS_PHYSICAL_BRIDGE=br-eth1`` OVS_PHYSICAL_BRIDGE=${OVS_PHYSICAL_BRIDGE:-br-ex} -# With the linuxbridge agent, if using VLANs for tenant networks, -# or if using flat or VLAN provider networks, set in ``localrc`` to -# the name of the network interface to use for the physical -# network. -# -# Example: ``LB_PHYSICAL_INTERFACE=eth1`` -if [[ $Q_AGENT == "linuxbridge" && -z ${LB_PHYSICAL_INTERFACE} ]]; then - default_route_dev=$( (ip route; ip -6 route) | grep ^default | head -n 1 | awk '{print $5}') - die_if_not_set $LINENO default_route_dev "Failure retrieving default route device" - LB_PHYSICAL_INTERFACE=$default_route_dev -fi - # With the openvswitch plugin, set to True in ``localrc`` to enable # provider GRE tunnels when ``ENABLE_TENANT_TUNNELS`` is False. 
# @@ -889,10 +876,6 @@ function cleanup_neutron { neutron_ovs_base_cleanup fi - if [[ $Q_AGENT == "linuxbridge" ]]; then - neutron_lb_cleanup - fi - # delete all namespaces created by neutron for ns in $(sudo ip netns list | grep -o -E '(qdhcp|qrouter|fip|snat)-[0-9a-f-]*'); do sudo ip netns delete ${ns} diff --git a/lib/neutron_plugins/linuxbridge_agent b/lib/neutron_plugins/linuxbridge_agent deleted file mode 100644 index a392bd0baf..0000000000 --- a/lib/neutron_plugins/linuxbridge_agent +++ /dev/null @@ -1,104 +0,0 @@ -#!/bin/bash -# -# Neutron Linux Bridge L2 agent -# ----------------------------- - -# Save trace setting -_XTRACE_NEUTRON_LB=$(set +o | grep xtrace) -set +o xtrace - -function neutron_lb_cleanup { - sudo ip link delete $PUBLIC_BRIDGE - - bridge_list=`ls /sys/class/net/*/bridge/bridge_id 2>/dev/null | cut -f5 -d/` - if [[ -z "$bridge_list" ]]; then - return - fi - if [[ "$Q_ML2_TENANT_NETWORK_TYPE" = "vxlan" ]]; then - for port in $(echo $bridge_list | grep -o -e [a-zA-Z\-]*tap[0-9a-f\-]* -e vxlan-[0-9a-f\-]*); do - sudo ip link delete $port - done - elif [[ "$Q_ML2_TENANT_NETWORK_TYPE" = "vlan" ]]; then - for port in $(echo $bridge_list | grep -o -e [a-zA-Z\-]*tap[0-9a-f\-]* -e ${LB_PHYSICAL_INTERFACE}\.[0-9a-f\-]*); do - sudo ip link delete $port - done - fi - for bridge in $(echo $bridge_list |grep -o -e brq[0-9a-f\-]*); do - sudo ip link delete $bridge - done -} - -function is_neutron_ovs_base_plugin { - # linuxbridge doesn't use OVS - return 1 -} - -function neutron_plugin_create_nova_conf { - : -} - -function neutron_plugin_install_agent_packages { - : -} - -function neutron_plugin_configure_dhcp_agent { - local conf_file=$1 - : -} - -function neutron_plugin_configure_l3_agent { - local conf_file=$1 - sudo ip link add $PUBLIC_BRIDGE type bridge - set_mtu $PUBLIC_BRIDGE $PUBLIC_BRIDGE_MTU -} - -function neutron_plugin_configure_plugin_agent { - # Setup physical network interface mappings. Override - # ``LB_VLAN_RANGES`` and ``LB_INTERFACE_MAPPINGS`` in ``localrc`` for more - # complex physical network configurations. - if [[ "$LB_INTERFACE_MAPPINGS" == "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]] && [[ "$LB_PHYSICAL_INTERFACE" != "" ]]; then - LB_INTERFACE_MAPPINGS=$PHYSICAL_NETWORK:$LB_PHYSICAL_INTERFACE - fi - if [[ "$PUBLIC_BRIDGE" != "" ]] && [[ "$PUBLIC_PHYSICAL_NETWORK" != "" ]]; then - if is_service_enabled q-l3 || is_service_enabled neutron-l3; then - iniset /$Q_PLUGIN_CONF_FILE linux_bridge bridge_mappings "$PUBLIC_PHYSICAL_NETWORK:$PUBLIC_BRIDGE" - fi - fi - if [[ "$LB_INTERFACE_MAPPINGS" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE linux_bridge physical_interface_mappings $LB_INTERFACE_MAPPINGS - fi - if [[ "$Q_USE_SECGROUP" == "True" ]]; then - iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.linux.iptables_firewall.IptablesFirewallDriver - if ! 
running_in_container; then - enable_kernel_bridge_firewall - fi - else - iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.firewall.NoopFirewallDriver - fi - AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-linuxbridge-agent" - iniset /$Q_PLUGIN_CONF_FILE agent tunnel_types $Q_TUNNEL_TYPES - - # Configure vxlan tunneling - if [[ "$ENABLE_TENANT_TUNNELS" == "True" ]]; then - if [[ "$Q_ML2_TENANT_NETWORK_TYPE" == "vxlan" ]]; then - iniset /$Q_PLUGIN_CONF_FILE vxlan enable_vxlan "True" - iniset /$Q_PLUGIN_CONF_FILE vxlan local_ip $TUNNEL_ENDPOINT_IP - else - iniset /$Q_PLUGIN_CONF_FILE vxlan enable_vxlan "False" - fi - else - iniset /$Q_PLUGIN_CONF_FILE vxlan enable_vxlan "False" - fi -} - -function neutron_plugin_setup_interface_driver { - local conf_file=$1 - iniset $conf_file DEFAULT interface_driver linuxbridge -} - -function neutron_plugin_check_adv_test_requirements { - is_service_enabled q-agt neutron-agent && is_service_enabled q-dhcp neutron-dhcp && return 0 -} - -# Restore xtrace -$_XTRACE_NEUTRON_LB diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2 index c2e78c65cc..687167bf79 100644 --- a/lib/neutron_plugins/ml2 +++ b/lib/neutron_plugins/ml2 @@ -114,9 +114,6 @@ function neutron_plugin_configure_service { populate_ml2_config /$Q_PLUGIN_CONF_FILE securitygroup enable_security_group=$Q_USE_SECGROUP populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 mechanism_drivers=$Q_ML2_PLUGIN_MECHANISM_DRIVERS - if [[ "$Q_ML2_PLUGIN_MECHANISM_DRIVERS" == *"linuxbridge"* ]]; then - iniset $NEUTRON_CONF experimental linuxbridge True - fi populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 overlay_ip_version=$TUNNEL_IP_VERSION if [[ -n "$Q_ML2_PLUGIN_TYPE_DRIVERS" ]]; then diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3 index c6d4663114..bbedc57a44 100644 --- a/lib/neutron_plugins/services/l3 +++ b/lib/neutron_plugins/services/l3 @@ -334,16 +334,6 @@ function _neutron_configure_router_v4 { local ext_gw_interface="none" if is_neutron_ovs_base_plugin; then ext_gw_interface=$(_neutron_get_ext_gw_interface) - elif [[ "$Q_AGENT" = "linuxbridge" ]]; then - # Get the device the neutron router and network for $FIXED_RANGE - # will be using. - if [ "$Q_USE_PROVIDERNET_FOR_PUBLIC" = "True" ]; then - # in provider nets a bridge mapping uses the public bridge directly - ext_gw_interface=$PUBLIC_BRIDGE - else - # e.x. brq3592e767-da for NET_ID 3592e767-da66-4bcb-9bec-cdb03cd96102 - ext_gw_interface=brq${EXT_NET_ID:0:11} - fi fi if [[ "$ext_gw_interface" != "none" ]]; then local cidr_len=${FLOATING_RANGE#*/} diff --git a/tools/worlddump.py b/tools/worlddump.py index edbfa268db..26ced3f653 100755 --- a/tools/worlddump.py +++ b/tools/worlddump.py @@ -31,7 +31,6 @@ 'nova-compute', 'neutron-dhcp-agent', 'neutron-l3-agent', - 'neutron-linuxbridge-agent', 'neutron-metadata-agent', 'neutron-openvswitch-agent', 'cinder-volume', From 4a1d242a1a274e6d5d93c3d06055d313f4170a88 Mon Sep 17 00:00:00 2001 From: Sean Mooney Date: Wed, 5 Feb 2025 20:50:05 +0000 Subject: [PATCH 493/574] enable multinode supprot for spice and serial proxy This change mirrors change Ie02734bb598d27560cf5d674c9e1d9b8dca3801f which ensure that its posible to enable vnc for vms on compute nodes without deploying the vnc proxy. In this change two new flags are added NOVA_SPICE_ENABLED and NOVA_SERIAL_ENABLED to enable configuration of the relevent console create_nova_conf is also modifed to include the db url if the console proxies are deployed on a host. 
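For illustration only (values assumed), a compute-only subnode that should expose both console types while the proxies run on the controller could set:

    [[local|localrc]]
    # Configure spice/serial consoles on this host without running n-spice/n-sproxy here
    NOVA_SPICE_ENABLED=True
    NOVA_SERIAL_ENABLED=True

The proxy services themselves still only need to be enabled on the node that serves them.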
As spice supprot is nolonger avaible in qemu as packged by ubuntu 24.04 and centos 9 a devstack-two-node-debian-bookworm to allow testing with spice in a multinode job. Change-Id: Ie944e518122f2b0059f28acbf68fb7ad0a560ca4 --- .zuul.yaml | 30 ++++++++++++++++++++++++++++++ lib/nova | 9 ++++++--- lib/tempest | 8 +++++++- 3 files changed, 43 insertions(+), 4 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 74ce39cdfa..6cf79f5f03 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -311,6 +311,36 @@ - compute1 - compute2 +- nodeset: + name: devstack-two-node-debian-bookworm + nodes: + - name: controller + label: debian-bookworm + - name: compute1 + label: debian-bookworm + groups: + # Node where tests are executed and test results collected + - name: tempest + nodes: + - controller + # Nodes running the compute service + - name: compute + nodes: + - controller + - compute1 + # Nodes that are not the controller + - name: subnode + nodes: + - compute1 + # Switch node for multinode networking setup + - name: switch + nodes: + - controller + # Peer nodes for multinode networking setup + - name: peers + nodes: + - compute1 + - job: name: devstack-base parent: openstack-multinode-fips diff --git a/lib/nova b/lib/nova index 95ed4d035c..810a3d9554 100644 --- a/lib/nova +++ b/lib/nova @@ -127,6 +127,9 @@ QEMU_CONF=/etc/libvirt/qemu.conf # ``NOVA_VNC_ENABLED`` can be used to forcibly enable VNC configuration. # In multi-node setups allows compute hosts to not run ``n-novnc``. NOVA_VNC_ENABLED=$(trueorfalse False NOVA_VNC_ENABLED) +# same as ``NOVA_VNC_ENABLED`` but for Spice and serial console respectively. +NOVA_SPICE_ENABLED=$(trueorfalse False NOVA_SPICE_ENABLED) +NOVA_SERIAL_ENABLED=$(trueorfalse False NOVA_SERIAL_ENABLED) # Get hypervisor configuration # ---------------------------- @@ -464,7 +467,7 @@ function create_nova_conf { # only setup database connections and cache backend if there are services # that require them running on the host. The ensures that n-cpu doesn't # leak a need to use the db in a multinode scenario. - if is_service_enabled n-api n-cond n-sched; then + if is_service_enabled n-api n-cond n-sched n-spice n-novnc n-sproxy; then # If we're in multi-tier cells mode, we want our control services pointing # at cell0 instead of cell1 to ensure isolation. If not, we point everything # at the main database like normal. @@ -716,7 +719,7 @@ function configure_console_compute { iniset $NOVA_CPU_CONF vnc enabled false fi - if is_service_enabled n-spice; then + if is_service_enabled n-spice || [ "$NOVA_SPICE_ENABLED" != False ]; then # Address on which instance spiceservers will listen on compute hosts. # For multi-host, this should be the management ip of the compute host. 
SPICESERVER_PROXYCLIENT_ADDRESS=${SPICESERVER_PROXYCLIENT_ADDRESS:-$default_proxyclient_addr} @@ -726,7 +729,7 @@ function configure_console_compute { iniset $NOVA_CPU_CONF spice server_proxyclient_address "$SPICESERVER_PROXYCLIENT_ADDRESS" fi - if is_service_enabled n-sproxy; then + if is_service_enabled n-sproxy || [ "$NOVA_SERIAL_ENABLED" != False ]; then iniset $NOVA_CPU_CONF serial_console enabled True iniset $NOVA_CPU_CONF serial_console base_url "ws://$SERVICE_HOST:$((6082 + offset))/" fi diff --git a/lib/tempest b/lib/tempest index b8f9915a87..29b01f186f 100644 --- a/lib/tempest +++ b/lib/tempest @@ -514,9 +514,15 @@ function configure_tempest { iniset $TEMPEST_CONFIG compute-feature-enabled volume_multiattach True fi - if is_service_enabled n-novnc; then + if is_service_enabled n-novnc || [ "$NOVA_VNC_ENABLED" != False ]; then iniset $TEMPEST_CONFIG compute-feature-enabled vnc_console True fi + if is_service_enabled n-spice || [ "$NOVA_SPICE_ENABLED" != False ]; then + iniset $TEMPEST_CONFIG compute-feature-enabled spice_console True + fi + if is_service_enabled n-sproxy || [ "$NOVA_SERIAL_ENABLED" != False ]; then + iniset $TEMPEST_CONFIG compute-feature-enabled serial_console True + fi # Network iniset $TEMPEST_CONFIG network project_networks_reachable false From 754f1c66f53240e3ebda53fbb95bfdeee05b5796 Mon Sep 17 00:00:00 2001 From: Rodolfo Alonso Hernandez Date: Thu, 13 Feb 2025 08:39:24 +0000 Subject: [PATCH 494/574] [eventlet-removal] Remove "logger" mechanism from ML2/OVN CI jobs The "logger" mechanism is a testing class that is still calling monkey_patch. This mechanism driver is not relevant nor neccessary for the ML2/OVN CI jobs. Change-Id: I539b202ca81f62f4ae26b5275fd6b245d2066fe7 --- lib/neutron_plugins/ovn_agent | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index 01dc1edfdd..71b5e3350d 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -161,8 +161,10 @@ fi # Defaults Overwrite # ------------------ - -Q_ML2_PLUGIN_MECHANISM_DRIVERS=${Q_ML2_PLUGIN_MECHANISM_DRIVERS:-ovn,logger} +# NOTE(ralonsoh): during the eventlet removal, the "logger" mech +# driver has been removed from this list. Re-add it once the removal +# is finished or the mech driver does not call monkey_patch(). +Q_ML2_PLUGIN_MECHANISM_DRIVERS=${Q_ML2_PLUGIN_MECHANISM_DRIVERS:-ovn} Q_ML2_PLUGIN_TYPE_DRIVERS=${Q_ML2_PLUGIN_TYPE_DRIVERS:-local,flat,vlan,geneve} Q_ML2_TENANT_NETWORK_TYPE=${Q_ML2_TENANT_NETWORK_TYPE:-"geneve"} Q_ML2_PLUGIN_GENEVE_TYPE_OPTIONS=${Q_ML2_PLUGIN_GENEVE_TYPE_OPTIONS:-"vni_ranges=1:65536"} From 1aac81ee881534276fd7d6540ed356a85d064a13 Mon Sep 17 00:00:00 2001 From: Vasyl Saienko Date: Sat, 18 Jan 2025 08:40:51 +0000 Subject: [PATCH 495/574] Allow to enable atop It may be required to troubleshoot performance related bugs during tests exection, to have ability to inspect environment processes and theirs status during test run this patch installs atop by default if not explicitly disabled. 
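As a usage sketch (assuming the usual enable_service helper), atop can be turned on and tuned from local.conf with the variables this change introduces; the defaults below are a 30 second sampling interval and a single log generation:

    [[local|localrc]]
    enable_service atop
    # Optional tuning of the atop sampler (defaults: 30 / 1)
    ATOP_LOGINTERVAL=10
    ATOP_LOGGENERATIONS=2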
Related-Bug: #2095224 Change-Id: Iedbd61f3ce3cd2255ea5f2a7a93ba2f39ad28ff2 --- .zuul.yaml | 1 + lib/atop | 48 ++++++++++++++++++++++++++++++++++++++++++++++++ stack.sh | 7 +++++++ unstack.sh | 5 +++++ 4 files changed, 61 insertions(+) create mode 100644 lib/atop diff --git a/.zuul.yaml b/.zuul.yaml index 74ce39cdfa..aec7113ab4 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -357,6 +357,7 @@ '{{ devstack_conf_dir }}/.localrc.auto': logs '{{ devstack_conf_dir }}/.stackenv': logs '{{ devstack_log_dir }}/dstat-csv.log': logs + '{{ devstack_log_dir }}/atop': logs '{{ devstack_log_dir }}/devstacklog.txt': logs '{{ devstack_log_dir }}/devstacklog.txt.summary': logs '{{ devstack_log_dir }}/tcpdump.pcap': logs diff --git a/lib/atop b/lib/atop new file mode 100644 index 0000000000..e0b14cb039 --- /dev/null +++ b/lib/atop @@ -0,0 +1,48 @@ +#!/bin/bash +# +# lib/atop +# Functions to start and stop atop + +# Dependencies: +# +# - ``functions`` file + +# ``stack.sh`` calls the entry points in this order: +# +# - configure_atop +# - install_atop +# - start_atop +# - stop_atop + +# Save trace setting +_XTRACE_ATOP=$(set +o | grep xtrace) +set +o xtrace + +function configure_atop { + cat </dev/null +# /etc/default/atop +# see man atoprc for more possibilities to configure atop execution + +LOGOPTS="-R" +LOGINTERVAL=${ATOP_LOGINTERVAL:-"30"} +LOGGENERATIONS=${ATOP_LOGGENERATIONS:-"1"} +LOGPATH=$LOGDIR/atop +EOF +} + +function install_atop { + install_package atop +} + +# start_() - Start running processes +function start_atop { + start_service atop +} + +# stop_atop() stop atop process +function stop_atop { + stop_service atop +} + +# Restore xtrace +$_XTRACE_ATOP diff --git a/stack.sh b/stack.sh index bfa0573f21..c2a4b5dc9a 100755 --- a/stack.sh +++ b/stack.sh @@ -641,6 +641,7 @@ source $TOP_DIR/lib/swift source $TOP_DIR/lib/neutron source $TOP_DIR/lib/ldap source $TOP_DIR/lib/dstat +source $TOP_DIR/lib/atop source $TOP_DIR/lib/tcpdump source $TOP_DIR/lib/etcd3 source $TOP_DIR/lib/os-vif @@ -1093,6 +1094,12 @@ save_stackenv $LINENO # A better kind of sysstat, with the top process per time slice start_dstat +if is_service_enabled atop; then + configure_atop + install_atop + start_atop +fi + # Run a background tcpdump for debugging # Note: must set TCPDUMP_ARGS with the enabled service if is_service_enabled tcpdump; then diff --git a/unstack.sh b/unstack.sh index 1b2d8dd62a..29c80718f8 100755 --- a/unstack.sh +++ b/unstack.sh @@ -73,6 +73,7 @@ source $TOP_DIR/lib/swift source $TOP_DIR/lib/neutron source $TOP_DIR/lib/ldap source $TOP_DIR/lib/dstat +source $TOP_DIR/lib/atop source $TOP_DIR/lib/etcd3 # Extras Source @@ -174,6 +175,10 @@ fi stop_dstat +if is_service_enabled atop; then + stop_atop +fi + # NOTE: Cinder automatically installs the lvm2 package, independently of the # enabled backends. So if Cinder is enabled, and installed successfully we are # sure lvm2 (lvremove, /etc/lvm/lvm.conf, etc.) is here. From 4ed29f85911642fb5d01f919703697746be19d7c Mon Sep 17 00:00:00 2001 From: Christian Schwede Date: Wed, 26 Feb 2025 11:40:10 +0100 Subject: [PATCH 496/574] Fix Swift rsync/replication configuration Swift rsync and replication services are up and running, however they fail to replicate any data if needed and if used with more than one replica. This patch removes a deprecated option setting[1] and replaces it with the required setting to use the correct rsync module suffix. Additionally it removes an outdated subdirectory suffix in the rsyncd configuration that has been removed as well[2]. 
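For example (illustrative, derived from the template below), each object server ends up with roughly

    [object-replicator]
    rsync_module = {replication_ip}::object{replication_port}

which Swift expands per device at runtime, so the first object server replicates through the object6613 rsync module, whose path is %SWIFT_DATA_DIR%/1/ in the rsyncd.conf hunks below.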
Closes-Bug: #2100272 [1] https://opendev.org/openstack/swift/commit/675145ef4a131d548cc1122689732b9b65e5def4 [2] https://opendev.org/openstack/devstack/commit/0e58d22897457831b9dbf02d66a2f29d43803597 Change-Id: I5283405d00883a4dd11b7c001b1bba3776e576b8 --- files/swift/rsyncd.conf | 24 ++++++++++++------------ lib/swift | 4 ++-- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/files/swift/rsyncd.conf b/files/swift/rsyncd.conf index c49f716fa7..937d6c4b9a 100644 --- a/files/swift/rsyncd.conf +++ b/files/swift/rsyncd.conf @@ -6,74 +6,74 @@ address = 127.0.0.1 [account6612] max connections = 25 -path = %SWIFT_DATA_DIR%/1/node/ +path = %SWIFT_DATA_DIR%/1/ read only = false lock file = %SWIFT_DATA_DIR%/run/account6612.lock [account6622] max connections = 25 -path = %SWIFT_DATA_DIR%/2/node/ +path = %SWIFT_DATA_DIR%/2/ read only = false lock file = %SWIFT_DATA_DIR%/run/account6622.lock [account6632] max connections = 25 -path = %SWIFT_DATA_DIR%/3/node/ +path = %SWIFT_DATA_DIR%/3/ read only = false lock file = %SWIFT_DATA_DIR%/run/account6632.lock [account6642] max connections = 25 -path = %SWIFT_DATA_DIR%/4/node/ +path = %SWIFT_DATA_DIR%/4/ read only = false lock file = %SWIFT_DATA_DIR%/run/account6642.lock [container6611] max connections = 25 -path = %SWIFT_DATA_DIR%/1/node/ +path = %SWIFT_DATA_DIR%/1/ read only = false lock file = %SWIFT_DATA_DIR%/run/container6611.lock [container6621] max connections = 25 -path = %SWIFT_DATA_DIR%/2/node/ +path = %SWIFT_DATA_DIR%/2/ read only = false lock file = %SWIFT_DATA_DIR%/run/container6621.lock [container6631] max connections = 25 -path = %SWIFT_DATA_DIR%/3/node/ +path = %SWIFT_DATA_DIR%/3/ read only = false lock file = %SWIFT_DATA_DIR%/run/container6631.lock [container6641] max connections = 25 -path = %SWIFT_DATA_DIR%/4/node/ +path = %SWIFT_DATA_DIR%/4/ read only = false lock file = %SWIFT_DATA_DIR%/run/container6641.lock [object6613] max connections = 25 -path = %SWIFT_DATA_DIR%/1/node/ +path = %SWIFT_DATA_DIR%/1/ read only = false lock file = %SWIFT_DATA_DIR%/run/object6613.lock [object6623] max connections = 25 -path = %SWIFT_DATA_DIR%/2/node/ +path = %SWIFT_DATA_DIR%/2/ read only = false lock file = %SWIFT_DATA_DIR%/run/object6623.lock [object6633] max connections = 25 -path = %SWIFT_DATA_DIR%/3/node/ +path = %SWIFT_DATA_DIR%/3/ read only = false lock file = %SWIFT_DATA_DIR%/run/object6633.lock [object6643] max connections = 25 -path = %SWIFT_DATA_DIR%/4/node/ +path = %SWIFT_DATA_DIR%/4/ read only = false lock file = %SWIFT_DATA_DIR%/run/object6643.lock diff --git a/lib/swift b/lib/swift index 3659624d5b..862927437d 100644 --- a/lib/swift +++ b/lib/swift @@ -318,8 +318,8 @@ function generate_swift_config_services { iniuncomment ${swift_node_config} DEFAULT mount_check iniset ${swift_node_config} DEFAULT mount_check false - iniuncomment ${swift_node_config} ${server_type}-replicator vm_test_mode - iniset ${swift_node_config} ${server_type}-replicator vm_test_mode yes + iniuncomment ${swift_node_config} ${server_type}-replicator rsync_module + iniset ${swift_node_config} ${server_type}-replicator rsync_module "{replication_ip}::${server_type}{replication_port}" # Using a sed and not iniset/iniuncomment because we want to a global # modification and make sure it works for new sections. 
From e650b827904fe8835800a96332937bb1c8f4516e Mon Sep 17 00:00:00 2001 From: Rajat Dhasmana Date: Wed, 26 Feb 2025 00:38:26 +0530 Subject: [PATCH 497/574] Tempest: Add support for extend attached encrypted volumes tests We've LUKSv1 and LUKSv2 tests[1] in tempest that requires the ``extend_attached_encrypted_volume`` option to be True but currently there is no way to set it in devstack. This patch adds the parameter ``TEMPEST_EXTEND_ATTACHED_ENCRYPTED_VOLUME`` to enable the tests. [1] https://github.com/openstack/tempest/blob/cb03598a65f47c51406fc86c9c1503fe42424848/tempest/api/volume/admin/test_encrypted_volumes_extend.py Change-Id: Id3a3483629794ac38cb314812eeff84b677f35cd --- lib/tempest | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/tempest b/lib/tempest index 29b01f186f..c9486f6310 100644 --- a/lib/tempest +++ b/lib/tempest @@ -586,6 +586,7 @@ function configure_tempest { TEMPEST_VOLUME_REVERT_TO_SNAPSHOT=${TEMPEST_VOLUME_REVERT_TO_SNAPSHOT:-True} fi iniset $TEMPEST_CONFIG volume-feature-enabled volume_revert $(trueorfalse False TEMPEST_VOLUME_REVERT_TO_SNAPSHOT) + iniset $TEMPEST_CONFIG volume-feature-enabled extend_attached_encrypted_volume ${TEMPEST_EXTEND_ATTACHED_ENCRYPTED_VOLUME:-False} if [[ "$CINDER_BACKUP_DRIVER" == *"swift"* ]]; then iniset $TEMPEST_CONFIG volume backup_driver swift fi From 85576bbfd430a9f419fbd837dfa20a2ef687da94 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Wed, 26 Feb 2025 18:04:09 +0000 Subject: [PATCH 498/574] tools: Set user_domain_id in generated clouds.yaml MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If you specify a username, you also need to specify a domain that the user exists in. Failure to do so results in the following error: Expecting to find domain in user. The server could not comply with the request since it is either malformed or otherwise incorrect. The client is assumed to be in error. (HTTP 400)␏ This was mostly being masked for us in python-openstackclient by this little helper in osc-lib [1], but we can't rely on that for openstacksdk (and shouldn't really rely on it elsewhere either). We also deprecate the '--os-identity-api-version' and '--os-volume-api-version' options and will remove them shortly: both services only have v3 APIs nowadays. [1] https://github.com/openstack/osc-lib/blob/3.2.0/osc_lib/cli/client_config.py#L136-L147 Change-Id: I5537b0a7d58efb8a325ed61bad358f677f7a3cdf Signed-off-by: Stephen Finucane --- lib/keystone | 2 +- tools/update_clouds_yaml.py | 29 ++++++++++++++++++++--------- 2 files changed, 21 insertions(+), 10 deletions(-) diff --git a/lib/keystone b/lib/keystone index 76e2598ba3..8371045026 100644 --- a/lib/keystone +++ b/lib/keystone @@ -592,7 +592,7 @@ function bootstrap_keystone { # create_ldap_domain() - Create domain file and initialize domain with a user function create_ldap_domain { # Creates domain Users - openstack --os-identity-api-version=3 domain create --description "LDAP domain" Users + openstack domain create --description "LDAP domain" Users # Create domain file inside etc/keystone/domains KEYSTONE_LDAP_DOMAIN_FILE=$KEYSTONE_CONF_DIR/domains/keystone.Users.conf diff --git a/tools/update_clouds_yaml.py b/tools/update_clouds_yaml.py index 74dcdb2a07..c0a54838cc 100755 --- a/tools/update_clouds_yaml.py +++ b/tools/update_clouds_yaml.py @@ -14,14 +14,14 @@ # Update the clouds.yaml file. 
- import argparse import os.path +import sys import yaml -class UpdateCloudsYaml(object): +class UpdateCloudsYaml: def __init__(self, args): if args.file: self._clouds_path = args.file @@ -32,6 +32,14 @@ def __init__(self, args): self._create_directory = True self._clouds = {} + if args.os_identity_api_version != '3': + print("ERROR: Only identity API v3 is supported") + sys.exit(1) + + if args.os_volume_api_version != '3': + print("ERROR: Only block storage API v3 is supported") + sys.exit(1) + self._cloud = args.os_cloud self._cloud_data = { 'region_name': args.os_region_name, @@ -40,20 +48,23 @@ def __init__(self, args): 'auth': { 'auth_url': args.os_auth_url, 'username': args.os_username, + 'user_domain_id': 'default', 'password': args.os_password, }, } + if args.os_project_name and args.os_system_scope: print( - "WARNING: os_project_name and os_system_scope were both" - " given. os_system_scope will take priority.") - if args.os_project_name and not args.os_system_scope: + "WARNING: os_project_name and os_system_scope were both " + "given. os_system_scope will take priority." + ) + + if args.os_system_scope: # system-scoped + self._cloud_data['auth']['system_scope'] = args.os_system_scope + elif args.os_project_name: # project-scoped self._cloud_data['auth']['project_name'] = args.os_project_name - if args.os_identity_api_version == '3' and not args.os_system_scope: - self._cloud_data['auth']['user_domain_id'] = 'default' self._cloud_data['auth']['project_domain_id'] = 'default' - if args.os_system_scope: - self._cloud_data['auth']['system_scope'] = args.os_system_scope + if args.os_cacert: self._cloud_data['cacert'] = args.os_cacert From 1c96b4ef5fbe5425d9c6e80b34bbb805a31a0808 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Wed, 26 Feb 2025 18:12:23 +0000 Subject: [PATCH 499/574] openrc: Stop setting OS_VOLUME_API_VERSION, CINDER_VERSION We have not needed these in years. Change-Id: I4d76a7d3a8513ce5a927d533b34fb609e5dacdfa Signed-off-by: Stephen Finucane --- openrc | 5 ----- 1 file changed, 5 deletions(-) diff --git a/openrc b/openrc index 5ec7634638..e800abeb3d 100644 --- a/openrc +++ b/openrc @@ -72,8 +72,3 @@ if [[ ! -v OS_CACERT ]] ; then export OS_CACERT=$DEFAULT_OS_CACERT fi fi - -# Currently cinderclient needs you to specify the *volume api* version. This -# needs to match the config of your catalog returned by Keystone. -export CINDER_VERSION=${CINDER_VERSION:-3} -export OS_VOLUME_API_VERSION=${OS_VOLUME_API_VERSION:-$CINDER_VERSION} From ca15453625638d2660b7fd2fce261096f9f15dd0 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Thu, 27 Feb 2025 13:24:31 +0000 Subject: [PATCH 500/574] tools: Remove --os-identity-api-version, --os-volume-api-version opts There is only one volume API and one identity API, and their collective number is 3. 
Change-Id: Ie269817c5bb0eddd8cfcf279a46cffe4a56377b2 Signed-off-by: Stephen Finucane Depends-on: https://review.opendev.org/c/openstack/openstacksdk/+/942898 --- tools/update_clouds_yaml.py | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/tools/update_clouds_yaml.py b/tools/update_clouds_yaml.py index c0a54838cc..87312d9469 100755 --- a/tools/update_clouds_yaml.py +++ b/tools/update_clouds_yaml.py @@ -32,19 +32,9 @@ def __init__(self, args): self._create_directory = True self._clouds = {} - if args.os_identity_api_version != '3': - print("ERROR: Only identity API v3 is supported") - sys.exit(1) - - if args.os_volume_api_version != '3': - print("ERROR: Only block storage API v3 is supported") - sys.exit(1) - self._cloud = args.os_cloud self._cloud_data = { 'region_name': args.os_region_name, - 'identity_api_version': args.os_identity_api_version, - 'volume_api_version': args.os_volume_api_version, 'auth': { 'auth_url': args.os_auth_url, 'username': args.os_username, @@ -100,8 +90,6 @@ def main(): parser.add_argument('--file') parser.add_argument('--os-cloud', required=True) parser.add_argument('--os-region-name', default='RegionOne') - parser.add_argument('--os-identity-api-version', default='3') - parser.add_argument('--os-volume-api-version', default='3') parser.add_argument('--os-cacert') parser.add_argument('--os-auth-url', required=True) parser.add_argument('--os-username', required=True) From a0938e6dcf93d77530685391b7850d34ffa206fc Mon Sep 17 00:00:00 2001 From: Ivan Anfimov Date: Sat, 1 Mar 2025 20:46:26 +0000 Subject: [PATCH 501/574] Fix for CSS problems in Horizon After installation DevStack icons are not displayed. Change-Id: I1bedf97e4d2b7f13b4a0c5b98e29ac53cf502e96 Closes-Bug: #2093844 --- files/apache-horizon.template | 1 + 1 file changed, 1 insertion(+) diff --git a/files/apache-horizon.template b/files/apache-horizon.template index da7a7d26c3..98d02e168e 100644 --- a/files/apache-horizon.template +++ b/files/apache-horizon.template @@ -10,6 +10,7 @@ DocumentRoot %HORIZON_DIR%/.blackhole/ Alias %WEBROOT%/media %HORIZON_DIR%/openstack_dashboard/static Alias %WEBROOT%/static %HORIZON_DIR%/static + Alias /static %HORIZON_DIR%/static RedirectMatch "^/$" "%WEBROOT%/" From 9f2f499ded039dc2545c4e8860204f013f460350 Mon Sep 17 00:00:00 2001 From: Vasyl Saienko Date: Fri, 28 Feb 2025 07:47:11 +0000 Subject: [PATCH 502/574] Pre create logs directory for atop The race may happen and atop can't start due to missing logs directory. This patch pre-creates directory before starting atop process. Closes-Bug: #2100871 Change-Id: I89e3100dc86d60266913b5c5776db65e8882847c --- lib/atop | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/atop b/lib/atop index e0b14cb039..25c8e9a83f 100644 --- a/lib/atop +++ b/lib/atop @@ -19,7 +19,8 @@ _XTRACE_ATOP=$(set +o | grep xtrace) set +o xtrace function configure_atop { - cat </dev/null + mkdir -p $LOGDIR/atop + cat </dev/null # /etc/default/atop # see man atoprc for more possibilities to configure atop execution From ab9fb4eb8732b1cc5505b8c9e722a7310038efdf Mon Sep 17 00:00:00 2001 From: Brian Haley Date: Wed, 5 Mar 2025 09:34:25 -0500 Subject: [PATCH 503/574] Remove openstack network section from overview This section is old and doesn't make much sense anymore, let's remove it. Neutron is already mentioned as a default service and has its own config guide. 
TrivialFix Change-Id: I2a2ed574f9eca7b87fb9bb6422568ed4fc55f057 --- doc/source/overview.rst | 6 ------ 1 file changed, 6 deletions(-) diff --git a/doc/source/overview.rst b/doc/source/overview.rst index 81e58a341e..c978e8d2cf 100644 --- a/doc/source/overview.rst +++ b/doc/source/overview.rst @@ -52,12 +52,6 @@ Web Server - Apache -OpenStack Network ------------------ - -- Neutron: A basic configuration approximating the original FlatDHCP - mode using OpenVSwitch. - Services -------- From 0572e59775c91494fb6009ac4be539fb892226c7 Mon Sep 17 00:00:00 2001 From: Takashi Kajinami Date: Sat, 15 Mar 2025 10:44:05 +0900 Subject: [PATCH 504/574] Skip functional tests for .gitreview update ... because the file does not affect any functionality. Also apply the consistent irrelevant files to skip functional tests to avoid unnecessary jobs. Change-Id: Ibce79d6b7627c26aa69989ed17ae32d7c3b63d19 --- .zuul.yaml | 72 +++++++++++++++--------------------------------------- 1 file changed, 20 insertions(+), 52 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 7d72ab101a..48dd55e2d2 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -455,7 +455,7 @@ pre-run: playbooks/pre.yaml run: playbooks/devstack.yaml post-run: playbooks/post.yaml - irrelevant-files: + irrelevant-files: &common-irrelevant-files # Documentation related - ^.*\.rst$ - ^api-ref/.*$ @@ -465,6 +465,8 @@ - ^.*/locale/.*po$ # pre-commit config - ^.pre-commit-config.yaml$ + # gitreview config + - ^.gitreview$ - job: name: devstack-minimal @@ -915,35 +917,21 @@ - ironic-tempest-bios-ipmi-direct-tinyipa - swift-dsvm-functional - grenade: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ + irrelevant-files: *common-irrelevant-files - neutron-ovs-grenade-multinode: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ + irrelevant-files: *common-irrelevant-files - neutron-ovn-tempest-ovs-release: voting: false - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ + irrelevant-files: *common-irrelevant-files - tempest-multinode-full-py3: voting: false - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ + irrelevant-files: *common-irrelevant-files - openstacksdk-functional-devstack: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ + irrelevant-files: *common-irrelevant-files - tempest-ipv6-only: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ + irrelevant-files: *common-irrelevant-files - nova-ceph-multistore: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ + irrelevant-files: *common-irrelevant-files gate: jobs: - devstack @@ -958,27 +946,17 @@ - devstack-unit-tests - openstack-tox-bashate - neutron-ovs-grenade-multinode: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ + irrelevant-files: *common-irrelevant-files - ironic-tempest-bios-ipmi-direct-tinyipa - swift-dsvm-functional - grenade: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ + irrelevant-files: *common-irrelevant-files - openstacksdk-functional-devstack: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ + irrelevant-files: *common-irrelevant-files - tempest-ipv6-only: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ + irrelevant-files: *common-irrelevant-files - nova-ceph-multistore: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ + irrelevant-files: *common-irrelevant-files # Please add a note on each job and conditions for the job not # being experimental any more, so we can keep this list somewhat # pruned. 
@@ -995,25 +973,15 @@ - nova-multi-cell - nova-next - devstack-plugin-ceph-tempest-py3: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ + irrelevant-files: *common-irrelevant-files - neutron-ovs-tempest-dvr: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ + irrelevant-files: *common-irrelevant-files - neutron-ovs-tempest-dvr-ha-multinode-full: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ + irrelevant-files: *common-irrelevant-files - cinder-tempest-lvm-multibackend: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ + irrelevant-files: *common-irrelevant-files - tempest-pg-full: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ + irrelevant-files: *common-irrelevant-files - devstack-no-tls-proxy periodic: jobs: From da40accd158ed55200de93a4191dbe334c82db22 Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Fri, 21 Mar 2025 10:33:51 -0700 Subject: [PATCH 505/574] Update DEVSTACK_SERIES to 2025.2 stable/2025.1 branch has been created now and current master is for 2025.2. Change-Id: Iba81d280ebf1bd488bd590bdc4e31c49782c7099 --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index c05d4e2d98..0319fc8a50 100644 --- a/stackrc +++ b/stackrc @@ -252,7 +252,7 @@ REQUIREMENTS_DIR=${REQUIREMENTS_DIR:-$DEST/requirements} # Setting the variable to 'ALL' will activate the download for all # libraries. -DEVSTACK_SERIES="2025.1" +DEVSTACK_SERIES="2025.2" ############## # From b9be941b9b96478e6bfcceb1e75ae5c66d467f07 Mon Sep 17 00:00:00 2001 From: Clark Boylan Date: Wed, 26 Mar 2025 10:09:38 -0700 Subject: [PATCH 506/574] Reduce unnecessary apache restarts Systemd limits the total number of restarts that a service can undergo in a short period of time. On faster nodes all of our apache restarts hit that limit and we eventually fail. Mitigate this by removing unnecessary restarts. Change-Id: I425bb9eec525d82372f05edc63e4fb931e5a4887 --- lib/apache | 16 +++++++++++----- lib/tls | 15 ++++++++++++--- 2 files changed, 23 insertions(+), 8 deletions(-) diff --git a/lib/apache b/lib/apache index fc174f3901..15b4297809 100644 --- a/lib/apache +++ b/lib/apache @@ -53,13 +53,16 @@ APACHE_LOG_DIR="/var/log/${APACHE_NAME}" # Enable apache mod and restart apache if it isn't already enabled. function enable_apache_mod { local mod=$1 + local should_restart=$2 # Apache installation, because we mark it NOPRIME if is_ubuntu; then # Skip mod_version as it is not a valid mod to enable # on debuntu, instead it is built in. if [[ "$mod" != "version" ]] && ! a2query -m $mod ; then sudo a2enmod $mod - restart_apache_server + if [[ "$should_restart" != "norestart" ]] ; then + restart_apache_server + fi fi elif is_fedora; then # pass @@ -113,15 +116,18 @@ function install_apache_uwsgi { fi if is_ubuntu; then - # we've got to enable proxy and proxy_uwsgi for this to work - sudo a2enmod proxy - sudo a2enmod proxy_uwsgi + if ! a2query -m proxy || ! 
a2query -m proxy_uwsgi ; then + # we've got to enable proxy and proxy_uwsgi for this to work + sudo a2enmod proxy + sudo a2enmod proxy_uwsgi + restart_apache_server + fi elif is_fedora; then # redhat is missing a nice way to turn on/off modules echo "LoadModule proxy_uwsgi_module modules/mod_proxy_uwsgi.so" \ | sudo tee /etc/httpd/conf.modules.d/02-proxy-uwsgi.conf + restart_apache_server fi - restart_apache_server } # install_apache_wsgi() - Install Apache server and wsgi module diff --git a/lib/tls b/lib/tls index 0a598e14f7..cff5c630a5 100644 --- a/lib/tls +++ b/lib/tls @@ -452,6 +452,7 @@ function enable_mod_ssl { # =============== function tune_apache_connections { + local should_restart=$1 local tuning_file=$APACHE_SETTINGS_DIR/connection-tuning.conf if ! [ -f $tuning_file ] ; then sudo bash -c "cat > $tuning_file" << EOF @@ -494,7 +495,12 @@ ThreadLimit 64 MaxRequestsPerChild 0 EOF - restart_apache_server + if [ "$should_restart" != "norestart" ] ; then + # Only restart the apache server if we know we really want to + # do so. Too many restarts in a short period of time is treated + # as an error by systemd. + restart_apache_server + fi fi } @@ -509,7 +515,8 @@ function start_tls_proxy { # 8190 is the default apache size. local f_header_size=${6:-8190} - tune_apache_connections + # We don't restart apache here as we'll do it at the end of the function. + tune_apache_connections norestart local config_file config_file=$(apache_site_config_for $b_service) @@ -558,7 +565,9 @@ $listen_string EOF for mod in headers ssl proxy proxy_http; do - enable_apache_mod $mod + # We don't need to restart here as we will restart once at the end + # of the function. + enable_apache_mod $mod norestart done enable_apache_site $b_service restart_apache_server From cb177ba84bd90f70a02fbac8b60549e7323ec7d2 Mon Sep 17 00:00:00 2001 From: Ivan Anfimov Date: Sat, 1 Mar 2025 22:15:04 +0000 Subject: [PATCH 507/574] Fix for module proxy_uwsgi_module is already loaded Rocky Linux 9.5 64 bit httpd -t [Sun Mar 02 01:10:49.272260 2025] [so:warn] [pid 201497:tid 201497] AH01574: module proxy_uwsgi_module is already loaded, skipping Change-Id: Id6a88c2b7958789f7d4947d3259276f120f5f44e --- lib/apache | 5 ----- 1 file changed, 5 deletions(-) diff --git a/lib/apache b/lib/apache index 15b4297809..744c0f10b6 100644 --- a/lib/apache +++ b/lib/apache @@ -122,11 +122,6 @@ function install_apache_uwsgi { sudo a2enmod proxy_uwsgi restart_apache_server fi - elif is_fedora; then - # redhat is missing a nice way to turn on/off modules - echo "LoadModule proxy_uwsgi_module modules/mod_proxy_uwsgi.so" \ - | sudo tee /etc/httpd/conf.modules.d/02-proxy-uwsgi.conf - restart_apache_server fi } From f41a16c11801f986a6e799e02b5340adf6b04fbb Mon Sep 17 00:00:00 2001 From: yatinkarel Date: Wed, 9 Apr 2025 18:14:00 +0530 Subject: [PATCH 508/574] Fix python3 version for rpm distros pythonX.Y version is virtually provided since long[1], and pythonXY-devel no longer provided in latest CentOS and Fedora releases. So switching to use pythonX.Y-devel as that will also pull pythonX.Y as a dependency. Additionally install pythonX.Y-pip as for rpm distros it don't install pip via source. 
[1] https://src.fedoraproject.org/rpms/python3/c/75005c20f68f3b4ceb734e876b37009c8c3b99f3 Change-Id: I990586cce876533c67e3da4c97d9e5995c762340 --- inc/python | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/inc/python b/inc/python index bd58905e9e..cd90ac82c6 100644 --- a/inc/python +++ b/inc/python @@ -489,11 +489,7 @@ function install_python3 { if is_ubuntu; then apt_get install python${PYTHON3_VERSION} python${PYTHON3_VERSION}-dev elif is_fedora; then - if [ "$os_VENDOR" = "Fedora" ]; then - install_package python${PYTHON3_VERSION//.} - else - install_package python${PYTHON3_VERSION//.} python${PYTHON3_VERSION//.}-devel - fi + install_package python${PYTHON3_VERSION}-devel python${PYTHON3_VERSION}-pip fi } From c4340a64ee35b2b8b5395461b6702ef765786465 Mon Sep 17 00:00:00 2001 From: Gorka Eguileor Date: Fri, 8 Jul 2022 14:08:03 +0200 Subject: [PATCH 509/574] Add support for shared os_brick file lock path There can be problems with some os-brick connectors if nova and cinder run on the same host with different lock path locations, which we currently do, or if Cinder and Glance with cinder store run on the same host, and a recent os-brick change (Ic52338278eb5bb3d90ce582fe6b23f37eb5568c4) allows for an os-brick specific lock_path to facilitate these kind of deployment. This patch adds the ``lock_path`` configuration option in the ``[os_brick]`` section of the nova, cinder, and glance config files. If the os-brick, cinder, nova, or glance-store changes are not present then the new config option is be ignored in the respective service, and it will be used otherwise, so there's no need to make this patch dependent on any other since we won't be worse off than we are now. Change-Id: Ibe7da160460151734224863cddec5e0d549b4938 --- lib/cinder | 16 ++++++++++++++++ stack.sh | 5 +++++ 2 files changed, 21 insertions(+) diff --git a/lib/cinder b/lib/cinder index b557d4b10b..2b565c9535 100644 --- a/lib/cinder +++ b/lib/cinder @@ -59,6 +59,7 @@ else fi CINDER_STATE_PATH=${CINDER_STATE_PATH:=$DATA_DIR/cinder} +OS_BRICK_LOCK_PATH=${OS_BRICK_LOCK_PATH:=$DATA_DIR/os_brick} CINDER_CONF_DIR=/etc/cinder CINDER_CONF=$CINDER_CONF_DIR/cinder.conf @@ -511,6 +512,21 @@ function init_cinder { mkdir -p $CINDER_STATE_PATH/volumes } + +function init_os_brick { + mkdir -p $OS_BRICK_LOCK_PATH + if is_service_enabled cinder; then + iniset $CINDER_CONF os_brick lock_path $OS_BRICK_LOCK_PATH + fi + if is_service_enabled nova; then + iniset $NOVA_CONF os_brick lock_path $OS_BRICK_LOCK_PATH + fi + if is_service_enabled glance; then + iniset $GLANCE_API_CONF os_brick lock_path $OS_BRICK_LOCK_PATH + iniset $GLANCE_CACHE_CONF os_brick lock_path $OS_BRICK_LOCK_PATH + fi +} + # install_cinder() - Collect source and prepare function install_cinder { git_clone $CINDER_REPO $CINDER_DIR $CINDER_BRANCH diff --git a/stack.sh b/stack.sh index 04b5f4ca6a..afca5250d5 100755 --- a/stack.sh +++ b/stack.sh @@ -1005,6 +1005,11 @@ if is_service_enabled tls-proxy; then fix_system_ca_bundle_path fi +if is_service_enabled cinder || [[ "$USE_CINDER_FOR_GLANCE" == "True" ]]; then + # os-brick setup required by glance, cinder, and nova + init_os_brick +fi + # Extras Install # -------------- From 3fe8873a15db27d2d8b7df4e708210ee3ca1465d Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Sat, 26 Apr 2025 02:19:56 +0000 Subject: [PATCH 510/574] Updated from generate-devstack-plugins-list Change-Id: I7932dc96301cb2a52607007aa6935bbf6aa66397 --- doc/source/plugin-registry.rst | 1 + 1 file changed, 1 insertion(+) diff --git 
a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index f7873c962d..e84c946287 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -24,6 +24,7 @@ official OpenStack projects. ======================================== === Plugin Name URL ======================================== === +openstack/aetos `https://opendev.org/openstack/aetos `__ openstack/aodh `https://opendev.org/openstack/aodh `__ openstack/barbican `https://opendev.org/openstack/barbican `__ openstack/blazar `https://opendev.org/openstack/blazar `__ From ea23079321231c63e78e84cd58088663e92134a8 Mon Sep 17 00:00:00 2001 From: yatinkarel Date: Wed, 30 Apr 2025 21:41:37 +0530 Subject: [PATCH 511/574] Fix cert detection with custom PYTHON3_VERSION PYTHON3_VERSION was not considered for detecting ca path with GLOBAL_VENV=False, this patch fixes it. Related-Bug: #2109591 Change-Id: Ie597494a2a11293cbd20e6d0b23dcb31bf343957 --- lib/tls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/tls b/lib/tls index cff5c630a5..fa0a448d7d 100644 --- a/lib/tls +++ b/lib/tls @@ -367,7 +367,7 @@ function fix_system_ca_bundle_path { if [[ "$GLOBAL_VENV" == "True" ]] ; then capath=$($DEVSTACK_VENV/bin/python3 -c $'try:\n from requests import certs\n print (certs.where())\nexcept ImportError: pass') else - capath=$(python3 -c $'try:\n from requests import certs\n print (certs.where())\nexcept ImportError: pass') + capath=$(python$PYTHON3_VERSION -c $'try:\n from requests import certs\n print (certs.where())\nexcept ImportError: pass') fi if [[ ! $capath == "" && ! $capath =~ ^/etc/.* && ! -L $capath ]]; then if is_fedora; then From 24870ec45a9486a31239218d74d0fd3fa3e5f118 Mon Sep 17 00:00:00 2001 From: yatinkarel Date: Mon, 5 May 2025 14:13:06 +0530 Subject: [PATCH 512/574] Pass PYTHON env var to memory tracker It was not honoring PYTHON3_VERSION when running with GLOBAL_VENV=false, this patch fixes it. Related-Bug: #2109591 Change-Id: Ib34c099b897e59fce24cab6e98dc31a505e4922e --- lib/dstat | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/dstat b/lib/dstat index 870c901d2a..9bd0370847 100644 --- a/lib/dstat +++ b/lib/dstat @@ -33,7 +33,7 @@ function start_dstat { # To enable memory_tracker add: # enable_service memory_tracker # to your localrc - run_process memory_tracker "$TOP_DIR/tools/memory_tracker.sh" "" "root" + run_process memory_tracker "$TOP_DIR/tools/memory_tracker.sh" "" "root" "PYTHON=python${PYTHON3_VERSION}" # TODO(jh): Fail when using the old service name otherwise consumers might # never notice that is has been removed. From bf1ef3278c30572f5b958c5f1d2f59839e3d2b87 Mon Sep 17 00:00:00 2001 From: yatinkarel Date: Mon, 5 May 2025 18:08:12 +0530 Subject: [PATCH 513/574] Honor PYTHON3_VERSION for mod_wsgi rpm installation Different variants of mod_wsgi are provided like:- python3-mod_wsgi.x86_64 python3.11-mod_wsgi.x86_64 python3.12-mod_wsgi.x86_64 Adjust script to also consider PYTHON3_VERSION var to correctly install the package. 
Related-Bug: #2109591 Change-Id: I6bbfd92ef727ef9b343cd5778bb78f43c13165ad --- lib/apache | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/apache b/lib/apache index 744c0f10b6..5ab74b7087 100644 --- a/lib/apache +++ b/lib/apache @@ -137,7 +137,7 @@ function install_apache_wsgi { install_package libapache2-mod-wsgi-py3 elif is_fedora; then sudo rm -f /etc/httpd/conf.d/000-* - install_package httpd python3-mod_wsgi + install_package httpd python${PYTHON3_VERSION}-mod_wsgi # rpm distros dont enable httpd by default so enable it to support reboots. sudo systemctl enable httpd # For consistency with Ubuntu, switch to the worker mpm, as From 74837e0b30ea782b073eb6d23b0aa3060068a3a1 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Fri, 11 Oct 2024 16:15:47 +0100 Subject: [PATCH 514/574] lib/neutron: Deploy under uWSGI by default Change-Id: I6256ca1725c56859947d957156d865949879b130 Signed-off-by: Stephen Finucane Depends-on: https://review.opendev.org/c/openstack/grenade/+/949166 --- lib/neutron | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/lib/neutron b/lib/neutron index 69ff212991..78ff6cfb24 100644 --- a/lib/neutron +++ b/lib/neutron @@ -82,11 +82,9 @@ NEUTRON_CONF=$NEUTRON_CONF_DIR/neutron.conf export NEUTRON_TEST_CONFIG_FILE=${NEUTRON_TEST_CONFIG_FILE:-"$NEUTRON_CONF_DIR/debug.ini"} # NEUTRON_DEPLOY_MOD_WSGI defines how neutron is deployed, allowed values: -# - False (default) : Run neutron under Eventlet -# - True : Run neutron under uwsgi -# TODO(annp): Switching to uwsgi in next cycle if things turn out to be stable -# enough -NEUTRON_DEPLOY_MOD_WSGI=$(trueorfalse False NEUTRON_DEPLOY_MOD_WSGI) +# - False : Run neutron under Eventlet +# - True (default) : Run neutron under uwsgi +NEUTRON_DEPLOY_MOD_WSGI=$(trueorfalse True NEUTRON_DEPLOY_MOD_WSGI) NEUTRON_UWSGI=neutron.wsgi.api:application NEUTRON_UWSGI_CONF=$NEUTRON_CONF_DIR/neutron-api-uwsgi.ini From d040e15961050f6e3fe538d8d65f13df554870f7 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Fri, 11 Oct 2024 16:18:19 +0100 Subject: [PATCH 515/574] lib/neutron: Remove NEUTRON_DEPLOY_MOD_WSGI neutron no longer supports running under eventlet, so this option is a no-op. 
Change-Id: Ib2767c0e2bb5aad5d8173dc5653e44a42c2bd499 Signed-off-by: Stephen Finucane --- lib/neutron | 111 +++++++++++++++------------------------------------- 1 file changed, 32 insertions(+), 79 deletions(-) diff --git a/lib/neutron b/lib/neutron index 78ff6cfb24..ea2d8e728a 100644 --- a/lib/neutron +++ b/lib/neutron @@ -62,11 +62,9 @@ if is_service_enabled tls-proxy; then Q_PROTOCOL="https" fi - # Set up default directories GITDIR["python-neutronclient"]=$DEST/python-neutronclient - NEUTRON_DIR=$DEST/neutron NEUTRON_FWAAS_DIR=$DEST/neutron-fwaas @@ -81,11 +79,6 @@ NEUTRON_CONF_DIR=/etc/neutron NEUTRON_CONF=$NEUTRON_CONF_DIR/neutron.conf export NEUTRON_TEST_CONFIG_FILE=${NEUTRON_TEST_CONFIG_FILE:-"$NEUTRON_CONF_DIR/debug.ini"} -# NEUTRON_DEPLOY_MOD_WSGI defines how neutron is deployed, allowed values: -# - False : Run neutron under Eventlet -# - True (default) : Run neutron under uwsgi -NEUTRON_DEPLOY_MOD_WSGI=$(trueorfalse True NEUTRON_DEPLOY_MOD_WSGI) - NEUTRON_UWSGI=neutron.wsgi.api:application NEUTRON_UWSGI_CONF=$NEUTRON_CONF_DIR/neutron-api-uwsgi.ini @@ -153,7 +146,7 @@ _Q_PLUGIN_EXTRA_CONF_PATH=/etc/neutron # The name of the service in the endpoint URL NEUTRON_ENDPOINT_SERVICE_NAME=${NEUTRON_ENDPOINT_SERVICE_NAME-"networking"} -if [[ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" && -z "$NEUTRON_ENDPOINT_SERVICE_NAME" ]]; then +if [[ -z "$NEUTRON_ENDPOINT_SERVICE_NAME" ]]; then NEUTRON_ENDPOINT_SERVICE_NAME="networking" fi @@ -451,9 +444,7 @@ function configure_neutron { # for state reports is more than adequate. iniset $NEUTRON_CONF DEFAULT rpc_state_report_workers 0 - if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then - write_uwsgi_config "$NEUTRON_UWSGI_CONF" "$NEUTRON_UWSGI" "/networking" "" "neutron-api" - fi + write_uwsgi_config "$NEUTRON_UWSGI_CONF" "$NEUTRON_UWSGI" "/networking" "" "neutron-api" } function configure_neutron_nova { @@ -499,11 +490,7 @@ function create_nova_conf_neutron { # Migrated from keystone_data.sh function create_neutron_accounts { local neutron_url - if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then - neutron_url=$Q_PROTOCOL://$SERVICE_HOST/ - else - neutron_url=$Q_PROTOCOL://$SERVICE_HOST:$Q_PORT/ - fi + neutron_url=$Q_PROTOCOL://$SERVICE_HOST/ if [ ! -z "$NEUTRON_ENDPOINT_SERVICE_NAME" ]; then neutron_url=$neutron_url$NEUTRON_ENDPOINT_SERVICE_NAME fi @@ -634,34 +621,25 @@ function start_neutron_service_and_check { fi # Start the Neutron service - if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then - # The default value of "rpc_workers" is None (not defined). If - # "rpc_workers" is explicitly set to 0, the RPC workers process - # should not be executed. 
- local rpc_workers - rpc_workers=$(iniget_multiline $NEUTRON_CONF DEFAULT rpc_workers) - - enable_service neutron-api - run_process neutron-api "$(which uwsgi) --procname-prefix neutron-api --ini $NEUTRON_UWSGI_CONF" - neutron_url=$Q_PROTOCOL://$Q_HOST/ - if [ "$rpc_workers" != "0" ]; then - enable_service neutron-rpc-server - fi - enable_service neutron-periodic-workers - _enable_ovn_maintenance - if [ "$rpc_workers" != "0" ]; then - run_process neutron-rpc-server "$NEUTRON_BIN_DIR/neutron-rpc-server $cfg_file_options" - fi - run_process neutron-periodic-workers "$NEUTRON_BIN_DIR/neutron-periodic-workers $cfg_file_options" - _run_ovn_maintenance - else - run_process q-svc "$NEUTRON_BIN_DIR/neutron-server $cfg_file_options" - neutron_url=$service_protocol://$Q_HOST:$service_port/ - # Start proxy if enabled - if is_service_enabled tls-proxy; then - start_tls_proxy neutron '*' $Q_PORT $Q_HOST $Q_PORT_INT - fi - fi + # The default value of "rpc_workers" is None (not defined). If + # "rpc_workers" is explicitly set to 0, the RPC workers process + # should not be executed. + local rpc_workers + rpc_workers=$(iniget_multiline $NEUTRON_CONF DEFAULT rpc_workers) + + enable_service neutron-api + run_process neutron-api "$(which uwsgi) --procname-prefix neutron-api --ini $NEUTRON_UWSGI_CONF" + neutron_url=$Q_PROTOCOL://$Q_HOST/ + if [ "$rpc_workers" != "0" ]; then + enable_service neutron-rpc-server + fi + enable_service neutron-periodic-workers + _enable_ovn_maintenance + if [ "$rpc_workers" != "0" ]; then + run_process neutron-rpc-server "$NEUTRON_BIN_DIR/neutron-rpc-server $cfg_file_options" + fi + run_process neutron-periodic-workers "$NEUTRON_BIN_DIR/neutron-periodic-workers $cfg_file_options" + _run_ovn_maintenance if [ ! -z "$NEUTRON_ENDPOINT_SERVICE_NAME" ]; then neutron_url=$neutron_url$NEUTRON_ENDPOINT_SERVICE_NAME fi @@ -723,14 +701,10 @@ function stop_other { [ ! 
-z "$pid" ] && sudo kill -9 $pid fi - if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then - stop_process neutron-rpc-server - stop_process neutron-periodic-workers - stop_process neutron-api - _stop_ovn_maintenance - else - stop_process q-svc - fi + stop_process neutron-rpc-server + stop_process neutron-periodic-workers + stop_process neutron-api + _stop_ovn_maintenance if is_service_enabled q-l3 neutron-l3; then sudo pkill -f "radvd -C $DATA_DIR/neutron/ra" @@ -841,14 +815,12 @@ function _configure_public_network_connectivity { # cleanup_neutron() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_neutron { - if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then - stop_process neutron-api - stop_process neutron-rpc-server - stop_process neutron-periodic-workers - _stop_ovn_maintenance - remove_uwsgi_config "$NEUTRON_UWSGI_CONF" "neutron-api" - sudo rm -f $(apache_site_config_for neutron-api) - fi + stop_process neutron-api + stop_process neutron-rpc-server + stop_process neutron-periodic-workers + _stop_ovn_maintenance + remove_uwsgi_config "$NEUTRON_UWSGI_CONF" "neutron-api" + sudo rm -f $(apache_site_config_for neutron-api) if [[ -n "$OVS_PHYSICAL_BRIDGE" ]]; then _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False True "inet" @@ -955,12 +927,6 @@ function _configure_neutron_common { # Format logging setup_logging $NEUTRON_CONF - if is_service_enabled tls-proxy && [ "$NEUTRON_DEPLOY_MOD_WSGI" == "False" ]; then - # Set the service port for a proxy to take the original - iniset $NEUTRON_CONF DEFAULT bind_port "$Q_PORT_INT" - iniset $NEUTRON_CONF oslo_middleware enable_proxy_headers_parsing True - fi - _neutron_setup_rootwrap } @@ -1026,25 +992,12 @@ function _configure_neutron_plugin_agent { neutron_plugin_configure_plugin_agent } -function _replace_api_paste_composite { - local sep - sep=$(echo -ne "\x01") - # Replace it - $sudo sed -i -e "s/\/\: neutronversions_composite/\/"${NEUTRON_ENDPOINT_SERVICE_NAME}"\/\: neutronversions_composite/" "$Q_API_PASTE_FILE" - $sudo sed -i -e "s/\/healthcheck\: healthcheck/\/"${NEUTRON_ENDPOINT_SERVICE_NAME}"\/healthcheck\: healthcheck/" "$Q_API_PASTE_FILE" - $sudo sed -i -e "s/\/v2.0\: neutronapi_v2_0/\/"${NEUTRON_ENDPOINT_SERVICE_NAME}"\/v2.0\: neutronapi_v2_0/" "$Q_API_PASTE_FILE" -} - # _configure_neutron_service() - Set config files for neutron service # It is called when q-svc is enabled. function _configure_neutron_service { Q_API_PASTE_FILE=$NEUTRON_CONF_DIR/api-paste.ini cp $NEUTRON_DIR/etc/api-paste.ini $Q_API_PASTE_FILE - if [[ "$NEUTRON_DEPLOY_MOD_WSGI" == "False" && -n "$NEUTRON_ENDPOINT_SERVICE_NAME" ]]; then - _replace_api_paste_composite - fi - # Update either configuration file with plugin iniset $NEUTRON_CONF DEFAULT core_plugin $Q_PLUGIN_CLASS From 5cb2abf79ef103838ab0f922643f4a62ddf16cfb Mon Sep 17 00:00:00 2001 From: Mohammed Naser Date: Tue, 25 Feb 2025 22:54:05 -0500 Subject: [PATCH 516/574] Switch ZSWAP_ZPOOL to zsmalloc The z3fold compressed pages allocator is on it's way out of the Linux kernel and running this on newer systems will give you an error: + lib/host:configure_zswap:45 : echo z3fold z3fold tee: /sys/module/zswap/parameters/zpool: No such file or directory So, get ahead of things and move to the much faster recommended by the kernel developers alternative. 
[1]: https://lore.kernel.org/all/20240904233343.933462-1-yosryahmed@google.com/T/#u Change-Id: I7c137114dd7585d3179a8d5dee818bb379bbcb1f --- lib/host | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/host b/lib/host index a812c39612..58062eff6b 100644 --- a/lib/host +++ b/lib/host @@ -35,7 +35,7 @@ ENABLE_ZSWAP=$(trueorfalse False ENABLE_ZSWAP) # lz4 is very fast although it does not have the best compression # zstd has much better compression but more latency ZSWAP_COMPRESSOR=${ZSWAP_COMPRESSOR:="lz4"} -ZSWAP_ZPOOL=${ZSWAP_ZPOOL:="z3fold"} +ZSWAP_ZPOOL=${ZSWAP_ZPOOL:="zsmalloc"} function configure_zswap { if [[ $ENABLE_ZSWAP == "True" ]] ; then # Centos 9 stream seems to only support enabling but not run time From cbae98949193085f0ef5cfc250d5e8cc7530de5f Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Mon, 12 May 2025 15:17:42 +0100 Subject: [PATCH 517/574] Remove libvirt apparmor workaround This is triggering bug #2065685 [1] on Ubuntu 24.04 (Noble) if podman is installed (since that brings in the broken paast and pasta packages). Given the workaround is nearly 10 years old [2], it should not be necessary anymore. [1] https://bugs.launchpad.net/ubuntu/+source/apparmor/+bug/2065685 [2] https://bugs.launchpad.net/networking-ovn/+bug/1466631 Change-Id: I525b1f30bca7093791f927ff647db7745d25df22 Signed-off-by: Stephen Finucane Related-bug: #2065685 Related-bug: #1466631 --- lib/neutron_plugins/ovn_agent | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index 71b5e3350d..e58cd4fb38 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -300,18 +300,6 @@ function create_public_bridge { _configure_public_network_connectivity } -function _disable_libvirt_apparmor { - if ! sudo aa-status --enabled ; then - return 0 - fi - # NOTE(arosen): This is used as a work around to allow newer versions - # of libvirt to work with ovs configured ports. See LP#1466631. - # requires the apparmor-utils - install_package apparmor-utils - # disables apparmor for libvirtd - sudo aa-complain /etc/apparmor.d/usr.sbin.libvirtd -} - # OVN compilation functions # ------------------------- @@ -614,7 +602,6 @@ function init_ovn { # in the ovn, ovn-nb, or ovs databases. We're going to trash them and # create new ones on each devstack run. - _disable_libvirt_apparmor local mkdir_cmd="mkdir -p ${OVN_DATADIR}" if [[ "$OVN_BUILD_FROM_SOURCE" == "False" ]]; then From f5b9596f24c03079c6ea2bcac29b1973bf5fe589 Mon Sep 17 00:00:00 2001 From: yatinkarel Date: Fri, 2 May 2025 15:51:01 +0530 Subject: [PATCH 518/574] Temporary fixes rhel 9 distros With [1] py39 constraints drop can't run fips jobs on centos 9-stream or rocky9. As a workaround can run with PYTHON3_VERSION: 3.11 in affected jobs. Until centos 10-stream support is ready[1] we also need to install libvirt-python and uwsgi from source as uwsgi and libvirt rpms are bundled with python3. 
[1] https://review.opendev.org/c/openstack/requirements/+/948285 [2] https://review.opendev.org/c/openstack/devstack/+/937251 Depends-On: https://review.opendev.org/c/openstack/devstack/+/946763 Depends-On: https://review.opendev.org/c/openstack/devstack/+/948558 Depends-On: https://review.opendev.org/c/openstack/devstack/+/948786 Depends-On: https://review.opendev.org/c/openstack/devstack/+/948797 Related-Bug: #2109591 Change-Id: I80d4c65ba0728c3e4b18738c6b0d539409f19976 --- .zuul.yaml | 7 +++++++ lib/apache | 2 +- lib/nova_plugins/functions-libvirt | 8 +++++++- 3 files changed, 15 insertions(+), 2 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 48dd55e2d2..9552fa3b47 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -735,6 +735,10 @@ parent: tempest-full-py3 description: CentOS 9 Stream platform test nodeset: devstack-single-node-centos-9-stream + vars: + devstack_localrc: + # TODO(ykarel) Remove this when moving to 10-stream + PYTHON3_VERSION: 3.11 timeout: 9000 voting: false @@ -760,6 +764,9 @@ voting: false vars: configure_swap_size: 4096 + devstack_localrc: + # TODO(ykarel) Remove this when moving to rocky10 + PYTHON3_VERSION: 3.11 - job: name: devstack-platform-ubuntu-jammy diff --git a/lib/apache b/lib/apache index 744c0f10b6..b971ecf8cd 100644 --- a/lib/apache +++ b/lib/apache @@ -89,7 +89,7 @@ function install_apache_uwsgi { # didn't fix Python 3.10 compatibility before release. Should be # fixed in uwsgi 4.9.0; can remove this when packages available # or we drop this release - elif is_fedora && ! is_openeuler && ! [[ $DISTRO =~ f36 ]]; then + elif is_fedora && ! is_openeuler && ! [[ $DISTRO =~ f36|rhel9 ]]; then # Note httpd comes with mod_proxy_uwsgi and it is loaded by # default; the mod_proxy_uwsgi package actually conflicts now. # See: diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt index ba2e98e304..35840539da 100644 --- a/lib/nova_plugins/functions-libvirt +++ b/lib/nova_plugins/functions-libvirt @@ -93,7 +93,13 @@ function install_libvirt { # as the base system version is too old. We should have # pre-installed these install_package $qemu_package - install_package libvirt libvirt-devel python3-libvirt + install_package libvirt libvirt-devel + + if [[ $DISTRO =~ rhel9 ]]; then + pip_install_gr libvirt-python + else + install_package python3-libvirt + fi if is_arch "aarch64"; then install_package edk2-aarch64 From 7fedf7f78764542a05429a22e980de4a1486faa1 Mon Sep 17 00:00:00 2001 From: Ivan Anfimov Date: Wed, 7 May 2025 18:17:40 +0000 Subject: [PATCH 519/574] Remove temporary fix for problems with CSS styles Fixed by Horizon side: https://review.opendev.org/c/openstack/horizon/+/949036 Change-Id: I8acb029b0562381cdbe28f0ee32f3aed07de5784 --- files/apache-horizon.template | 1 - 1 file changed, 1 deletion(-) diff --git a/files/apache-horizon.template b/files/apache-horizon.template index 98d02e168e..da7a7d26c3 100644 --- a/files/apache-horizon.template +++ b/files/apache-horizon.template @@ -10,7 +10,6 @@ DocumentRoot %HORIZON_DIR%/.blackhole/ Alias %WEBROOT%/media %HORIZON_DIR%/openstack_dashboard/static Alias %WEBROOT%/static %HORIZON_DIR%/static - Alias /static %HORIZON_DIR%/static RedirectMatch "^/$" "%WEBROOT%/" From 46e14fb1f70e25aada290f6f5648800ec7a147b3 Mon Sep 17 00:00:00 2001 From: Jay Faulkner Date: Sun, 18 May 2025 16:59:13 -0700 Subject: [PATCH 520/574] Bubble up image download failures Currently, we're still returning 0 out of the upload_image method despite the download failing. 
This changes behavior such that if the image download fails, it returns an exit code of 1 to the caller to be handled (or fail early) accordingly. Change-Id: I901dc065b51946f363145ae888cca602946ceeea --- functions | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/functions b/functions index 42d08d7c4a..829fc86c55 100644 --- a/functions +++ b/functions @@ -147,7 +147,8 @@ function upload_image { if [[ $rc -ne 0 ]]; then if [[ "$attempt" -eq "$max_attempts" ]]; then echo "Not found: $image_url" - return + # Signal failure to download to the caller, so they can fail early + return 1 fi echo "Download failed, retrying in $attempt second, attempt: $attempt" sleep $attempt From 5d41cb1f51cccdbecf375cf84f9893b29f8c3ffc Mon Sep 17 00:00:00 2001 From: Eric Harney Date: Tue, 20 May 2025 13:17:49 -0400 Subject: [PATCH 521/574] Silence SyntaxWarnings in outfilter.py Use raw strings for these regexes. Change-Id: If5d35fa527b464f34a0d2335e5c6b388be726a54 --- tools/outfilter.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/outfilter.py b/tools/outfilter.py index 55f9ee1487..c9907b072a 100644 --- a/tools/outfilter.py +++ b/tools/outfilter.py @@ -26,8 +26,8 @@ import re import sys -IGNORE_LINES = re.compile('(set \+o|xtrace)') -HAS_DATE = re.compile('^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{3} \|') +IGNORE_LINES = re.compile(r'(set \+o|xtrace)') +HAS_DATE = re.compile(r'^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{3} \|') def get_options(): From 6ebe6f1b26b371b11ecd9cb6d0d154839ba6941e Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Thu, 22 May 2025 02:13:55 +0000 Subject: [PATCH 522/574] Updated from generate-devstack-plugins-list Change-Id: I443a7715aa85e4ffe65994dc77f7091c7f441876 --- doc/source/plugin-registry.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index e84c946287..f3f11cfe16 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -44,6 +44,7 @@ openstack/freezer `https://opendev.org/openstack/freezer openstack/freezer-api `https://opendev.org/openstack/freezer-api `__ openstack/freezer-tempest-plugin `https://opendev.org/openstack/freezer-tempest-plugin `__ openstack/freezer-web-ui `https://opendev.org/openstack/freezer-web-ui `__ +openstack/grian-ui `https://opendev.org/openstack/grian-ui `__ openstack/heat `https://opendev.org/openstack/heat `__ openstack/heat-dashboard `https://opendev.org/openstack/heat-dashboard `__ openstack/ironic `https://opendev.org/openstack/ironic `__ From 0e8042deff33bffbab732f70a66ece162aa470f7 Mon Sep 17 00:00:00 2001 From: Balazs Gibizer Date: Tue, 29 Apr 2025 15:36:28 +0200 Subject: [PATCH 523/574] Add SYSTEMD_ENV_VARS dictionary This will allow to pass env variables from zuul job definitions to to systemd service files via the local conf. The first use case of this is to pass OS_NOVA_DISABLE_EVENTLET_PATCHING=true to nova services that already supports running in native threading mode instead of with Eventlet. During the Eventlet removal effort this will allow us to have separate jobs testing the same service in different concurrency mode. 
Change-Id: I675043e42006286bb7e1190ea9462fb8d8daa38c
--- functions-common | 6 ++++++ 1 file changed, 6 insertions(+)
diff --git a/functions-common b/functions-common index e265256ccf..db2367cef6 100644 --- a/functions-common +++ b/functions-common @@ -43,6 +43,9 @@ declare -A -g GITREPO declare -A -g GITBRANCH declare -A -g GITDIR +# Systemd service file environment variables per service +declare -A -g SYSTEMD_ENV_VARS + KILL_PATH="$(which kill)" # Save these variables to .stackenv @@ -1642,6 +1645,9 @@ function _run_under_systemd { user=$STACK_USER fi local env_vars="$5" + if [[ -v SYSTEMD_ENV_VARS[$service] ]]; then + env_vars="${SYSTEMD_ENV_VARS[$service]} $env_vars" + fi if [[ "$command" =~ "uwsgi" ]] ; then if [[ "$GLOBAL_VENV" == "True" ]] ; then cmd="$cmd --venv $DEVSTACK_VENV"
From df3fa124689402831543a24fc036f55e9e0bab33 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Thu, 29 May 2025 02:49:43 +0000
Subject: [PATCH 524/574] Updated from generate-devstack-plugins-list
Change-Id: Ibebfa75ae6b233d6a913ffb3f8dced4290a8ab1b
--- doc/source/plugin-registry.rst | 1 - 1 file changed, 1 deletion(-)
diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index f3f11cfe16..9185263443 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -114,7 +114,6 @@ starlingx/nfv `https://opendev.org/starlingx/nfv `__ vexxhost/openstack-operator `https://opendev.org/vexxhost/openstack-operator `__ x/almanach `https://opendev.org/x/almanach `__ -x/apmec `https://opendev.org/x/apmec `__ x/bilean `https://opendev.org/x/bilean `__ x/broadview-collector `https://opendev.org/x/broadview-collector `__ x/collectd-openstack-plugins `https://opendev.org/x/collectd-openstack-plugins `__
From d2e309f0481f2e8577737bc2e5d6761a70b93dca Mon Sep 17 00:00:00 2001 From: Takashi Kajinami Date: Tue, 17 Jun 2025 21:15:16 +0900
Subject: [PATCH 525/574] Bump etcd to 3.5.x
The etcd project maintains release branches for the current version and previous release[1]. Because 3.6.0 was already released, 3.4.x is no longer supported. Bump it to the latest bug fix release of 3.5.x. Also, the binary for s390x has been restored upstream, so add it back.
[1] https://etcd.io/docs/v3.6/op-guide/versioning/ Change-Id: I108466c65bd1ebd1e42c75dfbe9b2173d04ba122 --- stackrc | 22 +++++++--------------- 1 file changed, 7 insertions(+), 15 deletions(-) diff --git a/stackrc b/stackrc index 0319fc8a50..817b445c7f 100644 --- a/stackrc +++ b/stackrc @@ -705,12 +705,11 @@ fi EXTRA_CACHE_URLS="" # etcd3 defaults -ETCD_VERSION=${ETCD_VERSION:-v3.4.27} -ETCD_SHA256_AMD64=${ETCD_SHA256_AMD64:-"a32d21e006252dbc3405b0645ba8468021ed41376974b573285927bf39b39eb9"} -ETCD_SHA256_ARM64=${ETCD_SHA256_ARM64:-"ed7e257c225b9b9545fac22246b97f4074a4b5109676e92dbaebfb9315b69cc0"} -ETCD_SHA256_PPC64=${ETCD_SHA256_PPC64:-"eb8825e0bc2cbaf9e55947f5ee373ebc9ca43b6a2ea5ced3b992c81855fff37e"} -# etcd v3.2.x and later doesn't have anything for s390x -ETCD_SHA256_S390X=${ETCD_SHA256_S390X:-""} +ETCD_VERSION=${ETCD_VERSION:-v3.5.21} +ETCD_SHA256_AMD64=${ETCD_SHA256_AMD64:-"adddda4b06718e68671ffabff2f8cee48488ba61ad82900e639d108f2148501c"} +ETCD_SHA256_ARM64=${ETCD_SHA256_ARM64:-"95bf6918623a097c0385b96f139d90248614485e781ec9bee4768dbb6c79c53f"} +ETCD_SHA256_PPC64=${ETCD_SHA256_PPC64:-"6fb6ecb3d1b331eb177dc610a8efad3aceb1f836d6aeb439ba0bfac5d5c2a38c"} +ETCD_SHA256_S390X=${ETCD_SHA256_S390X:-"a211a83961ba8a7e94f7d6343ad769e699db21a715ba4f3b68cf31ea28f9c951"} # Make sure etcd3 downloads the correct architecture if is_arch "x86_64"; then ETCD_ARCH="amd64" @@ -722,15 +721,8 @@ elif is_arch "ppc64le"; then ETCD_ARCH="ppc64le" ETCD_SHA256=${ETCD_SHA256:-$ETCD_SHA256_PPC64} elif is_arch "s390x"; then - # An etcd3 binary for s390x is not available on github like it is - # for other arches. Only continue if a custom download URL was - # provided. - if [[ -n "${ETCD_DOWNLOAD_URL}" ]]; then - ETCD_ARCH="s390x" - ETCD_SHA256=${ETCD_SHA256:-$ETCD_SHA256_S390X} - else - exit_distro_not_supported "etcd3. No custom ETCD_DOWNLOAD_URL provided." - fi + ETCD_ARCH="s390x" + ETCD_SHA256=${ETCD_SHA256:-$ETCD_SHA256_S390X} else exit_distro_not_supported "invalid hardware type - $ETCD_ARCH" fi From 5822439d95b02a7033f6333cda1dfafdc342b852 Mon Sep 17 00:00:00 2001 From: Jay Faulkner Date: Wed, 18 Jun 2025 08:20:50 -0700 Subject: [PATCH 526/574] Update base OS recommendation to 24.04 This is what all of OpenStack tests on now, it's likely a better choice for a default. Worth noting 22.04 doesn't work for latest-ironic (at least). Change-Id: Ibe4c1d4416dded4ac3280cb6ef423b0792b584ab --- doc/source/index.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/index.rst b/doc/source/index.rst index 70871ef876..a07bb84922 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -40,7 +40,7 @@ Start with a clean and minimal install of a Linux system. DevStack attempts to support the two latest LTS releases of Ubuntu, Rocky Linux 9 and openEuler. -If you do not have a preference, Ubuntu 22.04 (Jammy) is the +If you do not have a preference, Ubuntu 24.04 (Noble) is the most tested, and will probably go the smoothest. Add Stack User (optional) From 37c755e70721fa49d9c344158592761a75b124d3 Mon Sep 17 00:00:00 2001 From: Takashi Kajinami Date: Mon, 23 Jun 2025 22:32:56 +0900 Subject: [PATCH 527/574] Drop logic for Python < 3 Python 2 support was removed globally multiple cycles ago. 
Change-Id: I503ef9be68e59c8983d245f1fbb689651eb564ff --- tools/outfilter.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/tools/outfilter.py b/tools/outfilter.py index 55f9ee1487..df03a779b5 100644 --- a/tools/outfilter.py +++ b/tools/outfilter.py @@ -90,13 +90,10 @@ def main(): if outfile: # We've opened outfile as a binary file to get the - # non-buffered behaviour. on python3, sys.stdin was + # non-buffered behaviour. on python3, sys.stdin was # opened with the system encoding and made the line into # utf-8, so write the logfile out in utf-8 bytes. - if sys.version_info < (3,): - outfile.write(ts_line) - else: - outfile.write(ts_line.encode('utf-8')) + outfile.write(ts_line.encode('utf-8')) outfile.flush() From a8aecbad4f7f4cd4b6ddc57b78844cede5325d4d Mon Sep 17 00:00:00 2001 From: Takashi Kajinami Date: Sat, 23 Nov 2024 21:50:13 +0900 Subject: [PATCH 528/574] Allow installing etcd3gw from source ... to enable forward testing in etcd3gw. Change-Id: I249243fc913a82c28d096ef48aacecd07f2c2694 --- lib/libraries | 9 +++++++-- stackrc | 4 ++++ tests/test_libs_from_pypi.sh | 2 +- 3 files changed, 12 insertions(+), 3 deletions(-) diff --git a/lib/libraries b/lib/libraries index fa418785dd..c3248f11b2 100755 --- a/lib/libraries +++ b/lib/libraries @@ -27,6 +27,7 @@ GITDIR["castellan"]=$DEST/castellan GITDIR["cliff"]=$DEST/cliff GITDIR["cursive"]=$DEST/cursive GITDIR["debtcollector"]=$DEST/debtcollector +GITDIR["etcd3gw"]=$DEST/etcd3gw GITDIR["futurist"]=$DEST/futurist GITDIR["openstacksdk"]=$DEST/openstacksdk GITDIR["os-client-config"]=$DEST/os-client-config @@ -131,8 +132,12 @@ function install_libs { # python client libraries we might need from git can go here _install_lib_from_source "python-barbicanclient" - # etcd (because tooz does not have a hard dependency on these) - pip_install etcd3gw + if use_library_from_git etcd3gw ; then + _install_lib_from_source "etcd3gw" + else + # etcd (because tooz does not have a hard dependency on these) + pip_install etcd3gw + fi } # Restore xtrace diff --git a/stackrc b/stackrc index c05d4e2d98..ddd623fc06 100644 --- a/stackrc +++ b/stackrc @@ -395,6 +395,10 @@ GITBRANCH["futurist"]=${FUTURIST_BRANCH:-$TARGET_BRANCH} GITREPO["debtcollector"]=${DEBTCOLLECTOR_REPO:-${GIT_BASE}/openstack/debtcollector.git} GITBRANCH["debtcollector"]=${DEBTCOLLECTOR_BRANCH:-$TARGET_BRANCH} +# etcd3gw library +GITREPO["etcd3gw"]=${ETCD3GW_REPO:-${GIT_BASE}/openstack/etcd3gw.git} +GITBRANCH["etcd3gw"]=${ETCD3GW_BRANCH:-$BRANCHLESS_TARGET_BRANCH} + # helpful state machines GITREPO["automaton"]=${AUTOMATON_REPO:-${GIT_BASE}/openstack/automaton.git} GITBRANCH["automaton"]=${AUTOMATON_BRANCH:-$TARGET_BRANCH} diff --git a/tests/test_libs_from_pypi.sh b/tests/test_libs_from_pypi.sh index 839e3a1328..9552c93c4f 100755 --- a/tests/test_libs_from_pypi.sh +++ b/tests/test_libs_from_pypi.sh @@ -45,7 +45,7 @@ ALL_LIBS+=" oslo.cache oslo.reports osprofiler cursive" ALL_LIBS+=" keystoneauth ironic-lib neutron-lib oslo.privsep" ALL_LIBS+=" diskimage-builder os-vif python-brick-cinderclient-ext" ALL_LIBS+=" castellan python-barbicanclient ovsdbapp os-ken os-resource-classes" -ALL_LIBS+=" oslo.limit" +ALL_LIBS+=" oslo.limit etcd3gw" # Generate the above list with # echo ${!GITREPO[@]} From 06633c6c3033cc92329e1849266f4f2ed33d2124 Mon Sep 17 00:00:00 2001 From: Artem Goncharov Date: Fri, 27 Jun 2025 15:05:51 +0200 Subject: [PATCH 529/574] Restart slapd after cleanup A bug in openldap mdb (memory database) causes it to crash in an attempt to delete nonexisting tree, 
which is exactly what we do in the cleanup. After the coredump it does not start automatically (what maybe make sense to change). The fix is merged in https://bugs.openldap.org/show_bug.cgi?id=10336 but we do not have this fix in Noble. For now try simply to restart the process. Change-Id: Iae597aae345d12a2c82f66342ff40ac0a387eddf Signed-off-by: Artem Goncharov --- lib/ldap | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/lib/ldap b/lib/ldap index b0195db258..66c2afc4d5 100644 --- a/lib/ldap +++ b/lib/ldap @@ -82,6 +82,14 @@ function init_ldap { # Remove data but not schemas clear_ldap_state + if is_ubuntu; then + # a bug in OpenLDAP 2.6.7+ + # (https://bugs.openldap.org/show_bug.cgi?id=10336) causes slapd crash + # after deleting nonexisting tree. It is fixed upstream, but Ubuntu is + # still not having a fix in Noble. Try temporarily simly restarting the + # process. + sudo service $LDAP_SERVICE_NAME restart + fi # Add our top level ldap nodes if ldapsearch -x -w $LDAP_PASSWORD -D "$LDAP_MANAGER_DN" -H $LDAP_URL -b "$LDAP_BASE_DN" | grep -q "Success"; then From 47aa8d1744dad23c4ace34a7edbff78360fb0079 Mon Sep 17 00:00:00 2001 From: Takashi Kajinami Date: Sat, 12 Jul 2025 00:25:42 +0900 Subject: [PATCH 530/574] Drop unused [service_user] auth_strategy The option does not actually exist. Change-Id: I659bba38ca038fa370a411ae43ca942b6390c779 Signed-off-by: Takashi Kajinami --- lib/cinder | 1 - lib/nova | 1 - 2 files changed, 2 deletions(-) diff --git a/lib/cinder b/lib/cinder index b557d4b10b..eb8a63dbfc 100644 --- a/lib/cinder +++ b/lib/cinder @@ -732,7 +732,6 @@ function configure_cinder_volume_upload { function init_cinder_service_user_conf { configure_keystone_authtoken_middleware $CINDER_CONF cinder service_user iniset $CINDER_CONF service_user send_service_user_token True - iniset $CINDER_CONF service_user auth_strategy keystone } # Restore xtrace diff --git a/lib/nova b/lib/nova index 810a3d9554..2357d87ee3 100644 --- a/lib/nova +++ b/lib/nova @@ -843,7 +843,6 @@ function init_nova_service_user_conf { iniset $NOVA_CONF service_user user_domain_name "$SERVICE_DOMAIN_NAME" iniset $NOVA_CONF service_user project_name "$SERVICE_PROJECT_NAME" iniset $NOVA_CONF service_user project_domain_name "$SERVICE_DOMAIN_NAME" - iniset $NOVA_CONF service_user auth_strategy keystone } function conductor_conf { From e221349e56414e1cb89c75311bf659bd869e16a7 Mon Sep 17 00:00:00 2001 From: Grzegorz Grasza Date: Mon, 14 Jul 2025 12:37:23 +0200 Subject: [PATCH 531/574] keystone: Set user_enabled_default for LDAP domain When using the LDAP identity backend, stack.sh fails during the create_keystone_accounts phase when trying to verify the newly created demo user. This is caused by a BadRequestException from the Keystone API with the error, 'enabled' is a required property. The error occurs because the default LDAP user object created by the DevStack scripts does not contain an attribute that Keystone can map to its mandatory enabled property. This change fixes the issue by adding user_enabled_emulation = True to the domain-specific LDAP configuration in the create_ldap_domain function. This tells Keystone to assume a user is enabled if the attribute is not explicitly defined in their LDAP entry, which resolves the schema incompatibility and allows the script to complete successfully. 
Signed-off-by: Grzegorz Grasza Change-Id: I15ddf0b88ee93615c318d4845a026ca1e25c3e69 --- lib/keystone | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/keystone b/lib/keystone index 8371045026..0311e24c67 100644 --- a/lib/keystone +++ b/lib/keystone @@ -608,6 +608,7 @@ function create_ldap_domain { iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap user_name_attribute "cn" iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap user_mail_attribute "mail" iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap user_id_attribute "uid" + iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap user_enabled_emulation "True" iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap user "cn=Manager,dc=openstack,dc=org" iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap url "ldap://localhost" iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap suffix $LDAP_BASE_DN From 9c180f2f060bfed65bc1b24c16010466b48dc0da Mon Sep 17 00:00:00 2001 From: Ghanshyam Mann Date: Fri, 4 Jul 2025 11:40:38 -0700 Subject: [PATCH 532/574] Configure 'manager' role in tempest In this release, nova is implementing the manager role in policy[depends-on], and Tempest added (depends-on) a new config option to decide if new defaults are present in testing env. Setting the manager role availability in Tempest so that test can use manager role user to perform the required operation in nova. Depends-On: https://review.opendev.org/c/openstack/nova/+/953063 Depends-On: https://review.opendev.org/c/openstack/tempest/+/953265 Change-Id: I69e32c7de5a63df1c21979f748b77e512068eeec Signed-off-by: Ghanshyam Mann --- lib/tempest | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/tempest b/lib/tempest index c9486f6310..286cb56d41 100644 --- a/lib/tempest +++ b/lib/tempest @@ -524,6 +524,10 @@ function configure_tempest { iniset $TEMPEST_CONFIG compute-feature-enabled serial_console True fi + # NOTE(gmaan): Since 2025.2, 'manager' role is available in nova. + local nova_policy_roles="admin,manager,member,reader" + iniset $TEMPEST_CONFIG compute-feature-enabled nova_policy_roles $nova_policy_roles + # Network iniset $TEMPEST_CONFIG network project_networks_reachable false iniset $TEMPEST_CONFIG network public_network_id "$public_network_id" From 6180e73702cfef2011c32f315cde97128a4b7eec Mon Sep 17 00:00:00 2001 From: Rodolfo Alonso Hernandez Date: Thu, 26 Jun 2025 07:56:44 +0000 Subject: [PATCH 533/574] Replace the OVN Metadata agent with the OVN agent The OVN Metadata agent is replaced in any CI job with the OVN agent. This is an incremental step on the deprecation of the OVN Metadata agent. 
Related-Bug: #2112313 Signed-off-by: Rodolfo Alonso Hernandez Change-Id: I4e8d12762099c91d773c4f5e5699bc9fed43a9c9 --- .zuul.yaml | 4 ++-- lib/neutron_plugins/ovn_agent | 3 ++- stackrc | 2 +- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 9552fa3b47..9f9c69c925 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -620,7 +620,7 @@ ovsdb-server: true # Neutron services q-svc: true - q-ovn-metadata-agent: true + q-ovn-agent: true # Swift services s-account: true s-container: true @@ -657,7 +657,7 @@ ovs-vswitchd: true ovsdb-server: true # Neutron services - q-ovn-metadata-agent: true + q-ovn-agent: true # Cinder services c-bak: true c-vol: true diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index e58cd4fb38..b128fde2b6 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -96,8 +96,9 @@ OVN_META_CONF=$NEUTRON_CONF_DIR/neutron_ovn_metadata_agent.ini OVN_META_DATA_HOST=${OVN_META_DATA_HOST:-$(ipv6_unquote $SERVICE_HOST)} # OVN agent configuration +# The OVN agent is configured, by default, with the "metadata" extension. OVN_AGENT_CONF=$NEUTRON_CONF_DIR/plugins/ml2/ovn_agent.ini -OVN_AGENT_EXTENSIONS=${OVN_AGENT_EXTENSIONS:-} +OVN_AGENT_EXTENSIONS=${OVN_AGENT_EXTENSIONS:-metadata} # If True (default) the node will be considered a gateway node. ENABLE_CHASSIS_AS_GW=$(trueorfalse True ENABLE_CHASSIS_AS_GW) diff --git a/stackrc b/stackrc index 0319fc8a50..325af580ad 100644 --- a/stackrc +++ b/stackrc @@ -75,7 +75,7 @@ if ! isset ENABLED_SERVICES ; then # OVN ENABLED_SERVICES+=,ovn-controller,ovn-northd,ovs-vswitchd,ovsdb-server # Neutron - ENABLED_SERVICES+=,q-svc,q-ovn-metadata-agent + ENABLED_SERVICES+=,q-svc,q-ovn-agent # Dashboard ENABLED_SERVICES+=,horizon # Additional services From a8f98073b97a2485a7505ebe36dba9cad1e0a7e1 Mon Sep 17 00:00:00 2001 From: Rodolfo Alonso Hernandez Date: Thu, 17 Jul 2025 10:20:27 +0000 Subject: [PATCH 534/574] Fix the nodeset "devstack-single-node-opensuse-15" The label "opensuse-15" is no longer available since [1]. Since there are pending references to this nodeset from older branches, use an empty node list for it to fix the zuul config until all references can be dropped. [1]https://review.opendev.org/c/openstack/project-config/+/955214 Signed-off-by: Rodolfo Alonso Hernandez Change-Id: I2f5105178482402aa108910d1bd1ec2f2c7c8933 --- .zuul.yaml | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 9552fa3b47..693edffe6e 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -48,15 +48,10 @@ nodes: - controller +# TODO(frickler): drop this dummy nodeset once all references have been removed - nodeset: name: devstack-single-node-opensuse-15 - nodes: - - name: controller - label: opensuse-15 - groups: - - name: tempest - nodes: - - controller + nodes: [] - nodeset: name: devstack-single-node-debian-bookworm From bfa9e547a901df5dd74926385010421157b6fca7 Mon Sep 17 00:00:00 2001 From: Ghanshyam Maan Date: Sat, 26 Jul 2025 00:58:51 +0000 Subject: [PATCH 535/574] Avoid setting iso image in tempest config Tempest use image_ref and image_ref_alt as their base image to run test against and perform ssh etc. Most of the iso image require ssh to be enabled explicitly so avoid setting them as image_ref and image_ref_alt unless it is explicitly requested. 
One example of how setting an ISO image in tempest can fail tests: https://review.opendev.org/c/openstack/tempest/+/954404 Needed-By: https://review.opendev.org/c/openstack/whitebox-tempest-plugin/+/955950 Change-Id: Ic385a702758d9d38880ec92cfdce2528766fc95d Signed-off-by: Ghanshyam Maan --- lib/tempest | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/lib/tempest b/lib/tempest index c9486f6310..cac2633324 100644 --- a/lib/tempest +++ b/lib/tempest @@ -105,6 +105,8 @@ TEMPEST_CONCURRENCY=${TEMPEST_CONCURRENCY:-$(nproc)} TEMPEST_FLAVOR_RAM=${TEMPEST_FLAVOR_RAM:-192} TEMPEST_FLAVOR_ALT_RAM=${TEMPEST_FLAVOR_ALT_RAM:-256} +TEMPEST_USE_ISO_IMAGE=$(trueorfalse False TEMPEST_USE_ISO_IMAGE) + # Functions # --------- @@ -161,12 +163,20 @@ function get_active_images { # start with a fresh array in case we are called multiple times img_array=() - while read -r IMAGE_NAME IMAGE_UUID; do + # NOTE(gmaan): Most of the iso image require ssh to be enabled explicitly + # and if we set those iso images in image_ref and image_ref_alt that can + # cause test to fail because many tests using image_ref and image_ref_alt + # to boot server also perform ssh. We skip to set iso image in tempest + # unless it is requested via TEMPEST_USE_ISO_IMAGE. while read -r IMAGE_NAME IMAGE_UUID DISK_FORMAT; do + if [[ "$DISK_FORMAT" == "iso" ]] && [[ "$TEMPEST_USE_ISO_IMAGE" == False ]]; then + continue + fi if [ "$IMAGE_NAME" = "$DEFAULT_IMAGE_NAME" ]; then img_id="$IMAGE_UUID" fi img_array+=($IMAGE_UUID) - done < <(openstack --os-cloud devstack-admin image list --property status=active | awk -F'|' '!/^(+--)|ID|aki|ari/ { print $3,$2 }') + done < <(openstack --os-cloud devstack-admin image list --long --property status=active | awk -F'|' '!/^(+--)|ID|aki|ari/ { print $3,$2,$4 }') } function poll_glance_images { From 5c338f47d57fe849215d6b9f5c1f4eb53c193ab1 Mon Sep 17 00:00:00 2001 From: Takashi Kajinami Date: Mon, 4 Aug 2025 23:31:17 +0900 Subject: [PATCH 536/574] Fix uninitialized os_VENDOR ... to fix the wrong libvirt group name being detected. Closes-Bug: #2119496 Change-Id: I2988fcb5010f333eab5a88b83ff14aab1cb15ebd Signed-off-by: Takashi Kajinami --- stackrc | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/stackrc b/stackrc index 0319fc8a50..44bc6b321d 100644 --- a/stackrc +++ b/stackrc @@ -615,6 +615,11 @@ case "$VIRT_DRIVER" in LIBVIRT_TYPE=${LIBVIRT_TYPE:-kvm} LIBVIRT_CPU_MODE=${LIBVIRT_CPU_MODE:-custom} LIBVIRT_CPU_MODEL=${LIBVIRT_CPU_MODEL:-Nehalem} + + if [[ -z "$os_VENDOR" ]]; then + GetOSVersion + fi + if [[ "$os_VENDOR" =~ (Debian|Ubuntu) ]]; then From 90b0a6760ba95e8b2fa4a43dda1d0eeb56c00f2d Mon Sep 17 00:00:00 2001 From: Takashi Kajinami Date: Tue, 5 Aug 2025 12:55:21 +0900 Subject: [PATCH 537/574] Drop old libvirt group detection for Ubuntu/Debian The "libvirtd" group was used in quite old Ubuntu releases such as Xenial, and the "libvirt" group is used instead in recent versions. Change-Id: I2df747d54d3cb395c245ecc2aa24dcbf395e7a46 Signed-off-by: Takashi Kajinami --- stackrc | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/stackrc b/stackrc index 44bc6b321d..811a285d19 100644 --- a/stackrc +++ b/stackrc @@ -621,11 +621,7 @@ case "$VIRT_DRIVER" in fi if [[ "$os_VENDOR" =~ (Debian|Ubuntu) ]]; then - # The groups change with newer libvirt. Older Ubuntu used - # 'libvirtd', but now uses libvirt like Debian.
Do a quick check - # to see if libvirtd group already exists to handle grenade's case. - LIBVIRT_GROUP=$(cut -d ':' -f 1 /etc/group | grep 'libvirtd$' || true) - LIBVIRT_GROUP=${LIBVIRT_GROUP:-libvirt} + LIBVIRT_GROUP=libvirt else LIBVIRT_GROUP=libvirtd fi From ec96b1a067684bf729f4dbd84dce9db02171b234 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Din=C3=A7er=20=C3=87elik?= Date: Wed, 6 Aug 2025 12:27:15 +0300 Subject: [PATCH 538/574] Fix default settings for Ubuntu aarch64 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Closes-Bug: #2080957 Change-Id: I441897937253f8d44144fa7f5f4622f42bf74a5f Signed-off-by: Dinçer Çelik --- lib/nova_plugins/functions-libvirt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt index 35840539da..c0713f9953 100644 --- a/lib/nova_plugins/functions-libvirt +++ b/lib/nova_plugins/functions-libvirt @@ -71,7 +71,7 @@ function install_libvirt { if is_ubuntu; then install_package qemu-system libvirt-clients libvirt-daemon-system libvirt-dev python3-libvirt systemd-coredump if is_arch "aarch64"; then - install_package qemu-efi + install_package qemu-efi-aarch64 fi #pip_install_gr elif is_fedora; then From 4f065ca80e4589513ca639cb39d3899943698b41 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Tue, 13 May 2025 14:56:44 +0100 Subject: [PATCH 539/574] Replace use of fgrep It is deprecated. Change-Id: Iad071865361d51c148fc157d715bdf517ec5b94b Signed-off-by: Stephen Finucane --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 04b5f4ca6a..a06c7be9a7 100755 --- a/stack.sh +++ b/stack.sh @@ -356,7 +356,7 @@ async_init # Certain services such as rabbitmq require that the local hostname resolves # correctly. Make sure it exists in /etc/hosts so that is always true. LOCAL_HOSTNAME=`hostname -s` -if ! fgrep -qwe "$LOCAL_HOSTNAME" /etc/hosts; then +if ! grep -Fqwe "$LOCAL_HOSTNAME" /etc/hosts; then sudo sed -i "s/\(^127.0.0.1.*\)/\1 $LOCAL_HOSTNAME/" /etc/hosts fi From 67fa02fc5fd6a8786baff61695a578338462b3d3 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Wed, 6 Aug 2025 10:53:32 +0100 Subject: [PATCH 540/574] Remove requirement on system oslo.utils This was only working because the noVNC package on Ubuntu pulls in oslo.utils. 
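For illustration only, the new standalone helper added below can be exercised directly; it exits 0 only when the argument is a valid IPv6 address (the sample addresses here are arbitrary documentation values):

    # run from the devstack checkout
    python3 tools/verify-ipv6-address.py "2001:db8::1" && echo "valid IPv6"
    python3 tools/verify-ipv6-address.py "192.0.2.1" || echo "not an IPv6 address"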
Change-Id: I3733df3e2667f16082b3ff57d39cf086d81fbe02 Signed-off-by: Stephen Finucane --- tools/verify-ipv6-address.py | 41 +++++++++++++++++++++++++++ tools/verify-ipv6-only-deployments.sh | 24 ++++++---------- 2 files changed, 49 insertions(+), 16 deletions(-) create mode 100644 tools/verify-ipv6-address.py diff --git a/tools/verify-ipv6-address.py b/tools/verify-ipv6-address.py new file mode 100644 index 0000000000..dc18fa6d8a --- /dev/null +++ b/tools/verify-ipv6-address.py @@ -0,0 +1,41 @@ +#!/usr/bin/env python3 + +import argparse +import ipaddress +import sys + +def main(): + parser = argparse.ArgumentParser( + description="Check if a given string is a valid IPv6 address.", + formatter_class=argparse.RawTextHelpFormatter, + ) + parser.add_argument( + "address", + help=( + "The IPv6 address string to validate.\n" + "Examples:\n" + " 2001:0db8:85a3:0000:0000:8a2e:0370:7334\n" + " 2001:db8::1\n" + " ::1\n" + " fe80::1%eth0 (scope IDs are handled)" + ), + ) + args = parser.parse_args() + + try: + # try to create a IPv6Address: if we fail to parse or get an + # IPv4Address then die + ip_obj = ipaddress.ip_address(args.address.strip('[]')) + if isinstance(ip_obj, ipaddress.IPv6Address): + sys.exit(0) + else: + sys.exit(1) + except ValueError: + sys.exit(1) + except Exception as e: + print(f"An unexpected error occurred during validation: {e}", file=sys.stderr) + sys.exit(1) + + +if __name__ == "__main__": + main() diff --git a/tools/verify-ipv6-only-deployments.sh b/tools/verify-ipv6-only-deployments.sh index 0f0cba8afe..a1acecbb3f 100755 --- a/tools/verify-ipv6-only-deployments.sh +++ b/tools/verify-ipv6-only-deployments.sh @@ -33,28 +33,23 @@ function verify_devstack_ipv6_setting { echo $TUNNEL_IP_VERSION "TUNNEL_IP_VERSION is not set to 6 so TUNNEL_ENDPOINT_IP cannot be an IPv6 address." exit 1 fi - is_service_host_ipv6=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$_service_host'"))') - if [[ "$is_service_host_ipv6" != "True" ]]; then + if ! python3 ${TOP_DIR}/tools/verify-ipv6-address.py "$_service_host"; then echo $SERVICE_HOST "SERVICE_HOST is not IPv6 which means devstack cannot deploy services on IPv6 addresses." exit 1 fi - is_host_ipv6=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$_host_ipv6'"))') - if [[ "$is_host_ipv6" != "True" ]]; then + if ! python3 ${TOP_DIR}/tools/verify-ipv6-address.py "$_host_ipv6"; then echo $HOST_IPV6 "HOST_IPV6 is not IPv6 which means devstack cannot deploy services on IPv6 addresses." exit 1 fi - is_service_listen_address=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$_service_listen_address'"))') - if [[ "$is_service_listen_address" != "True" ]]; then + if ! python3 ${TOP_DIR}/tools/verify-ipv6-address.py "$_service_listen_address"; then echo $SERVICE_LISTEN_ADDRESS "SERVICE_LISTEN_ADDRESS is not IPv6 which means devstack cannot deploy services on IPv6 addresses." exit 1 fi - is_service_local_host=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$_service_local_host'"))') - if [[ "$is_service_local_host" != "True" ]]; then + if ! python3 ${TOP_DIR}/tools/verify-ipv6-address.py "$_service_local_host"; then echo $SERVICE_LOCAL_HOST "SERVICE_LOCAL_HOST is not IPv6 which means devstack cannot deploy services on IPv6 addresses." 
exit 1 fi - is_tunnel_endpoint_ip=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$_tunnel_endpoint_ip'"))') - if [[ "$is_tunnel_endpoint_ip" != "True" ]]; then + if ! python3 ${TOP_DIR}/tools/verify-ipv6-address.py "$_tunnel_endpoint_ip"; then echo $TUNNEL_ENDPOINT_IP "TUNNEL_ENDPOINT_IP is not IPv6 which means devstack will not deploy with an IPv6 endpoint address." exit 1 fi @@ -63,8 +58,7 @@ function verify_devstack_ipv6_setting { } function sanity_check_system_ipv6_enabled { - system_ipv6_enabled=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_ipv6_enabled())') - if [[ $system_ipv6_enabled != "True" ]]; then + if [ ! -f "/proc/sys/net/ipv6/conf/default/disable_ipv6" ] || [ "$(cat /proc/sys/net/ipv6/conf/default/disable_ipv6)" -ne "0" ]; then echo "IPv6 is disabled in system" exit 1 fi @@ -78,10 +72,8 @@ function verify_service_listen_address_is_ipv6 { for endpoint in ${endpoints}; do local endpoint_address='' endpoint_address=$(echo "$endpoint" | awk -F/ '{print $3}' | awk -F] '{print $1}') - endpoint_address=$(echo $endpoint_address | tr -d []) - local is_endpoint_ipv6='' - is_endpoint_ipv6=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$endpoint_address'"))') - if [[ "$is_endpoint_ipv6" != "True" ]]; then + endpoint_address=$(echo $endpoint_address | tr -d '[]') + if ! python3 ${TOP_DIR}/tools/verify-ipv6-address.py "$endpoint_address"; then all_ipv6=False echo $endpoint ": This is not an IPv6 endpoint which means corresponding service is not listening on an IPv6 address." continue From b6bf0b126b46042341cc3d47a92ad9b0d9b9a523 Mon Sep 17 00:00:00 2001 From: Sean Mooney Date: Tue, 5 Aug 2025 16:44:26 +0000 Subject: [PATCH 541/574] Use novnc from source by default This change restores the default devstack behavior in the zuul jobs by removing the override of NOVNC_FROM_PACKAGE from devstack-base. When installed locally, devstack defaults to installing novnc from git. As reported in bug #2109592, Ubuntu and possibly other distros have a packaging bug where the python3-novnc package 1) exists and 2) depends on `oslo.config` and, as a result, `oslo.utils`. The reason python3-novnc existing is a bug is that novnc has not had any Python deliverable since the 0.6.0 release around 2016. So this package is no longer used and is effectively empty since novnc fully moved to using JavaScript. For unrelated reasons, devstack creates the global venv with --site-packages to install `libvirt-python`, which also means that any other Python dependencies installed at the system level also infect the devstack venv. In the past, this was not a problem, but as of epoxy, Nova requires a newer version of oslo than Ubuntu provides in the distro package. This is where the python3-novnc package and its incorrect dependency on oslo breaks CI. This is not seen locally, as devstack uses novnc from git. This change makes CI do that also. Closes-Bug: #2109592 Change-Id: I8f018e1e57e3f54997d2cf55b1b3aa728e82899b Signed-off-by: Sean Mooney --- .zuul.yaml | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index b42c800068..eee450a6ad 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -352,6 +352,12 @@ nodes (everything but the controller). 
required-projects: - opendev.org/openstack/devstack + # this is a workaround for a packaging bug in ubuntu + # remove when https://bugs.launchpad.net/nova/+bug/2109592 + # is resolved and oslo.config is not a dep of the novnc deb + # via the defunct python3-novnc package. + - novnc/novnc + roles: - zuul: opendev.org/openstack/openstack-zuul-jobs vars: @@ -369,7 +375,6 @@ LOG_COLOR: false VERBOSE: true VERBOSE_NO_TIMESTAMP: true - NOVNC_FROM_PACKAGE: true ERROR_ON_CLONE: true # Gate jobs can't deal with nested virt. Disable it by default. LIBVIRT_TYPE: '{{ devstack_libvirt_type | default("qemu") }}' @@ -442,7 +447,6 @@ LOG_COLOR: false VERBOSE: true VERBOSE_NO_TIMESTAMP: true - NOVNC_FROM_PACKAGE: true ERROR_ON_CLONE: true LIBVIRT_TYPE: qemu devstack_services: From 3b3aab52646e7a7ed737716efbfbe7fbef170911 Mon Sep 17 00:00:00 2001 From: Joel Capitao Date: Fri, 8 Nov 2024 16:11:06 +0000 Subject: [PATCH 542/574] Support CentOS Stream 10 This patch includes changes required to run devstack on CentOS Stream 10 which has been already published in official repos by CentOS team [1]: - Add RDO deps repository for CS10. - remove xinetd package from installation for swift. Note that rsync-daemon is installed which should work fine. - Use python3-distro to identify the distro - Add devstack-single-node-centos-10-stream nodeset - Add devstack-platform-centos-10-stream job to the check pipeline. Closes https://issues.redhat.com/browse/RDO-379 [1] https://mirror.stream.centos.org/10-stream/ Change-Id: I33a6c5530482c28a24f2043cd4195e7bcd46427d Signed-off-by: Cyril Roelandt Signed-off-by: Sean Mooney --- .zuul.yaml | 50 ++++++++++++++++++++++++++++++++++++++++++++ files/rpms/general | 8 ++++--- files/rpms/n-cpu | 2 +- files/rpms/nova | 2 +- files/rpms/swift | 2 +- functions-common | 11 +++++----- stack.sh | 14 +++++++++---- tools/install_pip.sh | 2 +- 8 files changed, 74 insertions(+), 17 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index eee450a6ad..3deab35e87 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -38,6 +38,16 @@ nodes: - controller +- nodeset: + name: devstack-single-node-centos-10-stream + nodes: + - name: controller + label: centos-10-stream-8GB + groups: + - name: tempest + nodes: + - controller + - nodeset: name: devstack-single-node-centos-9-stream nodes: @@ -86,6 +96,36 @@ nodes: - controller +- nodeset: + name: openstack-two-node-centos-10-stream + nodes: + - name: controller + label: centos-10-stream-8GB + - name: compute1 + label: centos-10-stream-8GB + groups: + # Node where tests are executed and test results collected + - name: tempest + nodes: + - controller + # Nodes running the compute service + - name: compute + nodes: + - controller + - compute1 + # Nodes that are not the controller + - name: subnode + nodes: + - compute1 + # Switch node for multinode networking setup + - name: switch + nodes: + - controller + # Peer nodes for multinode networking setup + - name: peers + nodes: + - compute1 + - nodeset: name: openstack-two-node-centos-9-stream nodes: @@ -729,6 +769,14 @@ # we often have to rush things through devstack to stabilise the gate, # and these platforms don't have the round-the-clock support to avoid # becoming blockers in that situation. 
+- job: + name: devstack-platform-centos-10-stream + parent: tempest-full-py3 + description: CentOS 10 Stream platform test + nodeset: devstack-single-node-centos-10-stream + timeout: 9000 + voting: false + - job: name: devstack-platform-centos-9-stream parent: tempest-full-py3 @@ -911,6 +959,7 @@ - devstack - devstack-ipv6 - devstack-enforce-scope + - devstack-platform-centos-10-stream - devstack-platform-centos-9-stream - devstack-platform-debian-bookworm - devstack-platform-rocky-blue-onyx @@ -994,6 +1043,7 @@ - devstack-no-tls-proxy periodic-weekly: jobs: + - devstack-platform-centos-10-stream - devstack-platform-centos-9-stream - devstack-platform-debian-bookworm - devstack-platform-rocky-blue-onyx diff --git a/files/rpms/general b/files/rpms/general index 8a5755cc37..6f4572c708 100644 --- a/files/rpms/general +++ b/files/rpms/general @@ -10,9 +10,10 @@ glibc-langpack-en # dist:rhel9 graphviz # needed only for docs httpd httpd-devel -iptables-nft # dist:rhel9 +iptables-nft # dist:rhel9,rhel10 iptables-services -java-1.8.0-openjdk-headless +java-1.8.0-openjdk-headless # not:rhel10 +java-21-openjdk-headless # dist:rhel10 libffi-devel libjpeg-turbo-devel # Pillow 3.0.0 libxml2-devel # lxml @@ -23,7 +24,8 @@ net-tools openssh-server openssl openssl-devel # to rebuild pyOpenSSL if needed -pcre-devel # for python-pcre +pcre2-devel # dist:rhel10 for python-pcre2 +pcre-devel # not:rhel10 for python-pcre pkgconfig postgresql-devel # psycopg2 psmisc diff --git a/files/rpms/n-cpu b/files/rpms/n-cpu index 7ce5a72d6b..5683862ee0 100644 --- a/files/rpms/n-cpu +++ b/files/rpms/n-cpu @@ -1,6 +1,6 @@ cryptsetup dosfstools -genisoimage # not:rhel9 +genisoimage # not:rhel9,rhel10 iscsi-initiator-utils libosinfo lvm2 diff --git a/files/rpms/nova b/files/rpms/nova index e0f13b854a..3ed2943c1d 100644 --- a/files/rpms/nova +++ b/files/rpms/nova @@ -1,7 +1,7 @@ conntrack-tools curl ebtables -genisoimage # not:rhel9 required for config_drive +genisoimage # not:rhel9,rhel10 required for config_drive iptables iputils kernel-modules # not:openEuler-22.03 diff --git a/files/rpms/swift b/files/rpms/swift index 49a1833dc4..cf614335c1 100644 --- a/files/rpms/swift +++ b/files/rpms/swift @@ -4,4 +4,4 @@ memcached rsync-daemon sqlite xfsprogs -xinetd # not:f36,rhel9 +xinetd # not:f36,rhel9,rhel10 diff --git a/functions-common b/functions-common index db2367cef6..85ee294afa 100644 --- a/functions-common +++ b/functions-common @@ -423,7 +423,7 @@ function _ensure_lsb_release { elif [[ -x $(command -v zypper 2>/dev/null) ]]; then sudo zypper -n install lsb-release elif [[ -x $(command -v dnf 2>/dev/null) ]]; then - sudo dnf install -y redhat-lsb-core || sudo dnf install -y openeuler-lsb + sudo dnf install -y python3-distro || sudo dnf install -y openeuler-lsb else die $LINENO "Unable to find or auto-install lsb_release" fi @@ -436,9 +436,9 @@ function _ensure_lsb_release { # - os_VENDOR # - os_PACKAGE function GetOSVersion { - # CentOS Stream 9 and RHEL 9 do not provide lsb_release + # CentOS Stream 9 or later and RHEL 9 or later do not provide lsb_release source /etc/os-release - if [[ "${ID}${VERSION}" == "centos9" ]] || [[ "${ID}${VERSION}" =~ "rhel9" ]]; then + if [[ "${ID}${VERSION}" =~ "centos" ]] || [[ "${ID}${VERSION}" =~ "rhel" ]]; then os_RELEASE=${VERSION_ID} os_CODENAME="n/a" os_VENDOR=$(echo $NAME | tr -d '[:space:]') @@ -485,9 +485,8 @@ function GetDistro { "$os_VENDOR" =~ (OracleServer) || \ "$os_VENDOR" =~ (Rocky) || \ "$os_VENDOR" =~ (Virtuozzo) ]]; then - # Drop the . 
release as we assume it's compatible - # XXX re-evaluate when we get RHEL10 - DISTRO="rhel${os_RELEASE::1}" + MAJOR_VERSION=$(echo $os_RELEASE | cut -d. -f1) + DISTRO="rhel${MAJOR_VERSION}" elif [[ "$os_VENDOR" =~ (openEuler) ]]; then DISTRO="openEuler-$os_RELEASE" else diff --git a/stack.sh b/stack.sh index 04b5f4ca6a..2e130aec58 100755 --- a/stack.sh +++ b/stack.sh @@ -230,7 +230,7 @@ write_devstack_version # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -SUPPORTED_DISTROS="bookworm|jammy|noble|rhel9" +SUPPORTED_DISTROS="bookworm|jammy|noble|rhel9|rhel10" if [[ ! ${DISTRO} =~ $SUPPORTED_DISTROS ]]; then echo "WARNING: this script has not been tested on $DISTRO" @@ -302,16 +302,17 @@ function _install_epel { } function _install_rdo { - if [[ $DISTRO == "rhel9" ]]; then + if [[ $DISTRO =~ "rhel" ]]; then + VERSION=${DISTRO:4:2} rdo_release=${TARGET_BRANCH#*/} if [[ "$TARGET_BRANCH" == "master" ]]; then # adding delorean-deps repo to provide current master rpms - sudo wget https://trunk.rdoproject.org/centos9-master/delorean-deps.repo -O /etc/yum.repos.d/delorean-deps.repo + sudo wget https://trunk.rdoproject.org/centos${VERSION}-master/delorean-deps.repo -O /etc/yum.repos.d/delorean-deps.repo else if sudo dnf provides centos-release-openstack-${rdo_release} >/dev/null 2>&1; then sudo dnf -y install centos-release-openstack-${rdo_release} else - sudo wget https://trunk.rdoproject.org/centos9-${rdo_release}/delorean-deps.repo -O /etc/yum.repos.d/delorean-deps.repo + sudo wget https://trunk.rdoproject.org/centos${VERSION}-${rdo_release}/delorean-deps.repo -O /etc/yum.repos.d/delorean-deps.repo fi fi fi @@ -408,6 +409,11 @@ elif [[ $DISTRO == "rhel9" ]]; then if is_package_installed curl-minimal; then sudo dnf swap -y curl-minimal curl fi +elif [[ $DISTRO == "rhel10" ]]; then + # for CentOS Stream 10 repository + sudo dnf config-manager --set-enabled crb + # rabbitmq and other packages are provided by RDO repositories. + _install_rdo elif [[ $DISTRO == "openEuler-22.03" ]]; then # There are some problem in openEuler. We should fix it first. Some required # package/action runs before fixup script. So we can't fix there. diff --git a/tools/install_pip.sh b/tools/install_pip.sh index 91b180c06f..79f97c5f7a 100755 --- a/tools/install_pip.sh +++ b/tools/install_pip.sh @@ -127,7 +127,7 @@ if [[ -n $PYPI_ALTERNATIVE_URL ]]; then configure_pypi_alternative_url fi -if is_fedora && [[ ${DISTRO} == f* || ${DISTRO} == rhel9 ]]; then +if is_fedora && [[ ${DISTRO} == f* || ${DISTRO} == rhel* ]]; then # get-pip.py will not install over the python3-pip package in # Fedora 34 any more. 
# https://bugzilla.redhat.com/show_bug.cgi?id=1988935 From 9c295d0da30b8e4d0809623e886dc9aaf6f52c25 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Thu, 7 Aug 2025 10:53:59 +0100 Subject: [PATCH 543/574] Remove dead checks for Python 3.6 Change-Id: I9fab7209955ebdfda0f309aa0160749bd0f962e6 Signed-off-by: Stephen Finucane --- tools/install_pip.sh | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/tools/install_pip.sh b/tools/install_pip.sh index 79f97c5f7a..027693fc0a 100755 --- a/tools/install_pip.sh +++ b/tools/install_pip.sh @@ -38,7 +38,6 @@ FILES=$TOP_DIR/files # [1] https://opendev.org/openstack/project-config/src/branch/master/nodepool/elements/cache-devstack/source-repository-pip PIP_GET_PIP_URL=${PIP_GET_PIP_URL:-"https://bootstrap.pypa.io/get-pip.py"} -PIP_GET_PIP36_URL=${PIP_GET_PIP36_URL:-"https://bootstrap.pypa.io/pip/3.6/get-pip.py"} GetDistro echo "Distro: $DISTRO" @@ -57,14 +56,8 @@ function get_versions { function install_get_pip { - if [[ "$PYTHON3_VERSION" = "3.6" ]]; then - _pip_url=$PIP_GET_PIP36_URL - _local_pip="$FILES/$(basename $_pip_url)-py36" - else - _pip_url=$PIP_GET_PIP_URL - _local_pip="$FILES/$(basename $_pip_url)" - fi - + _pip_url=$PIP_GET_PIP_URL + _local_pip="$FILES/$(basename $_pip_url)" # If get-pip.py isn't python, delete it. This was probably an # outage on the server. From fdc41d76abf353b6a9b206bf92c6b9adb90b253a Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Thu, 7 Aug 2025 10:54:45 +0100 Subject: [PATCH 544/574] Remove dead checks for Fedora 36 It is EOL. Change-Id: I609cfce8a98f9933380ddbc719ed22e6fcda4785 Signed-off-by: Stephen Finucane --- files/rpms/swift | 1 - lib/apache | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/files/rpms/swift b/files/rpms/swift index cf614335c1..c3921a47d4 100644 --- a/files/rpms/swift +++ b/files/rpms/swift @@ -4,4 +4,3 @@ memcached rsync-daemon sqlite xfsprogs -xinetd # not:f36,rhel9,rhel10 diff --git a/lib/apache b/lib/apache index 449d2e70d4..c49da711e1 100644 --- a/lib/apache +++ b/lib/apache @@ -89,7 +89,7 @@ function install_apache_uwsgi { # didn't fix Python 3.10 compatibility before release. Should be # fixed in uwsgi 4.9.0; can remove this when packages available # or we drop this release - elif is_fedora && ! is_openeuler && ! [[ $DISTRO =~ f36|rhel9 ]]; then + elif is_fedora && ! is_openeuler && ! [[ $DISTRO =~ rhel9 ]]; then # Note httpd comes with mod_proxy_uwsgi and it is loaded by # default; the mod_proxy_uwsgi package actually conflicts now. # See: From 1a74605eb4c30e28a99edd2e824c2ce38d6315ad Mon Sep 17 00:00:00 2001 From: Douglas Mendizabal Date: Fri, 8 Aug 2025 10:44:41 -0400 Subject: [PATCH 545/574] Fix iniset to escape backslash characters MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch fixes an issue in iniset where backslash (\) characters are removed from the config value. This patch ensures that backslash characters (\) are escaped in addition to the ampersand (&) character that was already being escaped. 
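As an illustrative check mirroring the new test cases below (the config path is a throwaway placeholder), a value containing both special characters should now round-trip unchanged:

    # assumes devstack's ini helpers (inc/ini-config) are sourced
    iniset /tmp/test.conf ggg backslash 'foo\bar&baz'
    iniget /tmp/test.conf ggg backslash   # expected output: foo\bar&baz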
Closes-Bug: #2120180 Signed-off-by: Douglas Mendizábal Change-Id: Ica53ed42269931d151daf815d2e2c10c1f9e29a8 --- inc/ini-config | 6 ++++-- tests/test_ini_config.sh | 15 ++++++++++++++- 2 files changed, 18 insertions(+), 3 deletions(-) diff --git a/inc/ini-config b/inc/ini-config index f65e42d3a5..920d4775fa 100644 --- a/inc/ini-config +++ b/inc/ini-config @@ -189,8 +189,10 @@ function iniset { local option=$3 local value=$4 - # Escape the ampersand character (&) - value=$(echo $value | sed -e 's/&/\\&/g') + # Escape the ampersand (&) and backslash (\) characters for sed + # Order of substitution matters: we escape backslashes first before + # adding more backslashes to escape ampersands + value=$(echo $value | sed -e 's/\\/\\\\/g' -e 's/&/\\&/g') if [[ -z $section || -z $option ]]; then $xtrace diff --git a/tests/test_ini_config.sh b/tests/test_ini_config.sh index 6367cde441..fd3896d6ba 100755 --- a/tests/test_ini_config.sh +++ b/tests/test_ini_config.sh @@ -47,6 +47,9 @@ multi = foo2 [fff] ampersand = +[ggg] +backslash = + [key_with_spaces] rgw special key = something @@ -88,7 +91,7 @@ fi # test iniget_sections VAL=$(iniget_sections "${TEST_INI}") -assert_equal "$VAL" "default aaa bbb ccc ddd eee fff key_with_spaces \ +assert_equal "$VAL" "default aaa bbb ccc ddd eee fff ggg key_with_spaces \ del_separate_options del_same_option del_missing_option \ del_missing_option_multi del_no_options" @@ -134,6 +137,16 @@ done VAL=$(iniget ${TEST_INI} fff ampersand) assert_equal "$VAL" "&y" "iniset ampersands in option" +# Test with backslash in value +iniset ${TEST_INI} ggg backslash 'foo\bar' +VAL=$(iniget ${TEST_INI} ggg backslash) +assert_equal "$VAL" 'foo\bar' "iniset backslash in value" + +# Test with both ampersand and backslash +iniset ${TEST_INI} ggg backslash 'foo\bar&baz' +VAL=$(iniget ${TEST_INI} ggg backslash) +assert_equal "$VAL" 'foo\bar&baz' "iniset ampersand and backslash in value" + # test empty option if ini_has_option ${SUDO_ARG} ${TEST_INI} ddd empty; then passed "ini_has_option: ddd.empty present" From 34689f587966f9fd512a03c85762bd79dd4a4e9d Mon Sep 17 00:00:00 2001 From: Ghanshyam Maan Date: Wed, 27 Aug 2025 02:40:21 +0000 Subject: [PATCH 546/574] Configure nova 'service' role in tempest In this release, nova is implementing the service role in policy[depends-on], and Tempest being branchless needs to decide if service defaults are present in testing release/ env (Needed-By). Setting the service role availability in Tempest so that from this release onward, tests can use service role user to perform the required operation in nova. Depends-On: https://review.opendev.org/c/openstack/nova/+/957578 Needed-By: https://review.opendev.org/c/openstack/tempest/+/892639 Change-Id: I463cb85f3fcb9f2fdd7aa4a0a5f2ae49782e3fc1 Signed-off-by: Ghanshyam Maan --- lib/tempest | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index 53851209d2..1ebe9c5f1f 100644 --- a/lib/tempest +++ b/lib/tempest @@ -535,7 +535,7 @@ function configure_tempest { fi # NOTE(gmaan): Since 2025.2, 'manager' role is available in nova. 
- local nova_policy_roles="admin,manager,member,reader" + local nova_policy_roles="admin,manager,member,reader,service" iniset $TEMPEST_CONFIG compute-feature-enabled nova_policy_roles $nova_policy_roles # Network From 2aae15c93f4383c0a38dff276700c762e56a7134 Mon Sep 17 00:00:00 2001 From: Tobias Urdin Date: Tue, 6 May 2025 08:54:39 +0200 Subject: [PATCH 547/574] Use profile rbd for Ceph authx for cinder-backup Use the RBD profile instead of setting explicit permissions. Change-Id: Idc2258e3b69df3df57894c17018a2a35043c8fa9 Signed-off-by: Tobias Urdin --- lib/cinder_backups/ceph | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/cinder_backups/ceph b/lib/cinder_backups/ceph index 4b180490d7..c46b90c5b9 100644 --- a/lib/cinder_backups/ceph +++ b/lib/cinder_backups/ceph @@ -32,7 +32,7 @@ function configure_cinder_backup_ceph { if [[ "$REMOTE_CEPH" = "False" && "$CEPH_REPLICAS" -ne 1 ]]; then sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} crush_ruleset ${RULE_ID} fi - sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_BAK_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_BAK_CEPH_POOL}, allow rwx pool=${CINDER_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring + sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_BAK_CEPH_USER} mon "profile rbd" osd "profile rbd pool=${CINDER_BAK_CEPH_POOL}, profile rbd pool=${CINDER_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring sudo chown $(whoami):$(whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring fi From 2d7ff93390ca3c0f47a8b37c5f4912de69ab5b2f Mon Sep 17 00:00:00 2001 From: Jan Jasek Date: Mon, 1 Sep 2025 12:08:48 +0200 Subject: [PATCH 548/574] Remove debian-bullseye nodeset Horizon no longer use debian-bullseye nodeset Change-Id: I78094a9dd7e51641dfb9b1a851b46744184df702 Signed-off-by: Jan Jasek --- .zuul.yaml | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 3deab35e87..7bfd3e33b6 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -73,19 +73,6 @@ nodes: - controller -# Note(sean-k-mooney): this is still used by horizon for -# horizon-integration-tests, horizon-integration-pytest and -# horizon-ui-pytest, remove when horizon is updated. -- nodeset: - name: devstack-single-node-debian-bullseye - nodes: - - name: controller - label: debian-bullseye - groups: - - name: tempest - nodes: - - controller - - nodeset: name: devstack-single-node-rockylinux-9 nodes: From f6d8dab0e885b8de8c0f44388d538da7d4f9b7ec Mon Sep 17 00:00:00 2001 From: Saikumar Pulluri Date: Thu, 4 Sep 2025 08:48:42 -0400 Subject: [PATCH 549/574] Add service type to keystone authtoken middleware Configuring devstack's configure_keystone_authtoken_middleware to set service_type as an additional option. 
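For illustration only, a hypothetical caller would pass the new optional argument after the section name; because service_type is the fourth positional parameter, the section must be given explicitly when it is used. The config file, user and service type below are placeholders, not names from this change:

    # sketch: set service_type in a service's keystone_authtoken section
    configure_keystone_authtoken_middleware $EXAMPLE_CONF example-user keystone_authtoken example-service-type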
Needed-By: https://review.opendev.org/c/openstack/barbican/+/958845 Needed-By: https://review.opendev.org/c/openstack/manila/+/955393 Change-Id: I140c8392465965d68f52489b5e5bf3e47ae979be Signed-off-by: Saikumar Pulluri --- lib/keystone | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/keystone b/lib/keystone index 0311e24c67..241909cb9d 100644 --- a/lib/keystone +++ b/lib/keystone @@ -432,6 +432,7 @@ function configure_keystone_authtoken_middleware { local conf_file=$1 local admin_user=$2 local section=${3:-keystone_authtoken} + local service_type=$4 iniset $conf_file $section auth_type password iniset $conf_file $section interface public @@ -444,6 +445,9 @@ function configure_keystone_authtoken_middleware { iniset $conf_file $section cafile $SSL_BUNDLE_FILE iniset $conf_file $section memcached_servers $MEMCACHE_SERVERS + if [[ -n "$service_type" ]]; then + iniset $conf_file $section service_type $service_type + fi } # configure_auth_token_middleware conf_file admin_user IGNORED [section] From 2145b0a0031977ef2809a3eaa9abe6937e4777e6 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Wed, 10 Sep 2025 10:24:17 +0200 Subject: [PATCH 550/574] Add Rocky Linux Red Quartz singlenode job (10) Change-Id: Iaad9eb034348d559809108d254601d51719ff3e0 Signed-off-by: Michal Nasiadka --- .zuul.yaml | 21 +++++++++++++++++++++ functions-common | 11 ++++------- 2 files changed, 25 insertions(+), 7 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 3deab35e87..0a81dd82c2 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -96,6 +96,16 @@ nodes: - controller +- nodeset: + name: devstack-single-node-rockylinux-10 + nodes: + - name: controller + label: rockylinux-10-8GB + groups: + - name: tempest + nodes: + - controller + - nodeset: name: openstack-two-node-centos-10-stream nodes: @@ -815,6 +825,16 @@ # TODO(ykarel) Remove this when moving to rocky10 PYTHON3_VERSION: 3.11 +- job: + name: devstack-platform-rocky-red-quartz + parent: tempest-full-py3 + description: Rocky Linux Red Quartz platform test + nodeset: devstack-single-node-rockylinux-10 + timeout: 9000 + voting: false + vars: + configure_swap_size: 4096 + - job: name: devstack-platform-ubuntu-jammy parent: tempest-full-py3 @@ -963,6 +983,7 @@ - devstack-platform-centos-9-stream - devstack-platform-debian-bookworm - devstack-platform-rocky-blue-onyx + - devstack-platform-rocky-red-quartz - devstack-platform-ubuntu-noble-ovn-source - devstack-platform-ubuntu-noble-ovs - devstack-platform-ubuntu-jammy diff --git a/functions-common b/functions-common index 85ee294afa..37c1862c28 100644 --- a/functions-common +++ b/functions-common @@ -438,13 +438,10 @@ function _ensure_lsb_release { function GetOSVersion { # CentOS Stream 9 or later and RHEL 9 or later do not provide lsb_release source /etc/os-release - if [[ "${ID}${VERSION}" =~ "centos" ]] || [[ "${ID}${VERSION}" =~ "rhel" ]]; then + if [[ "${ID}" =~ (centos|rocky|rhel) ]]; then os_RELEASE=${VERSION_ID} - os_CODENAME="n/a" + os_CODENAME=$(echo $VERSION | grep -oP '(?<=[(])[^)]*') os_VENDOR=$(echo $NAME | tr -d '[:space:]') - elif [[ "${ID}${VERSION}" =~ "rocky9" ]]; then - os_VENDOR="Rocky" - os_RELEASE=${VERSION_ID} else _ensure_lsb_release @@ -483,7 +480,7 @@ function GetDistro { "$os_VENDOR" =~ (AlmaLinux) || \ "$os_VENDOR" =~ (Scientific) || \ "$os_VENDOR" =~ (OracleServer) || \ - "$os_VENDOR" =~ (Rocky) || \ + "$os_VENDOR" =~ (RockyLinux) || \ "$os_VENDOR" =~ (Virtuozzo) ]]; then MAJOR_VERSION=$(echo $os_RELEASE | cut -d. 
-f1) DISTRO="rhel${MAJOR_VERSION}" @@ -544,7 +541,7 @@ function is_fedora { [ "$os_VENDOR" = "RedHatEnterpriseServer" ] || \ [ "$os_VENDOR" = "RedHatEnterprise" ] || \ [ "$os_VENDOR" = "RedHatEnterpriseLinux" ] || \ - [ "$os_VENDOR" = "Rocky" ] || \ + [ "$os_VENDOR" = "RockyLinux" ] || \ [ "$os_VENDOR" = "CentOS" ] || [ "$os_VENDOR" = "CentOSStream" ] || \ [ "$os_VENDOR" = "AlmaLinux" ] || \ [ "$os_VENDOR" = "OracleServer" ] || [ "$os_VENDOR" = "Virtuozzo" ] From 1aa22aa6d4b8dac710b50b6aabd3ce9ce8280a98 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Tue, 6 May 2025 17:28:37 +0100 Subject: [PATCH 551/574] lib/glance: Remove support for deploying in standalone mode Change-Id: Ia70accd3e04bf9bea7fa50c18541fc71cff75f5f Signed-off-by: Stephen Finucane Depends-on: https://review.opendev.org/c/openstack/glance/+/933614 --- lib/glance | 52 ++++++++++++---------------------------------------- 1 file changed, 12 insertions(+), 40 deletions(-) diff --git a/lib/glance b/lib/glance index b596b53271..4dade5142d 100644 --- a/lib/glance +++ b/lib/glance @@ -83,13 +83,6 @@ GLANCE_DEFAULT_BACKEND=${GLANCE_DEFAULT_BACKEND:-fast} GLANCE_CACHE_DIR=${GLANCE_CACHE_DIR:=$DATA_DIR/glance/cache} GLANCE_CACHE_DRIVER=${GLANCE_CACHE_DRIVER:-centralized_db} -# Full Glance functionality requires running in standalone mode. If we are -# not in uwsgi mode, then we are standalone, otherwise allow separate control. -if [[ "$WSGI_MODE" != "uwsgi" ]]; then - GLANCE_STANDALONE=True -fi -GLANCE_STANDALONE=${GLANCE_STANDALONE:-False} - # File path for each store specified in GLANCE_MULTIPLE_FILE_STORES, the store # identifier will be appended to this path at runtime. If GLANCE_MULTIPLE_FILE_STORES # has fast,cheap specified then filepath will be generated like $DATA_DIR/glance/fast @@ -139,14 +132,7 @@ GLANCE_UWSGI_CONF=$GLANCE_CONF_DIR/glance-uwsgi.ini # Glance default limit for Devstack GLANCE_LIMIT_IMAGE_SIZE_TOTAL=${GLANCE_LIMIT_IMAGE_SIZE_TOTAL:-2000} -# If wsgi mode is uwsgi run glance under uwsgi, else default to eventlet -# TODO(mtreinish): Remove the eventlet path here and in all the similar -# conditionals below after the Pike release -if [[ "$WSGI_MODE" == "uwsgi" ]]; then - GLANCE_URL="$GLANCE_SERVICE_PROTOCOL://$GLANCE_SERVICE_HOST/image" -else - GLANCE_URL="$GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT" -fi +GLANCE_URL="$GLANCE_SERVICE_PROTOCOL://$GLANCE_SERVICE_HOST/image" # Functions # --------- @@ -451,12 +437,11 @@ function configure_glance { iniset $GLANCE_CACHE_CONF glance_store filesystem_store_datadir $GLANCE_IMAGE_DIR/ # Set default configuration options for the glance-image-import - iniset $GLANCE_IMAGE_IMPORT_CONF image_import_opts image_import_plugins [] + iniset $GLANCE_IMAGE_IMPORT_CONF image_import_opts image_import_plugins "[]" iniset $GLANCE_IMAGE_IMPORT_CONF inject_metadata_properties ignore_user_roles admin iniset $GLANCE_IMAGE_IMPORT_CONF inject_metadata_properties inject cp -p $GLANCE_DIR/etc/schema-image.json $GLANCE_SCHEMA_JSON - cp -p $GLANCE_DIR/etc/metadefs/*.json $GLANCE_METADEF_DIR if is_service_enabled tls-proxy; then @@ -467,19 +452,15 @@ function configure_glance { iniset $GLANCE_CACHE_CONF DEFAULT cinder_endpoint_template "https://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v3/%(project_id)s" fi - if [[ "$GLANCE_STANDALONE" == False ]]; then - write_local_uwsgi_http_config "$GLANCE_UWSGI_CONF" "$GLANCE_UWSGI" "/image" "glance-api" - # Grab our uwsgi listen address and use that to fill out our - # worker_self_reference_url config - iniset $GLANCE_API_CONF DEFAULT 
worker_self_reference_url \ - $(awk '-F= ' '/^http-socket/ { print "http://"$2}' $GLANCE_UWSGI_CONF) - else - write_local_proxy_http_config glance "http://$GLANCE_SERVICE_HOST:$GLANCE_SERVICE_PORT_INT" "/image" - iniset $GLANCE_API_CONF DEFAULT bind_host $GLANCE_SERVICE_LISTEN_ADDRESS - iniset $GLANCE_API_CONF DEFAULT bind_port $GLANCE_SERVICE_PORT_INT - iniset $GLANCE_API_CONF DEFAULT workers "$API_WORKERS" - iniset $GLANCE_API_CONF DEFAULT worker_self_reference_url $GLANCE_URL - fi + write_local_uwsgi_http_config "$GLANCE_UWSGI_CONF" "$GLANCE_UWSGI" "/image" "glance-api" + + # Grab our uwsgi listen address and use that to fill out our + # worker_self_reference_url config + iniset $GLANCE_API_CONF DEFAULT worker_self_reference_url $(awk '-F= ' '/^http-socket/ { print "http://"$2}' $GLANCE_UWSGI_CONF) + + # Configure the Python binary used for "import" plugins. If unset, these + # will attempt the uwsgi binary instead. + iniset $GLANCE_API_CONF wsgi python_interpreter $PYTHON if [[ "$GLANCE_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then iniset $GLANCE_API_CONF oslo_policy enforce_scope true @@ -664,17 +645,8 @@ function start_glance_remote_clone { # start_glance() - Start running processes function start_glance { local service_protocol=$GLANCE_SERVICE_PROTOCOL - if is_service_enabled tls-proxy; then - if [[ "$WSGI_MODE" != "uwsgi" ]]; then - start_tls_proxy glance-service '*' $GLANCE_SERVICE_PORT $GLANCE_SERVICE_HOST $GLANCE_SERVICE_PORT_INT - fi - fi - if [[ "$GLANCE_STANDALONE" == False ]]; then - run_process g-api "$(which uwsgi) --procname-prefix glance-api --ini $GLANCE_UWSGI_CONF" - else - run_process g-api "$GLANCE_BIN_DIR/glance-api --config-dir=$GLANCE_CONF_DIR" - fi + run_process g-api "$(which uwsgi) --procname-prefix glance-api --ini $GLANCE_UWSGI_CONF" if is_service_enabled g-api-r; then echo "Starting the g-api-r clone service..." From 2df0d7ab8230a0cc7ca1c5a90c254717c9ff2dc6 Mon Sep 17 00:00:00 2001 From: Ghanshyam Maan Date: Thu, 28 Aug 2025 03:39:25 +0000 Subject: [PATCH 552/574] Configure glance user in cinder conf Cinder talk to glance for new image location APIs which are default to 'service' role[1]. That needs cinder to have the glance service user configured. We need to assign admin role also to service user so that it can access images from glance. Needed-By: https://review.opendev.org/c/openstack/glance/+/958715 [1] https://review.opendev.org/c/openstack/glance/+/958715 Change-Id: I52d118672c053b9d6890bc6289bf12dcf5d7dce3 Signed-off-by: Ghanshyam Maan --- lib/cinder | 3 +++ lib/glance | 4 +++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/lib/cinder b/lib/cinder index eb8a63dbfc..aef6854062 100644 --- a/lib/cinder +++ b/lib/cinder @@ -419,6 +419,9 @@ function configure_cinder { iniset $CINDER_CONF DEFAULT glance_ca_certificates_file $SSL_BUNDLE_FILE fi + # Set glance credentials (used for location APIs) + configure_keystone_authtoken_middleware $CINDER_CONF glance glance + # Set nova credentials (used for os-assisted-snapshots) configure_keystone_authtoken_middleware $CINDER_CONF nova nova iniset $CINDER_CONF nova region_name "$REGION_NAME" diff --git a/lib/glance b/lib/glance index b596b53271..31a9ae9745 100644 --- a/lib/glance +++ b/lib/glance @@ -503,7 +503,9 @@ function configure_glance { function create_glance_accounts { if is_service_enabled g-api; then - create_service_user "glance" + # When cinder talk to glance service APIs user needs service + # role for RBAC checks and admin role for cinder to access images. 
+ create_service_user "glance" "admin" # required for swift access if is_service_enabled s-proxy; then From f09da620cb5973f9a77233a700b06612462678e5 Mon Sep 17 00:00:00 2001 From: Yatin Karel Date: Mon, 15 Sep 2025 17:16:50 +0530 Subject: [PATCH 553/574] Restore os_CODENAME for old rhel distros With [1] fips based jobs which runs on 9-stream started to fail as os_CODENAME not applicable on those. This patch adds fallback as before. Moving fips jobs to 10-stream/rocky requires some more work due to [2] [1] https://review.opendev.org/c/openstack/devstack/+/960342 [2] https://fedoraproject.org/wiki/Changes/RemoveFipsModeSetup Change-Id: I6d7ba4f5698e9b4837b29662b0b7f883b3c5de35 Signed-off-by: Yatin Karel --- functions-common | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/functions-common b/functions-common index 37c1862c28..0ae51e3df1 100644 --- a/functions-common +++ b/functions-common @@ -440,7 +440,7 @@ function GetOSVersion { source /etc/os-release if [[ "${ID}" =~ (centos|rocky|rhel) ]]; then os_RELEASE=${VERSION_ID} - os_CODENAME=$(echo $VERSION | grep -oP '(?<=[(])[^)]*') + os_CODENAME=$(echo $VERSION | grep -oP '(?<=[(])[^)]*' || echo 'n/a') os_VENDOR=$(echo $NAME | tr -d '[:space:]') else _ensure_lsb_release From f72801c1081e9f63bb1a98a66950d65ee8cf6ecb Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Mon, 15 Sep 2025 12:54:09 +0100 Subject: [PATCH 554/574] lib/neutron: Prepare for move of api-paste, rootwrap conf Change-Id: I70ba357f9af668fb7a7cb737d13fe24e572eb0ff Signed-off-by: Stephen Finucane --- lib/neutron | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/lib/neutron b/lib/neutron index ea2d8e728a..dec15fb782 100644 --- a/lib/neutron +++ b/lib/neutron @@ -996,7 +996,13 @@ function _configure_neutron_plugin_agent { # It is called when q-svc is enabled. function _configure_neutron_service { Q_API_PASTE_FILE=$NEUTRON_CONF_DIR/api-paste.ini - cp $NEUTRON_DIR/etc/api-paste.ini $Q_API_PASTE_FILE + if test -r $NEUTRON_DIR/etc/neutron/api-paste.ini; then + cp $NEUTRON_DIR/etc/neutron/api-paste.ini $Q_API_PASTE_FILE + else + # TODO(stephenfin): Remove this branch once [1] merges + # [1] https://review.opendev.org/c/openstack/neutron/+/961130 + cp $NEUTRON_DIR/etc/api-paste.ini $Q_API_PASTE_FILE + fi # Update either configuration file with plugin iniset $NEUTRON_CONF DEFAULT core_plugin $Q_PLUGIN_CLASS @@ -1076,6 +1082,8 @@ function _neutron_setup_rootwrap { if test -r $NEUTRON_DIR/etc/neutron/rootwrap.conf; then sudo install -o root -g root -m 644 $NEUTRON_DIR/etc/neutron/rootwrap.conf $Q_RR_CONF_FILE else + # TODO(stephenfin): Remove this branch once [1] merges + # [1] https://review.opendev.org/c/openstack/neutron/+/961130 sudo install -o root -g root -m 644 $NEUTRON_DIR/etc/rootwrap.conf $Q_RR_CONF_FILE fi sudo sed -e "s:^filters_path=.*$:filters_path=$Q_CONF_ROOTWRAP_D:" -i $Q_RR_CONF_FILE From e4cb49c690677b437d817ad8c736edcb96b674e3 Mon Sep 17 00:00:00 2001 From: Takashi Kajinami Date: Sun, 21 Sep 2025 03:12:55 +0900 Subject: [PATCH 555/574] Remove option for apache < 2.4 apache 2.4 was released long time ago and is now available in recent operating systems. 
Change-Id: If367869e8490159f31c7d6c0207e182dd7ecb164 Signed-off-by: Takashi Kajinami --- files/apache-cinder-api.template | 12 ++---------- files/apache-horizon.template | 14 ++------------ files/apache-nova-api.template | 4 +--- files/apache-nova-metadata.template | 4 +--- 4 files changed, 6 insertions(+), 28 deletions(-) diff --git a/files/apache-cinder-api.template b/files/apache-cinder-api.template index e1246f11b6..e401803abc 100644 --- a/files/apache-cinder-api.template +++ b/files/apache-cinder-api.template @@ -6,21 +6,13 @@ Listen %PUBLICPORT% WSGIScriptAlias / %CINDER_BIN_DIR%/cinder-wsgi WSGIApplicationGroup %{GLOBAL} WSGIPassAuthorization On - = 2.4> - ErrorLogFormat "%{cu}t %M" - + ErrorLogFormat "%{cu}t %M" ErrorLog /var/log/%APACHE_NAME%/c-api.log %SSLENGINE% %SSLCERTFILE% %SSLKEYFILE% - = 2.4> - Require all granted - - - Order allow,deny - Allow from all - + Require all granted diff --git a/files/apache-horizon.template b/files/apache-horizon.template index da7a7d26c3..c6c55ecf27 100644 --- a/files/apache-horizon.template +++ b/files/apache-horizon.template @@ -21,19 +21,9 @@ Options Indexes FollowSymLinks MultiViews AllowOverride None - # Apache 2.4 uses mod_authz_host for access control now (instead of - # "Allow") - - Order allow,deny - Allow from all - - = 2.4> - Require all granted - + Require all granted - = 2.4> - ErrorLogFormat "%{cu}t %M" - + ErrorLogFormat "%{cu}t %M" ErrorLog /var/log/%APACHE_NAME%/horizon_error.log LogLevel warn CustomLog /var/log/%APACHE_NAME%/horizon_access.log combined diff --git a/files/apache-nova-api.template b/files/apache-nova-api.template index bcf406edf3..66fcf73cf2 100644 --- a/files/apache-nova-api.template +++ b/files/apache-nova-api.template @@ -6,9 +6,7 @@ Listen %PUBLICPORT% WSGIScriptAlias / %PUBLICWSGI% WSGIApplicationGroup %{GLOBAL} WSGIPassAuthorization On - = 2.4> - ErrorLogFormat "%M" - + ErrorLogFormat "%M" ErrorLog /var/log/%APACHE_NAME%/nova-api.log %SSLENGINE% %SSLCERTFILE% diff --git a/files/apache-nova-metadata.template b/files/apache-nova-metadata.template index 6231c1ced8..64be03166e 100644 --- a/files/apache-nova-metadata.template +++ b/files/apache-nova-metadata.template @@ -6,9 +6,7 @@ Listen %PUBLICPORT% WSGIScriptAlias / %PUBLICWSGI% WSGIApplicationGroup %{GLOBAL} WSGIPassAuthorization On - = 2.4> - ErrorLogFormat "%M" - + ErrorLogFormat "%M" ErrorLog /var/log/%APACHE_NAME%/nova-metadata.log %SSLENGINE% %SSLCERTFILE% From a3e37c86cab4ec43a4a6d1c1386abf12bc034db7 Mon Sep 17 00:00:00 2001 From: Ghanshyam Maan Date: Mon, 22 Sep 2025 06:04:42 +0000 Subject: [PATCH 556/574] Update DEVSTACK_SERIES to 2026.1 stable/2025.2 branch has been created now and current master is for 2026.1 Change-Id: Ibec78664417207ca7784c548ab15c1c6ef0e0663 Signed-off-by: Ghanshyam Maan --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index 1e306cc685..93f8b1cd6d 100644 --- a/stackrc +++ b/stackrc @@ -252,7 +252,7 @@ REQUIREMENTS_DIR=${REQUIREMENTS_DIR:-$DEST/requirements} # Setting the variable to 'ALL' will activate the download for all # libraries. -DEVSTACK_SERIES="2025.2" +DEVSTACK_SERIES="2026.1" ############## # From 093bc57518a72b63a59389df63491d476069fc5c Mon Sep 17 00:00:00 2001 From: Ghanshyam Maan Date: Mon, 22 Sep 2025 06:56:25 +0000 Subject: [PATCH 557/574] Cap stable/2025.2 network, swift, volume api_extensions for tempest This commit cap the network, volume and swift extensions on Tempest's config option api_extensions. In 2025.2, no new extension in neutron. 
and no new extensions in swift and cinder. Change-Id: I1f9a2f53fa1e513f58d7dd8706d57f2481ab3d47 Signed-off-by: Ghanshyam Maan --- lib/tempest | 61 ++++++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 58 insertions(+), 3 deletions(-) diff --git a/lib/tempest b/lib/tempest index 1ebe9c5f1f..892e7fd72a 100644 --- a/lib/tempest +++ b/lib/tempest @@ -805,7 +805,48 @@ function configure_tempest { DISABLE_NETWORK_API_EXTENSIONS+=", l3_agent_scheduler" fi - local network_api_extensions=${NETWORK_API_EXTENSIONS:-"all"} + DEFAULT_NET_EXT="address-scope,agent,allowed-address-pairs,auto-allocated-topology" + DEFAULT_NET_EXT+=",availability_zone,binding,default-subnetpools,dhcp_agent_scheduler" + DEFAULT_NET_EXT+=",dvr,ext-gw-mode,external-net,extra_dhcp_opt,extraroute,flavors" + DEFAULT_NET_EXT+=",l3-flavors,l3-ha,l3_agent_scheduler,multi-provider,net-mtu" + DEFAULT_NET_EXT+=",network-ip-availability,network_availability_zone,pagination" + DEFAULT_NET_EXT+=",port-security,project-id,provider,quotas,quota_details,rbac-policies" + DEFAULT_NET_EXT+=",revision-if-match,router,router_availability_zone,security-group,service-type,sorting" + DEFAULT_NET_EXT+=",standard-attr-description,standard-attr-revisions,standard-attr-tag,standard-attr-timestamp" + DEFAULT_NET_EXT+=",subnet-service-types,subnet_allocation,net-mtu-writable,ip-substring-filtering" + DEFAULT_NET_EXT+=",availability_zone_filter,filter-validation,empty-string-filtering,port-mac-address-regenerate" + DEFAULT_NET_EXT+=",port-security-groups-filtering,fip-port-details,binding-extended" + DEFAULT_NET_EXT+=",subnet_onboard,l3-port-ip-change-not-allowed,agent-resources-synced" + DEFAULT_NET_EXT+=",floatingip-pools,rbac-security-groups,subnetpool-prefix-ops,router-admin-state-down-before-update" + DEFAULT_NET_EXT+=",rbac-subnetpool,tag-ports-during-bulk-creation,stateful-security-group,address-group,extraroute-atomic" + DEFAULT_NET_EXT+=",port-numa-affinity-policy,rbac-address-scope,security-groups-remote-address-group,trunk,trunk-details" + DEFAULT_NET_EXT+=",rbac-address-group,port-device-profile" + DEFAULT_NET_EXT+=",multiple-external-gateways,qos-pps-minimum,l3-ext-ndp-proxy,rbac-bgpvpn" + DEFAULT_NET_EXT+=",qos-pps,ecmp_routes,bgp,floating-ip-port-forwarding-port-ranges" + # New in Yoga + DEFAULT_NET_EXT+=",security-groups-shared-filtering,security-groups-normalized-cidr,quota-check-limit" + DEFAULT_NET_EXT+=",port-resource-request-groups" + # New in Zed + DEFAULT_NET_EXT+=",port-mac-override,floating-ip-port-forwarding-detail,network-cascade-delete" + # New in 2023.1 + DEFAULT_NET_EXT+=",port-hints,floating-ip-distributed" + # New in 2023.2 + DEFAULT_NET_EXT+=",port-hint-ovs-tx-steering,enable-default-route-bfd" + DEFAULT_NET_EXT+=",enable-default-route-ecmp,standard-attr-fwaas-v2" + DEFAULT_NET_EXT+=",allowed-address-pairs-atomic,network_ha" + DEFAULT_NET_EXT+=",security-groups-rules-belongs-to-default-sg" + DEFAULT_NET_EXT+=",port-hardware-offload-type" + # New in 2024.1 + DEFAULT_NET_EXT+=",vpn-aes-ccm-gcm,tap-mirror,subnet-external-network" + DEFAULT_NET_EXT+=",port-numa-affinity-policy-socket" + # New in 2024.2 + DEFAULT_NET_EXT+=",tag-creation,quota-check-limit-default,port-trusted-vif" + DEFAULT_NET_EXT+=",uplink-status-propagation-updatable" + # New in 2025.1 + DEFAULT_NET_EXT+=",qing,vpn-aes-ctr" + # New in 2025.2: None + local network_api_extensions=${NETWORK_API_EXTENSIONS:-$DEFAULT_NET_EXT} + if [[ ! 
-z "$DISABLE_NETWORK_API_EXTENSIONS" ]]; then # Enabled extensions are either the ones explicitly specified or those available on the API endpoint network_api_extensions=${NETWORK_API_EXTENSIONS:-$(iniget $tmp_cfg_file network-feature-enabled api_extensions | tr -d " ")} @@ -817,7 +858,10 @@ function configure_tempest { fi iniset $TEMPEST_CONFIG network-feature-enabled api_extensions $network_api_extensions # Swift API Extensions - local object_storage_api_extensions=${OBJECT_STORAGE_API_EXTENSIONS:-"all"} + DEFAULT_SWIFT_OPT="account_quotas,bulk_delete,bulk_upload,container_quotas" + DEFAULT_SWIFT_OPT+=",container_sync,crossdomain,formpost,ratelimit,slo" + DEFAULT_SWIFT_OPT+=",staticweb,tempauth,tempurl,versioned_writes" + local object_storage_api_extensions=${OBJECT_STORAGE_API_EXTENSIONS:-$DEFAULT_SWIFT_OPT} if [[ ! -z "$DISABLE_OBJECT_STORAGE_API_EXTENSIONS" ]]; then # Enabled extensions are either the ones explicitly specified or those available on the API endpoint object_storage_api_extensions=${OBJECT_STORAGE_API_EXTENSIONS:-$(iniget $tmp_cfg_file object-storage-feature-enabled discoverable_apis | tr -d " ")} @@ -826,7 +870,18 @@ function configure_tempest { fi iniset $TEMPEST_CONFIG object-storage-feature-enabled discoverable_apis $object_storage_api_extensions # Cinder API Extensions - local volume_api_extensions=${VOLUME_API_EXTENSIONS:-"all"} + DEFAULT_VOL_EXT="OS-SCH-HNT,backups,capabilities,cgsnapshots,consistencygroups" + DEFAULT_VOL_EXT+=",encryption,os-admin-actions,os-availability-zone" + DEFAULT_VOL_EXT+=",os-extended-services,os-extended-snapshot-attributes" + DEFAULT_VOL_EXT+=",os-hosts,os-quota-class-sets,os-quota-sets" + DEFAULT_VOL_EXT+=",os-services,os-snapshot-actions,os-snapshot-manage" + DEFAULT_VOL_EXT+=",os-snapshot-unmanage,os-types-extra-specs,os-types-manage" + DEFAULT_VOL_EXT+=",os-used-limits,os-vol-host-attr,os-vol-image-meta" + DEFAULT_VOL_EXT+=",os-vol-mig-status-attr,os-vol-tenant-attr,os-volume-actions" + DEFAULT_VOL_EXT+=",os-volume-encryption-metadata,os-volume-manage" + DEFAULT_VOL_EXT+=",os-volume-transfer,os-volume-type-access" + DEFAULT_VOL_EXT+=",os-volume-unmanage,qos-specs,scheduler-stats" + local volume_api_extensions=${VOLUME_API_EXTENSIONS:-$DEFAULT_VOL_EXT} if [[ ! -z "$DISABLE_VOLUME_API_EXTENSIONS" ]]; then # Enabled extensions are either the ones explicitly specified or those available on the API endpoint volume_api_extensions=${VOLUME_API_EXTENSIONS:-$(iniget $tmp_cfg_file volume-feature-enabled api_extensions | tr -d " ")} From a8fc640b674a744e887e641ca9addc85ac46e480 Mon Sep 17 00:00:00 2001 From: Fernando Ferraz Date: Fri, 19 Sep 2025 12:10:50 -0300 Subject: [PATCH 558/574] Avoid timeout in capture-system-logs due to df command The `df` command can stall indefinitely on stale NFS mounts, causing the playbook to time out. This leads to the entire job failing with POST_FAILURE status, discarding controller logs and impacting troubleshooting. This patch changes `capture-system-logs` to run `df` with a 60s timeout to prevent hangs from stale NFS mounts. If 'df' times out, the mount output may help debug which NFS share is unresponsive. 
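As a minimal sketch of the pattern applied below (the output paths are placeholders; the role writes into its stage_dir):

    # never let a stale NFS mount hang log collection
    timeout -s 9 60s df -h > /tmp/df.txt || true
    # mount output still helps identify the unresponsive share
    mount > /tmp/mount.txt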
Change-Id: Ife3945802c93bd77d60b60e433ea09aade38a522 Signed-off-by: Fernando Ferraz --- .zuul.yaml | 1 + roles/capture-system-logs/tasks/main.yaml | 8 +++++++- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/.zuul.yaml b/.zuul.yaml index 3b0e5dbde0..d81da3d8f5 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -446,6 +446,7 @@ /etc/sudoers.d: logs '{{ stage_dir }}/iptables.txt': logs '{{ stage_dir }}/df.txt': logs + '{{ stage_dir }}/mount.txt': logs '{{ stage_dir }}/pip2-freeze.txt': logs '{{ stage_dir }}/pip3-freeze.txt': logs '{{ stage_dir }}/dpkg-l.txt': logs diff --git a/roles/capture-system-logs/tasks/main.yaml b/roles/capture-system-logs/tasks/main.yaml index 77b5ec5098..4b5ec4836b 100644 --- a/roles/capture-system-logs/tasks/main.yaml +++ b/roles/capture-system-logs/tasks/main.yaml @@ -4,7 +4,13 @@ executable: /bin/bash cmd: | sudo iptables-save > {{ stage_dir }}/iptables.txt - df -h > {{ stage_dir }}/df.txt + + # NOTE(sfernand): Run 'df' with a 60s timeout to prevent hangs from + # stale NFS mounts. + timeout -s 9 60s df -h > {{ stage_dir }}/df.txt || true + # If 'df' times out, the mount output helps debug which NFS share + # is unresponsive. + mount > {{ stage_dir }}/mount.txt for py_ver in 2 3; do if [[ `which python${py_ver}` ]]; then From f8b3c902bef911938e03d4fc4f13fc9851a843f5 Mon Sep 17 00:00:00 2001 From: Balazs Gibizer Date: Thu, 25 Sep 2025 19:39:26 +0200 Subject: [PATCH 559/574] [nova-fake-virt]Restore compute restartability Since the stable-compute-uuid nova feature the compute nodes created via VIRT_DRIVER=fake cannot be restarted as these computes are not writing the compute_id file to disk at first startup. Therefore any subsequent restart will fail as nova-compute will refuse to start due to the missing compute_id file but having a service already in the DB. After this patch fake-virt uses a variant of the fake virt driver that actually writes compute_id file to disk. To allow multiple fake computes running on the same machine each compute now has a separate state_path created so each can store a separate compute_id file. Signed-off-by: Balazs Gibizer Change-Id: I813cab3c89554d1e319b2b3e5c3affd8ec5d887e --- lib/nova | 9 +++++++++ lib/nova_plugins/hypervisor-fake | 2 +- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/lib/nova b/lib/nova index 2357d87ee3..b289ec6d6d 100644 --- a/lib/nova +++ b/lib/nova @@ -1085,6 +1085,15 @@ function start_nova_compute { # gets its own configuration and own log file. 
local fake_conf="${NOVA_FAKE_CONF}-${i}" iniset $fake_conf DEFAULT host "${HOSTNAME}${i}" + # Ensure that each fake compute has its own state path so that it + # can have its own compute_id file + local state_path + state_path="$NOVA_STATE_PATH/${HOSTNAME}${i}" + COMPUTE_ID=$(uuidgen) + sudo mkdir -p "$state_path" + iniset $fake_conf DEFAULT state_path "$state_path" + # use the generated UUID as the stable compute node UUID + echo "$COMPUTE_ID" | sudo tee "$state_path/compute_id" run_process "n-cpu-${i}" "$NOVA_BIN_DIR/nova-compute --config-file $NOVA_CPU_CONF --config-file $fake_conf" done else diff --git a/lib/nova_plugins/hypervisor-fake b/lib/nova_plugins/hypervisor-fake index 87ee49fa4b..39cb45ca67 100644 --- a/lib/nova_plugins/hypervisor-fake +++ b/lib/nova_plugins/hypervisor-fake @@ -36,7 +36,7 @@ function cleanup_nova_hypervisor { # configure_nova_hypervisor - Set config files, create data dirs, etc function configure_nova_hypervisor { - iniset $NOVA_CONF DEFAULT compute_driver "fake.FakeDriver" + iniset $NOVA_CONF DEFAULT compute_driver "fake.FakeDriverWithoutFakeNodes" # Disable arbitrary limits iniset $NOVA_CONF quota driver nova.quota.NoopQuotaDriver } From 581d0a1d607538ffea0f41548fae25b4c6529cff Mon Sep 17 00:00:00 2001 From: Yatin Karel Date: Mon, 29 Sep 2025 17:03:48 +0530 Subject: [PATCH 560/574] [subnode setup] Fail the playbook when any node setup fails Currently on multinode jobs, the job continues to run even if the devstack setup fails on any of the subnodes, and then fails later when required conditions are not met. This patch changes it to fail as soon as the setup on any node fails, using any_errors_fatal: true. Change-Id: I2acd8a1fe0802ee1880df2ef794f8e7d7478b67b Signed-off-by: Yatin Karel --- roles/orchestrate-devstack/tasks/main.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/roles/orchestrate-devstack/tasks/main.yaml b/roles/orchestrate-devstack/tasks/main.yaml index 2b8ae01a62..b8ee7e35a7 100644 --- a/roles/orchestrate-devstack/tasks/main.yaml +++ b/roles/orchestrate-devstack/tasks/main.yaml @@ -4,6 +4,7 @@ when: inventory_hostname == 'controller' - name: Setup devstack on sub-nodes + any_errors_fatal: true block: - name: Distribute the build sshkey for the user "stack" From b20eaa6e142f2716eb1b85ed8eabd5bd71515a02 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Fri, 3 Oct 2025 09:40:46 +0200 Subject: [PATCH 561/574] Add AlmaLinux 10 platform job AlmaLinux 10 has been introduced in OpenDev to increase hardware coverage; it supports x86-64-v2 (compared to the v3 required by CentOS Stream 10 and Rocky Linux 10). Change-Id: I5c91f2166bfce51cadef9c22a22a6031223604c7 Signed-off-by: Michal Nasiadka --- .zuul.yaml | 33 ++++++++++++++++++++++++++++----- functions-common | 2 +- 2 files changed, 29 insertions(+), 6 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 3b0e5dbde0..927945d8f0 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -39,10 +39,10 @@ - controller - nodeset: - name: devstack-single-node-centos-10-stream + name: devstack-single-node-almalinux-10 nodes: - name: controller - label: centos-10-stream-8GB + label: almalinux-10-8GB groups: - name: tempest nodes: @@ -58,10 +58,15 @@ nodes: - controller -# TODO(frickler): drop this dummy nodeset once all references have been removed - nodeset: - name: devstack-single-node-opensuse-15 - nodes: [] + name: devstack-single-node-centos-10-stream + nodes: + - name: controller + label: centos-10-stream-8GB + groups: + - name: tempest + nodes: + - controller - nodeset: name: devstack-single-node-debian-bookworm @@ -73,6 +78,11
@@ nodes: - controller +# TODO(frickler): drop this dummy nodeset once all references have been removed +- nodeset: + name: devstack-single-node-opensuse-15 + nodes: [] + - nodeset: name: devstack-single-node-rockylinux-9 nodes: @@ -766,6 +776,16 @@ # we often have to rush things through devstack to stabilise the gate, # and these platforms don't have the round-the-clock support to avoid # becoming blockers in that situation. +- job: + name: devstack-platform-almalinux-purple-lion + parent: tempest-full-py3 + description: AlmaLinux 10 platform test + nodeset: devstack-single-node-almalinux-10 + timeout: 9000 + voting: false + vars: + configure_swap_size: 4096 + - job: name: devstack-platform-centos-10-stream parent: tempest-full-py3 @@ -966,6 +986,7 @@ - devstack - devstack-ipv6 - devstack-enforce-scope + - devstack-platform-almalinux-purple-lion - devstack-platform-centos-10-stream - devstack-platform-centos-9-stream - devstack-platform-debian-bookworm @@ -1051,10 +1072,12 @@ - devstack-no-tls-proxy periodic-weekly: jobs: + - devstack-platform-almalinux-purple-lion - devstack-platform-centos-10-stream - devstack-platform-centos-9-stream - devstack-platform-debian-bookworm - devstack-platform-rocky-blue-onyx + - devstack-platform-rocky-red-quartz - devstack-platform-ubuntu-noble-ovn-source - devstack-platform-ubuntu-noble-ovs - devstack-platform-ubuntu-jammy diff --git a/functions-common b/functions-common index 0ae51e3df1..c2042c4fef 100644 --- a/functions-common +++ b/functions-common @@ -438,7 +438,7 @@ function _ensure_lsb_release { function GetOSVersion { # CentOS Stream 9 or later and RHEL 9 or later do not provide lsb_release source /etc/os-release - if [[ "${ID}" =~ (centos|rocky|rhel) ]]; then + if [[ "${ID}" =~ (almalinux|centos|rocky|rhel) ]]; then os_RELEASE=${VERSION_ID} os_CODENAME=$(echo $VERSION | grep -oP '(?<=[(])[^)]*' || echo 'n/a') os_VENDOR=$(echo $NAME | tr -d '[:space:]') From 8060e12a7bddf16179098e611f7052291eafa1e3 Mon Sep 17 00:00:00 2001 From: Takashi Kajinami Date: Sun, 12 Oct 2025 13:41:57 +0900 Subject: [PATCH 562/574] Drop logic for CentOS (Stream) 8 Change-Id: I528652ced464fadd565e16e89a7e0ef826d42611 Signed-off-by: Takashi Kajinami --- files/rpms/n-cpu | 3 +-- files/rpms/nova | 3 +-- lib/neutron_plugins/ovn_agent | 3 --- stack.sh | 31 +------------------------------ tools/fixup_stuff.sh | 13 ------------- 5 files changed, 3 insertions(+), 50 deletions(-) diff --git a/files/rpms/n-cpu b/files/rpms/n-cpu index 5683862ee0..3d50f3a062 100644 --- a/files/rpms/n-cpu +++ b/files/rpms/n-cpu @@ -1,10 +1,9 @@ cryptsetup dosfstools -genisoimage # not:rhel9,rhel10 iscsi-initiator-utils libosinfo lvm2 sg3_utils # Stuff for diablo volumes sysfsutils -xorriso # not:rhel8 +xorriso diff --git a/files/rpms/nova b/files/rpms/nova index 3ed2943c1d..c323224279 100644 --- a/files/rpms/nova +++ b/files/rpms/nova @@ -1,7 +1,7 @@ conntrack-tools curl ebtables -genisoimage # not:rhel9,rhel10 required for config_drive +genisoimage iptables iputils kernel-modules # not:openEuler-22.03 @@ -11,4 +11,3 @@ polkit rabbitmq-server # NOPRIME sqlite sudo -xorriso # not:rhel8 diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index b128fde2b6..e346f4d1cd 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -407,9 +407,6 @@ function install_ovn { sudo mkdir -p $OVS_PREFIX/var/log/ovn sudo chown $(whoami) $OVS_PREFIX/var/log/ovn else - # Load fixup_ovn_centos - source ${TOP_DIR}/tools/fixup_stuff.sh - fixup_ovn_centos install_package 
$(get_packages openvswitch) install_package $(get_packages ovn) fi diff --git a/stack.sh b/stack.sh index c8be7fa216..67c7a74de9 100755 --- a/stack.sh +++ b/stack.sh @@ -366,36 +366,7 @@ fi # to speed things up SKIP_EPEL_INSTALL=$(trueorfalse False SKIP_EPEL_INSTALL) -if [[ $DISTRO == "rhel8" ]]; then - # If we have /etc/ci/mirror_info.sh assume we're on a OpenStack CI - # node, where EPEL is installed (but disabled) and already - # pointing at our internal mirror - if [[ -f /etc/ci/mirror_info.sh ]]; then - SKIP_EPEL_INSTALL=True - sudo dnf config-manager --set-enabled epel - fi - - # PowerTools repo provides libyaml-devel required by devstack itself and - # EPEL packages assume that the PowerTools repository is enable. - sudo dnf config-manager --set-enabled PowerTools - - # CentOS 8.3 changed the repository name to lower case. - sudo dnf config-manager --set-enabled powertools - - if [[ ${SKIP_EPEL_INSTALL} != True ]]; then - _install_epel - fi - # Along with EPEL, CentOS (and a-likes) require some packages only - # available in RDO repositories (e.g. OVS, or later versions of - # kvm) to run. - _install_rdo - - # NOTE(cgoncalves): workaround RHBZ#1154272 - # dnf fails for non-privileged users when expired_repos.json doesn't exist. - # RHBZ: https://bugzilla.redhat.com/show_bug.cgi?id=1154272 - # Patch: https://github.com/rpm-software-management/dnf/pull/1448 - echo "[]" | sudo tee /var/cache/dnf/expired_repos.json -elif [[ $DISTRO == "rhel9" ]]; then +if [[ $DISTRO == "rhel9" ]]; then # for CentOS Stream 9 repository sudo dnf config-manager --set-enabled crb # for RHEL 9 repository diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index faea44f1e0..fbac5ad2f1 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -89,19 +89,6 @@ EOF if is_package_installed python3-setuptools; then sudo dnf reinstall -y python3-setuptools fi - # Workaround CentOS 8-stream iputils and systemd Bug - # https://bugzilla.redhat.com/show_bug.cgi?id=2037807 - if [[ $os_VENDOR == "CentOSStream" && $os_RELEASE -eq 8 ]]; then - sudo sysctl -w net.ipv4.ping_group_range='0 2147483647' - fi -} - -function fixup_ovn_centos { - if [[ $os_VENDOR != "CentOS" ]]; then - return - fi - # OVN packages are part of this release for CentOS - yum_install centos-release-openstack-victoria } function fixup_ubuntu { From 84652d3cb8022f20cbcfaef747306c95763b8039 Mon Sep 17 00:00:00 2001 From: Balazs Gibizer Date: Wed, 22 Jan 2025 17:00:59 +0100 Subject: [PATCH 563/574] Follow up for creating parent dir for config files The commit Ia5fcfe6c63f5cc40b11f7e1f3be244d7897f26f6 wanted to enable config file creation even if its parent dir does not exist. But it missed that the caller of merge_config_file, merge_config_group, already checks for the missing directory. So creating the missing dir in merge_config_file is too late. This patch moves the dir creation to the caller. Change-Id: Ied0f321f31bf8888dce71cc18749f35dde303390 Signed-off-by: Balazs Gibizer --- inc/meta-config | 13 ++++++++----- tests/test_meta_config.sh | 24 +++++++++++++++++++----- 2 files changed, 27 insertions(+), 10 deletions(-) diff --git a/inc/meta-config b/inc/meta-config index b9d9649e4b..1215bb8307 100644 --- a/inc/meta-config +++ b/inc/meta-config @@ -90,7 +90,6 @@ function merge_config_file { local real_configfile real_configfile=$(eval echo $configfile) if [ !
-f $real_configfile ]; then - mkdir -p $(dirname $real_configfile) || die $LINENO "could not create the directory of $real_configfile ($configfile)" touch $real_configfile || die $LINENO "could not create config file $real_configfile ($configfile)" fi @@ -186,11 +185,15 @@ function merge_config_group { break fi dir=$(dirname $realconfigfile) - if [[ -d $dir ]]; then - merge_config_file $localfile $group $configfile - else - die $LINENO "bogus config file specification $configfile ($configfile=$realconfigfile, $dir is not a directory)" + + test -e $dir && ! test -d $dir && die $LINENO "bogus config file specification $configfile ($configfile=$realconfigfile, $dir exists but it is not a directory)" + + if ! [[ -e $dir ]] ; then + sudo mkdir -p $dir || die $LINENO "could not create the directory of $real_configfile ($configfile)" + sudo chown ${STACK_USER} $dir fi + + merge_config_file $localfile $group $configfile done done } diff --git a/tests/test_meta_config.sh b/tests/test_meta_config.sh index 087aaf468b..30479f245a 100755 --- a/tests/test_meta_config.sh +++ b/tests/test_meta_config.sh @@ -137,6 +137,9 @@ foo=bar [some] random=config +[[test12|run_tests.sh/test.conf]] +foo=bar + [[test-multi-sections|test-multi-sections.conf]] [sec-1] cfg_item1 = abcd @@ -389,13 +392,12 @@ EXPECT_VAL=0 check_result "$VAL" "$EXPECT_VAL" set -e -echo -n "merge_config_group test10 not directory: " +echo -n "merge_config_group test10 create directory: " set +e -# function is expected to fail and exit, running it -# in a subprocess to let this script proceed -(merge_config_group test.conf test10) +STACK_USER=$(id -u -n) +merge_config_group test.conf test10 VAL=$? -EXPECT_VAL=255 +EXPECT_VAL=0 check_result "$VAL" "$EXPECT_VAL" set -e @@ -414,9 +416,21 @@ random = config non = sense' check_result "$VAL" "$EXPECT_VAL" +echo -n "merge_config_group test12 directory as file: " +set +e +# function is expected to fail and exit, running it +# in a subprocess to let this script proceed +(merge_config_group test.conf test12) +VAL=$? +EXPECT_VAL=255 +check_result "$VAL" "$EXPECT_VAL" +set -e + rm -f test.conf test1c.conf test2a.conf \ test-space.conf test-equals.conf test-strip.conf \ test-colon.conf test-env.conf test-multiline.conf \ test-multi-sections.conf test-same.conf rm -rf test-etc +rm -rf does-not-exist-dir + From eee708742af669833d6a85d4c7289accb49c18a2 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Fri, 31 Oct 2025 17:09:11 +0000 Subject: [PATCH 564/574] Remove use of pkg_resources Change-Id: I5d0697f39bab0a5ff956c3cc41c26ffe601ef6b9 Signed-off-by: Stephen Finucane --- inc/python | 3 +-- tools/fixup_stuff.sh | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/inc/python b/inc/python index cd90ac82c6..3969c1fa82 100644 --- a/inc/python +++ b/inc/python @@ -273,8 +273,7 @@ function use_library_from_git { function lib_installed_from_git { local name=$1 local safe_name - safe_name=$(python -c "from pkg_resources import safe_name; \ print(safe_name('${name}'))") + safe_name=$(python -c "from packaging.utils import canonicalize_name; print(canonicalize_name('${name}'))") # Note "pip freeze" doesn't always work here, because it tries to # be smart about finding the remote of the git repo the package # was installed from.
This doesn't work with zuul which clones diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index faea44f1e0..d8283b2591 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -84,7 +84,7 @@ EOF # python3-setuptools RPM are deleted, it breaks some tools such as semanage # (used in diskimage-builder) that use the -s flag of the python # interpreter, enforcing the use of the packages from /usr/lib. - # Importing setuptools/pkg_resources in a such environment fails. + # Importing setuptools in a such environment fails. # Enforce the package re-installation to fix those applications. if is_package_installed python3-setuptools; then sudo dnf reinstall -y python3-setuptools From 1c86258e6b0ccf95e6119d3a6271afa38e05d0a3 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Thu, 2 Oct 2025 17:48:03 +0100 Subject: [PATCH 565/574] Enable response validation in Keystone Unlike other projects, Keystone was previously enabling validation for all responses. This is a bad idea (TM). Quoting from the Keystone docs for the new '[api] response_validation' option added in [1]: 'warn' is the current recommendation for production environments. If you find it necessary to enable the 'ignore' option, please report the issues you are seeing to the Keystone team so we can improve our schemas. 'error' should not be used in a production environment. This is because schema validation happens *after* the response body has been generated, meaning any side effects will still happen and the call may be non-idempotent despite the user receiving a HTTP 500 error. DevStack is not used for production environments and is instead the test harness used for the bulk of our integration testing. We *do* want failed response validation to result in an error, since it will highlight bugs in our schemas. Thus, we should override the default value for this option. [1] https://review.opendev.org/c/openstack/keystone/+/962851 Change-Id: I9fc2c5dce9511165ad2c1ab18db5eb439d357d9b Signed-off-by: Stephen Finucane Related-bug: #2126676 --- lib/keystone | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/keystone b/lib/keystone index 241909cb9d..840103b9f4 100644 --- a/lib/keystone +++ b/lib/keystone @@ -195,6 +195,10 @@ function configure_keystone { iniset $KEYSTONE_CONF cache backend $CACHE_BACKEND iniset $KEYSTONE_CONF cache memcache_servers $MEMCACHE_SERVERS + # Enable errors if response validation fails. We want this enabled in CI + # and development contexts to highlights bugs in our response schemas. + iniset $KEYSTONE_CONF api response_validation error + iniset_rpc_backend keystone $KEYSTONE_CONF oslo_messaging_notifications local service_port=$KEYSTONE_SERVICE_PORT From 3566a15b8eeebb0dfc36e47f87129b108d2980e1 Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Thu, 2 Oct 2025 17:46:42 +0100 Subject: [PATCH 566/574] Enable response validation in Nova Per inline description Change-Id: I85c959461e4e96b69d252e06fc697a1c61488411 Signed-off-by: Stephen Finucane --- lib/nova | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/nova b/lib/nova index b289ec6d6d..460b4adc85 100644 --- a/lib/nova +++ b/lib/nova @@ -453,6 +453,10 @@ function create_nova_conf { iniset $NOVA_CONF DEFAULT metadata_listen "$NOVA_SERVICE_LISTEN_ADDRESS" iniset $NOVA_CONF DEFAULT shutdown_timeout $NOVA_SHUTDOWN_TIMEOUT + # Enable errors if response validation fails. We want this enabled in CI + # and development contexts to highlights bugs in our response schemas. 
+ iniset $NOVA_CONF api response_validation error + iniset $NOVA_CONF key_manager backend nova.keymgr.conf_key_mgr.ConfKeyManager iniset $NOVA_CONF DEFAULT report_interval $NOVA_SERVICE_REPORT_INTERVAL From a041a7fc66453958ce1d34421330fd27393bbd65 Mon Sep 17 00:00:00 2001 From: Ghanshyam Maan Date: Thu, 6 Nov 2025 18:39:36 +0000 Subject: [PATCH 567/574] Re-add the ironic job in gate Ironic job ironic-tempest-bios-ipmi-direct-tinyipa has been renamed to ironic-tempest-bios-ipmi-direct; see https://review.opendev.org/c/openstack/ironic/+/950192 Zuul did not give any error or warning when this job was silently ignored, and it went unnoticed until I manually checked the gate jobs. Change-Id: I9b6bb94456ccfd17c2e38cdaa772aef372d169d0 Signed-off-by: Ghanshyam Maan --- .zuul.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 927945d8f0..a751c70a6a 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -998,7 +998,7 @@ - devstack-multinode - devstack-unit-tests - openstack-tox-bashate - - ironic-tempest-bios-ipmi-direct-tinyipa + - ironic-tempest-bios-ipmi-direct - swift-dsvm-functional - grenade: irrelevant-files: *common-irrelevant-files @@ -1031,7 +1031,7 @@ - openstack-tox-bashate - neutron-ovs-grenade-multinode: irrelevant-files: *common-irrelevant-files - - ironic-tempest-bios-ipmi-direct-tinyipa + - ironic-tempest-bios-ipmi-direct - swift-dsvm-functional - grenade: irrelevant-files: *common-irrelevant-files From f8ebb4939c46eed2f69bece7a7d9c8ff31b61353 Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Thu, 10 Jul 2025 21:15:21 +0200 Subject: [PATCH 568/574] Add support for trixie and platform job Dropping the libapache2-mod-proxy-uwsgi package for debuntu, as it is no longer needed for Jammy, Bookworm and beyond. libpcre3-dev is removed from the set of packages pre-installed for Debian systems. This change adds both single-node and two-node nodesets for trixie. Signed-off-by: Dr.
Jens Harbott Signed-off-by: Sean Mooney Change-Id: Ib056d2ad64b31657ea8ef9359fed78e589b01b88 --- .zuul.yaml | 51 +++++++++++++++++++++++++++++++++++++++++++++ files/debs/general | 2 -- lib/apache | 2 +- lib/databases/mysql | 14 +++++++++++-- stack.sh | 3 +-- 5 files changed, 65 insertions(+), 7 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 927945d8f0..357c085fe9 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -68,6 +68,16 @@ nodes: - controller +- nodeset: + name: devstack-single-node-debian-trixie + nodes: + - name: controller + label: debian-trixie-8GB + groups: + - name: tempest + nodes: + - controller + - nodeset: name: devstack-single-node-debian-bookworm nodes: @@ -383,6 +393,36 @@ nodes: - compute1 +- nodeset: + name: devstack-two-node-debian-trixie + nodes: + - name: controller + label: debian-trixie-8GB + - name: compute1 + label: debian-trixie-8GB + groups: + # Node where tests are executed and test results collected + - name: tempest + nodes: + - controller + # Nodes running the compute service + - name: compute + nodes: + - controller + - compute1 + # Nodes that are not the controller + - name: subnode + nodes: + - compute1 + # Switch node for multinode networking setup + - name: switch + nodes: + - controller + # Peer nodes for multinode networking setup + - name: peers + nodes: + - compute1 + - job: name: devstack-base parent: openstack-multinode-fips @@ -806,6 +846,15 @@ timeout: 9000 voting: false +- job: + name: devstack-platform-debian-trixie + parent: tempest-full-py3 + description: Debian Trixie platform test + nodeset: devstack-single-node-debian-trixie + timeout: 9000 + vars: + configure_swap_size: 4096 + - job: name: devstack-platform-debian-bookworm parent: tempest-full-py3 @@ -990,6 +1039,7 @@ - devstack-platform-centos-10-stream - devstack-platform-centos-9-stream - devstack-platform-debian-bookworm + - devstack-platform-debian-trixie - devstack-platform-rocky-blue-onyx - devstack-platform-rocky-red-quartz - devstack-platform-ubuntu-noble-ovn-source @@ -1021,6 +1071,7 @@ - devstack - devstack-ipv6 - devstack-platform-debian-bookworm + - devstack-platform-debian-trixie - devstack-platform-ubuntu-noble # NOTE(danms): Disabled due to instability, see comment in the job # definition above. diff --git a/files/debs/general b/files/debs/general index 364f3cc6e2..1e63e4f582 100644 --- a/files/debs/general +++ b/files/debs/general @@ -11,10 +11,8 @@ gettext # used for compiling message catalogs git graphviz # needed for docs iputils-ping -libapache2-mod-proxy-uwsgi libffi-dev # for pyOpenSSL libjpeg-dev # Pillow 3.0.0 -libpcre3-dev # for python-pcre libpq-dev # psycopg2 libssl-dev # for pyOpenSSL libsystemd-dev # for systemd-python diff --git a/lib/apache b/lib/apache index c49da711e1..b3379a7cde 100644 --- a/lib/apache +++ b/lib/apache @@ -82,7 +82,7 @@ function install_apache_uwsgi { fi if is_ubuntu; then - local pkg_list="uwsgi uwsgi-plugin-python3 libapache2-mod-proxy-uwsgi" + local pkg_list="uwsgi uwsgi-plugin-python3" install_package ${pkg_list} # NOTE(ianw) 2022-02-03 : Fedora 35 needs to skip this and fall # into the install-from-source because the upstream packages diff --git a/lib/databases/mysql b/lib/databases/mysql index 629014c1d8..a47580ca3d 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -20,7 +20,7 @@ if [[ -z "$MYSQL_SERVICE_NAME" ]]; then MYSQL_SERVICE_NAME=mysql if is_fedora && ! 
is_oraclelinux; then MYSQL_SERVICE_NAME=mariadb - elif [[ "$DISTRO" =~ bookworm|bullseye ]]; then + elif [[ "$DISTRO" =~ trixie|bookworm|bullseye ]]; then MYSQL_SERVICE_NAME=mariadb fi fi @@ -107,7 +107,7 @@ function configure_database_mysql { # for Ubuntu 22.04+ because the authorization model change in # version 10.4 of mariadb. See # https://mariadb.org/authentication-in-mariadb-10-4/ - if ! (is_ubuntu && [[ ! "$DISTRO" =~ bookworm|bullseye ]] && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]); then + if ! (is_ubuntu && [[ ! "$DISTRO" =~ trixie|bookworm|bullseye ]] && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]); then sudo mysqladmin -u root password $DATABASE_PASSWORD || true fi @@ -119,6 +119,16 @@ function configure_database_mysql { local cmd_args="-uroot -p$DATABASE_PASSWORD -h$SERVICE_LOCAL_HOST " fi + # Workaround for mariadb > 11.6.2, + # see https://bugs.launchpad.net/nova/+bug/2116186/comments/3 + min_db_ver="11.6.2" + db_version=$(sudo mysql ${cmd_args} -e "select version();" -sN | cut -d '-' -f 1) + max_db_ver=$(printf '%s\n' ${min_db_ver} ${db_version} | sort -V | tail -n 1) + if [[ "${min_db_ver}" != "${max_db_ver}" ]]; then + iniset -sudo $my_conf mysqld innodb_snapshot_isolation OFF + restart_service $MYSQL_SERVICE_NAME + fi + # In mariadb e.g. on Ubuntu socket plugin is used for authentication # as root so it works only as sudo. To restore old "mysql like" behaviour, # we need to change auth plugin for root user diff --git a/stack.sh b/stack.sh index a7f6a5e903..5ef98eb7b7 100755 --- a/stack.sh +++ b/stack.sh @@ -1,6 +1,5 @@ #!/usr/bin/env bash - # ``stack.sh`` is an opinionated OpenStack developer installation. It # installs and configures various combinations of **Cinder**, **Glance**, # **Horizon**, **Keystone**, **Nova**, **Neutron**, and **Swift** @@ -230,7 +229,7 @@ write_devstack_version # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -SUPPORTED_DISTROS="bookworm|jammy|noble|rhel9|rhel10" +SUPPORTED_DISTROS="trixie|bookworm|jammy|noble|rhel9|rhel10" if [[ ! ${DISTRO} =~ $SUPPORTED_DISTROS ]]; then echo "WARNING: this script has not been tested on $DISTRO" From 9a0db4f4999e2aa5923ed32452f1d2e41ae4ea55 Mon Sep 17 00:00:00 2001 From: Rodolfo Alonso Hernandez Date: Wed, 16 Jul 2025 11:38:12 +0000 Subject: [PATCH 569/574] [Neutron] Add TARGET_ENABLE_OVN_AGENT variable to enable OVN agent The new flag ``TARGET_ENABLE_OVN_AGENT`` will be disabled by default. If enabled: * The OVN agent will be enabled, regardless of not being configured. * The OVN Metadata agent will be disabled, regardless of being configured. This variable will be used, initially, in the grenade jobs. It will be used to test the migration from the OVN Metadata agent to the OVN agent. This variable will be removed in 2026.2, along with the OVN Metadata agent, that is set as deprecated and marked for removal. Related-Bug: #2112313 Signed-off-by: Rodolfo Alonso Hernandez Change-Id: I8f91e1cb8543da489f495b8cf5196e606a0f5eea --- lib/neutron_plugins/ovn_agent | 44 ++++++++++++++++++++++++++++------- 1 file changed, 35 insertions(+), 9 deletions(-) diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index b128fde2b6..0e87b473e9 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -99,6 +99,13 @@ OVN_META_DATA_HOST=${OVN_META_DATA_HOST:-$(ipv6_unquote $SERVICE_HOST)} # The OVN agent is configured, by default, with the "metadata" extension. 
OVN_AGENT_CONF=$NEUTRON_CONF_DIR/plugins/ml2/ovn_agent.ini OVN_AGENT_EXTENSIONS=${OVN_AGENT_EXTENSIONS:-metadata} +# The variable TARGET_ENABLE_OVN_AGENT, if True, overrides the OVN Metadata +# agent service (q-ovn-metadata-agent neutron-ovn-metadata-agent) and the OVN +# agent service (q-ovn-agent neutron-ovn-agent) configuration, always disabling +# the first one (OVN Metadata agent) and enabling the second (OVN agent). +# This variable will be removed in 2026.2, along with the OVN Metadata agent +# removal. +TARGET_ENABLE_OVN_AGENT=$(trueorfalse False TARGET_ENABLE_OVN_AGENT) # If True (default) the node will be considered a gateway node. ENABLE_CHASSIS_AS_GW=$(trueorfalse True ENABLE_CHASSIS_AS_GW) @@ -301,6 +308,21 @@ function create_public_bridge { _configure_public_network_connectivity } +function is_ovn_metadata_agent_enabled { + if is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent && [[ "$TARGET_ENABLE_OVN_AGENT" == "False" ]]; then + return 0 + fi + return 1 +} + +function is_ovn_agent_enabled { + if is_service_enabled q-ovn-agent neutron-ovn-agent || [[ "$TARGET_ENABLE_OVN_AGENT" == "True" ]]; then + enable_service q-ovn-agent + return 0 + fi + return 1 + +} # OVN compilation functions # ------------------------- @@ -498,9 +520,9 @@ function configure_ovn_plugin { inicomment /$Q_PLUGIN_CONF_FILE network_log local_output_log_base="$Q_LOG_DRIVER_LOG_BASE" fi - if is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent; then + if is_ovn_metadata_agent_enabled; then populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_metadata_enabled=True - elif is_service_enabled q-ovn-agent neutron-ovn-agent && [[ "$OVN_AGENT_EXTENSIONS" =~ 'metadata' ]]; then + elif is_ovn_agent_enabled && [[ "$OVN_AGENT_EXTENSIONS" =~ 'metadata' ]]; then populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_metadata_enabled=True else populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_metadata_enabled=False @@ -521,9 +543,9 @@ function configure_ovn_plugin { fi if is_service_enabled n-api-meta ; then - if is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent; then + if is_ovn_metadata_agent_enabled; then iniset $NOVA_CONF neutron service_metadata_proxy True - elif is_service_enabled q-ovn-agent neutron-ovn-agent && [[ "$OVN_AGENT_EXTENSIONS" =~ 'metadata' ]]; then + elif is_ovn_agent_enabled && [[ "$OVN_AGENT_EXTENSIONS" =~ 'metadata' ]]; then iniset $NOVA_CONF neutron service_metadata_proxy True fi fi @@ -558,10 +580,10 @@ function configure_ovn { # Metadata local sample_file="" local config_file="" - if is_service_enabled q-ovn-agent neutron-ovn-agent && [[ "$OVN_AGENT_EXTENSIONS" =~ 'metadata' ]] && is_service_enabled ovn-controller; then + if is_ovn_agent_enabled && [[ "$OVN_AGENT_EXTENSIONS" =~ 'metadata' ]] && is_service_enabled ovn-controller; then sample_file=$NEUTRON_DIR/etc/neutron/plugins/ml2/ovn_agent.ini.sample config_file=$OVN_AGENT_CONF - elif is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent && is_service_enabled ovn-controller; then + elif is_ovn_metadata_agent_enabled && is_service_enabled ovn-controller; then sample_file=$NEUTRON_DIR/etc/neutron_ovn_metadata_agent.ini.sample config_file=$OVN_META_CONF fi @@ -758,13 +780,13 @@ function start_ovn { fi fi - if is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent; then + if is_ovn_metadata_agent_enabled; then run_process q-ovn-metadata-agent "$NEUTRON_OVN_BIN_DIR/$NEUTRON_OVN_METADATA_BINARY --config-file $OVN_META_CONF" # Format logging setup_logging $OVN_META_CONF fi - if 
is_service_enabled q-ovn-agent neutron-ovn-agent; then + if is_ovn_agent_enabled; then run_process q-ovn-agent "$NEUTRON_OVN_BIN_DIR/$NEUTRON_OVN_AGENT_BINARY --config-file $OVN_AGENT_CONF" # Format logging setup_logging $OVN_AGENT_CONF @@ -786,13 +808,17 @@ function _stop_process { } function stop_ovn { + # NOTE(ralonsoh): this check doesn't use "is_ovn_metadata_agent_enabled", + # instead it relies only in the configured services, disregarding the + # flag "TARGET_ENABLE_OVN_AGENT". It is needed to force the OVN Metadata + # agent stop in case the flag "TARGET_ENABLE_OVN_AGENT" is set. if is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent; then # pkill takes care not to kill itself, but it may kill its parent # sudo unless we use the "ps | grep [f]oo" trick sudo pkill -9 -f "[h]aproxy" || : _stop_process "devstack@q-ovn-metadata-agent.service" fi - if is_service_enabled q-ovn-agent neutron-ovn-agent; then + if is_ovn_agent_enabled; then # pkill takes care not to kill itself, but it may kill its parent # sudo unless we use the "ps | grep [f]oo" trick sudo pkill -9 -f "[h]aproxy" || : From e8a11f5070fef8b9d757155b51da16481bac9aed Mon Sep 17 00:00:00 2001 From: "Dr. Jens Harbott" Date: Fri, 21 Nov 2025 21:01:50 +0100 Subject: [PATCH 570/574] Drop lib/oslo from unstack.sh This was missed in [0]. [0] I6a66359c19d0385beafb4c5e57b6ec3cd6d9cc54 Signed-off-by: Dr. Jens Harbott Change-Id: Idaee45beb63b2badc72c4a70442c5b9aa7fb19fe --- unstack.sh | 1 - 1 file changed, 1 deletion(-) diff --git a/unstack.sh b/unstack.sh index 29c80718f8..8e8996c63b 100755 --- a/unstack.sh +++ b/unstack.sh @@ -61,7 +61,6 @@ source $TOP_DIR/lib/tls # Source project function libraries source $TOP_DIR/lib/infra -source $TOP_DIR/lib/oslo source $TOP_DIR/lib/lvm source $TOP_DIR/lib/horizon source $TOP_DIR/lib/keystone From 5d602fef2a7f3ac153c907304a8f43b38c0c1a50 Mon Sep 17 00:00:00 2001 From: Ghanshyam Date: Fri, 21 Nov 2025 20:40:07 +0000 Subject: [PATCH 571/574] Revert "Cap stable/2025.2 network, swift, volume api_extensions for tempest" This reverts commit 093bc57518a72b63a59389df63491d476069fc5c. Reason for revert: This is done by mistake on master. 
It needs to be done on stable/2025.2 https://review.opendev.org/c/openstack/devstack/+/968073 Change-Id: I8bd7b732c181f3abb015a57b4445332614f8c6c9 Signed-off-by: Ghanshyam --- lib/tempest | 61 +++-------------------------------------------------- 1 file changed, 3 insertions(+), 58 deletions(-) diff --git a/lib/tempest b/lib/tempest index 892e7fd72a..1ebe9c5f1f 100644 --- a/lib/tempest +++ b/lib/tempest @@ -805,48 +805,7 @@ function configure_tempest { DISABLE_NETWORK_API_EXTENSIONS+=", l3_agent_scheduler" fi - DEFAULT_NET_EXT="address-scope,agent,allowed-address-pairs,auto-allocated-topology" - DEFAULT_NET_EXT+=",availability_zone,binding,default-subnetpools,dhcp_agent_scheduler" - DEFAULT_NET_EXT+=",dvr,ext-gw-mode,external-net,extra_dhcp_opt,extraroute,flavors" - DEFAULT_NET_EXT+=",l3-flavors,l3-ha,l3_agent_scheduler,multi-provider,net-mtu" - DEFAULT_NET_EXT+=",network-ip-availability,network_availability_zone,pagination" - DEFAULT_NET_EXT+=",port-security,project-id,provider,quotas,quota_details,rbac-policies" - DEFAULT_NET_EXT+=",revision-if-match,router,router_availability_zone,security-group,service-type,sorting" - DEFAULT_NET_EXT+=",standard-attr-description,standard-attr-revisions,standard-attr-tag,standard-attr-timestamp" - DEFAULT_NET_EXT+=",subnet-service-types,subnet_allocation,net-mtu-writable,ip-substring-filtering" - DEFAULT_NET_EXT+=",availability_zone_filter,filter-validation,empty-string-filtering,port-mac-address-regenerate" - DEFAULT_NET_EXT+=",port-security-groups-filtering,fip-port-details,binding-extended" - DEFAULT_NET_EXT+=",subnet_onboard,l3-port-ip-change-not-allowed,agent-resources-synced" - DEFAULT_NET_EXT+=",floatingip-pools,rbac-security-groups,subnetpool-prefix-ops,router-admin-state-down-before-update" - DEFAULT_NET_EXT+=",rbac-subnetpool,tag-ports-during-bulk-creation,stateful-security-group,address-group,extraroute-atomic" - DEFAULT_NET_EXT+=",port-numa-affinity-policy,rbac-address-scope,security-groups-remote-address-group,trunk,trunk-details" - DEFAULT_NET_EXT+=",rbac-address-group,port-device-profile" - DEFAULT_NET_EXT+=",multiple-external-gateways,qos-pps-minimum,l3-ext-ndp-proxy,rbac-bgpvpn" - DEFAULT_NET_EXT+=",qos-pps,ecmp_routes,bgp,floating-ip-port-forwarding-port-ranges" - # New in Yoga - DEFAULT_NET_EXT+=",security-groups-shared-filtering,security-groups-normalized-cidr,quota-check-limit" - DEFAULT_NET_EXT+=",port-resource-request-groups" - # New in Zed - DEFAULT_NET_EXT+=",port-mac-override,floating-ip-port-forwarding-detail,network-cascade-delete" - # New in 2023.1 - DEFAULT_NET_EXT+=",port-hints,floating-ip-distributed" - # New in 2023.2 - DEFAULT_NET_EXT+=",port-hint-ovs-tx-steering,enable-default-route-bfd" - DEFAULT_NET_EXT+=",enable-default-route-ecmp,standard-attr-fwaas-v2" - DEFAULT_NET_EXT+=",allowed-address-pairs-atomic,network_ha" - DEFAULT_NET_EXT+=",security-groups-rules-belongs-to-default-sg" - DEFAULT_NET_EXT+=",port-hardware-offload-type" - # New in 2024.1 - DEFAULT_NET_EXT+=",vpn-aes-ccm-gcm,tap-mirror,subnet-external-network" - DEFAULT_NET_EXT+=",port-numa-affinity-policy-socket" - # New in 2024.2 - DEFAULT_NET_EXT+=",tag-creation,quota-check-limit-default,port-trusted-vif" - DEFAULT_NET_EXT+=",uplink-status-propagation-updatable" - # New in 2025.1 - DEFAULT_NET_EXT+=",qing,vpn-aes-ctr" - # New in 2025.2: None - local network_api_extensions=${NETWORK_API_EXTENSIONS:-$DEFAULT_NET_EXT} - + local network_api_extensions=${NETWORK_API_EXTENSIONS:-"all"} if [[ ! 
-z "$DISABLE_NETWORK_API_EXTENSIONS" ]]; then # Enabled extensions are either the ones explicitly specified or those available on the API endpoint network_api_extensions=${NETWORK_API_EXTENSIONS:-$(iniget $tmp_cfg_file network-feature-enabled api_extensions | tr -d " ")} @@ -858,10 +817,7 @@ function configure_tempest { fi iniset $TEMPEST_CONFIG network-feature-enabled api_extensions $network_api_extensions # Swift API Extensions - DEFAULT_SWIFT_OPT="account_quotas,bulk_delete,bulk_upload,container_quotas" - DEFAULT_SWIFT_OPT+=",container_sync,crossdomain,formpost,ratelimit,slo" - DEFAULT_SWIFT_OPT+=",staticweb,tempauth,tempurl,versioned_writes" - local object_storage_api_extensions=${OBJECT_STORAGE_API_EXTENSIONS:-$DEFAULT_SWIFT_OPT} + local object_storage_api_extensions=${OBJECT_STORAGE_API_EXTENSIONS:-"all"} if [[ ! -z "$DISABLE_OBJECT_STORAGE_API_EXTENSIONS" ]]; then # Enabled extensions are either the ones explicitly specified or those available on the API endpoint object_storage_api_extensions=${OBJECT_STORAGE_API_EXTENSIONS:-$(iniget $tmp_cfg_file object-storage-feature-enabled discoverable_apis | tr -d " ")} @@ -870,18 +826,7 @@ function configure_tempest { fi iniset $TEMPEST_CONFIG object-storage-feature-enabled discoverable_apis $object_storage_api_extensions # Cinder API Extensions - DEFAULT_VOL_EXT="OS-SCH-HNT,backups,capabilities,cgsnapshots,consistencygroups" - DEFAULT_VOL_EXT+=",encryption,os-admin-actions,os-availability-zone" - DEFAULT_VOL_EXT+=",os-extended-services,os-extended-snapshot-attributes" - DEFAULT_VOL_EXT+=",os-hosts,os-quota-class-sets,os-quota-sets" - DEFAULT_VOL_EXT+=",os-services,os-snapshot-actions,os-snapshot-manage" - DEFAULT_VOL_EXT+=",os-snapshot-unmanage,os-types-extra-specs,os-types-manage" - DEFAULT_VOL_EXT+=",os-used-limits,os-vol-host-attr,os-vol-image-meta" - DEFAULT_VOL_EXT+=",os-vol-mig-status-attr,os-vol-tenant-attr,os-volume-actions" - DEFAULT_VOL_EXT+=",os-volume-encryption-metadata,os-volume-manage" - DEFAULT_VOL_EXT+=",os-volume-transfer,os-volume-type-access" - DEFAULT_VOL_EXT+=",os-volume-unmanage,qos-specs,scheduler-stats" - local volume_api_extensions=${VOLUME_API_EXTENSIONS:-$DEFAULT_VOL_EXT} + local volume_api_extensions=${VOLUME_API_EXTENSIONS:-"all"} if [[ ! -z "$DISABLE_VOLUME_API_EXTENSIONS" ]]; then # Enabled extensions are either the ones explicitly specified or those available on the API endpoint volume_api_extensions=${VOLUME_API_EXTENSIONS:-$(iniget $tmp_cfg_file volume-feature-enabled api_extensions | tr -d " ")} From fc31d8a37e59d6811b208b5dba6c312365abd2d8 Mon Sep 17 00:00:00 2001 From: Michal Nasiadka Date: Tue, 9 Dec 2025 17:49:41 +0100 Subject: [PATCH 572/574] almalinux: Switch to build OVS from source Since the almalinux-10 label in OpenDev runs on both x86-64-v2 and v3, and the CentOS NFV SIG OVS packages are only built for v3, these jobs have only been successful when spawned on v3 hardware. In order to make the job pass on v2 hardware, we should build OVS from source. Rename the jobs to reflect the change. Change-Id: I67b19c18b45af23cda7899f7c2edab21b8ed1ede Signed-off-by: Michal Nasiadka --- .zuul.yaml | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index 0aa2530d77..2227f185dd 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -818,7 +818,7 @@ # and these platforms don't have the round-the-clock support to avoid # becoming blockers in that situation.
- job: - name: devstack-platform-almalinux-purple-lion + name: devstack-platform-almalinux-purple-lion-ovn-source parent: tempest-full-py3 description: AlmaLinux 10 platform test nodeset: devstack-single-node-almalinux-10 @@ -826,6 +826,11 @@ voting: false vars: configure_swap_size: 4096 + devstack_localrc: + OVN_BUILD_FROM_SOURCE: True + OVN_BRANCH: "branch-24.03" + OVS_BRANCH: "branch-3.3" + OVS_SYSCONFDIR: "/usr/local/etc/openvswitch" - job: name: devstack-platform-centos-10-stream @@ -1036,7 +1041,7 @@ - devstack - devstack-ipv6 - devstack-enforce-scope - - devstack-platform-almalinux-purple-lion + - devstack-platform-almalinux-purple-lion-ovn-source - devstack-platform-centos-10-stream - devstack-platform-centos-9-stream - devstack-platform-debian-bookworm @@ -1124,7 +1129,7 @@ - devstack-no-tls-proxy periodic-weekly: jobs: - - devstack-platform-almalinux-purple-lion + - devstack-platform-almalinux-purple-lion-ovn-source - devstack-platform-centos-10-stream - devstack-platform-centos-9-stream - devstack-platform-debian-bookworm From c11b16b44de613a15833e610ebf77d539e488856 Mon Sep 17 00:00:00 2001 From: Takashi Kajinami Date: Wed, 13 Aug 2025 01:02:34 +0900 Subject: [PATCH 573/574] Fix ownership of keyring file The file should be owned by the user (STACK_USER) running the process. Note that STACK_USER may not match the current user in case stack.sh is run by root. Also, we should not assume that the group name always matches the user name. Change-Id: Iec300311cab1b1a2fa124879aa3dc6a57a6a706b Signed-off-by: Takashi Kajinami --- lib/cinder_backups/ceph | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/cinder_backups/ceph b/lib/cinder_backups/ceph index e60efbb632..e4d6b96407 100644 --- a/lib/cinder_backups/ceph +++ b/lib/cinder_backups/ceph @@ -34,7 +34,7 @@ function configure_cinder_backup_ceph { sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} crush_ruleset ${RULE_ID} fi sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_BAK_CEPH_USER} mon "profile rbd" osd "profile rbd pool=${CINDER_BAK_CEPH_POOL}, profile rbd pool=${CINDER_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring - sudo chown $(whoami):$(whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring + sudo chown $STACK_USER ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring fi iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.ceph.CephBackupDriver" From 92a70ba9dda1669f7af00cacd6cc7f41b764f5da Mon Sep 17 00:00:00 2001 From: Gregory Thiemonge Date: Tue, 6 Jan 2026 08:52:41 +0100 Subject: [PATCH 574/574] Fix centos/rhel 9/10 dependencies An error was introduced in [0]: it added an incorrect dependency on genisoimage for nova and kept a valid dependency on xorriso in n-cpu. It breaks CentOS 10 deployments; the expected behavior is to use xorriso on modern RHEL/CentOS. [0] https://review.opendev.org/c/openstack/devstack/+/963799 Change-Id: I0057f934c453ded198f8b5e58f4924260a3d2508 Signed-off-by: Gregory Thiemonge --- files/rpms/nova | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/rpms/nova b/files/rpms/nova index c323224279..d0f843bb60 100644 --- a/files/rpms/nova +++ b/files/rpms/nova @@ -1,7 +1,6 @@ conntrack-tools curl ebtables -genisoimage iptables iputils kernel-modules # not:openEuler-22.03 @@ -11,3 +10,4 @@ polkit rabbitmq-server # NOPRIME sqlite sudo +xorriso
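For local reproduction of the AlmaLinux source-build job, the devstack_localrc entries above translate directly into a DevStack local.conf. A minimal sketch, assuming a standard local.conf layout; the four OVN/OVS values are copied verbatim from the job definition above, everything else is left at DevStack defaults and is not part of the patch series:

[[local|localrc]]
# Build OVN and OVS from source instead of using distro packages, so the
# deployment also works on x86-64-v2 hardware (mirrors the
# devstack-platform-almalinux-purple-lion-ovn-source job).
OVN_BUILD_FROM_SOURCE=True
OVN_BRANCH="branch-24.03"
OVS_BRANCH="branch-3.3"
OVS_SYSCONFDIR="/usr/local/etc/openvswitch"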