diff --git a/.gitignore b/.gitignore index 8fe56ad6ab..ad153f4a07 100644 --- a/.gitignore +++ b/.gitignore @@ -38,3 +38,5 @@ stack-screenrc userrc_early AUTHORS ChangeLog +tools/dbcounter/build/ +tools/dbcounter/dbcounter.egg-info/ diff --git a/.zuul.yaml b/.zuul.yaml index c1406716fe..2227f185dd 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -1,8 +1,18 @@ - nodeset: - name: openstack-single-node + name: openstack-single-node-jammy nodes: - name: controller - label: ubuntu-xenial + label: ubuntu-jammy + groups: + - name: tempest + nodes: + - controller + +- nodeset: + name: openstack-single-node-noble + nodes: + - name: controller + label: ubuntu-noble groups: - name: tempest nodes: @@ -29,62 +39,87 @@ - controller - nodeset: - name: openstack-single-node-xenial + name: devstack-single-node-almalinux-10 + nodes: + - name: controller + label: almalinux-10-8GB + groups: + - name: tempest + nodes: + - controller + +- nodeset: + name: devstack-single-node-centos-9-stream + nodes: + - name: controller + label: centos-9-stream + groups: + - name: tempest + nodes: + - controller + +- nodeset: + name: devstack-single-node-centos-10-stream nodes: - name: controller - label: ubuntu-xenial + label: centos-10-stream-8GB groups: - name: tempest nodes: - controller - nodeset: - name: devstack-single-node-centos-7 + name: devstack-single-node-debian-trixie nodes: - name: controller - label: centos-7 + label: debian-trixie-8GB groups: - name: tempest nodes: - controller - nodeset: - name: devstack-single-node-centos-8 + name: devstack-single-node-debian-bookworm nodes: - name: controller - label: centos-8 + label: debian-bookworm groups: - name: tempest nodes: - controller +# TODO(frickler): drop this dummy nodeset once all references have been removed - nodeset: name: devstack-single-node-opensuse-15 + nodes: [] + +- nodeset: + name: devstack-single-node-rockylinux-9 nodes: - name: controller - label: opensuse-15 + label: rockylinux-9 groups: - name: tempest nodes: - controller - nodeset: - name: devstack-single-node-fedora-latest + name: devstack-single-node-rockylinux-10 nodes: - name: controller - label: fedora-32 + label: rockylinux-10-8GB groups: - name: tempest nodes: - controller - nodeset: - name: openstack-two-node + name: openstack-two-node-centos-10-stream nodes: - name: controller - label: ubuntu-xenial + label: centos-10-stream-8GB - name: compute1 - label: ubuntu-xenial + label: centos-10-stream-8GB groups: # Node where tests are executed and test results collected - name: tempest @@ -109,12 +144,12 @@ - compute1 - nodeset: - name: openstack-two-node-focal + name: openstack-two-node-centos-9-stream nodes: - name: controller - label: ubuntu-focal + label: centos-9-stream - name: compute1 - label: ubuntu-focal + label: centos-9-stream groups: # Node where tests are executed and test results collected - name: tempest @@ -139,12 +174,12 @@ - compute1 - nodeset: - name: openstack-two-node-bionic + name: openstack-two-node-jammy nodes: - name: controller - label: ubuntu-bionic + label: ubuntu-jammy - name: compute1 - label: ubuntu-bionic + label: ubuntu-jammy groups: # Node where tests are executed and test results collected - name: tempest @@ -169,12 +204,72 @@ - compute1 - nodeset: - name: openstack-two-node-xenial + name: openstack-two-node-noble nodes: - name: controller - label: ubuntu-xenial + label: ubuntu-noble - name: compute1 - label: ubuntu-xenial + label: ubuntu-noble + groups: + # Node where tests are executed and test results collected + - name: tempest + nodes: + - controller + # 
Nodes running the compute service + - name: compute + nodes: + - controller + - compute1 + # Nodes that are not the controller + - name: subnode + nodes: + - compute1 + # Switch node for multinode networking setup + - name: switch + nodes: + - controller + # Peer nodes for multinode networking setup + - name: peers + nodes: + - compute1 + +- nodeset: + name: openstack-two-node-focal + nodes: + - name: controller + label: ubuntu-focal + - name: compute1 + label: ubuntu-focal + groups: + # Node where tests are executed and test results collected + - name: tempest + nodes: + - controller + # Nodes running the compute service + - name: compute + nodes: + - controller + - compute1 + # Nodes that are not the controller + - name: subnode + nodes: + - compute1 + # Switch node for multinode networking setup + - name: switch + nodes: + - controller + # Peer nodes for multinode networking setup + - name: peers + nodes: + - compute1 + +- nodeset: + name: openstack-two-node-bionic + nodes: + - name: controller + label: ubuntu-bionic + - name: compute1 + label: ubuntu-bionic groups: # Node where tests are executed and test results collected - name: tempest @@ -268,9 +363,69 @@ - compute1 - compute2 +- nodeset: + name: devstack-two-node-debian-bookworm + nodes: + - name: controller + label: debian-bookworm + - name: compute1 + label: debian-bookworm + groups: + # Node where tests are executed and test results collected + - name: tempest + nodes: + - controller + # Nodes running the compute service + - name: compute + nodes: + - controller + - compute1 + # Nodes that are not the controller + - name: subnode + nodes: + - compute1 + # Switch node for multinode networking setup + - name: switch + nodes: + - controller + # Peer nodes for multinode networking setup + - name: peers + nodes: + - compute1 + +- nodeset: + name: devstack-two-node-debian-trixie + nodes: + - name: controller + label: debian-trixie-8GB + - name: compute1 + label: debian-trixie-8GB + groups: + # Node where tests are executed and test results collected + - name: tempest + nodes: + - controller + # Nodes running the compute service + - name: compute + nodes: + - controller + - compute1 + # Nodes that are not the controller + - name: subnode + nodes: + - compute1 + # Switch node for multinode networking setup + - name: switch + nodes: + - controller + # Peer nodes for multinode networking setup + - name: peers + nodes: + - compute1 + - job: name: devstack-base - parent: multinode + parent: openstack-multinode-fips abstract: true description: | Base abstract Devstack job. @@ -284,8 +439,13 @@ nodes (everything but the controller). required-projects: - opendev.org/openstack/devstack + # this is a workaround for a packaging bug in ubuntu + # remove when https://bugs.launchpad.net/nova/+bug/2109592 + # is resolved and oslo.config is not a dep of the novnc deb + # via the defunct python3-novnc package. + - novnc/novnc + roles: - - zuul: opendev.org/openstack/devstack-gate - zuul: opendev.org/openstack/openstack-zuul-jobs vars: devstack_localrc: @@ -302,7 +462,6 @@ LOG_COLOR: false VERBOSE: true VERBOSE_NO_TIMESTAMP: true - NOVNC_FROM_PACKAGE: true ERROR_ON_CLONE: true # Gate jobs can't deal with nested virt. Disable it by default. 
LIBVIRT_TYPE: '{{ devstack_libvirt_type | default("qemu") }}' @@ -315,12 +474,15 @@ '{{ devstack_conf_dir }}/.localrc.auto': logs '{{ devstack_conf_dir }}/.stackenv': logs '{{ devstack_log_dir }}/dstat-csv.log': logs + '{{ devstack_log_dir }}/atop': logs '{{ devstack_log_dir }}/devstacklog.txt': logs '{{ devstack_log_dir }}/devstacklog.txt.summary': logs '{{ devstack_log_dir }}/tcpdump.pcap': logs '{{ devstack_log_dir }}/worlddump-latest.txt': logs + '{{ devstack_log_dir }}/qemu.coredump': logs '{{ devstack_full_log}}': logs '{{ stage_dir }}/verify_tempest_conf.log': logs + '{{ stage_dir }}/performance.json': logs '{{ stage_dir }}/apache': logs '{{ stage_dir }}/apache_config': logs '{{ stage_dir }}/etc': logs @@ -328,16 +490,20 @@ /var/log/postgresql: logs /var/log/mysql: logs /var/log/libvirt: logs + /etc/libvirt: logs + /etc/lvm: logs /etc/sudoers: logs /etc/sudoers.d: logs '{{ stage_dir }}/iptables.txt': logs '{{ stage_dir }}/df.txt': logs + '{{ stage_dir }}/mount.txt': logs '{{ stage_dir }}/pip2-freeze.txt': logs '{{ stage_dir }}/pip3-freeze.txt': logs '{{ stage_dir }}/dpkg-l.txt': logs '{{ stage_dir }}/rpm-qa.txt': logs '{{ stage_dir }}/core': logs '{{ stage_dir }}/listen53.txt': logs + '{{ stage_dir }}/services.txt': logs '{{ stage_dir }}/deprecations.log': logs '{{ stage_dir }}/audit.log': logs /etc/ceph: logs @@ -369,7 +535,6 @@ LOG_COLOR: false VERBOSE: true VERBOSE_NO_TIMESTAMP: true - NOVNC_FROM_PACKAGE: true ERROR_ON_CLONE: true LIBVIRT_TYPE: qemu devstack_services: @@ -377,7 +542,7 @@ pre-run: playbooks/pre.yaml run: playbooks/devstack.yaml post-run: playbooks/post.yaml - irrelevant-files: + irrelevant-files: &common-irrelevant-files # Documentation related - ^.*\.rst$ - ^api-ref/.*$ @@ -385,6 +550,10 @@ - ^releasenotes/.*$ # Translations - ^.*/locale/.*po$ + # pre-commit config + - ^.pre-commit-config.yaml$ + # gitreview config + - ^.gitreview$ - job: name: devstack-minimal @@ -392,7 +561,7 @@ description: | Minimal devstack base job, intended for use by jobs that need less than the normal minimum set of required-projects. - nodeset: openstack-single-node-focal + nodeset: openstack-single-node-noble required-projects: - opendev.org/openstack/requirements vars: @@ -403,17 +572,21 @@ PUBLIC_BRIDGE_MTU: '{{ external_bridge_mtu }}' devstack_services: # Shared services - dstat: true + dstat: false etcd3: true memory_tracker: true + file_tracker: true mysql: true rabbit: true + openstack-cli-server: true group-vars: subnode: devstack_services: # Shared services - dstat: true + dstat: false memory_tracker: true + file_tracker: true + openstack-cli-server: true devstack_localrc: # Multinode specific settings HOST_IP: "{{ hostvars[inventory_hostname]['nodepool']['private_ipv4'] }}" @@ -459,8 +632,17 @@ - opendev.org/openstack/nova - opendev.org/openstack/placement - opendev.org/openstack/swift + - opendev.org/openstack/os-test-images timeout: 7200 vars: + # based on observation of the integrated gate + # tempest-integrated-compute was only using ~1.7GB of swap + # when zswap and the host turning are enabled that increase + # slightly to ~2GB. we are setting the swap size to 8GB to + # be safe and account for more complex scenarios. + # we should revisit this value after some time to see if we + # can reduce it. 
+ configure_swap_size: 8192 devstack_localrc: # Common OpenStack services settings SWIFT_REPLICAS: 1 @@ -468,6 +650,27 @@ SWIFT_HASH: 1234123412341234 DEBUG_LIBVIRT_COREDUMPS: true NOVA_VNC_ENABLED: true + OVN_DBS_LOG_LEVEL: dbg + # tune the host to optimize memory usage and hide io latency + # these settings will configure the kernel to treat the host page + # cache and swap with equal priority, and prefer deferring writes + # by changing the default swappiness, dirty_ratio and + # the vfs_cache_pressure + ENABLE_SYSCTL_MEM_TUNING: true + # the net tuning optimizes ipv4 tcp fast open and configures the default + # qdisc policy to pfifo_fast which effectively disables all qos. + # this minimizes the cpu load of the host network stack + ENABLE_SYSCTL_NET_TUNING: true + # zswap allows the kernel to compress pages in memory before swapping + # them to disk. this can reduce the amount of swap used and improve + # performance. effectively this trades a small amount of cpu for an + # increase in swap performance by reducing the amount of data + # written to disk. the overall speedup is proportional to the + # compression ratio and the speed of the swap device. + # NOTE: this option is ignored when not using nova with the libvirt + # virt driver. + NOVA_LIBVIRT_TB_CACHE_SIZE: 128 + ENABLE_ZSWAP: true devstack_local_conf: post-config: $NEUTRON_CONF: @@ -477,9 +680,10 @@ # Core services enabled for this branch. # This list replaces the test-matrix. # Shared services - dstat: true + dstat: false etcd3: true memory_tracker: true + file_tracker: true mysql: true rabbit: true tls-proxy: true @@ -496,13 +700,14 @@ n-sch: true # Placement service placement-api: true + # OVN services + ovn-controller: true + ovn-northd: true + ovs-vswitchd: true + ovsdb-server: true # Neutron services - q-agt: true - q-dhcp: true - q-l3: true - q-meta: true - q-metering: true q-svc: true + q-ovn-agent: true # Swift services s-account: true s-container: true @@ -513,7 +718,6 @@ c-bak: true c-sch: true c-vol: true - cinder: true # Services we don't need. # This section is not really needed, it's for readability. horizon: false @@ -527,15 +731,20 @@ # Core services enabled for this branch. # This list replaces the test-matrix. # Shared services - dstat: true + dstat: false memory_tracker: true + file_tracker: true tls-proxy: true # Nova services n-cpu: true # Placement services placement-client: true + # OVN services + ovn-controller: true + ovs-vswitchd: true + ovsdb-server: true # Neutron services - q-agt: true + q-ovn-agent: true # Cinder services c-bak: true c-vol: true @@ -553,21 +762,53 @@ GLANCE_HOSTPORT: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}:9292" Q_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}" NOVA_VNC_ENABLED: true + ENABLE_CHASSIS_AS_GW: false + # tune the host to optimize memory usage and hide io latency + # these settings will configure the kernel to treat the host page + # cache and swap with equal priority, and prefer deferring writes + # by changing the default swappiness, dirty_ratio and + # the vfs_cache_pressure + ENABLE_SYSCTL_MEM_TUNING: true + # the net tuning optimizes ipv4 tcp fast open and configures the default + # qdisc policy to pfifo_fast which effectively disables all qos. + # this minimizes the cpu load of the host network stack + ENABLE_SYSCTL_NET_TUNING: true + # zswap allows the kernel to compress pages in memory before swapping + # them to disk. this can reduce the amount of swap used and improve + # performance. 
effectively this trades a small amount of cpu for an + # increase in swap performance by reducing the amount of data + # written to disk. the overall speedup is proportional to the + # compression ratio and the speed of the swap device. + ENABLE_ZSWAP: true + # NOTE: this option is ignored when not using nova with the libvirt + # virt driver. + NOVA_LIBVIRT_TB_CACHE_SIZE: 128 - job: name: devstack-ipv6 parent: devstack description: | - Devstack single node job for integration gate with IPv6. + Devstack single node job for integration gate with IPv6, + all services and tunnels using IPv6 addresses. vars: devstack_localrc: SERVICE_IP_VERSION: 6 SERVICE_HOST: "" + TUNNEL_IP_VERSION: 6 + +- job: + name: devstack-enforce-scope + parent: devstack + description: | + This job runs devstack with scope checks enabled. + vars: + devstack_localrc: + ENFORCE_SCOPE: true - job: name: devstack-multinode parent: devstack - nodeset: openstack-two-node-focal + nodeset: openstack-two-node-noble description: | Simple multinode test to verify multinode functionality on devstack side. This is not meant to be used as a parent job. @@ -577,43 +818,155 @@ # and these platforms don't have the round-the-clock support to avoid # becoming blockers in that situation. - job: - name: devstack-platform-centos-8 + name: devstack-platform-almalinux-purple-lion-ovn-source + parent: tempest-full-py3 + description: AlmaLinux 10 platform test + nodeset: devstack-single-node-almalinux-10 + timeout: 9000 + voting: false + vars: + configure_swap_size: 4096 + devstack_localrc: + OVN_BUILD_FROM_SOURCE: True + OVN_BRANCH: "branch-24.03" + OVS_BRANCH: "branch-3.3" + OVS_SYSCONFDIR: "/usr/local/etc/openvswitch" + +- job: + name: devstack-platform-centos-10-stream parent: tempest-full-py3 - description: Centos 8 platform test - nodeset: devstack-single-node-centos-8 + description: CentOS 10 Stream platform test + nodeset: devstack-single-node-centos-10-stream + timeout: 9000 voting: false + +- job: + name: devstack-platform-centos-9-stream + parent: tempest-full-py3 + description: CentOS 9 Stream platform test + nodeset: devstack-single-node-centos-9-stream + vars: + devstack_localrc: + # TODO(ykarel) Remove this when moving to 10-stream + PYTHON3_VERSION: 3.11 timeout: 9000 + voting: false - job: - name: devstack-platform-opensuse-15 + name: devstack-platform-debian-trixie parent: tempest-full-py3 - description: openSUSE 15.x platform test - nodeset: devstack-single-node-opensuse-15 + description: Debian Trixie platform test + nodeset: devstack-single-node-debian-trixie + timeout: 9000 + vars: + configure_swap_size: 4096 + +- job: + name: devstack-platform-debian-bookworm + parent: tempest-full-py3 + description: Debian Bookworm platform test + nodeset: devstack-single-node-debian-bookworm + timeout: 9000 + vars: + configure_swap_size: 4096 + +- job: + name: devstack-platform-rocky-blue-onyx + parent: tempest-full-py3 + description: Rocky Linux 9 Blue Onyx platform test + nodeset: devstack-single-node-rockylinux-9 + timeout: 9000 + # NOTE(danms): This has been failing lately with some repository metadata + # errors. 
We're marking this as non-voting until it appears to have + # stabilized: + # https://zuul.openstack.org/builds?job_name=devstack-platform-rocky-blue-onyx&skip=0 voting: false + vars: + configure_swap_size: 4096 + devstack_localrc: + # TODO(ykarel) Remove this when moving to rocky10 + PYTHON3_VERSION: 3.11 - job: - name: devstack-platform-bionic + name: devstack-platform-rocky-red-quartz parent: tempest-full-py3 - description: Ubuntu Bionic platform test - nodeset: openstack-single-node-bionic + description: Rocky Linux Red Quartz platform test + nodeset: devstack-single-node-rockylinux-10 + timeout: 9000 voting: false + vars: + configure_swap_size: 4096 - job: - name: devstack-platform-fedora-latest + name: devstack-platform-ubuntu-jammy parent: tempest-full-py3 - description: Fedora latest platform test - nodeset: devstack-single-node-fedora-latest + description: Ubuntu 22.04 LTS (Jammy) platform test + nodeset: openstack-single-node-jammy + timeout: 9000 + vars: + configure_swap_size: 8192 + +- job: + name: devstack-platform-ubuntu-noble-ovn-source + parent: devstack-platform-ubuntu-noble + description: Ubuntu 24.04 LTS (noble) platform test (OVN from source) voting: false + vars: + devstack_localrc: + OVN_BUILD_FROM_SOURCE: True + OVN_BRANCH: "branch-24.03" + OVS_BRANCH: "branch-3.3" + OVS_SYSCONFDIR: "/usr/local/etc/openvswitch" - job: - name: devstack-platform-fedora-latest-virt-preview + name: devstack-platform-ubuntu-noble-ovs parent: tempest-full-py3 - description: Fedora latest platform test using the virt-preview repo. - nodeset: devstack-single-node-fedora-latest + description: Ubuntu 24.04 LTS (noble) platform test (OVS) + nodeset: openstack-single-node-noble voting: false + timeout: 9000 vars: + configure_swap_size: 8192 devstack_localrc: - ENABLE_FEDORA_VIRT_PREVIEW_REPO: true + Q_AGENT: openvswitch + Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch + Q_ML2_TENANT_NETWORK_TYPE: vxlan + devstack_services: + # Disable OVN services + ovn-northd: false + ovn-controller: false + ovs-vswitchd: false + ovsdb-server: false + # Disable Neutron ML2/OVN services + q-ovn-metadata-agent: false + # Enable Neutron ML2/OVS services + q-agt: true + q-dhcp: true + q-l3: true + q-meta: true + q-metering: true + group-vars: + subnode: + devstack_services: + # Disable OVN services + ovn-controller: false + ovs-vswitchd: false + ovsdb-server: false + # Disable Neutron ML2/OVN services + q-ovn-metadata-agent: false + # Enable Neutron ML2/OVS services + q-agt: true + +- job: + name: devstack-no-tls-proxy + parent: tempest-full-py3 + description: | + Tempest job with tls-proxy off. + + Some gates run devstack like this and it follows different code paths. + vars: + devstack_services: + tls-proxy: false - job: name: devstack-tox-base @@ -671,6 +1024,7 @@ - job: name: devstack-unit-tests + nodeset: ubuntu-noble description: | Runs unit tests on devstack project. 
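The ``devstack-platform-ubuntu-noble-ovs`` job defined above swaps the default ML2/OVN backend for ML2/OVS by changing ``Q_AGENT`` and the service list. As a rough illustration only (not part of this patch), an equivalent single-node ``local.conf`` sketch mirroring the job's settings might look like::

    [[local|localrc]]
    # Switch Neutron from the default ML2/OVN backend to ML2/OVS,
    # mirroring the devstack-platform-ubuntu-noble-ovs job above
    Q_AGENT=openvswitch
    Q_ML2_PLUGIN_MECHANISM_DRIVERS=openvswitch
    Q_ML2_TENANT_NETWORK_TYPE=vxlan
    # Drop the OVN services that are otherwise enabled by default
    disable_service ovn-northd ovn-controller ovs-vswitchd ovsdb-server
    disable_service q-ovn-metadata-agent
    # Enable the classic ML2/OVS agents instead
    enable_service q-agt q-dhcp q-l3 q-meta q-metering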
@@ -686,135 +1040,101 @@ jobs: - devstack - devstack-ipv6 - - devstack-platform-opensuse-15 - - devstack-platform-fedora-latest - - devstack-platform-centos-8 - - devstack-platform-bionic + - devstack-enforce-scope + - devstack-platform-almalinux-purple-lion-ovn-source + - devstack-platform-centos-10-stream + - devstack-platform-centos-9-stream + - devstack-platform-debian-bookworm + - devstack-platform-debian-trixie + - devstack-platform-rocky-blue-onyx + - devstack-platform-rocky-red-quartz + - devstack-platform-ubuntu-noble-ovn-source + - devstack-platform-ubuntu-noble-ovs + - devstack-platform-ubuntu-jammy - devstack-multinode - devstack-unit-tests - openstack-tox-bashate - - ironic-tempest-ipa-wholedisk-bios-agent_ipmitool-tinyipa: - voting: false - - swift-dsvm-functional: - voting: false - irrelevant-files: &dsvm-irrelevant-files - - ^.*\.rst$ - - ^doc/.*$ - - swift-dsvm-functional-py3: - voting: false - irrelevant-files: *dsvm-irrelevant-files + - ironic-tempest-bios-ipmi-direct + - swift-dsvm-functional - grenade: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ - - neutron-grenade-multinode: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ - - neutron-tempest-linuxbridge: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ + irrelevant-files: *common-irrelevant-files + - neutron-ovs-grenade-multinode: + irrelevant-files: *common-irrelevant-files - neutron-ovn-tempest-ovs-release: voting: false - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ + irrelevant-files: *common-irrelevant-files - tempest-multinode-full-py3: voting: false - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ + irrelevant-files: *common-irrelevant-files - openstacksdk-functional-devstack: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ + irrelevant-files: *common-irrelevant-files - tempest-ipv6-only: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ + irrelevant-files: *common-irrelevant-files - nova-ceph-multistore: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ + irrelevant-files: *common-irrelevant-files gate: jobs: - devstack - devstack-ipv6 + - devstack-platform-debian-bookworm + - devstack-platform-debian-trixie + - devstack-platform-ubuntu-noble + # NOTE(danms): Disabled due to instability, see comment in the job + # definition above. + # - devstack-platform-rocky-blue-onyx + - devstack-enforce-scope - devstack-multinode - devstack-unit-tests - openstack-tox-bashate - - neutron-grenade-multinode: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ - - neutron-tempest-linuxbridge: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ + - neutron-ovs-grenade-multinode: + irrelevant-files: *common-irrelevant-files + - ironic-tempest-bios-ipmi-direct + - swift-dsvm-functional - grenade: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ + irrelevant-files: *common-irrelevant-files - openstacksdk-functional-devstack: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ + irrelevant-files: *common-irrelevant-files - tempest-ipv6-only: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ + irrelevant-files: *common-irrelevant-files - nova-ceph-multistore: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ + irrelevant-files: *common-irrelevant-files # Please add a note on each job and conditions for the job not # being experimental any more, so we can keep this list somewhat # pruned. # # * nova-next: maintained by nova for unreleased/undefaulted - # things - # * neutron-fullstack-with-uwsgi: maintained by neutron for fullstack test - # when neutron-api is served by uwsgi, it's in exprimental for testing. 
- # the next cycle we can remove this job if things turn out to be - # stable enough. - # * neutron-functional-with-uwsgi: maintained by neutron for functional - # test. Next cycle we can remove this one if things turn out to be - # stable engouh with uwsgi. - # * neutron-tempest-with-uwsgi: maintained by neutron for tempest test. - # Next cycle we can remove this if everything run out stable enough. - # * nova-multi-cell: maintained by nova and currently non-voting in the + # things, this job is not experimental but often is used to test + # things that are not yet production ready or to test what will be + # the new default after a deprecation period has ended. + # * nova-multi-cell: maintained by nova and now is voting in the # check queue for nova changes but relies on devstack configuration - # * devstack-platform-fedora-latest-virt-preview: Maintained by lyarwood - # for Nova to allow early testing of the latest versions of Libvirt and - # QEMU. Should only graduate out of experimental if it ever moves into - # the check queue for Nova. experimental: jobs: - nova-multi-cell - nova-next - - neutron-fullstack-with-uwsgi - - neutron-functional-with-uwsgi - - neutron-tempest-with-uwsgi - devstack-plugin-ceph-tempest-py3: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ - - neutron-tempest-dvr: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ - - neutron-tempest-dvr-ha-multinode-full: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ + irrelevant-files: *common-irrelevant-files + - neutron-ovs-tempest-dvr: + irrelevant-files: *common-irrelevant-files + - neutron-ovs-tempest-dvr-ha-multinode-full: + irrelevant-files: *common-irrelevant-files - cinder-tempest-lvm-multibackend: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ + irrelevant-files: *common-irrelevant-files - tempest-pg-full: - irrelevant-files: - - ^.*\.rst$ - - ^doc/.*$ - - devstack-platform-fedora-latest-virt-preview + irrelevant-files: *common-irrelevant-files + - devstack-no-tls-proxy + periodic: + jobs: + - devstack-no-tls-proxy + periodic-weekly: + jobs: + - devstack-platform-almalinux-purple-lion-ovn-source + - devstack-platform-centos-10-stream + - devstack-platform-centos-9-stream + - devstack-platform-debian-bookworm + - devstack-platform-rocky-blue-onyx + - devstack-platform-rocky-red-quartz + - devstack-platform-ubuntu-noble-ovn-source + - devstack-platform-ubuntu-noble-ovs + - devstack-platform-ubuntu-jammy diff --git a/HACKING.rst b/HACKING.rst index f55aed8a07..6a91e0a6a8 100644 --- a/HACKING.rst +++ b/HACKING.rst @@ -74,8 +74,7 @@ of test of specific fragile functions in the ``functions`` and ``tools`` - Contains a collection of stand-alone scripts. While these may reference the top-level DevStack configuration they can generally be -run alone. There are also some sub-directories to support specific -environments such as XenServer. +run alone. Scripts @@ -275,9 +274,6 @@ your change even years from now -- why we were motivated to make a change at the time. -* **Reviewers** -- please see ``MAINTAINERS.rst`` for a list of people - that should be added to reviews of various sub-systems. - Making Changes, Testing, and CI ------------------------------- diff --git a/MAINTAINERS.rst b/MAINTAINERS.rst deleted file mode 100644 index d4968a6051..0000000000 --- a/MAINTAINERS.rst +++ /dev/null @@ -1,92 +0,0 @@ -MAINTAINERS -=========== - - -Overview --------- - -The following is a list of people known to have interests in -particular areas or sub-systems of devstack. 
- -It is a rather general guide intended to help seed the initial -reviewers list of a change. A +1 on a review from someone identified -as being a maintainer of its affected area is a very positive flag to -the core team for the veracity of the change. - -The ``devstack-core`` group can still be added to all reviews. - - -Format -~~~~~~ - -The format of the file is the name of the maintainer and their -gerrit-registered email. - - -Maintainers ------------ - -.. contents:: :local: - - -Ceph -~~~~ - -* Sebastien Han - -Cinder -~~~~~~ - -Fedora/CentOS/RHEL -~~~~~~~~~~~~~~~~~~ - -* Ian Wienand - -Neutron -~~~~~~~ - -MidoNet -~~~~~~~ - -* Jaume Devesa -* Ryu Ishimoto -* YAMAMOTO Takashi - -OpenDaylight -~~~~~~~~~~~~ - -* Kyle Mestery - -OpenFlow Agent (ofagent) -~~~~~~~~~~~~~~~~~~~~~~~~ - -* YAMAMOTO Takashi -* Fumihiko Kakuma - -Swift -~~~~~ - -* Chmouel Boudjnah - -SUSE -~~~~ - -* Ralf Haferkamp -* Vincent Untz - -Tempest -~~~~~~~ - -Xen -~~~ -* Bob Ball - -Zaqar (Marconi) -~~~~~~~~~~~~~~~ - -* Flavio Percoco -* Malini Kamalambal - -Oracle Linux -~~~~~~~~~~~~ -* Wiekus Beukes diff --git a/README.rst b/README.rst index f3a585a926..86b85da956 100644 --- a/README.rst +++ b/README.rst @@ -4,7 +4,7 @@ from git source trees. Goals ===== -* To quickly build dev OpenStack environments in a clean Ubuntu or Fedora +* To quickly build dev OpenStack environments in a clean Ubuntu or RockyLinux environment * To describe working configurations of OpenStack (which code branches work together? what do config files look like for those branches?) @@ -28,9 +28,9 @@ Versions The DevStack master branch generally points to trunk versions of OpenStack components. For older, stable versions, look for branches named stable/[release] in the DevStack repo. For example, you can do the -following to create a Pike OpenStack cloud:: +following to create a Zed OpenStack cloud:: - git checkout stable/pike + git checkout stable/zed ./stack.sh You can also pick specific OpenStack project releases by setting the appropriate @@ -55,7 +55,7 @@ When the script finishes executing, you should be able to access OpenStack endpoints, like so: * Horizon: http://myhost/ -* Keystone: http://myhost/identity/v2.0/ +* Keystone: http://myhost/identity/v3/ We also provide an environment file that you can use to interact with your cloud via CLI:: diff --git a/clean.sh b/clean.sh index 4cebf1d9ea..092f557a88 100755 --- a/clean.sh +++ b/clean.sh @@ -40,7 +40,7 @@ source $TOP_DIR/lib/rpc_backend source $TOP_DIR/lib/tls -source $TOP_DIR/lib/oslo +source $TOP_DIR/lib/libraries source $TOP_DIR/lib/lvm source $TOP_DIR/lib/horizon source $TOP_DIR/lib/keystone @@ -50,7 +50,6 @@ source $TOP_DIR/lib/placement source $TOP_DIR/lib/cinder source $TOP_DIR/lib/swift source $TOP_DIR/lib/neutron -source $TOP_DIR/lib/neutron-legacy set -o xtrace @@ -113,7 +112,7 @@ cleanup_rpc_backend cleanup_database # Clean out data and status -sudo rm -rf $DATA_DIR $DEST/status +sudo rm -rf $DATA_DIR $DEST/status $DEST/async # Clean out the log file and log directories if [[ -n "$LOGFILE" ]] && [[ -f "$LOGFILE" ]]; then diff --git a/doc/requirements.txt b/doc/requirements.txt index ffce3ff74c..7980b93ed7 100644 --- a/doc/requirements.txt +++ b/doc/requirements.txt @@ -4,8 +4,4 @@ Pygments docutils sphinx>=2.0.0,!=2.1.0 # BSD openstackdocstheme>=2.2.1 # Apache-2.0 -nwdiag -blockdiag -sphinxcontrib-blockdiag -sphinxcontrib-nwdiag zuul-sphinx>=0.2.0 diff --git a/doc/source/assets/images/neutron-network-1.png b/doc/source/assets/images/neutron-network-1.png new file mode 100644 
index 0000000000..7730ca93f1 Binary files /dev/null and b/doc/source/assets/images/neutron-network-1.png differ diff --git a/doc/source/assets/images/neutron-network-2.png b/doc/source/assets/images/neutron-network-2.png new file mode 100644 index 0000000000..919935119d Binary files /dev/null and b/doc/source/assets/images/neutron-network-2.png differ diff --git a/doc/source/assets/images/neutron-network-3.png b/doc/source/assets/images/neutron-network-3.png new file mode 100644 index 0000000000..34f03ed5c9 Binary files /dev/null and b/doc/source/assets/images/neutron-network-3.png differ diff --git a/doc/source/conf.py b/doc/source/conf.py index 2e17da17f8..bb0357286a 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -23,14 +23,14 @@ # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = [ 'sphinx.ext.autodoc', - 'zuul_sphinx', - 'openstackdocstheme', - 'sphinxcontrib.blockdiag', - 'sphinxcontrib.nwdiag' ] +extensions = [ + 'sphinx.ext.autodoc', + 'zuul_sphinx', + 'openstackdocstheme', +] # openstackdocstheme options -openstackdocs_repo_name = 'openstack-dev/devstack' +openstackdocs_repo_name = 'openstack/devstack' openstackdocs_pdf_link = True openstackdocs_bug_project = 'devstack' openstackdocs_bug_tag = '' diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index 22f5999174..3cfba716ca 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -181,6 +181,9 @@ values that most often need to be set. If the ``*_PASSWORD`` variables are not set here you will be prompted to enter values for them by ``stack.sh``. +.. warning:: Only use alphanumeric characters in your passwords, as some + services fail to work when using special characters. + The network ranges must not overlap with any networks in use on the host. Overlap is not uncommon as RFC-1918 'private' ranges are commonly used for both the local networking and Nova's fixed and floating ranges. @@ -279,7 +282,7 @@ number of days of old log files to keep. :: - LOGDAYS=1 + LOGDAYS=2 Some coloring is used during the DevStack runs to make it easier to see what is going on. This can be disabled with:: @@ -320,7 +323,7 @@ a file, keep service logs and disable color in the stored files. [[local|localrc]] DEST=/opt/stack/ - LOGFILE=$LOGDIR/stack.sh.log + LOGFILE=$DEST/stack.sh.log LOG_COLOR=False Database Backend ---------------- @@ -348,30 +351,21 @@ Example disabling RabbitMQ in ``local.conf``:: disable_service rabbit - Apache Frontend --------------- -The Apache web server can be enabled for wsgi services that support -being deployed under HTTPD + mod_wsgi. By default, services that -recommend running under HTTPD + mod_wsgi are deployed under Apache. To -use an alternative deployment strategy (e.g. eventlet) for services -that support an alternative to HTTPD + mod_wsgi set -``ENABLE_HTTPD_MOD_WSGI_SERVICES`` to ``False`` in your -``local.conf``. - -Each service that can be run under HTTPD + mod_wsgi also has an -override toggle available that can be set in your ``local.conf``. - -Keystone is run under Apache with ``mod_wsgi`` by default. +The Apache web server is enabled for services that support being deployed via WSGI. Today this +means HTTPD and uWSGI but historically this meant HTTPD + mod_wsgi. This +historical legacy is captured by the naming of many variables, which include +``MOD_WSGI`` rather than ``UWSGI``. 
-Example (Keystone):: - - KEYSTONE_USE_MOD_WSGI="True" - -Example (Nova):: - - NOVA_USE_MOD_WSGI="True" +Some services support alternative deployment strategies (e.g. eventlet). You +can enable these by setting ``ENABLE_HTTPD_MOD_WSGI_SERVICES`` to ``False`` in your +``local.conf``. In addition, each service that can be run under HTTPD + +mod_wsgi also has an override toggle available that can be set in your +``local.conf``. These are, however, slowly being removed as services have +adopted standardized deployment mechanisms and more generally moved away from +eventlet. Example (Swift):: @@ -381,11 +375,6 @@ Example (Heat):: HEAT_USE_MOD_WSGI="True" -Example (Cinder):: - - CINDER_USE_MOD_WSGI="True" - - Libraries from Git ------------------ @@ -521,8 +510,8 @@ behavior: can be configured with any valid IPv6 prefix. The default values make use of an auto-generated ``IPV6_GLOBAL_ID`` to comply with RFC4193. -Service Version -~~~~~~~~~~~~~~~ +Service IP Version +~~~~~~~~~~~~~~~~~~ DevStack can enable service operation over either IPv4 or IPv6 by setting ``SERVICE_IP_VERSION`` to either ``SERVICE_IP_VERSION=4`` or @@ -542,6 +531,27 @@ optionally be used to alter the default IPv6 address:: HOST_IPV6=${some_local_ipv6_address} +Tunnel IP Version +~~~~~~~~~~~~~~~~~ + +DevStack can enable tunnel operation over either IPv4 or IPv6 by +setting ``TUNNEL_IP_VERSION`` to either ``TUNNEL_IP_VERSION=4`` or +``TUNNEL_IP_VERSION=6`` respectively. + +When set to ``4`` Neutron will use an IPv4 address for tunnel endpoints, +for example, ``HOST_IP``. + +When set to ``6`` Neutron will use an IPv6 address for tunnel endpoints, +for example, ``HOST_IPV6``. + +The default value for this setting is ``4``. Dual-mode support, for +example ``4+6`` is not supported, as this value must match the address +family of the local tunnel endpoint IP(v6) address. + +The value of ``TUNNEL_IP_VERSION`` has a direct relationship to the +setting of ``TUNNEL_ENDPOINT_IP``, which will default to ``HOST_IP`` +when set to ``4``, and ``HOST_IPV6`` when set to ``6``. + Multi-node setup ~~~~~~~~~~~~~~~~ @@ -615,7 +625,7 @@ tests can be run as follows: :: $ cd /opt/stack/tempest - $ tox -efull tempest.scenario.test_network_basic_ops + $ tox -e smoke By default tempest is downloaded and the config file is generated, but the tempest package is not installed in the system's global site-packages (the @@ -628,12 +638,6 @@ outside of tox. If you would like to install it add the following to your INSTALL_TEMPEST=True -Xenserver -~~~~~~~~~ - -If you would like to use Xenserver as the hypervisor, please refer to -the instructions in ``./tools/xen/README.md``. - Cinder ~~~~~~ @@ -648,6 +652,41 @@ with ``VOLUME_BACKING_FILE_SIZE``. VOLUME_NAME_PREFIX="volume-" VOLUME_BACKING_FILE_SIZE=24G +When running highly concurrent tests, the default per-project quotas +for volumes, backups, or snapshots may be too small. These can be +adjusted by setting ``CINDER_QUOTA_VOLUMES``, ``CINDER_QUOTA_BACKUPS``, +or ``CINDER_QUOTA_SNAPSHOTS`` to the desired value. (The default for +each is 10.) + +DevStack's Cinder LVM configuration module currently supports both iSCSI and +NVMe connections, and we can choose which one to use with options +``CINDER_TARGET_HELPER``, ``CINDER_TARGET_PROTOCOL``, ``CINDER_TARGET_PREFIX``, +and ``CINDER_TARGET_PORT``. 
+ +Defaults use iSCSI with the LIO target manager:: + + CINDER_TARGET_HELPER="lioadm" + CINDER_TARGET_PROTOCOL="iscsi" + CINDER_TARGET_PREFIX="iqn.2010-10.org.openstack:" + CINDER_TARGET_PORT=3260 + +Additionally there are 3 supported transport protocols for NVMe, +``nvmet_rdma``, ``nvmet_tcp``, and ``nvmet_fc``, and when the ``nvmet`` target +is selected the protocol, prefix, and port defaults will change to more +sensible defaults for NVMe:: + + CINDER_TARGET_HELPER="nvmet" + CINDER_TARGET_PROTOCOL="nvmet_rdma" + CINDER_TARGET_PREFIX="nvme-subsystem-1" + CINDER_TARGET_PORT=4420 + +When selecting the RDMA transport protocol DevStack will create on Cinder nodes +a Software RoCE device on top of the ``HOST_IP_IFACE`` and if it is not defined +then on top of the interface with IP address ``HOST_IP`` or ``HOST_IPV6``. + +This Soft-RoCE device will always be created on the Nova compute side since we +cannot tell beforehand whether there will be an RDMA connection or not. + Keystone ~~~~~~~~ @@ -672,7 +711,6 @@ In RegionTwo: disable_service horizon KEYSTONE_SERVICE_HOST= - KEYSTONE_AUTH_HOST= REGION_NAME=RegionTwo KEYSTONE_REGION_NAME=RegionOne @@ -685,15 +723,22 @@ KEYSTONE_REGION_NAME to specify the region of Keystone service. KEYSTONE_REGION_NAME has a default value the same as REGION_NAME thus we omit it in the configuration of RegionOne. -Disabling Identity API v2 -+++++++++++++++++++++++++ +Glance +++++++ + +The default image size quota of 1GiB may be too small if larger images +are to be used. Change the default at setup time with: + +:: + + GLANCE_LIMIT_IMAGE_SIZE_TOTAL=5000 -The Identity API v2 is deprecated as of Mitaka and it is recommended to only -use the v3 API. It is possible to setup keystone without v2 API, by doing: +or at runtime via: :: - ENABLE_IDENTITY_V2=False + openstack --os-cloud devstack-system-admin registered limit set \ + --service glance --default-limit 5000 --region RegionOne image_size_total .. _arch-configuration: diff --git a/doc/source/contributor/contributing.rst b/doc/source/contributor/contributing.rst index 5e0df569f7..8b5a85b3df 100644 --- a/doc/source/contributor/contributing.rst +++ b/doc/source/contributor/contributing.rst @@ -13,7 +13,7 @@ with Devstack. Communication ~~~~~~~~~~~~~ -* IRC channel ``#openstack-qa`` at FreeNode +* IRC channel ``#openstack-qa`` at OFTC. * Mailing list (prefix subjects with ``[qa][devstack]`` for faster responses) http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-discuss @@ -42,8 +42,9 @@ Getting Your Patch Merged ~~~~~~~~~~~~~~~~~~~~~~~~~ All changes proposed to the Devstack require two ``Code-Review +2`` votes from Devstack core reviewers before one of the core reviewers can approve the patch -by giving ``Workflow +1`` vote. One exception is for patches to unblock the gate -which can be approved by single core reviewers. +by giving ``Workflow +1`` vote. There are 2 exceptions, approving patches to +unblock the gate and patches that do not relate to the Devstack's core logic, +like for example old job cleanups, can be approved by single core reviewers. Project Team Lead Duties ~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/source/debugging.rst b/doc/source/debugging.rst index fd0d9cdf74..3ca0ad94b4 100644 --- a/doc/source/debugging.rst +++ b/doc/source/debugging.rst @@ -20,6 +20,12 @@ provides consumption output when available memory is seen to be falling (i.e. processes are consuming memory). It also provides output showing locked (unswappable) memory. 
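As an aside (not part of this patch), the tracker services discussed in this section are ordinary DevStack services, so a local deployment can opt into the same monitoring that the CI jobs above use with a ``local.conf`` sketch along these lines::

    [[local|localrc]]
    # memory_tracker and file_tracker are the lightweight monitors the
    # CI jobs in this change enable
    enable_service memory_tracker
    enable_service file_tracker
    # dstat is turned off in those jobs to save resources; enable it
    # locally if you also want the dstat-csv.log output
    enable_service dstat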
+file_tracker +------------ + +The ``file_tracker`` service periodically monitors the number of +open files in the system. + tcpdump ------- diff --git a/doc/source/guides.rst b/doc/source/guides.rst index e7ec629962..e7b46b6e55 100644 --- a/doc/source/guides.rst +++ b/doc/source/guides.rst @@ -20,7 +20,7 @@ Walk through various setups used by stackers guides/neutron guides/devstack-with-nested-kvm guides/nova - guides/devstack-with-lbaas-v2 + guides/devstack-with-octavia guides/devstack-with-ldap All-In-One Single VM @@ -69,10 +69,10 @@ Nova and devstack Guide to working with nova features :doc:`Nova and devstack `. -Configure Load-Balancer Version 2 ------------------------------------ +Configure Octavia +----------------- -Guide on :doc:`Configure Load-Balancer Version 2 `. +Guide on :doc:`Configure Octavia `. Deploying DevStack with LDAP ---------------------------- diff --git a/doc/source/guides/devstack-with-lbaas-v2.rst b/doc/source/guides/devstack-with-lbaas-v2.rst deleted file mode 100644 index 5d96ca7d74..0000000000 --- a/doc/source/guides/devstack-with-lbaas-v2.rst +++ /dev/null @@ -1,145 +0,0 @@ -Devstack with Octavia Load Balancing -==================================== - -Starting with the OpenStack Pike release, Octavia is now a standalone service -providing load balancing services for OpenStack. - -This guide will show you how to create a devstack with `Octavia API`_ enabled. - -.. _Octavia API: https://docs.openstack.org/api-ref/load-balancer/v2/index.html - -Phase 1: Create DevStack + 2 nova instances --------------------------------------------- - -First, set up a vm of your choice with at least 8 GB RAM and 16 GB disk space, -make sure it is updated. Install git and any other developer tools you find -useful. - -Install devstack - -:: - - git clone https://opendev.org/openstack/devstack - cd devstack/tools - sudo ./create-stack-user.sh - cd ../.. - sudo mv devstack /opt/stack - sudo chown -R stack.stack /opt/stack/devstack - -This will clone the current devstack code locally, then setup the "stack" -account that devstack services will run under. Finally, it will move devstack -into its default location in /opt/stack/devstack. - -Edit your ``/opt/stack/devstack/local.conf`` to look like - -:: - - [[local|localrc]] - enable_plugin octavia https://opendev.org/openstack/octavia - # If you are enabling horizon, include the octavia dashboard - # enable_plugin octavia-dashboard https://opendev.org/openstack/octavia-dashboard.git - # If you are enabling barbican for TLS offload in Octavia, include it here. 
- # enable_plugin barbican https://opendev.org/openstack/barbican - - # ===== BEGIN localrc ===== - DATABASE_PASSWORD=password - ADMIN_PASSWORD=password - SERVICE_PASSWORD=password - SERVICE_TOKEN=password - RABBIT_PASSWORD=password - # Enable Logging - LOGFILE=$DEST/logs/stack.sh.log - VERBOSE=True - LOG_COLOR=True - # Pre-requisite - ENABLED_SERVICES=rabbit,mysql,key - # Horizon - enable for the OpenStack web GUI - # ENABLED_SERVICES+=,horizon - # Nova - ENABLED_SERVICES+=,n-api,n-crt,n-cpu,n-cond,n-sch,n-api-meta,n-sproxy - ENABLED_SERVICES+=,placement-api,placement-client - # Glance - ENABLED_SERVICES+=,g-api - # Neutron - ENABLED_SERVICES+=,q-svc,q-agt,q-dhcp,q-l3,q-meta,neutron - ENABLED_SERVICES+=,octavia,o-cw,o-hk,o-hm,o-api - # Cinder - ENABLED_SERVICES+=,c-api,c-vol,c-sch - # Tempest - ENABLED_SERVICES+=,tempest - # Barbican - Optionally used for TLS offload in Octavia - # ENABLED_SERVICES+=,barbican - # ===== END localrc ===== - -Run stack.sh and do some sanity checks - -:: - - sudo su - stack - cd /opt/stack/devstack - ./stack.sh - . ./openrc - - openstack network list # should show public and private networks - -Create two nova instances that we can use as test http servers: - -:: - - #create nova instances on private network - openstack server create --image $(openstack image list | awk '/ cirros-.*-x86_64-.* / {print $2}') --flavor 1 --nic net-id=$(openstack network list | awk '/ private / {print $2}') node1 - openstack server create --image $(openstack image list | awk '/ cirros-.*-x86_64-.* / {print $2}') --flavor 1 --nic net-id=$(openstack network list | awk '/ private / {print $2}') node2 - openstack server list # should show the nova instances just created - - #add secgroup rules to allow ssh etc.. - openstack security group rule create default --protocol icmp - openstack security group rule create default --protocol tcp --dst-port 22:22 - openstack security group rule create default --protocol tcp --dst-port 80:80 - -Set up a simple web server on each of these instances. ssh into each instance (username 'cirros', password 'cubswin:)' or 'gocubsgo') and run - -:: - - MYIP=$(ifconfig eth0|grep 'inet addr'|awk -F: '{print $2}'| awk '{print $1}') - while true; do echo -e "HTTP/1.0 200 OK\r\n\r\nWelcome to $MYIP" | sudo nc -l -p 80 ; done& - -Phase 2: Create your load balancer ----------------------------------- - -Make sure you have the 'openstack loadbalancer' commands: - -:: - - pip install python-octaviaclient - -Create your load balancer: - -:: - - openstack loadbalancer create --name lb1 --vip-subnet-id private-subnet - openstack loadbalancer show lb1 # Wait for the provisioning_status to be ACTIVE. - openstack loadbalancer listener create --protocol HTTP --protocol-port 80 --name listener1 lb1 - openstack loadbalancer show lb1 # Wait for the provisioning_status to be ACTIVE. - openstack loadbalancer pool create --lb-algorithm ROUND_ROBIN --listener listener1 --protocol HTTP --name pool1 - openstack loadbalancer show lb1 # Wait for the provisioning_status to be ACTIVE. - openstack loadbalancer healthmonitor create --delay 5 --timeout 2 --max-retries 1 --type HTTP pool1 - openstack loadbalancer show lb1 # Wait for the provisioning_status to be ACTIVE. - openstack loadbalancer member create --subnet-id private-subnet --address --protocol-port 80 pool1 - openstack loadbalancer show lb1 # Wait for the provisioning_status to be ACTIVE. 
- openstack loadbalancer member create --subnet-id private-subnet --address --protocol-port 80 pool1 - -Please note: The fields are the IP addresses of the nova -servers created in Phase 1. -Also note, using the API directly you can do all of the above commands in one -API call. - -Phase 3: Test your load balancer --------------------------------- - -:: - - openstack loadbalancer show lb1 # Note the vip_address - curl http:// - curl http:// - -This should show the "Welcome to " message from each member server. diff --git a/doc/source/guides/devstack-with-nested-kvm.rst b/doc/source/guides/devstack-with-nested-kvm.rst index 3732f06fd8..ba483e9ec9 100644 --- a/doc/source/guides/devstack-with-nested-kvm.rst +++ b/doc/source/guides/devstack-with-nested-kvm.rst @@ -1,3 +1,5 @@ +.. _kvm_nested_virt: + ======================================================= Configure DevStack with KVM-based Nested Virtualization ======================================================= diff --git a/doc/source/guides/devstack-with-octavia.rst b/doc/source/guides/devstack-with-octavia.rst new file mode 100644 index 0000000000..55939f0f12 --- /dev/null +++ b/doc/source/guides/devstack-with-octavia.rst @@ -0,0 +1,144 @@ +Devstack with Octavia Load Balancing +==================================== + +Starting with the OpenStack Pike release, Octavia is now a standalone service +providing load balancing services for OpenStack. + +This guide will show you how to create a devstack with `Octavia API`_ enabled. + +.. _Octavia API: https://docs.openstack.org/api-ref/load-balancer/v2/index.html + +Phase 1: Create DevStack + 2 nova instances +-------------------------------------------- + +First, set up a VM of your choice with at least 8 GB RAM and 16 GB disk space, +make sure it is updated. Install git and any other developer tools you find +useful. + +Install devstack:: + + git clone https://opendev.org/openstack/devstack + cd devstack/tools + sudo ./create-stack-user.sh + cd ../.. + sudo mv devstack /opt/stack + sudo chown -R stack.stack /opt/stack/devstack + +This will clone the current devstack code locally, then setup the "stack" +account that devstack services will run under. Finally, it will move devstack +into its default location in /opt/stack/devstack. + +Edit your ``/opt/stack/devstack/local.conf`` to look like:: + + [[local|localrc]] + # ===== BEGIN localrc ===== + DATABASE_PASSWORD=password + ADMIN_PASSWORD=password + SERVICE_PASSWORD=password + SERVICE_TOKEN=password + RABBIT_PASSWORD=password + GIT_BASE=https://opendev.org + # Optional settings: + # OCTAVIA_AMP_BASE_OS=centos + # OCTAVIA_AMP_DISTRIBUTION_RELEASE_ID=9-stream + # OCTAVIA_AMP_IMAGE_SIZE=3 + # OCTAVIA_LB_TOPOLOGY=ACTIVE_STANDBY + # OCTAVIA_ENABLE_AMPHORAV2_JOBBOARD=True + # LIBS_FROM_GIT+=octavia-lib, + # Enable Logging + LOGFILE=$DEST/logs/stack.sh.log + VERBOSE=True + LOG_COLOR=True + enable_service rabbit + enable_plugin neutron $GIT_BASE/openstack/neutron + # Octavia supports using QoS policies on the VIP port: + enable_service q-qos + enable_service placement-api placement-client + # Octavia services + enable_plugin octavia $GIT_BASE/openstack/octavia master + enable_plugin octavia-dashboard $GIT_BASE/openstack/octavia-dashboard + enable_plugin ovn-octavia-provider $GIT_BASE/openstack/ovn-octavia-provider + enable_plugin octavia-tempest-plugin $GIT_BASE/openstack/octavia-tempest-plugin + enable_service octavia o-api o-cw o-hm o-hk o-da + # If you are enabling barbican for TLS offload in Octavia, include it here. 
+ # enable_plugin barbican $GIT_BASE/openstack/barbican + # enable_service barbican + # Cinder (optional) + disable_service c-api c-vol c-sch + # Tempest + enable_service tempest + # ===== END localrc ===== + +.. note:: + For best performance it is highly recommended to use KVM + virtualization instead of QEMU. + Also make sure nested virtualization is enabled as documented in + :ref:`the respective guide `. + By adding ``LIBVIRT_CPU_MODE="host-passthrough"`` to your + ``local.conf`` you enable the guest VMs to make use of all features your + host's CPU provides. + +Run stack.sh and do some sanity checks:: + + sudo su - stack + cd /opt/stack/devstack + ./stack.sh + . ./openrc + + openstack network list # should show public and private networks + +Create two nova instances that we can use as test http servers:: + + # create nova instances on private network + openstack server create --image $(openstack image list | awk '/ cirros-.*-x86_64-.* / {print $2}') --flavor 1 --nic net-id=$(openstack network list | awk '/ private / {print $2}') node1 + openstack server create --image $(openstack image list | awk '/ cirros-.*-x86_64-.* / {print $2}') --flavor 1 --nic net-id=$(openstack network list | awk '/ private / {print $2}') node2 + openstack server list # should show the nova instances just created + + # add secgroup rules to allow ssh etc.. + openstack security group rule create default --protocol icmp + openstack security group rule create default --protocol tcp --dst-port 22:22 + openstack security group rule create default --protocol tcp --dst-port 80:80 + +Set up a simple web server on each of these instances. One possibility is to use +the `Golang test server`_ that is used by the Octavia project for CI testing +as well. +Copy the binary to your instances and start it as shown below +(username 'cirros', password 'gocubsgo'):: + + INST_IP= + scp -O test_server.bin cirros@${INST_IP}: + ssh -f cirros@${INST_IP} ./test_server.bin -id ${INST_IP} + +When started this way the test server will respond to HTTP requests with +its own IP. + +Phase 2: Create your load balancer +---------------------------------- + +Create your load balancer:: + + openstack loadbalancer create --wait --name lb1 --vip-subnet-id private-subnet + openstack loadbalancer listener create --wait --protocol HTTP --protocol-port 80 --name listener1 lb1 + openstack loadbalancer pool create --wait --lb-algorithm ROUND_ROBIN --listener listener1 --protocol HTTP --name pool1 + openstack loadbalancer healthmonitor create --wait --delay 5 --timeout 2 --max-retries 1 --type HTTP pool1 + openstack loadbalancer member create --wait --subnet-id private-subnet --address --protocol-port 80 pool1 + openstack loadbalancer member create --wait --subnet-id private-subnet --address --protocol-port 80 pool1 + +Please note: The fields are the IP addresses of the nova +servers created in Phase 1. +Also note, using the API directly you can do all of the above commands in one +API call. + +Phase 3: Test your load balancer +-------------------------------- + +:: + + openstack loadbalancer show lb1 # Note the vip_address + curl http:// + curl http:// + +This should show the "Welcome to " message from each member server. + + +.. 
_Golang test server: https://opendev.org/openstack/octavia-tempest-plugin/src/branch/master/octavia_tempest_plugin/contrib/test_server diff --git a/doc/source/guides/multinode-lab.rst b/doc/source/guides/multinode-lab.rst index c0b3f58157..ef339f1f5c 100644 --- a/doc/source/guides/multinode-lab.rst +++ b/doc/source/guides/multinode-lab.rst @@ -24,7 +24,7 @@ Install a couple of packages to bootstrap configuration: :: - apt-get install -y git sudo || yum install -y git sudo + apt-get install -y git sudo || dnf install -y git sudo Network Configuration --------------------- @@ -75,13 +75,21 @@ Otherwise create the stack user: useradd -s /bin/bash -d /opt/stack -m stack +Ensure home directory for the ``stack`` user has executable permission for all, +as RHEL based distros create it with ``700`` and Ubuntu 21.04+ with ``750`` +which can cause issues during deployment. + +:: + + chmod +x /opt/stack + This user will be making many changes to your system during installation and operation so it needs to have sudo privileges to root without a password: :: - echo "stack ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers + echo "stack ALL=(ALL) NOPASSWD: ALL" | sudo tee /etc/sudoers.d/stack From here on use the ``stack`` user. **Logout** and **login** as the ``stack`` user. @@ -169,7 +177,7 @@ machines, create a ``local.conf`` with: MYSQL_HOST=$SERVICE_HOST RABBIT_HOST=$SERVICE_HOST GLANCE_HOSTPORT=$SERVICE_HOST:9292 - ENABLED_SERVICES=n-cpu,q-agt,c-vol,placement-client + ENABLED_SERVICES=n-cpu,c-vol,placement-client,ovn-controller,ovs-vswitchd,ovsdb-server,q-ovn-metadata-agent NOVA_VNC_ENABLED=True NOVNCPROXY_URL="http://$SERVICE_HOST:6080/vnc_lite.html" VNCSERVER_LISTEN=$HOST_IP @@ -202,6 +210,48 @@ only needs to be performed for subnodes. .. _Cells v2: https://docs.openstack.org/nova/latest/user/cells.html +Configure Tempest Node to run the Tempest tests +----------------------------------------------- + +If there is a need to execute Tempest tests against different Cluster +Controller node then it can be done by re-using the ``local.conf`` file from +the Cluster Controller node but with not enabled Controller services in +``ENABLED_SERVICES`` variable. This variable needs to contain only ``tempest`` +as a configured service. Then variable ``SERVICES_FOR_TEMPEST`` must be +configured to contain those services that were enabled on the Cluster +Controller node in the ``ENABLED_SERVICES`` variable. For example the +``local.conf`` file could look as follows: + +:: + + [[local|localrc]] + HOST_IP=192.168.42.12 # change this per compute node + FIXED_RANGE=10.4.128.0/20 + FLOATING_RANGE=192.168.42.128/25 + LOGFILE=/opt/stack/logs/stack.sh.log + ADMIN_PASSWORD=labstack + DATABASE_PASSWORD=supersecret + RABBIT_PASSWORD=supersecret + SERVICE_PASSWORD=supersecret + DATABASE_TYPE=mysql + SERVICE_HOST=192.168.42.11 + MYSQL_HOST=$SERVICE_HOST + RABBIT_HOST=$SERVICE_HOST + GLANCE_HOSTPORT=$SERVICE_HOST:9292 + NOVA_VNC_ENABLED=True + NOVNCPROXY_URL="http://$SERVICE_HOST:6080/vnc_lite.html" + VNCSERVER_LISTEN=$HOST_IP + VNCSERVER_PROXYCLIENT_ADDRESS=$VNCSERVER_LISTEN + ENABLED_SERVICES=tempest + SERVICES_FOR_TEMPEST=keystone,nova,neutron,glance + +Then just execute the devstack: + +:: + + ./stack.sh + + Cleaning Up After DevStack -------------------------- @@ -395,7 +445,7 @@ SSH keys need to be exchanged between each compute node: 3. 
Verify that login via ssh works without a password:: - ssh -i /root/.ssh/id_rsa.pub stack@DESTINATION + ssh -i /root/.ssh/id_rsa stack@DESTINATION In essence, this means that every compute node's root user's public RSA key must exist in every other compute node's stack user's authorized_keys file and diff --git a/doc/source/guides/neutron.rst b/doc/source/guides/neutron.rst index 2c25a1c350..a7adeeff73 100644 --- a/doc/source/guides/neutron.rst +++ b/doc/source/guides/neutron.rst @@ -41,19 +41,8 @@ network and is on a shared subnet with other machines. The `local.conf` exhibited here assumes that 1500 is a reasonable MTU to use on that network. -.. nwdiag:: - - nwdiag { - inet [ shape = cloud ]; - router; - inet -- router; - - network hardware_network { - address = "172.18.161.0/24" - router [ address = "172.18.161.1" ]; - devstack-1 [ address = "172.18.161.6" ]; - } - } +.. image:: /assets/images/neutron-network-1.png + :alt: Network configuration for a single DevStack node DevStack Configuration @@ -100,21 +89,8 @@ also want to do multinode testing and networking. Physical Network Setup ~~~~~~~~~~~~~~~~~~~~~~ -.. nwdiag:: - - nwdiag { - inet [ shape = cloud ]; - router; - inet -- router; - - network hardware_network { - address = "172.18.161.0/24" - router [ address = "172.18.161.1" ]; - devstack-1 [ address = "172.18.161.6" ]; - devstack-2 [ address = "172.18.161.7" ]; - } - } - +.. image:: /assets/images/neutron-network-2.png + :alt: Network configuration for multiple DevStack nodes After DevStack installs and configures Neutron, traffic from guest VMs flows out of `devstack-2` (the compute node) and is encapsulated in a @@ -222,8 +198,6 @@ connect OpenStack nodes (like `devstack-2`) together. This bridge is used so that project network traffic, using the VXLAN tunneling protocol, flows between each compute node where project instances run. - - DevStack Compute Configuration ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -268,30 +242,8 @@ to the neutron L3 service. Physical Network Setup ---------------------- -.. nwdiag:: - - nwdiag { - inet [ shape = cloud ]; - router; - inet -- router; - - network provider_net { - address = "203.0.113.0/24" - router [ address = "203.0.113.1" ]; - controller; - compute1; - compute2; - } - - network control_plane { - router [ address = "10.0.0.1" ] - address = "10.0.0.0/24" - controller [ address = "10.0.0.2" ] - compute1 [ address = "10.0.0.3" ] - compute2 [ address = "10.0.0.4" ] - } - } - +.. image:: /assets/images/neutron-network-3.png + :alt: Network configuration for provider networks On a compute node, the first interface, eth0 is used for the OpenStack management (API, message bus, etc) as well as for ssh for an @@ -499,44 +451,6 @@ by default. If you want to remove all the extension drivers (even 'port_security'), set ``Q_ML2_PLUGIN_EXT_DRIVERS`` to blank. -Using Linux Bridge instead of Open vSwitch ------------------------------------------- - -The configuration for using the Linux Bridge ML2 driver is fairly -straight forward. The Linux Bridge configuration for DevStack is similar -to the :ref:`Open vSwitch based single interface ` -setup, with small modifications for the interface mappings. 
- - -:: - - [[local|localrc]] - HOST_IP=172.18.161.6 - SERVICE_HOST=172.18.161.6 - MYSQL_HOST=172.18.161.6 - RABBIT_HOST=172.18.161.6 - GLANCE_HOSTPORT=172.18.161.6:9292 - ADMIN_PASSWORD=secret - DATABASE_PASSWORD=secret - RABBIT_PASSWORD=secret - SERVICE_PASSWORD=secret - - ## Neutron options - Q_USE_SECGROUP=True - FLOATING_RANGE="172.18.161.0/24" - IPV4_ADDRS_SAFE_TO_USE="10.0.0.0/24" - Q_FLOATING_ALLOCATION_POOL=start=172.18.161.250,end=172.18.161.254 - PUBLIC_NETWORK_GATEWAY="172.18.161.1" - PUBLIC_INTERFACE=eth0 - - Q_USE_PROVIDERNET_FOR_PUBLIC=True - - # Linuxbridge Settings - Q_AGENT=linuxbridge - LB_PHYSICAL_INTERFACE=eth0 - PUBLIC_PHYSICAL_NETWORK=default - LB_INTERFACE_MAPPINGS=default:eth0 - Using MacVTap instead of Open vSwitch ------------------------------------------ @@ -604,7 +518,7 @@ the MacVTap mechanism driver: [[local|localrc]] ... - Q_ML2_PLUGIN_MECHANISM_DRIVERS=openvswitch,linuxbridge,macvtap + Q_ML2_PLUGIN_MECHANISM_DRIVERS=openvswitch,macvtap ... For the MacVTap compute node, use this local.conf: diff --git a/doc/source/guides/nova.rst b/doc/source/guides/nova.rst index 5b427972c4..6b8aabf8db 100644 --- a/doc/source/guides/nova.rst +++ b/doc/source/guides/nova.rst @@ -122,7 +122,7 @@ when creating the server, for example: .. code-block:: shell $ openstack --os-compute-api-version 2.37 server create --flavor cirros256 \ - --image cirros-0.3.5-x86_64-disk --nic none --wait test-server + --image cirros-0.6.3-x86_64-disk --nic none --wait test-server .. note:: ``--os-compute-api-version`` greater than or equal to 2.37 is required to use ``--nic=none``. diff --git a/doc/source/guides/single-machine.rst b/doc/source/guides/single-machine.rst index a0e97edb37..263fbb9d6f 100644 --- a/doc/source/guides/single-machine.rst +++ b/doc/source/guides/single-machine.rst @@ -49,13 +49,21 @@ below) $ sudo useradd -s /bin/bash -d /opt/stack -m stack +Ensure home directory for the ``stack`` user has executable permission for all, +as RHEL based distros create it with ``700`` and Ubuntu 21.04+ with ``750`` +which can cause issues during deployment. + +.. code-block:: console + + $ sudo chmod +x /opt/stack + Since this user will be making many changes to your system, it will need to have sudo privileges: .. code-block:: console - $ apt-get install sudo -y || yum install -y sudo - $ echo "stack ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers + $ apt-get install sudo -y || dnf install -y sudo + $ echo "stack ALL=(ALL) NOPASSWD: ALL" | sudo tee /etc/sudoers.d/stack .. note:: On some systems you may need to use ``sudo visudo``. @@ -73,7 +81,7 @@ We'll grab the latest version of DevStack via https: .. code-block:: console - $ sudo apt-get install git -y || sudo yum install -y git + $ sudo apt-get install git -y || sudo dnf install -y git $ git clone https://opendev.org/openstack/devstack $ cd devstack @@ -98,6 +106,9 @@ do the following: - Set the service password. This is used by the OpenStack services (Nova, Glance, etc) to authenticate with Keystone. +.. warning:: Only use alphanumeric characters in your passwords, as some + services fail to work when using special characters. + ``local.conf`` should look something like this: .. code-block:: ini diff --git a/doc/source/guides/single-vm.rst b/doc/source/guides/single-vm.rst index 7dac18b333..4272a4b180 100644 --- a/doc/source/guides/single-vm.rst +++ b/doc/source/guides/single-vm.rst @@ -56,8 +56,8 @@ passed as the user-data file when booting the VM. 
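Once a cloud-config like the one below is saved locally (for example as ``user-data.yaml``, a file name chosen here purely for illustration), it is passed to the VM at boot time. Assuming the VM is booted on an OpenStack cloud with the ``openstack`` client, a minimal sketch looks like the following; the image, flavor, network and keypair names are placeholders that must match resources existing on that cloud:

.. code-block:: shell

   # Boot the VM that will bootstrap DevStack via the cloud-config user-data.
   # All resource names below are illustrative placeholders.
   openstack server create \
       --image ubuntu-24.04 \
       --flavor m1.xlarge \
       --network private \
       --key-name mykey \
       --user-data user-data.yaml \
       --wait devstack-vm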
write_files: - content: | #!/bin/sh - DEBIAN_FRONTEND=noninteractive sudo apt-get -qqy update || sudo yum update -qy - DEBIAN_FRONTEND=noninteractive sudo apt-get install -qqy git || sudo yum install -qy git + DEBIAN_FRONTEND=noninteractive sudo apt-get -qqy update || sudo dnf update -qy + DEBIAN_FRONTEND=noninteractive sudo apt-get install -qqy git || sudo dnf install -qy git sudo chown stack:stack /home/stack cd /home/stack git clone https://opendev.org/openstack/devstack diff --git a/doc/source/index.rst b/doc/source/index.rst index 8b8acde38c..a07bb84922 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -37,10 +37,10 @@ Install Linux ------------- Start with a clean and minimal install of a Linux system. DevStack -attempts to support the two latest LTS releases of Ubuntu, the -latest/current Fedora version, CentOS/RHEL 8 and OpenSUSE. +attempts to support the two latest LTS releases of Ubuntu, +Rocky Linux 9 and openEuler. -If you do not have a preference, Ubuntu 18.04 (Bionic Beaver) is the +If you do not have a preference, Ubuntu 24.04 (Noble) is the most tested, and will probably go the smoothest. Add Stack User (optional) @@ -57,13 +57,21 @@ to run DevStack with $ sudo useradd -s /bin/bash -d /opt/stack -m stack +Ensure home directory for the ``stack`` user has executable permission for all, +as RHEL based distros create it with ``700`` and Ubuntu 21.04+ with ``750`` +which can cause issues during deployment. + +.. code-block:: console + + $ sudo chmod +x /opt/stack + Since this user will be making many changes to your system, it should have sudo privileges: .. code-block:: console $ echo "stack ALL=(ALL) NOPASSWD: ALL" | sudo tee /etc/sudoers.d/stack - $ sudo su - stack + $ sudo -u stack -i Download DevStack ----------------- @@ -93,7 +101,10 @@ devstack git repo. This is the minimum required config to get started with DevStack. .. note:: There is a sample :download:`local.conf ` file - under the *samples* directory in the devstack repository. + under the *samples* directory in the devstack repository. + +.. warning:: Only use alphanumeric characters in your passwords, as some + services fail to work when using special characters. Start the install ----------------- @@ -102,7 +113,7 @@ Start the install $ ./stack.sh -This will take a 15 - 20 minutes, largely depending on the speed of +This will take 15 - 30 minutes, largely depending on the speed of your internet connection. Many git trees and packages will be installed during this process. @@ -122,6 +133,8 @@ there. You can ``source openrc`` in your shell, and then use the ``openstack`` command line tool to manage your devstack. +You can :ref:`create a VM and SSH into it `. + You can ``cd /opt/stack/tempest`` and run tempest tests that have been configured to work with your devstack. diff --git a/doc/source/networking.rst b/doc/source/networking.rst index e65c7ef195..10e1c3ff2c 100644 --- a/doc/source/networking.rst +++ b/doc/source/networking.rst @@ -68,7 +68,7 @@ Shared Guest Interface .. warning:: This is not a recommended configuration. Because of interactions - between ovs and bridging, if you reboot your box with active + between OVS and bridging, if you reboot your box with active networking you may lose network connectivity to your system. If you need your guests accessible on the network, but only have 1 @@ -114,3 +114,125 @@ For IPv6, ``FIXED_RANGE_V6`` will default to the first /64 of the value of ``FIXED_RANGE_V6`` will just use the value of that directly. 
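To make these defaults concrete, a minimal ``local.conf`` sketch that pins the ranges explicitly might look like the following; the prefixes are illustrative only and should be replaced with ranges unused in your environment:

.. code-block:: shell

   # Illustrative values only; FIXED_RANGE and FIXED_RANGE_V6 are normally
   # derived from the *_ADDRS_SAFE_TO_USE values unless set explicitly.
   IPV4_ADDRS_SAFE_TO_USE=10.0.0.0/22
   IPV6_ADDRS_SAFE_TO_USE=fd00:1::/56
   FLOATING_RANGE=172.24.4.0/24

These assignments belong in the ``[[local|localrc]]`` section of ``local.conf``.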
``SUBNETPOOL_PREFIX_V6`` will just default to the value of ``IPV6_ADDRS_SAFE_TO_USE`` directly. + +.. _ssh: + +SSH access to instances +======================= + +To validate connectivity, you can create an instance using the +``$PRIVATE_NETWORK_NAME`` network (default: ``private``), create a floating IP +using the ``$PUBLIC_NETWORK_NAME`` network (default: ``public``), and attach +this floating IP to the instance: + +.. code-block:: shell + + openstack keypair create --public-key ~/.ssh/id_rsa.pub test-keypair + openstack server create --network private --key-name test-keypair ... test-server + fip_id=$(openstack floating ip create public -f value -c id) + openstack server add floating ip test-server ${fip_id} + +Once done, ensure you have enabled SSH and ICMP (ping) access for the security +group used for the instance. You can either create a custom security group and +specify it when creating the instance or add it after creation, or you can +modify the ``default`` security group created by default for each project. +Let's do the latter: + +.. code-block:: shell + + openstack security group rule create --proto icmp --dst-port 0 default + openstack security group rule create --proto tcp --dst-port 22 default + +Finally, SSH into the instance. If you used the Cirros image uploaded by +default, then you can run the following: + +.. code-block:: shell + + openstack server ssh test-server -- -l cirros + +This will connect using the ``cirros`` user and the keypair you configured when +creating the instance. + +Remote SSH access to instances +============================== + +You can also SSH to instances created on your DevStack host from other hosts. +This can be helpful if you are e.g. deploying DevStack in a VM on an existing +cloud and wish to do development on your local machine. There are a few ways to +do this. + +.. rubric:: Configure instances to be locally accessible + +The most obvious way is to configure guests to be locally accessible, as +described `above `__. This has the advantage of +requiring no further effort on the client. However, it is more involved and +requires either support from your cloud or some inadvisable workarounds. + +.. rubric:: Use your DevStack host as a jump host + +You can choose to use your DevStack host as a jump host. To SSH to an instance +this way, pass the standard ``-J`` option to the ``openstack ssh`` / ``ssh`` +command. For example: + +.. code-block:: + + openstack server ssh test-server -- -l cirros -J username@devstack-host + +(where ``test-server`` is the name of an existing instance, as described +:ref:`previously `, and ``username`` and ``devstack-host`` are the +username and hostname of your DevStack host). + +This can also be configured via your ``~/.ssh/config`` file, making it rather +effortless. However, it only allows SSH access. If you want to access e.g. a +web application on the instance, you will need to configure an SSH tunnel and +forward select ports using the ``-L`` option. For example, to forward HTTP +traffic: + +.. code-block:: + + openstack server ssh test-server -- -l cirros -L 8080:username@devstack-host:80 + +(where ``test-server`` is the name of an existing instance, as described +:ref:`previously `, and ``username`` and ``devstack-host`` are the +username and hostname of your DevStack host). + +As you can imagine, this can quickly get out of hand, particularly for more +complex guest applications with multiple ports. + +..
rubric:: Use a proxy or VPN tool + +You can use a proxy or VPN tool to enable tunneling for the floating IP +address range of the ``$PUBLIC_NETWORK_NAME`` network (default: ``public``) +defined by ``$FLOATING_RANGE`` (default: ``172.24.4.0/24``). There are many +such tools available to do this. For example, we could use a useful utility +called `sshuttle`__. To enable tunneling using ``sshuttle``, first ensure you +have allowed SSH and HTTP(S) traffic to your DevStack host. Allowing HTTP(S) +traffic is necessary so you can use the OpenStack APIs remotely. How you do +this will depend on where your DevStack host is running. Once this is done, +install ``sshuttle`` on your localhost: + +.. code-block:: bash + + sudo apt-get install sshuttle || sudo dnf install sshuttle + +Finally, start ``sshuttle`` on your localhost using the floating IP address +range. For example, assuming you are using the default value for +``$FLOATING_RANGE``, you can do: + +.. code-block:: bash + + sshuttle -r username@devstack-host 172.24.4.0/24 + +(where ``username`` and ``devstack-host`` are the username and hostname of your +DevStack host). + +You should now be able to create an instance and SSH into it: + +.. code-block:: bash + + openstack server ssh test-server -- -l cirros + +(where ``test-server`` is the name of an existing instance, as described +:ref:`previously `) + +.. __: https://github.com/sshuttle/sshuttle diff --git a/doc/source/overview.rst b/doc/source/overview.rst index a609333289..c978e8d2cf 100644 --- a/doc/source/overview.rst +++ b/doc/source/overview.rst @@ -23,13 +23,12 @@ strategy to include the latest Ubuntu release and the latest RHEL release.* - Ubuntu: current LTS release plus current development release -- Fedora: current release plus previous release -- RHEL/CentOS: current major release +- RHEL/CentOS/RockyLinux: current major release - Other OS platforms may continue to be included but the maintenance of those platforms shall not be assumed simply due to their presence. Having a listed point-of-contact for each additional OS will greatly increase its chance of being well-maintained. -- Patches for Ubuntu and/or Fedora will not be held up due to +- Patches for Ubuntu and/or RockyLinux will not be held up due to side-effects on other OS platforms. Databases --------- @@ -53,12 +52,6 @@ Web Server - Apache -OpenStack Network ------------------ - -- Neutron: A basic configuration approximating the original FlatDHCP - mode using linuxbridge or OpenVSwitch. - Services -------- diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 4e7c2d7b2f..9185263443 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -24,40 +24,35 @@ official OpenStack projects.
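As a reminder of how any entry in the registry below is consumed, a plugin is enabled by adding an ``enable_plugin`` line to the ``[[local|localrc]]`` section of ``local.conf``; an optional third argument selects a git branch. A minimal sketch, using one of the registered plugins purely as an example:

.. code-block:: shell

   # Enable a plugin from the registry; the plugin chosen here is only an example.
   enable_plugin devstack-plugin-ceph https://opendev.org/openstack/devstack-plugin-ceph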
======================================== === Plugin Name URL ======================================== === +openstack/aetos `https://opendev.org/openstack/aetos `__ openstack/aodh `https://opendev.org/openstack/aodh `__ openstack/barbican `https://opendev.org/openstack/barbican `__ openstack/blazar `https://opendev.org/openstack/blazar `__ openstack/ceilometer `https://opendev.org/openstack/ceilometer `__ -openstack/ceilometer-powervm `https://opendev.org/openstack/ceilometer-powervm `__ -openstack/cinderlib `https://opendev.org/openstack/cinderlib `__ openstack/cloudkitty `https://opendev.org/openstack/cloudkitty `__ openstack/cyborg `https://opendev.org/openstack/cyborg `__ openstack/designate `https://opendev.org/openstack/designate `__ +openstack/designate-tempest-plugin `https://opendev.org/openstack/designate-tempest-plugin `__ openstack/devstack-plugin-amqp1 `https://opendev.org/openstack/devstack-plugin-amqp1 `__ openstack/devstack-plugin-ceph `https://opendev.org/openstack/devstack-plugin-ceph `__ openstack/devstack-plugin-container `https://opendev.org/openstack/devstack-plugin-container `__ openstack/devstack-plugin-kafka `https://opendev.org/openstack/devstack-plugin-kafka `__ openstack/devstack-plugin-nfs `https://opendev.org/openstack/devstack-plugin-nfs `__ openstack/devstack-plugin-open-cas `https://opendev.org/openstack/devstack-plugin-open-cas `__ -openstack/devstack-plugin-pika `https://opendev.org/openstack/devstack-plugin-pika `__ -openstack/devstack-plugin-zmq `https://opendev.org/openstack/devstack-plugin-zmq `__ -openstack/ec2-api `https://opendev.org/openstack/ec2-api `__ +openstack/devstack-plugin-prometheus `https://opendev.org/openstack/devstack-plugin-prometheus `__ openstack/freezer `https://opendev.org/openstack/freezer `__ openstack/freezer-api `https://opendev.org/openstack/freezer-api `__ openstack/freezer-tempest-plugin `https://opendev.org/openstack/freezer-tempest-plugin `__ openstack/freezer-web-ui `https://opendev.org/openstack/freezer-web-ui `__ +openstack/grian-ui `https://opendev.org/openstack/grian-ui `__ openstack/heat `https://opendev.org/openstack/heat `__ openstack/heat-dashboard `https://opendev.org/openstack/heat-dashboard `__ openstack/ironic `https://opendev.org/openstack/ironic `__ openstack/ironic-inspector `https://opendev.org/openstack/ironic-inspector `__ openstack/ironic-prometheus-exporter `https://opendev.org/openstack/ironic-prometheus-exporter `__ openstack/ironic-ui `https://opendev.org/openstack/ironic-ui `__ -openstack/karbor `https://opendev.org/openstack/karbor `__ -openstack/karbor-dashboard `https://opendev.org/openstack/karbor-dashboard `__ openstack/keystone `https://opendev.org/openstack/keystone `__ -openstack/kuryr-kubernetes `https://opendev.org/openstack/kuryr-kubernetes `__ openstack/kuryr-libnetwork `https://opendev.org/openstack/kuryr-libnetwork `__ -openstack/kuryr-tempest-plugin `https://opendev.org/openstack/kuryr-tempest-plugin `__ openstack/magnum `https://opendev.org/openstack/magnum `__ openstack/magnum-ui `https://opendev.org/openstack/magnum-ui `__ openstack/manila `https://opendev.org/openstack/manila `__ @@ -65,55 +60,40 @@ openstack/manila-tempest-plugin `https://opendev.org/openstack/manila-t openstack/manila-ui `https://opendev.org/openstack/manila-ui `__ openstack/masakari `https://opendev.org/openstack/masakari `__ openstack/mistral `https://opendev.org/openstack/mistral `__ -openstack/monasca-analytics `https://opendev.org/openstack/monasca-analytics `__ openstack/monasca-api 
`https://opendev.org/openstack/monasca-api `__ -openstack/monasca-ceilometer `https://opendev.org/openstack/monasca-ceilometer `__ openstack/monasca-events-api `https://opendev.org/openstack/monasca-events-api `__ -openstack/monasca-log-api `https://opendev.org/openstack/monasca-log-api `__ openstack/monasca-tempest-plugin `https://opendev.org/openstack/monasca-tempest-plugin `__ -openstack/monasca-transform `https://opendev.org/openstack/monasca-transform `__ -openstack/murano `https://opendev.org/openstack/murano `__ openstack/networking-bagpipe `https://opendev.org/openstack/networking-bagpipe `__ openstack/networking-baremetal `https://opendev.org/openstack/networking-baremetal `__ openstack/networking-bgpvpn `https://opendev.org/openstack/networking-bgpvpn `__ openstack/networking-generic-switch `https://opendev.org/openstack/networking-generic-switch `__ -openstack/networking-hyperv `https://opendev.org/openstack/networking-hyperv `__ -openstack/networking-l2gw `https://opendev.org/openstack/networking-l2gw `__ -openstack/networking-midonet `https://opendev.org/openstack/networking-midonet `__ -openstack/networking-odl `https://opendev.org/openstack/networking-odl `__ -openstack/networking-powervm `https://opendev.org/openstack/networking-powervm `__ openstack/networking-sfc `https://opendev.org/openstack/networking-sfc `__ openstack/neutron `https://opendev.org/openstack/neutron `__ openstack/neutron-dynamic-routing `https://opendev.org/openstack/neutron-dynamic-routing `__ +openstack/neutron-fwaas `https://opendev.org/openstack/neutron-fwaas `__ +openstack/neutron-fwaas-dashboard `https://opendev.org/openstack/neutron-fwaas-dashboard `__ openstack/neutron-tempest-plugin `https://opendev.org/openstack/neutron-tempest-plugin `__ openstack/neutron-vpnaas `https://opendev.org/openstack/neutron-vpnaas `__ openstack/neutron-vpnaas-dashboard `https://opendev.org/openstack/neutron-vpnaas-dashboard `__ -openstack/nova-powervm `https://opendev.org/openstack/nova-powervm `__ +openstack/nova `https://opendev.org/openstack/nova `__ openstack/octavia `https://opendev.org/openstack/octavia `__ openstack/octavia-dashboard `https://opendev.org/openstack/octavia-dashboard `__ openstack/octavia-tempest-plugin `https://opendev.org/openstack/octavia-tempest-plugin `__ openstack/openstacksdk `https://opendev.org/openstack/openstacksdk `__ -openstack/os-loganalyze `https://opendev.org/openstack/os-loganalyze `__ openstack/osprofiler `https://opendev.org/openstack/osprofiler `__ -openstack/oswin-tempest-plugin `https://opendev.org/openstack/oswin-tempest-plugin `__ +openstack/ovn-bgp-agent `https://opendev.org/openstack/ovn-bgp-agent `__ openstack/ovn-octavia-provider `https://opendev.org/openstack/ovn-octavia-provider `__ -openstack/panko `https://opendev.org/openstack/panko `__ -openstack/patrole `https://opendev.org/openstack/patrole `__ -openstack/qinling `https://opendev.org/openstack/qinling `__ -openstack/qinling-dashboard `https://opendev.org/openstack/qinling-dashboard `__ openstack/rally-openstack `https://opendev.org/openstack/rally-openstack `__ -openstack/sahara `https://opendev.org/openstack/sahara `__ -openstack/sahara-dashboard `https://opendev.org/openstack/sahara-dashboard `__ -openstack/searchlight `https://opendev.org/openstack/searchlight `__ -openstack/searchlight-ui `https://opendev.org/openstack/searchlight-ui `__ -openstack/senlin `https://opendev.org/openstack/senlin `__ openstack/shade `https://opendev.org/openstack/shade `__ -openstack/solum `https://opendev.org/openstack/solum 
`__ +openstack/skyline-apiserver `https://opendev.org/openstack/skyline-apiserver `__ openstack/storlets `https://opendev.org/openstack/storlets `__ openstack/tacker `https://opendev.org/openstack/tacker `__ +openstack/tap-as-a-service `https://opendev.org/openstack/tap-as-a-service `__ openstack/telemetry-tempest-plugin `https://opendev.org/openstack/telemetry-tempest-plugin `__ openstack/trove `https://opendev.org/openstack/trove `__ openstack/trove-dashboard `https://opendev.org/openstack/trove-dashboard `__ +openstack/venus `https://opendev.org/openstack/venus `__ +openstack/venus-dashboard `https://opendev.org/openstack/venus-dashboard `__ openstack/vitrage `https://opendev.org/openstack/vitrage `__ openstack/vitrage-dashboard `https://opendev.org/openstack/vitrage-dashboard `__ openstack/vitrage-tempest-plugin `https://opendev.org/openstack/vitrage-tempest-plugin `__ @@ -134,7 +114,6 @@ starlingx/nfv `https://opendev.org/starlingx/nfv `__ vexxhost/openstack-operator `https://opendev.org/vexxhost/openstack-operator `__ x/almanach `https://opendev.org/x/almanach `__ -x/apmec `https://opendev.org/x/apmec `__ x/bilean `https://opendev.org/x/bilean `__ x/broadview-collector `https://opendev.org/x/broadview-collector `__ x/collectd-openstack-plugins `https://opendev.org/x/collectd-openstack-plugins `__ @@ -143,6 +122,7 @@ x/devstack-plugin-glusterfs `https://opendev.org/x/devstack-plugin- x/devstack-plugin-hdfs `https://opendev.org/x/devstack-plugin-hdfs `__ x/devstack-plugin-libvirt-qemu `https://opendev.org/x/devstack-plugin-libvirt-qemu `__ x/devstack-plugin-mariadb `https://opendev.org/x/devstack-plugin-mariadb `__ +x/devstack-plugin-tobiko `https://opendev.org/x/devstack-plugin-tobiko `__ x/devstack-plugin-vmax `https://opendev.org/x/devstack-plugin-vmax `__ x/drbd-devstack `https://opendev.org/x/drbd-devstack `__ x/fenix `https://opendev.org/x/fenix `__ @@ -169,6 +149,7 @@ x/networking-fortinet `https://opendev.org/x/networking-forti x/networking-hpe `https://opendev.org/x/networking-hpe `__ x/networking-huawei `https://opendev.org/x/networking-huawei `__ x/networking-infoblox `https://opendev.org/x/networking-infoblox `__ +x/networking-l2gw `https://opendev.org/x/networking-l2gw `__ x/networking-lagopus `https://opendev.org/x/networking-lagopus `__ x/networking-mlnx `https://opendev.org/x/networking-mlnx `__ x/networking-nec `https://opendev.org/x/networking-nec `__ @@ -190,14 +171,12 @@ x/rsd-virt-for-nova `https://opendev.org/x/rsd-virt-for-nov x/scalpels `https://opendev.org/x/scalpels `__ x/slogging `https://opendev.org/x/slogging `__ x/stackube `https://opendev.org/x/stackube `__ -x/tap-as-a-service `https://opendev.org/x/tap-as-a-service `__ -x/tap-as-a-service-dashboard `https://opendev.org/x/tap-as-a-service-dashboard `__ x/tatu `https://opendev.org/x/tatu `__ -x/tobiko `https://opendev.org/x/tobiko `__ x/trio2o `https://opendev.org/x/trio2o `__ x/valet `https://opendev.org/x/valet `__ x/vmware-nsx `https://opendev.org/x/vmware-nsx `__ x/vmware-vspc `https://opendev.org/x/vmware-vspc `__ +x/whitebox-neutron-tempest-plugin `https://opendev.org/x/whitebox-neutron-tempest-plugin `__ ======================================== === diff --git a/doc/source/plugins.rst b/doc/source/plugins.rst index a18a786c49..fe567e2277 100644 --- a/doc/source/plugins.rst +++ b/doc/source/plugins.rst @@ -232,19 +232,16 @@ an early phase of its execution. 
These packages may be defined in a plugin as files that contain new-line separated lists of packages required by the plugin -Supported packaging systems include apt and yum across multiple +Supported packaging systems include apt and dnf across multiple distributions. To enable a plugin to hook into this and install package dependencies, packages may be listed at the following locations in the top-level of the plugin repository: - ``./devstack/files/debs/$plugin_name`` - Packages to install when running - on Ubuntu, Debian or Linux Mint. + on Ubuntu or Debian. - ``./devstack/files/rpms/$plugin_name`` - Packages to install when running - on Red Hat, Fedora, CentOS or XenServer. - -- ``./devstack/files/rpms-suse/$plugin_name`` - Packages to install when - running on SUSE Linux or openSUSE. + on Red Hat, Fedora, or CentOS. Although there a no plans to remove this method of installing packages, plugins should consider it deprecated for ``bindep`` support diff --git a/doc/source/tempest.rst b/doc/source/tempest.rst new file mode 100644 index 0000000000..65dd5b16b2 --- /dev/null +++ b/doc/source/tempest.rst @@ -0,0 +1,25 @@ +======= +Tempest +======= + +`Tempest`_ is the OpenStack Integration test suite. It is installed by default +and is used to provide integration testing for many of the OpenStack services. +Just like DevStack itself, it is possible to extend Tempest with plugins. In +fact, many Tempest plugin packages also include DevStack plugin to do things +like pre-create required static resources. + +The `Tempest documentation `_ provides a thorough guide to using +Tempest. However, if you simply wish to run the standard set of Tempest tests +against an existing deployment, you can do the following: + +.. code-block:: shell + + cd /opt/stack/tempest + /opt/stack/data/venv/bin/tempest run ... + +The above assumes you have installed DevStack in the default location +(configured via the ``DEST`` configuration variable) and have enabled +virtualenv-based installation in the standard location (configured via the +``USE_VENV`` and ``VENV_DEST`` configuration variables, respectively). + +.. 
_Tempest: https://docs.openstack.org/tempest/latest/ diff --git a/extras.d/80-tempest.sh b/extras.d/80-tempest.sh index 15ecfe39eb..06c73ec763 100644 --- a/extras.d/80-tempest.sh +++ b/extras.d/80-tempest.sh @@ -6,7 +6,7 @@ if is_service_enabled tempest; then source $TOP_DIR/lib/tempest elif [[ "$1" == "stack" && "$2" == "install" ]]; then echo_summary "Installing Tempest" - install_tempest + async_runfunc install_tempest elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then # Tempest config must come after layer 2 services are running : @@ -17,6 +17,7 @@ if is_service_enabled tempest; then # local.conf Tempest option overrides : elif [[ "$1" == "stack" && "$2" == "test-config" ]]; then + async_wait install_tempest echo_summary "Initializing Tempest" configure_tempest echo_summary "Installing Tempest Plugins" diff --git a/files/apache-cinder-api.template b/files/apache-cinder-api.template index e1246f11b6..e401803abc 100644 --- a/files/apache-cinder-api.template +++ b/files/apache-cinder-api.template @@ -6,21 +6,13 @@ Listen %PUBLICPORT% WSGIScriptAlias / %CINDER_BIN_DIR%/cinder-wsgi WSGIApplicationGroup %{GLOBAL} WSGIPassAuthorization On - = 2.4> - ErrorLogFormat "%{cu}t %M" - + ErrorLogFormat "%{cu}t %M" ErrorLog /var/log/%APACHE_NAME%/c-api.log %SSLENGINE% %SSLCERTFILE% %SSLKEYFILE% - = 2.4> - Require all granted - - - Order allow,deny - Allow from all - + Require all granted diff --git a/files/apache-horizon.template b/files/apache-horizon.template index efcfc0360b..c6c55ecf27 100644 --- a/files/apache-horizon.template +++ b/files/apache-horizon.template @@ -21,22 +21,13 @@ Options Indexes FollowSymLinks MultiViews AllowOverride None - # Apache 2.4 uses mod_authz_host for access control now (instead of - # "Allow") - - Order allow,deny - Allow from all - - = 2.4> - Require all granted - + Require all granted - = 2.4> - ErrorLogFormat "%{cu}t %M" - + ErrorLogFormat "%{cu}t %M" ErrorLog /var/log/%APACHE_NAME%/horizon_error.log LogLevel warn CustomLog /var/log/%APACHE_NAME%/horizon_access.log combined +%WSGIPYTHONHOME% WSGISocketPrefix /var/run/%APACHE_NAME% diff --git a/files/apache-keystone.template b/files/apache-keystone.template index 128436027d..d99e8e6ce0 100644 --- a/files/apache-keystone.template +++ b/files/apache-keystone.template @@ -1,5 +1,4 @@ Listen %PUBLICPORT% -Listen %ADMINPORT% LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\" %D(us)" keystone_combined @@ -20,24 +19,11 @@ LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\" %D(us)" %SSLKEYFILE% - - WSGIDaemonProcess keystone-admin processes=3 threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV% - WSGIProcessGroup keystone-admin - WSGIScriptAlias / %KEYSTONE_BIN%/keystone-wsgi-admin - WSGIApplicationGroup %{GLOBAL} - WSGIPassAuthorization On - ErrorLogFormat "%M" - ErrorLog /var/log/%APACHE_NAME%/keystone.log - CustomLog /var/log/%APACHE_NAME%/keystone_access.log keystone_combined - %SSLENGINE% - %SSLCERTFILE% - %SSLKEYFILE% - - %SSLLISTEN% %SSLLISTEN% %SSLENGINE% %SSLLISTEN% %SSLCERTFILE% %SSLLISTEN% %SSLKEYFILE% +%SSLLISTEN% SSLProtocol -all +TLSv1.3 +TLSv1.2 %SSLLISTEN% Alias /identity %KEYSTONE_BIN%/keystone-wsgi-public @@ -49,13 +35,3 @@ Alias /identity %KEYSTONE_BIN%/keystone-wsgi-public WSGIApplicationGroup %{GLOBAL} WSGIPassAuthorization On - -Alias /identity_admin %KEYSTONE_BIN%/keystone-wsgi-admin - - SetHandler wsgi-script - Options +ExecCGI - - WSGIProcessGroup keystone-admin - WSGIApplicationGroup %{GLOBAL} - WSGIPassAuthorization On - diff --git 
a/files/apache-neutron.template b/files/apache-neutron.template index c7796b93bf..358e87f5da 100644 --- a/files/apache-neutron.template +++ b/files/apache-neutron.template @@ -24,6 +24,7 @@ LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\" %D(us)" %SSLLISTEN% %SSLENGINE% %SSLLISTEN% %SSLCERTFILE% %SSLLISTEN% %SSLKEYFILE% +%SSLLISTEN% SSLProtocol -all +TLSv1.3 +TLSv1.2 %SSLLISTEN% Alias /networking %NEUTRON_BIN%/neutron-api diff --git a/files/apache-nova-api.template b/files/apache-nova-api.template index bcf406edf3..66fcf73cf2 100644 --- a/files/apache-nova-api.template +++ b/files/apache-nova-api.template @@ -6,9 +6,7 @@ Listen %PUBLICPORT% WSGIScriptAlias / %PUBLICWSGI% WSGIApplicationGroup %{GLOBAL} WSGIPassAuthorization On - = 2.4> - ErrorLogFormat "%M" - + ErrorLogFormat "%M" ErrorLog /var/log/%APACHE_NAME%/nova-api.log %SSLENGINE% %SSLCERTFILE% diff --git a/files/apache-nova-metadata.template b/files/apache-nova-metadata.template index 6231c1ced8..64be03166e 100644 --- a/files/apache-nova-metadata.template +++ b/files/apache-nova-metadata.template @@ -6,9 +6,7 @@ Listen %PUBLICPORT% WSGIScriptAlias / %PUBLICWSGI% WSGIApplicationGroup %{GLOBAL} WSGIPassAuthorization On - = 2.4> - ErrorLogFormat "%M" - + ErrorLogFormat "%M" ErrorLog /var/log/%APACHE_NAME%/nova-metadata.log %SSLENGINE% %SSLCERTFILE% diff --git a/files/apache-placement-api.template b/files/apache-placement-api.template deleted file mode 100644 index 011abb95fc..0000000000 --- a/files/apache-placement-api.template +++ /dev/null @@ -1,27 +0,0 @@ -# NOTE(sbauza): This virtualhost is only here because some directives can -# only be set by a virtualhost or server context, so that's why the port is not bound. -# TODO(sbauza): Find a better way to identify a free port that is not corresponding to an existing -# vhost. 
- - WSGIDaemonProcess placement-api processes=%APIWORKERS% threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV% - WSGIProcessGroup placement-api - WSGIScriptAlias / %PUBLICWSGI% - WSGIApplicationGroup %{GLOBAL} - WSGIPassAuthorization On - = 2.4> - ErrorLogFormat "%M" - - ErrorLog /var/log/%APACHE_NAME%/placement-api.log - %SSLENGINE% - %SSLCERTFILE% - %SSLKEYFILE% - - -Alias /placement %PUBLICWSGI% - - SetHandler wsgi-script - Options +ExecCGI - WSGIProcessGroup placement-api - WSGIApplicationGroup %{GLOBAL} - WSGIPassAuthorization On - diff --git a/files/apts b/files/apts deleted file mode 120000 index ef926de053..0000000000 --- a/files/apts +++ /dev/null @@ -1 +0,0 @@ -debs/ \ No newline at end of file diff --git a/files/debs/dstat b/files/debs/dstat index 2b643b8b1b..40d00f4aa4 100644 --- a/files/debs/dstat +++ b/files/debs/dstat @@ -1 +1,2 @@ -dstat +dstat # dist:bionic +pcp diff --git a/files/debs/general b/files/debs/general index d64417f1b7..1e63e4f582 100644 --- a/files/debs/general +++ b/files/debs/general @@ -5,16 +5,14 @@ bsdmainutils curl default-jre-headless # NOPRIME g++ +gawk gcc gettext # used for compiling message catalogs git graphviz # needed for docs iputils-ping -libapache2-mod-proxy-uwsgi libffi-dev # for pyOpenSSL libjpeg-dev # Pillow 3.0.0 -libmysqlclient-dev # MySQL-python -libpcre3-dev # for python-pcre libpq-dev # psycopg2 libssl-dev # for pyOpenSSL libsystemd-dev # for systemd-python diff --git a/files/debs/neutron-common b/files/debs/neutron-common index e548396cd7..f6afc5bf55 100644 --- a/files/debs/neutron-common +++ b/files/debs/neutron-common @@ -6,7 +6,6 @@ haproxy # to serve as metadata proxy inside router/dhcp namespaces iptables iputils-arping iputils-ping -libmysqlclient-dev mysql-server #NOPRIME postgresql-server-dev-all python3-mysqldb diff --git a/files/debs/nova b/files/debs/nova index a7aebbf946..5c00ad72d9 100644 --- a/files/debs/nova +++ b/files/debs/nova @@ -1,15 +1,11 @@ conntrack curl -dnsmasq-base -dnsmasq-utils # for dhcp_release ebtables -gawk genisoimage # required for config_drive iptables iputils-arping kpartx libjs-jquery-tablesorter # Needed for coverage html reports -libmysqlclient-dev libvirt-clients # NOPRIME libvirt-daemon-system # NOPRIME libvirt-dev # NOPRIME diff --git a/files/debs/swift b/files/debs/swift index 4b8ac3d793..67c6c8ddb4 100644 --- a/files/debs/swift +++ b/files/debs/swift @@ -2,5 +2,6 @@ curl liberasurecode-dev make memcached +rsync sqlite3 xfsprogs diff --git a/files/dnsmasq-for-baremetal-from-nova-network.conf b/files/dnsmasq-for-baremetal-from-nova-network.conf deleted file mode 100644 index 66a375190e..0000000000 --- a/files/dnsmasq-for-baremetal-from-nova-network.conf +++ /dev/null @@ -1,3 +0,0 @@ -enable-tftp -tftp-root=/tftpboot -dhcp-boot=pxelinux.0 diff --git a/files/ldap/manager.ldif.in b/files/ldap/manager.ldif.in index 2f1f1395ee..d3b9be8b6e 100644 --- a/files/ldap/manager.ldif.in +++ b/files/ldap/manager.ldif.in @@ -1,4 +1,4 @@ -dn: olcDatabase={${LDAP_OLCDB_NUMBER}}hdb,cn=config +dn: olcDatabase={${LDAP_OLCDB_NUMBER}}${LDAP_OLCDB_TYPE},cn=config changetype: modify replace: olcSuffix olcSuffix: ${BASE_DN} diff --git a/files/lvm-backing-file.template b/files/lvm-backing-file.template new file mode 100644 index 0000000000..dc519d7745 --- /dev/null +++ b/files/lvm-backing-file.template @@ -0,0 +1,16 @@ +[Unit] +Description=Activate LVM backing file %BACKING_FILE% +DefaultDependencies=no +After=systemd-udev-settle.service +Before=lvm2-activation-early.service +Wants=systemd-udev-settle.service + 
+[Service] +ExecStart=/sbin/losetup --find --show %DIRECTIO% %BACKING_FILE% +ExecStop=/bin/sh -c '/sbin/losetup -d $$(/sbin/losetup --associated %BACKING_FILE% -O NAME -n)' +RemainAfterExit=yes +Type=oneshot + +[Install] +WantedBy=local-fs.target +Also=systemd-udev-settle.service diff --git a/files/openstack-cli-server/openstack b/files/openstack-cli-server/openstack new file mode 100755 index 0000000000..47fbfc5e17 --- /dev/null +++ b/files/openstack-cli-server/openstack @@ -0,0 +1,118 @@ +#!/usr/bin/env python3 +# Copyright 2016 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import socket +import sys +import os +import os.path +import json + +server_address = "/tmp/openstack.sock" + +sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + +try: + sock.connect(server_address) +except socket.error as msg: + print(msg, file=sys.stderr) + sys.exit(1) + + +def send(sock, doc): + jdoc = json.dumps(doc) + sock.send(b'%d\n' % len(jdoc)) + sock.sendall(jdoc.encode('utf-8')) + +def recv(sock): + length_str = b'' + + char = sock.recv(1) + if len(char) == 0: + print("Unexpected end of file", file=sys.stderr) + sys.exit(1) + + while char != b'\n': + length_str += char + char = sock.recv(1) + if len(char) == 0: + print("Unexpected end of file", file=sys.stderr) + sys.exit(1) + + total = int(length_str) + + # use a memoryview to receive the data chunk by chunk efficiently + jdoc = memoryview(bytearray(total)) + next_offset = 0 + while total - next_offset > 0: + recv_size = sock.recv_into(jdoc[next_offset:], total - next_offset) + next_offset += recv_size + try: + doc = json.loads(jdoc.tobytes()) + except (TypeError, ValueError) as e: + raise Exception('Data received was not in JSON format') + return doc + +try: + env = {} + passenv = ["CINDER_VERSION", + "OS_AUTH_URL", + "OS_NO_CACHE", + "OS_PASSWORD", + "OS_PROJECT_NAME", + "OS_REGION_NAME", + "OS_TENANT_NAME", + "OS_USERNAME", + "OS_VOLUME_API_VERSION", + "OS_CLOUD"] + for name in passenv: + if name in os.environ: + env[name] = os.environ[name] + + cmd = { + "app": os.path.basename(sys.argv[0]), + "env": env, + "argv": sys.argv[1:] + } + try: + image_idx = sys.argv.index('image') + create_idx = sys.argv.index('create') + missing_file = image_idx < create_idx and \ + not any(x.startswith('--file') for x in sys.argv) + except ValueError: + missing_file = False + + if missing_file: + # This means we were called with an image create command, but were + # not provided a --file option. That likely means we're being passed + # the image data to stdin, which won't work because we do not proxy + # stdin to the server. So, we just reject the operation and ask the + # caller to provide the file with --file instead. + # We've already connected to the server, we need to send it some dummy + # data so it doesn't wait forever. 
+ send(sock, {}) + print('Image create without --file is not allowed in server mode', + file=sys.stderr) + sys.exit(1) + else: + send(sock, cmd) + + doc = recv(sock) + if doc["stdout"] != b'': + print(doc["stdout"], end='') + if doc["stderr"] != b'': + print(doc["stderr"], file=sys.stderr) + sys.exit(doc["status"]) +finally: + sock.close() diff --git a/files/openstack-cli-server/openstack-cli-server b/files/openstack-cli-server/openstack-cli-server new file mode 100755 index 0000000000..f3d2747e52 --- /dev/null +++ b/files/openstack-cli-server/openstack-cli-server @@ -0,0 +1,118 @@ +#!/usr/bin/env python3 +# Copyright 2016 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import socket +import sys +import os +import json + +from openstackclient import shell as osc_shell +from io import StringIO + +server_address = "/tmp/openstack.sock" + +try: + os.unlink(server_address) +except OSError: + if os.path.exists(server_address): + raise + +sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) +print('starting up on %s' % server_address, file=sys.stderr) +sock.bind(server_address) + +# Listen for incoming connections +sock.listen(1) + +def send(sock, doc): + jdoc = json.dumps(doc) + sock.send(b'%d\n' % len(jdoc)) + sock.sendall(jdoc.encode('utf-8')) + +def recv(sock): + length_str = b'' + char = sock.recv(1) + while char != b'\n': + length_str += char + char = sock.recv(1) + + total = int(length_str) + + # use a memoryview to receive the data chunk by chunk efficiently + jdoc = memoryview(bytearray(total)) + next_offset = 0 + while total - next_offset > 0: + recv_size = sock.recv_into(jdoc[next_offset:], total - next_offset) + next_offset += recv_size + try: + doc = json.loads(jdoc.tobytes()) + except (TypeError, ValueError) as e: + raise Exception('Data received was not in JSON format') + return doc + +while True: + csock, client_address = sock.accept() + try: + doc = recv(csock) + + print("%s %s" % (doc["app"], doc["argv"]), file=sys.stderr) + oldenv = {} + for name in doc["env"].keys(): + oldenv[name] = os.environ.get(name, None) + os.environ[name] = doc["env"][name] + + try: + old_stdout = sys.stdout + old_stderr = sys.stderr + my_stdout = sys.stdout = StringIO() + my_stderr = sys.stderr = StringIO() + + class Exit(BaseException): + def __init__(self, status): + self.status = status + + def noexit(stat): + raise Exit(stat) + + sys.exit = noexit + + if doc["app"] == "openstack": + sh = osc_shell.OpenStackShell() + ret = sh.run(doc["argv"]) + else: + print("Unknown application %s" % doc["app"], file=sys.stderr) + ret = 1 + except Exit as e: + ret = e.status + finally: + sys.stdout = old_stdout + sys.stderr = old_stderr + + for name in oldenv.keys(): + if oldenv[name] is None: + del os.environ[name] + else: + os.environ[name] = oldenv[name] + + send(csock, { + "stdout": my_stdout.getvalue(), + "stderr": my_stderr.getvalue(), + "status": ret, + }) + + except BaseException as e: + print(e, file=sys.stderr) + finally: + csock.close() diff --git a/files/rpms-suse/baremetal 
b/files/rpms-suse/baremetal deleted file mode 100644 index 61f73eeae3..0000000000 --- a/files/rpms-suse/baremetal +++ /dev/null @@ -1 +0,0 @@ -dnsmasq diff --git a/files/rpms-suse/ceph b/files/rpms-suse/ceph deleted file mode 100644 index 8c4955df90..0000000000 --- a/files/rpms-suse/ceph +++ /dev/null @@ -1,3 +0,0 @@ -ceph # NOPRIME -lsb -xfsprogs diff --git a/files/rpms-suse/cinder b/files/rpms-suse/cinder deleted file mode 100644 index b39cc79a27..0000000000 --- a/files/rpms-suse/cinder +++ /dev/null @@ -1,3 +0,0 @@ -lvm2 -qemu-tools -tgt # NOPRIME diff --git a/files/rpms-suse/dstat b/files/rpms-suse/dstat deleted file mode 100644 index 2b643b8b1b..0000000000 --- a/files/rpms-suse/dstat +++ /dev/null @@ -1 +0,0 @@ -dstat diff --git a/files/rpms-suse/general b/files/rpms-suse/general deleted file mode 100644 index 0de0876dcd..0000000000 --- a/files/rpms-suse/general +++ /dev/null @@ -1,33 +0,0 @@ -apache2 -apache2-devel -bc -ca-certificates-mozilla -curl -gcc -gcc-c++ -git-core -graphviz # docs -iputils -libffi-devel # pyOpenSSL -libjpeg8-devel # Pillow 3.0.0 -libopenssl-devel # to rebuild pyOpenSSL if needed -libxslt-devel # lxml -lsof # useful when debugging -make -net-tools -openssh -openssl -pcre-devel # python-pcre -postgresql-devel # psycopg2 -psmisc -python3-systemd -python-cmd2 # dist:opensuse-12.3 -python-devel # pyOpenSSL -python-xml -tar -tcpdump -unzip -util-linux -wget -which -zlib-devel diff --git a/files/rpms-suse/horizon b/files/rpms-suse/horizon deleted file mode 100644 index 753ea76e04..0000000000 --- a/files/rpms-suse/horizon +++ /dev/null @@ -1,2 +0,0 @@ -apache2-mod_wsgi # NOPRIME -apache2 # NOPRIME diff --git a/files/rpms-suse/keystone b/files/rpms-suse/keystone deleted file mode 100644 index 66cfc23423..0000000000 --- a/files/rpms-suse/keystone +++ /dev/null @@ -1,4 +0,0 @@ -cyrus-sasl-devel -memcached -openldap2-devel -sqlite3 diff --git a/files/rpms-suse/ldap b/files/rpms-suse/ldap deleted file mode 100644 index 46d26f0796..0000000000 --- a/files/rpms-suse/ldap +++ /dev/null @@ -1,3 +0,0 @@ -openldap2 -openldap2-client -python-ldap diff --git a/files/rpms-suse/n-api b/files/rpms-suse/n-api deleted file mode 100644 index 0f08daace3..0000000000 --- a/files/rpms-suse/n-api +++ /dev/null @@ -1 +0,0 @@ -python-dateutil diff --git a/files/rpms-suse/n-cpu b/files/rpms-suse/n-cpu deleted file mode 100644 index 9c724cb9d8..0000000000 --- a/files/rpms-suse/n-cpu +++ /dev/null @@ -1,10 +0,0 @@ -cdrkit-cdrtools-compat # dist:sle12 -cryptsetup -dosfstools -libosinfo -lvm2 -mkisofs # not:sle12 -open-iscsi -sg3_utils -# Stuff for diablo volumes -sysfsutils diff --git a/files/rpms-suse/neutron-agent b/files/rpms-suse/neutron-agent deleted file mode 100644 index ea8819e884..0000000000 --- a/files/rpms-suse/neutron-agent +++ /dev/null @@ -1 +0,0 @@ -ipset diff --git a/files/rpms-suse/neutron-common b/files/rpms-suse/neutron-common deleted file mode 100644 index e3799a9353..0000000000 --- a/files/rpms-suse/neutron-common +++ /dev/null @@ -1,12 +0,0 @@ -acl -dnsmasq -dnsmasq-utils # dist:opensuse-12.3,opensuse-13.1 -ebtables -haproxy # to serve as metadata proxy inside router/dhcp namespaces -iptables -iputils -rabbitmq-server # NOPRIME -radvd # NOPRIME -sqlite3 -sudo -vlan diff --git a/files/rpms-suse/neutron-l3 b/files/rpms-suse/neutron-l3 deleted file mode 100644 index a7a190c063..0000000000 --- a/files/rpms-suse/neutron-l3 +++ /dev/null @@ -1,2 +0,0 @@ -conntrack-tools -keepalived diff --git a/files/rpms-suse/nova b/files/rpms-suse/nova deleted file mode 100644 index 
9923760750..0000000000 --- a/files/rpms-suse/nova +++ /dev/null @@ -1,24 +0,0 @@ -cdrkit-cdrtools-compat # dist:sle12 -conntrack-tools -curl -dnsmasq -dnsmasq-utils # dist:opensuse-12.3,opensuse-13.1 -ebtables -gawk -iptables -iputils -kpartx -kvm # NOPRIME -libvirt # NOPRIME -libvirt-python # NOPRIME -# mkisofs is required for config_drive -mkisofs # not:sle12 -parted -polkit -# qemu as fallback if kvm cannot be used -qemu # NOPRIME -rabbitmq-server # NOPRIME -socat -sqlite3 -sudo -vlan diff --git a/files/rpms-suse/openvswitch b/files/rpms-suse/openvswitch deleted file mode 100644 index 53f8bb22cf..0000000000 --- a/files/rpms-suse/openvswitch +++ /dev/null @@ -1,3 +0,0 @@ - -openvswitch -openvswitch-switch diff --git a/files/rpms-suse/os-brick b/files/rpms-suse/os-brick deleted file mode 100644 index 67b33a9861..0000000000 --- a/files/rpms-suse/os-brick +++ /dev/null @@ -1,2 +0,0 @@ -lsscsi -open-iscsi diff --git a/files/rpms-suse/q-agt b/files/rpms-suse/q-agt deleted file mode 120000 index 99fe353094..0000000000 --- a/files/rpms-suse/q-agt +++ /dev/null @@ -1 +0,0 @@ -neutron-agent \ No newline at end of file diff --git a/files/rpms-suse/q-l3 b/files/rpms-suse/q-l3 deleted file mode 120000 index 0a5ca2a45f..0000000000 --- a/files/rpms-suse/q-l3 +++ /dev/null @@ -1 +0,0 @@ -neutron-l3 \ No newline at end of file diff --git a/files/rpms-suse/swift b/files/rpms-suse/swift deleted file mode 100644 index 3663b98545..0000000000 --- a/files/rpms-suse/swift +++ /dev/null @@ -1,6 +0,0 @@ -curl -liberasurecode-devel -memcached -sqlite3 -xfsprogs -xinetd diff --git a/files/rpms/ceph b/files/rpms/ceph index 64befc5f00..19f158fd57 100644 --- a/files/rpms/ceph +++ b/files/rpms/ceph @@ -1,3 +1,3 @@ ceph # NOPRIME -redhat-lsb-core +redhat-lsb-core # not:rhel9,openEuler-22.03 xfsprogs diff --git a/files/rpms/general b/files/rpms/general index cfcd7ff261..6f4572c708 100644 --- a/files/rpms/general +++ b/files/rpms/general @@ -1,32 +1,38 @@ bc curl dbus +gawk gcc gcc-c++ gettext # used for compiling message catalogs git-core +glibc-langpack-en # dist:rhel9 graphviz # needed only for docs httpd httpd-devel +iptables-nft # dist:rhel9,rhel10 iptables-services -java-1.8.0-openjdk-headless +java-1.8.0-openjdk-headless # not:rhel10 +java-21-openjdk-headless # dist:rhel10 libffi-devel libjpeg-turbo-devel # Pillow 3.0.0 libxml2-devel # lxml libxslt-devel # lxml libyaml-devel +mod_ssl # required for tls-proxy on centos 9 stream computes net-tools openssh-server openssl openssl-devel # to rebuild pyOpenSSL if needed -pcre-devel # for python-pcre +pcre2-devel # dist:rhel10 for python-pcre2 +pcre-devel # not:rhel10 for python-pcre pkgconfig postgresql-devel # psycopg2 psmisc python3-devel -python3-pip +python3-pip # not:openEuler-22.03 python3-systemd -redhat-rpm-config # missing dep for gcc hardening flags, see rhbz#1217376 +redhat-rpm-config # not:openEuler-22.03 missing dep for gcc hardening flags, see rhbz#1217376 tar tcpdump unzip diff --git a/files/rpms/n-cpu b/files/rpms/n-cpu index 68e5472685..3d50f3a062 100644 --- a/files/rpms/n-cpu +++ b/files/rpms/n-cpu @@ -1,9 +1,9 @@ cryptsetup dosfstools -genisoimage iscsi-initiator-utils libosinfo lvm2 sg3_utils # Stuff for diablo volumes sysfsutils +xorriso diff --git a/files/rpms/nova b/files/rpms/nova index 2218330230..d0f843bb60 100644 --- a/files/rpms/nova +++ b/files/rpms/nova @@ -1,16 +1,13 @@ conntrack-tools curl -dnsmasq # for q-dhcp -dnsmasq-utils # for dhcp_release ebtables -gawk -genisoimage # required for config_drive iptables iputils -kernel-modules 
+kernel-modules # not:openEuler-22.03 kpartx parted polkit rabbitmq-server # NOPRIME sqlite sudo +xorriso diff --git a/files/rpms/swift b/files/rpms/swift index 376c6f3df7..c3921a47d4 100644 --- a/files/rpms/swift +++ b/files/rpms/swift @@ -4,4 +4,3 @@ memcached rsync-daemon sqlite xfsprogs -xinetd diff --git a/files/swift/rsyncd.conf b/files/swift/rsyncd.conf index c49f716fa7..937d6c4b9a 100644 --- a/files/swift/rsyncd.conf +++ b/files/swift/rsyncd.conf @@ -6,74 +6,74 @@ address = 127.0.0.1 [account6612] max connections = 25 -path = %SWIFT_DATA_DIR%/1/node/ +path = %SWIFT_DATA_DIR%/1/ read only = false lock file = %SWIFT_DATA_DIR%/run/account6612.lock [account6622] max connections = 25 -path = %SWIFT_DATA_DIR%/2/node/ +path = %SWIFT_DATA_DIR%/2/ read only = false lock file = %SWIFT_DATA_DIR%/run/account6622.lock [account6632] max connections = 25 -path = %SWIFT_DATA_DIR%/3/node/ +path = %SWIFT_DATA_DIR%/3/ read only = false lock file = %SWIFT_DATA_DIR%/run/account6632.lock [account6642] max connections = 25 -path = %SWIFT_DATA_DIR%/4/node/ +path = %SWIFT_DATA_DIR%/4/ read only = false lock file = %SWIFT_DATA_DIR%/run/account6642.lock [container6611] max connections = 25 -path = %SWIFT_DATA_DIR%/1/node/ +path = %SWIFT_DATA_DIR%/1/ read only = false lock file = %SWIFT_DATA_DIR%/run/container6611.lock [container6621] max connections = 25 -path = %SWIFT_DATA_DIR%/2/node/ +path = %SWIFT_DATA_DIR%/2/ read only = false lock file = %SWIFT_DATA_DIR%/run/container6621.lock [container6631] max connections = 25 -path = %SWIFT_DATA_DIR%/3/node/ +path = %SWIFT_DATA_DIR%/3/ read only = false lock file = %SWIFT_DATA_DIR%/run/container6631.lock [container6641] max connections = 25 -path = %SWIFT_DATA_DIR%/4/node/ +path = %SWIFT_DATA_DIR%/4/ read only = false lock file = %SWIFT_DATA_DIR%/run/container6641.lock [object6613] max connections = 25 -path = %SWIFT_DATA_DIR%/1/node/ +path = %SWIFT_DATA_DIR%/1/ read only = false lock file = %SWIFT_DATA_DIR%/run/object6613.lock [object6623] max connections = 25 -path = %SWIFT_DATA_DIR%/2/node/ +path = %SWIFT_DATA_DIR%/2/ read only = false lock file = %SWIFT_DATA_DIR%/run/object6623.lock [object6633] max connections = 25 -path = %SWIFT_DATA_DIR%/3/node/ +path = %SWIFT_DATA_DIR%/3/ read only = false lock file = %SWIFT_DATA_DIR%/run/object6633.lock [object6643] max connections = 25 -path = %SWIFT_DATA_DIR%/4/node/ +path = %SWIFT_DATA_DIR%/4/ read only = false lock file = %SWIFT_DATA_DIR%/run/object6643.lock diff --git a/functions b/functions index fc87a5512d..829fc86c55 100644 --- a/functions +++ b/functions @@ -21,6 +21,7 @@ source ${FUNC_DIR}/inc/ini-config source ${FUNC_DIR}/inc/meta-config source ${FUNC_DIR}/inc/python source ${FUNC_DIR}/inc/rootwrap +source ${FUNC_DIR}/inc/async # Save trace setting _XTRACE_FUNCTIONS=$(set +o | grep xtrace) @@ -117,7 +118,7 @@ function _upload_image { useimport="--import" fi - openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name" --public --container-format "$container" --disk-format "$disk" $useimport $properties < "${image}" + openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name" --public --container-format "$container" --disk-format "$disk" $useimport $properties --file $(readlink -f "${image}") } # Retrieve an image from a URL and upload into Glance. @@ -132,17 +133,29 @@ function upload_image { local image image_fname image_name + local max_attempts=5 + # Create a directory for the downloaded image tarballs. 
mkdir -p $FILES/images image_fname=`basename "$image_url"` if [[ $image_url != file* ]]; then # Downloads the image (uec ami+akistyle), then extracts it. if [[ ! -f $FILES/$image_fname || "$(stat -c "%s" $FILES/$image_fname)" = "0" ]]; then - wget --progress=dot:giga -c $image_url -O $FILES/$image_fname - if [[ $? -ne 0 ]]; then - echo "Not found: $image_url" - return - fi + for attempt in `seq $max_attempts`; do + local rc=0 + wget --progress=dot:giga -c $image_url -O $FILES/$image_fname || rc=$? + if [[ $rc -ne 0 ]]; then + if [[ "$attempt" -eq "$max_attempts" ]]; then + echo "Not found: $image_url" + # Signal failure to download to the caller, so they can fail early + return 1 + fi + echo "Download failed, retrying in $attempt second, attempt: $attempt" + sleep $attempt + else + break + fi + done fi image="$FILES/${image_fname}" else @@ -279,31 +292,6 @@ function upload_image { return fi - # XenServer-vhd-ovf-format images are provided as .vhd.tgz - # and should not be decompressed prior to loading - if [[ "$image_url" =~ '.vhd.tgz' ]]; then - image_name="${image_fname%.vhd.tgz}" - local force_vm_mode="" - if [[ "$image_name" =~ 'cirros' ]]; then - # Cirros VHD image currently only boots in PV mode. - # Nova defaults to PV for all VHD images, but - # the glance setting is needed for booting - # directly from volume. - force_vm_mode="vm_mode=xen" - fi - _upload_image "$image_name" ovf vhd "$image" $force_vm_mode - return - fi - - # .xen-raw.tgz suggests a Xen capable raw image inside a tgz. - # and should not be decompressed prior to loading. - # Setting metadata, so PV mode is used. - if [[ "$image_url" =~ '.xen-raw.tgz' ]]; then - image_name="${image_fname%.xen-raw.tgz}" - _upload_image "$image_name" tgz raw "$image" vm_mode=xen - return - fi - if [[ "$image_url" =~ '.hds' ]]; then image_name="${image_fname%.hds}" vm_mode=${image_name##*-} @@ -438,10 +426,10 @@ function upload_image { # kernel for use when uploading the root filesystem. 
local kernel_id="" ramdisk_id=""; if [ -n "$kernel" ]; then - kernel_id=$(openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name-kernel" $(_image_properties_to_arg $img_property) --public --container-format aki --disk-format aki < "$kernel" | grep ' id ' | get_field 2) + kernel_id=$(openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name-kernel" $(_image_properties_to_arg $img_property) --public --container-format aki --disk-format aki --file $(readlink -f "$kernel") -f value -c id) fi if [ -n "$ramdisk" ]; then - ramdisk_id=$(openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name-ramdisk" $(_image_properties_to_arg $img_property) --public --container-format ari --disk-format ari < "$ramdisk" | grep ' id ' | get_field 2) + ramdisk_id=$(openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name-ramdisk" $(_image_properties_to_arg $img_property) --public --container-format ari --disk-format ari --file $(readlink -f "$ramdisk") -f value -c id) fi _upload_image "${image_name%.img}" ami ami "$image" ${kernel_id:+ kernel_id=$kernel_id} ${ramdisk_id:+ ramdisk_id=$ramdisk_id} $img_property fi @@ -707,6 +695,8 @@ function setup_colorized_logging { iniset $conf_file DEFAULT logging_default_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s" iniset $conf_file DEFAULT logging_debug_format_suffix "from (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d" iniset $conf_file DEFAULT logging_exception_prefix "%(color)s%(asctime)s.%(msecs)03d TRACE %(name)s %(instance)s" + # Enable or disable color for oslo.log + iniset $conf_file DEFAULT log_color $LOG_COLOR } function setup_systemd_logging { @@ -728,6 +718,9 @@ function setup_systemd_logging { iniset $conf_file DEFAULT logging_context_format_string "%(color)s%(levelname)s %(name)s [%(global_request_id)s %(request_id)s %(project_name)s %(user_name)s%(color)s] %(instance)s%(color)s%(message)s" iniset $conf_file DEFAULT logging_default_format_string "%(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s" iniset $conf_file DEFAULT logging_exception_prefix "ERROR %(name)s %(instance)s" + + # Enable or disable color for oslo.log + iniset $conf_file DEFAULT log_color $LOG_COLOR } function setup_standard_logging_identity { diff --git a/functions-common b/functions-common index 87d8c64804..c2042c4fef 100644 --- a/functions-common +++ b/functions-common @@ -43,13 +43,16 @@ declare -A -g GITREPO declare -A -g GITBRANCH declare -A -g GITDIR +# Systemd service file environment variables per service +declare -A -g SYSTEMD_ENV_VARS + KILL_PATH="$(which kill)" # Save these variables to .stackenv STACK_ENV_VARS="BASE_SQL_CONN DATA_DIR DEST ENABLED_SERVICES HOST_IP \ KEYSTONE_SERVICE_URI \ LOGFILE OS_CACERT SERVICE_HOST STACK_USER TLS_IP \ - HOST_IPV6 SERVICE_IP_VERSION" + HOST_IPV6 SERVICE_IP_VERSION TUNNEL_ENDPOINT_IP TUNNEL_IP_VERSION" # Saves significant environment variables to .stackenv for later use @@ -85,7 +88,7 @@ function write_clouds_yaml { if [ -f "$SSL_BUNDLE_FILE" ]; then CA_CERT_ARG="--os-cacert $SSL_BUNDLE_FILE" fi - # demo -> devstack + # devstack: user with the member role on demo project $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \ --file $CLOUDS_YAML \ --os-cloud devstack \ @@ -96,7 +99,29 @@ function write_clouds_yaml { --os-password $ADMIN_PASSWORD \ --os-project-name demo - # alt_demo -> devstack-alt + # 
devstack-admin: user with the admin role on the admin project + $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \ + --file $CLOUDS_YAML \ + --os-cloud devstack-admin \ + --os-region-name $REGION_NAME \ + $CA_CERT_ARG \ + --os-auth-url $KEYSTONE_SERVICE_URI \ + --os-username admin \ + --os-password $ADMIN_PASSWORD \ + --os-project-name admin + + # devstack-admin-demo: user with the admin role on the demo project + $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \ + --file $CLOUDS_YAML \ + --os-cloud devstack-admin-demo \ + --os-region-name $REGION_NAME \ + $CA_CERT_ARG \ + --os-auth-url $KEYSTONE_SERVICE_URI \ + --os-username admin \ + --os-password $ADMIN_PASSWORD \ + --os-project-name demo + + # devstack-alt: user with the member role on alt_demo project $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \ --file $CLOUDS_YAML \ --os-cloud devstack-alt \ @@ -107,18 +132,40 @@ function write_clouds_yaml { --os-password $ADMIN_PASSWORD \ --os-project-name alt_demo - # admin -> devstack-admin + # devstack-alt-member: user with the member role on alt_demo project $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \ --file $CLOUDS_YAML \ - --os-cloud devstack-admin \ + --os-cloud devstack-alt-member \ --os-region-name $REGION_NAME \ $CA_CERT_ARG \ --os-auth-url $KEYSTONE_SERVICE_URI \ - --os-username admin \ + --os-username alt_demo_member \ --os-password $ADMIN_PASSWORD \ - --os-project-name admin + --os-project-name alt_demo + + # devstack-alt-reader: user with the reader role on alt_demo project + $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \ + --file $CLOUDS_YAML \ + --os-cloud devstack-alt-reader \ + --os-region-name $REGION_NAME \ + $CA_CERT_ARG \ + --os-auth-url $KEYSTONE_SERVICE_URI \ + --os-username alt_demo_reader \ + --os-password $ADMIN_PASSWORD \ + --os-project-name alt_demo + + # devstack-reader: user with the reader role on demo project + $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \ + --file $CLOUDS_YAML \ + --os-cloud devstack-reader \ + --os-region-name $REGION_NAME \ + $CA_CERT_ARG \ + --os-auth-url $KEYSTONE_SERVICE_URI \ + --os-username demo_reader \ + --os-password $ADMIN_PASSWORD \ + --os-project-name demo - # admin with a system-scoped token -> devstack-system + # devstack-system-admin: user with the admin role on the system $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \ --file $CLOUDS_YAML \ --os-cloud devstack-system-admin \ @@ -129,6 +176,28 @@ function write_clouds_yaml { --os-password $ADMIN_PASSWORD \ --os-system-scope all + # devstack-system-member: user with the member role on the system + $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \ + --file $CLOUDS_YAML \ + --os-cloud devstack-system-member \ + --os-region-name $REGION_NAME \ + $CA_CERT_ARG \ + --os-auth-url $KEYSTONE_SERVICE_URI \ + --os-username system_member \ + --os-password $ADMIN_PASSWORD \ + --os-system-scope all + + # devstack-system-reader: user with the reader role on the system + $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \ + --file $CLOUDS_YAML \ + --os-cloud devstack-system-reader \ + --os-region-name $REGION_NAME \ + $CA_CERT_ARG \ + --os-auth-url $KEYSTONE_SERVICE_URI \ + --os-username system_reader \ + --os-password $ADMIN_PASSWORD \ + --os-system-scope all + cat >> $CLOUDS_YAML < +# +# Convert True|False to int 1 or 0 +# This function can be used to convert the output of trueorfalse +# to an int follow c conventions where false is 0 and 1 it true. 
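# Illustrative usage (USE_FOO is a placeholder variable):
#   USE_FOO=$(trueorfalse False USE_FOO)
#   foo_flag=$(bool_to_int "$USE_FOO")    # "True" -> 1, "False" -> 0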
+function bool_to_int { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + if [ -z $1 ]; then + die $LINENO "Bool value required" + fi + if [[ $1 == "True" ]] ; then + echo '1' + else + echo '0' + fi + $xtrace +} + + function isset { [[ -v "$1" ]] } @@ -314,9 +404,9 @@ function warn { # such as "install_package" further abstract things in better ways. # # ``os_VENDOR`` - vendor name: ``Ubuntu``, ``Fedora``, etc -# ``os_RELEASE`` - major release: ``16.04`` (Ubuntu), ``23`` (Fedora) +# ``os_RELEASE`` - major release: ``22.04`` (Ubuntu), ``23`` (Fedora) # ``os_PACKAGE`` - package type: ``deb`` or ``rpm`` -# ``os_CODENAME`` - vendor's codename for release: ``xenial`` +# ``os_CODENAME`` - vendor's codename for release: ``jammy`` declare -g os_VENDOR os_RELEASE os_PACKAGE os_CODENAME @@ -333,7 +423,7 @@ function _ensure_lsb_release { elif [[ -x $(command -v zypper 2>/dev/null) ]]; then sudo zypper -n install lsb-release elif [[ -x $(command -v dnf 2>/dev/null) ]]; then - sudo dnf install -y redhat-lsb-core + sudo dnf install -y python3-distro || sudo dnf install -y openeuler-lsb else die $LINENO "Unable to find or auto-install lsb_release" fi @@ -346,14 +436,21 @@ function _ensure_lsb_release { # - os_VENDOR # - os_PACKAGE function GetOSVersion { - # We only support distros that provide a sane lsb_release - _ensure_lsb_release + # CentOS Stream 9 or later and RHEL 9 or later do not provide lsb_release + source /etc/os-release + if [[ "${ID}" =~ (almalinux|centos|rocky|rhel) ]]; then + os_RELEASE=${VERSION_ID} + os_CODENAME=$(echo $VERSION | grep -oP '(?<=[(])[^)]*' || echo 'n/a') + os_VENDOR=$(echo $NAME | tr -d '[:space:]') + else + _ensure_lsb_release - os_RELEASE=$(lsb_release -r -s) - os_CODENAME=$(lsb_release -c -s) - os_VENDOR=$(lsb_release -i -s) + os_RELEASE=$(lsb_release -r -s) + os_CODENAME=$(lsb_release -c -s) + os_VENDOR=$(lsb_release -i -s) + fi - if [[ $os_VENDOR =~ (Debian|Ubuntu|LinuxMint) ]]; then + if [[ $os_VENDOR =~ (Debian|Ubuntu) ]]; then os_PACKAGE="deb" else os_PACKAGE="rpm" @@ -371,34 +468,24 @@ declare -g DISTRO function GetDistro { GetOSVersion - if [[ "$os_VENDOR" =~ (Ubuntu) || "$os_VENDOR" =~ (Debian) || \ - "$os_VENDOR" =~ (LinuxMint) ]]; then - # 'Everyone' refers to Ubuntu / Debian / Mint releases by + if [[ "$os_VENDOR" =~ (Ubuntu) || "$os_VENDOR" =~ (Debian) ]]; then + # 'Everyone' refers to Ubuntu / Debian releases by # the code name adjective DISTRO=$os_CODENAME elif [[ "$os_VENDOR" =~ (Fedora) ]]; then # For Fedora, just use 'f' and the release DISTRO="f$os_RELEASE" - elif is_opensuse; then - DISTRO="opensuse-$os_RELEASE" - # Tumbleweed uses "n/a" as a codename, and the release is a datestring - # like 20180218, so not very useful. Leap however uses a release - # with a "dot", so for example 15.0 - [ "$os_CODENAME" = "n/a" -a "$os_RELEASE" = "${os_RELEASE/\./}" ] && \ - DISTRO="opensuse-tumbleweed" - elif is_suse_linux_enterprise; then - # just use major release - DISTRO="sle${os_RELEASE%.*}" elif [[ "$os_VENDOR" =~ (Red.*Hat) || \ "$os_VENDOR" =~ (CentOS) || \ + "$os_VENDOR" =~ (AlmaLinux) || \ "$os_VENDOR" =~ (Scientific) || \ "$os_VENDOR" =~ (OracleServer) || \ + "$os_VENDOR" =~ (RockyLinux) || \ "$os_VENDOR" =~ (Virtuozzo) ]]; then - # Drop the . release as we assume it's compatible - # XXX re-evaluate when we get RHEL10 - DISTRO="rhel${os_RELEASE::1}" - elif [[ "$os_VENDOR" =~ (XenServer) ]]; then - DISTRO="xs${os_RELEASE%.*}" + MAJOR_VERSION=$(echo $os_RELEASE | cut -d. 
-f1) + DISTRO="rhel${MAJOR_VERSION}" + elif [[ "$os_VENDOR" =~ (openEuler) ]]; then + DISTRO="openEuler-$os_RELEASE" else # We can't make a good choice here. Setting a sensible DISTRO # is part of the problem, but not the major issue -- we really @@ -442,7 +529,7 @@ function is_oraclelinux { # Determine if current distribution is a Fedora-based distribution -# (Fedora, RHEL, CentOS, etc). +# (Fedora, RHEL, CentOS, Rocky, etc). # is_fedora function is_fedora { if [[ -z "$os_VENDOR" ]]; then @@ -450,44 +537,17 @@ function is_fedora { fi [ "$os_VENDOR" = "Fedora" ] || [ "$os_VENDOR" = "Red Hat" ] || \ + [ "$os_VENDOR" = "openEuler" ] || \ [ "$os_VENDOR" = "RedHatEnterpriseServer" ] || \ [ "$os_VENDOR" = "RedHatEnterprise" ] || \ + [ "$os_VENDOR" = "RedHatEnterpriseLinux" ] || \ + [ "$os_VENDOR" = "RockyLinux" ] || \ [ "$os_VENDOR" = "CentOS" ] || [ "$os_VENDOR" = "CentOSStream" ] || \ + [ "$os_VENDOR" = "AlmaLinux" ] || \ [ "$os_VENDOR" = "OracleServer" ] || [ "$os_VENDOR" = "Virtuozzo" ] } -# Determine if current distribution is a SUSE-based distribution -# (openSUSE, SLE). -# is_suse -function is_suse { - is_opensuse || is_suse_linux_enterprise -} - - -# Determine if current distribution is an openSUSE distribution -# is_opensuse -function is_opensuse { - if [[ -z "$os_VENDOR" ]]; then - GetOSVersion - fi - - [[ "$os_VENDOR" =~ (openSUSE) ]] -} - - -# Determine if current distribution is a SUSE Linux Enterprise (SLE) -# distribution -# is_suse_linux_enterprise -function is_suse_linux_enterprise { - if [[ -z "$os_VENDOR" ]]; then - GetOSVersion - fi - - [[ "$os_VENDOR" =~ (^SUSE) ]] -} - - # Determine if current distribution is an Ubuntu-based distribution # It will also detect non-Ubuntu but Debian-based distros # is_ubuntu @@ -498,7 +558,14 @@ function is_ubuntu { [ "$os_PACKAGE" = "deb" ] } - +# Determine if current distribution is an openEuler distribution +# is_openeuler +function is_openeuler { + if [[ -z "$os_PACKAGE" ]]; then + GetOSVersion + fi + [ "$os_VENDOR" = "openEuler" ] +} # Git Functions # ============= @@ -549,7 +616,7 @@ function git_clone { if [[ "$ERROR_ON_CLONE" = "True" ]]; then echo "The $git_dest project was not found; if this is a gate job, add" echo "the project to 'required-projects' in the job definition." - die $LINENO "Cloning not allowed in this configuration" + die $LINENO "ERROR_ON_CLONE is set to True so cloning not allowed in this configuration" fi git_timed clone $git_clone_flags $git_remote $git_dest fi @@ -561,10 +628,12 @@ function git_clone { if [[ "$ERROR_ON_CLONE" = "True" ]]; then echo "The $git_dest project was not found; if this is a gate job, add" echo "the project to the \$PROJECTS variable in the job definition." 
- die $LINENO "Cloning not allowed in this configuration" + die $LINENO "ERROR_ON_CLONE is set to True so cloning not allowed in this configuration" fi - # '--branch' can also take tags - git_timed clone $git_clone_flags $git_remote $git_dest --branch $git_ref + git_timed clone --no-checkout $git_clone_flags $git_remote $git_dest + cd $git_dest + git_timed fetch $git_clone_flags origin $git_ref + git_timed checkout FETCH_HEAD elif [[ "$RECLONE" = "True" ]]; then # if it does exist then simulate what clone does if asked to RECLONE cd $git_dest @@ -574,7 +643,7 @@ function git_clone { # remove the existing ignored files (like pyc) as they cause breakage # (due to the py files having older timestamps than our pyc, so python # thinks the pyc files are correct using them) - find $git_dest -name '*.pyc' -delete + sudo find $git_dest -name '*.pyc' -delete # handle git_ref accordingly to type (tag, branch) if [[ -n "`git show-ref refs/tags/$git_ref`" ]]; then @@ -590,6 +659,18 @@ function git_clone { fi fi + # NOTE(ianw) 2022-04-13 : commit [1] has broken many assumptions + # about how we clone and work with repos. Mark them safe globally + # as a work-around. + # + # NOTE(danms): On bionic (and likely others) git-config may write + # ~stackuser/.gitconfig if not run with sudo -H. Using --system + # writes these changes to /etc/gitconfig which is more + # discoverable anyway. + # + # [1] https://github.com/git/git/commit/8959555cee7ec045958f9b6dd62e541affb7e7d9 + sudo git config --system --add safe.directory ${git_dest} + # print out the results so we know what change was used in the logs cd $git_dest git show --oneline | head -1 @@ -689,7 +770,7 @@ function get_default_host_ip { if [ -z "$host_ip" -o "$host_ip" == "dhcp" ]; then host_ip="" # Find the interface used for the default route - host_ip_iface=${host_ip_iface:-$(ip -f $af route | awk '/default/ {print $5}' | head -1)} + host_ip_iface=${host_ip_iface:-$(ip -f $af route list match default table all | grep via | awk '/default/ {print $5}' | head -1)} local host_ips host_ips=$(LC_ALL=C ip -f $af addr show ${host_ip_iface} | sed /temporary/d |awk /$af'/ {split($2,parts,"/"); print parts[1]}') local ip @@ -792,14 +873,9 @@ function policy_add { # Usage: get_or_create_domain function get_or_create_domain { local domain_id - # Gets domain id domain_id=$( - # Gets domain id - openstack domain show $1 \ - -f value -c id 2>/dev/null || - # Creates new domain - openstack domain create $1 \ - --description "$2" \ + openstack --os-cloud devstack-system-admin domain create $1 \ + --description "$2" --or-show \ -f value -c id ) echo $domain_id @@ -813,7 +889,7 @@ function get_or_create_group { # Gets group id group_id=$( # Creates new group with --or-show - openstack group create $1 \ + openstack --os-cloud devstack-system-admin group create $1 \ --domain $2 --description "$desc" --or-show \ -f value -c id ) @@ -832,7 +908,7 @@ function get_or_create_user { # Gets user id user_id=$( # Creates new user with --or-show - openstack user create \ + openstack --os-cloud devstack-system-admin user create \ $1 \ --password "$2" \ --domain=$3 \ @@ -849,7 +925,7 @@ function get_or_create_project { local project_id project_id=$( # Creates new project with --or-show - openstack project create $1 \ + openstack --os-cloud devstack-system-admin project create $1 \ --domain=$2 \ --or-show -f value -c id ) @@ -862,7 +938,7 @@ function get_or_create_role { local role_id role_id=$( # Creates role with --or-show - openstack role create $1 \ + openstack --os-cloud 
devstack-system-admin role create $1 \ --or-show -f value -c id ) echo $role_id @@ -888,29 +964,22 @@ function _get_domain_args { # Usage: get_or_add_user_project_role [ ] function get_or_add_user_project_role { local user_role_id + local domain_args domain_args=$(_get_domain_args $4 $5) - # Gets user role id - user_role_id=$(openstack role assignment list \ + # Note this is idempotent so we are safe across multiple + # duplicate calls. + openstack --os-cloud devstack-system-admin role add $1 \ + --user $2 \ + --project $3 \ + $domain_args + user_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \ --role $1 \ --user $2 \ --project $3 \ $domain_args \ - | grep '^|\s[a-f0-9]\+' | get_field 1) - if [[ -z "$user_role_id" ]]; then - # Adds role to user and get it - openstack role add $1 \ - --user $2 \ - --project $3 \ - $domain_args - user_role_id=$(openstack role assignment list \ - --role $1 \ - --user $2 \ - --project $3 \ - $domain_args \ - | grep '^|\s[a-f0-9]\+' | get_field 1) - fi + -c Role -f value) echo $user_role_id } @@ -918,22 +987,48 @@ function get_or_add_user_project_role { # Usage: get_or_add_user_domain_role function get_or_add_user_domain_role { local user_role_id - # Gets user role id - user_role_id=$(openstack role assignment list \ + + # Note this is idempotent so we are safe across multiple + # duplicate calls. + openstack --os-cloud devstack-system-admin role add $1 \ + --user $2 \ + --domain $3 + user_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \ --role $1 \ --user $2 \ --domain $3 \ - | grep '^|\s[a-f0-9]\+' | get_field 1) + -c Role -f value) + + echo $user_role_id +} + +# Gets or adds user role to system +# Usage: get_or_add_user_system_role [] +function get_or_add_user_system_role { + local user_role_id + local domain_args + + domain_args=$(_get_domain_args $4) + + # Gets user role id + user_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \ + --role $1 \ + --user $2 \ + --system $3 \ + $domain_args \ + -f value -c Role) if [[ -z "$user_role_id" ]]; then # Adds role to user and get it - openstack role add $1 \ + openstack --os-cloud devstack-system-admin role add $1 \ --user $2 \ - --domain $3 - user_role_id=$(openstack role assignment list \ + --system $3 \ + $domain_args + user_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \ --role $1 \ --user $2 \ - --domain $3 \ - | grep '^|\s[a-f0-9]\+' | get_field 1) + --system $3 \ + $domain_args \ + -f value -c Role) fi echo $user_role_id } @@ -942,23 +1037,18 @@ function get_or_add_user_domain_role { # Usage: get_or_add_group_project_role function get_or_add_group_project_role { local group_role_id - # Gets group role id - group_role_id=$(openstack role assignment list \ + + # Note this is idempotent so we are safe across multiple + # duplicate calls. 
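# Illustrative call (hypothetical group/project names): re-running
#   get_or_add_group_project_role member developers demo
# is harmless because "openstack role add" succeeds even when the assignment
# already exists, so no list-before-add check is required.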
+ openstack role add $1 \ + --group $2 \ + --project $3 + group_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \ --role $1 \ --group $2 \ --project $3 \ - -f value) - if [[ -z "$group_role_id" ]]; then - # Adds role to group and get it - openstack role add $1 \ - --group $2 \ - --project $3 - group_role_id=$(openstack role assignment list \ - --role $1 \ - --group $2 \ - --project $3 \ - -f value) - fi + -f value -c Role) + echo $group_role_id } @@ -969,9 +1059,9 @@ function get_or_create_service { # Gets service id service_id=$( # Gets service id - openstack service show $2 -f value -c id 2>/dev/null || + openstack --os-cloud devstack-system-admin service show $2 -f value -c id 2>/dev/null || # Creates new service if not exists - openstack service create \ + openstack --os-cloud devstack-system-admin service create \ $2 \ --name $1 \ --description="$3" \ @@ -984,14 +1074,14 @@ function get_or_create_service { # Usage: _get_or_create_endpoint_with_interface function _get_or_create_endpoint_with_interface { local endpoint_id - endpoint_id=$(openstack endpoint list \ + endpoint_id=$(openstack --os-cloud devstack-system-admin endpoint list \ --service $1 \ --interface $2 \ --region $4 \ -c ID -f value) if [[ -z "$endpoint_id" ]]; then # Creates new endpoint - endpoint_id=$(openstack endpoint create \ + endpoint_id=$(openstack --os-cloud devstack-system-admin endpoint create \ $1 $2 $3 --region $4 -f value -c id) fi @@ -1025,7 +1115,7 @@ function get_or_create_endpoint { # Get a URL from the identity service # Usage: get_endpoint_url function get_endpoint_url { - echo $(openstack endpoint list \ + echo $(openstack --os-cloud devstack-system-admin endpoint list \ --service $1 --interface $2 \ -c URL -f value) } @@ -1039,6 +1129,17 @@ function is_ironic_hardware { return 1 } +function is_ironic_enforce_scope { + is_service_enabled ironic && [[ "$IRONIC_ENFORCE_SCOPE" == "True" || "$ENFORCE_SCOPE" == "True" ]] && return 0 + return 1 +} + +function is_ironic_sharded { + # todo(JayF): Support >1 shard with multiple n-cpu instances for each + is_service_enabled ironic && [[ "$IRONIC_SHARDS" == "1" ]] && return 0 + return 1 +} + # Package Functions # ================= @@ -1055,8 +1156,6 @@ function _get_package_dir { pkg_dir=$base_dir/debs elif is_fedora; then pkg_dir=$base_dir/rpms - elif is_suse; then - pkg_dir=$base_dir/rpms-suse else exit_distro_not_supported "list of packages" fi @@ -1331,8 +1430,6 @@ function real_install_package { apt_get install "$@" elif is_fedora; then yum_install "$@" - elif is_suse; then - zypper_install "$@" else exit_distro_not_supported "installing packages" fi @@ -1374,8 +1471,6 @@ function uninstall_package { apt_get purge "$@" elif is_fedora; then sudo dnf remove -y "$@" ||: - elif is_suse; then - sudo zypper remove -y "$@" ||: else exit_distro_not_supported "uninstalling packages" fi @@ -1444,6 +1539,7 @@ function write_user_unit_file { local command="$2" local group=$3 local user=$4 + local env_vars="$5" local extra="" if [[ -n "$group" ]]; then extra="Group=$group" @@ -1452,11 +1548,15 @@ function write_user_unit_file { mkdir -p $SYSTEMD_DIR iniset -sudo $unitfile "Unit" "Description" "Devstack $service" + iniset -sudo $unitfile "Service" "Environment" "\"PATH=$PATH\"" iniset -sudo $unitfile "Service" "User" "$user" iniset -sudo $unitfile "Service" "ExecStart" "$command" iniset -sudo $unitfile "Service" "KillMode" "process" iniset -sudo $unitfile "Service" "TimeoutStopSec" "300" iniset -sudo $unitfile "Service" "ExecReload" "$KILL_PATH 
-HUP \$MAINPID" + if [[ -n "$env_vars" ]] ; then + iniset -sudo $unitfile "Service" "Environment" "$env_vars" + fi if [[ -n "$group" ]]; then iniset -sudo $unitfile "Service" "Group" "$group" fi @@ -1471,10 +1571,12 @@ function write_uwsgi_user_unit_file { local command="$2" local group=$3 local user=$4 + local env_vars="$5" local unitfile="$SYSTEMD_DIR/$service" mkdir -p $SYSTEMD_DIR iniset -sudo $unitfile "Unit" "Description" "Devstack $service" + iniset -sudo $unitfile "Service" "Environment" "\"PATH=$PATH\"" iniset -sudo $unitfile "Service" "SyslogIdentifier" "$service" iniset -sudo $unitfile "Service" "User" "$user" iniset -sudo $unitfile "Service" "ExecStart" "$command" @@ -1485,6 +1587,9 @@ function write_uwsgi_user_unit_file { iniset -sudo $unitfile "Service" "NotifyAccess" "all" iniset -sudo $unitfile "Service" "RestartForceExitStatus" "100" + if [[ -n "$env_vars" ]] ; then + iniset -sudo $unitfile "Service" "Environment" "$env_vars" + fi if [[ -n "$group" ]]; then iniset -sudo $unitfile "Service" "Group" "$group" fi @@ -1532,10 +1637,20 @@ function _run_under_systemd { local systemd_service="devstack@$service.service" local group=$3 local user=${4:-$STACK_USER} + if [[ -z "$user" ]]; then + user=$STACK_USER + fi + local env_vars="$5" + if [[ -v SYSTEMD_ENV_VARS[$service] ]]; then + env_vars="${SYSTEMD_ENV_VARS[$service]} $env_vars" + fi if [[ "$command" =~ "uwsgi" ]] ; then - write_uwsgi_user_unit_file $systemd_service "$cmd" "$group" "$user" + if [[ "$GLOBAL_VENV" == "True" ]] ; then + cmd="$cmd --venv $DEVSTACK_VENV" + fi + write_uwsgi_user_unit_file $systemd_service "$cmd" "$group" "$user" "$env_vars" else - write_user_unit_file $systemd_service "$cmd" "$group" "$user" + write_user_unit_file $systemd_service "$cmd" "$group" "$user" "$env_vars" fi $SYSTEMCTL enable $systemd_service @@ -1556,18 +1671,20 @@ function is_running { # If the command includes shell metachatacters (;<>*) it must be run using a shell # If an optional group is provided sg will be used to run the # command as that group. -# run_process service "command-line" [group] [user] +# run_process service "command-line" [group] [user] [env_vars] +# env_vars must be a space separated list of variable assigments, ie: "A=1 B=2" function run_process { local service=$1 local command="$2" local group=$3 local user=$4 + local env_vars="$5" local name=$service time_start "run_process" if is_service_enabled $service; then - _run_under_systemd "$name" "$command" "$group" "$user" + _run_under_systemd "$name" "$command" "$group" "$user" "$env_vars" fi time_stop "run_process" } @@ -2323,6 +2440,11 @@ function time_stop { _TIME_TOTAL[$name]=$(($total + $elapsed_time)) } +function install_openstack_cli_server { + export PATH=$TOP_DIR/files/openstack-cli-server:$PATH + run_process openstack-cli-server "$PYTHON $TOP_DIR/files/openstack-cli-server/openstack-cli-server" +} + function oscwrap { local xtrace xtrace=$(set +o | grep xtrace) @@ -2418,6 +2540,11 @@ function clean_pyc_files { fi } +function is_fips_enabled { + fips=`cat /proc/sys/crypto/fips_enabled` + [ "$fips" == "1" ] +} + # Restore xtrace $_XTRACE_FUNCTIONS_COMMON diff --git a/inc/async b/inc/async new file mode 100644 index 0000000000..56338f5343 --- /dev/null +++ b/inc/async @@ -0,0 +1,256 @@ +#!/bin/bash +# +# Symbolic asynchronous tasks for devstack +# +# Usage: +# +# async_runfunc my_shell_func foo bar baz +# +# ... do other stuff ... 
+# +# async_wait my_shell_func +# + +DEVSTACK_PARALLEL=$(trueorfalse True DEVSTACK_PARALLEL) +_ASYNC_BG_TIME=0 + +# Keep track of how much total time was spent in background tasks +# Takes a job runtime in ms. +function _async_incr_bg_time { + local elapsed_ms="$1" + _ASYNC_BG_TIME=$(($_ASYNC_BG_TIME + $elapsed_ms)) +} + +# Get the PID of a named future to wait on +function async_pidof { + local name="$1" + local inifile="${DEST}/async/${name}.ini" + + if [ -f "$inifile" ]; then + iniget $inifile job pid + else + echo 'UNKNOWN' + return 1 + fi +} + +# Log a message about a job. If the message contains "%command" then the +# full command line of the job will be substituted in the output +function async_log { + local name="$1" + shift + local message="$*" + local inifile=${DEST}/async/${name}.ini + local pid + local command + + pid=$(iniget $inifile job pid) + command=$(iniget $inifile job command | tr '#' '-') + message=$(echo "$message" | sed "s#%command#$command#g") + + echo "[$BASHPID Async ${name}:${pid}]: $message" +} + +# Inner function that actually runs the requested task. We wrap it like this +# just so we can emit a finish message as soon as the work is done, to make +# it easier to find the tracking just before an error. +function async_inner { + local name="$1" + local rc + local fifo="${DEST}/async/${name}.fifo" + shift + set -o xtrace + if $* >${DEST}/async/${name}.log 2>&1; then + rc=0 + set +o xtrace + async_log "$name" "finished successfully" + else + rc=$? + set +o xtrace + async_log "$name" "FAILED with rc $rc" + fi + iniset ${DEST}/async/${name}.ini job end_time $(date "+%s%3N") + # Block on the fifo until we are signaled to exit by the main process + cat $fifo + return $rc +} + +# Run something async. Takes a symbolic name and a list of arguments of +# what to run. Ideally this would be rarely used and async_runfunc() would +# be used everywhere for readability. +# +# This spawns the work in a background worker, records a "future" to be +# collected by a later call to async_wait() +function async_run { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + + local name="$1" + shift + local inifile=${DEST}/async/${name}.ini + local fifo=${DEST}/async/${name}.fifo + + touch $inifile + iniset $inifile job command "$*" + iniset $inifile job start_time $(date +%s%3N) + + if [[ "$DEVSTACK_PARALLEL" = "True" ]]; then + mkfifo $fifo + async_inner $name $* & + iniset $inifile job pid $! + async_log "$name" "running: %command" + $xtrace + else + iniset $inifile job pid "self" + async_log "$name" "Running synchronously: %command" + $xtrace + $* + return $? + fi +} + +# Shortcut for running a shell function async. Uses the function name as the +# async name. +function async_runfunc { + async_run $1 $* +} + +# Dump some information to help debug a failed wait +function async_wait_dump { + local failpid=$1 + + echo "=== Wait failure dump from $BASHPID ===" + echo "Processes:" + ps -f + echo "Waiting jobs:" + for name in $(ls ${DEST}/async/*.ini); do + echo "Job $name :" + cat "$name" + done + echo "Failed PID status:" + sudo cat /proc/$failpid/status + sudo cat /proc/$failpid/cmdline + echo "=== End wait failure dump ===" +} + +# Wait for an async future to complete. May return immediately if already +# complete, or of the future has already been waited on (avoid this). May +# block until the future completes. 
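# Typical flow (sketch, using a placeholder function name):
#   async_runfunc install_foo        # runs install_foo in the background
#   ...                              # unrelated serial work happens here
#   async_wait install_foo           # blocks, replays the job log, returns rc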
+function async_wait { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + + local pid rc running inifile runtime fifo + rc=0 + for name in $*; do + running=$(ls ${DEST}/async/*.ini 2>/dev/null | wc -l) + inifile="${DEST}/async/${name}.ini" + fifo="${DEST}/async/${name}.fifo" + + if pid=$(async_pidof "$name"); then + async_log "$name" "Waiting for completion of %command" \ + "running on PID $pid ($running other jobs running)" + time_start async_wait + if [[ "$pid" != "self" ]]; then + # Signal the child to go ahead and exit since we are about to + # wait for it to collect its status. + async_log "$name" "Signaling child to exit" + echo WAKEUP > $fifo + async_log "$name" "Signaled" + # Do not actually call wait if we ran synchronously + if wait $pid; then + rc=0 + else + rc=$? + fi + cat ${DEST}/async/${name}.log + rm -f $fifo + fi + time_stop async_wait + local start_time + local end_time + start_time=$(iniget $inifile job start_time) + end_time=$(iniget $inifile job end_time) + _async_incr_bg_time $(($end_time - $start_time)) + runtime=$((($end_time - $start_time) / 1000)) + async_log "$name" "finished %command with result" \ + "$rc in $runtime seconds" + rm -f $inifile + if [ $rc -ne 0 ]; then + async_wait_dump $pid + echo Stopping async wait due to error: $* + break + fi + else + # This could probably be removed - it is really just here + # to help notice if you wait for something by the wrong + # name, but it also shows up for things we didn't start + # because they were not enabled. + echo Not waiting for async task $name that we never started or \ + has already been waited for + fi + done + + $xtrace + return $rc +} + +# Check for uncollected futures and wait on them +function async_cleanup { + local name + + if [[ "$DEVSTACK_PARALLEL" != "True" ]]; then + return 0 + fi + + for inifile in $(find ${DEST}/async -name '*.ini'); do + name=$(basename $pidfile .ini) + echo "WARNING: uncollected async future $name" + async_wait $name || true + done +} + +# Make sure our async dir is created and clean +function async_init { + local async_dir=${DEST}/async + + # Clean any residue if present from previous runs + rm -Rf $async_dir + + # Make sure we have a state directory + mkdir -p $async_dir +} + +function async_print_timing { + local bg_time_minus_wait + local elapsed_time + local serial_time + local speedup + + if [[ "$DEVSTACK_PARALLEL" != "True" ]]; then + return 0 + fi + + # The logic here is: All the background task time would be + # serialized if we did not do them in the background. So we can + # add that to the elapsed time for the whole run. However, time we + # spend waiting for async things to finish adds to the elapsed + # time, but is time where we're not doing anything useful. Thus, + # we substract that from the would-be-serialized time. 
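# Worked example (made-up numbers): with 600s of total background work, of
# which 200s were spent blocked inside async_wait, and a 900s wall-clock run:
#   bg_time_minus_wait = 600 - 200 = 400
#   serial_time        = 900 + 400 = 1300
#   speedup            = 1300 / 900 ~= 1.44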
+ + bg_time_minus_wait=$((\ + ($_ASYNC_BG_TIME - ${_TIME_TOTAL[async_wait]}) / 1000)) + elapsed_time=$(($(date "+%s") - $_TIME_BEGIN)) + serial_time=$(($elapsed_time + $bg_time_minus_wait)) + + echo + echo "=================" + echo " Async summary" + echo "=================" + echo " Time spent in the background minus waits: $bg_time_minus_wait sec" + echo " Elapsed time: $elapsed_time sec" + echo " Time if we did everything serially: $serial_time sec" + echo " Speedup: " $(echo | awk "{print $serial_time / $elapsed_time}") +} diff --git a/inc/ini-config b/inc/ini-config index 79936823d2..920d4775fa 100644 --- a/inc/ini-config +++ b/inc/ini-config @@ -189,6 +189,11 @@ function iniset { local option=$3 local value=$4 + # Escape the ampersand (&) and backslash (\) characters for sed + # Order of substitution matters: we escape backslashes first before + # adding more backslashes to escape ampersands + value=$(echo $value | sed -e 's/\\/\\\\/g' -e 's/&/\\&/g') + if [[ -z $section || -z $option ]]; then $xtrace return diff --git a/inc/meta-config b/inc/meta-config index be73b60800..1215bb8307 100644 --- a/inc/meta-config +++ b/inc/meta-config @@ -185,11 +185,15 @@ function merge_config_group { break fi dir=$(dirname $realconfigfile) - if [[ -d $dir ]]; then - merge_config_file $localfile $group $configfile - else - die $LINENO "bogus config file specification $configfile ($configfile=$realconfigfile, $dir is not a directory)" + + test -e $dir && ! test -d $dir && die $LINENO "bogus config file specification $configfile ($configfile=$realconfigfile, $dir exists but it is not a directory)" + + if ! [[ -e $dir ]] ; then + sudo mkdir -p $dir || die $LINENO "could not create the directory of $real_configfile ($configfile)" + sudo chown ${STACK_USER} $dir fi + + merge_config_file $localfile $group $configfile done done } diff --git a/inc/python b/inc/python index 8941fd038d..3969c1fa82 100644 --- a/inc/python +++ b/inc/python @@ -7,7 +7,6 @@ # External functions used: # - GetOSVersion # - is_fedora -# - is_suse # - safe_chown # Save trace setting @@ -33,6 +32,23 @@ function join_extras { # Python Functions # ================ +# Setup the global devstack virtualenvs and the associated environment +# updates. +function setup_devstack_virtualenv { + # We run devstack out of a global virtualenv. + if [[ ! -d $DEVSTACK_VENV ]] ; then + # Using system site packages to enable nova to use libguestfs. + # This package is currently installed via the distro and not + # available on pypi. + $PYTHON -m venv --system-site-packages "${DEVSTACK_VENV}" + pip_install -U pip setuptools[core] + fi + if [[ ":$PATH:" != *":$DEVSTACK_VENV/bin:"* ]] ; then + export PATH="$DEVSTACK_VENV/bin:$PATH" + export PYTHON="$DEVSTACK_VENV/bin/python3" + fi +} + # Get the path to the pip command. 
# get_pip_command function get_pip_command { @@ -61,9 +77,11 @@ function get_python_exec_prefix { fi $xtrace - local PYTHON_PATH=/usr/local/bin - is_suse && PYTHON_PATH=/usr/bin - echo $PYTHON_PATH + if [[ "$GLOBAL_VENV" == "True" ]] ; then + echo "$DEVSTACK_VENV/bin" + else + echo "/usr/local/bin" + fi } # Wrapper for ``pip install`` that only installs versions of libraries @@ -168,15 +186,17 @@ function pip_install { if [[ -n ${PIP_VIRTUAL_ENV:=} && -d ${PIP_VIRTUAL_ENV} ]]; then local cmd_pip=$PIP_VIRTUAL_ENV/bin/pip local sudo_pip="env" + elif [[ "${GLOBAL_VENV}" == "True" && -d ${DEVSTACK_VENV} ]] ; then + # We have to check that the DEVSTACK_VENV exists because early + # devstack boostrapping needs to operate in a system context + # too bootstrap pip. Once pip is bootstrapped we create the + # global venv and can start to use it. + local cmd_pip=$DEVSTACK_VENV/bin/pip + local sudo_pip="env" + echo "Using python $PYTHON3_VERSION to install $package_dir" else local cmd_pip="python$PYTHON3_VERSION -m pip" - # See - # https://github.com/pypa/setuptools/issues/2232 - # http://lists.openstack.org/pipermail/openstack-discuss/2020-August/016905.html - # this makes setuptools >=50 use the platform distutils. - # We only want to do this on global pip installs, not if - # installing in a virtualenv - local sudo_pip="sudo -H LC_ALL=en_US.UTF-8 SETUPTOOLS_USE_DISTUTILS=stdlib " + local sudo_pip="sudo -H LC_ALL=en_US.UTF-8" echo "Using python $PYTHON3_VERSION to install $package_dir" fi @@ -186,15 +206,11 @@ function pip_install { $xtrace - # adding SETUPTOOLS_SYS_PATH_TECHNIQUE is a workaround to keep - # the same behaviour of setuptools before version 25.0.0. - # related issue: https://github.com/pypa/pip/issues/3874 $sudo_pip \ http_proxy="${http_proxy:-}" \ https_proxy="${https_proxy:-}" \ no_proxy="${no_proxy:-}" \ PIP_FIND_LINKS=$PIP_FIND_LINKS \ - SETUPTOOLS_SYS_PATH_TECHNIQUE=rewrite \ $cmd_pip $upgrade \ $@ result=$? @@ -257,8 +273,7 @@ function use_library_from_git { function lib_installed_from_git { local name=$1 local safe_name - safe_name=$(python -c "from pkg_resources import safe_name; \ - print(safe_name('${name}'))") + safe_name=$(python -c "from packaging import canonicalize_name; print(canonicalize_name('${name}'))") # Note "pip freeze" doesn't always work here, because it tries to # be smart about finding the remote of the git repo the package # was installed from. This doesn't work with zuul which clones @@ -378,12 +393,16 @@ function _setup_package_with_constraints_edit { project_dir=$(cd $project_dir && pwd) if [ -n "$REQUIREMENTS_DIR" ]; then - # Constrain this package to this project directory from here on out. + # Remove this package from constraints before we install it. + # That way, later installs won't "downgrade" the install from + # source we are about to do. 
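# e.g. (hypothetical pin): if upper-constraints.txt pins oslo.log===5.0.0 and
# oslo.log is the project being installed from source here, dropping the pin
# keeps a later "pip install -c upper-constraints.txt ..." from downgrading
# the source install back to 5.0.0.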
local name name=$(awk '/^name.*=/ {print $3}' $project_dir/setup.cfg) + if [ -z $name ]; then + name=$(awk '/^name =/ {gsub(/"/, "", $3); print $3}' $project_dir/pyproject.toml) + fi $REQUIREMENTS_DIR/.venv/bin/edit-constraints \ - $REQUIREMENTS_DIR/upper-constraints.txt -- $name \ - "$flags file://$project_dir#egg=$name" + $REQUIREMENTS_DIR/upper-constraints.txt -- $name fi setup_package $bindep $project_dir "$flags" $extras @@ -444,8 +463,11 @@ function setup_package { pip_install $flags "$project_dir$extras" # ensure that further actions can do things like setup.py sdist - if [[ "$flags" == "-e" ]]; then - safe_chown -R $STACK_USER $1/*.egg-info + if [[ "$flags" == "-e" && "$GLOBAL_VENV" == "False" ]]; then + # egg-info is not created when project have pyproject.toml + if [ -d $1/*.egg-info ]; then + safe_chown -R $STACK_USER $1/*.egg-info + fi fi } @@ -465,14 +487,8 @@ function install_python { function install_python3 { if is_ubuntu; then apt_get install python${PYTHON3_VERSION} python${PYTHON3_VERSION}-dev - elif is_suse; then - install_package python3-devel python3-dbm elif is_fedora; then - if [ "$os_VENDOR" = "Fedora" ]; then - install_package python${PYTHON3_VERSION//.} - else - install_package python${PYTHON3_VERSION//.} python${PYTHON3_VERSION//.}-devel - fi + install_package python${PYTHON3_VERSION}-devel python${PYTHON3_VERSION}-pip fi } diff --git a/inc/rootwrap b/inc/rootwrap index 2a6e4b648f..4c65440a4e 100644 --- a/inc/rootwrap +++ b/inc/rootwrap @@ -60,6 +60,11 @@ function configure_rootwrap { sudo install -o root -g root -m 644 $rootwrap_conf_src_dir/rootwrap.conf /etc/${project}/rootwrap.conf sudo sed -e "s:^filters_path=.*$:filters_path=/etc/${project}/rootwrap.d:" -i /etc/${project}/rootwrap.conf + # Rely on $PATH set by devstack to determine what is safe to execute + # by rootwrap rather than use explicit whitelist of paths in + # rootwrap.conf + sudo sed -e 's/^exec_dirs=.*/#&/' -i /etc/${project}/rootwrap.conf + # Set up the rootwrap sudoers local tempfile tempfile=$(mktemp) diff --git a/lib/apache b/lib/apache index 870a65a9d2..b3379a7cde 100644 --- a/lib/apache +++ b/lib/apache @@ -27,6 +27,11 @@ set +o xtrace APACHE_USER=${APACHE_USER:-$STACK_USER} APACHE_GROUP=${APACHE_GROUP:-$(id -gn $APACHE_USER)} +APACHE_LOCAL_HOST=$SERVICE_LOCAL_HOST +if [[ "$SERVICE_IP_VERSION" == 6 ]]; then + APACHE_LOCAL_HOST=[$APACHE_LOCAL_HOST] +fi + # Set up apache name and configuration directory # Note that APACHE_CONF_DIR is really more accurately apache's vhost @@ -39,10 +44,6 @@ elif is_fedora; then APACHE_NAME=httpd APACHE_CONF_DIR=${APACHE_CONF_DIR:-/etc/$APACHE_NAME/conf.d} APACHE_SETTINGS_DIR=${APACHE_SETTINGS_DIR:-/etc/$APACHE_NAME/conf.d} -elif is_suse; then - APACHE_NAME=apache2 - APACHE_CONF_DIR=${APACHE_CONF_DIR:-/etc/$APACHE_NAME/vhosts.d} - APACHE_SETTINGS_DIR=${APACHE_SETTINGS_DIR:-/etc/$APACHE_NAME/conf.d} fi APACHE_LOG_DIR="/var/log/${APACHE_NAME}" @@ -52,18 +53,16 @@ APACHE_LOG_DIR="/var/log/${APACHE_NAME}" # Enable apache mod and restart apache if it isn't already enabled. function enable_apache_mod { local mod=$1 + local should_restart=$2 # Apache installation, because we mark it NOPRIME if is_ubuntu; then # Skip mod_version as it is not a valid mod to enable # on debuntu, instead it is built in. if [[ "$mod" != "version" ]] && ! a2query -m $mod ; then sudo a2enmod $mod - restart_apache_server - fi - elif is_suse; then - if ! 
a2enmod -q $mod ; then - sudo a2enmod $mod - restart_apache_server + if [[ "$should_restart" != "norestart" ]] ; then + restart_apache_server + fi fi elif is_fedora; then # pass @@ -82,22 +81,15 @@ function install_apache_uwsgi { apxs="apxs" fi - # This varies based on packaged/installed. If we've - # pip_installed, then the pip setup will only build a "python" - # module that will be either python2 or python3 depending on what - # it was built with. - # - # For package installs, the distro ships both plugins and you need - # to select the right one ... it will not be autodetected. - UWSGI_PYTHON_PLUGIN=python3 - if is_ubuntu; then - local pkg_list="uwsgi uwsgi-plugin-python3 libapache2-mod-proxy-uwsgi" - if [[ "$DISTRO" == 'bionic' ]]; then - pkg_list="${pkg_list} uwsgi-plugin-python" - fi + local pkg_list="uwsgi uwsgi-plugin-python3" install_package ${pkg_list} - elif is_fedora; then + # NOTE(ianw) 2022-02-03 : Fedora 35 needs to skip this and fall + # into the install-from-source because the upstream packages + # didn't fix Python 3.10 compatibility before release. Should be + # fixed in uwsgi 4.9.0; can remove this when packages available + # or we drop this release + elif is_fedora && ! is_openeuler && ! [[ $DISTRO =~ rhel9 ]]; then # Note httpd comes with mod_proxy_uwsgi and it is loaded by # default; the mod_proxy_uwsgi package actually conflicts now. # See: @@ -106,10 +98,6 @@ function install_apache_uwsgi { # Thus there is nothing else to do after this install install_package uwsgi \ uwsgi-plugin-python3 - elif [[ $os_VENDOR =~ openSUSE ]]; then - install_package uwsgi \ - uwsgi-python3 \ - apache2-mod_uwsgi else # Compile uwsgi from source. local dir @@ -125,19 +113,16 @@ function install_apache_uwsgi { popd # delete the temp directory sudo rm -rf $dir - UWSGI_PYTHON_PLUGIN=python fi - if is_ubuntu || is_suse ; then - # we've got to enable proxy and proxy_uwsgi for this to work - sudo a2enmod proxy - sudo a2enmod proxy_uwsgi - elif is_fedora; then - # redhat is missing a nice way to turn on/off modules - echo "LoadModule proxy_uwsgi_module modules/mod_proxy_uwsgi.so" \ - | sudo tee /etc/httpd/conf.modules.d/02-proxy-uwsgi.conf + if is_ubuntu; then + if ! a2query -m proxy || ! a2query -m proxy_uwsgi ; then + # we've got to enable proxy and proxy_uwsgi for this to work + sudo a2enmod proxy + sudo a2enmod proxy_uwsgi + restart_apache_server + fi fi - restart_apache_server } # install_apache_wsgi() - Install Apache server and wsgi module @@ -152,14 +137,14 @@ function install_apache_wsgi { install_package libapache2-mod-wsgi-py3 elif is_fedora; then sudo rm -f /etc/httpd/conf.d/000-* - install_package httpd python3-mod_wsgi + install_package httpd python${PYTHON3_VERSION}-mod_wsgi + # rpm distros dont enable httpd by default so enable it to support reboots. + sudo systemctl enable httpd # For consistency with Ubuntu, switch to the worker mpm, as # the default is event sudo sed -i '/mod_mpm_prefork.so/s/^/#/g' /etc/httpd/conf.modules.d/00-mpm.conf sudo sed -i '/mod_mpm_event.so/s/^/#/g' /etc/httpd/conf.modules.d/00-mpm.conf sudo sed -i '/mod_mpm_worker.so/s/^#//g' /etc/httpd/conf.modules.d/00-mpm.conf - elif is_suse; then - install_package apache2 apache2-mod_wsgi else exit_distro_not_supported "apache wsgi installation" fi @@ -174,7 +159,7 @@ function install_apache_wsgi { # recognise it. a2ensite and a2dissite ignore the .conf suffix used as parameter. The default sites' # files are 000-default.conf and default-ssl.conf. 
# -# On Fedora and openSUSE, any file in /etc/httpd/conf.d/ whose name ends with .conf is enabled. +# On Fedora, any file in /etc/httpd/conf.d/ whose name ends with .conf is enabled. # # On RHEL and CentOS, things should hopefully work as in Fedora. # @@ -190,7 +175,7 @@ function apache_site_config_for { if is_ubuntu; then # Ubuntu 14.04 - Apache 2.4 echo $APACHE_CONF_DIR/${site}.conf - elif is_fedora || is_suse; then + elif is_fedora; then # fedora conf.d is only imported if it ends with .conf so this is approx the same local enabled_site_file="$APACHE_CONF_DIR/${site}.conf" if [ -f $enabled_site_file ]; then @@ -208,7 +193,7 @@ function enable_apache_site { enable_apache_mod version if is_ubuntu; then sudo a2ensite ${site} - elif is_fedora || is_suse; then + elif is_fedora; then local enabled_site_file="$APACHE_CONF_DIR/${site}.conf" # Do nothing if site already enabled or no site config exists if [[ -f ${enabled_site_file}.disabled ]] && [[ ! -f ${enabled_site_file} ]]; then @@ -222,7 +207,7 @@ function disable_apache_site { local site=$@ if is_ubuntu; then sudo a2dissite ${site} || true - elif is_fedora || is_suse; then + elif is_fedora; then local enabled_site_file="$APACHE_CONF_DIR/${site}.conf" # Do nothing if no site config exists if [[ -f ${enabled_site_file} ]]; then @@ -253,13 +238,17 @@ function restart_apache_server { restart_service $APACHE_NAME } +# write_uwsgi_config() - Create a new uWSGI config file function write_uwsgi_config { - local file=$1 + local conf=$1 local wsgi=$2 local url=$3 local http=$4 - local name="" - name=$(basename $wsgi) + local name=$5 + + if [ -z "$name" ]; then + name=$(basename $wsgi) + fi # create a home for the sockets; note don't use /tmp -- apache has # a private view of it on some platforms. @@ -274,39 +263,49 @@ function write_uwsgi_config { local socket="$socket_dir/${name}.socket" # always cleanup given that we are using iniset here - rm -rf $file - iniset "$file" uwsgi wsgi-file "$wsgi" - iniset "$file" uwsgi processes $API_WORKERS + rm -rf $conf + # Set either the module path or wsgi script path depending on what we've + # been given. Note that the regex isn't exhaustive - neither Python modules + # nor Python variables can start with a number - but it's "good enough" + if [[ "$wsgi" =~ ^[a-zA-Z0-9_.]+:[a-zA-Z0-9_]+$ ]]; then + iniset "$conf" uwsgi module "$wsgi" + else + deprecated 'Configuring uWSGI with a WSGI file is deprecated, use module paths instead' + iniset "$conf" uwsgi wsgi-file "$wsgi" + fi + iniset "$conf" uwsgi processes $API_WORKERS # This is running standalone - iniset "$file" uwsgi master true + iniset "$conf" uwsgi master true # Set die-on-term & exit-on-reload so that uwsgi shuts down - iniset "$file" uwsgi die-on-term true - iniset "$file" uwsgi exit-on-reload false + iniset "$conf" uwsgi die-on-term true + iniset "$conf" uwsgi exit-on-reload false # Set worker-reload-mercy so that worker will not exit till the time # configured after graceful shutdown - iniset "$file" uwsgi worker-reload-mercy $WORKER_TIMEOUT - iniset "$file" uwsgi enable-threads true - iniset "$file" uwsgi plugins http,${UWSGI_PYTHON_PLUGIN} + iniset "$conf" uwsgi worker-reload-mercy $WORKER_TIMEOUT + iniset "$conf" uwsgi enable-threads true + iniset "$conf" uwsgi plugins http,python3 # uwsgi recommends this to prevent thundering herd on accept. 
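# (the lock serializes accept() across the worker processes so that only one
# worker wakes up for each incoming connection)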
- iniset "$file" uwsgi thunder-lock true + iniset "$conf" uwsgi thunder-lock true # Set hook to trigger graceful shutdown on SIGTERM - iniset "$file" uwsgi hook-master-start "unix_signal:15 gracefully_kill_them_all" + iniset "$conf" uwsgi hook-master-start "unix_signal:15 gracefully_kill_them_all" # Override the default size for headers from the 4k default. - iniset "$file" uwsgi buffer-size 65535 + iniset "$conf" uwsgi buffer-size 65535 # Make sure the client doesn't try to re-use the connection. - iniset "$file" uwsgi add-header "Connection: close" + iniset "$conf" uwsgi add-header "Connection: close" # This ensures that file descriptors aren't shared between processes. - iniset "$file" uwsgi lazy-apps true + iniset "$conf" uwsgi lazy-apps true + # Starting time of the WSGi server + iniset "$conf" uwsgi start-time %t # If we said bind directly to http, then do that and don't start the apache proxy if [[ -n "$http" ]]; then - iniset "$file" uwsgi http $http + iniset "$conf" uwsgi http $http else local apache_conf="" apache_conf=$(apache_site_config_for $name) - iniset "$file" uwsgi socket "$socket" - iniset "$file" uwsgi chmod-socket 666 - echo "ProxyPass \"${url}\" \"unix:${socket}|uwsgi://uwsgi-uds-${name}/\" retry=0 " | sudo tee -a $apache_conf + iniset "$conf" uwsgi socket "$socket" + iniset "$conf" uwsgi chmod-socket 666 + echo "ProxyPass \"${url}\" \"unix:${socket}|uwsgi://uwsgi-uds-${name}\" retry=0 acquire=1 " | sudo tee -a $apache_conf enable_apache_site $name restart_apache_server fi @@ -315,51 +314,62 @@ function write_uwsgi_config { # For services using chunked encoding, the only services known to use this # currently are Glance and Swift, we need to use an http proxy instead of # mod_proxy_uwsgi because the chunked encoding gets dropped. See: -# https://github.com/unbit/uwsgi/issues/1540 You can workaround this on python2 -# but that involves having apache buffer the request before sending it to -# uwsgi. +# https://github.com/unbit/uwsgi/issues/1540. function write_local_uwsgi_http_config { - local file=$1 + local conf=$1 local wsgi=$2 local url=$3 - name=$(basename $wsgi) + local name=$4 + + if [ -z "$name" ]; then + name=$(basename $wsgi) + fi # create a home for the sockets; note don't use /tmp -- apache has # a private view of it on some platforms. # always cleanup given that we are using iniset here - rm -rf $file - iniset "$file" uwsgi wsgi-file "$wsgi" + rm -rf $conf + # Set either the module path or wsgi script path depending on what we've + # been given + if [[ "$wsgi" =~ ^[a-zA-Z0-9_.]+:[a-zA-Z0-9_]+$ ]]; then + iniset "$conf" uwsgi module "$wsgi" + else + deprecated 'Configuring uWSGI with a WSGI file is deprecated, use module paths instead' + iniset "$conf" uwsgi wsgi-file "$wsgi" + fi port=$(get_random_port) - iniset "$file" uwsgi http-socket "127.0.0.1:$port" - iniset "$file" uwsgi processes $API_WORKERS + iniset "$conf" uwsgi http-socket "$APACHE_LOCAL_HOST:$port" + iniset "$conf" uwsgi processes $API_WORKERS # This is running standalone - iniset "$file" uwsgi master true + iniset "$conf" uwsgi master true # Set die-on-term & exit-on-reload so that uwsgi shuts down - iniset "$file" uwsgi die-on-term true - iniset "$file" uwsgi exit-on-reload false - iniset "$file" uwsgi enable-threads true - iniset "$file" uwsgi plugins http,${UWSGI_PYTHON_PLUGIN} - # uwsgi recommends this to prevent thundering herd on accept. 
- iniset "$file" uwsgi thunder-lock true - # Set hook to trigger graceful shutdown on SIGTERM - iniset "$file" uwsgi hook-master-start "unix_signal:15 gracefully_kill_them_all" + iniset "$conf" uwsgi die-on-term true + iniset "$conf" uwsgi exit-on-reload false # Set worker-reload-mercy so that worker will not exit till the time # configured after graceful shutdown - iniset "$file" uwsgi worker-reload-mercy $WORKER_TIMEOUT + iniset "$conf" uwsgi worker-reload-mercy $WORKER_TIMEOUT + iniset "$conf" uwsgi enable-threads true + iniset "$conf" uwsgi plugins http,python3 + # uwsgi recommends this to prevent thundering herd on accept. + iniset "$conf" uwsgi thunder-lock true + # Set hook to trigger graceful shutdown on SIGTERM + iniset "$conf" uwsgi hook-master-start "unix_signal:15 gracefully_kill_them_all" # Override the default size for headers from the 4k default. - iniset "$file" uwsgi buffer-size 65535 + iniset "$conf" uwsgi buffer-size 65535 # Make sure the client doesn't try to re-use the connection. - iniset "$file" uwsgi add-header "Connection: close" + iniset "$conf" uwsgi add-header "Connection: close" # This ensures that file descriptors aren't shared between processes. - iniset "$file" uwsgi lazy-apps true - iniset "$file" uwsgi chmod-socket 666 - iniset "$file" uwsgi http-raw-body true - iniset "$file" uwsgi http-chunked-input true - iniset "$file" uwsgi http-auto-chunked true - iniset "$file" uwsgi http-keepalive false + iniset "$conf" uwsgi lazy-apps true + iniset "$conf" uwsgi chmod-socket 666 + iniset "$conf" uwsgi http-raw-body true + iniset "$conf" uwsgi http-chunked-input true + iniset "$conf" uwsgi http-auto-chunked true + iniset "$conf" uwsgi http-keepalive false # Increase socket timeout for slow chunked uploads - iniset "$file" uwsgi socket-timeout 30 + iniset "$conf" uwsgi socket-timeout 30 + # Starting time of the WSGi server + iniset "$conf" uwsgi start-time %t enable_apache_mod proxy enable_apache_mod proxy_http @@ -367,7 +377,7 @@ function write_local_uwsgi_http_config { apache_conf=$(apache_site_config_for $name) echo "KeepAlive Off" | sudo tee $apache_conf echo "SetEnv proxy-sendchunked 1" | sudo tee -a $apache_conf - echo "ProxyPass \"${url}\" \"http://127.0.0.1:$port\" retry=0 " | sudo tee -a $apache_conf + echo "ProxyPass \"${url}\" \"http://$APACHE_LOCAL_HOST:$port\" retry=0 acquire=1 " | sudo tee -a $apache_conf enable_apache_site $name restart_apache_server } @@ -386,18 +396,24 @@ function write_local_proxy_http_config { echo "KeepAlive Off" | sudo tee $apache_conf echo "SetEnv proxy-sendchunked 1" | sudo tee -a $apache_conf - echo "ProxyPass \"${loc}\" \"$url\" retry=0 " | sudo tee -a $apache_conf + echo "ProxyPass \"${loc}\" \"$url\" retry=0 acquire=1 " | sudo tee -a $apache_conf enable_apache_site $name restart_apache_server } function remove_uwsgi_config { - local file=$1 + local conf=$1 local wsgi=$2 local name="" + # TODO(stephenfin): Remove this call when everyone is using module path + # configuration instead of file path configuration name=$(basename $wsgi) - rm -rf $file + if [[ "$wsgi" = /* ]]; then + deprecated "Passing a wsgi script to remove_uwsgi_config is deprecated, pass an application name instead" + fi + + rm -rf $conf disable_apache_site $name } diff --git a/lib/atop b/lib/atop new file mode 100644 index 0000000000..25c8e9a83f --- /dev/null +++ b/lib/atop @@ -0,0 +1,49 @@ +#!/bin/bash +# +# lib/atop +# Functions to start and stop atop + +# Dependencies: +# +# - ``functions`` file + +# ``stack.sh`` calls the entry points in this order: +# 
+# - configure_atop +# - install_atop +# - start_atop +# - stop_atop + +# Save trace setting +_XTRACE_ATOP=$(set +o | grep xtrace) +set +o xtrace + +function configure_atop { + mkdir -p $LOGDIR/atop + cat </dev/null +# /etc/default/atop +# see man atoprc for more possibilities to configure atop execution + +LOGOPTS="-R" +LOGINTERVAL=${ATOP_LOGINTERVAL:-"30"} +LOGGENERATIONS=${ATOP_LOGGENERATIONS:-"1"} +LOGPATH=$LOGDIR/atop +EOF +} + +function install_atop { + install_package atop +} + +# start_() - Start running processes +function start_atop { + start_service atop +} + +# stop_atop() stop atop process +function stop_atop { + stop_service atop +} + +# Restore xtrace +$_XTRACE_ATOP diff --git a/lib/cinder b/lib/cinder index 6c97e114a6..02056c20f4 100644 --- a/lib/cinder +++ b/lib/cinder @@ -31,6 +31,7 @@ set +o xtrace CINDER_DRIVER=${CINDER_DRIVER:-default} CINDER_PLUGINS=$TOP_DIR/lib/cinder_plugins CINDER_BACKENDS=$TOP_DIR/lib/cinder_backends +CINDER_BACKUPS=$TOP_DIR/lib/cinder_backups # grab plugin config if specified via cinder_driver if [[ -r $CINDER_PLUGINS/$CINDER_DRIVER ]]; then @@ -42,6 +43,13 @@ GITDIR["python-cinderclient"]=$DEST/python-cinderclient GITDIR["python-brick-cinderclient-ext"]=$DEST/python-brick-cinderclient-ext CINDER_DIR=$DEST/cinder +if [[ $SERVICE_IP_VERSION == 6 ]]; then + CINDER_MY_IP="$HOST_IPV6" +else + CINDER_MY_IP="$HOST_IP" +fi + + # Cinder virtual environment if [[ ${USE_VENV} = True ]]; then PROJECT_VENV["cinder"]=${CINDER_DIR}.venv @@ -51,10 +59,11 @@ else fi CINDER_STATE_PATH=${CINDER_STATE_PATH:=$DATA_DIR/cinder} +OS_BRICK_LOCK_PATH=${OS_BRICK_LOCK_PATH:=$DATA_DIR/os_brick} CINDER_CONF_DIR=/etc/cinder CINDER_CONF=$CINDER_CONF_DIR/cinder.conf -CINDER_UWSGI=$CINDER_BIN_DIR/cinder-wsgi +CINDER_UWSGI=cinder.wsgi.api:application CINDER_UWSGI_CONF=$CINDER_CONF_DIR/cinder-api-uwsgi.ini CINDER_API_PASTE_INI=$CINDER_CONF_DIR/api-paste.ini @@ -68,6 +77,11 @@ CINDER_SERVICE_PORT_INT=${CINDER_SERVICE_PORT_INT:-18776} CINDER_SERVICE_PROTOCOL=${CINDER_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} CINDER_SERVICE_LISTEN_ADDRESS=${CINDER_SERVICE_LISTEN_ADDRESS:-$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)} +# We do not need to report service status every 10s for devstack-like +# deployments. In the gate this generates extra work for the services and the +# database which are already taxed. +CINDER_SERVICE_REPORT_INTERVAL=${CINDER_SERVICE_REPORT_INTERVAL:-120} + # What type of LVM device should Cinder use for LVM backend # Defaults to auto, which will do thin provisioning if it's a fresh # volume group, otherwise it will do thick. The other valid choices are @@ -75,6 +89,10 @@ CINDER_SERVICE_LISTEN_ADDRESS=${CINDER_SERVICE_LISTEN_ADDRESS:-$(ipv6_unquote $S # thin provisioning. CINDER_LVM_TYPE=${CINDER_LVM_TYPE:-auto} +# ``CINDER_USE_SERVICE_TOKEN`` is a mode where service token is passed along with +# user token while communicating to external REST APIs like Glance. 
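# For example (sketch of the local.conf knob): setting
# CINDER_USE_SERVICE_TOKEN=False turns the mode off, so only the user token is
# sent on those requests.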
+CINDER_USE_SERVICE_TOKEN=$(trueorfalse True CINDER_USE_SERVICE_TOKEN) + # Default backends # The backend format is type:name where type is one of the supported backend # types (lvm, nfs, etc) and name is the identifier used in the Cinder @@ -87,20 +105,61 @@ CINDER_ENABLED_BACKENDS=${CINDER_ENABLED_BACKENDS:-lvm:lvmdriver-1} CINDER_VOLUME_CLEAR=${CINDER_VOLUME_CLEAR:-${CINDER_VOLUME_CLEAR_DEFAULT:-zero}} CINDER_VOLUME_CLEAR=$(echo ${CINDER_VOLUME_CLEAR} | tr '[:upper:]' '[:lower:]') -# Centos7 and OpenSUSE switched to using LIO and that's all that's supported, -# although the tgt bits are in EPEL and OpenSUSE we don't want that for CI -if is_fedora || is_suse; then - CINDER_ISCSI_HELPER=${CINDER_ISCSI_HELPER:-lioadm} - if [[ ${CINDER_ISCSI_HELPER} != "lioadm" ]]; then - die "lioadm is the only valid Cinder target_helper config on this platform" +VOLUME_TYPE_MULTIATTACH=${VOLUME_TYPE_MULTIATTACH:-multiattach} + +if [[ -n "$CINDER_ISCSI_HELPER" ]]; then + if [[ -z "$CINDER_TARGET_HELPER" ]]; then + deprecated 'Using CINDER_ISCSI_HELPER is deprecated, use CINDER_TARGET_HELPER instead' + CINDER_TARGET_HELPER="$CINDER_ISCSI_HELPER" + else + deprecated 'Deprecated CINDER_ISCSI_HELPER is set, but is being overwritten by CINDER_TARGET_HELPER' fi +fi +CINDER_TARGET_HELPER=${CINDER_TARGET_HELPER:-lioadm} + +if [[ $CINDER_TARGET_HELPER == 'nvmet' ]]; then + CINDER_TARGET_PROTOCOL=${CINDER_TARGET_PROTOCOL:-'nvmet_rdma'} + CINDER_TARGET_PREFIX=${CINDER_TARGET_PREFIX:-'nvme-subsystem-1'} + CINDER_TARGET_PORT=${CINDER_TARGET_PORT:-4420} else - CINDER_ISCSI_HELPER=${CINDER_ISCSI_HELPER:-tgtadm} + CINDER_TARGET_PROTOCOL=${CINDER_TARGET_PROTOCOL:-'iscsi'} + CINDER_TARGET_PREFIX=${CINDER_TARGET_PREFIX:-'iqn.2010-10.org.openstack:'} + CINDER_TARGET_PORT=${CINDER_TARGET_PORT:-3260} fi -# Toggle for deploying Cinder under a wsgi server. Legacy mod_wsgi -# reference should be cleaned up to more accurately refer to uwsgi. -CINDER_USE_MOD_WSGI=${CINDER_USE_MOD_WSGI:-True} + +# EL should only use lioadm +if is_fedora; then + if [[ ${CINDER_TARGET_HELPER} != "lioadm" && ${CINDER_TARGET_HELPER} != 'nvmet' ]]; then + die "lioadm and nvmet are the only valid Cinder target_helper config on this platform" + fi +fi + +# When Cinder is used as a backend for Glance, it can be configured to clone +# the volume containing image data directly in the backend instead of +# transferring data from volume to volume. Value is a comma separated list of +# schemes (currently only 'file' and 'cinder' are supported). The default +# configuration in Cinder is empty (that is, do not use this feature). NOTE: +# to use this feature you must also enable GLANCE_SHOW_DIRECT_URL and/or +# GLANCE_SHOW_MULTIPLE_LOCATIONS for glance-api.conf. +CINDER_ALLOWED_DIRECT_URL_SCHEMES=${CINDER_ALLOWED_DIRECT_URL_SCHEMES:-} +if [[ -n "$CINDER_ALLOWED_DIRECT_URL_SCHEMES" ]]; then + if [[ "${GLANCE_SHOW_DIRECT_URL:-False}" != "True" \ + && "${GLANCE_SHOW_MULTIPLE_LOCATIONS:-False}" != "True" ]]; then + warn $LINENO "CINDER_ALLOWED_DIRECT_URL_SCHEMES is set, but neither \ +GLANCE_SHOW_DIRECT_URL nor GLANCE_SHOW_MULTIPLE_LOCATIONS is True" + fi +fi + +# For backward compatibility +# Before CINDER_BACKUP_DRIVER was introduced, ceph backup driver was configured +# along with ceph backend driver. 
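# e.g. (hypothetical local.conf): CINDER_ENABLED_BACKENDS=ceph:ceph with no
# CINDER_BACKUP_DRIVER set keeps the historical ceph backup driver; any other
# backend list falls through to the swift default below.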
+if [[ -z "${CINDER_BACKUP_DRIVER}" && "$CINDER_ENABLED_BACKENDS" =~ "ceph" ]]; then + CINDER_BACKUP_DRIVER=ceph +fi + +# Supported backup drivers are in lib/cinder_backups +CINDER_BACKUP_DRIVER=${CINDER_BACKUP_DRIVER:-swift} # Source the enabled backends if is_service_enabled c-vol && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then @@ -113,9 +172,24 @@ if is_service_enabled c-vol && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then done fi +# Source the backup driver +if is_service_enabled c-bak && [[ -n "$CINDER_BACKUP_DRIVER" ]]; then + if [[ -r $CINDER_BACKUPS/$CINDER_BACKUP_DRIVER ]]; then + source $CINDER_BACKUPS/$CINDER_BACKUP_DRIVER + else + die "cinder backup driver $CINDER_BACKUP_DRIVER is not supported" + fi +fi + # Environment variables to configure the image-volume cache CINDER_IMG_CACHE_ENABLED=${CINDER_IMG_CACHE_ENABLED:-True} +# Environment variables to configure the optimized volume upload +CINDER_UPLOAD_OPTIMIZED=${CINDER_UPLOAD_OPTIMIZED:-False} + +# Environment variables to configure the internal tenant during optimized volume upload +CINDER_UPLOAD_INTERNAL_TENANT=${CINDER_UPLOAD_INTERNAL_TENANT:-False} + # For limits, if left unset, it will use cinder defaults of 0 for unlimited CINDER_IMG_CACHE_SIZE_GB=${CINDER_IMG_CACHE_SIZE_GB:-} CINDER_IMG_CACHE_SIZE_COUNT=${CINDER_IMG_CACHE_SIZE_COUNT:-} @@ -125,6 +199,17 @@ CINDER_IMG_CACHE_SIZE_COUNT=${CINDER_IMG_CACHE_SIZE_COUNT:-} # enable the cache for all cinder backends. CINDER_CACHE_ENABLED_FOR_BACKENDS=${CINDER_CACHE_ENABLED_FOR_BACKENDS:-$CINDER_ENABLED_BACKENDS} +# Configure which cinder backends will have optimized volume upload, this takes the same +# form as the CINDER_ENABLED_BACKENDS config option. By default it will +# enable the cache for all cinder backends. +CINDER_UPLOAD_OPTIMIZED_BACKENDS=${CINDER_UPLOAD_OPTIMIZED_BACKENDS:-$CINDER_ENABLED_BACKENDS} + +# Flag to set the oslo_policy.enforce_scope. This is used to switch +# the Volume API policies to start checking the scope of token. by default, +# this flag is False. +# For more detail: https://docs.openstack.org/oslo.policy/latest/configuration/index.html#oslo_policy.enforce_scope +CINDER_ENFORCE_SCOPE=$(trueorfalse False CINDER_ENFORCE_SCOPE) + # Functions # --------- @@ -146,7 +231,7 @@ function _cinder_cleanup_apache_wsgi { function cleanup_cinder { # ensure the volume group is cleared up because fails might # leave dead volumes in the group - if [ "$CINDER_ISCSI_HELPER" = "tgtadm" ]; then + if [ "$CINDER_TARGET_HELPER" = "tgtadm" ]; then local targets targets=$(sudo tgtadm --op show --mode target) if [ $? 
-ne 0 ]; then @@ -174,8 +259,14 @@ function cleanup_cinder { else stop_service tgtd fi - else + elif [ "$CINDER_TARGET_HELPER" = "lioadm" ]; then sudo cinder-rtstool get-targets | sudo xargs -rn 1 cinder-rtstool delete + elif [ "$CINDER_TARGET_HELPER" = "nvmet" ]; then + # If we don't disconnect everything vgremove will block + sudo nvme disconnect-all + sudo nvmetcli clear + else + die $LINENO "Unknown value \"$CINDER_TARGET_HELPER\" for CINDER_TARGET_HELPER" fi if is_service_enabled c-vol && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then @@ -189,8 +280,14 @@ function cleanup_cinder { done fi + if is_service_enabled c-bak && [[ -n "$CINDER_BACKUP_DRIVER" ]]; then + if type cleanup_cinder_backup_$CINDER_BACKUP_DRIVER >/dev/null 2>&1; then + cleanup_cinder_backup_$CINDER_BACKUP_DRIVER + fi + fi + stop_process "c-api" - remove_uwsgi_config "$CINDER_UWSGI_CONF" "$CINDER_UWSGI" + remove_uwsgi_config "$CINDER_UWSGI_CONF" "cinder-wsgi" } # configure_cinder() - Set config files, create data dirs, etc @@ -220,7 +317,7 @@ function configure_cinder { iniset $CINDER_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - iniset $CINDER_CONF DEFAULT target_helper "$CINDER_ISCSI_HELPER" + iniset $CINDER_CONF DEFAULT target_helper "$CINDER_TARGET_HELPER" iniset $CINDER_CONF database connection `database_connection_url cinder` iniset $CINDER_CONF DEFAULT api_paste_config $CINDER_API_PASTE_INI iniset $CINDER_CONF DEFAULT rootwrap_config "$CINDER_CONF_DIR/rootwrap.conf" @@ -228,19 +325,26 @@ function configure_cinder { iniset $CINDER_CONF DEFAULT osapi_volume_listen $CINDER_SERVICE_LISTEN_ADDRESS iniset $CINDER_CONF DEFAULT state_path $CINDER_STATE_PATH iniset $CINDER_CONF oslo_concurrency lock_path $CINDER_STATE_PATH - if [[ $SERVICE_IP_VERSION == 6 ]]; then - iniset $CINDER_CONF DEFAULT my_ip "$HOST_IPV6" - else - iniset $CINDER_CONF DEFAULT my_ip "$HOST_IP" - fi + iniset $CINDER_CONF DEFAULT my_ip "$CINDER_MY_IP" iniset $CINDER_CONF key_manager backend cinder.keymgr.conf_key_mgr.ConfKeyManager iniset $CINDER_CONF key_manager fixed_key $(openssl rand -hex 16) + if [[ -n "$CINDER_ALLOWED_DIRECT_URL_SCHEMES" ]]; then + iniset $CINDER_CONF DEFAULT allowed_direct_url_schemes $CINDER_ALLOWED_DIRECT_URL_SCHEMES + fi + + # set default quotas + iniset $CINDER_CONF DEFAULT quota_volumes ${CINDER_QUOTA_VOLUMES:-10} + iniset $CINDER_CONF DEFAULT quota_backups ${CINDER_QUOTA_BACKUPS:-10} + iniset $CINDER_CONF DEFAULT quota_snapshots ${CINDER_QUOTA_SNAPSHOTS:-10} # Avoid RPC timeouts in slow CI and test environments by doubling the # default response timeout set by RPC clients. See bug #1873234 for more # details and example failures. 
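+    # (The RPC client default is 60 seconds, so the 120 below is exactly that
+    # doubling.)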
iniset $CINDER_CONF DEFAULT rpc_response_timeout 120 + iniset $CINDER_CONF DEFAULT report_interval $CINDER_SERVICE_REPORT_INTERVAL + iniset $CINDER_CONF DEFAULT service_down_time $(($CINDER_SERVICE_REPORT_INTERVAL * 6)) + if is_service_enabled c-vol && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then local enabled_backends="" local default_name="" @@ -255,24 +359,28 @@ function configure_cinder { default_name=$be_name fi enabled_backends+=$be_name, - - iniset $CINDER_CONF $be_name volume_clear $CINDER_VOLUME_CLEAR - done iniset $CINDER_CONF DEFAULT enabled_backends ${enabled_backends%,*} if [[ -n "$default_name" ]]; then iniset $CINDER_CONF DEFAULT default_volume_type ${default_name} fi configure_cinder_image_volume_cache + + # The upload optimization uses Cinder's clone volume functionality to + # clone the Image-Volume from source volume hence can only be + # performed when glance is using cinder as it's backend. + if [[ "$USE_CINDER_FOR_GLANCE" == "True" ]]; then + # Configure optimized volume upload + configure_cinder_volume_upload + fi fi - if is_service_enabled c-bak; then - # NOTE(mriedem): The default backup driver uses swift and if we're - # on a subnode we might not know if swift is enabled, but chances are - # good that it is on the controller so configure the backup service - # to use it. If we want to configure the backup service to use - # a non-swift driver, we'll likely need environment variables. - iniset $CINDER_CONF DEFAULT backup_swift_url "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$SWIFT_DEFAULT_BIND_PORT/v1/AUTH_" + if is_service_enabled c-bak && [[ -n "$CINDER_BACKUP_DRIVER" ]]; then + if type configure_cinder_backup_$CINDER_BACKUP_DRIVER >/dev/null 2>&1; then + configure_cinder_backup_$CINDER_BACKUP_DRIVER + else + die "configure_cinder_backup_$CINDER_BACKUP_DRIVER doesn't exist in $CINDER_BACKUPS/$CINDER_BACKUP_DRIVER" + fi fi if is_service_enabled ceilometer; then @@ -282,14 +390,8 @@ function configure_cinder { if is_service_enabled tls-proxy; then if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then # Set the service port for a proxy to take the original - if [ "$CINDER_USE_MOD_WSGI" == "True" ]; then - iniset $CINDER_CONF DEFAULT osapi_volume_listen_port $CINDER_SERVICE_PORT_INT - iniset $CINDER_CONF oslo_middleware enable_proxy_headers_parsing True - else - iniset $CINDER_CONF DEFAULT osapi_volume_listen_port $CINDER_SERVICE_PORT_INT - iniset $CINDER_CONF DEFAULT public_endpoint $CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT - iniset $CINDER_CONF DEFAULT osapi_volume_base_URL $CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT - fi + iniset $CINDER_CONF DEFAULT osapi_volume_listen_port $CINDER_SERVICE_PORT_INT + iniset $CINDER_CONF oslo_middleware enable_proxy_headers_parsing True fi fi @@ -300,9 +402,11 @@ function configure_cinder { iniset_rpc_backend cinder $CINDER_CONF # Format logging - setup_logging $CINDER_CONF $CINDER_USE_MOD_WSGI + setup_logging $CINDER_CONF - write_uwsgi_config "$CINDER_UWSGI_CONF" "$CINDER_UWSGI" "/volume" + if is_service_enabled c-api; then + write_uwsgi_config "$CINDER_UWSGI_CONF" "$CINDER_UWSGI" "/volume" "" "cinder-api" + fi if [[ -r $CINDER_PLUGINS/$CINDER_DRIVER ]]; then configure_cinder_driver @@ -316,6 +420,9 @@ function configure_cinder { iniset $CINDER_CONF DEFAULT glance_ca_certificates_file $SSL_BUNDLE_FILE fi + # Set glance credentials (used for location APIs) + configure_keystone_authtoken_middleware $CINDER_CONF glance glance + # Set nova credentials (used for os-assisted-snapshots) 
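+    # (Like the [glance] section above, the call below reuses the keystone
+    # authtoken helper to fill the [nova] section with service credentials.)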
configure_keystone_authtoken_middleware $CINDER_CONF nova nova iniset $CINDER_CONF nova region_name "$REGION_NAME" @@ -324,61 +431,54 @@ function configure_cinder { if [[ ! -z "$CINDER_COORDINATION_URL" ]]; then iniset $CINDER_CONF coordination backend_url "$CINDER_COORDINATION_URL" elif is_service_enabled etcd3; then - iniset $CINDER_CONF coordination backend_url "etcd3+http://${SERVICE_HOST}:$ETCD_PORT" + # NOTE(jan.gutter): api_version can revert to default once tooz is + # updated with the etcd v3.4 defaults + iniset $CINDER_CONF coordination backend_url "etcd3+http://${SERVICE_HOST}:$ETCD_PORT?api_version=v3" + fi + + if [[ "$CINDER_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then + iniset $CINDER_CONF oslo_policy enforce_scope true + iniset $CINDER_CONF oslo_policy enforce_new_defaults true + else + iniset $CINDER_CONF oslo_policy enforce_scope false + iniset $CINDER_CONF oslo_policy enforce_new_defaults false + fi + + if [ "$CINDER_USE_SERVICE_TOKEN" == "True" ]; then + init_cinder_service_user_conf fi } # create_cinder_accounts() - Set up common required cinder accounts -# Tenant User Roles +# Project User Roles # ------------------------------------------------------------------ -# service cinder admin # if enabled +# SERVICE_PROJECT_NAME cinder service +# SERVICE_PROJECT_NAME cinder creator (if Barbican is enabled) # Migrated from keystone_data.sh function create_cinder_accounts { # Cinder if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then - create_service_user "cinder" + local extra_role="" - # block-storage is the official service type - get_or_create_service "cinder" "block-storage" "Cinder Volume Service" - if [ "$CINDER_USE_MOD_WSGI" == "False" ]; then - get_or_create_endpoint \ - "block-storage" \ - "$REGION_NAME" \ - "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v3/\$(project_id)s" - - get_or_create_service "cinderv2" "volumev2" "Cinder Volume Service V2" - get_or_create_endpoint \ - "volumev2" \ - "$REGION_NAME" \ - "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v2/\$(project_id)s" - - get_or_create_service "cinderv3" "volumev3" "Cinder Volume Service V3" - get_or_create_endpoint \ - "volumev3" \ - "$REGION_NAME" \ - "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v3/\$(project_id)s" - else - get_or_create_endpoint \ - "block-storage" \ - "$REGION_NAME" \ - "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST/volume/v3/\$(project_id)s" - - get_or_create_service "cinderv2" "volumev2" "Cinder Volume Service V2" - get_or_create_endpoint \ - "volumev2" \ - "$REGION_NAME" \ - "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST/volume/v2/\$(project_id)s" - - get_or_create_service "cinderv3" "volumev3" "Cinder Volume Service V3" - get_or_create_endpoint \ - "volumev3" \ - "$REGION_NAME" \ - "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST/volume/v3/\$(project_id)s" + # cinder needs the "creator" role in order to interact with barbican + if is_service_enabled barbican; then + extra_role=$(get_or_create_role "creator") fi + create_service_user "cinder" $extra_role + + local cinder_api_url + cinder_api_url="$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST/volume" + + # block-storage is the official service type + get_or_create_service "cinder" "block-storage" "Cinder Volume Service" + get_or_create_endpoint \ + "block-storage" \ + "$REGION_NAME" \ + "$cinder_api_url/v3" configure_cinder_internal_tenant fi } @@ -401,25 +501,42 @@ function init_cinder { be_type=${be%%:*} be_name=${be##*:} if type 
init_cinder_backend_${be_type} >/dev/null 2>&1; then - # Always init the default volume group for lvm. - if [[ "$be_type" == "lvm" ]]; then - init_default_lvm_volume_group - fi init_cinder_backend_${be_type} ${be_name} fi done fi + if is_service_enabled c-bak && [[ -n "$CINDER_BACKUP_DRIVER" ]]; then + if type init_cinder_backup_$CINDER_BACKUP_DRIVER >/dev/null 2>&1; then + init_cinder_backup_$CINDER_BACKUP_DRIVER + fi + fi + mkdir -p $CINDER_STATE_PATH/volumes } + +function init_os_brick { + mkdir -p $OS_BRICK_LOCK_PATH + if is_service_enabled cinder; then + iniset $CINDER_CONF os_brick lock_path $OS_BRICK_LOCK_PATH + fi + if is_service_enabled nova; then + iniset $NOVA_CONF os_brick lock_path $OS_BRICK_LOCK_PATH + fi + if is_service_enabled glance; then + iniset $GLANCE_API_CONF os_brick lock_path $OS_BRICK_LOCK_PATH + iniset $GLANCE_CACHE_CONF os_brick lock_path $OS_BRICK_LOCK_PATH + fi +} + # install_cinder() - Collect source and prepare function install_cinder { git_clone $CINDER_REPO $CINDER_DIR $CINDER_BRANCH setup_develop $CINDER_DIR - if [[ "$CINDER_ISCSI_HELPER" == "tgtadm" ]]; then + if [[ "$CINDER_TARGET_HELPER" == "tgtadm" ]]; then install_package tgt - elif [[ "$CINDER_ISCSI_HELPER" == "lioadm" ]]; then + elif [[ "$CINDER_TARGET_HELPER" == "lioadm" ]]; then if is_ubuntu; then # TODO(frickler): Workaround for https://launchpad.net/bugs/1819819 sudo mkdir -p /etc/target @@ -428,6 +545,43 @@ function install_cinder { else install_package targetcli fi + elif [[ "$CINDER_TARGET_HELPER" == "nvmet" ]]; then + install_package nvme-cli + + # TODO: Remove manual installation of the dependency when the + # requirement is added to nvmetcli: + # http://lists.infradead.org/pipermail/linux-nvme/2022-July/033576.html + if is_ubuntu; then + install_package python3-configshell-fb + else + install_package python3-configshell + fi + # Install from source because Ubuntu doesn't have the package and some packaged versions didn't work on Python 3 + pip_install git+git://git.infradead.org/users/hch/nvmetcli.git + + sudo modprobe nvmet + sudo modprobe nvme-fabrics + + if [[ $CINDER_TARGET_PROTOCOL == 'nvmet_rdma' ]]; then + install_package rdma-core + sudo modprobe nvme-rdma + + # Create the Soft-RoCE device over the networking interface + local iface=${HOST_IP_IFACE:-`ip -br -$SERVICE_IP_VERSION a | grep $CINDER_MY_IP | awk '{print $1}'`} + if [[ -z "$iface" ]]; then + die $LINENO "Cannot find interface to bind Soft-RoCE" + fi + + if ! 
sudo rdma link | grep $iface ; then + sudo rdma link add rxe_$iface type rxe netdev $iface + fi + + elif [[ $CINDER_TARGET_PROTOCOL == 'nvmet_tcp' ]]; then + sudo modprobe nvme-tcp + + else # 'nvmet_fc' + sudo modprobe nvme-fc + fi fi } @@ -460,22 +614,13 @@ function start_cinder { local service_port=$CINDER_SERVICE_PORT local service_protocol=$CINDER_SERVICE_PROTOCOL local cinder_url - if is_service_enabled tls-proxy && [ "$CINDER_USE_MOD_WSGI" == "False" ]; then - service_port=$CINDER_SERVICE_PORT_INT - service_protocol="http" - fi - if [ "$CINDER_ISCSI_HELPER" = "tgtadm" ]; then + if [ "$CINDER_TARGET_HELPER" = "tgtadm" ]; then if is_service_enabled c-vol; then # Delete any old stack.conf sudo rm -f /etc/tgt/conf.d/stack.conf _configure_tgt_for_config_d if is_ubuntu; then sudo service tgt restart - elif is_suse; then - # NOTE(dmllr): workaround restart bug - # https://bugzilla.suse.com/show_bug.cgi?id=934642 - stop_service tgtd - start_service tgtd else restart_service tgtd fi @@ -485,17 +630,8 @@ function start_cinder { fi if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then - if [ "$CINDER_USE_MOD_WSGI" == "False" ]; then - run_process c-api "$CINDER_BIN_DIR/cinder-api --config-file $CINDER_CONF" - cinder_url=$service_protocol://$SERVICE_HOST:$service_port - # Start proxy if tls enabled - if is_service_enabled tls-proxy; then - start_tls_proxy cinder '*' $CINDER_SERVICE_PORT $CINDER_SERVICE_HOST $CINDER_SERVICE_PORT_INT - fi - else - run_process "c-api" "$(which uwsgi) --procname-prefix cinder-api --ini $CINDER_UWSGI_CONF" - cinder_url=$service_protocol://$SERVICE_HOST/volume/v3 - fi + run_process "c-api" "$(which uwsgi) --procname-prefix cinder-api --ini $CINDER_UWSGI_CONF" + cinder_url=$service_protocol://$SERVICE_HOST/volume/v3 fi echo "Waiting for Cinder API to start..." @@ -504,8 +640,13 @@ function start_cinder { fi run_process c-sch "$CINDER_BIN_DIR/cinder-scheduler --config-file $CINDER_CONF" - run_process c-bak "$CINDER_BIN_DIR/cinder-backup --config-file $CINDER_CONF" - run_process c-vol "$CINDER_BIN_DIR/cinder-volume --config-file $CINDER_CONF" + # Tune glibc for Python Services using single malloc arena for all threads + # and disabling dynamic thresholds to reduce memory usage when using native + # threads directly or via eventlet.tpool + # https://www.gnu.org/software/libc/manual/html_node/Memory-Allocation-Tunables.html + malloc_tuning="MALLOC_ARENA_MAX=1 MALLOC_MMAP_THRESHOLD_=131072 MALLOC_TRIM_THRESHOLD_=262144" + run_process c-bak "$CINDER_BIN_DIR/cinder-backup --config-file $CINDER_CONF" "" "" "$malloc_tuning" + run_process c-vol "$CINDER_BIN_DIR/cinder-volume --config-file $CINDER_CONF" "" "" "$malloc_tuning" # NOTE(jdg): For cinder, startup order matters. 
To ensure that repor_capabilities is received # by the scheduler start the cinder-volume service last (or restart it) after the scheduler @@ -520,6 +661,23 @@ function stop_cinder { stop_process c-vol } +function create_one_type { + type_name=$1 + property_key=$2 + property_value=$3 + # NOTE (e0ne): openstack client doesn't work with cinder in noauth mode + if is_service_enabled keystone; then + openstack --os-region-name="$REGION_NAME" volume type create --property $property_key="$property_value" $type_name + else + # TODO (e0ne): use openstack client once it will support cinder in noauth mode: + # https://bugs.launchpad.net/python-cinderclient/+bug/1755279 + local cinder_url + cinder_url=$CINDER_SERVICE_PROTOCOL://$SERVICE_HOST:$CINDER_SERVICE_PORT/v3 + OS_USER_ID=$OS_USERNAME OS_PROJECT_ID=$OS_PROJECT_NAME cinder --os-auth-type noauth --os-endpoint=$cinder_url type-create $type_name + OS_USER_ID=$OS_USERNAME OS_PROJECT_ID=$OS_PROJECT_NAME cinder --os-auth-type noauth --os-endpoint=$cinder_url type-key $type_name set $property_key="$property_value" + fi +} + # create_volume_types() - Create Cinder's configured volume types function create_volume_types { # Create volume types @@ -527,18 +685,20 @@ function create_volume_types { local be be_name for be in ${CINDER_ENABLED_BACKENDS//,/ }; do be_name=${be##*:} - # NOTE (e0ne): openstack client doesn't work with cinder in noauth mode - if is_service_enabled keystone; then - openstack --os-region-name="$REGION_NAME" volume type create --property volume_backend_name="${be_name}" ${be_name} - else - # TODO (e0ne): use openstack client once it will support cinder in noauth mode: - # https://bugs.launchpad.net/python-cinderclient/+bug/1755279 - local cinder_url - cinder_url=$CINDER_SERVICE_PROTOCOL://$SERVICE_HOST:$CINDER_SERVICE_PORT/v3 - OS_USER_ID=$OS_USERNAME OS_PROJECT_ID=$OS_PROJECT_NAME cinder --os-auth-type noauth --os-endpoint=$cinder_url type-create ${be_name} - OS_USER_ID=$OS_USERNAME OS_PROJECT_ID=$OS_PROJECT_NAME cinder --os-auth-type noauth --os-endpoint=$cinder_url type-key ${be_name} set volume_backend_name=${be_name} - fi + create_one_type $be_name "volume_backend_name" $be_name done + + if [[ $ENABLE_VOLUME_MULTIATTACH == "True" ]]; then + create_one_type $VOLUME_TYPE_MULTIATTACH $VOLUME_TYPE_MULTIATTACH " True" + fi + + # Increase quota for the service project if glance is using cinder, + # since it's likely to occasionally go above the default 10 in parallel + # test execution. + if [[ "$USE_CINDER_FOR_GLANCE" == "True" ]]; then + openstack --os-region-name="$REGION_NAME" \ + quota set --volumes 50 "$SERVICE_PROJECT_NAME" + fi fi } @@ -575,6 +735,23 @@ function configure_cinder_image_volume_cache { done } +function configure_cinder_volume_upload { + # Expect UPLOAD_VOLUME_OPTIMIZED_FOR_BACKENDS to be a list of backends + # similar to CINDER_ENABLED_BACKENDS with NAME:TYPE where NAME will + # be the backend specific configuration stanza in cinder.conf. 
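+    # As an illustrative example: with the inherited default of
+    # CINDER_ENABLED_BACKENDS (lvm:lvmdriver-1), the loop below writes
+    # image_upload_use_cinder_backend and image_upload_use_internal_tenant
+    # into the [lvmdriver-1] stanza of cinder.conf.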
+ local be be_name + for be in ${CINDER_UPLOAD_OPTIMIZED_BACKENDS//,/ }; do + be_name=${be##*:} + + iniset $CINDER_CONF $be_name image_upload_use_cinder_backend $CINDER_UPLOAD_OPTIMIZED + iniset $CINDER_CONF $be_name image_upload_use_internal_tenant $CINDER_UPLOAD_INTERNAL_TENANT + done +} + +function init_cinder_service_user_conf { + configure_keystone_authtoken_middleware $CINDER_CONF cinder service_user + iniset $CINDER_CONF service_user send_service_user_token True +} # Restore xtrace $_XTRACE_CINDER diff --git a/lib/cinder_backends/ceph b/lib/cinder_backends/ceph index 33c9706d3d..0b465730c0 100644 --- a/lib/cinder_backends/ceph +++ b/lib/cinder_backends/ceph @@ -6,12 +6,6 @@ # Enable with: # # CINDER_ENABLED_BACKENDS+=,ceph:ceph -# -# Optional parameters: -# CINDER_BAK_CEPH_POOL= -# CINDER_BAK_CEPH_USER= -# CINDER_BAK_CEPH_POOL_PG= -# CINDER_BAK_CEPH_POOL_PGP= # Dependencies: # @@ -29,11 +23,6 @@ set +o xtrace # Defaults # -------- -CINDER_BAK_CEPH_POOL=${CINDER_BAK_CEPH_POOL:-backups} -CINDER_BAK_CEPH_POOL_PG=${CINDER_BAK_CEPH_POOL_PG:-8} -CINDER_BAK_CEPH_POOL_PGP=${CINDER_BAK_CEPH_POOL_PGP:-8} -CINDER_BAK_CEPH_USER=${CINDER_BAK_CEPH_USER:-cinder-bak} - # Entry Points # ------------ @@ -52,27 +41,6 @@ function configure_cinder_backend_ceph { iniset $CINDER_CONF $be_name rbd_flatten_volume_from_snapshot False iniset $CINDER_CONF $be_name rbd_max_clone_depth 5 iniset $CINDER_CONF DEFAULT glance_api_version 2 - - if is_service_enabled c-bak; then - sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_BAK_CEPH_POOL} ${CINDER_BAK_CEPH_POOL_PG} ${CINDER_BAK_CEPH_POOL_PGP} - if [ "$REMOTE_CEPH" = "False" ]; then - # Configure Cinder backup service options, ceph pool, ceph user and ceph key - sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} size ${CEPH_REPLICAS} - if [[ $CEPH_REPLICAS -ne 1 ]]; then - sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} crush_ruleset ${RULE_ID} - fi - fi - sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_BAK_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_BAK_CEPH_POOL}, allow rwx pool=${CINDER_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring - sudo chown $(whoami):$(whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring - - iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.ceph.CephBackupDriver" - iniset $CINDER_CONF DEFAULT backup_ceph_conf "$CEPH_CONF_FILE" - iniset $CINDER_CONF DEFAULT backup_ceph_pool "$CINDER_BAK_CEPH_POOL" - iniset $CINDER_CONF DEFAULT backup_ceph_user "$CINDER_BAK_CEPH_USER" - iniset $CINDER_CONF DEFAULT backup_ceph_stripe_unit 0 - iniset $CINDER_CONF DEFAULT backup_ceph_stripe_count 0 - iniset $CINDER_CONF DEFAULT restore_discard_excess_bytes True - fi } # Restore xtrace diff --git a/lib/cinder_backends/ceph_iscsi b/lib/cinder_backends/ceph_iscsi new file mode 100644 index 0000000000..94412e0da6 --- /dev/null +++ b/lib/cinder_backends/ceph_iscsi @@ -0,0 +1,56 @@ +#!/bin/bash +# +# lib/cinder_backends/ceph_iscsi +# Configure the ceph_iscsi backend + +# Enable with: +# +# CINDER_ENABLED_BACKENDS+=,ceph_iscsi:ceph_iscsi +# +# Optional paramteters: +# CEPH_ISCSI_API_URL= +# +# Dependencies: +# +# - ``functions`` file +# - ``cinder`` configurations + +# configure_ceph_backend_ceph_iscsi - called from configure_cinder() + + +# Save trace setting +_XTRACE_CINDER_CEPH_ISCSI=$(set +o | grep xtrace) +set +o xtrace + +# Entry Points +# ------------ + +# 
configure_cinder_backend_ceph_iscsi - Set config files, create data dirs, etc +# configure_cinder_backend_ceph_iscsi $name +function configure_cinder_backend_ceph_iscsi { + local be_name=$1 + + CEPH_ISCSI_API_URL=${CEPH_ISCSI_API_URL:-http://$CEPH_ISCSI_API_HOST:$CEPH_ISCSI_API_PORT} + + iniset $CINDER_CONF $be_name volume_backend_name $be_name + iniset $CINDER_CONF $be_name volume_driver "cinder.volume.drivers.ceph.rbd_iscsi.RBDISCSIDriver" + iniset $CINDER_CONF $be_name rbd_ceph_conf "$CEPH_CONF_FILE" + iniset $CINDER_CONF $be_name rbd_pool "$CINDER_CEPH_POOL" + iniset $CINDER_CONF $be_name rbd_user "$CINDER_CEPH_USER" + iniset $CINDER_CONF $be_name rbd_iscsi_api_user "$CEPH_ISCSI_API_USER" + iniset $CINDER_CONF $be_name rbd_iscsi_api_password "$CEPH_ISCSI_API_PASSWORD" + iniset $CINDER_CONF $be_name rbd_iscsi_api_url "$CEPH_ISCSI_API_URL" + iniset $CINDER_CONF $be_name rbd_iscsi_target_iqn "$CEPH_ISCSI_TARGET_IQN" + iniset $CINDER_CONF $be_name rbd_flatten_volume_from_snapshot False + iniset $CINDER_CONF $be_name rbd_max_clone_depth 5 + iniset $CINDER_CONF DEFAULT glance_api_version 2 + + pip_install rbd-iscsi-client +} + +# Restore xtrace +$_XTRACE_CINDER_CEPH_ISCSI + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/cinder_backends/fake_gate b/lib/cinder_backends/fake_gate index 3ffd9a6785..3b9f1d1164 100644 --- a/lib/cinder_backends/fake_gate +++ b/lib/cinder_backends/fake_gate @@ -50,7 +50,7 @@ function configure_cinder_backend_lvm { iniset $CINDER_CONF $be_name volume_backend_name $be_name iniset $CINDER_CONF $be_name volume_driver "cinder.tests.fake_driver.FakeGateDriver" iniset $CINDER_CONF $be_name volume_group $VOLUME_GROUP_NAME-$be_name - iniset $CINDER_CONF $be_name target_helper "$CINDER_ISCSI_HELPER" + iniset $CINDER_CONF $be_name target_helper "$CINDER_TARGET_HELPER" iniset $CINDER_CONF $be_name lvm_type "$CINDER_LVM_TYPE" if [[ "$CINDER_VOLUME_CLEAR" == "non" ]]; then diff --git a/lib/cinder_backends/lvm b/lib/cinder_backends/lvm index 497081c9e4..42865119da 100644 --- a/lib/cinder_backends/lvm +++ b/lib/cinder_backends/lvm @@ -50,9 +50,12 @@ function configure_cinder_backend_lvm { iniset $CINDER_CONF $be_name volume_backend_name $be_name iniset $CINDER_CONF $be_name volume_driver "cinder.volume.drivers.lvm.LVMVolumeDriver" iniset $CINDER_CONF $be_name volume_group $VOLUME_GROUP_NAME-$be_name - iniset $CINDER_CONF $be_name target_helper "$CINDER_ISCSI_HELPER" + iniset $CINDER_CONF $be_name target_helper "$CINDER_TARGET_HELPER" + iniset $CINDER_CONF $be_name target_protocol "$CINDER_TARGET_PROTOCOL" + iniset $CINDER_CONF $be_name target_port "$CINDER_TARGET_PORT" + iniset $CINDER_CONF $be_name target_prefix "$CINDER_TARGET_PREFIX" iniset $CINDER_CONF $be_name lvm_type "$CINDER_LVM_TYPE" - + iniset $CINDER_CONF $be_name volume_clear "$CINDER_VOLUME_CLEAR" } # init_cinder_backend_lvm - Initialize volume group diff --git a/lib/cinder_backends/nfs b/lib/cinder_backends/nfs index 89a37a1f02..f3fcbeff19 100644 --- a/lib/cinder_backends/nfs +++ b/lib/cinder_backends/nfs @@ -32,6 +32,15 @@ function configure_cinder_backend_nfs { iniset $CINDER_CONF $be_name volume_backend_name $be_name iniset $CINDER_CONF $be_name volume_driver "cinder.volume.drivers.nfs.NfsDriver" iniset $CINDER_CONF $be_name nfs_shares_config "$CINDER_CONF_DIR/nfs-shares-$be_name.conf" + iniset $CINDER_CONF $be_name nas_host localhost + iniset $CINDER_CONF $be_name nas_share_path ${NFS_EXPORT_DIR} + iniset $CINDER_CONF $be_name nas_secure_file_operations \ + ${NFS_SECURE_FILE_OPERATIONS} + 
iniset $CINDER_CONF $be_name nas_secure_file_permissions \ + ${NFS_SECURE_FILE_PERMISSIONS} + + # NFS snapshot support is currently opt-in only. + iniset $CINDER_CONF $be_name nfs_snapshot_support True echo "$CINDER_NFS_SERVERPATH" | tee "$CINDER_CONF_DIR/nfs-shares-$be_name.conf" } diff --git a/lib/cinder_backups/ceph b/lib/cinder_backups/ceph new file mode 100644 index 0000000000..e4d6b96407 --- /dev/null +++ b/lib/cinder_backups/ceph @@ -0,0 +1,58 @@ +#!/bin/bash +# +# lib/cinder_backups/ceph +# Configure the ceph backup driver + +# Enable with: +# +# CINDER_BACKUP_DRIVER=ceph + +# Dependencies: +# +# - ``functions`` file +# - ``cinder`` configurations + +# Save trace setting +_XTRACE_CINDER_CEPH=$(set +o | grep xtrace) +set +o xtrace + +# Defaults +# -------- + +CINDER_BAK_CEPH_MAX_SNAPSHOTS=${CINDER_BAK_CEPH_MAX_SNAPSHOTS:-0} +CINDER_BAK_CEPH_POOL=${CINDER_BAK_CEPH_POOL:-backups} +CINDER_BAK_CEPH_POOL_PG=${CINDER_BAK_CEPH_POOL_PG:-8} +CINDER_BAK_CEPH_POOL_PGP=${CINDER_BAK_CEPH_POOL_PGP:-8} +CINDER_BAK_CEPH_USER=${CINDER_BAK_CEPH_USER:-cinder-bak} + + +function configure_cinder_backup_ceph { + # Execute this part only when cephadm is not used + if [[ "$CEPHADM_DEPLOY" = "False" ]]; then + sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_BAK_CEPH_POOL} ${CINDER_BAK_CEPH_POOL_PG} ${CINDER_BAK_CEPH_POOL_PGP} + if [[ "$REMOTE_CEPH" = "False" && "$CEPH_REPLICAS" -ne 1 ]]; then + sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} crush_ruleset ${RULE_ID} + fi + sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_BAK_CEPH_USER} mon "profile rbd" osd "profile rbd pool=${CINDER_BAK_CEPH_POOL}, profile rbd pool=${CINDER_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring + sudo chown $STACK_USER ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring + fi + + iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.ceph.CephBackupDriver" + iniset $CINDER_CONF DEFAULT backup_ceph_conf "$CEPH_CONF_FILE" + iniset $CINDER_CONF DEFAULT backup_ceph_max_snapshots "$CINDER_BAK_CEPH_MAX_SNAPSHOTS" + iniset $CINDER_CONF DEFAULT backup_ceph_pool "$CINDER_BAK_CEPH_POOL" + iniset $CINDER_CONF DEFAULT backup_ceph_user "$CINDER_BAK_CEPH_USER" + iniset $CINDER_CONF DEFAULT backup_ceph_stripe_unit 0 + iniset $CINDER_CONF DEFAULT backup_ceph_stripe_count 0 + iniset $CINDER_CONF DEFAULT restore_discard_excess_bytes True +} + +# init_cinder_backup_ceph: nothing to do +# cleanup_cinder_backup_ceph: nothing to do + +# Restore xtrace +$_XTRACE_CINDER_CEPH + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/cinder_backups/s3_swift b/lib/cinder_backups/s3_swift new file mode 100644 index 0000000000..6fb248606e --- /dev/null +++ b/lib/cinder_backups/s3_swift @@ -0,0 +1,45 @@ +#!/bin/bash +# +# lib/cinder_backups/s3_swift +# Configure the s3 backup driver with swift s3api +# +# TODO: create lib/cinder_backup/s3 for external s3 compatible storage + +# Enable with: +# +# CINDER_BACKUP_DRIVER=s3_swift +# enable_service s3api s-proxy s-object s-container s-account + +# Dependencies: +# +# - ``functions`` file +# - ``cinder`` configurations + +# Save trace setting +_XTRACE_CINDER_S3_SWIFT=$(set +o | grep xtrace) +set +o xtrace + +function configure_cinder_backup_s3_swift { + # This configuration requires swift and s3api. 
If we're + # on a subnode we might not know if they are enabled + iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.s3.S3BackupDriver" + iniset $CINDER_CONF DEFAULT backup_s3_endpoint_url "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$S3_SERVICE_PORT" +} + +function init_cinder_backup_s3_swift { + openstack ec2 credential create + iniset $CINDER_CONF DEFAULT backup_s3_store_access_key "$(openstack ec2 credential list -c Access -f value)" + iniset $CINDER_CONF DEFAULT backup_s3_store_secret_key "$(openstack ec2 credential list -c Secret -f value)" + if is_service_enabled tls-proxy; then + iniset $CINDER_CONF DEFAULT backup_s3_ca_cert_file "$SSL_BUNDLE_FILE" + fi +} + +# cleanup_cinder_backup_s3_swift: nothing to do + +# Restore xtrace +$_XTRACE_CINDER_S3_SWIFT + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/cinder_backups/swift b/lib/cinder_backups/swift new file mode 100644 index 0000000000..c7ec306246 --- /dev/null +++ b/lib/cinder_backups/swift @@ -0,0 +1,41 @@ +#!/bin/bash +# +# lib/cinder_backups/swift +# Configure the swift backup driver + +# Enable with: +# +# CINDER_BACKUP_DRIVER=swift + +# Dependencies: +# +# - ``functions`` file +# - ``cinder`` configurations + +# Save trace setting +_XTRACE_CINDER_SWIFT=$(set +o | grep xtrace) +set +o xtrace + + +function configure_cinder_backup_swift { + # NOTE(mriedem): The default backup driver uses swift and if we're + # on a subnode we might not know if swift is enabled, but chances are + # good that it is on the controller so configure the backup service + # to use it. + iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.swift.SwiftBackupDriver" + iniset $CINDER_CONF DEFAULT backup_swift_url "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$SWIFT_DEFAULT_BIND_PORT/v1/AUTH_" + if is_service_enabled tls-proxy; then + iniset $CINDER_CONF DEFAULT backup_swift_ca_cert_file $SSL_BUNDLE_FILE + fi +} + +# init_cinder_backup_swift: nothing to do +# cleanup_cinder_backup_swift: nothing to do + + +# Restore xtrace +$_XTRACE_CINDER_SWIFT + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/cinder_plugins/XenAPINFS b/lib/cinder_plugins/XenAPINFS deleted file mode 100644 index 92135e7c4f..0000000000 --- a/lib/cinder_plugins/XenAPINFS +++ /dev/null @@ -1,46 +0,0 @@ -#!/bin/bash -# -# lib/cinder_plugins/XenAPINFS -# Configure the XenAPINFS driver - -# Enable with: -# -# CINDER_DRIVER=XenAPINFS - -# Dependencies: -# -# - ``functions`` file -# - ``cinder`` configurations - -# configure_cinder_driver - make configuration changes, including those to other services - -# Save trace setting -_XTRACE_CINDER_XENAPINFS=$(set +o | grep xtrace) -set +o xtrace - - -# Defaults -# -------- - -# Set up default directories - - -# Entry Points -# ------------ - -# configure_cinder_driver - Set config files, create data dirs, etc -function configure_cinder_driver { - iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.xenapi.sm.XenAPINFSDriver" - iniset $CINDER_CONF DEFAULT xenapi_connection_url "$CINDER_XENAPI_CONNECTION_URL" - iniset $CINDER_CONF DEFAULT xenapi_connection_username "$CINDER_XENAPI_CONNECTION_USERNAME" - iniset $CINDER_CONF DEFAULT xenapi_connection_password "$CINDER_XENAPI_CONNECTION_PASSWORD" - iniset $CINDER_CONF DEFAULT xenapi_nfs_server "$CINDER_XENAPI_NFS_SERVER" - iniset $CINDER_CONF DEFAULT xenapi_nfs_serverpath "$CINDER_XENAPI_NFS_SERVERPATH" -} - -# Restore xtrace -$_XTRACE_CINDER_XENAPINFS - -# Local variables: -# mode: shell-script -# End: diff --git a/lib/database b/lib/database 
index 7940cf2208..78563f6f6d 100644 --- a/lib/database +++ b/lib/database @@ -89,6 +89,10 @@ function initialize_database_backends { DATABASE_PASSWORD=$MYSQL_PASSWORD fi + return 0 +} + +function define_database_baseurl { # We configure Nova, Horizon, Glance and Keystone to use MySQL as their # database server. While they share a single server, each has their own # database and tables. @@ -100,8 +104,6 @@ function initialize_database_backends { # NOTE: Don't specify ``/db`` in this string so we can use it for multiple services BASE_SQL_CONN=${BASE_SQL_CONN:-$(get_database_type_$DATABASE_TYPE)://$DATABASE_USER:$DATABASE_PASSWORD@$DATABASE_HOST} - - return 0 } # Recreate a given database diff --git a/lib/databases/mysql b/lib/databases/mysql index d4969d713c..a47580ca3d 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -12,6 +12,7 @@ _XTRACE_DB_MYSQL=$(set +o | grep xtrace) set +o xtrace MYSQL_DRIVER=${MYSQL_DRIVER:-PyMySQL} +INSTALL_DATABASE_SERVER_PACKAGES=$(trueorfalse True INSTALL_DATABASE_SERVER_PACKAGES) register_database mysql @@ -19,11 +20,7 @@ if [[ -z "$MYSQL_SERVICE_NAME" ]]; then MYSQL_SERVICE_NAME=mysql if is_fedora && ! is_oraclelinux; then MYSQL_SERVICE_NAME=mariadb - elif is_suse && systemctl list-unit-files | grep -q 'mariadb\.service'; then - # Older mariadb packages on SLES 12 provided mysql.service. The - # newer ones on SLES 12 and 15 use mariadb.service; they also - # provide a mysql.service symlink for backwards-compatibility, but - # let's not rely on that. + elif [[ "$DISTRO" =~ trixie|bookworm|bullseye ]]; then MYSQL_SERVICE_NAME=mariadb fi fi @@ -51,7 +48,7 @@ function cleanup_database_mysql { elif is_oraclelinux; then uninstall_package mysql-community-server sudo rm -rf /var/lib/mysql - elif is_suse || is_fedora; then + elif is_fedora; then uninstall_package mariadb-server sudo rm -rf /var/lib/mysql else @@ -66,12 +63,12 @@ function recreate_database_mysql { } function configure_database_mysql { - local my_conf mysql slow_log + local my_conf mysql slow_log my_client_conf echo_summary "Configuring and starting MySQL" if is_ubuntu; then my_conf=/etc/mysql/my.cnf - elif is_suse || is_oraclelinux; then + elif is_oraclelinux; then my_conf=/etc/my.cnf elif is_fedora; then my_conf=/etc/my.cnf @@ -83,42 +80,74 @@ function configure_database_mysql { exit_distro_not_supported "mysql configuration" fi - # Start mysql-server - if is_fedora || is_suse; then + # Set fips mode on + if is_ubuntu; then + if is_fips_enabled; then + my_client_conf=/etc/mysql/mysql.conf.d/mysql.cnf + iniset -sudo $my_client_conf mysql ssl-fips-mode "on" + iniset -sudo $my_conf mysqld ssl-fips-mode "on" + fi + fi + + # Change bind-address from localhost (127.0.0.1) to any (::) + iniset -sudo $my_conf mysqld bind-address "$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)" + + # (Re)Start mysql-server + if is_fedora; then # service is not started by default start_service $MYSQL_SERVICE_NAME + elif is_ubuntu; then + # required since bind-address could have changed above + restart_service $MYSQL_SERVICE_NAME fi # Set the root password - only works the first time. For Ubuntu, we already # did that with debconf before installing the package, but we still try, - # because the package might have been installed already. - sudo mysqladmin -u root password $DATABASE_PASSWORD || true + # because the package might have been installed already. We don't do this + # for Ubuntu 22.04+ because the authorization model change in + # version 10.4 of mariadb. 
See + # https://mariadb.org/authentication-in-mariadb-10-4/ + if ! (is_ubuntu && [[ ! "$DISTRO" =~ trixie|bookworm|bullseye ]] && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]); then + sudo mysqladmin -u root password $DATABASE_PASSWORD || true + fi # In case of Mariadb, giving hostname in arguments causes permission # problems as it expects connection through socket if is_ubuntu && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]; then local cmd_args="-uroot -p$DATABASE_PASSWORD " else - local cmd_args="-uroot -p$DATABASE_PASSWORD -h127.0.0.1 " + local cmd_args="-uroot -p$DATABASE_PASSWORD -h$SERVICE_LOCAL_HOST " + fi + + # Workaround for mariadb > 11.6.2, + # see https://bugs.launchpad.net/nova/+bug/2116186/comments/3 + min_db_ver="11.6.2" + db_version=$(sudo mysql ${cmd_args} -e "select version();" -sN | cut -d '-' -f 1) + max_db_ver=$(printf '%s\n' ${min_db_ver} ${db_version} | sort -V | tail -n 1) + if [[ "${min_db_ver}" != "${max_db_ver}" ]]; then + iniset -sudo $my_conf mysqld innodb_snapshot_isolation OFF + restart_service $MYSQL_SERVICE_NAME fi # In mariadb e.g. on Ubuntu socket plugin is used for authentication # as root so it works only as sudo. To restore old "mysql like" behaviour, # we need to change auth plugin for root user - if is_ubuntu && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]; then - sudo mysql $cmd_args -e "UPDATE mysql.user SET plugin='' WHERE user='$DATABASE_USER' AND host='localhost';" - sudo mysql $cmd_args -e "FLUSH PRIVILEGES;" + # TODO(frickler): simplify this logic + if is_ubuntu && [[ ! "$DISTRO" =~ bookworm|bullseye ]] && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]; then + # For Ubuntu 22.04+ we follow the model outlined in + # https://mariadb.org/authentication-in-mariadb-10-4/ + sudo mysql -e "ALTER USER $DATABASE_USER@localhost IDENTIFIED VIA mysql_native_password USING PASSWORD('$DATABASE_PASSWORD');" + fi + if ! (is_ubuntu && [[ ! 
"$DISTRO" =~ bookworm|bullseye ]] && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]); then + # Create DB user if it does not already exist + sudo mysql $cmd_args -e "CREATE USER IF NOT EXISTS '$DATABASE_USER'@'%' identified by '$DATABASE_PASSWORD';" + # Update the DB to give user '$DATABASE_USER'@'%' full control of the all databases: + sudo mysql $cmd_args -e "GRANT ALL PRIVILEGES ON *.* TO '$DATABASE_USER'@'%';" fi - # Create DB user if it does not already exist - sudo mysql $cmd_args -e "CREATE USER IF NOT EXISTS '$DATABASE_USER'@'%' identified by '$DATABASE_PASSWORD';" - # Update the DB to give user '$DATABASE_USER'@'%' full control of the all databases: - sudo mysql $cmd_args -e "GRANT ALL PRIVILEGES ON *.* TO '$DATABASE_USER'@'%';" # Now update ``my.cnf`` for some local needs and restart the mysql service - # Change bind-address from localhost (127.0.0.1) to any (::) and - # set default db type to InnoDB - iniset -sudo $my_conf mysqld bind-address "$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)" + # Set default db type to InnoDB iniset -sudo $my_conf mysqld sql_mode TRADITIONAL iniset -sudo $my_conf mysqld default-storage-engine InnoDB iniset -sudo $my_conf mysqld max_connections 1024 @@ -143,6 +172,29 @@ function configure_database_mysql { iniset -sudo $my_conf mysqld log-queries-not-using-indexes 1 fi + if [[ "$MYSQL_GATHER_PERFORMANCE" == "True" ]]; then + echo "enabling MySQL performance counting" + + # Install our sqlalchemy plugin + pip_install ${TOP_DIR}/tools/dbcounter + + # Create our stats database for accounting + recreate_database stats + mysql -u $DATABASE_USER -p$DATABASE_PASSWORD -h $MYSQL_HOST -e \ + "CREATE TABLE queries (db VARCHAR(32), op VARCHAR(32), + count INT, PRIMARY KEY (db, op)) ENGINE MEMORY" stats + fi + + if [[ "$MYSQL_REDUCE_MEMORY" == "True" ]]; then + iniset -sudo $my_conf mysqld read_buffer_size 64K + iniset -sudo $my_conf mysqld innodb_buffer_pool_size 16M + iniset -sudo $my_conf mysqld thread_stack 192K + iniset -sudo $my_conf mysqld thread_cache_size 8 + iniset -sudo $my_conf mysqld tmp_table_size 8M + iniset -sudo $my_conf mysqld sort_buffer_size 8M + iniset -sudo $my_conf mysqld max_allowed_packet 8M + fi + restart_service $MYSQL_SERVICE_NAME } @@ -173,18 +225,17 @@ EOF chmod 0600 $HOME/.my.cnf fi # Install mysql-server - if is_oraclelinux; then - install_package mysql-community-server - elif is_fedora; then - install_package mariadb-server mariadb-devel - sudo systemctl enable $MYSQL_SERVICE_NAME - elif is_suse; then - install_package mariadb-server - sudo systemctl enable $MYSQL_SERVICE_NAME - elif is_ubuntu; then - install_package $MYSQL_SERVICE_NAME-server - else - exit_distro_not_supported "mysql installation" + if [[ "$INSTALL_DATABASE_SERVER_PACKAGES" == "True" ]]; then + if is_oraclelinux; then + install_package mysql-community-server + elif is_fedora; then + install_package mariadb-server mariadb-devel mariadb + sudo systemctl enable $MYSQL_SERVICE_NAME + elif is_ubuntu; then + install_package $MYSQL_SERVICE_NAME-server + else + exit_distro_not_supported "mysql installation" + fi fi } @@ -200,7 +251,17 @@ function install_database_python_mysql { function database_connection_url_mysql { local db=$1 - echo "$BASE_SQL_CONN/$db?charset=utf8" + local plugin + + # NOTE(danms): We don't enable perf on subnodes yet because the + # plugin is not installed there + if [[ "$MYSQL_GATHER_PERFORMANCE" == "True" ]]; then + if is_service_enabled mysql; then + plugin="&plugin=dbcounter" + fi + fi + + echo "$BASE_SQL_CONN/$db?charset=utf8$plugin" } diff --git 
a/lib/databases/postgresql b/lib/databases/postgresql index 618834b550..2aa38ccf76 100644 --- a/lib/databases/postgresql +++ b/lib/databases/postgresql @@ -13,7 +13,7 @@ set +o xtrace MAX_DB_CONNECTIONS=${MAX_DB_CONNECTIONS:-200} - +INSTALL_DATABASE_SERVER_PACKAGES=$(trueorfalse True INSTALL_DATABASE_SERVER_PACKAGES) register_database postgresql @@ -32,7 +32,7 @@ function cleanup_database_postgresql { # Get ruthless with mysql apt_get purge -y postgresql* return - elif is_fedora || is_suse; then + elif is_fedora; then uninstall_package postgresql-server else return @@ -46,6 +46,10 @@ function recreate_database_postgresql { createdb -h $DATABASE_HOST -U$DATABASE_USER -l C -T template0 -E utf8 $db } +function _exit_pg_init { + sudo cat /var/lib/pgsql/initdb_postgresql.log +} + function configure_database_postgresql { local pg_conf pg_dir pg_hba check_role version echo_summary "Configuring and starting PostgreSQL" @@ -53,7 +57,9 @@ function configure_database_postgresql { pg_hba=/var/lib/pgsql/data/pg_hba.conf pg_conf=/var/lib/pgsql/data/postgresql.conf if ! sudo [ -e $pg_hba ]; then + trap _exit_pg_init EXIT sudo postgresql-setup initdb + trap - EXIT fi elif is_ubuntu; then version=`psql --version | cut -d ' ' -f3 | cut -d. -f1-2` @@ -66,11 +72,6 @@ function configure_database_postgresql { pg_dir=`find /etc/postgresql -name pg_hba.conf|xargs dirname` pg_hba=$pg_dir/pg_hba.conf pg_conf=$pg_dir/postgresql.conf - elif is_suse; then - pg_hba=/var/lib/pgsql/data/pg_hba.conf - pg_conf=/var/lib/pgsql/data/postgresql.conf - # initdb is called when postgresql is first started - sudo [ -e $pg_hba ] || start_service postgresql else exit_distro_not_supported "postgresql configuration" fi @@ -95,7 +96,6 @@ function configure_database_postgresql { function install_database_postgresql { echo_summary "Installing postgresql" - deprecated "Use of postgresql in devstack is deprecated, and will be removed during the Pike cycle" local pgpass=$HOME/.pgpass if [[ ! -e $pgpass ]]; then cat < $pgpass @@ -105,15 +105,17 @@ EOF else sed -i "s/:root:\w\+/:root:$DATABASE_PASSWORD/" $pgpass fi - if is_ubuntu; then - install_package postgresql - elif is_fedora || is_suse; then - install_package postgresql-server - if is_fedora; then - sudo systemctl enable postgresql + if [[ "$INSTALL_DATABASE_SERVER_PACKAGES" == "True" ]]; then + if is_ubuntu; then + install_package postgresql + elif is_fedora; then + install_package postgresql-server + if is_fedora; then + sudo systemctl enable postgresql + fi + else + exit_distro_not_supported "postgresql installation" fi - else - exit_distro_not_supported "postgresql installation" fi } diff --git a/lib/dstat b/lib/dstat index eb03ae0fb2..9bd0370847 100644 --- a/lib/dstat +++ b/lib/dstat @@ -33,19 +33,25 @@ function start_dstat { # To enable memory_tracker add: # enable_service memory_tracker # to your localrc - run_process memory_tracker "$TOP_DIR/tools/memory_tracker.sh" "" "root" + run_process memory_tracker "$TOP_DIR/tools/memory_tracker.sh" "" "root" "PYTHON=python${PYTHON3_VERSION}" # TODO(jh): Fail when using the old service name otherwise consumers might # never notice that is has been removed. 
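+    # (The die below makes that failure explicit instead of silently ignoring
+    # the removed service name.)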
if is_service_enabled peakmem_tracker; then die $LINENO "The peakmem_tracker service has been removed, use memory_tracker instead" fi + + # To enable file_tracker add: + # enable_service file_tracker + # to your localrc + run_process file_tracker "$TOP_DIR/tools/file_tracker.sh" } # stop_dstat() stop dstat process function stop_dstat { stop_process dstat stop_process memory_tracker + stop_process file_tracker } # Restore xtrace diff --git a/lib/etcd3 b/lib/etcd3 index 4f3a7a4349..0d22de8c73 100644 --- a/lib/etcd3 +++ b/lib/etcd3 @@ -51,7 +51,7 @@ function start_etcd3 { fi cmd+=" --listen-client-urls http://$SERVICE_HOST:$ETCD_PORT" if [ "$ENABLE_DEBUG_LOG_LEVEL" == "True" ]; then - cmd+=" --debug" + cmd+=" --log-level=debug" fi local unitfile="$SYSTEMD_DIR/$ETCD_SYSTEMD_SERVICE" diff --git a/lib/glance b/lib/glance index c2a8b7492e..9422c22141 100644 --- a/lib/glance +++ b/lib/glance @@ -41,16 +41,33 @@ else GLANCE_BIN_DIR=$(get_python_exec_prefix) fi +#S3 for Glance +GLANCE_USE_S3=$(trueorfalse False GLANCE_USE_S3) +GLANCE_S3_DEFAULT_BACKEND=${GLANCE_S3_DEFAULT_BACKEND:-s3_fast} +GLANCE_S3_BUCKET_ON_PUT=$(trueorfalse True GLANCE_S3_BUCKET_ON_PUT) +GLANCE_S3_BUCKET_NAME=${GLANCE_S3_BUCKET_NAME:-images} + # Cinder for Glance USE_CINDER_FOR_GLANCE=$(trueorfalse False USE_CINDER_FOR_GLANCE) # GLANCE_CINDER_DEFAULT_BACKEND should be one of the values # from CINDER_ENABLED_BACKENDS GLANCE_CINDER_DEFAULT_BACKEND=${GLANCE_CINDER_DEFAULT_BACKEND:-lvmdriver-1} GLANCE_STORE_ROOTWRAP_BASE_DIR=/usr/local/etc/glance -# NOTE (abhishekk): For opensuse data files are stored in different directory -if is_opensuse; then - GLANCE_STORE_ROOTWRAP_BASE_DIR=/usr/etc/glance +if [[ "$GLOBAL_VENV" == "True" ]] ; then + GLANCE_STORE_ROOTWRAP_BASE_DIR=${DEVSTACK_VENV}/etc/glance fi +# When Cinder is used as a glance store, you can optionally configure cinder to +# optimize bootable volume creation by allowing volumes to be cloned directly +# in the backend instead of transferring data via Glance. To use this feature, +# set CINDER_ALLOWED_DIRECT_URL_SCHEMES for cinder.conf and enable +# GLANCE_SHOW_DIRECT_URL and/or GLANCE_SHOW_MULTIPLE_LOCATIONS for Glance. The +# default value for both of these is False, because for some backends they +# present a grave security risk (though not for Cinder, because all that's +# exposed is the volume_id where the image data is stored.) See OSSN-0065 for +# more information: https://wiki.openstack.org/wiki/OSSN/OSSN-0065 +GLANCE_SHOW_DIRECT_URL=$(trueorfalse False GLANCE_SHOW_DIRECT_URL) +GLANCE_SHOW_MULTIPLE_LOCATIONS=$(trueorfalse False GLANCE_SHOW_MULTIPLE_LOCATIONS) + # Glance multi-store configuration # Boolean flag to enable multiple store configuration for glance GLANCE_ENABLE_MULTIPLE_STORES=$(trueorfalse False GLANCE_ENABLE_MULTIPLE_STORES) @@ -64,13 +81,7 @@ GLANCE_MULTIPLE_FILE_STORES=${GLANCE_MULTIPLE_FILE_STORES:-fast} GLANCE_DEFAULT_BACKEND=${GLANCE_DEFAULT_BACKEND:-fast} GLANCE_CACHE_DIR=${GLANCE_CACHE_DIR:=$DATA_DIR/glance/cache} - -# Full Glance functionality requires running in standalone mode. If we are -# not in uwsgi mode, then we are standalone, otherwise allow separate control. -if [[ "$WSGI_MODE" != "uwsgi" ]]; then - GLANCE_STANDALONE=True -fi -GLANCE_STANDALONE=${GLANCE_STANDALONE:-False} +GLANCE_CACHE_DRIVER=${GLANCE_CACHE_DRIVER:-centralized_db} # File path for each store specified in GLANCE_MULTIPLE_FILE_STORES, the store # identifier will be appended to this path at runtime. 
If GLANCE_MULTIPLE_FILE_STORES @@ -84,6 +95,16 @@ GLANCE_STAGING_DIR=${GLANCE_MULTISTORE_FILE_IMAGE_DIR:=$DATA_DIR/os_glance_stagi GLANCE_TASKS_DIR=${GLANCE_MULTISTORE_FILE_IMAGE_DIR:=$DATA_DIR/os_glance_tasks_store} GLANCE_USE_IMPORT_WORKFLOW=$(trueorfalse False GLANCE_USE_IMPORT_WORKFLOW) +GLANCE_ENABLE_QUOTAS=$(trueorfalse True GLANCE_ENABLE_QUOTAS) + +# Flag to set the oslo_policy.enforce_scope. This is used to switch +# This is used to disable the Image API policies scope and new defaults. +# By Default, it is True. +# For more detail: https://docs.openstack.org/oslo.policy/latest/configuration/index.html#oslo_policy.enforce_scope +GLANCE_ENFORCE_SCOPE=$(trueorfalse True GLANCE_ENFORCE_SCOPE) + +# Flag to disable image format inspection on upload +GLANCE_ENFORCE_IMAGE_FORMAT=$(trueorfalse True GLANCE_ENFORCE_IMAGE_FORMAT) GLANCE_CONF_DIR=${GLANCE_CONF_DIR:-/etc/glance} GLANCE_METADEF_DIR=$GLANCE_CONF_DIR/metadefs @@ -105,16 +126,13 @@ GLANCE_SERVICE_PORT=${GLANCE_SERVICE_PORT:-9292} GLANCE_SERVICE_PORT_INT=${GLANCE_SERVICE_PORT_INT:-19292} GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$GLANCE_SERVICE_HOST:$GLANCE_SERVICE_PORT} GLANCE_SERVICE_PROTOCOL=${GLANCE_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} -GLANCE_UWSGI=$GLANCE_BIN_DIR/glance-wsgi-api +GLANCE_UWSGI=glance.wsgi.api:application GLANCE_UWSGI_CONF=$GLANCE_CONF_DIR/glance-uwsgi.ini -# If wsgi mode is uwsgi run glance under uwsgi, else default to eventlet -# TODO(mtreinish): Remove the eventlet path here and in all the similar -# conditionals below after the Pike release -if [[ "$WSGI_MODE" == "uwsgi" ]]; then - GLANCE_URL="$GLANCE_SERVICE_PROTOCOL://$GLANCE_SERVICE_HOST/image" -else - GLANCE_URL="$GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT" -fi + +# Glance default limit for Devstack +GLANCE_LIMIT_IMAGE_SIZE_TOTAL=${GLANCE_LIMIT_IMAGE_SIZE_TOTAL:-2000} + +GLANCE_URL="$GLANCE_SERVICE_PROTOCOL://$GLANCE_SERVICE_HOST/image" # Functions # --------- @@ -130,8 +148,9 @@ function is_glance_enabled { # cleanup_glance() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_glance { - # delete image files (glance) - sudo rm -rf $GLANCE_CACHE_DIR $GLANCE_IMAGE_DIR + # delete image files (glance) and all of the glance-remote temporary + # storage + sudo rm -rf $GLANCE_CACHE_DIR $GLANCE_IMAGE_DIR "${DATA_DIR}/glance-remote" # Cleanup multiple stores directories if [[ "$GLANCE_ENABLE_MULTIPLE_STORES" == "True" ]]; then @@ -144,6 +163,35 @@ function cleanup_glance { # Cleanup reserved stores directories sudo rm -rf $GLANCE_STAGING_DIR $GLANCE_TASKS_DIR fi + remove_uwsgi_config "$GLANCE_UWSGI_CONF" "glance-wsgi-api" +} + +# Set multiple s3 store related config options +# +function configure_multiple_s3_stores { + enabled_backends="${GLANCE_S3_DEFAULT_BACKEND}:s3" + + iniset $GLANCE_API_CONF DEFAULT enabled_backends ${enabled_backends} + iniset $GLANCE_API_CONF glance_store default_backend $GLANCE_S3_DEFAULT_BACKEND +} + +# Set common S3 store options to given config section +# +# Arguments: +# config_section +# +function set_common_s3_store_params { + local config_section="$1" + openstack ec2 credential create + iniset $GLANCE_API_CONF $config_section s3_store_host "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$S3_SERVICE_PORT" + iniset $GLANCE_API_CONF $config_section s3_store_access_key "$(openstack ec2 credential list -c Access -f value)" + iniset $GLANCE_API_CONF $config_section s3_store_secret_key "$(openstack ec2 credential list -c Secret -f value)" + iniset $GLANCE_API_CONF 
$config_section s3_store_create_bucket_on_put $GLANCE_S3_BUCKET_ON_PUT + iniset $GLANCE_API_CONF $config_section s3_store_bucket $GLANCE_S3_BUCKET_NAME + iniset $GLANCE_API_CONF $config_section s3_store_bucket_url_format "path" + if is_service_enabled tls-proxy; then + iniset $GLANCE_API_CONF $config_section s3_store_cacert $SSL_BUNDLE_FILE + fi } # Set multiple cinder store related config options for each of the cinder store @@ -230,7 +278,6 @@ function configure_glance_store { local be if [[ "$glance_enable_multiple_stores" == "False" ]]; then - # Configure traditional glance_store if [[ "$use_cinder_for_glance" == "True" ]]; then # set common glance_store parameters iniset $GLANCE_API_CONF glance_store stores "cinder,file,http" @@ -253,7 +300,7 @@ function configure_glance_store { if [[ "$use_cinder_for_glance" == "True" ]]; then # Configure multiple cinder stores for glance configure_multiple_cinder_stores - else + elif ! is_service_enabled s-proxy && [[ "$GLANCE_USE_S3" == "False" ]]; then # Configure multiple file stores for glance configure_multiple_file_stores fi @@ -262,6 +309,38 @@ function configure_glance_store { fi } +function configure_glance_quotas { + + # Registered limit resources in keystone are system-specific resources. + # Make sure we use a system-scoped token to interact with this API. + + openstack --os-cloud devstack-system-admin registered limit create --service glance \ + --default-limit $GLANCE_LIMIT_IMAGE_SIZE_TOTAL --region $REGION_NAME image_size_total + openstack --os-cloud devstack-system-admin registered limit create --service glance \ + --default-limit $GLANCE_LIMIT_IMAGE_SIZE_TOTAL --region $REGION_NAME image_stage_total + openstack --os-cloud devstack-system-admin registered limit create --service glance \ + --default-limit 100 --region $REGION_NAME image_count_total + openstack --os-cloud devstack-system-admin registered limit create --service glance \ + --default-limit 100 --region $REGION_NAME image_count_uploading + + # Tell glance to use these limits + iniset $GLANCE_API_CONF DEFAULT use_keystone_limits True + + # Configure oslo_limit so it can talk to keystone + iniset $GLANCE_API_CONF oslo_limit user_domain_name $SERVICE_DOMAIN_NAME + iniset $GLANCE_API_CONF oslo_limit password $SERVICE_PASSWORD + iniset $GLANCE_API_CONF oslo_limit username glance + iniset $GLANCE_API_CONF oslo_limit auth_type password + iniset $GLANCE_API_CONF oslo_limit auth_url $KEYSTONE_SERVICE_URI + iniset $GLANCE_API_CONF oslo_limit system_scope all + iniset $GLANCE_API_CONF oslo_limit endpoint_id \ + $(openstack --os-cloud devstack-system-admin endpoint list --service glance -f value -c ID) + + # Allow the glance service user to read quotas + openstack --os-cloud devstack-system-admin role add --user glance \ + --user-domain $SERVICE_DOMAIN_NAME --system all reader +} + # configure_glance() - Set config files, create data dirs, etc function configure_glance { sudo install -d -o $STACK_USER $GLANCE_CONF_DIR $GLANCE_METADEF_DIR @@ -274,18 +353,19 @@ function configure_glance { iniset $GLANCE_API_CONF database connection $dburl iniset $GLANCE_API_CONF DEFAULT use_syslog $SYSLOG iniset $GLANCE_API_CONF DEFAULT image_cache_dir $GLANCE_CACHE_DIR/ + iniset $GLANCE_API_CONF DEFAULT image_cache_driver $GLANCE_CACHE_DRIVER iniset $GLANCE_API_CONF oslo_concurrency lock_path $GLANCE_LOCK_DIR iniset $GLANCE_API_CONF paste_deploy flavor keystone+cachemanagement configure_keystone_authtoken_middleware $GLANCE_API_CONF glance iniset $GLANCE_API_CONF oslo_messaging_notifications driver 
messagingv2 iniset_rpc_backend glance $GLANCE_API_CONF - if [ "$VIRT_DRIVER" = 'xenserver' ]; then - iniset $GLANCE_API_CONF DEFAULT container_formats "ami,ari,aki,bare,ovf,tgz" - iniset $GLANCE_API_CONF DEFAULT disk_formats "ami,ari,aki,vhd,raw,iso" - fi if [ "$VIRT_DRIVER" = 'libvirt' ] && [ "$LIBVIRT_TYPE" = 'parallels' ]; then iniset $GLANCE_API_CONF DEFAULT disk_formats "ami,ari,aki,vhd,vmdk,raw,qcow2,vdi,iso,ploop" fi + # Only use these if you know what you are doing! See OSSN-0065 + iniset $GLANCE_API_CONF DEFAULT show_image_direct_url $GLANCE_SHOW_DIRECT_URL + iniset $GLANCE_API_CONF DEFAULT show_multiple_locations $GLANCE_SHOW_MULTIPLE_LOCATIONS + iniset $GLANCE_API_CONF image_format require_image_format_match $GLANCE_ENFORCE_IMAGE_FORMAT # Configure glance_store configure_glance_store $USE_CINDER_FOR_GLANCE $GLANCE_ENABLE_MULTIPLE_STORES @@ -299,8 +379,15 @@ function configure_glance { # No multiple stores for swift yet if [[ "$GLANCE_ENABLE_MULTIPLE_STORES" == "False" ]]; then - # Store the images in swift if enabled. - if is_service_enabled s-proxy; then + # Return if s3api is enabled for glance + if [[ "$GLANCE_USE_S3" == "True" ]]; then + if is_service_enabled s3api; then + # set common glance_store parameters + iniset $GLANCE_API_CONF glance_store stores "s3,file,http" + iniset $GLANCE_API_CONF glance_store default_store s3 + fi + elif is_service_enabled s-proxy; then + # Store the images in swift if enabled. iniset $GLANCE_API_CONF glance_store default_store swift iniset $GLANCE_API_CONF glance_store swift_store_create_container_on_put True @@ -318,6 +405,12 @@ function configure_glance { iniset $GLANCE_SWIFT_STORE_CONF ref1 auth_address $KEYSTONE_SERVICE_URI/v3 iniset $GLANCE_SWIFT_STORE_CONF ref1 auth_version 3 fi + else + if [[ "$GLANCE_USE_S3" == "True" ]]; then + if is_service_enabled s3api; then + configure_multiple_s3_stores + fi + fi fi # We need to tell glance what it's public endpoint is so that the version @@ -338,21 +431,17 @@ function configure_glance { iniset $GLANCE_CACHE_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL iniset $GLANCE_CACHE_CONF DEFAULT use_syslog $SYSLOG iniset $GLANCE_CACHE_CONF DEFAULT image_cache_dir $GLANCE_CACHE_DIR/ - iniset $GLANCE_CACHE_CONF DEFAULT auth_url $KEYSTONE_SERVICE_URI - iniset $GLANCE_CACHE_CONF DEFAULT admin_tenant_name $SERVICE_PROJECT_NAME - iniset $GLANCE_CACHE_CONF DEFAULT admin_user glance - iniset $GLANCE_CACHE_CONF DEFAULT admin_password $SERVICE_PASSWORD + iniset $GLANCE_CACHE_CONF DEFAULT image_cache_driver $GLANCE_CACHE_DRIVER # Store specific confs iniset $GLANCE_CACHE_CONF glance_store filesystem_store_datadir $GLANCE_IMAGE_DIR/ # Set default configuration options for the glance-image-import - iniset $GLANCE_IMAGE_IMPORT_CONF image_import_opts image_import_plugins [] + iniset $GLANCE_IMAGE_IMPORT_CONF image_import_opts image_import_plugins "[]" iniset $GLANCE_IMAGE_IMPORT_CONF inject_metadata_properties ignore_user_roles admin iniset $GLANCE_IMAGE_IMPORT_CONF inject_metadata_properties inject cp -p $GLANCE_DIR/etc/schema-image.json $GLANCE_SCHEMA_JSON - cp -p $GLANCE_DIR/etc/metadefs/*.json $GLANCE_METADEF_DIR if is_service_enabled tls-proxy; then @@ -363,13 +452,24 @@ function configure_glance { iniset $GLANCE_CACHE_CONF DEFAULT cinder_endpoint_template "https://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v3/%(project_id)s" fi - if [[ "$GLANCE_STANDALONE" == False ]]; then - write_local_uwsgi_http_config "$GLANCE_UWSGI_CONF" "$GLANCE_UWSGI" "/image" + write_local_uwsgi_http_config "$GLANCE_UWSGI_CONF" 
"$GLANCE_UWSGI" "/image" "glance-api" + + # Grab our uwsgi listen address and use that to fill out our + # worker_self_reference_url config + iniset $GLANCE_API_CONF DEFAULT worker_self_reference_url $(awk '-F= ' '/^http-socket/ { print "http://"$2}' $GLANCE_UWSGI_CONF) + + # Configure the Python binary used for "import" plugins. If unset, these + # will attempt the uwsgi binary instead. + iniset $GLANCE_API_CONF wsgi python_interpreter $PYTHON + + if [[ "$GLANCE_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then + iniset $GLANCE_API_CONF oslo_policy enforce_scope true + iniset $GLANCE_API_CONF oslo_policy enforce_new_defaults true + iniset $GLANCE_API_CONF DEFAULT enforce_secure_rbac true else - write_local_proxy_http_config glance "http://$GLANCE_SERVICE_HOST:$GLANCE_SERVICE_PORT_INT" "/image" - iniset $GLANCE_API_CONF DEFAULT bind_host $GLANCE_SERVICE_LISTEN_ADDRESS - iniset $GLANCE_API_CONF DEFAULT bind_port $GLANCE_SERVICE_PORT_INT - iniset $GLANCE_API_CONF DEFAULT workers "$API_WORKERS" + iniset $GLANCE_API_CONF oslo_policy enforce_scope false + iniset $GLANCE_API_CONF oslo_policy enforce_new_defaults false + iniset $GLANCE_API_CONF DEFAULT enforce_secure_rbac false fi } @@ -384,7 +484,9 @@ function configure_glance { function create_glance_accounts { if is_service_enabled g-api; then - create_service_user "glance" + # When cinder talk to glance service APIs user needs service + # role for RBAC checks and admin role for cinder to access images. + create_service_user "glance" "admin" # required for swift access if is_service_enabled s-proxy; then @@ -401,6 +503,18 @@ function create_glance_accounts { service_domain_id=$(get_or_create_domain $SERVICE_DOMAIN_NAME) iniset $GLANCE_SWIFT_STORE_CONF ref1 project_domain_id $service_domain_id iniset $GLANCE_SWIFT_STORE_CONF ref1 user_domain_id $service_domain_id + + if [[ "$GLANCE_ENABLE_QUOTAS" = True ]]; then + configure_glance_quotas + fi + + if is_service_enabled s3api && [[ "$GLANCE_USE_S3" == "True" ]]; then + if [[ "$GLANCE_ENABLE_MULTIPLE_STORES" == "False" ]]; then + set_common_s3_store_params glance_store + else + set_common_s3_store_params $GLANCE_S3_DEFAULT_BACKEND + fi + fi fi } @@ -460,19 +574,85 @@ function install_glance { setup_develop $GLANCE_DIR } +# glance_remote_conf() - Return the path to an alternate config file for +# the remote glance clone +function glance_remote_conf { + echo $(dirname "${GLANCE_CONF_DIR}")/glance-remote/$(basename "$1") +} + +# start_glance_remote_clone() - Clone the regular glance api worker +function start_glance_remote_clone { + local glance_remote_conf_dir glance_remote_port remote_data + local glance_remote_uwsgi venv + + glance_remote_conf_dir="$(glance_remote_conf "")" + glance_remote_port=$(get_random_port) + glance_remote_uwsgi="$(glance_remote_conf $GLANCE_UWSGI_CONF)" + + # Clone the existing ready-to-go glance-api setup + sudo rm -Rf "$glance_remote_conf_dir" + sudo cp -r "$GLANCE_CONF_DIR" "$glance_remote_conf_dir" + sudo chown $STACK_USER -R "$glance_remote_conf_dir" + + # Point this worker at different data dirs + remote_data="${DATA_DIR}/glance-remote" + mkdir -p $remote_data/os_glance_tasks_store \ + "${remote_data}/os_glance_staging_store" + iniset $(glance_remote_conf "$GLANCE_API_CONF") os_glance_staging_store \ + filesystem_store_datadir "${remote_data}/os_glance_staging_store" + iniset $(glance_remote_conf "$GLANCE_API_CONF") os_glance_tasks_store \ + filesystem_store_datadir "${remote_data}/os_glance_tasks_store" + + # Point this worker to use different cache dir + 
mkdir -p "$remote_data/cache" + iniset $(glance_remote_conf "$GLANCE_API_CONF") DEFAULT \ + image_cache_dir "${remote_data}/cache" + + # Change our uwsgi to our new port + sed -ri "s/^(http-socket.*):[0-9]+/\1:$glance_remote_port/" \ + "$glance_remote_uwsgi" + + # Update the self-reference url with our new port + iniset $(glance_remote_conf $GLANCE_API_CONF) DEFAULT \ + worker_self_reference_url \ + $(awk '-F= ' '/^http-socket/ { print "http://"$2 }' \ + "$glance_remote_uwsgi") + + # We need to create the systemd service for the clone, but then + # change it to include an Environment line to point the WSGI app + # at the alternate config directory. + if [[ "$GLOBAL_VENV" == True ]]; then + venv="--venv $DEVSTACK_VENV" + fi + write_uwsgi_user_unit_file devstack@g-api-r.service "$(which uwsgi) \ + --procname-prefix \ + glance-api-remote \ + --ini $glance_remote_uwsgi \ + $venv" \ + "" "$STACK_USER" + iniadd -sudo ${SYSTEMD_DIR}/devstack@g-api-r.service \ + "Service" "Environment" \ + "OS_GLANCE_CONFIG_DIR=$glance_remote_conf_dir" + + # Reload and restart with the new config + $SYSTEMCTL daemon-reload + $SYSTEMCTL restart devstack@g-api-r + + get_or_create_service glance_remote image_remote "Alternate glance" + get_or_create_endpoint image_remote $REGION_NAME \ + $(awk '-F= ' '/^http-socket/ { print "http://"$2 }' \ + $glance_remote_uwsgi) +} + # start_glance() - Start running processes function start_glance { local service_protocol=$GLANCE_SERVICE_PROTOCOL - if is_service_enabled tls-proxy; then - if [[ "$WSGI_MODE" != "uwsgi" ]]; then - start_tls_proxy glance-service '*' $GLANCE_SERVICE_PORT $GLANCE_SERVICE_HOST $GLANCE_SERVICE_PORT_INT - fi - fi - if [[ "$GLANCE_STANDALONE" == False ]]; then - run_process g-api "$(which uwsgi) --procname-prefix glance-api --ini $GLANCE_UWSGI_CONF" - else - run_process g-api "$GLANCE_BIN_DIR/glance-api --config-dir=$GLANCE_CONF_DIR" + run_process g-api "$(which uwsgi) --procname-prefix glance-api --ini $GLANCE_UWSGI_CONF" + + if is_service_enabled g-api-r; then + echo "Starting the g-api-r clone service..." + start_glance_remote_clone fi echo "Waiting for g-api ($GLANCE_SERVICE_HOST) to start..." 
@@ -484,6 +664,7 @@ function start_glance { # stop_glance() - Stop running processes function stop_glance { stop_process g-api + stop_process g-api-r } # Restore xtrace diff --git a/lib/horizon b/lib/horizon index b2bf7bcb49..7c0d443aa6 100644 --- a/lib/horizon +++ b/lib/horizon @@ -109,12 +109,21 @@ function configure_horizon { _horizon_config_set $local_settings "" OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT "True" fi + if is_service_enabled c-bak; then + _horizon_config_set $local_settings OPENSTACK_CINDER_FEATURES enable_backup "True" + fi + # Create an empty directory that apache uses as docroot sudo mkdir -p $HORIZON_DIR/.blackhole local horizon_conf horizon_conf=$(apache_site_config_for horizon) + local wsgi_venv_config="" + if [[ "$GLOBAL_VENV" == "True" ]] ; then + wsgi_venv_config="WSGIPythonHome $DEVSTACK_VENV" + fi + # Configure apache to run horizon # Set up the django horizon application to serve via apache/wsgi sudo sh -c "sed -e \" @@ -124,12 +133,13 @@ function configure_horizon { s,%APACHE_NAME%,$APACHE_NAME,g; s,%DEST%,$DEST,g; s,%WEBROOT%,$HORIZON_APACHE_ROOT,g; + s,%WSGIPYTHONHOME%,$wsgi_venv_config,g; \" $FILES/apache-horizon.template >$horizon_conf" if is_ubuntu; then disable_apache_site 000-default sudo touch $horizon_conf - elif is_fedora || is_suse; then + elif is_fedora; then : # nothing to do else exit_distro_not_supported "horizon apache configuration" @@ -163,6 +173,10 @@ function install_horizon { # Apache installation, because we mark it NOPRIME install_apache_wsgi + # Install the memcache library so that horizon can use memcached as its + # cache backend + pip_install_gr pymemcache + git_clone $HORIZON_REPO $HORIZON_DIR $HORIZON_BRANCH } diff --git a/lib/host b/lib/host new file mode 100644 index 0000000000..58062eff6b --- /dev/null +++ b/lib/host @@ -0,0 +1,98 @@ +#!/bin/bash + +# Kernel Samepage Merging (KSM) +# ----------------------------- + +# Processes that mark their memory as mergeable can share identical memory +# pages if KSM is enabled. This is particularly useful for nova + libvirt +# backends but any other setup that marks its memory as mergeable can take +# advantage. The drawback is there is higher cpu load; however, we tend to +# be memory bound not cpu bound so enable KSM by default but allow people +# to opt out if the CPU time is more important to them. +ENABLE_KSM=$(trueorfalse True ENABLE_KSM) +ENABLE_KSMTUNED=$(trueorfalse True ENABLE_KSMTUNED) +function configure_ksm { + if [[ $ENABLE_KSMTUNED == "True" ]] ; then + install_package "ksmtuned" + fi + if [[ -f /sys/kernel/mm/ksm/run ]] ; then + echo $(bool_to_int ENABLE_KSM) | sudo tee /sys/kernel/mm/ksm/run + fi +} + +# Compressed swap (ZSWAP) +#------------------------ + +# as noted in the kernel docs https://docs.kernel.org/admin-guide/mm/zswap.html +# Zswap is a lightweight compressed cache for swap pages. +# It takes pages that are in the process of being swapped out and attempts +# to compress them into a dynamically allocated RAM-based memory pool. +# zswap basically trades CPU cycles for potentially reduced swap I/O. +# This trade-off can also result in a significant performance improvement +# if reads from the compressed cache are faster than reads from a swap device. 
+ +ENABLE_ZSWAP=$(trueorfalse False ENABLE_ZSWAP) +# lz4 is very fast although it does not have the best compression +# zstd has much better compression but more latency +ZSWAP_COMPRESSOR=${ZSWAP_COMPRESSOR:="lz4"} +ZSWAP_ZPOOL=${ZSWAP_ZPOOL:="zsmalloc"} +function configure_zswap { + if [[ $ENABLE_ZSWAP == "True" ]] ; then + # Centos 9 stream seems to only support enabling but not run time + # tuning so dont try to choose better default on centos + if is_ubuntu; then + echo ${ZSWAP_COMPRESSOR} | sudo tee /sys/module/zswap/parameters/compressor + echo ${ZSWAP_ZPOOL} | sudo tee /sys/module/zswap/parameters/zpool + fi + echo 1 | sudo tee /sys/module/zswap/parameters/enabled + # print curent zswap kernel config + sudo grep -R . /sys/module/zswap/parameters || /bin/true + fi +} + +ENABLE_SYSCTL_MEM_TUNING=$(trueorfalse False ENABLE_SYSCTL_MEM_TUNING) +function configure_sysctl_mem_parmaters { + if [[ $ENABLE_SYSCTL_MEM_TUNING == "True" ]] ; then + # defer write when memory is available + sudo sysctl -w vm.dirty_ratio=60 + sudo sysctl -w vm.dirty_background_ratio=10 + sudo sysctl -w vm.vfs_cache_pressure=50 + # assume swap is compressed so on new kernels + # give it equal priority as page cache which is + # uncompressed. on kernels < 5.8 the max is 100 + # not 200 so it will strongly prefer swapping. + sudo sysctl -w vm.swappiness=100 + sudo grep -R . /proc/sys/vm/ || /bin/true + fi +} + +function configure_host_mem { + configure_zswap + configure_ksm + configure_sysctl_mem_parmaters +} + +ENABLE_SYSCTL_NET_TUNING=$(trueorfalse False ENABLE_SYSCTL_NET_TUNING) +function configure_sysctl_net_parmaters { + if [[ $ENABLE_SYSCTL_NET_TUNING == "True" ]] ; then + # detect dead TCP connections after 120 seconds + sudo sysctl -w net.ipv4.tcp_keepalive_time=60 + sudo sysctl -w net.ipv4.tcp_keepalive_intvl=10 + sudo sysctl -w net.ipv4.tcp_keepalive_probes=6 + # reudce network latency for new connections + sudo sysctl -w net.ipv4.tcp_fastopen=3 + # print tcp options + sudo grep -R . /proc/sys/net/ipv4/tcp* || /bin/true + # disable qos by default + sudo sysctl -w net.core.default_qdisc=pfifo_fast + fi +} + +function configure_host_net { + configure_sysctl_net_parmaters +} + +function tune_host { + configure_host_mem + configure_host_net +} diff --git a/lib/infra b/lib/infra index b983f2b739..f4760c352c 100644 --- a/lib/infra +++ b/lib/infra @@ -31,7 +31,7 @@ function install_infra { local PIP_VIRTUAL_ENV="$REQUIREMENTS_DIR/.venv" [ ! -d $PIP_VIRTUAL_ENV ] && ${VIRTUALENV_CMD} $PIP_VIRTUAL_ENV # We don't care about testing git pbr in the requirements venv. 
- PIP_VIRTUAL_ENV=$PIP_VIRTUAL_ENV pip_install -U pbr + PIP_VIRTUAL_ENV=$PIP_VIRTUAL_ENV pip_install -U pbr setuptools[core] PIP_VIRTUAL_ENV=$PIP_VIRTUAL_ENV pip_install $REQUIREMENTS_DIR # Unset the PIP_VIRTUAL_ENV so that PBR does not end up trapped diff --git a/lib/keystone b/lib/keystone index d4c7b063bb..840103b9f4 100644 --- a/lib/keystone +++ b/lib/keystone @@ -9,7 +9,6 @@ # - ``tls`` file # - ``DEST``, ``STACK_USER`` # - ``FILES`` -# - ``IDENTITY_API_VERSION`` # - ``BASE_SQL_CONN`` # - ``SERVICE_HOST``, ``SERVICE_PROTOCOL`` # - ``S3_SERVICE_PORT`` (template backend only) @@ -50,18 +49,7 @@ fi KEYSTONE_CONF_DIR=${KEYSTONE_CONF_DIR:-/etc/keystone} KEYSTONE_CONF=$KEYSTONE_CONF_DIR/keystone.conf KEYSTONE_PUBLIC_UWSGI_CONF=$KEYSTONE_CONF_DIR/keystone-uwsgi-public.ini -KEYSTONE_ADMIN_UWSGI_CONF=$KEYSTONE_CONF_DIR/keystone-uwsgi-admin.ini -KEYSTONE_PUBLIC_UWSGI=$KEYSTONE_BIN_DIR/keystone-wsgi-public -KEYSTONE_ADMIN_UWSGI=$KEYSTONE_BIN_DIR/keystone-wsgi-admin - -# KEYSTONE_DEPLOY defines how keystone is deployed, allowed values: -# - mod_wsgi : Run keystone under Apache HTTPd mod_wsgi -# - uwsgi : Run keystone under uwsgi -if [[ "$WSGI_MODE" == "uwsgi" ]]; then - KEYSTONE_DEPLOY=uwsgi -else - KEYSTONE_DEPLOY=mod_wsgi -fi +KEYSTONE_PUBLIC_UWSGI=keystone.wsgi.api:application # Select the Identity backend driver KEYSTONE_IDENTITY_BACKEND=${KEYSTONE_IDENTITY_BACKEND:-sql} @@ -81,21 +69,12 @@ KEYSTONE_RESOURCE_BACKEND=${KEYSTONE_RESOURCE_BACKEND:-sql} KEYSTONE_TOKEN_FORMAT=${KEYSTONE_TOKEN_FORMAT:-fernet} KEYSTONE_TOKEN_FORMAT=$(echo ${KEYSTONE_TOKEN_FORMAT} | tr '[:upper:]' '[:lower:]') -# Set Keystone interface configuration -KEYSTONE_AUTH_HOST=${KEYSTONE_AUTH_HOST:-$SERVICE_HOST} -KEYSTONE_AUTH_PORT=${KEYSTONE_AUTH_PORT:-35357} -KEYSTONE_AUTH_PORT_INT=${KEYSTONE_AUTH_PORT_INT:-35358} -KEYSTONE_AUTH_PROTOCOL=${KEYSTONE_AUTH_PROTOCOL:-$SERVICE_PROTOCOL} - # Public facing bits KEYSTONE_SERVICE_HOST=${KEYSTONE_SERVICE_HOST:-$SERVICE_HOST} KEYSTONE_SERVICE_PORT=${KEYSTONE_SERVICE_PORT:-5000} KEYSTONE_SERVICE_PORT_INT=${KEYSTONE_SERVICE_PORT_INT:-5001} KEYSTONE_SERVICE_PROTOCOL=${KEYSTONE_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} -# Bind hosts -KEYSTONE_ADMIN_BIND_HOST=${KEYSTONE_ADMIN_BIND_HOST:-$KEYSTONE_SERVICE_HOST} - # Set the project for service accounts in Keystone SERVICE_DOMAIN_NAME=${SERVICE_DOMAIN_NAME:-Default} SERVICE_PROJECT_NAME=${SERVICE_PROJECT_NAME:-service} @@ -106,7 +85,6 @@ SERVICE_TENANT_NAME=${SERVICE_PROJECT_NAME:-service} # if we are running with SSL use https protocols if is_service_enabled tls-proxy; then - KEYSTONE_AUTH_PROTOCOL="https" KEYSTONE_SERVICE_PROTOCOL="https" fi @@ -134,6 +112,15 @@ KEYSTONE_PASSWORD_HASH_ROUNDS=${KEYSTONE_PASSWORD_HASH_ROUNDS:-4} # Cache settings KEYSTONE_ENABLE_CACHE=${KEYSTONE_ENABLE_CACHE:-True} +# Whether to create a keystone admin endpoint for legacy applications +KEYSTONE_ADMIN_ENDPOINT=$(trueorfalse False KEYSTONE_ADMIN_ENDPOINT) + +# Flag to set the oslo_policy.enforce_scope. This is used to switch +# the Identity API policies to start checking the scope of token. By Default, +# this flag is False. 
+# For more detail: https://docs.openstack.org/oslo.policy/latest/configuration/index.html#oslo_policy.enforce_scope +KEYSTONE_ENFORCE_SCOPE=$(trueorfalse False KEYSTONE_ENFORCE_SCOPE) + # Functions # --------- @@ -148,18 +135,9 @@ function is_keystone_enabled { # cleanup_keystone() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_keystone { - if [ "$KEYSTONE_DEPLOY" == "mod_wsgi" ]; then - # These files will be created if we are running WSGI_MODE="mod_wsgi" - disable_apache_site keystone - sudo rm -f $(apache_site_config_for keystone) - else - stop_process "keystone" - # TODO: remove admin at pike-2 - remove_uwsgi_config "$KEYSTONE_PUBLIC_UWSGI_CONF" "$KEYSTONE_PUBLIC_UWSGI" - remove_uwsgi_config "$KEYSTONE_ADMIN_UWSGI_CONF" "$KEYSTONE_ADMIN_UWSGI" - sudo rm -f $(apache_site_config_for keystone-wsgi-public) - sudo rm -f $(apache_site_config_for keystone-wsgi-admin) - fi + stop_process "keystone" + remove_uwsgi_config "$KEYSTONE_PUBLIC_UWSGI_CONF" "keystone-wsgi-public" + sudo rm -f $(apache_site_config_for keystone-wsgi-public) } # _config_keystone_apache_wsgi() - Set WSGI config files of Keystone @@ -171,12 +149,10 @@ function _config_keystone_apache_wsgi { local keystone_certfile="" local keystone_keyfile="" local keystone_service_port=$KEYSTONE_SERVICE_PORT - local keystone_auth_port=$KEYSTONE_AUTH_PORT local venv_path="" if is_service_enabled tls-proxy; then keystone_service_port=$KEYSTONE_SERVICE_PORT_INT - keystone_auth_port=$KEYSTONE_AUTH_PORT_INT fi if [[ ${USE_VENV} = True ]]; then venv_path="python-path=${PROJECT_VENV["keystone"]}/lib/$(python_version)/site-packages" @@ -185,7 +161,6 @@ function _config_keystone_apache_wsgi { sudo cp $FILES/apache-keystone.template $keystone_apache_conf sudo sed -e " s|%PUBLICPORT%|$keystone_service_port|g; - s|%ADMINPORT%|$keystone_auth_port|g; s|%APACHE_NAME%|$APACHE_NAME|g; s|%SSLLISTEN%|$keystone_ssl_listen|g; s|%SSLENGINE%|$keystone_ssl|g; @@ -220,25 +195,24 @@ function configure_keystone { iniset $KEYSTONE_CONF cache backend $CACHE_BACKEND iniset $KEYSTONE_CONF cache memcache_servers $MEMCACHE_SERVERS + # Enable errors if response validation fails. We want this enabled in CI + # and development contexts to highlights bugs in our response schemas. + iniset $KEYSTONE_CONF api response_validation error + iniset_rpc_backend keystone $KEYSTONE_CONF oslo_messaging_notifications local service_port=$KEYSTONE_SERVICE_PORT - local auth_port=$KEYSTONE_AUTH_PORT if is_service_enabled tls-proxy; then # Set the service ports for a proxy to take the originals service_port=$KEYSTONE_SERVICE_PORT_INT - auth_port=$KEYSTONE_AUTH_PORT_INT fi - # Override the endpoints advertised by keystone (the public_endpoint and - # admin_endpoint) so that clients use the correct endpoint. By default, the - # keystone server uses the public_port and admin_port which isn't going to - # work when you want to use a different port (in the case of proxy), or you - # don't want the port (in the case of putting keystone on a path in - # apache). + # Override the endpoints advertised by keystone so that clients use the correct + # endpoint. By default, the keystone server uses the public_port which isn't + # going to work when you want to use a different port (in the case of proxy), + # or you don't want the port (in the case of putting keystone on a path in apache). 
iniset $KEYSTONE_CONF DEFAULT public_endpoint $KEYSTONE_SERVICE_URI - iniset $KEYSTONE_CONF DEFAULT admin_endpoint $KEYSTONE_AUTH_URI if [[ "$KEYSTONE_TOKEN_FORMAT" != "" ]]; then iniset $KEYSTONE_CONF token provider $KEYSTONE_TOKEN_FORMAT @@ -256,13 +230,7 @@ function configure_keystone { iniset $KEYSTONE_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - if [ "$KEYSTONE_DEPLOY" == "mod_wsgi" ]; then - iniset $KEYSTONE_CONF DEFAULT logging_exception_prefix "%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s" - _config_keystone_apache_wsgi - else # uwsgi - write_uwsgi_config "$KEYSTONE_PUBLIC_UWSGI_CONF" "$KEYSTONE_PUBLIC_UWSGI" "/identity" - write_uwsgi_config "$KEYSTONE_ADMIN_UWSGI_CONF" "$KEYSTONE_ADMIN_UWSGI" "/identity_admin" - fi + write_uwsgi_config "$KEYSTONE_PUBLIC_UWSGI_CONF" "$KEYSTONE_PUBLIC_UWSGI" "/identity" "" "keystone-api" iniset $KEYSTONE_CONF DEFAULT max_token_size 16384 @@ -281,6 +249,16 @@ function configure_keystone { iniset $KEYSTONE_CONF security_compliance lockout_duration $KEYSTONE_LOCKOUT_DURATION iniset $KEYSTONE_CONF security_compliance unique_last_password_count $KEYSTONE_UNIQUE_LAST_PASSWORD_COUNT fi + + iniset $KEYSTONE_CONF oslo_policy policy_file policy.yaml + + if [[ "$KEYSTONE_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then + iniset $KEYSTONE_CONF oslo_policy enforce_scope true + iniset $KEYSTONE_CONF oslo_policy enforce_new_defaults true + else + iniset $KEYSTONE_CONF oslo_policy enforce_scope false + iniset $KEYSTONE_CONF oslo_policy enforce_new_defaults false + fi } # create_keystone_accounts() - Sets up common required keystone accounts @@ -303,40 +281,48 @@ function configure_keystone { # admins admin admin admin # nonadmins demo, alt_demo member, anotherrole demo, alt_demo +# System User Roles +# ------------------------------------------------------------------ +# all admin admin +# all system_reader reader +# all system_member member + # Migrated from keystone_data.sh function create_keystone_accounts { # The keystone bootstrapping process (performed via keystone-manage - # bootstrap) creates an admin user, admin role, member role, and admin + # bootstrap) creates an admin user and an admin # project. As a sanity check we exercise the CLI to retrieve the IDs for # these values. local admin_project admin_project=$(openstack project show "admin" -f value -c id) local admin_user admin_user=$(openstack user show "admin" -f value -c id) + # These roles are also created during bootstrap but we don't need their IDs local admin_role="admin" local member_role="member" + local reader_role="reader" - get_or_add_user_domain_role $admin_role $admin_user default + async_run ks-domain-role get_or_add_user_domain_role $admin_role $admin_user default # Create service project/role get_or_create_domain "$SERVICE_DOMAIN_NAME" - get_or_create_project "$SERVICE_PROJECT_NAME" "$SERVICE_DOMAIN_NAME" + async_run ks-project get_or_create_project "$SERVICE_PROJECT_NAME" "$SERVICE_DOMAIN_NAME" # Service role, so service users do not have to be admins - get_or_create_role service + async_run ks-service get_or_create_role service # The ResellerAdmin role is used by Nova and Ceilometer so we need to keep it. # The admin role in swift allows a user to act as an admin for their project, # but ResellerAdmin is needed for a user to act as any project. 
The name of this # role is also configurable in swift-proxy.conf - get_or_create_role ResellerAdmin + async_run ks-reseller get_or_create_role ResellerAdmin # another_role demonstrates that an arbitrary role may be created and used # TODO(sleepsonthefloor): show how this can be used for rbac in the future! local another_role="anotherrole" - get_or_create_role $another_role + async_run ks-anotherrole get_or_create_role $another_role # invisible project - admin can't see this one local invis_project @@ -349,21 +335,55 @@ function create_keystone_accounts { demo_user=$(get_or_create_user "demo" \ "$ADMIN_PASSWORD" "default" "demo@example.com") - get_or_add_user_project_role $member_role $demo_user $demo_project - get_or_add_user_project_role $admin_role $admin_user $demo_project - get_or_add_user_project_role $another_role $demo_user $demo_project - get_or_add_user_project_role $member_role $demo_user $invis_project + async_wait ks-{domain-role,domain,project,service,reseller,anotherrole} + + async_run ks-demo-member get_or_add_user_project_role $member_role $demo_user $demo_project + + async_run ks-demo-admin get_or_add_user_project_role $admin_role $admin_user $demo_project + async_run ks-demo-another get_or_add_user_project_role $another_role $demo_user $demo_project + async_run ks-demo-invis get_or_add_user_project_role $member_role $demo_user $invis_project - # alt_demo + # Create a user to act as a reader on project demo + local demo_reader + demo_reader=$(get_or_create_user "demo_reader" \ + "$ADMIN_PASSWORD" "default" "demo_reader@example.com") + + async_run ks-demo-reader get_or_add_user_project_role $reader_role $demo_reader $demo_project + + # Create a different project called alt_demo local alt_demo_project alt_demo_project=$(get_or_create_project "alt_demo" default) + # Create a user to act as member, admin and anotherrole on project alt_demo local alt_demo_user alt_demo_user=$(get_or_create_user "alt_demo" \ "$ADMIN_PASSWORD" "default" "alt_demo@example.com") - get_or_add_user_project_role $member_role $alt_demo_user $alt_demo_project - get_or_add_user_project_role $admin_role $admin_user $alt_demo_project - get_or_add_user_project_role $another_role $alt_demo_user $alt_demo_project + async_run ks-alt-admin get_or_add_user_project_role $admin_role $alt_demo_user $alt_demo_project + async_run ks-alt-another get_or_add_user_project_role $another_role $alt_demo_user $alt_demo_project + + # Create another user to act as a member on project alt_demo + local alt_demo_member + alt_demo_member=$(get_or_create_user "alt_demo_member" \ + "$ADMIN_PASSWORD" "default" "alt_demo_member@example.com") + async_run ks-alt-member-user get_or_add_user_project_role $member_role $alt_demo_member $alt_demo_project + + # Create another user to act as a reader on project alt_demo + local alt_demo_reader + alt_demo_reader=$(get_or_create_user "alt_demo_reader" \ + "$ADMIN_PASSWORD" "default" "alt_demo_reader@example.com") + async_run ks-alt-reader-user get_or_add_user_project_role $reader_role $alt_demo_reader $alt_demo_project + + # Create two users, give one the member role on the system and the other the + # reader role on the system. These two users model system-member and + # system-reader personas. The admin user already has the admin role on the + # system and we can re-use this user as a system-admin. 
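As a rough sketch, the system-scoped grants modeled by the two users created just below correspond to OpenStack CLI calls of this shape (the get_or_* helpers used here wrap calls of roughly this form):

    openstack role add --user system_member --user-domain default --system all member
    openstack role add --user system_reader --user-domain default --system all reader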
+ system_member_user=$(get_or_create_user "system_member" \ + "$ADMIN_PASSWORD" "default" "system_member@example.com") + async_run ks-system-member get_or_add_user_system_role $member_role $system_member_user "all" + + system_reader_user=$(get_or_create_user "system_reader" \ + "$ADMIN_PASSWORD" "default" "system_reader@example.com") + async_run ks-system-reader get_or_add_user_system_role $reader_role $system_reader_user "all" # groups local admin_group @@ -373,11 +393,16 @@ function create_keystone_accounts { non_admin_group=$(get_or_create_group "nonadmins" \ "default" "non-admin group") - get_or_add_group_project_role $member_role $non_admin_group $demo_project - get_or_add_group_project_role $another_role $non_admin_group $demo_project - get_or_add_group_project_role $member_role $non_admin_group $alt_demo_project - get_or_add_group_project_role $another_role $non_admin_group $alt_demo_project - get_or_add_group_project_role $admin_role $admin_group $admin_project + async_run ks-group-memberdemo get_or_add_group_project_role $member_role $non_admin_group $demo_project + async_run ks-group-anotherdemo get_or_add_group_project_role $another_role $non_admin_group $demo_project + async_run ks-group-memberalt get_or_add_group_project_role $member_role $non_admin_group $alt_demo_project + async_run ks-group-anotheralt get_or_add_group_project_role $another_role $non_admin_group $alt_demo_project + async_run ks-group-admin get_or_add_group_project_role $admin_role $admin_group $admin_project + + async_wait ks-demo-{member,admin,another,invis,reader} + async_wait ks-alt-{admin,another,member-user,reader-user} + async_wait ks-system-{member,reader} + async_wait ks-group-{memberdemo,anotherdemo,memberalt,anotheralt,admin} if is_service_enabled ldap; then create_ldap_domain @@ -411,6 +436,7 @@ function configure_keystone_authtoken_middleware { local conf_file=$1 local admin_user=$2 local section=${3:-keystone_authtoken} + local service_type=$4 iniset $conf_file $section auth_type password iniset $conf_file $section interface public @@ -423,6 +449,9 @@ function configure_keystone_authtoken_middleware { iniset $conf_file $section cafile $SSL_BUNDLE_FILE iniset $conf_file $section memcached_servers $MEMCACHE_SERVERS + if [[ -n "$service_type" ]]; then + iniset $conf_file $section service_type $service_type + fi } # configure_auth_token_middleware conf_file admin_user IGNORED [section] @@ -502,28 +531,19 @@ function install_keystone { if is_service_enabled ldap; then setup_develop $KEYSTONE_DIR ldap fi - - if [ "$KEYSTONE_DEPLOY" == "mod_wsgi" ]; then - install_apache_wsgi - fi } # start_keystone() - Start running processes function start_keystone { # Get right service port for testing local service_port=$KEYSTONE_SERVICE_PORT - local auth_protocol=$KEYSTONE_AUTH_PROTOCOL + local auth_protocol=$KEYSTONE_SERVICE_PROTOCOL if is_service_enabled tls-proxy; then service_port=$KEYSTONE_SERVICE_PORT_INT auth_protocol="http" fi - if [ "$KEYSTONE_DEPLOY" == "mod_wsgi" ]; then - enable_apache_site keystone - restart_apache_server - else # uwsgi - run_process keystone "$(which uwsgi) --procname-prefix keystone --ini $KEYSTONE_PUBLIC_UWSGI_CONF" "" - fi + run_process keystone "$(which uwsgi) --procname-prefix keystone --ini $KEYSTONE_PUBLIC_UWSGI_CONF" "" echo "Waiting for keystone to start..." # Check that the keystone service is running. Even if the tls tunnel @@ -531,7 +551,7 @@ function start_keystone { # unencryted traffic at this point. # If running in Apache, use the path rather than port. 
- local service_uri=$auth_protocol://$KEYSTONE_SERVICE_HOST/identity/v$IDENTITY_API_VERSION/ + local service_uri=$auth_protocol://$KEYSTONE_SERVICE_HOST/identity/v3/ if ! wait_for_service $SERVICE_TIMEOUT $service_uri; then die $LINENO "keystone did not start" @@ -540,7 +560,6 @@ function start_keystone { # Start proxies if enabled if is_service_enabled tls-proxy; then start_tls_proxy keystone-service '*' $KEYSTONE_SERVICE_PORT $KEYSTONE_SERVICE_HOST $KEYSTONE_SERVICE_PORT_INT - start_tls_proxy keystone-auth '*' $KEYSTONE_AUTH_PORT $KEYSTONE_AUTH_HOST $KEYSTONE_AUTH_PORT_INT fi # (re)start memcached to make sure we have a clean memcache. @@ -549,23 +568,15 @@ function start_keystone { # stop_keystone() - Stop running processes function stop_keystone { - if [ "$KEYSTONE_DEPLOY" == "mod_wsgi" ]; then - disable_apache_site keystone - restart_apache_server - else - stop_process keystone - fi + stop_process keystone } # bootstrap_keystone() - Initialize user, role and project # This function uses the following GLOBAL variables: # - ``KEYSTONE_BIN_DIR`` # - ``ADMIN_PASSWORD`` -# - ``IDENTITY_API_VERSION`` # - ``REGION_NAME`` -# - ``KEYSTONE_SERVICE_PROTOCOL`` -# - ``KEYSTONE_SERVICE_HOST`` -# - ``KEYSTONE_SERVICE_PORT`` +# - ``KEYSTONE_SERVICE_URI`` function bootstrap_keystone { $KEYSTONE_BIN_DIR/keystone-manage bootstrap \ --bootstrap-username admin \ @@ -574,14 +585,22 @@ function bootstrap_keystone { --bootstrap-role-name admin \ --bootstrap-service-name keystone \ --bootstrap-region-id "$REGION_NAME" \ - --bootstrap-admin-url "$KEYSTONE_AUTH_URI" \ --bootstrap-public-url "$KEYSTONE_SERVICE_URI" + if [ "$KEYSTONE_ADMIN_ENDPOINT" == "True" ]; then + openstack endpoint create --region "$REGION_NAME" \ + --os-username admin \ + --os-user-domain-id default \ + --os-password "$ADMIN_PASSWORD" \ + --os-project-name admin \ + --os-project-domain-id default \ + keystone admin "$KEYSTONE_SERVICE_URI" + fi } # create_ldap_domain() - Create domain file and initialize domain with a user function create_ldap_domain { # Creates domain Users - openstack --os-identity-api-version=3 domain create --description "LDAP domain" Users + openstack domain create --description "LDAP domain" Users # Create domain file inside etc/keystone/domains KEYSTONE_LDAP_DOMAIN_FILE=$KEYSTONE_CONF_DIR/domains/keystone.Users.conf @@ -597,6 +616,7 @@ function create_ldap_domain { iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap user_name_attribute "cn" iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap user_mail_attribute "mail" iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap user_id_attribute "uid" + iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap user_enabled_emulation "True" iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap user "cn=Manager,dc=openstack,dc=org" iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap url "ldap://localhost" iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap suffix $LDAP_BASE_DN diff --git a/lib/ldap b/lib/ldap index 5a53d0eaee..66c2afc4d5 100644 --- a/lib/ldap +++ b/lib/ldap @@ -33,16 +33,12 @@ LDAP_SERVICE_NAME=slapd if is_ubuntu; then LDAP_OLCDB_NUMBER=1 + LDAP_OLCDB_TYPE=mdb LDAP_ROOTPW_COMMAND=replace elif is_fedora; then LDAP_OLCDB_NUMBER=2 + LDAP_OLCDB_TYPE=hdb LDAP_ROOTPW_COMMAND=add -elif is_suse; then - # SUSE has slappasswd in /usr/sbin/ - PATH=$PATH:/usr/sbin/ - LDAP_OLCDB_NUMBER=1 - LDAP_ROOTPW_COMMAND=add - LDAP_SERVICE_NAME=ldap fi @@ -56,6 +52,7 @@ function _ldap_varsubst { local slappass=$2 sed -e " s|\${LDAP_OLCDB_NUMBER}|$LDAP_OLCDB_NUMBER| + s|\${LDAP_OLCDB_TYPE}|$LDAP_OLCDB_TYPE| s|\${SLAPPASS}|$slappass| s|\${LDAP_ROOTPW_COMMAND}|$LDAP_ROOTPW_COMMAND| 
s|\${BASE_DC}|$LDAP_BASE_DC| @@ -72,8 +69,6 @@ function cleanup_ldap { sudo rm -rf /etc/ldap/ldap.conf /var/lib/ldap elif is_fedora; then sudo rm -rf /etc/openldap /var/lib/ldap - elif is_suse; then - sudo rm -rf /var/lib/ldap fi } @@ -87,6 +82,14 @@ function init_ldap { # Remove data but not schemas clear_ldap_state + if is_ubuntu; then + # a bug in OpenLDAP 2.6.7+ + # (https://bugs.openldap.org/show_bug.cgi?id=10336) causes slapd crash + # after deleting nonexisting tree. It is fixed upstream, but Ubuntu is + # still not having a fix in Noble. Try temporarily simly restarting the + # process. + sudo service $LDAP_SERVICE_NAME restart + fi # Add our top level ldap nodes if ldapsearch -x -w $LDAP_PASSWORD -D "$LDAP_MANAGER_DN" -H $LDAP_URL -b "$LDAP_BASE_DN" | grep -q "Success"; then @@ -122,11 +125,6 @@ function install_ldap { configure_ldap elif is_fedora; then start_ldap - elif is_suse; then - _ldap_varsubst $FILES/ldap/suse-base-config.ldif.in >$tmp_ldap_dir/suse-base-config.ldif - sudo slapadd -F /etc/openldap/slapd.d/ -bcn=config -l $tmp_ldap_dir/suse-base-config.ldif - sudo sed -i '/^OPENLDAP_START_LDAPI=/s/"no"/"yes"/g' /etc/sysconfig/openldap - start_ldap fi echo "LDAP_PASSWORD is $LDAP_PASSWORD" @@ -157,7 +155,7 @@ function configure_ldap { slapd slapd/dump_database_destdir string /var/backups/slapd-VERSION slapd slapd/domain string Users slapd shared/organization string $LDAP_DOMAIN - slapd slapd/backend string HDB + slapd slapd/backend string ${LDAP_OLCDB_TYPE^^} slapd slapd/purge_database boolean true slapd slapd/move_old_database boolean true slapd slapd/allow_ldap_v2 boolean false diff --git a/lib/libraries b/lib/libraries old mode 100644 new mode 100755 index c7aa8151ae..ffc004b24c --- a/lib/libraries +++ b/lib/libraries @@ -1,6 +1,6 @@ #!/bin/bash # -# lib/oslo +# lib/libraries # # Functions to install libraries from git # @@ -27,6 +27,7 @@ GITDIR["castellan"]=$DEST/castellan GITDIR["cliff"]=$DEST/cliff GITDIR["cursive"]=$DEST/cursive GITDIR["debtcollector"]=$DEST/debtcollector +GITDIR["etcd3gw"]=$DEST/etcd3gw GITDIR["futurist"]=$DEST/futurist GITDIR["openstacksdk"]=$DEST/openstacksdk GITDIR["os-client-config"]=$DEST/os-client-config @@ -38,6 +39,7 @@ GITDIR["oslo.config"]=$DEST/oslo.config GITDIR["oslo.context"]=$DEST/oslo.context GITDIR["oslo.db"]=$DEST/oslo.db GITDIR["oslo.i18n"]=$DEST/oslo.i18n +GITDIR["oslo.limit"]=$DEST/oslo.limit GITDIR["oslo.log"]=$DEST/oslo.log GITDIR["oslo.messaging"]=$DEST/oslo.messaging GITDIR["oslo.middleware"]=$DEST/oslo.middleware @@ -59,6 +61,7 @@ GITDIR["tooz"]=$DEST/tooz # Non oslo libraries are welcomed below as well, this prevents # duplication of this code. 
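Whether any of the libraries listed in this file are installed from their git checkouts (rather than pulled in as normal pip dependencies) is controlled per library through LIBS_FROM_GIT; an illustrative local.conf fragment, with library names chosen only as examples:

    [[local|localrc]]
    LIBS_FROM_GIT=etcd3gw,oslo.limit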
GITDIR["os-brick"]=$DEST/os-brick +GITDIR["os-resource-classes"]=$DEST/os-resource-classes GITDIR["os-traits"]=$DEST/os-traits # Support entry points installation of console scripts @@ -101,6 +104,7 @@ function install_libs { _install_lib_from_source "oslo.context" _install_lib_from_source "oslo.db" _install_lib_from_source "oslo.i18n" + _install_lib_from_source "oslo.limit" _install_lib_from_source "oslo.log" _install_lib_from_source "oslo.messaging" _install_lib_from_source "oslo.middleware" @@ -122,18 +126,18 @@ function install_libs { # # os-traits for nova _install_lib_from_source "os-brick" + _install_lib_from_source "os-resource-classes" _install_lib_from_source "os-traits" # # python client libraries we might need from git can go here _install_lib_from_source "python-barbicanclient" - - # etcd (because tooz does not have a hard dependency on these) - # - # NOTE(sdague): this is currently a work around because tooz - # doesn't pull in etcd3. - pip_install etcd3 - pip_install etcd3gw + if use_library_from_git etcd3gw ; then + _install_lib_from_source "etcd3gw" + else + # etcd (because tooz does not have a hard dependency on these) + pip_install etcd3gw + fi } # Restore xtrace diff --git a/lib/lvm b/lib/lvm index b826c1bc63..b7e84d9505 100644 --- a/lib/lvm +++ b/lib/lvm @@ -53,28 +53,10 @@ function _remove_lvm_volume_group { sudo vgremove -f $vg } -# _clean_lvm_backing_file() removes the backing file of the -# volume group -# -# Usage: _clean_lvm_backing_file() $backing_file -function _clean_lvm_backing_file { - local backing_file=$1 - - # If the backing physical device is a loop device, it was probably setup by DevStack - if [[ -n "$backing_file" ]] && [[ -e "$backing_file" ]]; then - local vg_dev - vg_dev=$(sudo losetup -j $backing_file | awk -F':' '/'$BACKING_FILE_SUFFIX'/ { print $1}') - if [[ -n "$vg_dev" ]]; then - sudo losetup -d $vg_dev - fi - rm -f $backing_file - fi -} - # clean_lvm_volume_group() cleans up the volume group and removes the # backing file # -# Usage: clean_lvm_volume_group $vg +# Usage: clean_lvm_volume_group() $vg function clean_lvm_volume_group { local vg=$1 @@ -83,11 +65,22 @@ function clean_lvm_volume_group { # if there is no logical volume left, it's safe to attempt a cleanup # of the backing file if [[ -z "$(sudo lvs --noheadings -o lv_name $vg 2>/dev/null)" ]]; then - _clean_lvm_backing_file $DATA_DIR/$vg$BACKING_FILE_SUFFIX + local backing_file=$DATA_DIR/$vg$BACKING_FILE_SUFFIX + + if [[ -n "$vg$BACKING_FILE_SUFFIX" ]] && \ + [[ -e "/etc/systemd/system/$vg$BACKING_FILE_SUFFIX.service" ]]; then + sudo systemctl disable --now $vg$BACKING_FILE_SUFFIX.service + sudo rm -f /etc/systemd/system/$vg$BACKING_FILE_SUFFIX.service + sudo systemctl daemon-reload + fi + + # If the backing physical device is a loop device, it was probably setup by DevStack + if [[ -n "$backing_file" ]] && [[ -e "$backing_file" ]]; then + rm -f $backing_file + fi fi } - # _create_lvm_volume_group creates default volume group # # Usage: _create_lvm_volume_group() $vg $size @@ -106,8 +99,20 @@ function _create_lvm_volume_group { directio="--direct-io=on" fi + # Only create systemd service if it doesn't already exists + if [[ ! 
-e "/etc/systemd/system/$vg$BACKING_FILE_SUFFIX.service" ]]; then + sed -e " + s|%DIRECTIO%|${directio}|g; + s|%BACKING_FILE%|${backing_file}|g; + " $FILES/lvm-backing-file.template | sudo tee \ + /etc/systemd/system/$vg$BACKING_FILE_SUFFIX.service + + sudo systemctl daemon-reload + sudo systemctl enable --now $vg$BACKING_FILE_SUFFIX.service + fi + local vg_dev - vg_dev=$(sudo losetup -f --show $directio $backing_file) + vg_dev=$(sudo losetup --associated $backing_file -O NAME -n) # Only create volume group if it doesn't already exist if ! sudo vgs $vg; then @@ -124,19 +129,25 @@ function init_lvm_volume_group { local vg=$1 local size=$2 - # Start the tgtd service on Fedora and SUSE if tgtadm is used - if is_fedora || is_suse && [[ "$CINDER_ISCSI_HELPER" = "tgtadm" ]]; then + # Start the tgtd service on Fedora if tgtadm is used + if is_fedora; then start_service tgtd fi # Start with a clean volume group _create_lvm_volume_group $vg $size - # Remove iscsi targets - if [ "$CINDER_ISCSI_HELPER" = "lioadm" ]; then - sudo cinder-rtstool get-targets | sudo xargs -rn 1 cinder-rtstool delete - else - sudo tgtadm --op show --mode target | awk '/Target/ {print $3}' | sudo xargs -r -n1 tgt-admin --delete + if is_service_enabled cinder; then + # Remove iscsi targets + if [ "$CINDER_TARGET_HELPER" = "lioadm" ]; then + sudo cinder-rtstool get-targets | sudo xargs -rn 1 cinder-rtstool delete + elif [ "$CINDER_TARGET_HELPER" = "tgtadm" ]; then + sudo tgtadm --op show --mode target | awk '/Target/ {print $3}' | sudo xargs -r -n1 tgt-admin --delete + elif [ "$CINDER_TARGET_HELPER" = "nvmet" ]; then + # If we don't disconnect everything vgremove will block + sudo nvme disconnect-all + sudo nvmetcli clear + fi fi _clean_lvm_volume_group $vg } @@ -189,7 +200,7 @@ function set_lvm_filter { filter_string=$filter_string$filter_suffix clean_lvm_filter - sudo sed -i "/# global_filter = \[*\]/a\ $global_filter$filter_string" /etc/lvm/lvm.conf + sudo sed -i "/# global_filter = \[.*\]/a\ $filter_string" /etc/lvm/lvm.conf echo_summary "set lvm.conf device global_filter to: $filter_string" } diff --git a/lib/neutron b/lib/neutron index 885df97f7c..dec15fb782 100644 --- a/lib/neutron +++ b/lib/neutron @@ -1,117 +1,305 @@ #!/bin/bash # # lib/neutron -# Install and start **Neutron** network services +# functions - functions specific to neutron # Dependencies: -# # ``functions`` file # ``DEST`` must be defined +# ``STACK_USER`` must be defined # ``stack.sh`` calls the entry points in this order: # -# - is_XXXX_enabled -# - install_XXXX -# - configure_XXXX -# - init_XXXX -# - start_XXXX -# - stop_XXXX -# - cleanup_XXXX +# - install_neutron_agent_packages +# - install_neutronclient +# - install_neutron +# - install_neutron_third_party +# - configure_neutron +# - init_neutron +# - configure_neutron_third_party +# - init_neutron_third_party +# - start_neutron_third_party +# - create_nova_conf_neutron +# - configure_neutron_after_post_config +# - start_neutron_service_and_check +# - check_neutron_third_party_integration +# - start_neutron_agents +# - create_neutron_initial_network +# +# ``unstack.sh`` calls the entry points in this order: +# +# - stop_neutron +# - stop_neutron_third_party +# - cleanup_neutron -# Save trace setting -XTRACE=$(set +o | grep xtrace) -set +o xtrace +# Functions in lib/neutron are classified into the following categories: +# +# - entry points (called from stack.sh or unstack.sh) +# - internal functions +# - neutron exercises +# - 3rd party programs + + +# Neutron Networking +# ------------------ + +# 
Make sure that neutron is enabled in ``ENABLED_SERVICES``. If you want +# to run Neutron on this host, make sure that q-svc is also in +# ``ENABLED_SERVICES``. +# +# See "Neutron Network Configuration" below for additional variables +# that must be set in localrc for connectivity across hosts with +# Neutron. -# Defaults +# Settings # -------- + +# Neutron Network Configuration +# ----------------------------- + +if is_service_enabled tls-proxy; then + Q_PROTOCOL="https" +fi + # Set up default directories GITDIR["python-neutronclient"]=$DEST/python-neutronclient -# NEUTRON_DEPLOY_MOD_WSGI defines how neutron is deployed, allowed values: -# - False (default) : Run neutron under Eventlet -# - True : Run neutron under uwsgi -# TODO(annp): Switching to uwsgi in next cycle if things turn out to be stable -# enough -NEUTRON_DEPLOY_MOD_WSGI=$(trueorfalse False NEUTRON_DEPLOY_MOD_WSGI) -NEUTRON_AGENT=${NEUTRON_AGENT:-openvswitch} NEUTRON_DIR=$DEST/neutron +NEUTRON_FWAAS_DIR=$DEST/neutron-fwaas -NEUTRON_DISTRIBUTED_ROUTING=$(trueorfalse False NEUTRON_DISTRIBUTED_ROUTING) -# Distributed Virtual Router (DVR) configuration -# Can be: -# - ``legacy`` - No DVR functionality -# - ``dvr_snat`` - Controller or single node DVR -# - ``dvr`` - Compute node in multi-node DVR -# - ``dvr_no_external`` - Compute node in multi-node DVR, no external network -# -# Default is 'dvr_snat' since it can handle both DVR and legacy routers -NEUTRON_DVR_MODE=${NEUTRON_DVR_MODE:-dvr_snat} - -NEUTRON_BIN_DIR=$(get_python_exec_prefix) -NEUTRON_DHCP_BINARY="neutron-dhcp-agent" +# Support entry points installation of console scripts +if [[ -d $NEUTRON_DIR/bin/neutron-server ]]; then + NEUTRON_BIN_DIR=$NEUTRON_DIR/bin +else + NEUTRON_BIN_DIR=$(get_python_exec_prefix) +fi NEUTRON_CONF_DIR=/etc/neutron NEUTRON_CONF=$NEUTRON_CONF_DIR/neutron.conf -NEUTRON_META_CONF=$NEUTRON_CONF_DIR/metadata_agent.ini -NEUTRON_META_DATA_HOST=${NEUTRON_META_DATA_HOST:-$(ipv6_unquote $SERVICE_HOST)} +export NEUTRON_TEST_CONFIG_FILE=${NEUTRON_TEST_CONFIG_FILE:-"$NEUTRON_CONF_DIR/debug.ini"} -NEUTRON_DHCP_CONF=$NEUTRON_CONF_DIR/dhcp_agent.ini -NEUTRON_L3_CONF=$NEUTRON_CONF_DIR/l3_agent.ini -NEUTRON_AGENT_CONF=$NEUTRON_CONF_DIR/ -NEUTRON_CREATE_INITIAL_NETWORKS=${NEUTRON_CREATE_INITIAL_NETWORKS:-True} +NEUTRON_UWSGI=neutron.wsgi.api:application +NEUTRON_UWSGI_CONF=$NEUTRON_CONF_DIR/neutron-api-uwsgi.ini -NEUTRON_STATE_PATH=${NEUTRON_STATE_PATH:=$DATA_DIR/neutron} +# If NEUTRON_ENFORCE_SCOPE == True, it will set "enforce_scope" +# and "enforce_new_defaults" to True in the Neutron's config to enforce usage +# of the new RBAC policies and scopes. Set it to False if you do not +# want to run Neutron with new RBAC. +NEUTRON_ENFORCE_SCOPE=$(trueorfalse True NEUTRON_ENFORCE_SCOPE) + +# Agent binaries. Note, binary paths for other agents are set in per-service +# scripts in lib/neutron_plugins/services/ +AGENT_DHCP_BINARY="$NEUTRON_BIN_DIR/neutron-dhcp-agent" +AGENT_L3_BINARY=${AGENT_L3_BINARY:-"$NEUTRON_BIN_DIR/neutron-l3-agent"} +AGENT_META_BINARY="$NEUTRON_BIN_DIR/neutron-metadata-agent" + +# Agent config files. Note, plugin-specific Q_PLUGIN_CONF_FILE is set and +# loaded from per-plugin scripts in lib/neutron_plugins/ +Q_DHCP_CONF_FILE=$NEUTRON_CONF_DIR/dhcp_agent.ini +# NOTE(slaweq): NEUTRON_DHCP_CONF is used e.g. in neutron repository, +# it was previously defined in the lib/neutron module which is now deleted. +NEUTRON_DHCP_CONF=$Q_DHCP_CONF_FILE +Q_L3_CONF_FILE=$NEUTRON_CONF_DIR/l3_agent.ini +# NOTE(slaweq): NEUTRON_L3_CONF is used e.g. 
in neutron repository, +# it was previously defined in the lib/neutron module which is now deleted. +NEUTRON_L3_CONF=$Q_L3_CONF_FILE +Q_META_CONF_FILE=$NEUTRON_CONF_DIR/metadata_agent.ini + +# Default name for Neutron database +Q_DB_NAME=${Q_DB_NAME:-neutron} +# Default Neutron Plugin +Q_PLUGIN=${Q_PLUGIN:-ml2} +# Default Neutron Port +Q_PORT=${Q_PORT:-9696} +# Default Neutron Internal Port when using TLS proxy +Q_PORT_INT=${Q_PORT_INT:-19696} +# Default Neutron Host +Q_HOST=${Q_HOST:-$SERVICE_HOST} +# Default protocol +Q_PROTOCOL=${Q_PROTOCOL:-$SERVICE_PROTOCOL} +# Default listen address +Q_LISTEN_ADDRESS=${Q_LISTEN_ADDRESS:-$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)} +# Default admin username +Q_ADMIN_USERNAME=${Q_ADMIN_USERNAME:-neutron} +# Default auth strategy +Q_AUTH_STRATEGY=${Q_AUTH_STRATEGY:-keystone} +# RHEL's support for namespaces requires using veths with ovs +Q_OVS_USE_VETH=${Q_OVS_USE_VETH:-False} +Q_USE_ROOTWRAP=${Q_USE_ROOTWRAP:-True} +Q_USE_ROOTWRAP_DAEMON=$(trueorfalse True Q_USE_ROOTWRAP_DAEMON) +# Meta data IP +Q_META_DATA_IP=${Q_META_DATA_IP:-$(ipv6_unquote $SERVICE_HOST)} +# Allow Overlapping IP among subnets +Q_ALLOW_OVERLAPPING_IP=${Q_ALLOW_OVERLAPPING_IP:-True} +Q_NOTIFY_NOVA_PORT_STATUS_CHANGES=${Q_NOTIFY_NOVA_PORT_STATUS_CHANGES:-True} +Q_NOTIFY_NOVA_PORT_DATA_CHANGES=${Q_NOTIFY_NOVA_PORT_DATA_CHANGES:-True} +VIF_PLUGGING_IS_FATAL=${VIF_PLUGGING_IS_FATAL:-True} +VIF_PLUGGING_TIMEOUT=${VIF_PLUGGING_TIMEOUT:-300} + +# Allow to skip stopping of OVN services +SKIP_STOP_OVN=${SKIP_STOP_OVN:-False} + +# The directory which contains files for Q_PLUGIN_EXTRA_CONF_FILES. +# /etc/neutron is assumed by many of devstack plugins. Do not change. +_Q_PLUGIN_EXTRA_CONF_PATH=/etc/neutron + +# The name of the service in the endpoint URL +NEUTRON_ENDPOINT_SERVICE_NAME=${NEUTRON_ENDPOINT_SERVICE_NAME-"networking"} +if [[ -z "$NEUTRON_ENDPOINT_SERVICE_NAME" ]]; then + NEUTRON_ENDPOINT_SERVICE_NAME="networking" +fi -NEUTRON_UWSGI_CONF=$NEUTRON_CONF_DIR/neutron-api-uwsgi.ini +# Source install libraries +ALEMBIC_REPO=${ALEMBIC_REPO:-https://github.com/sqlalchemy/alembic.git} +ALEMBIC_DIR=${ALEMBIC_DIR:-$DEST/alembic} +ALEMBIC_BRANCH=${ALEMBIC_BRANCH:-main} +SQLALCHEMY_REPO=${SQLALCHEMY_REPO:-https://github.com/sqlalchemy/sqlalchemy.git} +SQLALCHEMY_DIR=${SQLALCHEMY_DIR:-$DEST/sqlalchemy} +SQLALCHEMY_BRANCH=${SQLALCHEMY_BRANCH:-main} + +# List of config file names in addition to the main plugin config file +# To add additional plugin config files, use ``neutron_server_config_add`` +# utility function. For example: +# +# ``neutron_server_config_add file1`` +# +# These config files are relative to ``/etc/neutron``. The above +# example would specify ``--config-file /etc/neutron/file1`` for +# neutron server. +declare -a -g Q_PLUGIN_EXTRA_CONF_FILES + +# same as Q_PLUGIN_EXTRA_CONF_FILES, but with absolute path. 
+declare -a -g _Q_PLUGIN_EXTRA_CONF_FILES_ABS + + +Q_RR_CONF_FILE=$NEUTRON_CONF_DIR/rootwrap.conf +if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then + Q_RR_COMMAND="sudo" +else + NEUTRON_ROOTWRAP=$(get_rootwrap_location neutron) + Q_RR_COMMAND="sudo $NEUTRON_ROOTWRAP $Q_RR_CONF_FILE" + if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then + Q_RR_DAEMON_COMMAND="sudo $NEUTRON_ROOTWRAP-daemon $Q_RR_CONF_FILE" + fi +fi -# By default, use the ML2 plugin -NEUTRON_CORE_PLUGIN=${NEUTRON_CORE_PLUGIN:-ml2} -NEUTRON_CORE_PLUGIN_CONF_FILENAME=${NEUTRON_CORE_PLUGIN_CONF_FILENAME:-ml2_conf.ini} -NEUTRON_CORE_PLUGIN_CONF_PATH=$NEUTRON_CONF_DIR/plugins/$NEUTRON_CORE_PLUGIN -NEUTRON_CORE_PLUGIN_CONF=$NEUTRON_CORE_PLUGIN_CONF_PATH/$NEUTRON_CORE_PLUGIN_CONF_FILENAME -NEUTRON_METERING_AGENT_CONF_FILENAME=${NEUTRON_METERING_AGENT_CONF_FILENAME:-metering_agent.ini} -NEUTRON_METERING_AGENT_CONF=$NEUTRON_CONF_DIR/$NEUTRON_METERING_AGENT_CONF_FILENAME +# Distributed Virtual Router (DVR) configuration +# Can be: +# - ``legacy`` - No DVR functionality +# - ``dvr_snat`` - Controller or single node DVR +# - ``dvr`` - Compute node in multi-node DVR +# - ``dvr_no_external`` - Compute node in multi-node DVR, no external network +# +Q_DVR_MODE=${Q_DVR_MODE:-legacy} +if [[ "$Q_DVR_MODE" != "legacy" ]]; then + Q_ML2_PLUGIN_MECHANISM_DRIVERS=openvswitch,l2population +fi + +# Provider Network Configurations +# -------------------------------- + +# The following variables control the Neutron ML2 plugins' allocation +# of tenant networks and availability of provider networks. If these +# are not configured in ``localrc``, tenant networks will be local to +# the host (with no remote connectivity), and no physical resources +# will be available for the allocation of provider networks. + +# To disable tunnels (GRE or VXLAN) for tenant networks, +# set to False in ``local.conf``. +# GRE tunnels are only supported by the openvswitch. +ENABLE_TENANT_TUNNELS=${ENABLE_TENANT_TUNNELS:-True} + +# If using GRE, VXLAN or GENEVE tunnels for tenant networks, +# specify the range of IDs from which tenant networks are +# allocated. Can be overridden in ``localrc`` if necessary. +TENANT_TUNNEL_RANGES=${TENANT_TUNNEL_RANGES:-1:1000} + +# To use VLANs for tenant networks, set to True in localrc. VLANs +# are supported by the ML2 plugins, requiring additional configuration +# described below. +ENABLE_TENANT_VLANS=${ENABLE_TENANT_VLANS:-False} + +# If using VLANs for tenant networks, set in ``localrc`` to specify +# the range of VLAN VIDs from which tenant networks are +# allocated. An external network switch must be configured to +# trunk these VLANs between hosts for multi-host connectivity. +# +# Example: ``TENANT_VLAN_RANGE=1000:1999`` +TENANT_VLAN_RANGE=${TENANT_VLAN_RANGE:-} -NEUTRON_AGENT_BINARY=${NEUTRON_AGENT_BINARY:-neutron-$NEUTRON_AGENT-agent} -NEUTRON_L3_BINARY=${NEUTRON_L3_BINARY:-neutron-l3-agent} -NEUTRON_META_BINARY=${NEUTRON_META_BINARY:-neutron-metadata-agent} -NEUTRON_METERING_BINARY=${NEUTRON_METERING_BINARY:-neutron-metering-agent} +# If using VLANs for tenant networks, or if using flat or VLAN +# provider networks, set in ``localrc`` to the name of the physical +# network, and also configure ``OVS_PHYSICAL_BRIDGE`` for the +# openvswitch agent, as described below. 
+# +# Example: ``PHYSICAL_NETWORK=default`` +PHYSICAL_NETWORK=${PHYSICAL_NETWORK:-public} + +# With the openvswitch agent, if using VLANs for tenant networks, +# or if using flat or VLAN provider networks, set in ``localrc`` to +# the name of the OVS bridge to use for the physical network. The +# bridge will be created if it does not already exist, but a +# physical interface must be manually added to the bridge as a +# port for external connectivity. +# +# Example: ``OVS_PHYSICAL_BRIDGE=br-eth1`` +OVS_PHYSICAL_BRIDGE=${OVS_PHYSICAL_BRIDGE:-br-ex} -# Public facing bits -if is_service_enabled tls-proxy; then - NEUTRON_SERVICE_PROTOCOL="https" +# With the openvswitch plugin, set to True in ``localrc`` to enable +# provider GRE tunnels when ``ENABLE_TENANT_TUNNELS`` is False. +# +# Example: ``OVS_ENABLE_TUNNELING=True`` +OVS_ENABLE_TUNNELING=${OVS_ENABLE_TUNNELING:-$ENABLE_TENANT_TUNNELS} + +# Use DHCP agent for providing metadata service in the case of +# without L3 agent (No Route Agent), set to True in localrc. +ENABLE_ISOLATED_METADATA=${ENABLE_ISOLATED_METADATA:-False} + +# Add a static route as dhcp option, so the request to 169.254.169.254 +# will be able to reach through a route(DHCP agent) +# This option require ENABLE_ISOLATED_METADATA = True +ENABLE_METADATA_NETWORK=${ENABLE_METADATA_NETWORK:-False} +# Neutron plugin specific functions +# --------------------------------- + +# Please refer to ``lib/neutron_plugins/README.md`` for details. +if [ -f $TOP_DIR/lib/neutron_plugins/$Q_PLUGIN ]; then + source $TOP_DIR/lib/neutron_plugins/$Q_PLUGIN fi -NEUTRON_SERVICE_HOST=${NEUTRON_SERVICE_HOST:-$SERVICE_HOST} -NEUTRON_SERVICE_PORT=${NEUTRON_SERVICE_PORT:-9696} -NEUTRON_SERVICE_PORT_INT=${NEUTRON_SERVICE_PORT_INT:-19696} -NEUTRON_SERVICE_PROTOCOL=${NEUTRON_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} -NEUTRON_AUTH_STRATEGY=${NEUTRON_AUTH_STRATEGY:-keystone} -NEUTRON_ROOTWRAP=$(get_rootwrap_location neutron) -NEUTRON_ROOTWRAP_CONF_FILE=$NEUTRON_CONF_DIR/rootwrap.conf -NEUTRON_ROOTWRAP_CMD="$NEUTRON_ROOTWRAP $NEUTRON_ROOTWRAP_CONF_FILE" -NEUTRON_ROOTWRAP_DAEMON_CMD="$NEUTRON_ROOTWRAP-daemon $NEUTRON_ROOTWRAP_CONF_FILE" +# Agent metering service plugin functions +# ------------------------------------------- -# This is needed because _neutron_ovs_base_configure_l3_agent uses it to create -# an external network bridge -PUBLIC_BRIDGE=${PUBLIC_BRIDGE:-br-ex} -PUBLIC_BRIDGE_MTU=${PUBLIC_BRIDGE_MTU:-1500} +# Hardcoding for 1 service plugin for now +source $TOP_DIR/lib/neutron_plugins/services/metering -# Network type - default vxlan, however enables vlan based jobs to override -# using the legacy environment variable as well as a new variable in greater -# alignment with the naming scheme of this plugin. -NEUTRON_TENANT_NETWORK_TYPE=${NEUTRON_TENANT_NETWORK_TYPE:-vxlan} +# L3 Service functions +source $TOP_DIR/lib/neutron_plugins/services/l3 -NEUTRON_TENANT_VLAN_RANGE=${NEUTRON_TENANT_VLAN_RANGE:-${TENANT_VLAN_RANGE:-100:150}} +# Additional Neutron service plugins +source $TOP_DIR/lib/neutron_plugins/services/placement +source $TOP_DIR/lib/neutron_plugins/services/trunk +source $TOP_DIR/lib/neutron_plugins/services/qos +source $TOP_DIR/lib/neutron_plugins/services/segments -# Physical network for VLAN network usage. 
-NEUTRON_PHYSICAL_NETWORK=${NEUTRON_PHYSICAL_NETWORK:-} +# Use security group or not +if has_neutron_plugin_security_group; then + Q_USE_SECGROUP=${Q_USE_SECGROUP:-True} +else + Q_USE_SECGROUP=False +fi +# OVN_BRIDGE_MAPPINGS - ovn-bridge-mappings +# NOTE(hjensas): Initialize after sourcing neutron_plugins/services/l3 +# which initialize PUBLIC_BRIDGE. +OVN_BRIDGE_MAPPINGS=${OVN_BRIDGE_MAPPINGS:-$PHYSICAL_NETWORK:$PUBLIC_BRIDGE} + +# Save trace setting +_XTRACE_NEUTRON=$(set +o | grep xtrace) +set +o xtrace -# Additional neutron api config files -declare -a -g _NEUTRON_SERVER_EXTRA_CONF_FILES_ABS # Functions # --------- @@ -125,296 +313,210 @@ function is_neutron_enabled { } # Test if any Neutron services are enabled -# is_neutron_enabled +# TODO(slaweq): this is not really needed now and we should remove it as soon +# as it will not be called from any other Devstack plugins, like e.g. Neutron +# plugin function is_neutron_legacy_enabled { - # first we need to remove all "neutron-" from DISABLED_SERVICES list - disabled_services_copy=$(echo $DISABLED_SERVICES | sed 's/neutron-//g') - [[ ,${disabled_services_copy} =~ ,"neutron" ]] && return 1 - [[ ,${ENABLED_SERVICES} =~ ,"q-" ]] && return 0 - return 1 + return 0 } -if is_neutron_legacy_enabled; then - source $TOP_DIR/lib/neutron-legacy -fi - -# cleanup_neutron() - Remove residual data files, anything left over from previous -# runs that a clean run would need to clean up -function cleanup_neutron_new { - source $TOP_DIR/lib/neutron_plugins/${NEUTRON_AGENT}_agent - if is_neutron_ovs_base_plugin; then - neutron_ovs_base_cleanup +function _determine_config_server { + if [[ "$Q_PLUGIN_EXTRA_CONF_PATH" != '' ]]; then + if [[ "$Q_PLUGIN_EXTRA_CONF_PATH" = "$_Q_PLUGIN_EXTRA_CONF_PATH" ]]; then + deprecated "Q_PLUGIN_EXTRA_CONF_PATH is deprecated" + else + die $LINENO "Q_PLUGIN_EXTRA_CONF_PATH is deprecated" + fi fi - - if [[ $NEUTRON_AGENT == "linuxbridge" ]]; then - neutron_lb_cleanup + if [[ ${#Q_PLUGIN_EXTRA_CONF_FILES[@]} > 0 ]]; then + deprecated "Q_PLUGIN_EXTRA_CONF_FILES is deprecated. Use neutron_server_config_add instead." 
fi - # delete all namespaces created by neutron - for ns in $(sudo ip netns list | grep -o -E '(qdhcp|qrouter|qlbaas|fip|snat)-[0-9a-f-]*'); do - sudo ip netns delete ${ns} + for cfg_file in ${Q_PLUGIN_EXTRA_CONF_FILES[@]}; do + _Q_PLUGIN_EXTRA_CONF_FILES_ABS+=($_Q_PLUGIN_EXTRA_CONF_PATH/$cfg_file) done -} -# configure_root_helper_options() - Configure agent rootwrap helper options -function configure_root_helper_options { - local conffile=$1 - iniset $conffile agent root_helper "sudo $NEUTRON_ROOTWRAP_CMD" - iniset $conffile agent root_helper_daemon "sudo $NEUTRON_ROOTWRAP_DAEMON_CMD" + local cfg_file + local opts="--config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE" + for cfg_file in ${_Q_PLUGIN_EXTRA_CONF_FILES_ABS[@]}; do + opts+=" --config-file $cfg_file" + done + echo "$opts" } -# configure_neutron() - Set config files, create data dirs, etc -function configure_neutron_new { - sudo install -d -o $STACK_USER $NEUTRON_CONF_DIR - - (cd $NEUTRON_DIR && exec ./tools/generate_config_file_samples.sh) - - cp $NEUTRON_DIR/etc/neutron.conf.sample $NEUTRON_CONF - - configure_neutron_rootwrap +function _determine_config_l3 { + local opts="--config-file $NEUTRON_CONF --config-file $Q_L3_CONF_FILE" + echo "$opts" +} - mkdir -p $NEUTRON_CORE_PLUGIN_CONF_PATH +function _enable_ovn_maintenance { + if [[ $Q_AGENT == "ovn" ]]; then + enable_service neutron-ovn-maintenance-worker + fi +} - # NOTE(yamamoto): A decomposed plugin should prepare the config file in - # its devstack plugin. - if [ -f $NEUTRON_DIR/etc/neutron/plugins/$NEUTRON_CORE_PLUGIN/$NEUTRON_CORE_PLUGIN_CONF_FILENAME.sample ]; then - cp $NEUTRON_DIR/etc/neutron/plugins/$NEUTRON_CORE_PLUGIN/$NEUTRON_CORE_PLUGIN_CONF_FILENAME.sample $NEUTRON_CORE_PLUGIN_CONF +function _run_ovn_maintenance { + if [[ $Q_AGENT == "ovn" ]]; then + run_process neutron-ovn-maintenance-worker "$NEUTRON_BIN_DIR/neutron-ovn-maintenance-worker $cfg_file_options" fi +} - iniset $NEUTRON_CONF database connection `database_connection_url neutron` - iniset $NEUTRON_CONF DEFAULT state_path $NEUTRON_STATE_PATH - iniset $NEUTRON_CONF oslo_concurrency lock_path $NEUTRON_STATE_PATH/lock - iniset $NEUTRON_CONF DEFAULT use_syslog $SYSLOG +function _stop_ovn_maintenance { + if [[ $Q_AGENT == "ovn" ]]; then + stop_process neutron-ovn-maintenance-worker + fi +} - iniset $NEUTRON_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL +# For services and agents that require it, dynamically construct a list of +# --config-file arguments that are passed to the binary. +function determine_config_files { + local opts="" + case "$1" in + "neutron-server") opts="$(_determine_config_server)" ;; + "neutron-l3-agent") opts="$(_determine_config_l3)" ;; + esac + if [ -z "$opts" ] ; then + die $LINENO "Could not determine config files for $1." + fi + echo "$opts" +} +# configure_neutron() +# Set common config for all neutron server and agents. +function configure_neutron { + _configure_neutron_common iniset_rpc_backend neutron $NEUTRON_CONF - # Neutron API server & Neutron plugin - if is_service_enabled neutron-api; then - local policy_file=$NEUTRON_CONF_DIR/policy.json - # Allow neutron user to administer neutron to match neutron account - # NOTE(amotoki): This is required for nova works correctly with neutron. 
- if [ -f $NEUTRON_DIR/etc/policy.json ]; then - cp $NEUTRON_DIR/etc/policy.json $policy_file - sed -i 's/"context_is_admin": "role:admin"/"context_is_admin": "role:admin or user_name:neutron"/g' $policy_file - else - echo '{"context_is_admin": "role:admin or user_name:neutron"}' > $policy_file - fi - - cp $NEUTRON_DIR/etc/api-paste.ini $NEUTRON_CONF_DIR/api-paste.ini - - iniset $NEUTRON_CONF DEFAULT core_plugin $NEUTRON_CORE_PLUGIN - - iniset $NEUTRON_CONF DEFAULT policy_file $policy_file - iniset $NEUTRON_CONF DEFAULT allow_overlapping_ips True - iniset $NEUTRON_CONF DEFAULT router_distributed $NEUTRON_DISTRIBUTED_ROUTING - - iniset $NEUTRON_CONF DEFAULT auth_strategy $NEUTRON_AUTH_STRATEGY - configure_keystone_authtoken_middleware $NEUTRON_CONF neutron - configure_keystone_authtoken_middleware $NEUTRON_CONF nova nova - - # Configure tenant network type - iniset $NEUTRON_CORE_PLUGIN_CONF ml2 tenant_network_types $NEUTRON_TENANT_NETWORK_TYPE - - local mech_drivers="openvswitch" - if [[ "$NEUTRON_DISTRIBUTED_ROUTING" = "True" ]]; then - mech_drivers+=",l2population" - else - mech_drivers+=",linuxbridge" - fi - iniset $NEUTRON_CORE_PLUGIN_CONF ml2 mechanism_drivers $mech_drivers - - iniset $NEUTRON_CORE_PLUGIN_CONF ml2_type_vxlan vni_ranges 1001:2000 - iniset $NEUTRON_CORE_PLUGIN_CONF ml2_type_flat flat_networks $PUBLIC_NETWORK_NAME - if [[ "$NEUTRON_TENANT_NETWORK_TYPE" =~ "vlan" ]] && [[ "$NEUTRON_PHYSICAL_NETWORK" != "" ]]; then - iniset $NEUTRON_CORE_PLUGIN_CONF ml2_type_vlan network_vlan_ranges ${NEUTRON_PHYSICAL_NETWORK}:${NEUTRON_TENANT_VLAN_RANGE} - fi - if [[ "$NEUTRON_PORT_SECURITY" = "True" ]]; then - neutron_ml2_extension_driver_add port_security - fi + if is_service_enabled q-metering neutron-metering; then + _configure_neutron_metering fi - - # Neutron OVS or LB agent - if is_service_enabled neutron-agent; then - iniset $NEUTRON_CORE_PLUGIN_CONF agent tunnel_types vxlan - iniset $NEUTRON_CORE_PLUGIN_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - configure_root_helper_options $NEUTRON_CORE_PLUGIN_CONF - - # Configure the neutron agent - if [[ $NEUTRON_AGENT == "linuxbridge" ]]; then - iniset $NEUTRON_CORE_PLUGIN_CONF securitygroup firewall_driver iptables - iniset $NEUTRON_CORE_PLUGIN_CONF vxlan local_ip $HOST_IP - elif [[ $NEUTRON_AGENT == "openvswitch" ]]; then - iniset $NEUTRON_CORE_PLUGIN_CONF securitygroup firewall_driver openvswitch - iniset $NEUTRON_CORE_PLUGIN_CONF ovs local_ip $HOST_IP - - if [[ "$NEUTRON_DISTRIBUTED_ROUTING" = "True" ]]; then - iniset $NEUTRON_CORE_PLUGIN_CONF agent l2_population True - iniset $NEUTRON_CORE_PLUGIN_CONF agent enable_distributed_routing True - iniset $NEUTRON_CORE_PLUGIN_CONF agent arp_responder True - fi - fi - - if ! 
running_in_container; then - enable_kernel_bridge_firewall - fi + if is_service_enabled q-agt neutron-agent; then + _configure_neutron_plugin_agent fi - - # DHCP Agent - if is_service_enabled neutron-dhcp; then - cp $NEUTRON_DIR/etc/dhcp_agent.ini.sample $NEUTRON_DHCP_CONF - - iniset $NEUTRON_DHCP_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - # make it so we have working DNS from guests - iniset $NEUTRON_DHCP_CONF DEFAULT dnsmasq_local_resolv True - - configure_root_helper_options $NEUTRON_DHCP_CONF - iniset $NEUTRON_DHCP_CONF DEFAULT interface_driver $NEUTRON_AGENT - neutron_plugin_configure_dhcp_agent $NEUTRON_DHCP_CONF + if is_service_enabled q-dhcp neutron-dhcp; then + _configure_neutron_dhcp_agent + fi + if is_service_enabled q-l3 neutron-l3; then + _configure_neutron_l3_agent + fi + if is_service_enabled q-meta neutron-metadata-agent; then + _configure_neutron_metadata_agent fi - if is_service_enabled neutron-l3; then - cp $NEUTRON_DIR/etc/l3_agent.ini.sample $NEUTRON_L3_CONF - iniset $NEUTRON_L3_CONF DEFAULT interface_driver $NEUTRON_AGENT - neutron_service_plugin_class_add router - configure_root_helper_options $NEUTRON_L3_CONF - iniset $NEUTRON_L3_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - neutron_plugin_configure_l3_agent $NEUTRON_L3_CONF + if [[ "$Q_DVR_MODE" != "legacy" ]]; then + _configure_dvr + fi + if is_service_enabled ceilometer; then + _configure_neutron_ceilometer_notifications + fi - # Configure the neutron agent to serve external network ports - if [[ $NEUTRON_AGENT == "linuxbridge" ]]; then - iniset $NEUTRON_CORE_PLUGIN_CONF linux_bridge bridge_mappings "$PUBLIC_NETWORK_NAME:$PUBLIC_BRIDGE" - else - iniset $NEUTRON_CORE_PLUGIN_CONF ovs bridge_mappings "$PUBLIC_NETWORK_NAME:$PUBLIC_BRIDGE" - fi + if [[ $Q_AGENT == "ovn" ]]; then + configure_ovn + configure_ovn_plugin + fi - if [[ "$NEUTRON_DISTRIBUTED_ROUTING" = "True" ]]; then - iniset $NEUTRON_L3_CONF DEFAULT agent_mode $NEUTRON_DVR_MODE + # Configure Neutron's advanced services + if is_service_enabled q-placement neutron-placement; then + configure_placement_extension + fi + if is_service_enabled q-trunk neutron-trunk; then + configure_trunk_extension + fi + if is_service_enabled q-qos neutron-qos; then + configure_qos + if is_service_enabled q-l3 neutron-l3; then + configure_l3_agent_extension_fip_qos + configure_l3_agent_extension_gateway_ip_qos fi fi - - # Metadata - if is_service_enabled neutron-metadata-agent; then - cp $NEUTRON_DIR/etc/metadata_agent.ini.sample $NEUTRON_META_CONF - - iniset $NEUTRON_META_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - iniset $NEUTRON_META_CONF DEFAULT nova_metadata_host $NEUTRON_META_DATA_HOST - iniset $NEUTRON_META_CONF DEFAULT metadata_workers $API_WORKERS - # TODO(ihrachys) do we really need to set rootwrap for metadata agent? 
- configure_root_helper_options $NEUTRON_META_CONF - - # TODO(dtroyer): remove the v2.0 hard code below - iniset $NEUTRON_META_CONF DEFAULT auth_url $KEYSTONE_SERVICE_URI - configure_keystone_authtoken_middleware $NEUTRON_META_CONF neutron DEFAULT + if is_service_enabled neutron-segments; then + configure_placement_neutron + configure_segments_extension fi - # Format logging - setup_logging $NEUTRON_CONF - - if is_service_enabled tls-proxy && [ "$NEUTRON_DEPLOY_MOD_WSGI" == "False" ]; then - # Set the service port for a proxy to take the original - iniset $NEUTRON_CONF DEFAULT bind_port "$NEUTRON_SERVICE_PORT_INT" - iniset $NEUTRON_CONF oslo_middleware enable_proxy_headers_parsing True + # Finally configure Neutron server and core plugin + if is_service_enabled q-agt neutron-agent q-svc neutron-api; then + _configure_neutron_service fi - # Metering - if is_service_enabled neutron-metering; then - cp $NEUTRON_DIR/etc/metering_agent.ini.sample $NEUTRON_METERING_AGENT_CONF - neutron_service_plugin_class_add metering - fi + iniset $NEUTRON_CONF DEFAULT api_workers "$API_WORKERS" + # devstack is not a tool for running uber scale OpenStack + # clouds, therefore running without a dedicated RPC worker + # for state reports is more than adequate. + iniset $NEUTRON_CONF DEFAULT rpc_state_report_workers 0 + + write_uwsgi_config "$NEUTRON_UWSGI_CONF" "$NEUTRON_UWSGI" "/networking" "" "neutron-api" } -# configure_neutron_rootwrap() - configure Neutron's rootwrap -function configure_neutron_rootwrap { - # Deploy new rootwrap filters files (owned by root). - # Wipe any existing rootwrap.d files first - if [[ -d $NEUTRON_CONF_DIR/rootwrap.d ]]; then - sudo rm -rf $NEUTRON_CONF_DIR/rootwrap.d +function configure_neutron_nova { + create_nova_conf_neutron $NOVA_CONF + if [[ "${CELLSV2_SETUP}" == "superconductor" ]]; then + for i in $(seq 1 $NOVA_NUM_CELLS); do + local conf + conf=$(conductor_conf $i) + create_nova_conf_neutron $conf + done fi +} - # Deploy filters to /etc/neutron/rootwrap.d - sudo install -d -o root -g root -m 755 $NEUTRON_CONF_DIR/rootwrap.d - sudo install -o root -g root -m 644 $NEUTRON_DIR/etc/neutron/rootwrap.d/*.filters $NEUTRON_CONF_DIR/rootwrap.d - - # Set up ``rootwrap.conf``, pointing to ``$NEUTRON_CONF_DIR/rootwrap.d`` - sudo install -o root -g root -m 644 $NEUTRON_DIR/etc/rootwrap.conf $NEUTRON_CONF_DIR - sudo sed -e "s:^filters_path=.*$:filters_path=$NEUTRON_CONF_DIR/rootwrap.d:" -i $NEUTRON_CONF_DIR/rootwrap.conf - - # Set up the rootwrap sudoers for Neutron - tempfile=`mktemp` - echo "$STACK_USER ALL=(root) NOPASSWD: $NEUTRON_ROOTWRAP_CMD *" >$tempfile - echo "$STACK_USER ALL=(root) NOPASSWD: $NEUTRON_ROOTWRAP_DAEMON_CMD" >>$tempfile - chmod 0440 $tempfile - sudo chown root:root $tempfile - sudo mv $tempfile /etc/sudoers.d/neutron-rootwrap -} - -# Make Neutron-required changes to nova.conf -# Takes a single optional argument which is the config file to update, -# if not passed $NOVA_CONF is used. 
-function configure_neutron_nova_new { +function create_nova_conf_neutron { local conf=${1:-$NOVA_CONF} iniset $conf neutron auth_type "password" iniset $conf neutron auth_url "$KEYSTONE_SERVICE_URI" - iniset $conf neutron username neutron + iniset $conf neutron username nova iniset $conf neutron password "$SERVICE_PASSWORD" - iniset $conf neutron user_domain_name "Default" - iniset $conf neutron project_name "$SERVICE_TENANT_NAME" - iniset $conf neutron project_domain_name "Default" - iniset $conf neutron auth_strategy $NEUTRON_AUTH_STRATEGY + iniset $conf neutron user_domain_name "$SERVICE_DOMAIN_NAME" + iniset $conf neutron project_name "$SERVICE_PROJECT_NAME" + iniset $conf neutron project_domain_name "$SERVICE_DOMAIN_NAME" + iniset $conf neutron auth_strategy "$Q_AUTH_STRATEGY" iniset $conf neutron region_name "$REGION_NAME" # optionally set options in nova_conf neutron_plugin_create_nova_conf $conf - if is_service_enabled neutron-metadata-agent; then + if is_service_enabled q-meta neutron-metadata-agent; then iniset $conf neutron service_metadata_proxy "True" fi + iniset $conf DEFAULT vif_plugging_is_fatal "$VIF_PLUGGING_IS_FATAL" + iniset $conf DEFAULT vif_plugging_timeout "$VIF_PLUGGING_TIMEOUT" } +# create_neutron_accounts() - Set up common required neutron accounts + # Tenant User Roles # ------------------------------------------------------------------ # service neutron admin # if enabled -# create_neutron_accounts() - Create required service accounts -function create_neutron_accounts_new { +# Migrated from keystone_data.sh +function create_neutron_accounts { local neutron_url - - if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then - neutron_url=$NEUTRON_SERVICE_PROTOCOL://$NEUTRON_SERVICE_HOST/networking/ - else - neutron_url=$NEUTRON_SERVICE_PROTOCOL://$NEUTRON_SERVICE_HOST:$NEUTRON_SERVICE_PORT/ + neutron_url=$Q_PROTOCOL://$SERVICE_HOST/ + if [ ! -z "$NEUTRON_ENDPOINT_SERVICE_NAME" ]; then + neutron_url=$neutron_url$NEUTRON_ENDPOINT_SERVICE_NAME fi - - if [[ "$ENABLED_SERVICES" =~ "neutron-api" ]]; then + if is_service_enabled q-svc neutron-api; then create_service_user "neutron" - neutron_service=$(get_or_create_service "neutron" \ - "network" "Neutron Service") - get_or_create_endpoint $neutron_service \ + get_or_create_service "neutron" "network" "Neutron Service" + get_or_create_endpoint \ + "network" \ "$REGION_NAME" "$neutron_url" fi } # init_neutron() - Initialize databases, etc. -function init_neutron_new { - - recreate_database neutron - +function init_neutron { + recreate_database $Q_DB_NAME time_start "dbsync" # Run Neutron db migrations - $NEUTRON_BIN_DIR/neutron-db-manage upgrade heads + $NEUTRON_BIN_DIR/neutron-db-manage --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE upgrade head time_stop "dbsync" } # install_neutron() - Collect source and prepare -function install_neutron_new { - git_clone $NEUTRON_REPO $NEUTRON_DIR $NEUTRON_BRANCH - setup_develop $NEUTRON_DIR - +function install_neutron { # Install neutron-lib from git so we make sure we're testing # the latest code. 
if use_library_from_git "neutron-lib"; then @@ -422,17 +524,23 @@ function install_neutron_new { setup_dev_lib "neutron-lib" fi - # L3 service requires radvd - if is_service_enabled neutron-l3; then - install_package radvd + # Install SQLAlchemy and alembic from git when these are required + # see https://bugs.launchpad.net/neutron/+bug/2042941 + if use_library_from_git "sqlalchemy"; then + git_clone $SQLALCHEMY_REPO $SQLALCHEMY_DIR $SQLALCHEMY_BRANCH + setup_develop $SQLALCHEMY_DIR fi - - if is_service_enabled neutron-agent neutron-dhcp neutron-l3; then - #TODO(sc68cal) - kind of ugly - source $TOP_DIR/lib/neutron_plugins/${NEUTRON_AGENT}_agent - neutron_plugin_install_agent_packages + if use_library_from_git "alembic"; then + git_clone $ALEMBIC_REPO $ALEMBIC_DIR $ALEMBIC_BRANCH + setup_develop $ALEMBIC_DIR fi + git_clone $NEUTRON_REPO $NEUTRON_DIR $NEUTRON_BRANCH + setup_develop $NEUTRON_DIR + + if [[ $Q_AGENT == "ovn" ]]; then + install_ovn + fi } # install_neutronclient() - Collect source and prepare @@ -440,291 +548,611 @@ function install_neutronclient { if use_library_from_git "python-neutronclient"; then git_clone_by_name "python-neutronclient" setup_dev_lib "python-neutronclient" - sudo install -D -m 0644 -o $STACK_USER {${GITDIR["python-neutronclient"]}/tools/,/etc/bash_completion.d/}neutron.bash_completion fi } -# start_neutron_api() - Start the API process ahead of other things -function start_neutron_api { - local service_port=$NEUTRON_SERVICE_PORT - local service_protocol=$NEUTRON_SERVICE_PROTOCOL - local neutron_url - if is_service_enabled tls-proxy; then - service_port=$NEUTRON_SERVICE_PORT_INT - service_protocol="http" +# install_neutron_agent_packages() - Collect source and prepare +function install_neutron_agent_packages { + # radvd doesn't come with the OS. Install it if the l3 service is enabled. 
+ if is_service_enabled q-l3 neutron-l3; then + install_package radvd + fi + # install packages that are specific to plugin agent(s) + if is_service_enabled q-agt neutron-agent q-dhcp neutron-dhcp q-l3 neutron-l3; then + neutron_plugin_install_agent_packages fi +} - local opts="" - opts+=" --config-file $NEUTRON_CONF" - opts+=" --config-file $NEUTRON_CORE_PLUGIN_CONF" - local cfg_file - for cfg_file in ${_NEUTRON_SERVER_EXTRA_CONF_FILES_ABS[@]}; do - opts+=" --config-file $cfg_file" - done +# Finish neutron configuration +function configure_neutron_after_post_config { + if [[ $Q_SERVICE_PLUGIN_CLASSES != '' ]]; then + iniset $NEUTRON_CONF DEFAULT service_plugins $Q_SERVICE_PLUGIN_CLASSES + fi + configure_rbac_policies +} - if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then - run_process neutron-api "$(which uwsgi) --procname-prefix neutron-api --ini $NEUTRON_UWSGI_CONF" - neutron_url=$service_protocol://$NEUTRON_SERVICE_HOST/networking/ - enable_service neutron-rpc-server - run_process neutron-rpc-server "$NEUTRON_BIN_DIR/neutron-rpc-server $opts" +# configure_rbac_policies() - Configure Neutron to enforce new RBAC +# policies and scopes if NEUTRON_ENFORCE_SCOPE == True +function configure_rbac_policies { + if [[ "$NEUTRON_ENFORCE_SCOPE" == "True" || "$ENFORCE_SCOPE" == True ]]; then + iniset $NEUTRON_CONF oslo_policy enforce_new_defaults True + iniset $NEUTRON_CONF oslo_policy enforce_scope True else - # Start the Neutron service - # TODO(sc68cal) Stop hard coding this - run_process neutron-api "$NEUTRON_BIN_DIR/neutron-server $opts" - neutron_url=$service_protocol://$NEUTRON_SERVICE_HOST:$service_port - # Start proxy if enabled - if is_service_enabled tls-proxy; then - start_tls_proxy neutron '*' $NEUTRON_SERVICE_PORT $NEUTRON_SERVICE_HOST $NEUTRON_SERVICE_PORT_INT - fi + iniset $NEUTRON_CONF oslo_policy enforce_new_defaults False + iniset $NEUTRON_CONF oslo_policy enforce_scope False fi +} - if ! wait_for_service $SERVICE_TIMEOUT $neutron_url; then - die $LINENO "neutron-api did not start" +# Start running OVN processes +function start_ovn_services { + if [[ $Q_AGENT == "ovn" ]]; then + if [ "$VIRT_DRIVER" != 'ironic' ]; then + # NOTE(TheJulia): Ironic's devstack plugin needs to perform + # additional networking configuration to setup a working test + # environment with test virtual machines to emulate baremetal, + # which requires OVN to be up and running earlier to complete + # that base configuration. 
+ init_ovn + start_ovn + fi + if [[ "$OVN_L3_CREATE_PUBLIC_NETWORK" == "True" ]]; then + if [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" != "True" ]]; then + echo "OVN_L3_CREATE_PUBLIC_NETWORK=True is being ignored " + echo "because NEUTRON_CREATE_INITIAL_NETWORKS is set to False" + else + create_public_bridge + fi + fi fi } -# start_neutron() - Start running processes -function start_neutron_new { - # Start up the neutron agents if enabled - # TODO(sc68cal) Make this pluggable so different DevStack plugins for different Neutron plugins - # can resolve the $NEUTRON_AGENT_BINARY - if is_service_enabled neutron-agent; then - # TODO(ihrachys) stop loading ml2_conf.ini into agents, instead load agent specific files - run_process neutron-agent "$NEUTRON_BIN_DIR/$NEUTRON_AGENT_BINARY --config-file $NEUTRON_CONF --config-file $NEUTRON_CORE_PLUGIN_CONF" - fi - if is_service_enabled neutron-dhcp; then - neutron_plugin_configure_dhcp_agent $NEUTRON_DHCP_CONF - run_process neutron-dhcp "$NEUTRON_BIN_DIR/$NEUTRON_DHCP_BINARY --config-file $NEUTRON_CONF --config-file $NEUTRON_DHCP_CONF" +# Start running processes +function start_neutron_service_and_check { + local service_port=$Q_PORT + local service_protocol=$Q_PROTOCOL + local cfg_file_options + local neutron_url + + cfg_file_options="$(determine_config_files neutron-server)" + + if is_service_enabled tls-proxy; then + service_port=$Q_PORT_INT + service_protocol="http" fi - if is_service_enabled neutron-l3; then - run_process neutron-l3 "$NEUTRON_BIN_DIR/$NEUTRON_L3_BINARY --config-file $NEUTRON_CONF --config-file $NEUTRON_L3_CONF" + + # Start the Neutron service + # The default value of "rpc_workers" is None (not defined). If + # "rpc_workers" is explicitly set to 0, the RPC workers process + # should not be executed. + local rpc_workers + rpc_workers=$(iniget_multiline $NEUTRON_CONF DEFAULT rpc_workers) + + enable_service neutron-api + run_process neutron-api "$(which uwsgi) --procname-prefix neutron-api --ini $NEUTRON_UWSGI_CONF" + neutron_url=$Q_PROTOCOL://$Q_HOST/ + if [ "$rpc_workers" != "0" ]; then + enable_service neutron-rpc-server fi - if is_service_enabled neutron-api && [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" == "True" ]]; then - # XXX(sc68cal) - Here's where plugins can wire up their own networks instead - # of the code in lib/neutron_plugins/services/l3 - if type -p neutron_plugin_create_initial_networks > /dev/null; then - neutron_plugin_create_initial_networks - else - # XXX(sc68cal) Load up the built in Neutron networking code and build a topology - source $TOP_DIR/lib/neutron_plugins/services/l3 - # Create the networks using servic - create_neutron_initial_network - fi + enable_service neutron-periodic-workers + _enable_ovn_maintenance + if [ "$rpc_workers" != "0" ]; then + run_process neutron-rpc-server "$NEUTRON_BIN_DIR/neutron-rpc-server $cfg_file_options" fi - if is_service_enabled neutron-metadata-agent; then - run_process neutron-metadata-agent "$NEUTRON_BIN_DIR/$NEUTRON_META_BINARY --config-file $NEUTRON_CONF --config-file $NEUTRON_META_CONF" + run_process neutron-periodic-workers "$NEUTRON_BIN_DIR/neutron-periodic-workers $cfg_file_options" + _run_ovn_maintenance + if [ ! -z "$NEUTRON_ENDPOINT_SERVICE_NAME" ]; then + neutron_url=$neutron_url$NEUTRON_ENDPOINT_SERVICE_NAME fi + echo "Waiting for Neutron to start..." 
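+    # Readiness probe (comment only, summarizing the testcmd below): the
+    # Neutron endpoint is polled with wget until it answers or
+    # $SERVICE_TIMEOUT seconds elapse, at which point startup is declared
+    # failed.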
- if is_service_enabled neutron-metering; then - run_process neutron-metering "$NEUTRON_BIN_DIR/$NEUTRON_METERING_BINARY --config-file $NEUTRON_CONF --config-file $NEUTRON_METERING_AGENT_CONF" - fi + local testcmd="wget ${ssl_ca} --no-proxy -q -O- $neutron_url" + test_with_retry "$testcmd" "Neutron did not start" $SERVICE_TIMEOUT } -# stop_neutron() - Stop running processes -function stop_neutron_new { - for serv in neutron-api neutron-agent neutron-l3; do - stop_process $serv - done +function start_neutron { + start_l2_agent "$@" + start_other_agents "$@" +} - if is_service_enabled neutron-rpc-server; then - stop_process neutron-rpc-server +# Control of the l2 agent is separated out to make it easier to test partial +# upgrades (everything upgraded except the L2 agent) +function start_l2_agent { + run_process q-agt "$AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE" + + if is_provider_network && [[ $Q_AGENT == "openvswitch" ]]; then + sudo ovs-vsctl --no-wait -- --may-exist add-port $OVS_PHYSICAL_BRIDGE $PUBLIC_INTERFACE + sudo ip link set $OVS_PHYSICAL_BRIDGE up + sudo ip link set br-int up + sudo ip link set $PUBLIC_INTERFACE up + if is_ironic_hardware; then + for IP in $(ip addr show dev $PUBLIC_INTERFACE | grep ' inet ' | awk '{print $2}'); do + sudo ip addr del $IP dev $PUBLIC_INTERFACE + sudo ip addr add $IP dev $OVS_PHYSICAL_BRIDGE + done + sudo ip route replace $FIXED_RANGE via $NETWORK_GATEWAY dev $OVS_PHYSICAL_BRIDGE + fi fi +} + +function start_other_agents { + run_process q-dhcp "$AGENT_DHCP_BINARY --config-file $NEUTRON_CONF --config-file $Q_DHCP_CONF_FILE" + + run_process q-l3 "$AGENT_L3_BINARY $(determine_config_files neutron-l3-agent)" + + run_process q-meta "$AGENT_META_BINARY --config-file $NEUTRON_CONF --config-file $Q_META_CONF_FILE" + run_process q-metering "$AGENT_METERING_BINARY --config-file $NEUTRON_CONF --config-file $METERING_AGENT_CONF_FILENAME" +} - if is_service_enabled neutron-dhcp; then - stop_process neutron-dhcp +# Start running processes, including screen +function start_neutron_agents { + # NOTE(slaweq): it's now just a wrapper for start_neutron function + start_neutron "$@" +} + +function stop_l2_agent { + stop_process q-agt +} + +# stop_other() - Stop running processes +function stop_other { + if is_service_enabled q-dhcp neutron-dhcp; then + stop_process q-dhcp pid=$(ps aux | awk '/[d]nsmasq.+interface=(tap|ns-)/ { print $2 }') [ ! 
-z "$pid" ] && sudo kill -9 $pid fi - if is_service_enabled neutron-metadata-agent; then - sudo pkill -9 -f neutron-ns-metadata-proxy || : - stop_process neutron-metadata-agent + stop_process neutron-rpc-server + stop_process neutron-periodic-workers + stop_process neutron-api + _stop_ovn_maintenance + + if is_service_enabled q-l3 neutron-l3; then + sudo pkill -f "radvd -C $DATA_DIR/neutron/ra" + stop_process q-l3 fi -} -# neutron_service_plugin_class_add() - add service plugin class -function neutron_service_plugin_class_add_new { - local service_plugin_class=$1 - local plugins="" + if is_service_enabled q-meta neutron-metadata-agent; then + stop_process q-meta + fi - plugins=$(iniget $NEUTRON_CONF DEFAULT service_plugins) - if [ $plugins ]; then - plugins+="," + if is_service_enabled q-metering neutron-metering; then + neutron_metering_stop + fi + + if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then + # pkill takes care not to kill itself, but it may kill its parent + # sudo unless we use the "ps | grep [f]oo" trick + sudo pkill -9 -f "$NEUTRON_ROOTWRAP-[d]aemon" || : fi - plugins+="${service_plugin_class}" - iniset $NEUTRON_CONF DEFAULT service_plugins $plugins } -function _neutron_ml2_extension_driver_add { - local driver=$1 - local drivers="" +# stop_neutron() - Stop running processes (non-screen) +function stop_neutron { + stop_other + stop_l2_agent - drivers=$(iniget $NEUTRON_CORE_PLUGIN_CONF ml2 extension_drivers) - if [ $drivers ]; then - drivers+="," + if [[ $Q_AGENT == "ovn" && $SKIP_STOP_OVN != "True" ]]; then + stop_ovn fi - drivers+="${driver}" - iniset $NEUTRON_CORE_PLUGIN_CONF ml2 extension_drivers $drivers } -function neutron_server_config_add_new { - _NEUTRON_SERVER_EXTRA_CONF_FILES_ABS+=($1) +# _move_neutron_addresses_route() - Move the primary IP to the OVS bridge +# on startup, or back to the public interface on cleanup. If no IP is +# configured on the interface, just add it as a port to the OVS bridge. +function _move_neutron_addresses_route { + local from_intf=$1 + local to_intf=$2 + local add_ovs_port=$3 + local del_ovs_port=$4 + local af=$5 + + if [[ -n "$from_intf" && -n "$to_intf" ]]; then + # Remove the primary IP address from $from_intf and add it to $to_intf, + # along with the default route, if it exists. Also, when called + # on configure we will also add $from_intf as a port on $to_intf, + # assuming it is an OVS bridge. 
+
+        local IP_REPLACE=""
+        local IP_DEL=""
+        local IP_UP=""
+        local DEFAULT_ROUTE_GW
+        DEFAULT_ROUTE_GW=$(ip -f $af r | awk "/default.+$from_intf\s/ { print \$3; exit }")
+        local ADD_OVS_PORT=""
+        local DEL_OVS_PORT=""
+        local ARP_CMD=""
+
+        IP_BRD=$(ip -f $af a s dev $from_intf scope global primary | grep inet | awk '{ print $2, $3, $4; exit }')
+
+        if [ "$DEFAULT_ROUTE_GW" != "" ]; then
+            ADD_DEFAULT_ROUTE="sudo ip -f $af r replace default via $DEFAULT_ROUTE_GW dev $to_intf"
+        fi
+
+        if [[ "$add_ovs_port" == "True" ]]; then
+            ADD_OVS_PORT="sudo ovs-vsctl --may-exist add-port $to_intf $from_intf"
+        fi
+
+        if [[ "$del_ovs_port" == "True" ]]; then
+            DEL_OVS_PORT="sudo ovs-vsctl --if-exists del-port $from_intf $to_intf"
+        fi
+
+        if [[ "$IP_BRD" != "" ]]; then
+            IP_DEL="sudo ip addr del $IP_BRD dev $from_intf"
+            IP_REPLACE="sudo ip addr replace $IP_BRD dev $to_intf"
+            IP_UP="sudo ip link set $to_intf up"
+            if [[ "$af" == "inet" ]]; then
+                IP=$(echo $IP_BRD | awk '{ print $1; exit }' | grep -o -E '(.*)/' | cut -d "/" -f1)
+                ARP_CMD="sudo arping -A -c 3 -w 5 -I $to_intf $IP "
+            fi
+        fi
+
+        # The add/del OVS port calls have to happen either before or
+        # after the address is moved in order to not leave it orphaned.
+        $DEL_OVS_PORT; $IP_DEL; $IP_REPLACE; $IP_UP; $ADD_OVS_PORT; $ADD_DEFAULT_ROUTE; $ARP_CMD
+    fi
 }
 
-# neutron_deploy_rootwrap_filters() - deploy rootwrap filters
-function neutron_deploy_rootwrap_filters_new {
-    local srcdir=$1
-    sudo install -d -o root -g root -m 755 $NEUTRON_CONF_DIR/rootwrap.d
-    sudo install -o root -g root -m 644 $srcdir/etc/neutron/rootwrap.d/*.filters $NEUTRON_CONF_DIR/rootwrap.d
+# _configure_public_network_connectivity() - Configures connectivity to the
+# external network using $PUBLIC_INTERFACE or NAT on the single interface
+# machines
+function _configure_public_network_connectivity {
+    # If we've given a PUBLIC_INTERFACE to take over, then we assume
+    # that we can own the whole thing, and pivot it into the OVS
+    # bridge. If we are not, we're probably on a single interface
+    # machine, and we just set up NAT so that fixed guests can get out.
+    if [[ -n "$PUBLIC_INTERFACE" ]]; then
+        _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" True False "inet"
+
+        if [[ $(ip -f inet6 a s dev "$PUBLIC_INTERFACE" | grep -c 'global') != 0 ]]; then
+            _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" False False "inet6"
+        fi
+    else
+        for d in $default_v4_route_devs; do
+            sudo iptables -t nat -A POSTROUTING -o $d -s $FLOATING_RANGE -j MASQUERADE
+        done
+    fi
 }
 
-# Dispatch functions
-# These are needed for compatibility between the old and new implementations
-# where there are function name overlaps. These will be removed when
-# neutron-legacy is removed.
-# TODO(sc68cal) Remove when neutron-legacy is no more.
+# cleanup_neutron() - Remove residual data files, anything left over from previous +# runs that a clean run would need to clean up function cleanup_neutron { - if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then - stop_process neutron-api - stop_process neutron-rpc-server - remove_uwsgi_config "$NEUTRON_UWSGI_CONF" "$NEUTRON_BIN_DIR/neutron-api" - sudo rm -f $(apache_site_config_for neutron-api) + stop_process neutron-api + stop_process neutron-rpc-server + stop_process neutron-periodic-workers + _stop_ovn_maintenance + remove_uwsgi_config "$NEUTRON_UWSGI_CONF" "neutron-api" + sudo rm -f $(apache_site_config_for neutron-api) + + if [[ -n "$OVS_PHYSICAL_BRIDGE" ]]; then + _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False True "inet" + + if [[ $(ip -f inet6 a s dev "$OVS_PHYSICAL_BRIDGE" | grep -c 'global') != 0 ]]; then + # ip(8) wants the prefix length when deleting + local v6_gateway + v6_gateway=$(ip -6 a s dev $OVS_PHYSICAL_BRIDGE | grep $IPV6_PUBLIC_NETWORK_GATEWAY | awk '{ print $2 }') + sudo ip -6 addr del $v6_gateway dev $OVS_PHYSICAL_BRIDGE + _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False False "inet6" + fi + + if is_provider_network && is_ironic_hardware; then + for IP in $(ip addr show dev $OVS_PHYSICAL_BRIDGE | grep ' inet ' | awk '{print $2}'); do + sudo ip addr del $IP dev $OVS_PHYSICAL_BRIDGE + sudo ip addr add $IP dev $PUBLIC_INTERFACE + done + sudo route del -net $FIXED_RANGE gw $NETWORK_GATEWAY dev $OVS_PHYSICAL_BRIDGE + fi fi - if is_neutron_legacy_enabled; then - # Call back to old function - cleanup_mutnauq "$@" - else - cleanup_neutron_new "$@" + if is_neutron_ovs_base_plugin; then + neutron_ovs_base_cleanup + fi + + # delete all namespaces created by neutron + for ns in $(sudo ip netns list | grep -o -E '(qdhcp|qrouter|fip|snat)-[0-9a-f-]*'); do + sudo ip netns delete ${ns} + done + + if [[ $Q_AGENT == "ovn" ]]; then + cleanup_ovn fi } -function configure_neutron { - if is_neutron_legacy_enabled; then - # Call back to old function - configure_mutnauq "$@" + +function _create_neutron_conf_dir { + # Put config files in ``NEUTRON_CONF_DIR`` for everyone to find + sudo install -d -o $STACK_USER $NEUTRON_CONF_DIR +} + +# _configure_neutron_common() +# Set common config for all neutron server and agents. +# This MUST be called before other ``_configure_neutron_*`` functions. +function _configure_neutron_common { + _create_neutron_conf_dir + + # Uses oslo config generator to generate core sample configuration files + (cd $NEUTRON_DIR && exec ./tools/generate_config_file_samples.sh) + + cp $NEUTRON_DIR/etc/neutron.conf.sample $NEUTRON_CONF + + Q_POLICY_FILE=$NEUTRON_CONF_DIR/policy.json + + # allow neutron user to administer neutron to match neutron account + # NOTE(amotoki): This is required for nova works correctly with neutron. + if [ -f $NEUTRON_DIR/etc/policy.json ]; then + cp $NEUTRON_DIR/etc/policy.json $Q_POLICY_FILE + sed -i 's/"context_is_admin": "role:admin"/"context_is_admin": "role:admin or user_name:neutron"/g' $Q_POLICY_FILE else - configure_neutron_new "$@" + echo '{"context_is_admin": "role:admin or user_name:neutron"}' > $Q_POLICY_FILE + fi + + # Set plugin-specific variables ``Q_DB_NAME``, ``Q_PLUGIN_CLASS``. + # For main plugin config file, set ``Q_PLUGIN_CONF_PATH``, ``Q_PLUGIN_CONF_FILENAME``. + neutron_plugin_configure_common + + if [[ "$Q_PLUGIN_CONF_PATH" == '' || "$Q_PLUGIN_CONF_FILENAME" == '' || "$Q_PLUGIN_CLASS" == '' ]]; then + die $LINENO "Neutron plugin not set.. 
exiting" fi - if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then - write_uwsgi_config "$NEUTRON_UWSGI_CONF" "$NEUTRON_BIN_DIR/neutron-api" "/networking" + # If needed, move config file from ``$NEUTRON_DIR/etc/neutron`` to ``NEUTRON_CONF_DIR`` + mkdir -p /$Q_PLUGIN_CONF_PATH + Q_PLUGIN_CONF_FILE=$Q_PLUGIN_CONF_PATH/$Q_PLUGIN_CONF_FILENAME + # NOTE(slaweq): NEUTRON_CORE_PLUGIN_CONF is used e.g. in neutron repository, + # it was previously defined in the lib/neutron module which is now deleted. + NEUTRON_CORE_PLUGIN_CONF=$Q_PLUGIN_CONF_FILE + # NOTE(hichihara): Some neutron vendor plugins were already decomposed and + # there is no config file in Neutron tree. They should prepare the file in each plugin. + if [ -f "$NEUTRON_DIR/$Q_PLUGIN_CONF_FILE.sample" ]; then + cp "$NEUTRON_DIR/$Q_PLUGIN_CONF_FILE.sample" /$Q_PLUGIN_CONF_FILE + elif [ -f $NEUTRON_DIR/$Q_PLUGIN_CONF_FILE ]; then + cp $NEUTRON_DIR/$Q_PLUGIN_CONF_FILE /$Q_PLUGIN_CONF_FILE + fi + + iniset $NEUTRON_CONF database connection `database_connection_url $Q_DB_NAME` + iniset $NEUTRON_CONF DEFAULT state_path $DATA_DIR/neutron + iniset $NEUTRON_CONF DEFAULT use_syslog $SYSLOG + iniset $NEUTRON_CONF DEFAULT bind_host $Q_LISTEN_ADDRESS + iniset $NEUTRON_CONF oslo_concurrency lock_path $DATA_DIR/neutron/lock + + # NOTE(freerunner): Need to adjust Region Name for nova in multiregion installation + iniset $NEUTRON_CONF nova region_name $REGION_NAME + + if [ "$VIRT_DRIVER" = 'fake' ]; then + # Disable arbitrary limits + iniset $NEUTRON_CONF quotas quota_network -1 + iniset $NEUTRON_CONF quotas quota_subnet -1 + iniset $NEUTRON_CONF quotas quota_port -1 + iniset $NEUTRON_CONF quotas quota_security_group -1 + iniset $NEUTRON_CONF quotas quota_security_group_rule -1 fi + + # Format logging + setup_logging $NEUTRON_CONF + + _neutron_setup_rootwrap } -function configure_neutron_nova { - if is_neutron_legacy_enabled; then - # Call back to old function - create_nova_conf_neutron $NOVA_CONF - if [[ "${CELLSV2_SETUP}" == "superconductor" ]]; then - for i in $(seq 1 $NOVA_NUM_CELLS); do - local conf - conf=$(conductor_conf $i) - create_nova_conf_neutron $conf - done - fi - else - configure_neutron_nova_new $NOVA_CONF - if [[ "${CELLSV2_SETUP}" == "superconductor" ]]; then - for i in $(seq 1 $NOVA_NUM_CELLS); do - local conf - conf=$(conductor_conf $i) - configure_neutron_nova_new $conf - done +function _configure_neutron_dhcp_agent { + + cp $NEUTRON_DIR/etc/dhcp_agent.ini.sample $Q_DHCP_CONF_FILE + + iniset $Q_DHCP_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL + # make it so we have working DNS from guests + iniset $Q_DHCP_CONF_FILE DEFAULT dnsmasq_local_resolv True + configure_root_helper_options $Q_DHCP_CONF_FILE + + if ! 
is_service_enabled q-l3 neutron-l3; then + if [[ "$ENABLE_ISOLATED_METADATA" = "True" ]]; then + iniset $Q_DHCP_CONF_FILE DEFAULT enable_isolated_metadata $ENABLE_ISOLATED_METADATA + iniset $Q_DHCP_CONF_FILE DEFAULT enable_metadata_network $ENABLE_METADATA_NETWORK + else + if [[ "$ENABLE_METADATA_NETWORK" = "True" ]]; then + die "$LINENO" "Enable isolated metadata is a must for metadata network" + fi fi fi + + _neutron_setup_interface_driver $Q_DHCP_CONF_FILE + + neutron_plugin_configure_dhcp_agent $Q_DHCP_CONF_FILE } -function create_neutron_accounts { - if is_neutron_legacy_enabled; then - # Call back to old function - create_mutnauq_accounts "$@" - else - create_neutron_accounts_new "$@" - fi + +function _configure_neutron_metadata_agent { + cp $NEUTRON_DIR/etc/metadata_agent.ini.sample $Q_META_CONF_FILE + + iniset $Q_META_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL + iniset $Q_META_CONF_FILE DEFAULT nova_metadata_host $Q_META_DATA_IP + iniset $Q_META_CONF_FILE DEFAULT metadata_workers $API_WORKERS + configure_root_helper_options $Q_META_CONF_FILE } -function init_neutron { - if is_neutron_legacy_enabled; then - # Call back to old function - init_mutnauq "$@" - else - init_neutron_new "$@" - fi +function _configure_neutron_ceilometer_notifications { + iniset $NEUTRON_CONF oslo_messaging_notifications driver messagingv2 } -function install_neutron { - if is_neutron_legacy_enabled; then - # Call back to old function - install_mutnauq "$@" +function _configure_neutron_metering { + neutron_agent_metering_configure_common + neutron_agent_metering_configure_agent +} + +function _configure_dvr { + iniset $NEUTRON_CONF DEFAULT router_distributed True + iniset $Q_L3_CONF_FILE DEFAULT agent_mode $Q_DVR_MODE +} + + +# _configure_neutron_plugin_agent() - Set config files for neutron plugin agent +# It is called when q-agt is enabled. +function _configure_neutron_plugin_agent { + # Specify the default root helper prior to agent configuration to + # ensure that an agent's configuration can override the default + configure_root_helper_options /$Q_PLUGIN_CONF_FILE + iniset $NEUTRON_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL + + # Configure agent for plugin + neutron_plugin_configure_plugin_agent +} + +# _configure_neutron_service() - Set config files for neutron service +# It is called when q-svc is enabled. +function _configure_neutron_service { + Q_API_PASTE_FILE=$NEUTRON_CONF_DIR/api-paste.ini + if test -r $NEUTRON_DIR/etc/neutron/api-paste.ini; then + cp $NEUTRON_DIR/etc/neutron/api-paste.ini $Q_API_PASTE_FILE else - install_neutron_new "$@" + # TODO(stephenfin): Remove this branch once [1] merges + # [1] https://review.opendev.org/c/openstack/neutron/+/961130 + cp $NEUTRON_DIR/etc/api-paste.ini $Q_API_PASTE_FILE fi + + # Update either configuration file with plugin + iniset $NEUTRON_CONF DEFAULT core_plugin $Q_PLUGIN_CLASS + + iniset $NEUTRON_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL + iniset $NEUTRON_CONF oslo_policy policy_file $Q_POLICY_FILE + + iniset $NEUTRON_CONF DEFAULT auth_strategy $Q_AUTH_STRATEGY + configure_keystone_authtoken_middleware $NEUTRON_CONF $Q_ADMIN_USERNAME + + # Configuration for neutron notifications to nova. 
+ iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_status_changes $Q_NOTIFY_NOVA_PORT_STATUS_CHANGES + iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_data_changes $Q_NOTIFY_NOVA_PORT_DATA_CHANGES + + configure_keystone_authtoken_middleware $NEUTRON_CONF nova nova + + # Configuration for placement client + configure_keystone_authtoken_middleware $NEUTRON_CONF placement placement + + # Configure plugin + neutron_plugin_configure_service } +# Utility Functions +#------------------ + +# neutron_service_plugin_class_add() - add service plugin class function neutron_service_plugin_class_add { - if is_neutron_legacy_enabled; then - # Call back to old function - _neutron_service_plugin_class_add "$@" - else - neutron_service_plugin_class_add_new "$@" + local service_plugin_class=$1 + if [[ $Q_SERVICE_PLUGIN_CLASSES == '' ]]; then + Q_SERVICE_PLUGIN_CLASSES=$service_plugin_class + elif [[ ! ,${Q_SERVICE_PLUGIN_CLASSES}, =~ ,${service_plugin_class}, ]]; then + Q_SERVICE_PLUGIN_CLASSES="$Q_SERVICE_PLUGIN_CLASSES,$service_plugin_class" fi } +# neutron_ml2_extension_driver_add() - add ML2 extension driver function neutron_ml2_extension_driver_add { - if is_neutron_legacy_enabled; then - # Call back to old function - _neutron_ml2_extension_driver_add_old "$@" - else - _neutron_ml2_extension_driver_add "$@" + local extension=$1 + if [[ $Q_ML2_PLUGIN_EXT_DRIVERS == '' ]]; then + Q_ML2_PLUGIN_EXT_DRIVERS=$extension + elif [[ ! ,${Q_ML2_PLUGIN_EXT_DRIVERS}, =~ ,${extension}, ]]; then + Q_ML2_PLUGIN_EXT_DRIVERS="$Q_ML2_PLUGIN_EXT_DRIVERS,$extension" fi } -function install_neutron_agent_packages { - if is_neutron_legacy_enabled; then - # Call back to old function - install_neutron_agent_packages_mutnauq "$@" - else - : - fi +# neutron_server_config_add() - add server config file +function neutron_server_config_add { + _Q_PLUGIN_EXTRA_CONF_FILES_ABS+=($1) } -function neutron_server_config_add { - if is_neutron_legacy_enabled; then - # Call back to old function - mutnauq_server_config_add "$@" - else - neutron_server_config_add_new "$@" +# neutron_deploy_rootwrap_filters() - deploy rootwrap filters to $Q_CONF_ROOTWRAP_D (owned by root). 
+function neutron_deploy_rootwrap_filters { + if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then + return fi + local srcdir=$1 + sudo install -d -o root -m 755 $Q_CONF_ROOTWRAP_D + sudo install -o root -m 644 $srcdir/etc/neutron/rootwrap.d/* $Q_CONF_ROOTWRAP_D/ } -function start_neutron { - if is_neutron_legacy_enabled; then - # Call back to old function - start_mutnauq_l2_agent "$@" - start_mutnauq_other_agents "$@" +# _neutron_setup_rootwrap() - configure Neutron's rootwrap +function _neutron_setup_rootwrap { + if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then + return + fi + # Wipe any existing ``rootwrap.d`` files first + Q_CONF_ROOTWRAP_D=$NEUTRON_CONF_DIR/rootwrap.d + if [[ -d $Q_CONF_ROOTWRAP_D ]]; then + sudo rm -rf $Q_CONF_ROOTWRAP_D + fi + + neutron_deploy_rootwrap_filters $NEUTRON_DIR + + # Set up ``rootwrap.conf``, pointing to ``$NEUTRON_CONF_DIR/rootwrap.d`` + # location moved in newer versions, prefer new location + if test -r $NEUTRON_DIR/etc/neutron/rootwrap.conf; then + sudo install -o root -g root -m 644 $NEUTRON_DIR/etc/neutron/rootwrap.conf $Q_RR_CONF_FILE else - start_neutron_new "$@" + # TODO(stephenfin): Remove this branch once [1] merges + # [1] https://review.opendev.org/c/openstack/neutron/+/961130 + sudo install -o root -g root -m 644 $NEUTRON_DIR/etc/rootwrap.conf $Q_RR_CONF_FILE fi + sudo sed -e "s:^filters_path=.*$:filters_path=$Q_CONF_ROOTWRAP_D:" -i $Q_RR_CONF_FILE + # Rely on $PATH set by devstack to determine what is safe to execute + # by rootwrap rather than use explicit whitelist of paths in + # rootwrap.conf + sudo sed -e 's/^exec_dirs=.*/#&/' -i $Q_RR_CONF_FILE + + # Specify ``rootwrap.conf`` as first parameter to neutron-rootwrap + ROOTWRAP_SUDOER_CMD="$NEUTRON_ROOTWRAP $Q_RR_CONF_FILE *" + ROOTWRAP_DAEMON_SUDOER_CMD="$NEUTRON_ROOTWRAP-daemon $Q_RR_CONF_FILE" + + # Set up the rootwrap sudoers for neutron + TEMPFILE=`mktemp` + echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_SUDOER_CMD" >$TEMPFILE + echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_DAEMON_SUDOER_CMD" >>$TEMPFILE + chmod 0440 $TEMPFILE + sudo chown root:root $TEMPFILE + sudo mv $TEMPFILE /etc/sudoers.d/neutron-rootwrap + + # Update the root_helper + configure_root_helper_options $NEUTRON_CONF } -function stop_neutron { - if is_neutron_legacy_enabled; then - # Call back to old function - stop_mutnauq "$@" - else - stop_neutron_new "$@" +function configure_root_helper_options { + local conffile=$1 + iniset $conffile agent root_helper "$Q_RR_COMMAND" + if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then + iniset $conffile agent root_helper_daemon "$Q_RR_DAEMON_COMMAND" fi } -function neutron_deploy_rootwrap_filters { - if is_neutron_legacy_enabled; then - # Call back to old function - _neutron_deploy_rootwrap_filters "$@" - else - neutron_deploy_rootwrap_filters_new "$@" +function _neutron_setup_interface_driver { + + # ovs_use_veth needs to be set before the plugin configuration + # occurs to allow plugins to override the setting. 
+    iniset $1 DEFAULT ovs_use_veth $Q_OVS_USE_VETH
+
+    neutron_plugin_setup_interface_driver $1
+}
+# Functions for Neutron Exercises
+#--------------------------------
+
+# ssh check
+function _ssh_check_neutron {
+    local from_net=$1
+    local key_file=$2
+    local ip=$3
+    local user=$4
+    local timeout_sec=$5
+    local probe_cmd=""
+    probe_cmd=`_get_probe_cmd_prefix $from_net`
+    local testcmd="$probe_cmd ssh -o StrictHostKeyChecking=no -i $key_file ${user}@$ip echo success"
+    test_with_retry "$testcmd" "server $ip didn't become ssh-able" $timeout_sec
+}
+
+function plugin_agent_add_l2_agent_extension {
+    local l2_agent_extension=$1
+    if [[ -z "$L2_AGENT_EXTENSIONS" ]]; then
+        L2_AGENT_EXTENSIONS=$l2_agent_extension
+    elif [[ ! ,${L2_AGENT_EXTENSIONS}, =~ ,${l2_agent_extension}, ]]; then
+        L2_AGENT_EXTENSIONS+=",$l2_agent_extension"
     fi
 }
 
 # Restore xtrace
-$XTRACE
+$_XTRACE_NEUTRON
+
+# Tell emacs to use shell-script-mode
+## Local variables:
+## mode: shell-script
+## End:
diff --git a/lib/neutron-legacy b/lib/neutron-legacy
index 791ff18b10..e90400fec1 100644
--- a/lib/neutron-legacy
+++ b/lib/neutron-legacy
@@ -1,1010 +1,6 @@
 #!/bin/bash
-#
-# lib/neutron
-# functions - functions specific to neutron
-# Dependencies:
-# ``functions`` file
-# ``DEST`` must be defined
-# ``STACK_USER`` must be defined
 
+# TODO(slaweq): remove this file once other projects, e.g. Grenade, are
+# using lib/neutron
-# ``stack.sh`` calls the entry points in this order:
-#
-# - install_neutron_agent_packages
-# - install_neutronclient
-# - install_neutron
-# - install_neutron_third_party
-# - configure_neutron
-# - init_neutron
-# - configure_neutron_third_party
-# - init_neutron_third_party
-# - start_neutron_third_party
-# - create_nova_conf_neutron
-# - configure_neutron_after_post_config
-# - start_neutron_service_and_check
-# - check_neutron_third_party_integration
-# - start_neutron_agents
-# - create_neutron_initial_network
-#
-# ``unstack.sh`` calls the entry points in this order:
-#
-# - stop_neutron
-# - stop_neutron_third_party
-# - cleanup_neutron
-
-# Functions in lib/neutron are classified into the following categories:
-#
-# - entry points (called from stack.sh or unstack.sh)
-# - internal functions
-# - neutron exercises
-# - 3rd party programs
-
-
-# Neutron Networking
-# ------------------
-
-# Make sure that neutron is enabled in ``ENABLED_SERVICES``. If you want
-# to run Neutron on this host, make sure that q-svc is also in
-# ``ENABLED_SERVICES``.
-#
-# See "Neutron Network Configuration" below for additional variables
-# that must be set in localrc for connectivity across hosts with
-# Neutron.
- -# Settings -# -------- - - -# Neutron Network Configuration -# ----------------------------- - -if is_service_enabled tls-proxy; then - Q_PROTOCOL="https" -fi - - -# Set up default directories -GITDIR["python-neutronclient"]=$DEST/python-neutronclient - - -NEUTRON_DIR=$DEST/neutron -NEUTRON_FWAAS_DIR=$DEST/neutron-fwaas - -# Support entry points installation of console scripts -if [[ -d $NEUTRON_DIR/bin/neutron-server ]]; then - NEUTRON_BIN_DIR=$NEUTRON_DIR/bin -else - NEUTRON_BIN_DIR=$(get_python_exec_prefix) -fi - -NEUTRON_CONF_DIR=/etc/neutron -NEUTRON_CONF=$NEUTRON_CONF_DIR/neutron.conf -export NEUTRON_TEST_CONFIG_FILE=${NEUTRON_TEST_CONFIG_FILE:-"$NEUTRON_CONF_DIR/debug.ini"} - -# NEUTRON_DEPLOY_MOD_WSGI defines how neutron is deployed, allowed values: -# - False (default) : Run neutron under Eventlet -# - True : Run neutron under uwsgi -# TODO(annp): Switching to uwsgi in next cycle if things turn out to be stable -# enough -NEUTRON_DEPLOY_MOD_WSGI=$(trueorfalse False NEUTRON_DEPLOY_MOD_WSGI) - -NEUTRON_UWSGI_CONF=$NEUTRON_CONF_DIR/neutron-api-uwsgi.ini - -# Agent binaries. Note, binary paths for other agents are set in per-service -# scripts in lib/neutron_plugins/services/ -AGENT_DHCP_BINARY="$NEUTRON_BIN_DIR/neutron-dhcp-agent" -AGENT_L3_BINARY=${AGENT_L3_BINARY:-"$NEUTRON_BIN_DIR/neutron-l3-agent"} -AGENT_META_BINARY="$NEUTRON_BIN_DIR/neutron-metadata-agent" - -# Agent config files. Note, plugin-specific Q_PLUGIN_CONF_FILE is set and -# loaded from per-plugin scripts in lib/neutron_plugins/ -Q_DHCP_CONF_FILE=$NEUTRON_CONF_DIR/dhcp_agent.ini -Q_L3_CONF_FILE=$NEUTRON_CONF_DIR/l3_agent.ini -Q_META_CONF_FILE=$NEUTRON_CONF_DIR/metadata_agent.ini - -# Default name for Neutron database -Q_DB_NAME=${Q_DB_NAME:-neutron} -# Default Neutron Plugin -Q_PLUGIN=${Q_PLUGIN:-ml2} -# Default Neutron Port -Q_PORT=${Q_PORT:-9696} -# Default Neutron Internal Port when using TLS proxy -Q_PORT_INT=${Q_PORT_INT:-19696} -# Default Neutron Host -Q_HOST=${Q_HOST:-$SERVICE_HOST} -# Default protocol -Q_PROTOCOL=${Q_PROTOCOL:-$SERVICE_PROTOCOL} -# Default listen address -Q_LISTEN_ADDRESS=${Q_LISTEN_ADDRESS:-$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)} -# Default admin username -Q_ADMIN_USERNAME=${Q_ADMIN_USERNAME:-neutron} -# Default auth strategy -Q_AUTH_STRATEGY=${Q_AUTH_STRATEGY:-keystone} -# RHEL's support for namespaces requires using veths with ovs -Q_OVS_USE_VETH=${Q_OVS_USE_VETH:-False} -Q_USE_ROOTWRAP=${Q_USE_ROOTWRAP:-True} -Q_USE_ROOTWRAP_DAEMON=$(trueorfalse True Q_USE_ROOTWRAP_DAEMON) -# Meta data IP -Q_META_DATA_IP=${Q_META_DATA_IP:-$(ipv6_unquote $SERVICE_HOST)} -# Allow Overlapping IP among subnets -Q_ALLOW_OVERLAPPING_IP=${Q_ALLOW_OVERLAPPING_IP:-True} -Q_NOTIFY_NOVA_PORT_STATUS_CHANGES=${Q_NOTIFY_NOVA_PORT_STATUS_CHANGES:-True} -Q_NOTIFY_NOVA_PORT_DATA_CHANGES=${Q_NOTIFY_NOVA_PORT_DATA_CHANGES:-True} -VIF_PLUGGING_IS_FATAL=${VIF_PLUGGING_IS_FATAL:-True} -VIF_PLUGGING_TIMEOUT=${VIF_PLUGGING_TIMEOUT:-300} - -# The directory which contains files for Q_PLUGIN_EXTRA_CONF_FILES. -# /etc/neutron is assumed by many of devstack plugins. Do not change. -_Q_PLUGIN_EXTRA_CONF_PATH=/etc/neutron - -# List of config file names in addition to the main plugin config file -# To add additional plugin config files, use ``neutron_server_config_add`` -# utility function. For example: -# -# ``neutron_server_config_add file1`` -# -# These config files are relative to ``/etc/neutron``. The above -# example would specify ``--config-file /etc/neutron/file1`` for -# neutron server. 
-declare -a -g Q_PLUGIN_EXTRA_CONF_FILES - -# same as Q_PLUGIN_EXTRA_CONF_FILES, but with absolute path. -declare -a -g _Q_PLUGIN_EXTRA_CONF_FILES_ABS - - -Q_RR_CONF_FILE=$NEUTRON_CONF_DIR/rootwrap.conf -if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then - Q_RR_COMMAND="sudo" -else - NEUTRON_ROOTWRAP=$(get_rootwrap_location neutron) - Q_RR_COMMAND="sudo $NEUTRON_ROOTWRAP $Q_RR_CONF_FILE" - if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then - Q_RR_DAEMON_COMMAND="sudo $NEUTRON_ROOTWRAP-daemon $Q_RR_CONF_FILE" - fi -fi - - -# Distributed Virtual Router (DVR) configuration -# Can be: -# - ``legacy`` - No DVR functionality -# - ``dvr_snat`` - Controller or single node DVR -# - ``dvr`` - Compute node in multi-node DVR -# -Q_DVR_MODE=${Q_DVR_MODE:-legacy} -if [[ "$Q_DVR_MODE" != "legacy" ]]; then - Q_ML2_PLUGIN_MECHANISM_DRIVERS=openvswitch,l2population -fi - -# Provider Network Configurations -# -------------------------------- - -# The following variables control the Neutron ML2 plugins' allocation -# of tenant networks and availability of provider networks. If these -# are not configured in ``localrc``, tenant networks will be local to -# the host (with no remote connectivity), and no physical resources -# will be available for the allocation of provider networks. - -# To disable tunnels (GRE or VXLAN) for tenant networks, -# set to False in ``local.conf``. -# GRE tunnels are only supported by the openvswitch. -ENABLE_TENANT_TUNNELS=${ENABLE_TENANT_TUNNELS:-True} - -# If using GRE, VXLAN or GENEVE tunnels for tenant networks, -# specify the range of IDs from which tenant networks are -# allocated. Can be overridden in ``localrc`` if necessary. -TENANT_TUNNEL_RANGES=${TENANT_TUNNEL_RANGES:-1:1000} - -# To use VLANs for tenant networks, set to True in localrc. VLANs -# are supported by the ML2 plugins, requiring additional configuration -# described below. -ENABLE_TENANT_VLANS=${ENABLE_TENANT_VLANS:-False} - -# If using VLANs for tenant networks, set in ``localrc`` to specify -# the range of VLAN VIDs from which tenant networks are -# allocated. An external network switch must be configured to -# trunk these VLANs between hosts for multi-host connectivity. -# -# Example: ``TENANT_VLAN_RANGE=1000:1999`` -TENANT_VLAN_RANGE=${TENANT_VLAN_RANGE:-} - -# If using VLANs for tenant networks, or if using flat or VLAN -# provider networks, set in ``localrc`` to the name of the physical -# network, and also configure ``OVS_PHYSICAL_BRIDGE`` for the -# openvswitch agent or ``LB_PHYSICAL_INTERFACE`` for the linuxbridge -# agent, as described below. -# -# Example: ``PHYSICAL_NETWORK=default`` -PHYSICAL_NETWORK=${PHYSICAL_NETWORK:-public} - -# With the openvswitch agent, if using VLANs for tenant networks, -# or if using flat or VLAN provider networks, set in ``localrc`` to -# the name of the OVS bridge to use for the physical network. The -# bridge will be created if it does not already exist, but a -# physical interface must be manually added to the bridge as a -# port for external connectivity. -# -# Example: ``OVS_PHYSICAL_BRIDGE=br-eth1`` -OVS_PHYSICAL_BRIDGE=${OVS_PHYSICAL_BRIDGE:-br-ex} - -# With the linuxbridge agent, if using VLANs for tenant networks, -# or if using flat or VLAN provider networks, set in ``localrc`` to -# the name of the network interface to use for the physical -# network. 
-# -# Example: ``LB_PHYSICAL_INTERFACE=eth1`` -if [[ $Q_AGENT == "linuxbridge" && -z ${LB_PHYSICAL_INTERFACE} ]]; then - default_route_dev=$( (ip route; ip -6 route) | grep ^default | head -n 1 | awk '{print $5}') - die_if_not_set $LINENO default_route_dev "Failure retrieving default route device" - LB_PHYSICAL_INTERFACE=$default_route_dev -fi - -# When Neutron tunnels are enabled it is needed to specify the -# IP address of the end point in the local server. This IP is set -# by default to the same IP address that the HOST IP. -# This variable can be used to specify a different end point IP address -# Example: ``TUNNEL_ENDPOINT_IP=1.1.1.1`` -TUNNEL_ENDPOINT_IP=${TUNNEL_ENDPOINT_IP:-$HOST_IP} - -# With the openvswitch plugin, set to True in ``localrc`` to enable -# provider GRE tunnels when ``ENABLE_TENANT_TUNNELS`` is False. -# -# Example: ``OVS_ENABLE_TUNNELING=True`` -OVS_ENABLE_TUNNELING=${OVS_ENABLE_TUNNELING:-$ENABLE_TENANT_TUNNELS} - -# Use DHCP agent for providing metadata service in the case of -# without L3 agent (No Route Agent), set to True in localrc. -ENABLE_ISOLATED_METADATA=${ENABLE_ISOLATED_METADATA:-False} - -# Add a static route as dhcp option, so the request to 169.254.169.254 -# will be able to reach through a route(DHCP agent) -# This option require ENABLE_ISOLATED_METADATA = True -ENABLE_METADATA_NETWORK=${ENABLE_METADATA_NETWORK:-False} -# Neutron plugin specific functions -# --------------------------------- - -# Please refer to ``lib/neutron_plugins/README.md`` for details. -if [ -f $TOP_DIR/lib/neutron_plugins/$Q_PLUGIN ]; then - source $TOP_DIR/lib/neutron_plugins/$Q_PLUGIN -fi - -# Agent metering service plugin functions -# ------------------------------------------- - -# Hardcoding for 1 service plugin for now -source $TOP_DIR/lib/neutron_plugins/services/metering - -# L3 Service functions -source $TOP_DIR/lib/neutron_plugins/services/l3 -# Use security group or not -if has_neutron_plugin_security_group; then - Q_USE_SECGROUP=${Q_USE_SECGROUP:-True} -else - Q_USE_SECGROUP=False -fi - -# Save trace setting -_XTRACE_NEUTRON=$(set +o | grep xtrace) -set +o xtrace - - -# Functions -# --------- - -function _determine_config_server { - if [[ "$Q_PLUGIN_EXTRA_CONF_PATH" != '' ]]; then - if [[ "$Q_PLUGIN_EXTRA_CONF_PATH" = "$_Q_PLUGIN_EXTRA_CONF_PATH" ]]; then - deprecated "Q_PLUGIN_EXTRA_CONF_PATH is deprecated" - else - die $LINENO "Q_PLUGIN_EXTRA_CONF_PATH is deprecated" - fi - fi - if [[ ${#Q_PLUGIN_EXTRA_CONF_FILES[@]} > 0 ]]; then - deprecated "Q_PLUGIN_EXTRA_CONF_FILES is deprecated. Use neutron_server_config_add instead." - fi - for cfg_file in ${Q_PLUGIN_EXTRA_CONF_FILES[@]}; do - _Q_PLUGIN_EXTRA_CONF_FILES_ABS+=($_Q_PLUGIN_EXTRA_CONF_PATH/$cfg_file) - done - - local cfg_file - local opts="--config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE" - for cfg_file in ${_Q_PLUGIN_EXTRA_CONF_FILES_ABS[@]}; do - opts+=" --config-file $cfg_file" - done - echo "$opts" -} - -function _determine_config_l3 { - local opts="--config-file $NEUTRON_CONF --config-file $Q_L3_CONF_FILE" - echo "$opts" -} - -# For services and agents that require it, dynamically construct a list of -# --config-file arguments that are passed to the binary. -function determine_config_files { - local opts="" - case "$1" in - "neutron-server") opts="$(_determine_config_server)" ;; - "neutron-l3-agent") opts="$(_determine_config_l3)" ;; - esac - if [ -z "$opts" ] ; then - die $LINENO "Could not determine config files for $1." 
- fi - echo "$opts" -} - -# configure_mutnauq() -# Set common config for all neutron server and agents. -function configure_mutnauq { - _configure_neutron_common - iniset_rpc_backend neutron $NEUTRON_CONF - - if is_service_enabled q-metering; then - _configure_neutron_metering - fi - if is_service_enabled q-agt q-svc; then - _configure_neutron_service - fi - if is_service_enabled q-agt; then - _configure_neutron_plugin_agent - fi - if is_service_enabled q-dhcp; then - _configure_neutron_dhcp_agent - fi - if is_service_enabled q-l3; then - _configure_neutron_l3_agent - fi - if is_service_enabled q-meta; then - _configure_neutron_metadata_agent - fi - - if [[ "$Q_DVR_MODE" != "legacy" ]]; then - _configure_dvr - fi - if is_service_enabled ceilometer; then - _configure_neutron_ceilometer_notifications - fi - - if [[ $Q_AGENT == "ovn" ]]; then - configure_ovn - configure_ovn_plugin - fi - - iniset $NEUTRON_CONF DEFAULT api_workers "$API_WORKERS" - # devstack is not a tool for running uber scale OpenStack - # clouds, therefore running without a dedicated RPC worker - # for state reports is more than adequate. - iniset $NEUTRON_CONF DEFAULT rpc_state_report_workers 0 -} - -function create_nova_conf_neutron { - local conf=${1:-$NOVA_CONF} - iniset $conf neutron auth_type "password" - iniset $conf neutron auth_url "$KEYSTONE_SERVICE_URI" - iniset $conf neutron username "$Q_ADMIN_USERNAME" - iniset $conf neutron password "$SERVICE_PASSWORD" - iniset $conf neutron user_domain_name "$SERVICE_DOMAIN_NAME" - iniset $conf neutron project_name "$SERVICE_PROJECT_NAME" - iniset $conf neutron project_domain_name "$SERVICE_DOMAIN_NAME" - iniset $conf neutron auth_strategy "$Q_AUTH_STRATEGY" - iniset $conf neutron region_name "$REGION_NAME" - - # optionally set options in nova_conf - neutron_plugin_create_nova_conf $conf - - if is_service_enabled q-meta; then - iniset $conf neutron service_metadata_proxy "True" - fi - - iniset $conf DEFAULT vif_plugging_is_fatal "$VIF_PLUGGING_IS_FATAL" - iniset $conf DEFAULT vif_plugging_timeout "$VIF_PLUGGING_TIMEOUT" -} - -# create_mutnauq_accounts() - Set up common required neutron accounts - -# Tenant User Roles -# ------------------------------------------------------------------ -# service neutron admin # if enabled - -# Migrated from keystone_data.sh -function create_mutnauq_accounts { - local neutron_url - if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then - neutron_url=$Q_PROTOCOL://$SERVICE_HOST/networking/ - else - neutron_url=$Q_PROTOCOL://$SERVICE_HOST:$Q_PORT/ - fi - - if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then - - create_service_user "neutron" - - get_or_create_service "neutron" "network" "Neutron Service" - get_or_create_endpoint \ - "network" \ - "$REGION_NAME" "$neutron_url" - fi -} - -# init_mutnauq() - Initialize databases, etc. -function init_mutnauq { - recreate_database $Q_DB_NAME - time_start "dbsync" - # Run Neutron db migrations - $NEUTRON_BIN_DIR/neutron-db-manage --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE upgrade head - time_stop "dbsync" -} - -# install_mutnauq() - Collect source and prepare -function install_mutnauq { - # Install neutron-lib from git so we make sure we're testing - # the latest code. 
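# For illustration, opting in to the from-git install below is normally a
# matter of listing the library in ``local.conf`` (assuming devstack's
# standard LIBS_FROM_GIT mechanism, which is what use_library_from_git
# consults):
#
#   [[local|localrc]]
#   LIBS_FROM_GIT=neutron-lib
#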
- if use_library_from_git "neutron-lib"; then - git_clone_by_name "neutron-lib" - setup_dev_lib "neutron-lib" - fi - - git_clone $NEUTRON_REPO $NEUTRON_DIR $NEUTRON_BRANCH - setup_develop $NEUTRON_DIR - - if [[ $Q_AGENT == "ovn" ]]; then - install_ovn - fi -} - -# install_neutron_agent_packages() - Collect source and prepare -function install_neutron_agent_packages_mutnauq { - # radvd doesn't come with the OS. Install it if the l3 service is enabled. - if is_service_enabled q-l3; then - install_package radvd - fi - # install packages that are specific to plugin agent(s) - if is_service_enabled q-agt q-dhcp q-l3; then - neutron_plugin_install_agent_packages - fi -} - -# Finish neutron configuration -function configure_neutron_after_post_config { - if [[ $Q_SERVICE_PLUGIN_CLASSES != '' ]]; then - iniset $NEUTRON_CONF DEFAULT service_plugins $Q_SERVICE_PLUGIN_CLASSES - fi -} - -# Start running OVN processes -function start_ovn_services { - if [[ $Q_AGENT == "ovn" ]]; then - init_ovn - start_ovn - if [[ "$OVN_L3_CREATE_PUBLIC_NETWORK" == "True" ]]; then - if [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" != "True" ]]; then - echo "OVN_L3_CREATE_PUBLIC_NETWORK=True is being ignored " - echo "because NEUTRON_CREATE_INITIAL_NETWORKS is set to False" - else - create_public_bridge - fi - fi - fi -} - -# Start running processes -function start_neutron_service_and_check { - local service_port=$Q_PORT - local service_protocol=$Q_PROTOCOL - local cfg_file_options - local neutron_url - - cfg_file_options="$(determine_config_files neutron-server)" - - if is_service_enabled tls-proxy; then - service_port=$Q_PORT_INT - service_protocol="http" - fi - # Start the Neutron service - if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then - enable_service neutron-api - run_process neutron-api "$(which uwsgi) --procname-prefix neutron-api --ini $NEUTRON_UWSGI_CONF" - neutron_url=$Q_PROTOCOL://$Q_HOST/networking/ - enable_service neutron-rpc-server - run_process neutron-rpc-server "$NEUTRON_BIN_DIR/neutron-rpc-server $cfg_file_options" - else - run_process q-svc "$NEUTRON_BIN_DIR/neutron-server $cfg_file_options" - neutron_url=$service_protocol://$Q_HOST:$service_port - # Start proxy if enabled - if is_service_enabled tls-proxy; then - start_tls_proxy neutron '*' $Q_PORT $Q_HOST $Q_PORT_INT - fi - fi - echo "Waiting for Neutron to start..." 
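# The readiness probe that follows simply polls the API root until it
# answers or $SERVICE_TIMEOUT expires; conceptually it amounts to something
# like (sketch only):
#
#   until wget --no-proxy -q -O- "$neutron_url" >/dev/null; do sleep 1; done
#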
- - local testcmd="wget ${ssl_ca} --no-proxy -q -O- $neutron_url" - test_with_retry "$testcmd" "Neutron did not start" $SERVICE_TIMEOUT -} - -# Control of the l2 agent is separated out to make it easier to test partial -# upgrades (everything upgraded except the L2 agent) -function start_mutnauq_l2_agent { - run_process q-agt "$AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE" - - if is_provider_network && [[ $Q_AGENT == "openvswitch" ]]; then - sudo ovs-vsctl --no-wait -- --may-exist add-port $OVS_PHYSICAL_BRIDGE $PUBLIC_INTERFACE - sudo ip link set $OVS_PHYSICAL_BRIDGE up - sudo ip link set br-int up - sudo ip link set $PUBLIC_INTERFACE up - if is_ironic_hardware; then - for IP in $(ip addr show dev $PUBLIC_INTERFACE | grep ' inet ' | awk '{print $2}'); do - sudo ip addr del $IP dev $PUBLIC_INTERFACE - sudo ip addr add $IP dev $OVS_PHYSICAL_BRIDGE - done - sudo ip route replace $FIXED_RANGE via $NETWORK_GATEWAY dev $OVS_PHYSICAL_BRIDGE - fi - fi -} - -function start_mutnauq_other_agents { - run_process q-dhcp "$AGENT_DHCP_BINARY --config-file $NEUTRON_CONF --config-file $Q_DHCP_CONF_FILE" - - if is_service_enabled neutron-vpnaas; then - : # Started by plugin - else - run_process q-l3 "$AGENT_L3_BINARY $(determine_config_files neutron-l3-agent)" - fi - - run_process q-meta "$AGENT_META_BINARY --config-file $NEUTRON_CONF --config-file $Q_META_CONF_FILE" - run_process q-metering "$AGENT_METERING_BINARY --config-file $NEUTRON_CONF --config-file $METERING_AGENT_CONF_FILENAME" -} - -# Start running processes, including screen -function start_neutron_agents { - # Start up the neutron agents if enabled - start_mutnauq_l2_agent - start_mutnauq_other_agents -} - -function stop_mutnauq_l2_agent { - stop_process q-agt -} - -# stop_mutnauq_other() - Stop running processes -function stop_mutnauq_other { - if is_service_enabled q-dhcp; then - stop_process q-dhcp - pid=$(ps aux | awk '/[d]nsmasq.+interface=(tap|ns-)/ { print $2 }') - [ ! -z "$pid" ] && sudo kill -9 $pid - fi - - if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then - stop_process neutron-rpc-server - stop_process neutron-api - else - stop_process q-svc - fi - - if is_service_enabled q-l3; then - sudo pkill -f "radvd -C $DATA_DIR/neutron/ra" - stop_process q-l3 - fi - - if is_service_enabled q-meta; then - sudo pkill -9 -f neutron-ns-metadata-proxy || : - stop_process q-meta - fi - - if is_service_enabled q-metering; then - neutron_metering_stop - fi - - if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then - sudo pkill -9 -f $NEUTRON_ROOTWRAP-daemon || : - fi -} - -# stop_neutron() - Stop running processes (non-screen) -function stop_mutnauq { - stop_mutnauq_other - stop_mutnauq_l2_agent - - if [[ $Q_AGENT == "ovn" ]]; then - stop_ovn - fi -} - -# _move_neutron_addresses_route() - Move the primary IP to the OVS bridge -# on startup, or back to the public interface on cleanup. If no IP is -# configured on the interface, just add it as a port to the OVS bridge. -function _move_neutron_addresses_route { - local from_intf=$1 - local to_intf=$2 - local add_ovs_port=$3 - local del_ovs_port=$4 - local af=$5 - - if [[ -n "$from_intf" && -n "$to_intf" ]]; then - # Remove the primary IP address from $from_intf and add it to $to_intf, - # along with the default route, if it exists. Also, when called - # on configure we will also add $from_intf as a port on $to_intf, - # assuming it is an OVS bridge. 
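# For example, a typical "configure" invocation (the mirror image of the
# cleanup call further below) might look like:
#
#   _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" True False "inet"
#
# i.e. move the primary IPv4 address and default route from the public
# interface to the OVS bridge and add the interface as a port on that bridge.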
- - local IP_REPLACE="" - local IP_DEL="" - local IP_UP="" - local DEFAULT_ROUTE_GW - DEFAULT_ROUTE_GW=$(ip -f $af r | awk "/default.+$from_intf\s/ { print \$3; exit }") - local ADD_OVS_PORT="" - local DEL_OVS_PORT="" - local ARP_CMD="" - - IP_BRD=$(ip -f $af a s dev $from_intf scope global primary | grep inet | awk '{ print $2, $3, $4; exit }') - - if [ "$DEFAULT_ROUTE_GW" != "" ]; then - ADD_DEFAULT_ROUTE="sudo ip -f $af r replace default via $DEFAULT_ROUTE_GW dev $to_intf" - fi - - if [[ "$add_ovs_port" == "True" ]]; then - ADD_OVS_PORT="sudo ovs-vsctl --may-exist add-port $to_intf $from_intf" - fi - - if [[ "$del_ovs_port" == "True" ]]; then - DEL_OVS_PORT="sudo ovs-vsctl --if-exists del-port $from_intf $to_intf" - fi - - if [[ "$IP_BRD" != "" ]]; then - IP_DEL="sudo ip addr del $IP_BRD dev $from_intf" - IP_REPLACE="sudo ip addr replace $IP_BRD dev $to_intf" - IP_UP="sudo ip link set $to_intf up" - if [[ "$af" == "inet" ]]; then - IP=$(echo $IP_BRD | awk '{ print $1; exit }' | grep -o -E '(.*)/' | cut -d "/" -f1) - ARP_CMD="sudo arping -A -c 3 -w 5 -I $to_intf $IP " - fi - fi - - # The add/del OVS port calls have to happen either before or - # after the address is moved in order to not leave it orphaned. - $DEL_OVS_PORT; $IP_DEL; $IP_REPLACE; $IP_UP; $ADD_OVS_PORT; $ADD_DEFAULT_ROUTE; $ARP_CMD - fi -} - -# cleanup_mutnauq() - Remove residual data files, anything left over from previous -# runs that a clean run would need to clean up -function cleanup_mutnauq { - - if [[ -n "$OVS_PHYSICAL_BRIDGE" ]]; then - _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False True "inet" - - if [[ $(ip -f inet6 a s dev "$OVS_PHYSICAL_BRIDGE" | grep -c 'global') != 0 ]]; then - # ip(8) wants the prefix length when deleting - local v6_gateway - v6_gateway=$(ip -6 a s dev $OVS_PHYSICAL_BRIDGE | grep $IPV6_PUBLIC_NETWORK_GATEWAY | awk '{ print $2 }') - sudo ip -6 addr del $v6_gateway dev $OVS_PHYSICAL_BRIDGE - _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False False "inet6" - fi - - if is_provider_network && is_ironic_hardware; then - for IP in $(ip addr show dev $OVS_PHYSICAL_BRIDGE | grep ' inet ' | awk '{print $2}'); do - sudo ip addr del $IP dev $OVS_PHYSICAL_BRIDGE - sudo ip addr add $IP dev $PUBLIC_INTERFACE - done - sudo route del -net $FIXED_RANGE gw $NETWORK_GATEWAY dev $OVS_PHYSICAL_BRIDGE - fi - fi - - if is_neutron_ovs_base_plugin; then - neutron_ovs_base_cleanup - fi - - if [[ $Q_AGENT == "linuxbridge" ]]; then - neutron_lb_cleanup - fi - - # delete all namespaces created by neutron - for ns in $(sudo ip netns list | grep -o -E '(qdhcp|qrouter|fip|snat)-[0-9a-f-]*'); do - sudo ip netns delete ${ns} - done - - if [[ $Q_AGENT == "ovn" ]]; then - cleanup_ovn - fi -} - - -function _create_neutron_conf_dir { - # Put config files in ``NEUTRON_CONF_DIR`` for everyone to find - sudo install -d -o $STACK_USER $NEUTRON_CONF_DIR -} - -# _configure_neutron_common() -# Set common config for all neutron server and agents. -# This MUST be called before other ``_configure_neutron_*`` functions. -function _configure_neutron_common { - _create_neutron_conf_dir - - # Uses oslo config generator to generate core sample configuration files - (cd $NEUTRON_DIR && exec ./tools/generate_config_file_samples.sh) - - cp $NEUTRON_DIR/etc/neutron.conf.sample $NEUTRON_CONF - - Q_POLICY_FILE=$NEUTRON_CONF_DIR/policy.json - - # allow neutron user to administer neutron to match neutron account - # NOTE(amotoki): This is required for nova works correctly with neutron. 
- if [ -f $NEUTRON_DIR/etc/policy.json ]; then - cp $NEUTRON_DIR/etc/policy.json $Q_POLICY_FILE - sed -i 's/"context_is_admin": "role:admin"/"context_is_admin": "role:admin or user_name:neutron"/g' $Q_POLICY_FILE - else - echo '{"context_is_admin": "role:admin or user_name:neutron"}' > $Q_POLICY_FILE - fi - - # Set plugin-specific variables ``Q_DB_NAME``, ``Q_PLUGIN_CLASS``. - # For main plugin config file, set ``Q_PLUGIN_CONF_PATH``, ``Q_PLUGIN_CONF_FILENAME``. - neutron_plugin_configure_common - - if [[ "$Q_PLUGIN_CONF_PATH" == '' || "$Q_PLUGIN_CONF_FILENAME" == '' || "$Q_PLUGIN_CLASS" == '' ]]; then - die $LINENO "Neutron plugin not set.. exiting" - fi - - # If needed, move config file from ``$NEUTRON_DIR/etc/neutron`` to ``NEUTRON_CONF_DIR`` - mkdir -p /$Q_PLUGIN_CONF_PATH - Q_PLUGIN_CONF_FILE=$Q_PLUGIN_CONF_PATH/$Q_PLUGIN_CONF_FILENAME - # NOTE(hichihara): Some neutron vendor plugins were already decomposed and - # there is no config file in Neutron tree. They should prepare the file in each plugin. - if [ -f "$NEUTRON_DIR/$Q_PLUGIN_CONF_FILE.sample" ]; then - cp "$NEUTRON_DIR/$Q_PLUGIN_CONF_FILE.sample" /$Q_PLUGIN_CONF_FILE - elif [ -f $NEUTRON_DIR/$Q_PLUGIN_CONF_FILE ]; then - cp $NEUTRON_DIR/$Q_PLUGIN_CONF_FILE /$Q_PLUGIN_CONF_FILE - fi - - iniset $NEUTRON_CONF database connection `database_connection_url $Q_DB_NAME` - iniset $NEUTRON_CONF DEFAULT state_path $DATA_DIR/neutron - iniset $NEUTRON_CONF DEFAULT use_syslog $SYSLOG - iniset $NEUTRON_CONF DEFAULT bind_host $Q_LISTEN_ADDRESS - iniset $NEUTRON_CONF oslo_concurrency lock_path $DATA_DIR/neutron/lock - - # NOTE(freerunner): Need to adjust Region Name for nova in multiregion installation - iniset $NEUTRON_CONF nova region_name $REGION_NAME - - if [ "$VIRT_DRIVER" = 'fake' ]; then - # Disable arbitrary limits - iniset $NEUTRON_CONF quotas quota_network -1 - iniset $NEUTRON_CONF quotas quota_subnet -1 - iniset $NEUTRON_CONF quotas quota_port -1 - iniset $NEUTRON_CONF quotas quota_security_group -1 - iniset $NEUTRON_CONF quotas quota_security_group_rule -1 - fi - - # Format logging - setup_logging $NEUTRON_CONF - - if is_service_enabled tls-proxy && [ "$NEUTRON_DEPLOY_MOD_WSGI" == "False" ]; then - # Set the service port for a proxy to take the original - iniset $NEUTRON_CONF DEFAULT bind_port "$Q_PORT_INT" - iniset $NEUTRON_CONF oslo_middleware enable_proxy_headers_parsing True - fi - - _neutron_setup_rootwrap -} - -function _configure_neutron_dhcp_agent { - - cp $NEUTRON_DIR/etc/dhcp_agent.ini.sample $Q_DHCP_CONF_FILE - - iniset $Q_DHCP_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - # make it so we have working DNS from guests - iniset $Q_DHCP_CONF_FILE DEFAULT dnsmasq_local_resolv True - iniset $Q_DHCP_CONF_FILE AGENT root_helper "$Q_RR_COMMAND" - if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then - iniset $Q_DHCP_CONF_FILE AGENT root_helper_daemon "$Q_RR_DAEMON_COMMAND" - fi - - if ! 
is_service_enabled q-l3; then - if [[ "$ENABLE_ISOLATED_METADATA" = "True" ]]; then - iniset $Q_DHCP_CONF_FILE DEFAULT enable_isolated_metadata $ENABLE_ISOLATED_METADATA - iniset $Q_DHCP_CONF_FILE DEFAULT enable_metadata_network $ENABLE_METADATA_NETWORK - else - if [[ "$ENABLE_METADATA_NETWORK" = "True" ]]; then - die "$LINENO" "Enable isolated metadata is a must for metadata network" - fi - fi - fi - - _neutron_setup_interface_driver $Q_DHCP_CONF_FILE - - neutron_plugin_configure_dhcp_agent $Q_DHCP_CONF_FILE -} - - -function _configure_neutron_metadata_agent { - cp $NEUTRON_DIR/etc/metadata_agent.ini.sample $Q_META_CONF_FILE - - iniset $Q_META_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - iniset $Q_META_CONF_FILE DEFAULT nova_metadata_host $Q_META_DATA_IP - iniset $Q_META_CONF_FILE DEFAULT metadata_workers $API_WORKERS - iniset $Q_META_CONF_FILE AGENT root_helper "$Q_RR_COMMAND" - if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then - iniset $Q_META_CONF_FILE AGENT root_helper_daemon "$Q_RR_DAEMON_COMMAND" - fi -} - -function _configure_neutron_ceilometer_notifications { - iniset $NEUTRON_CONF oslo_messaging_notifications driver messagingv2 -} - -function _configure_neutron_metering { - neutron_agent_metering_configure_common - neutron_agent_metering_configure_agent -} - -function _configure_dvr { - iniset $NEUTRON_CONF DEFAULT router_distributed True - iniset $Q_L3_CONF_FILE DEFAULT agent_mode $Q_DVR_MODE -} - - -# _configure_neutron_plugin_agent() - Set config files for neutron plugin agent -# It is called when q-agt is enabled. -function _configure_neutron_plugin_agent { - # Specify the default root helper prior to agent configuration to - # ensure that an agent's configuration can override the default - iniset /$Q_PLUGIN_CONF_FILE agent root_helper "$Q_RR_COMMAND" - if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then - iniset /$Q_PLUGIN_CONF_FILE agent root_helper_daemon "$Q_RR_DAEMON_COMMAND" - fi - iniset $NEUTRON_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - - # Configure agent for plugin - neutron_plugin_configure_plugin_agent -} - -# _configure_neutron_service() - Set config files for neutron service -# It is called when q-svc is enabled. -function _configure_neutron_service { - Q_API_PASTE_FILE=$NEUTRON_CONF_DIR/api-paste.ini - cp $NEUTRON_DIR/etc/api-paste.ini $Q_API_PASTE_FILE - - # Update either configuration file with plugin - iniset $NEUTRON_CONF DEFAULT core_plugin $Q_PLUGIN_CLASS - - iniset $NEUTRON_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - iniset $NEUTRON_CONF oslo_policy policy_file $Q_POLICY_FILE - iniset $NEUTRON_CONF DEFAULT allow_overlapping_ips $Q_ALLOW_OVERLAPPING_IP - - iniset $NEUTRON_CONF DEFAULT auth_strategy $Q_AUTH_STRATEGY - configure_keystone_authtoken_middleware $NEUTRON_CONF $Q_ADMIN_USERNAME - - # Configuration for neutron notifications to nova. - iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_status_changes $Q_NOTIFY_NOVA_PORT_STATUS_CHANGES - iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_data_changes $Q_NOTIFY_NOVA_PORT_DATA_CHANGES - - configure_keystone_authtoken_middleware $NEUTRON_CONF nova nova - - # Configure plugin - neutron_plugin_configure_service -} - -# Utility Functions -#------------------ - -# _neutron_service_plugin_class_add() - add service plugin class -function _neutron_service_plugin_class_add { - local service_plugin_class=$1 - if [[ $Q_SERVICE_PLUGIN_CLASSES == '' ]]; then - Q_SERVICE_PLUGIN_CLASSES=$service_plugin_class - elif [[ ! 
,${Q_SERVICE_PLUGIN_CLASSES}, =~ ,${service_plugin_class}, ]]; then - Q_SERVICE_PLUGIN_CLASSES="$Q_SERVICE_PLUGIN_CLASSES,$service_plugin_class" - fi -} - -# _neutron_ml2_extension_driver_add_old() - add ML2 extension driver -function _neutron_ml2_extension_driver_add_old { - local extension=$1 - if [[ $Q_ML2_PLUGIN_EXT_DRIVERS == '' ]]; then - Q_ML2_PLUGIN_EXT_DRIVERS=$extension - elif [[ ! ,${Q_ML2_PLUGIN_EXT_DRIVERS}, =~ ,${extension}, ]]; then - Q_ML2_PLUGIN_EXT_DRIVERS="$Q_ML2_PLUGIN_EXT_DRIVERS,$extension" - fi -} - -# mutnauq_server_config_add() - add server config file -function mutnauq_server_config_add { - _Q_PLUGIN_EXTRA_CONF_FILES_ABS+=($1) -} - -# _neutron_deploy_rootwrap_filters() - deploy rootwrap filters to $Q_CONF_ROOTWRAP_D (owned by root). -function _neutron_deploy_rootwrap_filters { - if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then - return - fi - local srcdir=$1 - sudo install -d -o root -m 755 $Q_CONF_ROOTWRAP_D - sudo install -o root -m 644 $srcdir/etc/neutron/rootwrap.d/* $Q_CONF_ROOTWRAP_D/ -} - -# _neutron_setup_rootwrap() - configure Neutron's rootwrap -function _neutron_setup_rootwrap { - if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then - return - fi - # Wipe any existing ``rootwrap.d`` files first - Q_CONF_ROOTWRAP_D=$NEUTRON_CONF_DIR/rootwrap.d - if [[ -d $Q_CONF_ROOTWRAP_D ]]; then - sudo rm -rf $Q_CONF_ROOTWRAP_D - fi - - _neutron_deploy_rootwrap_filters $NEUTRON_DIR - - # Set up ``rootwrap.conf``, pointing to ``$NEUTRON_CONF_DIR/rootwrap.d`` - # location moved in newer versions, prefer new location - if test -r $NEUTRON_DIR/etc/neutron/rootwrap.conf; then - sudo install -o root -g root -m 644 $NEUTRON_DIR/etc/neutron/rootwrap.conf $Q_RR_CONF_FILE - else - sudo install -o root -g root -m 644 $NEUTRON_DIR/etc/rootwrap.conf $Q_RR_CONF_FILE - fi - sudo sed -e "s:^filters_path=.*$:filters_path=$Q_CONF_ROOTWRAP_D:" -i $Q_RR_CONF_FILE - sudo sed -e 's:^exec_dirs=\(.*\)$:exec_dirs=\1,/usr/local/bin:' -i $Q_RR_CONF_FILE - - # Specify ``rootwrap.conf`` as first parameter to neutron-rootwrap - ROOTWRAP_SUDOER_CMD="$NEUTRON_ROOTWRAP $Q_RR_CONF_FILE *" - ROOTWRAP_DAEMON_SUDOER_CMD="$NEUTRON_ROOTWRAP-daemon $Q_RR_CONF_FILE" - - # Set up the rootwrap sudoers for neutron - TEMPFILE=`mktemp` - echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_SUDOER_CMD" >$TEMPFILE - echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_DAEMON_SUDOER_CMD" >>$TEMPFILE - chmod 0440 $TEMPFILE - sudo chown root:root $TEMPFILE - sudo mv $TEMPFILE /etc/sudoers.d/neutron-rootwrap - - # Update the root_helper - iniset $NEUTRON_CONF agent root_helper "$Q_RR_COMMAND" - if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then - iniset $NEUTRON_CONF agent root_helper_daemon "$Q_RR_DAEMON_COMMAND" - fi -} - -function _neutron_setup_interface_driver { - - # ovs_use_veth needs to be set before the plugin configuration - # occurs to allow plugins to override the setting. 
- iniset $1 DEFAULT ovs_use_veth $Q_OVS_USE_VETH - - neutron_plugin_setup_interface_driver $1 -} -# Functions for Neutron Exercises -#-------------------------------- - -function delete_probe { - local from_net="$1" - net_id=`_get_net_id $from_net` - probe_id=`neutron-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-list -c id -c network_id | grep $net_id | awk '{print $2}'` - neutron-debug --os-tenant-name admin --os-username admin probe-delete $probe_id -} - -function _get_net_id { - openstack --os-cloud devstack-admin --os-region-name="$REGION_NAME" --os-project-name admin --os-username admin --os-password $ADMIN_PASSWORD network list | grep $1 | awk '{print $2}' -} - -function _get_probe_cmd_prefix { - local from_net="$1" - net_id=`_get_net_id $from_net` - probe_id=`neutron-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-list -c id -c network_id | grep $net_id | awk '{print $2}' | head -n 1` - echo "$Q_RR_COMMAND ip netns exec qprobe-$probe_id" -} - -# ssh check -function _ssh_check_neutron { - local from_net=$1 - local key_file=$2 - local ip=$3 - local user=$4 - local timeout_sec=$5 - local probe_cmd = "" - probe_cmd=`_get_probe_cmd_prefix $from_net` - local testcmd="$probe_cmd ssh -o StrictHostKeyChecking=no -i $key_file ${user}@$ip echo success" - test_with_retry "$testcmd" "server $ip didn't become ssh-able" $timeout_sec -} - -# Restore xtrace -$_XTRACE_NEUTRON - -# Tell emacs to use shell-script-mode -## Local variables: -## mode: shell-script -## End: +source $TOP_DIR/lib/neutron diff --git a/lib/neutron_plugins/README.md b/lib/neutron_plugins/README.md index ed40886fda..728aaee85f 100644 --- a/lib/neutron_plugins/README.md +++ b/lib/neutron_plugins/README.md @@ -13,7 +13,7 @@ Plugin specific configuration variables should be in this file. 
functions --------- -``lib/neutron-legacy`` calls the following functions when the ``$Q_PLUGIN`` is enabled +``lib/neutron`` calls the following functions when the ``$Q_PLUGIN`` is enabled * ``neutron_plugin_create_nova_conf`` : optionally set options in nova_conf diff --git a/lib/neutron_plugins/bigswitch_floodlight b/lib/neutron_plugins/bigswitch_floodlight index d3f5bd5752..84ca7ec42c 100644 --- a/lib/neutron_plugins/bigswitch_floodlight +++ b/lib/neutron_plugins/bigswitch_floodlight @@ -67,7 +67,7 @@ function has_neutron_plugin_security_group { } function neutron_plugin_check_adv_test_requirements { - is_service_enabled q-agt && is_service_enabled q-dhcp && return 0 + is_service_enabled q-agt neutron-agent && is_service_enabled q-dhcp neutron-dhcp && return 0 } # Restore xtrace diff --git a/lib/neutron_plugins/brocade b/lib/neutron_plugins/brocade index 310b72e5ad..96400634af 100644 --- a/lib/neutron_plugins/brocade +++ b/lib/neutron_plugins/brocade @@ -72,7 +72,7 @@ function has_neutron_plugin_security_group { } function neutron_plugin_check_adv_test_requirements { - is_service_enabled q-agt && is_service_enabled q-dhcp && return 0 + is_service_enabled q-agt neutron-agent && is_service_enabled q-dhcp neutron-dhcp && return 0 } # Restore xtrace diff --git a/lib/neutron_plugins/linuxbridge_agent b/lib/neutron_plugins/linuxbridge_agent deleted file mode 100644 index bdeaf0f3c6..0000000000 --- a/lib/neutron_plugins/linuxbridge_agent +++ /dev/null @@ -1,104 +0,0 @@ -#!/bin/bash -# -# Neutron Linux Bridge L2 agent -# ----------------------------- - -# Save trace setting -_XTRACE_NEUTRON_LB=$(set +o | grep xtrace) -set +o xtrace - -function neutron_lb_cleanup { - sudo ip link delete $PUBLIC_BRIDGE - - bridge_list=`ls /sys/class/net/*/bridge/bridge_id 2>/dev/null | cut -f5 -d/` - if [[ -z "$bridge_list" ]]; then - return - fi - if [[ "$Q_ML2_TENANT_NETWORK_TYPE" = "vxlan" ]]; then - for port in $(echo $bridge_list | grep -o -e [a-zA-Z\-]*tap[0-9a-f\-]* -e vxlan-[0-9a-f\-]*); do - sudo ip link delete $port - done - elif [[ "$Q_ML2_TENANT_NETWORK_TYPE" = "vlan" ]]; then - for port in $(echo $bridge_list | grep -o -e [a-zA-Z\-]*tap[0-9a-f\-]* -e ${LB_PHYSICAL_INTERFACE}\.[0-9a-f\-]*); do - sudo ip link delete $port - done - fi - for bridge in $(echo $bridge_list |grep -o -e brq[0-9a-f\-]*); do - sudo ip link delete $bridge - done -} - -function is_neutron_ovs_base_plugin { - # linuxbridge doesn't use OVS - return 1 -} - -function neutron_plugin_create_nova_conf { - : -} - -function neutron_plugin_install_agent_packages { - : -} - -function neutron_plugin_configure_dhcp_agent { - local conf_file=$1 - : -} - -function neutron_plugin_configure_l3_agent { - local conf_file=$1 - sudo ip link add $PUBLIC_BRIDGE type bridge - set_mtu $PUBLIC_BRIDGE $PUBLIC_BRIDGE_MTU -} - -function neutron_plugin_configure_plugin_agent { - # Setup physical network interface mappings. Override - # ``LB_VLAN_RANGES`` and ``LB_INTERFACE_MAPPINGS`` in ``localrc`` for more - # complex physical network configurations. 
- if [[ "$LB_INTERFACE_MAPPINGS" == "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]] && [[ "$LB_PHYSICAL_INTERFACE" != "" ]]; then - LB_INTERFACE_MAPPINGS=$PHYSICAL_NETWORK:$LB_PHYSICAL_INTERFACE - fi - if [[ "$PUBLIC_BRIDGE" != "" ]] && [[ "$PUBLIC_PHYSICAL_NETWORK" != "" ]]; then - if is_service_enabled q-l3 || is_service_enabled neutron-l3; then - iniset /$Q_PLUGIN_CONF_FILE linux_bridge bridge_mappings "$PUBLIC_PHYSICAL_NETWORK:$PUBLIC_BRIDGE" - fi - fi - if [[ "$LB_INTERFACE_MAPPINGS" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE linux_bridge physical_interface_mappings $LB_INTERFACE_MAPPINGS - fi - if [[ "$Q_USE_SECGROUP" == "True" ]]; then - iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.linux.iptables_firewall.IptablesFirewallDriver - if ! running_in_container; then - enable_kernel_bridge_firewall - fi - else - iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.firewall.NoopFirewallDriver - fi - AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-linuxbridge-agent" - iniset /$Q_PLUGIN_CONF_FILE agent tunnel_types $Q_TUNNEL_TYPES - - # Configure vxlan tunneling - if [[ "$ENABLE_TENANT_TUNNELS" == "True" ]]; then - if [[ "$Q_ML2_TENANT_NETWORK_TYPE" == "vxlan" ]]; then - iniset /$Q_PLUGIN_CONF_FILE vxlan enable_vxlan "True" - iniset /$Q_PLUGIN_CONF_FILE vxlan local_ip $TUNNEL_ENDPOINT_IP - else - iniset /$Q_PLUGIN_CONF_FILE vxlan enable_vxlan "False" - fi - else - iniset /$Q_PLUGIN_CONF_FILE vxlan enable_vxlan "False" - fi -} - -function neutron_plugin_setup_interface_driver { - local conf_file=$1 - iniset $conf_file DEFAULT interface_driver linuxbridge -} - -function neutron_plugin_check_adv_test_requirements { - is_service_enabled q-agt && is_service_enabled q-dhcp && return 0 -} - -# Restore xtrace -$_XTRACE_NEUTRON_LB diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2 index ae4b251d83..687167bf79 100644 --- a/lib/neutron_plugins/ml2 +++ b/lib/neutron_plugins/ml2 @@ -7,15 +7,16 @@ _XTRACE_NEUTRON_ML2=$(set +o | grep xtrace) set +o xtrace -# Default openvswitch L2 agent -Q_AGENT=${Q_AGENT:-openvswitch} +# Default OVN L2 agent +Q_AGENT=${Q_AGENT:-ovn} if [ -f $TOP_DIR/lib/neutron_plugins/${Q_AGENT}_agent ]; then source $TOP_DIR/lib/neutron_plugins/${Q_AGENT}_agent fi # Enable this to simply and quickly enable tunneling with ML2. -# Select either 'gre', 'vxlan', or 'gre,vxlan' -Q_ML2_TENANT_NETWORK_TYPE=${Q_ML2_TENANT_NETWORK_TYPE:-"vxlan"} +# For ML2/OVS select either 'gre', 'vxlan', or 'gre,vxlan'. +# For ML2/OVN use 'geneve'. +Q_ML2_TENANT_NETWORK_TYPE=${Q_ML2_TENANT_NETWORK_TYPE:-"geneve"} # This has to be set here since the agent will set this in the config file if [[ "$Q_ML2_TENANT_NETWORK_TYPE" == "gre" || "$Q_ML2_TENANT_NETWORK_TYPE" == "vxlan" ]]; then Q_TUNNEL_TYPES=$Q_ML2_TENANT_NETWORK_TYPE @@ -24,7 +25,7 @@ elif [[ "$ENABLE_TENANT_TUNNELS" == "True" ]]; then fi # List of MechanismDrivers to load -Q_ML2_PLUGIN_MECHANISM_DRIVERS=${Q_ML2_PLUGIN_MECHANISM_DRIVERS:-openvswitch,linuxbridge} +Q_ML2_PLUGIN_MECHANISM_DRIVERS=${Q_ML2_PLUGIN_MECHANISM_DRIVERS:-ovn} # Default GRE TypeDriver options Q_ML2_PLUGIN_GRE_TYPE_OPTIONS=${Q_ML2_PLUGIN_GRE_TYPE_OPTIONS:-tunnel_id_ranges=$TENANT_TUNNEL_RANGES} # Default VXLAN TypeDriver options @@ -66,7 +67,7 @@ function neutron_plugin_configure_common { Q_PLUGIN_CLASS="ml2" # The ML2 plugin delegates L3 routing/NAT functionality to # the L3 service plugin which must therefore be specified. 
- _neutron_service_plugin_class_add $ML2_L3_PLUGIN + neutron_service_plugin_class_add $ML2_L3_PLUGIN } function neutron_plugin_configure_service { @@ -110,20 +111,10 @@ function neutron_plugin_configure_service { fi fi fi - # REVISIT(rkukura): Setting firewall_driver here for - # neutron.agent.securitygroups_rpc.is_firewall_enabled() which is - # used in the server, in case no L2 agent is configured on the - # server's node. If an L2 agent is configured, this will get - # overridden with the correct driver. The ml2 plugin should - # instead use its own config variable to indicate whether security - # groups is enabled, and that will need to be set here instead. - if [[ "$Q_USE_SECGROUP" == "True" ]]; then - iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.not.a.real.FirewallDriver - else - iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.firewall.NoopFirewallDriver - fi + populate_ml2_config /$Q_PLUGIN_CONF_FILE securitygroup enable_security_group=$Q_USE_SECGROUP populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 mechanism_drivers=$Q_ML2_PLUGIN_MECHANISM_DRIVERS + populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 overlay_ip_version=$TUNNEL_IP_VERSION if [[ -n "$Q_ML2_PLUGIN_TYPE_DRIVERS" ]]; then populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 type_drivers=$Q_ML2_PLUGIN_TYPE_DRIVERS @@ -155,5 +146,9 @@ function has_neutron_plugin_security_group { return 0 } +function configure_qos_ml2 { + neutron_ml2_extension_driver_add "qos" +} + # Restore xtrace $_XTRACE_NEUTRON_ML2 diff --git a/lib/neutron_plugins/openvswitch_agent b/lib/neutron_plugins/openvswitch_agent index 100961196d..6e79984e9b 100644 --- a/lib/neutron_plugins/openvswitch_agent +++ b/lib/neutron_plugins/openvswitch_agent @@ -15,6 +15,10 @@ function neutron_plugin_create_nova_conf { function neutron_plugin_install_agent_packages { _neutron_ovs_base_install_agent_packages + if use_library_from_git "os-ken"; then + git_clone_by_name "os-ken" + setup_dev_lib "os-ken" + fi } function neutron_plugin_configure_dhcp_agent { @@ -64,7 +68,7 @@ function neutron_plugin_setup_interface_driver { } function neutron_plugin_check_adv_test_requirements { - is_service_enabled q-agt && is_service_enabled q-dhcp && return 0 + is_service_enabled q-agt neutron-agent && is_service_enabled q-dhcp neutron-dhcp && return 0 } # Restore xtrace diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent index b661f593a4..48e92a1782 100644 --- a/lib/neutron_plugins/ovn_agent +++ b/lib/neutron_plugins/ovn_agent @@ -21,20 +21,14 @@ source ${TOP_DIR}/lib/neutron_plugins/ovs_base source ${TOP_DIR}/lib/neutron_plugins/openvswitch_agent -# Load devstack ovs base functions -source $NEUTRON_DIR/devstack/lib/ovs - - -# Defaults -# -------- - -Q_BUILD_OVS_FROM_GIT=$(trueorfalse True Q_BUILD_OVS_FROM_GIT) +# Load devstack ovs compliation and loading functions +source ${TOP_DIR}/lib/neutron_plugins/ovs_source # Set variables for building OVN from source OVN_REPO=${OVN_REPO:-https://github.com/ovn-org/ovn.git} OVN_REPO_NAME=$(basename ${OVN_REPO} | cut -f1 -d'.') OVN_REPO_NAME=${OVN_REPO_NAME:-ovn} -OVN_BRANCH=${OVN_BRANCH:-v20.06.1} +OVN_BRANCH=${OVN_BRANCH:-branch-24.03} # The commit removing OVN bits from the OVS tree, it is the commit that is not # present in OVN tree and is used to distinguish if OVN is part of OVS or not. # https://github.com/openvswitch/ovs/commit/05bf1dbb98b0635a51f75e268ef8aed27601401d @@ -66,13 +60,18 @@ OVN_L3_SCHEDULER=${OVN_L3_SCHEDULER:-leastloaded} # A UUID to uniquely identify this system. 
If one is not specified, a random # one will be generated. A randomly generated UUID will be saved in a file -# 'ovn-uuid' so that the same one will be re-used if you re-run DevStack. +# $OVS_SYSCONFDIR/system-id.conf (typically /etc/openvswitch/system-id.conf) +# so that the same one will be re-used if you re-run DevStack or restart +# Open vSwitch service. OVN_UUID=${OVN_UUID:-} # Whether or not to build the openvswitch kernel module from ovs. This is required # unless the distro kernel includes ovs+conntrack support. OVN_BUILD_MODULES=$(trueorfalse False OVN_BUILD_MODULES) OVN_BUILD_FROM_SOURCE=$(trueorfalse False OVN_BUILD_FROM_SOURCE) +if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then + Q_BUILD_OVS_FROM_GIT=True +fi # Whether or not to install the ovs python module from ovs source. This can be # used to test and validate new ovs python features. This should only be used @@ -86,15 +85,37 @@ OVN_INSTALL_OVS_PYTHON_MODULE=$(trueorfalse False OVN_INSTALL_OVS_PYTHON_MODULE) # configure the MTU DHCP option. OVN_GENEVE_OVERHEAD=${OVN_GENEVE_OVERHEAD:-38} -# The log level of the OVN databases (north and south) +# The log level of the OVN databases (north and south). +# Supported log levels are: off, emer, err, warn, info or dbg. +# More information about log levels can be found at +# http://www.openvswitch.org/support/dist-docs/ovs-appctl.8.txt OVN_DBS_LOG_LEVEL=${OVN_DBS_LOG_LEVEL:-info} +# OVN metadata agent configuration OVN_META_CONF=$NEUTRON_CONF_DIR/neutron_ovn_metadata_agent.ini OVN_META_DATA_HOST=${OVN_META_DATA_HOST:-$(ipv6_unquote $SERVICE_HOST)} +# OVN agent configuration +# The OVN agent is configured, by default, with the "metadata" extension. +OVN_AGENT_CONF=$NEUTRON_CONF_DIR/plugins/ml2/ovn_agent.ini +OVN_AGENT_EXTENSIONS=${OVN_AGENT_EXTENSIONS:-metadata} +# The variable TARGET_ENABLE_OVN_AGENT, if True, overrides the OVN Metadata +# agent service (q-ovn-metadata-agent neutron-ovn-metadata-agent) and the OVN +# agent service (q-ovn-agent neutron-ovn-agent) configuration, always disabling +# the first one (OVN Metadata agent) and enabling the second (OVN agent). +# This variable will be removed in 2026.2, along with the OVN Metadata agent +# removal. +TARGET_ENABLE_OVN_AGENT=$(trueorfalse False TARGET_ENABLE_OVN_AGENT) + +# If True (default) the node will be considered a gateway node. 
+ENABLE_CHASSIS_AS_GW=$(trueorfalse True ENABLE_CHASSIS_AS_GW) +OVN_L3_CREATE_PUBLIC_NETWORK=$(trueorfalse True OVN_L3_CREATE_PUBLIC_NETWORK) + export OVSDB_SERVER_LOCAL_HOST=$SERVICE_LOCAL_HOST +TUNNEL_IP=$TUNNEL_ENDPOINT_IP if [[ "$SERVICE_IP_VERSION" == 6 ]]; then OVSDB_SERVER_LOCAL_HOST=[$OVSDB_SERVER_LOCAL_HOST] + TUNNEL_IP=[$TUNNEL_IP] fi OVN_IGMP_SNOOPING_ENABLE=$(trueorfalse False OVN_IGMP_SNOOPING_ENABLE) @@ -109,14 +130,22 @@ OVS_RUNDIR=$OVS_PREFIX/var/run/openvswitch OVS_SHAREDIR=$OVS_PREFIX/share/openvswitch OVS_SCRIPTDIR=$OVS_SHAREDIR/scripts OVS_DATADIR=$DATA_DIR/ovs +OVS_SYSCONFDIR=${OVS_SYSCONFDIR:-$OVS_PREFIX/etc/openvswitch} -OVN_DATADIR=$DATA_DIR/ovn +if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then + OVN_DATADIR=$DATA_DIR/ovn +else + # When using OVN from packages, the data dir for OVN DBs is + # /var/lib/ovn + OVN_DATADIR=/var/lib/ovn +fi OVN_SHAREDIR=$OVS_PREFIX/share/ovn OVN_SCRIPTDIR=$OVN_SHAREDIR/scripts OVN_RUNDIR=$OVS_PREFIX/var/run/ovn NEUTRON_OVN_BIN_DIR=$(get_python_exec_prefix) NEUTRON_OVN_METADATA_BINARY="neutron-ovn-metadata-agent" +NEUTRON_OVN_AGENT_BINARY="neutron-ovn-agent" STACK_GROUP="$( id --group --name "$STACK_USER" )" @@ -140,8 +169,10 @@ fi # Defaults Overwrite # ------------------ - -Q_ML2_PLUGIN_MECHANISM_DRIVERS=${Q_ML2_PLUGIN_MECHANISM_DRIVERS:-ovn,logger} +# NOTE(ralonsoh): during the eventlet removal, the "logger" mech +# driver has been removed from this list. Re-add it once the removal +# is finished or the mech driver does not call monkey_patch(). +Q_ML2_PLUGIN_MECHANISM_DRIVERS=${Q_ML2_PLUGIN_MECHANISM_DRIVERS:-ovn} Q_ML2_PLUGIN_TYPE_DRIVERS=${Q_ML2_PLUGIN_TYPE_DRIVERS:-local,flat,vlan,geneve} Q_ML2_TENANT_NETWORK_TYPE=${Q_ML2_TENANT_NETWORK_TYPE:-"geneve"} Q_ML2_PLUGIN_GENEVE_TYPE_OPTIONS=${Q_ML2_PLUGIN_GENEVE_TYPE_OPTIONS:-"vni_ranges=1:65536"} @@ -149,22 +180,39 @@ Q_ML2_PLUGIN_EXT_DRIVERS=${Q_ML2_PLUGIN_EXT_DRIVERS:-port_security,qos} # this one allows empty: ML2_L3_PLUGIN=${ML2_L3_PLUGIN-"ovn-router"} +Q_LOG_DRIVER_RATE_LIMIT=${Q_LOG_DRIVER_RATE_LIMIT:-100} +Q_LOG_DRIVER_BURST_LIMIT=${Q_LOG_DRIVER_BURST_LIMIT:-25} +Q_LOG_DRIVER_LOG_BASE=${Q_LOG_DRIVER_LOG_BASE:-acl_log_meter} # Utility Functions # ----------------- +function wait_for_db_file { + local count=0 + while [ ! -f $1 ]; do + sleep 1 + count=$((count+1)) + if [ "$count" -gt 40 ]; then + die $LINENO "DB File $1 not found" + fi + done +} + function wait_for_sock_file { local count=0 while [ ! -S $1 ]; do sleep 1 count=$((count+1)) - if [ "$count" -gt 5 ]; then + if [ "$count" -gt 40 ]; then die $LINENO "Socket $1 not found" fi done } function use_new_ovn_repository { + if [[ "$OVN_BUILD_FROM_SOURCE" == "False" ]]; then + return 0 + fi if [ -z "$is_new_ovn" ]; then local ovs_repo_dir=$DEST/$OVS_REPO_NAME if [ ! 
-d $ovs_repo_dir ]; then @@ -212,11 +260,12 @@ function _run_process { local cmd="$2" local stop_cmd="$3" local group=$4 - local user=${5:-$STACK_USER} + local user=$5 + local rundir=${6:-$OVS_RUNDIR} local systemd_service="devstack@$service.service" local unit_file="$SYSTEMD_DIR/$systemd_service" - local environment="OVN_RUNDIR=$OVS_RUNDIR OVN_DBDIR=$OVN_DATADIR OVN_LOGDIR=$LOGDIR OVS_RUNDIR=$OVS_RUNDIR OVS_DBDIR=$OVS_DATADIR OVS_LOGDIR=$LOGDIR" + local environment="OVN_RUNDIR=$OVN_RUNDIR OVN_DBDIR=$OVN_DATADIR OVN_LOGDIR=$LOGDIR OVS_RUNDIR=$OVS_RUNDIR OVS_DBDIR=$OVS_DATADIR OVS_LOGDIR=$LOGDIR" echo "Starting $service executed command": $cmd @@ -232,9 +281,14 @@ function _run_process { _start_process $systemd_service - local testcmd="test -e $OVS_RUNDIR/$service.pid" + local testcmd="test -e $rundir/$service.pid" test_with_retry "$testcmd" "$service did not start" $SERVICE_TIMEOUT 1 - sudo ovs-appctl -t $service vlog/set console:off syslog:info file:info + local service_ctl_file + service_ctl_file=$(ls $rundir | grep $service | grep ctl) + if [ -z "$service_ctl_file" ]; then + die $LINENO "ctl file for service $service is not present." + fi + sudo ovs-appctl -t $rundir/$service_ctl_file vlog/set console:off syslog:info file:info } function clone_repository { @@ -247,64 +301,28 @@ function clone_repository { ERROR_ON_CLONE=false git_clone $repo $dir $branch } -function get_ext_gw_interface { - # Get ext_gw_interface depending on value of Q_USE_PUBLIC_VETH - # This function is copied directly from the devstack neutron-legacy script - if [[ "$Q_USE_PUBLIC_VETH" == "True" ]]; then - echo $Q_PUBLIC_VETH_EX - else - # Disable in-band as we are going to use local port - # to communicate with VMs - sudo ovs-vsctl set Bridge $PUBLIC_BRIDGE \ - other_config:disable-in-band=true - echo $PUBLIC_BRIDGE - fi -} - function create_public_bridge { # Create the public bridge that OVN will use - # This logic is based on the devstack neutron-legacy _neutron_configure_router_v4 and _v6 - local ext_gw_ifc - ext_gw_ifc=$(get_ext_gw_interface) - - sudo ovs-vsctl --may-exist add-br $ext_gw_ifc -- set bridge $ext_gw_ifc protocols=OpenFlow13,OpenFlow15 - sudo ovs-vsctl set open . external-ids:ovn-bridge-mappings=$PHYSICAL_NETWORK:$ext_gw_ifc - if [ -n "$FLOATING_RANGE" ]; then - local cidr_len=${FLOATING_RANGE#*/} - sudo ip addr flush dev $ext_gw_ifc - sudo ip addr add $PUBLIC_NETWORK_GATEWAY/$cidr_len dev $ext_gw_ifc - fi - - # Ensure IPv6 RAs are accepted on the interface with the default route. - # This is needed for neutron-based devstack clouds to work in - # IPv6-only clouds in the gate. Please do not remove this without - # talking to folks in Infra. This fix is based on a devstack fix for - # neutron L3 agent: https://review.openstack.org/#/c/359490/. - default_route_dev=$(ip route | grep ^default | awk '{print $5}') - sudo sysctl -w net.ipv6.conf.$default_route_dev.accept_ra=2 - - sudo sysctl -w net.ipv6.conf.all.forwarding=1 - if [ -n "$IPV6_PUBLIC_RANGE" ]; then - local ipv6_cidr_len=${IPV6_PUBLIC_RANGE#*/} - sudo ip -6 addr flush dev $ext_gw_ifc - sudo ip -6 addr add $IPV6_PUBLIC_NETWORK_GATEWAY/$ipv6_cidr_len dev $ext_gw_ifc - fi - - sudo ip link set $ext_gw_ifc up + sudo ovs-vsctl --may-exist add-br $PUBLIC_BRIDGE -- set bridge $PUBLIC_BRIDGE protocols=OpenFlow13,OpenFlow15 + sudo ovs-vsctl set open . external-ids:ovn-bridge-mappings=${OVN_BRIDGE_MAPPINGS} + _configure_public_network_connectivity } -function _disable_libvirt_apparmor { - if ! 
sudo aa-status --enabled ; then +function is_ovn_metadata_agent_enabled { + if is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent && [[ "$TARGET_ENABLE_OVN_AGENT" == "False" ]]; then return 0 fi - # NOTE(arosen): This is used as a work around to allow newer versions - # of libvirt to work with ovs configured ports. See LP#1466631. - # requires the apparmor-utils - install_package apparmor-utils - # disables apparmor for libvirtd - sudo aa-complain /etc/apparmor.d/usr.sbin.libvirtd + return 1 } +function is_ovn_agent_enabled { + if is_service_enabled q-ovn-agent neutron-ovn-agent || [[ "$TARGET_ENABLE_OVN_AGENT" == "True" ]]; then + enable_service q-ovn-agent + return 0 + fi + return 1 + +} # OVN compilation functions # ------------------------- @@ -312,16 +330,13 @@ function _disable_libvirt_apparmor { # compile_ovn() - Compile OVN from source and load needed modules # Accepts three parameters: -# - first optional is False by default and means that -# modules are built and installed. -# - second optional parameter defines prefix for +# - first optional parameter defines prefix for # ovn compilation -# - third optional parameter defines localstatedir for +# - second optional parameter defines localstatedir for # ovn single machine runtime function compile_ovn { - local build_modules=${1:-False} - local prefix=$2 - local localstatedir=$3 + local prefix=$1 + local localstatedir=$2 if [ -n "$prefix" ]; then prefix="--prefix=$prefix" @@ -338,8 +353,24 @@ function compile_ovn { ./boot.sh fi + # NOTE(mnaser): OVN requires that you build using the OVS from the + # submodule. + # + # https://github.com/ovn-org/ovn/blob/3fb397b63663297acbcbf794e1233951222ae5af/Documentation/intro/install/general.rst#bootstrapping + # https://github.com/ovn-org/ovn/issues/128 + git submodule update --init + pushd ovs + if [ ! -f configure ] ; then + ./boot.sh + fi + if [ ! -f config.status ] || [ configure -nt config.status ] ; then + ./configure + fi + make -j$(($(nproc) + 1)) + popd + if [ ! -f config.status ] || [ configure -nt config.status ] ; then - ./configure --with-ovs-source=$DEST/$OVS_REPO_NAME $prefix $localstatedir + ./configure $prefix $localstatedir fi make -j$(($(nproc) + 1)) sudo make install @@ -352,7 +383,7 @@ function compile_ovn { # OVN service sanity check function ovn_sanity_check { - if is_service_enabled q-agt neutron-agt; then + if is_service_enabled q-agt neutron-agent; then die $LINENO "The q-agt/neutron-agt service must be disabled with OVN." elif is_service_enabled q-l3 neutron-l3; then die $LINENO "The q-l3/neutron-l3 service must be disabled with OVN." @@ -365,11 +396,6 @@ function ovn_sanity_check { # install_ovn() - Collect source and prepare function install_ovn { - if [[ "$Q_BUILD_OVS_FROM_GIT" == "False" ]]; then - echo "Installation of OVS from source disabled." - return 0 - fi - echo "Installing OVN and dependent packages" # Check the OVN configuration @@ -380,10 +406,6 @@ function install_ovn { sudo mkdir -p $OVS_RUNDIR sudo chown $(whoami) $OVS_RUNDIR - # NOTE(lucasagomes): To keep things simpler, let's reuse the same - # RUNDIR for both OVS and OVN. 
This way we avoid having to specify the - # --db option in the ovn-{n,s}bctl commands while playing with DevStack - sudo ln -s $OVS_RUNDIR $OVN_RUNDIR if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then # If OVS is already installed, remove it, because we're about to @@ -399,7 +421,7 @@ function install_ovn { compile_ovs $OVN_BUILD_MODULES if use_new_ovn_repository; then - compile_ovn $OVN_BUILD_MODULES + compile_ovn fi sudo mkdir -p $OVS_PREFIX/var/log/openvswitch @@ -407,7 +429,6 @@ function install_ovn { sudo mkdir -p $OVS_PREFIX/var/log/ovn sudo chown $(whoami) $OVS_PREFIX/var/log/ovn else - fixup_ovn_centos install_package $(get_packages openvswitch) install_package $(get_packages ovn) fi @@ -472,7 +493,7 @@ function filter_network_api_extensions { function configure_ovn_plugin { echo "Configuring Neutron for OVN" - if is_service_enabled q-svc ; then + if is_service_enabled q-svc neutron-api; then filter_network_api_extensions populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_type_geneve max_header_size=$OVN_GENEVE_OVERHEAD populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_nb_connection="$OVN_NB_REMOTE" @@ -490,7 +511,15 @@ function configure_ovn_plugin { populate_ml2_config /$Q_PLUGIN_CONF_FILE securitygroup enable_security_group="$Q_USE_SECGROUP" inicomment /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver - if is_service_enabled q-ovn-metadata-agent; then + if is_service_enabled q-log neutron-log; then + populate_ml2_config /$Q_PLUGIN_CONF_FILE network_log rate_limit="$Q_LOG_DRIVER_RATE_LIMIT" + populate_ml2_config /$Q_PLUGIN_CONF_FILE network_log burst_limit="$Q_LOG_DRIVER_BURST_LIMIT" + inicomment /$Q_PLUGIN_CONF_FILE network_log local_output_log_base="$Q_LOG_DRIVER_LOG_BASE" + fi + + if is_ovn_metadata_agent_enabled; then + populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_metadata_enabled=True + elif is_ovn_agent_enabled && [[ "$OVN_AGENT_EXTENSIONS" =~ 'metadata' ]]; then populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_metadata_enabled=True else populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_metadata_enabled=False @@ -511,7 +540,9 @@ function configure_ovn_plugin { fi if is_service_enabled n-api-meta ; then - if is_service_enabled q-ovn-metadata-agent ; then + if is_ovn_metadata_agent_enabled; then + iniset $NOVA_CONF neutron service_metadata_proxy True + elif is_ovn_agent_enabled && [[ "$OVN_AGENT_EXTENSIONS" =~ 'metadata' ]]; then iniset $NOVA_CONF neutron service_metadata_proxy True fi fi @@ -521,38 +552,65 @@ function configure_ovn { echo "Configuring OVN" if [ -z "$OVN_UUID" ] ; then - if [ -f ./ovn-uuid ] ; then - OVN_UUID=$(cat ovn-uuid) + if [ -f $OVS_SYSCONFDIR/system-id.conf ]; then + OVN_UUID=$(cat $OVS_SYSCONFDIR/system-id.conf) else OVN_UUID=$(uuidgen) - echo $OVN_UUID > ovn-uuid + echo $OVN_UUID | sudo tee $OVS_SYSCONFDIR/system-id.conf + fi + else + local ovs_uuid + ovs_uuid=$(cat $OVS_SYSCONFDIR/system-id.conf) + if [ "$ovs_uuid" != $OVN_UUID ]; then + echo $OVN_UUID | sudo tee $OVS_SYSCONFDIR/system-id.conf fi fi + # Erase the pre-set configurations from packages. DevStack will + # configure OVS and OVN accordingly for its use. 
+ if [[ "$OVN_BUILD_FROM_SOURCE" == "False" ]] && is_fedora; then + sudo truncate -s 0 /etc/openvswitch/default.conf + sudo truncate -s 0 /etc/sysconfig/openvswitch + sudo truncate -s 0 /etc/sysconfig/ovn + fi + # Metadata - if is_service_enabled q-ovn-metadata-agent && is_service_enabled ovn-controller; then + local sample_file="" + local config_file="" + if is_ovn_agent_enabled && [[ "$OVN_AGENT_EXTENSIONS" =~ 'metadata' ]] && is_service_enabled ovn-controller; then + sample_file=$NEUTRON_DIR/etc/neutron/plugins/ml2/ovn_agent.ini.sample + config_file=$OVN_AGENT_CONF + elif is_ovn_metadata_agent_enabled && is_service_enabled ovn-controller; then + sample_file=$NEUTRON_DIR/etc/neutron_ovn_metadata_agent.ini.sample + config_file=$OVN_META_CONF + fi + if [ -n "$config_file" ]; then sudo install -d -o $STACK_USER $NEUTRON_CONF_DIR mkdir -p $NEUTRON_DIR/etc/neutron/plugins/ml2 (cd $NEUTRON_DIR && exec ./tools/generate_config_file_samples.sh) - cp $NEUTRON_DIR/etc/neutron_ovn_metadata_agent.ini.sample $OVN_META_CONF - configure_root_helper_options $OVN_META_CONF + cp $sample_file $config_file + configure_root_helper_options $config_file - iniset $OVN_META_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - iniset $OVN_META_CONF DEFAULT nova_metadata_host $OVN_META_DATA_HOST - iniset $OVN_META_CONF DEFAULT metadata_workers $API_WORKERS - iniset $OVN_META_CONF DEFAULT state_path $NEUTRON_STATE_PATH - iniset $OVN_META_CONF ovs ovsdb_connection tcp:$OVSDB_SERVER_LOCAL_HOST:6640 - iniset $OVN_META_CONF ovn ovn_sb_connection $OVN_SB_REMOTE + iniset $config_file DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL + iniset $config_file DEFAULT nova_metadata_host $OVN_META_DATA_HOST + iniset $config_file DEFAULT metadata_workers $API_WORKERS + iniset $config_file DEFAULT state_path $DATA_DIR/neutron + iniset $config_file ovs ovsdb_connection tcp:$OVSDB_SERVER_LOCAL_HOST:6640 + iniset $config_file ovn ovn_sb_connection $OVN_SB_REMOTE if is_service_enabled tls-proxy; then - iniset $OVN_META_CONF ovn \ + iniset $config_file ovn \ ovn_sb_ca_cert $INT_CA_DIR/ca-chain.pem - iniset $OVN_META_CONF ovn \ + iniset $config_file ovn \ ovn_sb_certificate $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt - iniset $OVN_META_CONF ovn \ + iniset $config_file ovn \ ovn_sb_private_key $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key fi + if [[ $config_file == $OVN_AGENT_CONF ]]; then + iniset $config_file agent extensions $OVN_AGENT_EXTENSIONS + iniset $config_file ovn ovn_nb_connection $OVN_NB_REMOTE + fi fi } @@ -564,15 +622,20 @@ function init_ovn { # in the ovn, ovn-nb, or ovs databases. We're going to trash them and # create new ones on each devstack run. - _disable_libvirt_apparmor + local mkdir_cmd="mkdir -p ${OVN_DATADIR}" - mkdir -p $OVN_DATADIR + if [[ "$OVN_BUILD_FROM_SOURCE" == "False" ]]; then + mkdir_cmd="sudo ${mkdir_cmd}" + fi + + $mkdir_cmd mkdir -p $OVS_DATADIR rm -f $OVS_DATADIR/*.db rm -f $OVS_DATADIR/.*.db.~lock~ - rm -f $OVN_DATADIR/*.db - rm -f $OVN_DATADIR/.*.db.~lock~ + sudo rm -f $OVN_DATADIR/*.db + sudo rm -f $OVN_DATADIR/.*.db.~lock~ + sudo rm -f $OVN_RUNDIR/*.sock } function _start_ovs { @@ -599,12 +662,12 @@ function _start_ovs { dbcmd+=" --remote=db:hardware_vtep,Global,managers $OVS_DATADIR/vtep.db" fi dbcmd+=" $OVS_DATADIR/conf.db" - _run_process ovsdb-server "$dbcmd" + _run_process ovsdb-server "$dbcmd" "" "$STACK_GROUP" "root" "$OVS_RUNDIR" # Note: ovn-controller will create and configure br-int once it is started. # So, no need to create it now because nothing depends on that bridge here. 
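# (For illustration: once ovn-controller has created it, the bridge can be
# checked with e.g. ``sudo ovs-vsctl br-exists br-int``, which exits 0 when
# the bridge is present.)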
local ovscmd="$OVS_SBINDIR/ovs-vswitchd --log-file --pidfile --detach" - _run_process ovs-vswitchd "$ovscmd" "" "$STACK_GROUP" "root" + _run_process ovs-vswitchd "$ovscmd" "" "$STACK_GROUP" "root" "$OVS_RUNDIR" else _start_process "$OVSDB_SERVER_SERVICE" _start_process "$OVS_VSWITCHD_SERVICE" @@ -624,8 +687,8 @@ function _start_ovs { sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-remote="$OVN_SB_REMOTE" sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-bridge="br-int" sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-encap-type="geneve" - sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-encap-ip="$HOST_IP" - sudo ovs-vsctl --no-wait set open_vswitch . external-ids:hostname="$LOCAL_HOSTNAME" + sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-encap-ip="$TUNNEL_IP" + sudo ovs-vsctl --no-wait set open_vswitch . external-ids:hostname=$(hostname) # Select this chassis to host gateway routers if [[ "$ENABLE_CHASSIS_AS_GW" == "True" ]]; then sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-cms-options="enable-chassis-as-gw" @@ -639,36 +702,34 @@ function _start_ovs { if is_service_enabled ovn-controller-vtep ; then ovn_base_setup_bridge br-v vtep-ctl add-ps br-v - vtep-ctl set Physical_Switch br-v tunnel_ips=$HOST_IP + vtep-ctl set Physical_Switch br-v tunnel_ips=$TUNNEL_IP enable_service ovs-vtep local vtepcmd="$OVS_SCRIPTDIR/ovs-vtep --log-file --pidfile --detach br-v" - _run_process ovs-vtep "$vtepcmd" "" "$STACK_GROUP" "root" + _run_process ovs-vtep "$vtepcmd" "" "$STACK_GROUP" "root" "$OVS_RUNDIR" vtep-ctl set-manager tcp:$HOST_IP:6640 fi fi } -function _start_ovn_services { - _start_process "$OVSDB_SERVER_SERVICE" - _start_process "$OVS_VSWITCHD_SERVICE" +function _wait_for_ovn_and_set_custom_config { + # Wait for the service to be ready + # Check for socket and db files for both OVN NB and SB + wait_for_sock_file $OVN_RUNDIR/ovnnb_db.sock + wait_for_sock_file $OVN_RUNDIR/ovnsb_db.sock + wait_for_db_file $OVN_DATADIR/ovnnb_db.db + wait_for_db_file $OVN_DATADIR/ovnsb_db.db - if is_service_enabled ovn-northd ; then - _start_process "$OVN_NORTHD_SERVICE" - fi - if is_service_enabled ovn-controller ; then - _start_process "$OVN_CONTROLLER_SERVICE" - fi - if is_service_enabled ovn-controller-vtep ; then - _start_process "$OVN_CONTROLLER_VTEP_SERVICE" - fi - if is_service_enabled ovs-vtep ; then - _start_process "devstack@ovs-vtep.service" - fi - if is_service_enabled q-ovn-metadata-agent; then - _start_process "devstack@q-ovn-metadata-agent.service" + if is_service_enabled tls-proxy; then + sudo ovn-nbctl --db=unix:$OVN_RUNDIR/ovnnb_db.sock set-ssl $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/ca-chain.pem + sudo ovn-sbctl --db=unix:$OVN_RUNDIR/ovnsb_db.sock set-ssl $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/ca-chain.pem fi + + sudo ovn-nbctl --db=unix:$OVN_RUNDIR/ovnnb_db.sock set-connection p${OVN_PROTO}:6641:$SERVICE_LISTEN_ADDRESS -- set connection . inactivity_probe=60000 + sudo ovn-sbctl --db=unix:$OVN_RUNDIR/ovnsb_db.sock set-connection p${OVN_PROTO}:6642:$SERVICE_LISTEN_ADDRESS -- set connection . 
inactivity_probe=60000 + sudo ovs-appctl -t $OVN_RUNDIR/ovnnb_db.ctl vlog/set console:off syslog:$OVN_DBS_LOG_LEVEL file:$OVN_DBS_LOG_LEVEL + sudo ovs-appctl -t $OVN_RUNDIR/ovnsb_db.ctl vlog/set console:off syslog:$OVN_DBS_LOG_LEVEL file:$OVN_DBS_LOG_LEVEL } # start_ovn() - Start running processes, including screen @@ -687,23 +748,13 @@ function start_ovn { local cmd="/bin/bash $SCRIPTDIR/ovn-ctl --no-monitor start_northd" local stop_cmd="/bin/bash $SCRIPTDIR/ovn-ctl stop_northd" - _run_process ovn-northd "$cmd" "$stop_cmd" + _run_process ovn-northd "$cmd" "$stop_cmd" "$STACK_GROUP" "root" "$OVN_RUNDIR" else _start_process "$OVN_NORTHD_SERVICE" fi - # Wait for the service to be ready - wait_for_sock_file $OVS_RUNDIR/ovnnb_db.sock - wait_for_sock_file $OVS_RUNDIR/ovnsb_db.sock + _wait_for_ovn_and_set_custom_config - if is_service_enabled tls-proxy; then - sudo ovn-nbctl --db=unix:$OVS_RUNDIR/ovnnb_db.sock set-ssl $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/ca-chain.pem - sudo ovn-sbctl --db=unix:$OVS_RUNDIR/ovnsb_db.sock set-ssl $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/ca-chain.pem - fi - sudo ovn-nbctl --db=unix:$OVS_RUNDIR/ovnnb_db.sock set-connection p${OVN_PROTO}:6641:$SERVICE_LISTEN_ADDRESS -- set connection . inactivity_probe=60000 - sudo ovn-sbctl --db=unix:$OVS_RUNDIR/ovnsb_db.sock set-connection p${OVN_PROTO}:6642:$SERVICE_LISTEN_ADDRESS -- set connection . inactivity_probe=60000 - sudo ovs-appctl -t $OVS_RUNDIR/ovnnb_db.ctl vlog/set console:off syslog:$OVN_DBS_LOG_LEVEL file:$OVN_DBS_LOG_LEVEL - sudo ovs-appctl -t $OVS_RUNDIR/ovnsb_db.ctl vlog/set console:off syslog:$OVN_DBS_LOG_LEVEL file:$OVN_DBS_LOG_LEVEL fi if is_service_enabled ovn-controller ; then @@ -711,7 +762,7 @@ function start_ovn { local cmd="/bin/bash $SCRIPTDIR/ovn-ctl --no-monitor start_controller" local stop_cmd="/bin/bash $SCRIPTDIR/ovn-ctl stop_controller" - _run_process ovn-controller "$cmd" "$stop_cmd" "$STACK_GROUP" "root" + _run_process ovn-controller "$cmd" "$stop_cmd" "$STACK_GROUP" "root" "$OVN_RUNDIR" else _start_process "$OVN_CONTROLLER_SERVICE" fi @@ -720,19 +771,23 @@ function start_ovn { if is_service_enabled ovn-controller-vtep ; then if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then local cmd="$OVS_BINDIR/ovn-controller-vtep --log-file --pidfile --detach --ovnsb-db=$OVN_SB_REMOTE" - _run_process ovn-controller-vtep "$cmd" "" "$STACK_GROUP" "root" + _run_process ovn-controller-vtep "$cmd" "" "$STACK_GROUP" "root" "$OVN_RUNDIR" else _start_process "$OVN_CONTROLLER_VTEP_SERVICE" fi fi - if is_service_enabled q-ovn-metadata-agent; then + if is_ovn_metadata_agent_enabled; then run_process q-ovn-metadata-agent "$NEUTRON_OVN_BIN_DIR/$NEUTRON_OVN_METADATA_BINARY --config-file $OVN_META_CONF" # Format logging setup_logging $OVN_META_CONF fi - _start_ovn_services + if is_ovn_agent_enabled; then + run_process q-ovn-agent "$NEUTRON_OVN_BIN_DIR/$NEUTRON_OVN_AGENT_BINARY --config-file $OVN_AGENT_CONF" + # Format logging + setup_logging $OVN_AGENT_CONF + fi } function _stop_ovs_dp { @@ -750,10 +805,22 @@ function _stop_process { } function stop_ovn { - if is_service_enabled q-ovn-metadata-agent; then - sudo pkill -9 -f haproxy || : + # NOTE(ralonsoh): this check doesn't use "is_ovn_metadata_agent_enabled", + # instead it relies only in the configured services, disregarding the + # flag "TARGET_ENABLE_OVN_AGENT". It is needed to force the OVN Metadata + # agent stop in case the flag "TARGET_ENABLE_OVN_AGENT" is set. 
+ if is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent; then + # pkill takes care not to kill itself, but it may kill its parent + # sudo unless we use the "ps | grep [f]oo" trick + sudo pkill -9 -f "[h]aproxy" || : _stop_process "devstack@q-ovn-metadata-agent.service" fi + if is_ovn_agent_enabled; then + # pkill takes care not to kill itself, but it may kill its parent + # sudo unless we use the "ps | grep [f]oo" trick + sudo pkill -9 -f "[h]aproxy" || : + _stop_process "devstack@q-ovn-agent.service" + fi if is_service_enabled ovn-controller-vtep ; then _stop_process "$OVN_CONTROLLER_VTEP_SERVICE" fi @@ -796,5 +863,5 @@ function cleanup_ovn { _cleanup $ovs_path fi - sudo rm -f $OVN_RUNDIR + sudo rm -rf $OVN_RUNDIR } diff --git a/lib/neutron_plugins/ovs_base b/lib/neutron_plugins/ovs_base index 2e63fe3c7b..adabc56412 100644 --- a/lib/neutron_plugins/ovs_base +++ b/lib/neutron_plugins/ovs_base @@ -7,6 +7,12 @@ _XTRACE_NEUTRON_OVS_BASE=$(set +o | grep xtrace) set +o xtrace +# Load devstack ovs compliation and loading functions +source ${TOP_DIR}/lib/neutron_plugins/ovs_source + +# Defaults +# -------- + OVS_BRIDGE=${OVS_BRIDGE:-br-int} # OVS recognize default 'system' datapath or 'netdev' for userspace datapath OVS_DATAPATH_TYPE=${OVS_DATAPATH_TYPE:-system} @@ -60,26 +66,20 @@ function _neutron_ovs_base_install_ubuntu_dkms { } function _neutron_ovs_base_install_agent_packages { - # Install deps - install_package $(get_packages "openvswitch") - if is_ubuntu; then - _neutron_ovs_base_install_ubuntu_dkms - restart_service openvswitch-switch - elif is_fedora; then - restart_service openvswitch - sudo systemctl enable openvswitch - elif is_suse; then - if [[ $DISTRO == "sle12" ]] && vercmp "$os_RELEASE" "<" "12.2" ; then + if [ "$Q_BUILD_OVS_FROM_GIT" == "True" ]; then + remove_ovs_packages + compile_ovs False /usr/local /var + load_conntrack_gre_module + start_new_ovs + else + # Install deps + install_package $(get_packages "openvswitch") + if is_ubuntu; then + _neutron_ovs_base_install_ubuntu_dkms restart_service openvswitch-switch - else - # workaround for https://bugzilla.suse.com/show_bug.cgi?id=1085971 - if [[ $DISTRO =~ "tumbleweed" ]]; then - sudo sed -i -e "s,^OVS_USER_ID=.*,OVS_USER_ID='root:root'," /etc/sysconfig/openvswitch - fi - restart_service openvswitch || { - journalctl -xe || : - systemctl status openvswitch - } + elif is_fedora; then + restart_service openvswitch + sudo systemctl enable openvswitch fi fi } diff --git a/lib/neutron_plugins/ovs_source b/lib/neutron_plugins/ovs_source new file mode 100644 index 0000000000..6b6f531a01 --- /dev/null +++ b/lib/neutron_plugins/ovs_source @@ -0,0 +1,214 @@ +#!/bin/bash +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
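For reference, the Q_BUILD_OVS_FROM_GIT switch used in the ovs_base hunk above is driven entirely from local.conf; a minimal sketch using the defaults that follow in ovs_source (repo and branch values are the ones shipped in this patch):

    [[local|localrc]]
    Q_BUILD_OVS_FROM_GIT=True
    # Optional overrides; these match the ovs_source defaults
    OVS_REPO=https://github.com/openvswitch/ovs.git
    OVS_BRANCH=branch-3.3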
+ +# Defaults +# -------- +Q_BUILD_OVS_FROM_GIT=$(trueorfalse False Q_BUILD_OVS_FROM_GIT) + +# Set variables for building OVS from source +OVS_REPO=${OVS_REPO:-https://github.com/openvswitch/ovs.git} +OVS_REPO_NAME=$(basename ${OVS_REPO} | cut -f1 -d'.') +OVS_REPO_NAME=${OVS_REPO_NAME:-ovs} +OVS_BRANCH=${OVS_BRANCH:-branch-3.3} + +# Functions + +# load_module() - Load module using modprobe module given by argument and dies +# on failure +# - fatal argument is optional and says whether function should +# exit if module can't be loaded +function load_module { + local module=$1 + local fatal=$2 + + if [ "$(trueorfalse True fatal)" == "True" ]; then + sudo modprobe $module || (sudo dmesg && die $LINENO "FAILED TO LOAD $module") + else + sudo modprobe $module || (echo "FAILED TO LOAD $module" && sudo dmesg) + fi +} + +# prepare_for_compilation() - Fetch ovs git repository and install packages needed for +# compilation. +function prepare_for_ovs_compilation { + local build_modules=${1:-False} + OVS_DIR=$DEST/$OVS_REPO_NAME + + if [ ! -d $OVS_DIR ] ; then + # We can't use git_clone here because we want to ignore ERROR_ON_CLONE + git_timed clone $OVS_REPO $OVS_DIR + cd $OVS_DIR + git checkout $OVS_BRANCH + else + # Even though the directory already exists, call git_clone to update it + # if needed based on the RECLONE option + git_clone $OVS_REPO $OVS_DIR $OVS_BRANCH + cd $OVS_DIR + fi + + # TODO: Can you create package list files like you can inside devstack? + install_package autoconf automake libtool gcc patch make + + # If build_modules is False, we don't need to install the kernel-* + # packages. Just return. + if [[ "$build_modules" == "False" ]]; then + return + fi + + KERNEL_VERSION=`uname -r` + if is_fedora ; then + # is_fedora covers Fedora, RHEL, CentOS, etc... + if [[ "$os_VENDOR" == "Fedora" ]]; then + install_package elfutils-libelf-devel + KERNEL_VERSION=`echo $KERNEL_VERSION | cut --delimiter='-' --field 1` + elif [[ ${KERNEL_VERSION:0:2} != "3." ]]; then + # dash is illegal character in rpm version so replace + # them with underscore like it is done in the kernel + # https://github.com/torvalds/linux/blob/master/scripts/package/mkspec#L25 + # but only for latest series of the kernel, not 3.x + + KERNEL_VERSION=`echo $KERNEL_VERSION | tr - _` + fi + + echo NOTE: if kernel-devel-$KERNEL_VERSION or kernel-headers-$KERNEL_VERSION installation + echo failed, please, provide a repository with the package, or yum update / reboot + echo your machine to get the latest kernel. + + install_package kernel-devel-$KERNEL_VERSION + install_package kernel-headers-$KERNEL_VERSION + if is_service_enabled tls-proxy; then + install_package openssl-devel + fi + + elif is_ubuntu ; then + install_package linux-headers-$KERNEL_VERSION + if is_service_enabled tls-proxy; then + install_package libssl-dev + fi + fi +} + +# load_ovs_kernel_modules() - load openvswitch kernel module +function load_ovs_kernel_modules { + load_module openvswitch + load_module vport-geneve False + sudo dmesg | tail +} + +# reload_ovs_kernel_modules() - reload openvswitch kernel module +function reload_ovs_kernel_modules { + set +e + ovs_system=$(sudo ovs-dpctl dump-dps | grep ovs-system) + if [ -n "$ovs_system" ]; then + sudo ovs-dpctl del-dp ovs-system + fi + set -e + sudo modprobe -r vport_geneve + sudo modprobe -r openvswitch + load_ovs_kernel_modules +} + +# compile_ovs() - Compile OVS from source and load needed modules. 
+# Accepts two parameters: +# - first one is False by default and means that modules are not built and installed. +# - second optional parameter defines prefix for ovs compilation +# - third optional parameter defines localstatedir for ovs single machine runtime +# Env variables OVS_REPO_NAME, OVS_REPO and OVS_BRANCH must be set +function compile_ovs { + local _pwd=$PWD + local build_modules=${1:-False} + local prefix=$2 + local localstatedir=$3 + + if [ -n "$prefix" ]; then + prefix="--prefix=$prefix" + fi + + if [ -n "$localstatedir" ]; then + localstatedir="--localstatedir=$localstatedir" + fi + + prepare_for_ovs_compilation $build_modules + + KERNEL_VERSION=$(uname -r) + major_version=$(echo "${KERNEL_VERSION}" | cut -d '.' -f1) + patch_level=$(echo "${KERNEL_VERSION}" | cut -d '.' -f2) + if [ "${major_version}" -gt 5 ] || [ "${major_version}" == 5 ] && [ "${patch_level}" -gt 5 ]; then + echo "NOTE: KERNEL VERSION is ${KERNEL_VERSION} and OVS doesn't support compiling " + echo "Kernel module for version higher than 5.5. Skipping module compilation..." + build_modules="False" + fi + + if [ ! -f configure ] ; then + ./boot.sh + fi + if [ ! -f config.status ] || [ configure -nt config.status ] ; then + if [[ "$build_modules" == "True" ]]; then + ./configure $prefix $localstatedir --with-linux=/lib/modules/$(uname -r)/build + else + ./configure $prefix $localstatedir + fi + fi + make -j$(($(nproc) + 1)) + sudo make install + if [[ "$build_modules" == "True" ]]; then + sudo make INSTALL_MOD_DIR=kernel/net/openvswitch modules_install + fi + reload_ovs_kernel_modules + + cd $_pwd +} + +# action_service - call an action over openvswitch service +# Accepts one parameter that can be either +# 'start', 'restart' and 'stop'. +function action_openvswitch { + local action=$1 + + if is_ubuntu; then + ${action}_service openvswitch-switch + elif is_fedora; then + ${action}_service openvswitch + fi +} + +# start_new_ovs() - removes old ovs database, creates a new one and starts ovs +function start_new_ovs { + sudo rm -f /etc/openvswitch/conf.db /etc/openvswitch/.conf.db~lock~ + sudo /usr/local/share/openvswitch/scripts/ovs-ctl start +} + +# stop_new_ovs() - stops ovs +function stop_new_ovs { + local ovs_ctl='/usr/local/share/openvswitch/scripts/ovs-ctl' + + if [ -x $ovs_ctl ] ; then + sudo $ovs_ctl stop + fi +} + +# remove_ovs_packages() - removes old ovs packages from the system +function remove_ovs_packages { + for package in openvswitch openvswitch-switch openvswitch-common; do + if is_package_installed $package; then + uninstall_package $package + fi + done +} + + +# load_conntrack_gre_module() - loads nf_conntrack_proto_gre kernel module +function load_conntrack_gre_module { + load_module nf_conntrack_proto_gre False +} diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3 index 75a3567096..bbedc57a44 100644 --- a/lib/neutron_plugins/services/l3 +++ b/lib/neutron_plugins/services/l3 @@ -47,7 +47,8 @@ Q_L3_ROUTER_PER_TENANT=${Q_L3_ROUTER_PER_TENANT:-True} # used for the network. In case of ofagent, you should add the # corresponding entry to your OFAGENT_PHYSICAL_INTERFACE_MAPPINGS. # For openvswitch agent, you should add the corresponding entry to -# your OVS_BRIDGE_MAPPINGS. +# your OVS_BRIDGE_MAPPINGS and for OVN add the corresponding entry +# to your OVN_BRIDGE_MAPPINGS. # # eg. 
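One note on the kernel-version guard in compile_ovs above: in POSIX shell, && and || have equal precedence and group left to right, so an explicit grouping states the "kernel newer than 5.5" intent unambiguously. A small illustrative sketch (variable names are local to the example, not part of the patch):

    major=6; patch=1
    if [ "$major" -gt 5 ] || { [ "$major" -eq 5 ] && [ "$patch" -gt 5 ]; }; then
        build_modules="False"   # too new for the in-tree OVS kernel modules
    fi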
(ofagent) # Q_USE_PROVIDERNET_FOR_PUBLIC=True @@ -60,6 +61,11 @@ Q_L3_ROUTER_PER_TENANT=${Q_L3_ROUTER_PER_TENANT:-True} # PUBLIC_PHYSICAL_NETWORK=public # OVS_BRIDGE_MAPPINGS=public:br-ex # +# eg. (ovn agent) +# Q_USER_PROVIDERNET_FOR_PUBLIC=True +# PUBLIC_PHYSICAL_NETWORK=public +# OVN_BRIDGE_MAPPINGS=public:br-ex +# # The provider-network-type defaults to flat, however, the values # PUBLIC_PROVIDERNET_TYPE and PUBLIC_PROVIDERNET_SEGMENTATION_ID could # be set to specify the parameters for an alternate network type. @@ -123,21 +129,7 @@ function _configure_neutron_l3_agent { neutron_plugin_configure_l3_agent $Q_L3_CONF_FILE - # If we've given a PUBLIC_INTERFACE to take over, then we assume - # that we can own the whole thing, and privot it into the OVS - # bridge. If we are not, we're probably on a single interface - # machine, and we just setup NAT so that fixed guests can get out. - if [[ -n "$PUBLIC_INTERFACE" ]]; then - _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" True False "inet" - - if [[ $(ip -f inet6 a s dev "$PUBLIC_INTERFACE" | grep -c 'global') != 0 ]]; then - _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" False False "inet6" - fi - else - for d in $default_v4_route_devs; do - sudo iptables -t nat -A POSTROUTING -o $d -s $FLOATING_RANGE -j MASQUERADE - done - fi + _configure_public_network_connectivity } # Explicitly set router id in l3 agent configuration @@ -161,10 +153,6 @@ function _neutron_get_ext_gw_interface { } function create_neutron_initial_network { - local project_id - project_id=$(openstack project list | grep " demo " | get_field 1) - die_if_not_set $LINENO project_id "Failure retrieving project_id for demo" - # Allow drivers that need to create an initial network to do so here if type -p neutron_plugin_create_initial_network_profile > /dev/null; then neutron_plugin_create_initial_network_profile $PHYSICAL_NETWORK @@ -184,15 +172,15 @@ function create_neutron_initial_network { if is_provider_network; then die_if_not_set $LINENO PHYSICAL_NETWORK "You must specify the PHYSICAL_NETWORK" die_if_not_set $LINENO PROVIDER_NETWORK_TYPE "You must specify the PROVIDER_NETWORK_TYPE" - NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create $PHYSICAL_NETWORK --project $project_id --provider-network-type $PROVIDER_NETWORK_TYPE --provider-physical-network "$PHYSICAL_NETWORK" ${SEGMENTATION_ID:+--provider-segment $SEGMENTATION_ID} --share | grep ' id ' | get_field 2) - die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PHYSICAL_NETWORK $project_id" + NET_ID=$(openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" network create $PHYSICAL_NETWORK --provider-network-type $PROVIDER_NETWORK_TYPE --provider-physical-network "$PHYSICAL_NETWORK" ${SEGMENTATION_ID:+--provider-segment $SEGMENTATION_ID} --share -f value -c id) + die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PHYSICAL_NETWORK" if [[ "$IP_VERSION" =~ 4.* ]]; then if [ -z $SUBNETPOOL_V4_ID ]; then fixed_range_v4=$FIXED_RANGE fi - SUBNET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create --project $project_id --ip-version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} $PROVIDER_SUBNET_NAME --gateway $NETWORK_GATEWAY ${SUBNETPOOL_V4_ID:+--subnet-pool $SUBNETPOOL_V4_ID} --network $NET_ID ${fixed_range_v4:+--subnet-range $fixed_range_v4} | grep ' id ' | get_field 2) - die_if_not_set $LINENO SUBNET_ID "Failure creating SUBNET_ID for $PROVIDER_SUBNET_NAME $project_id" + 
SUBNET_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" subnet create --ip-version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} $PROVIDER_SUBNET_NAME --gateway $NETWORK_GATEWAY ${SUBNETPOOL_V4_ID:+--subnet-pool $SUBNETPOOL_V4_ID} --network $NET_ID ${fixed_range_v4:+--subnet-range $fixed_range_v4} -f value -c id) + die_if_not_set $LINENO SUBNET_ID "Failure creating SUBNET_ID for $PROVIDER_SUBNET_NAME" fi if [[ "$IP_VERSION" =~ .*6 ]]; then @@ -201,8 +189,8 @@ function create_neutron_initial_network { if [ -z $SUBNETPOOL_V6_ID ]; then fixed_range_v6=$IPV6_PROVIDER_FIXED_RANGE fi - IPV6_SUBNET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create --project $project_id --ip-version 6 --gateway $IPV6_PROVIDER_NETWORK_GATEWAY $IPV6_PROVIDER_SUBNET_NAME ${SUBNETPOOL_V6_ID:+--subnet-pool $SUBNETPOOL_V6_ID} --network $NET_ID ${fixed_range_v6:+--subnet-range $fixed_range_v6} | grep ' id ' | get_field 2) - die_if_not_set $LINENO IPV6_SUBNET_ID "Failure creating IPV6_SUBNET_ID for $IPV6_PROVIDER_SUBNET_NAME $project_id" + IPV6_SUBNET_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" subnet create --ip-version 6 --gateway $IPV6_PROVIDER_NETWORK_GATEWAY $IPV6_PROVIDER_SUBNET_NAME ${SUBNETPOOL_V6_ID:+--subnet-pool $SUBNETPOOL_V6_ID} --network $NET_ID ${fixed_range_v6:+--subnet-range $fixed_range_v6} -f value -c id) + die_if_not_set $LINENO IPV6_SUBNET_ID "Failure creating IPV6_SUBNET_ID for $IPV6_PROVIDER_SUBNET_NAME" fi if [[ $Q_AGENT == "openvswitch" ]]; then @@ -211,17 +199,17 @@ function create_neutron_initial_network { sudo ip link set $PUBLIC_INTERFACE up fi else - NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create --project $project_id "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2) - die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PRIVATE_NETWORK_NAME $project_id" + NET_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" network create "$PRIVATE_NETWORK_NAME" -f value -c id) + die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PRIVATE_NETWORK_NAME" if [[ "$IP_VERSION" =~ 4.* ]]; then # Create IPv4 private subnet - SUBNET_ID=$(_neutron_create_private_subnet_v4 $project_id) + SUBNET_ID=$(_neutron_create_private_subnet_v4) fi if [[ "$IP_VERSION" =~ .*6 ]]; then # Create IPv6 private subnet - IPV6_SUBNET_ID=$(_neutron_create_private_subnet_v6 $project_id) + IPV6_SUBNET_ID=$(_neutron_create_private_subnet_v6) fi fi @@ -229,12 +217,12 @@ function create_neutron_initial_network { # Create a router, and add the private subnet as one of its interfaces if [[ "$Q_L3_ROUTER_PER_TENANT" == "True" ]]; then # create a tenant-owned router. - ROUTER_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router create --project $project_id $Q_ROUTER_NAME | grep ' id ' | get_field 2) - die_if_not_set $LINENO ROUTER_ID "Failure creating ROUTER_ID for $project_id $Q_ROUTER_NAME" + ROUTER_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" router create $Q_ROUTER_NAME -f value -c id) + die_if_not_set $LINENO ROUTER_ID "Failure creating router $Q_ROUTER_NAME" else # Plugin only supports creating a single router, which should be admin owned. 
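The hunks above switch to named cloud entries instead of passing --project explicitly; devstack-admin-demo appears to be admin credentials scoped to the demo project, which is why the $project_id arguments could be dropped (that reading is inferred from usage, not stated in the patch). A quick check of which project each cloud entry resolves to:

    openstack --os-cloud devstack --os-region "$REGION_NAME" token issue -f value -c project_id
    openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" token issue -f value -c project_id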
- ROUTER_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router create $Q_ROUTER_NAME | grep ' id ' | get_field 2) - die_if_not_set $LINENO ROUTER_ID "Failure creating ROUTER_ID for $Q_ROUTER_NAME" + ROUTER_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router create $Q_ROUTER_NAME -f value -c id) + die_if_not_set $LINENO ROUTER_ID "Failure creating router $Q_ROUTER_NAME" fi EXTERNAL_NETWORK_FLAGS="--external" @@ -243,9 +231,9 @@ function create_neutron_initial_network { fi # Create an external network, and a subnet. Configure the external network as router gw if [ "$Q_USE_PROVIDERNET_FOR_PUBLIC" = "True" ]; then - EXT_NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create "$PUBLIC_NETWORK_NAME" $EXTERNAL_NETWORK_FLAGS --provider-network-type ${PUBLIC_PROVIDERNET_TYPE:-flat} ${PUBLIC_PROVIDERNET_SEGMENTATION_ID:+--provider-segment $PUBLIC_PROVIDERNET_SEGMENTATION_ID} --provider-physical-network ${PUBLIC_PHYSICAL_NETWORK} | grep ' id ' | get_field 2) + EXT_NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create "$PUBLIC_NETWORK_NAME" $EXTERNAL_NETWORK_FLAGS --provider-network-type ${PUBLIC_PROVIDERNET_TYPE:-flat} ${PUBLIC_PROVIDERNET_SEGMENTATION_ID:+--provider-segment $PUBLIC_PROVIDERNET_SEGMENTATION_ID} --provider-physical-network ${PUBLIC_PHYSICAL_NETWORK} -f value -c id) else - EXT_NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create "$PUBLIC_NETWORK_NAME" $EXTERNAL_NETWORK_FLAGS | grep ' id ' | get_field 2) + EXT_NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create "$PUBLIC_NETWORK_NAME" $EXTERNAL_NETWORK_FLAGS -f value -c id) fi die_if_not_set $LINENO EXT_NET_ID "Failure creating EXT_NET_ID for $PUBLIC_NETWORK_NAME" @@ -263,35 +251,32 @@ function create_neutron_initial_network { # Create private IPv4 subnet function _neutron_create_private_subnet_v4 { - local project_id=$1 if [ -z $SUBNETPOOL_V4_ID ]; then fixed_range_v4=$FIXED_RANGE fi - local subnet_params="--project $project_id " - subnet_params+="--ip-version 4 " + local subnet_params="--ip-version 4 " if [[ -n "$NETWORK_GATEWAY" ]]; then subnet_params+="--gateway $NETWORK_GATEWAY " fi + subnet_params+="${SUBNETPOOL_V4_ID:+--subnet-pool $SUBNETPOOL_V4_ID} " subnet_params+="${fixed_range_v4:+--subnet-range $fixed_range_v4} " subnet_params+="--network $NET_ID $PRIVATE_SUBNET_NAME" local subnet_id - subnet_id=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create $subnet_params | grep ' id ' | get_field 2) - die_if_not_set $LINENO subnet_id "Failure creating private IPv4 subnet for $project_id" + subnet_id=$(openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" subnet create $subnet_params -f value -c id) + die_if_not_set $LINENO subnet_id "Failure creating private IPv4 subnet" echo $subnet_id } # Create private IPv6 subnet function _neutron_create_private_subnet_v6 { - local project_id=$1 die_if_not_set $LINENO IPV6_RA_MODE "IPV6 RA Mode not set" die_if_not_set $LINENO IPV6_ADDRESS_MODE "IPV6 Address Mode not set" local ipv6_modes="--ipv6-ra-mode $IPV6_RA_MODE --ipv6-address-mode $IPV6_ADDRESS_MODE" if [ -z $SUBNETPOOL_V6_ID ]; then fixed_range_v6=$FIXED_RANGE_V6 fi - local subnet_params="--project $project_id " - subnet_params+="--ip-version 6 " + local subnet_params="--ip-version 6 " if [[ -n "$IPV6_PRIVATE_NETWORK_GATEWAY" ]]; then subnet_params+="--gateway $IPV6_PRIVATE_NETWORK_GATEWAY " fi @@ -299,8 +284,8 @@ function 
_neutron_create_private_subnet_v6 { subnet_params+="${fixed_range_v6:+--subnet-range $fixed_range_v6} " subnet_params+="$ipv6_modes --network $NET_ID $IPV6_PRIVATE_SUBNET_NAME " local ipv6_subnet_id - ipv6_subnet_id=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create $subnet_params | grep ' id ' | get_field 2) - die_if_not_set $LINENO ipv6_subnet_id "Failure creating private IPv6 subnet for $project_id" + ipv6_subnet_id=$(openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" subnet create $subnet_params -f value -c id) + die_if_not_set $LINENO ipv6_subnet_id "Failure creating private IPv6 subnet" echo $ipv6_subnet_id } @@ -333,7 +318,7 @@ function _neutron_create_public_subnet_v6 { # Configure neutron router for IPv4 public access function _neutron_configure_router_v4 { - openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router add subnet $ROUTER_ID $SUBNET_ID + openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" router add subnet $ROUTER_ID $SUBNET_ID # Create a public subnet on the external network local id_and_ext_gw_ip id_and_ext_gw_ip=$(_neutron_create_public_subnet_v4 $EXT_NET_ID) @@ -341,24 +326,14 @@ function _neutron_configure_router_v4 { ext_gw_ip=$(echo $id_and_ext_gw_ip | get_field 2) PUB_SUBNET_ID=$(echo $id_and_ext_gw_ip | get_field 5) # Configure the external network as the default router gateway - openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router set --external-gateway $EXT_NET_ID $ROUTER_ID + openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" router set --external-gateway $EXT_NET_ID $ROUTER_ID - # This logic is specific to using the l3-agent for layer 3 - if is_service_enabled q-l3 || is_service_enabled neutron-l3; then + # This logic is specific to using OVN or the l3-agent for layer 3 + if ([[ $Q_AGENT == "ovn" ]] && [[ "$OVN_L3_CREATE_PUBLIC_NETWORK" == "True" ]] && is_service_enabled q-svc neutron-api) || is_service_enabled q-l3 neutron-l3; then # Configure and enable public bridge local ext_gw_interface="none" if is_neutron_ovs_base_plugin; then ext_gw_interface=$(_neutron_get_ext_gw_interface) - elif [[ "$Q_AGENT" = "linuxbridge" ]]; then - # Get the device the neutron router and network for $FIXED_RANGE - # will be using. - if [ "$Q_USE_PROVIDERNET_FOR_PUBLIC" = "True" ]; then - # in provider nets a bridge mapping uses the public bridge directly - ext_gw_interface=$PUBLIC_BRIDGE - else - # e.x. 
brq3592e767-da for NET_ID 3592e767-da66-4bcb-9bec-cdb03cd96102 - ext_gw_interface=brq${EXT_NET_ID:0:11} - fi fi if [[ "$ext_gw_interface" != "none" ]]; then local cidr_len=${FLOATING_RANGE#*/} @@ -377,7 +352,7 @@ function _neutron_configure_router_v4 { # Configure neutron router for IPv6 public access function _neutron_configure_router_v6 { - openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router add subnet $ROUTER_ID $IPV6_SUBNET_ID + openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" router add subnet $ROUTER_ID $IPV6_SUBNET_ID # Create a public subnet on the external network local ipv6_id_and_ext_gw_ip ipv6_id_and_ext_gw_ip=$(_neutron_create_public_subnet_v6 $EXT_NET_ID) @@ -389,11 +364,11 @@ function _neutron_configure_router_v6 { # If the external network has not already been set as the default router # gateway when configuring an IPv4 public subnet, do so now if [[ "$IP_VERSION" == "6" ]]; then - openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router set --external-gateway $EXT_NET_ID $ROUTER_ID + openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" router set --external-gateway $EXT_NET_ID $ROUTER_ID fi - # This logic is specific to using the l3-agent for layer 3 - if is_service_enabled q-l3 || is_service_enabled neutron-l3; then + # This logic is specific to using OVN or the l3-agent for layer 3 + if ([[ $Q_AGENT == "ovn" ]] && [[ "$OVN_L3_CREATE_PUBLIC_NETWORK" == "True" ]] && is_service_enabled q-svc neutron-api) || is_service_enabled q-l3 neutron-l3; then # if the Linux host considers itself to be a router then it will # ignore all router advertisements # Ensure IPv6 RAs are accepted on interfaces with a default route. @@ -410,7 +385,13 @@ function _neutron_configure_router_v6 { sudo sysctl -w net.ipv6.conf.all.forwarding=1 # Configure and enable public bridge # Override global IPV6_ROUTER_GW_IP with the true value from neutron - IPV6_ROUTER_GW_IP=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" port list -c 'Fixed IP Addresses' | grep $ipv6_pub_subnet_id | awk -F'ip_address' '{ print $2 }' | cut -f2 -d\' | tr '\n' ' ') + # NOTE(slaweq): when enforce scopes is enabled in Neutron, router's + # gateway ports aren't visible in API because such ports don't belongs + # to any tenant. Because of that, at least temporary we need to find + # IPv6 address of the router's gateway in a bit different way. + # It can be reverted when bug + # https://bugs.launchpad.net/neutron/+bug/1959332 will be fixed + IPV6_ROUTER_GW_IP=$(openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" router show $ROUTER_ID -c external_gateway_info -f json | grep -C 1 $ipv6_pub_subnet_id | grep ip_address | awk '{print $2}' | tr -d '"') die_if_not_set $LINENO IPV6_ROUTER_GW_IP "Failure retrieving IPV6_ROUTER_GW_IP" if is_neutron_ovs_base_plugin; then @@ -418,8 +399,16 @@ function _neutron_configure_router_v6 { ext_gw_interface=$(_neutron_get_ext_gw_interface) local ipv6_cidr_len=${IPV6_PUBLIC_RANGE#*/} - # Configure interface for public bridge + # Configure interface for public bridge by setting the interface + # to "up" in case the job is running entirely private network based + # testing. + sudo ip link set $ext_gw_interface up sudo ip -6 addr replace $ipv6_ext_gw_ip/$ipv6_cidr_len dev $ext_gw_interface + # Any IPv6 private subnet that uses the default IPV6 subnet pool + # and that is plugged into the default router (Q_ROUTER_NAME) will + # be reachable from the devstack node (ex: ipv6-private-subnet). 
+ # Some scenario tests (such as octavia-tempest-plugin) rely heavily + # on this feature. local replace_range=${SUBNETPOOL_PREFIX_V6} if [[ -z "${SUBNETPOOL_V6_ID}" ]]; then replace_range=${FIXED_RANGE_V6} @@ -436,3 +425,12 @@ function is_networking_extension_supported { EXT_LIST=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" extension list --network -c Alias -f value) [[ $EXT_LIST =~ $extension ]] && return 0 } + +function plugin_agent_add_l3_agent_extension { + local l3_agent_extension=$1 + if [[ -z "$L3_AGENT_EXTENSIONS" ]]; then + L3_AGENT_EXTENSIONS=$l3_agent_extension + elif [[ ! ,${L3_AGENT_EXTENSIONS}, =~ ,${l3_agent_extension}, ]]; then + L3_AGENT_EXTENSIONS+=",$l3_agent_extension" + fi +} diff --git a/lib/neutron_plugins/services/metering b/lib/neutron_plugins/services/metering index 5b32468d21..757a562ee6 100644 --- a/lib/neutron_plugins/services/metering +++ b/lib/neutron_plugins/services/metering @@ -12,7 +12,7 @@ AGENT_METERING_BINARY="$NEUTRON_BIN_DIR/neutron-metering-agent" METERING_PLUGIN="neutron.services.metering.metering_plugin.MeteringPlugin" function neutron_agent_metering_configure_common { - _neutron_service_plugin_class_add $METERING_PLUGIN + neutron_service_plugin_class_add $METERING_PLUGIN } function neutron_agent_metering_configure_agent { diff --git a/lib/neutron_plugins/services/placement b/lib/neutron_plugins/services/placement new file mode 100644 index 0000000000..3ec185bae6 --- /dev/null +++ b/lib/neutron_plugins/services/placement @@ -0,0 +1,21 @@ +#!/bin/bash + +function configure_placement_service_plugin { + neutron_service_plugin_class_add "placement" +} + +function configure_placement_neutron { + iniset $NEUTRON_CONF placement auth_type "$NEUTRON_PLACEMENT_AUTH_TYPE" + iniset $NEUTRON_CONF placement auth_url "$KEYSTONE_SERVICE_URI" + iniset $NEUTRON_CONF placement username "$NEUTRON_PLACEMENT_USERNAME" + iniset $NEUTRON_CONF placement password "$SERVICE_PASSWORD" + iniset $NEUTRON_CONF placement user_domain_name "$SERVICE_DOMAIN_NAME" + iniset $NEUTRON_CONF placement project_name "$SERVICE_TENANT_NAME" + iniset $NEUTRON_CONF placement project_domain_name "$SERVICE_DOMAIN_NAME" + iniset $NEUTRON_CONF placement region_name "$REGION_NAME" +} + +function configure_placement_extension { + configure_placement_service_plugin + configure_placement_neutron +} diff --git a/lib/neutron_plugins/services/qos b/lib/neutron_plugins/services/qos new file mode 100644 index 0000000000..c11c315586 --- /dev/null +++ b/lib/neutron_plugins/services/qos @@ -0,0 +1,30 @@ +#!/bin/bash + +function configure_qos_service_plugin { + neutron_service_plugin_class_add "qos" +} + + +function configure_qos_core_plugin { + configure_qos_$Q_PLUGIN +} + + +function configure_qos_l2_agent { + plugin_agent_add_l2_agent_extension "qos" +} + + +function configure_qos { + configure_qos_service_plugin + configure_qos_core_plugin + configure_qos_l2_agent +} + +function configure_l3_agent_extension_fip_qos { + plugin_agent_add_l3_agent_extension "fip_qos" +} + +function configure_l3_agent_extension_gateway_ip_qos { + plugin_agent_add_l3_agent_extension "gateway_ip_qos" +} diff --git a/lib/neutron_plugins/services/segments b/lib/neutron_plugins/services/segments new file mode 100644 index 0000000000..08936bae49 --- /dev/null +++ b/lib/neutron_plugins/services/segments @@ -0,0 +1,10 @@ +#!/bin/bash + +function configure_segments_service_plugin { + neutron_service_plugin_class_add segments +} + +function configure_segments_extension { + configure_segments_service_plugin +} + diff 
--git a/lib/neutron_plugins/services/trunk b/lib/neutron_plugins/services/trunk new file mode 100644 index 0000000000..8e0f6944cf --- /dev/null +++ b/lib/neutron_plugins/services/trunk @@ -0,0 +1,5 @@ +#!/bin/bash + +function configure_trunk_extension { + neutron_service_plugin_class_add "trunk" +} diff --git a/lib/nova b/lib/nova index d7426039c4..460b4adc85 100644 --- a/lib/nova +++ b/lib/nova @@ -53,11 +53,19 @@ NOVA_COND_CONF=$NOVA_CONF_DIR/nova.conf NOVA_CPU_CONF=$NOVA_CONF_DIR/nova-cpu.conf NOVA_FAKE_CONF=$NOVA_CONF_DIR/nova-fake.conf NOVA_API_DB=${NOVA_API_DB:-nova_api} -NOVA_UWSGI=$NOVA_BIN_DIR/nova-api-wsgi -NOVA_METADATA_UWSGI=$NOVA_BIN_DIR/nova-metadata-wsgi +NOVA_UWSGI=nova.wsgi.osapi_compute:application +NOVA_METADATA_UWSGI=nova.wsgi.metadata:application NOVA_UWSGI_CONF=$NOVA_CONF_DIR/nova-api-uwsgi.ini NOVA_METADATA_UWSGI_CONF=$NOVA_CONF_DIR/nova-metadata-uwsgi.ini +# Allow forcing the stable compute uuid to something specific. This would be +# done by deployment tools that pre-allocate the UUIDs, but it is also handy +# for developers that need to re-stack a compute-only deployment multiple +# times. Since the DB is non-local and not erased on an unstack, making it +# stay the same each time is what developers want. Set to a uuid here or +# leave it blank for default allocate-on-start behavior. +NOVA_CPU_UUID="" + # The total number of cells we expect. Must be greater than one and doesn't # count cell0. NOVA_NUM_CELLS=${NOVA_NUM_CELLS:-1} @@ -67,13 +75,10 @@ NOVA_CPU_CELL=${NOVA_CPU_CELL:-1} NOVA_API_PASTE_INI=${NOVA_API_PASTE_INI:-$NOVA_CONF_DIR/api-paste.ini} -# Toggle for deploying Nova-API under a wsgi server. We default to -# true to use UWSGI, but allow False so that fall back to the -# eventlet server can happen for grenade runs. -# NOTE(cdent): We can adjust to remove the eventlet-base api service -# after pike, at which time we can stop using NOVA_USE_MOD_WSGI to -# mean "use uwsgi" because we'll be always using uwsgi. -NOVA_USE_MOD_WSGI=${NOVA_USE_MOD_WSGI:-True} +# We do not need to report service status every 10s for devstack-like +# deployments. In the gate this generates extra work for the services and the +# database which are already taxed. +NOVA_SERVICE_REPORT_INTERVAL=${NOVA_SERVICE_REPORT_INTERVAL:-120} if is_service_enabled tls-proxy; then NOVA_SERVICE_PROTOCOL="https" @@ -83,6 +88,11 @@ fi # services and the compute node NOVA_CONSOLE_PROXY_COMPUTE_TLS=${NOVA_CONSOLE_PROXY_COMPUTE_TLS:-False} +# Validate configuration +if ! is_service_enabled tls-proxy && [ "$NOVA_CONSOLE_PROXY_COMPUTE_TLS" == "True" ]; then + die $LINENO "enabling TLS for the console proxy requires the tls-proxy service" +fi + # Public facing bits NOVA_SERVICE_HOST=${NOVA_SERVICE_HOST:-$SERVICE_HOST} NOVA_SERVICE_PORT=${NOVA_SERVICE_PORT:-8774} @@ -92,33 +102,34 @@ NOVA_SERVICE_LISTEN_ADDRESS=${NOVA_SERVICE_LISTEN_ADDRESS:-$(ipv6_unquote $SERVI METADATA_SERVICE_PORT=${METADATA_SERVICE_PORT:-8775} NOVA_ENABLE_CACHE=${NOVA_ENABLE_CACHE:-True} +# Flag to set the oslo_policy.enforce_scope and oslo_policy.enforce_new_defaults. +# This is used to disable the compute API policies scope and new defaults. +# By Default, it is True. 
+# For more detail: https://docs.openstack.org/oslo.policy/latest/configuration/index.html#oslo_policy.enforce_scope +NOVA_ENFORCE_SCOPE=$(trueorfalse True NOVA_ENFORCE_SCOPE) + +if [[ $SERVICE_IP_VERSION == 6 ]]; then + NOVA_MY_IP="$HOST_IPV6" +else + NOVA_MY_IP="$HOST_IP" +fi + # Option to enable/disable config drive # NOTE: Set ``FORCE_CONFIG_DRIVE="False"`` to turn OFF config drive FORCE_CONFIG_DRIVE=${FORCE_CONFIG_DRIVE:-"False"} # The following NOVA_FILTERS contains SameHostFilter and DifferentHostFilter with # the default filters. -NOVA_FILTERS="AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,SameHostFilter,DifferentHostFilter" +NOVA_FILTERS="ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,SameHostFilter,DifferentHostFilter" QEMU_CONF=/etc/libvirt/qemu.conf -# Set default defaults here as some hypervisor drivers override these -PUBLIC_INTERFACE_DEFAULT=br100 -# Set ``GUEST_INTERFACE_DEFAULT`` to some interface on the box so that -# the default isn't completely crazy. This will match ``eth*``, ``em*``, or -# the new ``p*`` interfaces, then basically picks the first -# alphabetically. It's probably wrong, however it's less wrong than -# always using ``eth0`` which doesn't exist on new Linux distros at all. -GUEST_INTERFACE_DEFAULT=$(ip link \ - | grep 'state UP' \ - | awk '{print $2}' \ - | sed 's/://' \ - | grep ^[ep] \ - | head -1) - # ``NOVA_VNC_ENABLED`` can be used to forcibly enable VNC configuration. # In multi-node setups allows compute hosts to not run ``n-novnc``. NOVA_VNC_ENABLED=$(trueorfalse False NOVA_VNC_ENABLED) +# same as ``NOVA_VNC_ENABLED`` but for Spice and serial console respectively. +NOVA_SPICE_ENABLED=$(trueorfalse False NOVA_SPICE_ENABLED) +NOVA_SERIAL_ENABLED=$(trueorfalse False NOVA_SERIAL_ENABLED) # Get hypervisor configuration # ---------------------------- @@ -135,7 +146,7 @@ fi # ``NOVA_USE_SERVICE_TOKEN`` is a mode where service token is passed along with # user token while communicating to external RESP API's like Neutron, Cinder # and Glance. -NOVA_USE_SERVICE_TOKEN=$(trueorfalse False NOVA_USE_SERVICE_TOKEN) +NOVA_USE_SERVICE_TOKEN=$(trueorfalse True NOVA_USE_SERVICE_TOKEN) # ``NOVA_ALLOW_MOVE_TO_SAME_HOST`` can be set to False in multi node DevStack, # where there are at least two nova-computes. @@ -154,6 +165,12 @@ NOVA_NOTIFICATION_FORMAT=${NOVA_NOTIFICATION_FORMAT:-unversioned} # image in devstack is CirrOS. NOVA_SHUTDOWN_TIMEOUT=${NOVA_SHUTDOWN_TIMEOUT:-0} +# Whether to use Keystone unified limits instead of legacy quota limits. +NOVA_USE_UNIFIED_LIMITS=$(trueorfalse False NOVA_USE_UNIFIED_LIMITS) + +# TB Cache Size in MiB for qemu guests +NOVA_LIBVIRT_TB_CACHE_SIZE=${NOVA_LIBVIRT_TB_CACHE_SIZE:-0} + # Functions # --------- @@ -211,6 +228,9 @@ function cleanup_nova { done sudo iscsiadm --mode node --op delete || true + # Disconnect all nvmeof connections + sudo nvme disconnect-all || true + # Clean out the instances directory. 
sudo rm -rf $NOVA_INSTANCES_PATH/* fi @@ -226,8 +246,12 @@ function cleanup_nova { stop_process "n-api" stop_process "n-api-meta" - remove_uwsgi_config "$NOVA_UWSGI_CONF" "$NOVA_UWSGI" - remove_uwsgi_config "$NOVA_METADATA_UWSGI_CONF" "$NOVA_METADATA_UWSGI" + remove_uwsgi_config "$NOVA_UWSGI_CONF" "nova-api" + remove_uwsgi_config "$NOVA_METADATA_UWSGI_CONF" "nova-metadata" + + if [[ "$NOVA_BACKEND" == "LVM" ]]; then + clean_lvm_volume_group $DEFAULT_VOLUME_GROUP_NAME + fi } # configure_nova() - Set config files, create data dirs, etc @@ -255,7 +279,8 @@ function configure_nova { if [ ! -e /dev/kvm ]; then echo "WARNING: Switching to QEMU" LIBVIRT_TYPE=qemu - LIBVIRT_CPU_MODE=none + LIBVIRT_CPU_MODE=custom + LIBVIRT_CPU_MODEL=Nehalem if which selinuxenabled >/dev/null 2>&1 && selinuxenabled; then # https://bugzilla.redhat.com/show_bug.cgi?id=753589 sudo setsebool virt_use_execmem on @@ -293,11 +318,9 @@ function configure_nova { fi fi - if is_fedora && [[ $DISTRO =~ f31] ]]; then - # For f31 use the rebased 2.1.0 version of the package. - sudo dnf copr enable -y lyarwood/iscsi-initiator-utils - sudo dnf update -y - fi + # Due to cinder bug #1966513 we ALWAYS need an initiator name for LVM + # Ensure each compute host uses a unique iSCSI initiator + echo InitiatorName=$(iscsi-iname) | sudo tee /etc/iscsi/initiatorname.iscsi if [[ ${ISCSID_DEBUG} == "True" ]]; then # Install an override that starts iscsid with debugging @@ -312,8 +335,32 @@ EOF sudo systemctl daemon-reload fi - # ensure that iscsid is started, even when disabled by default - restart_service iscsid + # set chap algorithms. The default chap_algorithm is md5 which will + # not work under FIPS. + iniset -sudo /etc/iscsi/iscsid.conf DEFAULT "node.session.auth.chap_algs" "SHA3-256,SHA256" + + if [[ $CINDER_TARGET_HELPER != 'nvmet' ]]; then + # ensure that iscsid is started, even when disabled by default + restart_service iscsid + + # For NVMe-oF we need different packages that many not be present + else + install_package nvme-cli + sudo modprobe nvme-fabrics + + # Ensure NVMe is ready and create the Soft-RoCE device over the networking interface + if [[ $CINDER_TARGET_PROTOCOL == 'nvmet_rdma' ]]; then + sudo modprobe nvme-rdma + iface=${HOST_IP_IFACE:-`ip -br -$SERVICE_IP_VERSION a | grep $NOVA_MY_IP | awk '{print $1}'`} + if ! 
sudo rdma link | grep $iface ; then + sudo rdma link add rxe_$iface type rxe netdev $iface + fi + elif [[ $CINDER_TARGET_PROTOCOL == 'nvmet_tcp' ]]; then + sudo modprobe nvme-tcp + else # 'nvmet_fc' + sudo modprobe nvme-fc + fi + fi fi # Rebuild the config file from scratch @@ -341,11 +388,7 @@ function create_nova_accounts { create_service_user "nova" "admin" local nova_api_url - if [[ "$NOVA_USE_MOD_WSGI" == "False" ]]; then - nova_api_url="$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT" - else - nova_api_url="$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST/compute" - fi + nova_api_url="$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST/compute" get_or_create_service "nova_legacy" "compute_legacy" "Nova Compute Service (Legacy 2.0)" get_or_create_endpoint \ @@ -379,6 +422,13 @@ function create_nova_accounts { "http://$SERVICE_HOST:$S3_SERVICE_PORT" \ "http://$SERVICE_HOST:$S3_SERVICE_PORT" fi + + # Unified limits + if is_service_enabled n-api; then + if [[ "$NOVA_USE_UNIFIED_LIMITS" = True ]]; then + configure_nova_unified_limits + fi + fi } # create_nova_conf() - Create a new nova.conf file @@ -397,20 +447,23 @@ function create_nova_conf { iniset $NOVA_CONF filter_scheduler enabled_filters "$NOVA_FILTERS" iniset $NOVA_CONF scheduler workers "$API_WORKERS" iniset $NOVA_CONF neutron default_floating_pool "$PUBLIC_NETWORK_NAME" - if [[ $SERVICE_IP_VERSION == 6 ]]; then - iniset $NOVA_CONF DEFAULT my_ip "$HOST_IPV6" - else - iniset $NOVA_CONF DEFAULT my_ip "$HOST_IP" - fi + iniset $NOVA_CONF DEFAULT my_ip "$NOVA_MY_IP" iniset $NOVA_CONF DEFAULT instance_name_template "${INSTANCE_NAME_PREFIX}%08x" iniset $NOVA_CONF DEFAULT osapi_compute_listen "$NOVA_SERVICE_LISTEN_ADDRESS" iniset $NOVA_CONF DEFAULT metadata_listen "$NOVA_SERVICE_LISTEN_ADDRESS" iniset $NOVA_CONF DEFAULT shutdown_timeout $NOVA_SHUTDOWN_TIMEOUT + # Enable errors if response validation fails. We want this enabled in CI + # and development contexts to highlights bugs in our response schemas. + iniset $NOVA_CONF api response_validation error + iniset $NOVA_CONF key_manager backend nova.keymgr.conf_key_mgr.ConfKeyManager - if is_fedora || is_suse; then - # nova defaults to /usr/local/bin, but fedora and suse pip like to + iniset $NOVA_CONF DEFAULT report_interval $NOVA_SERVICE_REPORT_INTERVAL + iniset $NOVA_CONF DEFAULT service_down_time $(($NOVA_SERVICE_REPORT_INTERVAL * 6)) + + if is_fedora; then + # nova defaults to /usr/local/bin, but fedora pip like to # install things in /usr/bin iniset $NOVA_CONF DEFAULT bindir "/usr/bin" fi @@ -418,7 +471,7 @@ function create_nova_conf { # only setup database connections and cache backend if there are services # that require them running on the host. The ensures that n-cpu doesn't # leak a need to use the db in a multinode scenario. - if is_service_enabled n-api n-cond n-sched; then + if is_service_enabled n-api n-cond n-sched n-spice n-novnc n-sproxy; then # If we're in multi-tier cells mode, we want our control services pointing # at cell0 instead of cell1 to ensure isolation. If not, we point everything # at the main database like normal. 
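The report-interval wiring above is worth spelling out, since service_down_time is derived rather than set directly; with the devstack default the numbers work out as follows, and the knob can be tuned from local.conf:

    # NOVA_SERVICE_REPORT_INTERVAL=120 (the default set in this patch)
    #   report_interval   = 120s
    #   service_down_time = 120 * 6 = 720s  (six missed reports before "down")
    # To restore something closer to nova's stock 10s/60s cadence:
    NOVA_SERVICE_REPORT_INTERVAL=10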
@@ -448,10 +501,12 @@ function create_nova_conf { NOVA_ENABLED_APIS=$(echo $NOVA_ENABLED_APIS | sed "s/,metadata//") fi iniset $NOVA_CONF DEFAULT enabled_apis "$NOVA_ENABLED_APIS" - if is_service_enabled tls-proxy && [ "$NOVA_USE_MOD_WSGI" == "False" ]; then - # Set the service port for a proxy to take the original - iniset $NOVA_CONF DEFAULT osapi_compute_listen_port "$NOVA_SERVICE_PORT_INT" - iniset $NOVA_CONF DEFAULT osapi_compute_link_prefix $NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT + if [[ "$NOVA_ENFORCE_SCOPE" == "True" || "$ENFORCE_SCOPE" == "True" ]]; then + iniset $NOVA_CONF oslo_policy enforce_new_defaults True + iniset $NOVA_CONF oslo_policy enforce_scope True + else + iniset $NOVA_CONF oslo_policy enforce_new_defaults False + iniset $NOVA_CONF oslo_policy enforce_scope False fi configure_keystone_authtoken_middleware $NOVA_CONF nova @@ -461,6 +516,10 @@ function create_nova_conf { configure_cinder_access fi + if is_service_enabled manila; then + configure_manila_access + fi + if [ -n "$NOVA_STATE_PATH" ]; then iniset $NOVA_CONF DEFAULT state_path "$NOVA_STATE_PATH" iniset $NOVA_CONF oslo_concurrency lock_path "$NOVA_STATE_PATH" @@ -476,7 +535,8 @@ function create_nova_conf { fi # nova defaults to genisoimage but only mkisofs is available for 15.0+ - if is_suse; then + # rhel provides mkisofs symlink to genisoimage or xorriso appropiately + if is_fedora; then iniset $NOVA_CONF DEFAULT mkisofs_cmd /usr/bin/mkisofs fi @@ -485,8 +545,13 @@ function create_nova_conf { iniset $NOVA_CONF upgrade_levels compute "auto" - write_uwsgi_config "$NOVA_UWSGI_CONF" "$NOVA_UWSGI" "/compute" - write_uwsgi_config "$NOVA_METADATA_UWSGI_CONF" "$NOVA_METADATA_UWSGI" "" "$SERVICE_LISTEN_ADDRESS:${METADATA_SERVICE_PORT}" + if is_service_enabled n-api; then + write_uwsgi_config "$NOVA_UWSGI_CONF" "$NOVA_UWSGI" "/compute" "" "nova-api" + fi + + if is_service_enabled n-api-meta; then + write_uwsgi_config "$NOVA_METADATA_UWSGI_CONF" "$NOVA_METADATA_UWSGI" "" "$SERVICE_LISTEN_ADDRESS:${METADATA_SERVICE_PORT}" "nova-metadata" + fi if is_service_enabled ceilometer; then iniset $NOVA_CONF DEFAULT instance_usage_audit "True" @@ -569,7 +634,7 @@ function configure_placement_nova_compute { local conf=${1:-$NOVA_CONF} iniset $conf placement auth_type "password" iniset $conf placement auth_url "$KEYSTONE_SERVICE_URI" - iniset $conf placement username placement + iniset $conf placement username nova iniset $conf placement password "$SERVICE_PASSWORD" iniset $conf placement user_domain_name "$SERVICE_DOMAIN_NAME" iniset $conf placement project_name "$SERVICE_TENANT_NAME" @@ -600,6 +665,18 @@ function configure_cinder_access { fi } +# Configure access to manila. +function configure_manila_access { + iniset $NOVA_CONF manila os_region_name "$REGION_NAME" + iniset $NOVA_CONF manila auth_type "password" + iniset $NOVA_CONF manila auth_url "$KEYSTONE_SERVICE_URI" + iniset $NOVA_CONF manila username nova + iniset $NOVA_CONF manila password "$SERVICE_PASSWORD" + iniset $NOVA_CONF manila user_domain_name "$SERVICE_DOMAIN_NAME" + iniset $NOVA_CONF manila project_name "$SERVICE_TENANT_NAME" + iniset $NOVA_CONF manila project_domain_name "$SERVICE_DOMAIN_NAME" +} + function configure_console_compute { # If we are running multiple cells (and thus multiple console proxies) on a # single host, we offset the ports to avoid collisions. We need to @@ -607,10 +684,10 @@ function configure_console_compute { # can use the NOVA_CPU_CELL variable to know which cell we are for # calculating the offset. 
# Stagger the offset based on the total number of possible console proxies - # (novnc, xvpvnc, spice, serial) so that their ports will not collide if + # (novnc, spice, serial) so that their ports will not collide if # all are enabled. local offset - offset=$(((NOVA_CPU_CELL - 1) * 4)) + offset=$(((NOVA_CPU_CELL - 1) * 3)) # Use the host IP instead of the service host because for multi-node, the # service host will be the controller only. @@ -618,7 +695,7 @@ function configure_console_compute { default_proxyclient_addr=$(iniget $NOVA_CPU_CONF DEFAULT my_ip) # All nova-compute workers need to know the vnc configuration options - # These settings don't hurt anything if n-xvnc and n-novnc are disabled + # These settings don't hurt anything if n-novnc is disabled if is_service_enabled n-cpu; then if [ "$NOVNC_FROM_PACKAGE" == "True" ]; then # Use the old URL when installing novnc packages. @@ -631,13 +708,11 @@ function configure_console_compute { NOVNCPROXY_URL=${NOVNCPROXY_URL:-"http://$SERVICE_HOST:$((6080 + offset))/vnc_lite.html"} fi iniset $NOVA_CPU_CONF vnc novncproxy_base_url "$NOVNCPROXY_URL" - XVPVNCPROXY_URL=${XVPVNCPROXY_URL:-"http://$SERVICE_HOST:$((6081 + offset))/console"} - iniset $NOVA_CPU_CONF vnc xvpvncproxy_base_url "$XVPVNCPROXY_URL" - SPICEHTML5PROXY_URL=${SPICEHTML5PROXY_URL:-"http://$SERVICE_HOST:$((6082 + offset))/spice_auto.html"} + SPICEHTML5PROXY_URL=${SPICEHTML5PROXY_URL:-"http://$SERVICE_HOST:$((6081 + offset))/spice_auto.html"} iniset $NOVA_CPU_CONF spice html5proxy_base_url "$SPICEHTML5PROXY_URL" fi - if is_service_enabled n-novnc || is_service_enabled n-xvnc || [ "$NOVA_VNC_ENABLED" != False ]; then + if is_service_enabled n-novnc || [ "$NOVA_VNC_ENABLED" != False ]; then # Address on which instance vncservers will listen on compute hosts. # For multi-host, this should be the management ip of the compute host. VNCSERVER_LISTEN=${VNCSERVER_LISTEN:-$NOVA_SERVICE_LISTEN_ADDRESS} @@ -648,7 +723,7 @@ function configure_console_compute { iniset $NOVA_CPU_CONF vnc enabled false fi - if is_service_enabled n-spice; then + if is_service_enabled n-spice || [ "$NOVA_SPICE_ENABLED" != False ]; then # Address on which instance spiceservers will listen on compute hosts. # For multi-host, this should be the management ip of the compute host. SPICESERVER_PROXYCLIENT_ADDRESS=${SPICESERVER_PROXYCLIENT_ADDRESS:-$default_proxyclient_addr} @@ -658,9 +733,9 @@ function configure_console_compute { iniset $NOVA_CPU_CONF spice server_proxyclient_address "$SPICESERVER_PROXYCLIENT_ADDRESS" fi - if is_service_enabled n-sproxy; then + if is_service_enabled n-sproxy || [ "$NOVA_SERIAL_ENABLED" != False ]; then iniset $NOVA_CPU_CONF serial_console enabled True - iniset $NOVA_CPU_CONF serial_console base_url "ws://$SERVICE_HOST:$((6083 + offset))/" + iniset $NOVA_CPU_CONF serial_console base_url "ws://$SERVICE_HOST:$((6082 + offset))/" fi } @@ -669,15 +744,13 @@ function configure_console_proxies { local conf=${1:-$NOVA_CONF} local offset=${2:-0} # Stagger the offset based on the total number of possible console proxies - # (novnc, xvpvnc, spice, serial) so that their ports will not collide if + # (novnc, spice, serial) so that their ports will not collide if # all are enabled. 
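With xvpvnc gone, the proxy port math above packs three services per cell; a worked example of the resulting layout, following directly from the 6080/6081/6082 bases and offset = (NOVA_CPU_CELL - 1) * 3:

    # cell 1: offset 0 -> novnc 6080, spice 6081, serial 6082
    # cell 2: offset 3 -> novnc 6083, spice 6084, serial 6085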
- offset=$((offset * 4)) + offset=$((offset * 3)) - if is_service_enabled n-novnc || is_service_enabled n-xvnc || [ "$NOVA_VNC_ENABLED" != False ]; then + if is_service_enabled n-novnc || [ "$NOVA_VNC_ENABLED" != False ]; then iniset $conf vnc novncproxy_host "$NOVA_SERVICE_LISTEN_ADDRESS" iniset $conf vnc novncproxy_port $((6080 + offset)) - iniset $conf vnc xvpvncproxy_host "$NOVA_SERVICE_LISTEN_ADDRESS" - iniset $conf vnc xvpvncproxy_port $((6081 + offset)) if is_nova_console_proxy_compute_tls_enabled ; then iniset $conf vnc auth_schemes "vencrypt" @@ -709,15 +782,62 @@ function configure_console_proxies { if is_service_enabled n-spice; then iniset $conf spice html5proxy_host "$NOVA_SERVICE_LISTEN_ADDRESS" - iniset $conf spice html5proxy_port $((6082 + offset)) + iniset $conf spice html5proxy_port $((6081 + offset)) fi if is_service_enabled n-sproxy; then iniset $conf serial_console serialproxy_host "$NOVA_SERVICE_LISTEN_ADDRESS" - iniset $conf serial_console serialproxy_port $((6083 + offset)) + iniset $conf serial_console serialproxy_port $((6082 + offset)) fi } +function configure_nova_unified_limits { + # Registered limit resources in keystone are system-specific resources. + # Make sure we use a system-scoped token to interact with this API. + + # Default limits here mirror the legacy config-based default values. + # Note: disk quota is new in nova as of unified limits. + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 10 --region $REGION_NAME servers + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 20 --region $REGION_NAME class:VCPU + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit $((50 * 1024)) --region $REGION_NAME class:MEMORY_MB + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 20 --region $REGION_NAME class:DISK_GB + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 128 --region $REGION_NAME server_metadata_items + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 5 --region $REGION_NAME server_injected_files + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 10240 --region $REGION_NAME server_injected_file_content_bytes + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 255 --region $REGION_NAME server_injected_file_path_bytes + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 100 --region $REGION_NAME server_key_pairs + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 10 --region $REGION_NAME server_groups + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 10 --region $REGION_NAME server_group_members + + # Tell nova to use these limits + iniset $NOVA_CONF quota driver "nova.quota.UnifiedLimitsDriver" + + # Configure oslo_limit so it can talk to keystone + iniset $NOVA_CONF oslo_limit user_domain_name $SERVICE_DOMAIN_NAME + iniset $NOVA_CONF oslo_limit password $SERVICE_PASSWORD + iniset $NOVA_CONF oslo_limit username nova + iniset $NOVA_CONF oslo_limit auth_type password + iniset $NOVA_CONF oslo_limit auth_url $KEYSTONE_SERVICE_URI + iniset $NOVA_CONF oslo_limit system_scope all + iniset 
$NOVA_CONF oslo_limit endpoint_id \ + $(openstack endpoint list --service nova -f value -c ID) + + # Allow the nova service user to read quotas + openstack --os-cloud devstack-system-admin role add --user nova \ + --user-domain $SERVICE_DOMAIN_NAME --system all reader +} + function init_nova_service_user_conf { iniset $NOVA_CONF service_user send_service_user_token True iniset $NOVA_CONF service_user auth_type password @@ -727,7 +847,6 @@ function init_nova_service_user_conf { iniset $NOVA_CONF service_user user_domain_name "$SERVICE_DOMAIN_NAME" iniset $NOVA_CONF service_user project_name "$SERVICE_PROJECT_NAME" iniset $NOVA_CONF service_user project_domain_name "$SERVICE_DOMAIN_NAME" - iniset $NOVA_CONF service_user auth_strategy keystone } function conductor_conf { @@ -741,30 +860,50 @@ function create_nova_keys_dir { sudo install -d -o $STACK_USER ${NOVA_STATE_PATH} ${NOVA_STATE_PATH}/keys } +function init_nova_db { + local dbname="$1" + local conffile="$2" + recreate_database $dbname + $NOVA_BIN_DIR/nova-manage --config-file $conffile db sync --local_cell +} + # init_nova() - Initialize databases, etc. function init_nova { # All nova components talk to a central database. # Only do this step once on the API node for an entire cluster. if is_service_enabled $DATABASE_BACKENDS && is_service_enabled n-api; then + # (Re)create nova databases + if [[ "$CELLSV2_SETUP" == "singleconductor" ]]; then + # If we are doing singleconductor mode, we have some strange + # interdependencies. in that the main config refers to cell1 + # instead of cell0. In that case, just make sure the cell0 database + # is created before we need it below, but don't db_sync it until + # after the cellN databases are there. + recreate_database nova_cell0 + else + async_run nova-cell-0 init_nova_db nova_cell0 $NOVA_CONF + fi + + for i in $(seq 1 $NOVA_NUM_CELLS); do + async_run nova-cell-$i init_nova_db nova_cell${i} $(conductor_conf $i) + done + recreate_database $NOVA_API_DB $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF api_db sync - recreate_database nova_cell0 - # map_cell0 will create the cell mapping record in the nova_api DB so - # this needs to come after the api_db sync happens. We also want to run - # this before the db sync below since that will migrate both the nova - # and nova_cell0 databases. + # this needs to come after the api_db sync happens. $NOVA_BIN_DIR/nova-manage cell_v2 map_cell0 --database_connection `database_connection_url nova_cell0` - # (Re)create nova databases - for i in $(seq 1 $NOVA_NUM_CELLS); do - recreate_database nova_cell${i} - $NOVA_BIN_DIR/nova-manage --config-file $(conductor_conf $i) db sync --local_cell + # Wait for DBs to finish from above + for i in $(seq 0 $NOVA_NUM_CELLS); do + async_wait nova-cell-$i done - # Migrate nova and nova_cell0 databases. - $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF db sync + if [[ "$CELLSV2_SETUP" == "singleconductor" ]]; then + # We didn't db sync cell0 above, so run it now + $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF db sync + fi # Run online migrations on the new databases # Needed for flavor conversion @@ -809,10 +948,25 @@ function install_nova { # a websockets/html5 or flash powered VNC console for vm instances NOVNC_FROM_PACKAGE=$(trueorfalse False NOVNC_FROM_PACKAGE) if [ "$NOVNC_FROM_PACKAGE" = "True" ]; then + # Installing novnc on Debian bullseye breaks the global pip + # install. 
This happens because novnc pulls in distro cryptography + # which will be prefered by distro pip, but if anything has + # installed pyOpenSSL from pypi (keystone) that is not compatible + # with distro cryptography. Fix this by installing + # python3-openssl (pyOpenSSL) from the distro which pip will prefer + # on Debian. Ubuntu has inverse problems so we only do this for + # Debian. + local novnc_packages + novnc_packages="novnc" + GetOSVersion + if [[ "$os_VENDOR" = "Debian" ]] ; then + novnc_packages="$novnc_packages python3-openssl" + fi + NOVNC_WEB_DIR=/usr/share/novnc - install_package novnc + install_package $novnc_packages else - NOVNC_WEB_DIR=$DEST/noVNC + NOVNC_WEB_DIR=$DEST/novnc git_clone $NOVNC_REPO $NOVNC_WEB_DIR $NOVNC_BRANCH fi fi @@ -849,17 +1003,8 @@ function start_nova_api { local old_path=$PATH export PATH=$NOVA_BIN_DIR:$PATH - if [ "$NOVA_USE_MOD_WSGI" == "False" ]; then - run_process n-api "$NOVA_BIN_DIR/nova-api" - nova_url=$service_protocol://$SERVICE_HOST:$service_port - # Start proxy if tsl enabled - if is_service_enabled tls-proxy; then - start_tls_proxy nova '*' $NOVA_SERVICE_PORT $NOVA_SERVICE_HOST $NOVA_SERVICE_PORT_INT - fi - else - run_process "n-api" "$(which uwsgi) --procname-prefix nova-api --ini $NOVA_UWSGI_CONF" - nova_url=$service_protocol://$SERVICE_HOST/compute/v2.1/ - fi + run_process "n-api" "$(which uwsgi) --procname-prefix nova-api --ini $NOVA_UWSGI_CONF" + nova_url=$service_protocol://$SERVICE_HOST/compute/v2.1/ echo "Waiting for nova-api to start..." if ! wait_for_service $SERVICE_TIMEOUT $nova_url; then @@ -906,12 +1051,28 @@ function start_nova_compute { # by the compute process. configure_console_compute + # Set rebuild timeout longer for BFV instances because we likely have + # slower disk than expected. Default is 20s/GB + iniset $NOVA_CPU_CONF DEFAULT reimage_timeout_per_gb 180 + # Configure the OVSDB connection for os-vif if [ -n "$OVSDB_SERVER_LOCAL_HOST" ]; then iniset $NOVA_CPU_CONF os_vif_ovs ovsdb_connection "tcp:$OVSDB_SERVER_LOCAL_HOST:6640" fi + # Workaround bug #1939108 + if [[ "$VIRT_DRIVER" == "libvirt" && "$LIBVIRT_TYPE" == "qemu" ]]; then + iniset $NOVA_CPU_CONF workarounds libvirt_disable_apic True + fi + + if [[ "$NOVA_CPU_UUID" ]]; then + echo -n $NOVA_CPU_UUID > $NOVA_CONF_DIR/compute_id + fi + if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then + if [ ${NOVA_LIBVIRT_TB_CACHE_SIZE} -gt 0 ]; then + iniset $NOVA_CPU_CONF libvirt tb_cache_size ${NOVA_LIBVIRT_TB_CACHE_SIZE} + fi # The group **$LIBVIRT_GROUP** is added to the current user in this script. # ``sg`` is used in run_process to execute nova-compute as a member of the # **$LIBVIRT_GROUP** group. @@ -928,6 +1089,15 @@ function start_nova_compute { # gets its own configuration and own log file. 
local fake_conf="${NOVA_FAKE_CONF}-${i}" iniset $fake_conf DEFAULT host "${HOSTNAME}${i}" + # Ensure that each fake compute has its own state path so that it + # can have its own compute_id file + local state_path + state_path="$NOVA_STATE_PATH/${HOSTNAME}${i}" + COMPUTE_ID=$(uuidgen) + sudo mkdir -p "$state_path" + iniset $fake_conf DEFAULT state_path "$state_path" + # use the generated UUID as the stable compute node UUID + echo "$COMPUTE_ID" | sudo tee "$state_path/compute_id" run_process "n-cpu-${i}" "$NOVA_BIN_DIR/nova-compute --config-file $NOVA_CPU_CONF --config-file $fake_conf" done else @@ -946,22 +1116,17 @@ function start_nova_rest { local old_path=$PATH export PATH=$NOVA_BIN_DIR:$PATH - local api_cell_conf=$NOVA_CONF local compute_cell_conf=$NOVA_CONF run_process n-sch "$NOVA_BIN_DIR/nova-scheduler --config-file $compute_cell_conf" - if [ "$NOVA_USE_MOD_WSGI" == "False" ]; then - run_process n-api-meta "$NOVA_BIN_DIR/nova-api-metadata --config-file $compute_cell_conf" - else - run_process n-api-meta "$(which uwsgi) --procname-prefix nova-api-meta --ini $NOVA_METADATA_UWSGI_CONF" - fi + run_process n-api-meta "$(which uwsgi) --procname-prefix nova-api-meta --ini $NOVA_METADATA_UWSGI_CONF" export PATH=$old_path } function enable_nova_console_proxies { for i in $(seq 1 $NOVA_NUM_CELLS); do - for srv in n-novnc n-xvnc n-spice n-sproxy; do + for srv in n-novnc n-spice n-sproxy; do if is_service_enabled $srv; then enable_service ${srv}-cell${i} fi @@ -979,7 +1144,6 @@ function start_nova_console_proxies { # console proxies run globally for singleconductor, else they run per cell if [[ "${CELLSV2_SETUP}" == "singleconductor" ]]; then run_process n-novnc "$NOVA_BIN_DIR/nova-novncproxy --config-file $api_cell_conf --web $NOVNC_WEB_DIR" - run_process n-xvnc "$NOVA_BIN_DIR/nova-xvpvncproxy --config-file $api_cell_conf" run_process n-spice "$NOVA_BIN_DIR/nova-spicehtml5proxy --config-file $api_cell_conf --web $SPICE_WEB_DIR" run_process n-sproxy "$NOVA_BIN_DIR/nova-serialproxy --config-file $api_cell_conf" else @@ -988,7 +1152,6 @@ function start_nova_console_proxies { local conf conf=$(conductor_conf $i) run_process n-novnc-cell${i} "$NOVA_BIN_DIR/nova-novncproxy --config-file $conf --web $NOVNC_WEB_DIR" - run_process n-xvnc-cell${i} "$NOVA_BIN_DIR/nova-xvpvncproxy --config-file $conf" run_process n-spice-cell${i} "$NOVA_BIN_DIR/nova-spicehtml5proxy --config-file $conf --web $SPICE_WEB_DIR" run_process n-sproxy-cell${i} "$NOVA_BIN_DIR/nova-serialproxy --config-file $conf" done @@ -1033,14 +1196,6 @@ function is_nova_ready { # happen between here and the script ending. However, in multinode # tests this can very often not be the case. So ensure that the # compute is up before we move on. - - # TODO(sdague): honestly, this probably should be a plug point for - # an external system. - if [[ "$VIRT_DRIVER" == 'xenserver' ]]; then - # xenserver encodes information in the hostname of the compute - # because of the dom0/domU split. Just ignore for now. 
- return - fi wait_for_compute $NOVA_READY_TIMEOUT } @@ -1079,13 +1234,13 @@ function stop_nova_rest { function stop_nova_console_proxies { if [[ "${CELLSV2_SETUP}" == "singleconductor" ]]; then - for srv in n-novnc n-xvnc n-spice n-sproxy; do + for srv in n-novnc n-spice n-sproxy; do stop_process $srv done else enable_nova_console_proxies for i in $(seq 1 $NOVA_NUM_CELLS); do - for srv in n-novnc n-xvnc n-spice n-sproxy; do + for srv in n-novnc n-spice n-sproxy; do stop_process ${srv}-cell${i} done done diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt index d3827c30dd..c0713f9953 100644 --- a/lib/nova_plugins/functions-libvirt +++ b/lib/nova_plugins/functions-libvirt @@ -56,17 +56,25 @@ EOF # Installs required distro-specific libvirt packages. function install_libvirt { + # NOTE(yoctozepto): The common consensus [1] is that libvirt-python should + # be installed from distro packages. However, various projects might be + # trying to ensure it is installed using pip AND use upper-constraints + # with that, causing pip to try to upgrade it and to fail. + # The following line removes libvirt-python from upper-constraints and + # avoids the situation described above. Now only if installed packages + # explicitly depend on a newer (or, in general, incompatible) libvirt-python + # version, will pip try to reinstall it. + # [1] https://review.opendev.org/c/openstack/devstack/+/798514 + $REQUIREMENTS_DIR/.venv/bin/edit-constraints \ + $REQUIREMENTS_DIR/upper-constraints.txt -- libvirt-python if is_ubuntu; then - install_package qemu-system libvirt-clients libvirt-daemon-system libvirt-dev + install_package qemu-system libvirt-clients libvirt-daemon-system libvirt-dev python3-libvirt systemd-coredump if is_arch "aarch64"; then - install_package qemu-efi + install_package qemu-efi-aarch64 fi - # uninstall in case the libvirt version changed - pip_uninstall libvirt-python - pip_install_gr libvirt-python #pip_install_gr - elif is_fedora || is_suse; then + elif is_fedora; then # Optionally enable the virt-preview repo when on Fedora if [[ $DISTRO =~ f[0-9][0-9] ]] && [[ ${ENABLE_FEDORA_VIRT_PREVIEW_REPO} == "True" ]]; then @@ -74,19 +82,28 @@ function install_libvirt { sudo dnf copr enable -y @virtmaint-sig/virt-preview fi + if is_openeuler; then + qemu_package=qemu + else + qemu_package=qemu-kvm + fi + # Note that in CentOS/RHEL this needs to come from the RDO # repositories (qemu-kvm-ev ... which provides this package) # as the base system version is too old. 
We should have # pre-installed these - install_package qemu-kvm - + install_package $qemu_package install_package libvirt libvirt-devel - if is_arch "aarch64"; then - install_package edk2.git-aarch64 + + if [[ $DISTRO =~ rhel9 ]]; then + pip_install_gr libvirt-python + else + install_package python3-libvirt fi - pip_uninstall libvirt-python - pip_install_gr libvirt-python + if is_arch "aarch64"; then + install_package edk2-aarch64 + fi fi if [[ $DEBUG_LIBVIRT_COREDUMPS == True ]]; then @@ -110,8 +127,8 @@ cgroup_device_acl = [ EOF fi - if is_fedora || is_suse; then - # Starting with fedora 18 and opensuse-12.3 enable stack-user to + if is_fedora; then + # Starting with fedora 18 enable stack-user to # virsh -c qemu:///system by creating a policy-kit rule for # stack-user using the new Javascript syntax rules_dir=/etc/polkit-1/rules.d diff --git a/lib/nova_plugins/hypervisor-fake b/lib/nova_plugins/hypervisor-fake index 87ee49fa4b..39cb45ca67 100644 --- a/lib/nova_plugins/hypervisor-fake +++ b/lib/nova_plugins/hypervisor-fake @@ -36,7 +36,7 @@ function cleanup_nova_hypervisor { # configure_nova_hypervisor - Set config files, create data dirs, etc function configure_nova_hypervisor { - iniset $NOVA_CONF DEFAULT compute_driver "fake.FakeDriver" + iniset $NOVA_CONF DEFAULT compute_driver "fake.FakeDriverWithoutFakeNodes" # Disable arbitrary limits iniset $NOVA_CONF quota driver nova.quota.NoopQuotaDriver } diff --git a/lib/nova_plugins/hypervisor-ironic b/lib/nova_plugins/hypervisor-ironic index bda6ef6998..9a39c798a8 100644 --- a/lib/nova_plugins/hypervisor-ironic +++ b/lib/nova_plugins/hypervisor-ironic @@ -47,9 +47,17 @@ function configure_nova_hypervisor { iniset $NOVA_CONF ironic username admin iniset $NOVA_CONF ironic password $ADMIN_PASSWORD iniset $NOVA_CONF ironic auth_url $KEYSTONE_SERVICE_URI - iniset $NOVA_CONF ironic project_domain_id default + if is_ironic_enforce_scope; then + iniset $NOVA_CONF ironic system_scope all + else + iniset $NOVA_CONF ironic project_domain_id default + iniset $NOVA_CONF ironic project_name demo + fi + if is_ironic_sharded; then + iniset $NOVA_CONF ironic shard $IRONIC_SHARD_1_NAME + fi + iniset $NOVA_CONF ironic user_domain_id default - iniset $NOVA_CONF ironic project_name demo iniset $NOVA_CONF ironic region_name $REGION_NAME # These are used with crufty legacy ironicclient @@ -82,7 +90,6 @@ function stop_nova_hypervisor { : } - # Restore xtrace $_XTRACE_HYP_IRONIC diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt index 321775d324..4b44c1f86f 100644 --- a/lib/nova_plugins/hypervisor-libvirt +++ b/lib/nova_plugins/hypervisor-libvirt @@ -40,6 +40,9 @@ function configure_nova_hypervisor { configure_libvirt iniset $NOVA_CONF libvirt virt_type "$LIBVIRT_TYPE" iniset $NOVA_CONF libvirt cpu_mode "$LIBVIRT_CPU_MODE" + if [ "$LIBVIRT_CPU_MODE" == "custom" ] ; then + iniset $NOVA_CONF libvirt cpu_model "$LIBVIRT_CPU_MODEL" + fi # Do not enable USB tablet input devices to avoid QEMU CPU overhead. iniset $NOVA_CONF DEFAULT pointer_model "ps2mouse" iniset $NOVA_CONF libvirt live_migration_uri "qemu+ssh://$STACK_USER@%s/system" @@ -53,6 +56,10 @@ function configure_nova_hypervisor { # arm64-specific configuration if is_arch "aarch64"; then iniset $NOVA_CONF libvirt cpu_mode "host-passthrough" + # NOTE(mnaser): We cannot have `cpu_model` set if the `cpu_mode` is + # set to `host-passthrough`, or `nova-compute` refuses to + # start. 
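# For illustration (hypothetical local.conf values, not defaults): setting
#   LIBVIRT_CPU_MODE=custom
#   LIBVIRT_CPU_MODEL=Nehalem
# yields cpu_mode=custom and cpu_model=Nehalem in nova.conf's [libvirt]
# section via the block above, while on aarch64 the model key is deleted
# again here so that only host-passthrough remains in effect.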
+ inidelete $NOVA_CONF libvirt cpu_model fi if isset ENABLE_FILE_INJECTION; then @@ -111,9 +118,6 @@ function install_nova_hypervisor { sudo dpkg-statoverride --add --update $STAT_OVERRIDE fi done - elif is_suse; then - # Workaround for missing dependencies in python-libguestfs - install_package python-libguestfs guestfs-data augeas augeas-lenses elif is_fedora; then install_package python3-libguestfs fi diff --git a/lib/nova_plugins/hypervisor-xenserver b/lib/nova_plugins/hypervisor-xenserver deleted file mode 100644 index 511ec1bc09..0000000000 --- a/lib/nova_plugins/hypervisor-xenserver +++ /dev/null @@ -1,107 +0,0 @@ -#!/bin/bash -# -# lib/nova_plugins/hypervisor-xenserver -# Configure the XenServer hypervisor - -# Enable with: -# VIRT_DRIVER=xenserver - -# Dependencies: -# ``functions`` file -# ``nova`` configuration - -# install_nova_hypervisor - install any external requirements -# configure_nova_hypervisor - make configuration changes, including those to other services -# start_nova_hypervisor - start any external services -# stop_nova_hypervisor - stop any external services -# cleanup_nova_hypervisor - remove transient data and cache - -# Save trace setting -_XTRACE_XENSERVER=$(set +o | grep xtrace) -set +o xtrace - - -# Defaults -# -------- - -VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=169.254.0.1} - - -# Entry Points -# ------------ - -# clean_nova_hypervisor - Clean up an installation -function cleanup_nova_hypervisor { - # This function intentionally left blank - : -} - -# configure_nova_hypervisor - Set config files, create data dirs, etc -function configure_nova_hypervisor { - if [ -z "$XENAPI_CONNECTION_URL" ]; then - die $LINENO "XENAPI_CONNECTION_URL is not specified" - fi - - # Check os-xenapi plugin is enabled - local plugins="${DEVSTACK_PLUGINS}" - local plugin - local found=0 - for plugin in ${plugins//,/ }; do - if [[ "$plugin" = "os-xenapi" ]]; then - found=1 - break - fi - done - if [[ $found -ne 1 ]]; then - die $LINENO "os-xenapi plugin is not specified. 
Please enable this plugin in local.conf" - fi - - iniset $NOVA_CONF DEFAULT compute_driver "xenapi.XenAPIDriver" - iniset $NOVA_CONF xenserver connection_url "$XENAPI_CONNECTION_URL" - iniset $NOVA_CONF xenserver connection_username "$XENAPI_USER" - iniset $NOVA_CONF xenserver connection_password "$XENAPI_PASSWORD" - iniset $NOVA_CONF DEFAULT flat_injected "False" - - local dom0_ip - dom0_ip=$(echo "$XENAPI_CONNECTION_URL" | cut -d "/" -f 3-) - - local ssh_dom0 - ssh_dom0="sudo -u $DOMZERO_USER ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null root@$dom0_ip" - - # install console logrotate script - tar -czf - -C $NOVA_DIR/tools/xenserver/ rotate_xen_guest_logs.sh | - $ssh_dom0 'tar -xzf - -C /root/ && chmod +x /root/rotate_xen_guest_logs.sh && mkdir -p /var/log/xen/guest' - - # Create a cron job that will rotate guest logs - $ssh_dom0 crontab - << CRONTAB -* * * * * /root/rotate_xen_guest_logs.sh >/dev/null 2>&1 -CRONTAB - -} - -# install_nova_hypervisor() - Install external components -function install_nova_hypervisor { - # xenapi functionality is now included in os-xenapi library which houses the plugin - # so this function intentionally left blank - : -} - -# start_nova_hypervisor - Start any required external services -function start_nova_hypervisor { - # This function intentionally left blank - : -} - -# stop_nova_hypervisor - Stop any external services -function stop_nova_hypervisor { - # This function intentionally left blank - : -} - - -# Restore xtrace -$_XTRACE_XENSERVER - -# Local variables: -# mode: shell-script -# End: diff --git a/lib/os-vif b/lib/os-vif new file mode 100644 index 0000000000..7c8bee3744 --- /dev/null +++ b/lib/os-vif @@ -0,0 +1,22 @@ +#!/bin/bash + +function is_ml2_ovs { + if [[ "${Q_AGENT}" == "openvswitch" ]]; then + echo "True" + fi + echo "False" +} + +# This should be true for any ml2/ovs job but should be set to false for +# all other ovs based jobs e.g. ml2/ovn +OS_VIF_OVS_ISOLATE_VIF=${OS_VIF_OVS_ISOLATE_VIF:=$(is_ml2_ovs)} +OS_VIF_OVS_ISOLATE_VIF=$(trueorfalse False OS_VIF_OVS_ISOLATE_VIF) + +function configure_os_vif { + if [[ -e ${NOVA_CONF} ]]; then + iniset ${NOVA_CONF} os_vif_ovs isolate_vif ${OS_VIF_OVS_ISOLATE_VIF} + fi + if [[ -e ${NEUTRON_CONF} ]]; then + iniset ${NEUTRON_CONF} os_vif_ovs isolate_vif ${OS_VIF_OVS_ISOLATE_VIF} + fi +} diff --git a/lib/oslo b/lib/oslo deleted file mode 100644 index 3ae64c8210..0000000000 --- a/lib/oslo +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash -# -# lib/oslo -# -# Functions to install **Oslo** libraries from git -# -# We need this to handle the fact that projects would like to use -# pre-released versions of oslo libraries. -# -# Included for compatibility with grenade, remove in Queens -source $TOP_DIR/lib/libraries diff --git a/lib/placement b/lib/placement index b7798669a1..03aaa0344b 100644 --- a/lib/placement +++ b/lib/placement @@ -37,7 +37,7 @@ if [[ ${USE_VENV} = True ]]; then else PLACEMENT_BIN_DIR=$(get_python_exec_prefix) fi -PLACEMENT_UWSGI=$PLACEMENT_BIN_DIR/placement-api +PLACEMENT_UWSGI=placement.wsgi.api:application PLACEMENT_UWSGI_CONF=$PLACEMENT_CONF_DIR/placement-uwsgi.ini if is_service_enabled tls-proxy; then @@ -48,6 +48,12 @@ fi PLACEMENT_SERVICE_PROTOCOL=${PLACEMENT_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} PLACEMENT_SERVICE_HOST=${PLACEMENT_SERVICE_HOST:-$SERVICE_HOST} +# Flag to set the oslo_policy.enforce_scope and oslo_policy.enforce_new_defaults. +# This is used to switch the Placement API policies scope and new defaults. +# By Default, these flag are False. 
+# For more detail: https://docs.openstack.org/oslo.policy/latest/configuration/index.html#oslo_policy.enforce_scope +PLACEMENT_ENFORCE_SCOPE=$(trueorfalse False PLACEMENT_ENFORCE_SCOPE) + # Functions # --------- @@ -62,33 +68,7 @@ function is_placement_enabled { # runs that a clean run would need to clean up function cleanup_placement { sudo rm -f $(apache_site_config_for placement-api) - remove_uwsgi_config "$PLACEMENT_UWSGI_CONF" "$PLACEMENT_UWSGI" -} - -# _config_placement_apache_wsgi() - Set WSGI config files -function _config_placement_apache_wsgi { - local placement_api_apache_conf - local venv_path="" - local placement_bin_dir="" - placement_bin_dir=$(get_python_exec_prefix) - placement_api_apache_conf=$(apache_site_config_for placement-api) - - if [[ ${USE_VENV} = True ]]; then - venv_path="python-path=${PROJECT_VENV["placement"]}/lib/$(python_version)/site-packages" - placement_bin_dir=${PROJECT_VENV["placement"]}/bin - fi - - sudo cp $FILES/apache-placement-api.template $placement_api_apache_conf - sudo sed -e " - s|%APACHE_NAME%|$APACHE_NAME|g; - s|%PUBLICWSGI%|$placement_bin_dir/placement-api|g; - s|%SSLENGINE%|$placement_ssl|g; - s|%SSLCERTFILE%|$placement_certfile|g; - s|%SSLKEYFILE%|$placement_keyfile|g; - s|%USER%|$STACK_USER|g; - s|%VIRTUALENV%|$venv_path|g - s|%APIWORKERS%|$API_WORKERS|g - " -i $placement_api_apache_conf + remove_uwsgi_config "$PLACEMENT_UWSGI_CONF" "placement-api" } # create_placement_conf() - Write config @@ -106,10 +86,13 @@ function configure_placement { sudo install -d -o $STACK_USER $PLACEMENT_CONF_DIR create_placement_conf - if [[ "$WSGI_MODE" == "uwsgi" ]]; then - write_uwsgi_config "$PLACEMENT_UWSGI_CONF" "$PLACEMENT_UWSGI" "/placement" + write_uwsgi_config "$PLACEMENT_UWSGI_CONF" "$PLACEMENT_UWSGI" "/placement" "" "placement-api" + if [[ "$PLACEMENT_ENFORCE_SCOPE" == "True" || "$ENFORCE_SCOPE" == "True" ]]; then + iniset $PLACEMENT_CONF oslo_policy enforce_new_defaults True + iniset $PLACEMENT_CONF oslo_policy enforce_scope True else - _config_placement_apache_wsgi + iniset $PLACEMENT_CONF oslo_policy enforce_new_defaults False + iniset $PLACEMENT_CONF oslo_policy enforce_scope False fi } @@ -134,7 +117,6 @@ function init_placement { # install_placement() - Collect source and prepare function install_placement { - install_apache_wsgi # Install the openstackclient placement client plugin for CLI pip_install_gr osc-placement git_clone $PLACEMENT_REPO $PLACEMENT_DIR $PLACEMENT_BRANCH @@ -143,12 +125,7 @@ function install_placement { # start_placement_api() - Start the API processes ahead of other things function start_placement_api { - if [[ "$WSGI_MODE" == "uwsgi" ]]; then - run_process "placement-api" "$(which uwsgi) --procname-prefix placement --ini $PLACEMENT_UWSGI_CONF" - else - enable_apache_site placement-api - restart_apache_server - fi + run_process "placement-api" "$(which uwsgi) --procname-prefix placement --ini $PLACEMENT_UWSGI_CONF" echo "Waiting for placement-api to start..." if ! wait_for_service $SERVICE_TIMEOUT $PLACEMENT_SERVICE_PROTOCOL://$PLACEMENT_SERVICE_HOST/placement; then @@ -162,12 +139,7 @@ function start_placement { # stop_placement() - Disable the api service and stop it. 
function stop_placement { - if [[ "$WSGI_MODE" == "uwsgi" ]]; then - stop_process "placement-api" - else - disable_apache_site placement-api - restart_apache_server - fi + stop_process "placement-api" } # Restore xtrace diff --git a/lib/rpc_backend b/lib/rpc_backend index 743b4ae170..bbb41499be 100644 --- a/lib/rpc_backend +++ b/lib/rpc_backend @@ -52,20 +52,7 @@ function install_rpc_backend { if is_service_enabled rabbit; then # Install rabbitmq-server install_package rabbitmq-server - if is_suse; then - install_package rabbitmq-server-plugins - # the default systemd socket activation only listens on the loopback interface - # which causes rabbitmq to try to start its own epmd - sudo mkdir -p /etc/systemd/system/epmd.socket.d - cat </dev/null -[Socket] -ListenStream= -ListenStream=[::]:4369 -EOF - sudo systemctl daemon-reload - sudo systemctl restart epmd.socket epmd.service - fi - if is_fedora || is_suse; then + if is_fedora; then # NOTE(jangutter): If rabbitmq is not running (as in a fresh # install) then rabbit_setuser triggers epmd@0.0.0.0.socket with # socket activation. This fails the first time and does not get diff --git a/lib/swift b/lib/swift index 790fb99442..862927437d 100644 --- a/lib/swift +++ b/lib/swift @@ -179,12 +179,9 @@ function is_swift_enabled { # cleanup_swift() - Remove residual data files function cleanup_swift { rm -f ${SWIFT_CONF_DIR}{*.builder,*.ring.gz,backups/*.builder,backups/*.ring.gz} - if egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then - sudo umount ${SWIFT_DATA_DIR}/drives/sdb1 - fi - if [[ -e ${SWIFT_DISK_IMAGE} ]]; then - rm ${SWIFT_DISK_IMAGE} - fi + + destroy_disk ${SWIFT_DISK_IMAGE} ${SWIFT_DATA_DIR}/drives/sdb1 + rm -rf ${SWIFT_DATA_DIR}/run/ if [ "$SWIFT_USE_MOD_WSGI" == "True" ]; then _cleanup_swift_apache_wsgi @@ -321,8 +318,8 @@ function generate_swift_config_services { iniuncomment ${swift_node_config} DEFAULT mount_check iniset ${swift_node_config} DEFAULT mount_check false - iniuncomment ${swift_node_config} ${server_type}-replicator vm_test_mode - iniset ${swift_node_config} ${server_type}-replicator vm_test_mode yes + iniuncomment ${swift_node_config} ${server_type}-replicator rsync_module + iniset ${swift_node_config} ${server_type}-replicator rsync_module "{replication_ip}::${server_type}{replication_port}" # Using a sed and not iniset/iniuncomment because we want to a global # modification and make sure it works for new sections. @@ -335,7 +332,6 @@ function configure_swift { local node_number local swift_node_config local swift_log_dir - local user_group # Make sure to kill all swift processes first $SWIFT_BIN_DIR/swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true @@ -353,7 +349,7 @@ function configure_swift { # partitions (which make more sense when you have a multi-node # setup) we configure it with our version of rsync. 
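# For illustration (assuming STACK_USER=stack): $(id -g -n stack) usually
# prints "stack", so the rendered /etc/rsyncd.conf now carries the stack
# user's real primary group instead of the removed USER_GROUP variable; the
# %USER%, %GROUP% and %SWIFT_DATA_DIR% placeholders are filled in by the
# sed below.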
sed -e " - s/%GROUP%/${USER_GROUP}/; + s/%GROUP%/$(id -g -n ${STACK_USER})/; s/%USER%/${STACK_USER}/; s,%SWIFT_DATA_DIR%,$SWIFT_DATA_DIR,; " $FILES/swift/rsyncd.conf | sudo tee /etc/rsyncd.conf @@ -406,6 +402,11 @@ function configure_swift { # Versioned Writes iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:versioned_writes allow_versioned_writes true + # Add sha1 temporary https://storyboard.openstack.org/#!/story/2010068 + if [[ "$SWIFT_ENABLE_TEMPURLS" == "True" ]]; then + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:tempurl allowed_digests "sha1 sha256 sha512" + fi + # Configure Ceilometer if is_service_enabled ceilometer; then iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer "set log_level" "WARN" @@ -431,7 +432,7 @@ function configure_swift { swift_pipeline+=" authtoken" if is_service_enabled s3api;then swift_pipeline+=" s3token" - iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:s3token auth_uri ${KEYSTONE_AUTH_URI_V3} + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:s3token auth_uri ${KEYSTONE_SERVICE_URI_V3} iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:s3token delay_auth_decision true fi swift_pipeline+=" keystoneauth" @@ -522,7 +523,7 @@ function configure_swift { local auth_vers auth_vers=$(iniget ${testfile} func_test auth_version) iniset ${testfile} func_test auth_host ${KEYSTONE_SERVICE_HOST} - if [[ "$KEYSTONE_AUTH_PROTOCOL" == "https" ]]; then + if [[ "$KEYSTONE_SERVICE_PROTOCOL" == "https" ]]; then iniset ${testfile} func_test auth_port 443 else iniset ${testfile} func_test auth_port 80 @@ -546,9 +547,6 @@ function configure_swift { local swift_log_dir=${SWIFT_DATA_DIR}/logs sudo rm -rf ${swift_log_dir} local swift_log_group=adm - if is_suse; then - swift_log_group=root - fi sudo install -d -o ${STACK_USER} -g ${swift_log_group} ${swift_log_dir}/hourly if [[ $SYSLOG != "False" ]]; then @@ -576,28 +574,7 @@ function create_swift_disk { sudo install -d -o ${STACK_USER} -g ${user_group} ${SWIFT_DATA_DIR}/{drives,cache,run,logs} # Create a loopback disk and format it to XFS. - if [[ -e ${SWIFT_DISK_IMAGE} ]]; then - if egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then - sudo umount ${SWIFT_DATA_DIR}/drives/sdb1 - sudo rm -f ${SWIFT_DISK_IMAGE} - fi - fi - - mkdir -p ${SWIFT_DATA_DIR}/drives/images - sudo touch ${SWIFT_DISK_IMAGE} - sudo chown ${STACK_USER}: ${SWIFT_DISK_IMAGE} - - truncate -s ${SWIFT_LOOPBACK_DISK_SIZE} ${SWIFT_DISK_IMAGE} - - # Make a fresh XFS filesystem - /sbin/mkfs.xfs -f -i size=1024 ${SWIFT_DISK_IMAGE} - - # Mount the disk with mount options to make it as efficient as possible - mkdir -p ${SWIFT_DATA_DIR}/drives/sdb1 - if ! egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then - sudo mount -t xfs -o loop,noatime,nodiratime,logbufs=8 \ - ${SWIFT_DISK_IMAGE} ${SWIFT_DATA_DIR}/drives/sdb1 - fi + create_disk ${SWIFT_DISK_IMAGE} ${SWIFT_DATA_DIR}/drives/sdb1 ${SWIFT_LOOPBACK_DISK_SIZE} # Create a link to the above mount and # create all of the directories needed to emulate a few different servers @@ -867,12 +844,15 @@ function stop_swift { function swift_configure_tempurls { # note we are using swift credentials! 
- OS_USERNAME=swift \ - OS_PASSWORD=$SERVICE_PASSWORD \ - OS_USER_DOMAIN_NAME=$SERVICE_DOMAIN_NAME \ - OS_PROJECT_NAME=$SERVICE_PROJECT_NAME \ - OS_PROJECT_DOMAIN_NAME=$SERVICE_DOMAIN_NAME \ - openstack object store account \ + openstack --os-cloud="" \ + --os-region-name="$REGION_NAME" \ + --os-auth-url="$KEYSTONE_SERVICE_URI" \ + --os-username="swift" \ + --os-password="$SERVICE_PASSWORD" \ + --os-user-domain-name="$SERVICE_DOMAIN_NAME" \ + --os-project-name="$SERVICE_PROJECT_NAME" \ + --os-project-domain-name="$SERVICE_DOMAIN_NAME" \ + object store account \ set --property "Temp-URL-Key=$SWIFT_TEMPURL_KEY" } diff --git a/lib/tempest b/lib/tempest index 8ee986d555..1ebe9c5f1f 100644 --- a/lib/tempest +++ b/lib/tempest @@ -18,7 +18,7 @@ # - ``PUBLIC_NETWORK_NAME`` # - ``VIRT_DRIVER`` # - ``LIBVIRT_TYPE`` -# - ``KEYSTONE_SERVICE_URI``, ``KEYSTONE_SERVICE_URI_V3`` from lib/keystone +# - ``KEYSTONE_SERVICE_URI_V3`` from lib/keystone # # Optional Dependencies: # @@ -29,6 +29,7 @@ # - ``DEFAULT_INSTANCE_USER`` # - ``DEFAULT_INSTANCE_ALT_USER`` # - ``CINDER_ENABLED_BACKENDS`` +# - ``CINDER_BACKUP_DRIVER`` # - ``NOVA_ALLOW_DUPLICATE_NETWORKS`` # # ``stack.sh`` calls the entry points in this order: @@ -71,6 +72,17 @@ TEMPEST_VOLUME_VENDOR=${TEMPEST_VOLUME_VENDOR:-$TEMPEST_DEFAULT_VOLUME_VENDOR} TEMPEST_DEFAULT_STORAGE_PROTOCOL="iSCSI" TEMPEST_STORAGE_PROTOCOL=${TEMPEST_STORAGE_PROTOCOL:-$TEMPEST_DEFAULT_STORAGE_PROTOCOL} +# Glance/Image variables +# When Glance image import is enabled, image creation is asynchronous and images +# may not yet be active when tempest looks for them. In that case, we poll +# Glance every TEMPEST_GLANCE_IMPORT_POLL_INTERVAL seconds for the number of +# times specified by TEMPEST_GLANCE_IMPORT_POLL_LIMIT. If you are importing +# multiple images, set TEMPEST_GLANCE_IMAGE_COUNT so the poller does not quit +# too early (though it will not exceed the polling limit). +TEMPEST_GLANCE_IMPORT_POLL_INTERVAL=${TEMPEST_GLANCE_IMPORT_POLL_INTERVAL:-1} +TEMPEST_GLANCE_IMPORT_POLL_LIMIT=${TEMPEST_GLANCE_IMPORT_POLL_LIMIT:-12} +TEMPEST_GLANCE_IMAGE_COUNT=${TEMPEST_GLANCE_IMAGE_COUNT:-1} + # Neutron/Network variables IPV6_ENABLED=$(trueorfalse True IPV6_ENABLED) IPV6_SUBNET_ATTRIBUTES_ENABLED=$(trueorfalse True IPV6_SUBNET_ATTRIBUTES_ENABLED) @@ -90,6 +102,10 @@ TEMPEST_USE_TEST_ACCOUNTS=$(trueorfalse False TEMPEST_USE_TEST_ACCOUNTS) # it will run tempest with TEMPEST_CONCURRENCY=${TEMPEST_CONCURRENCY:-$(nproc)} +TEMPEST_FLAVOR_RAM=${TEMPEST_FLAVOR_RAM:-192} +TEMPEST_FLAVOR_ALT_RAM=${TEMPEST_FLAVOR_ALT_RAM:-256} + +TEMPEST_USE_ISO_IMAGE=$(trueorfalse False TEMPEST_USE_ISO_IMAGE) # Functions # --------- @@ -107,10 +123,81 @@ function remove_disabled_extensions { # Takes an image ID parameter as input function image_size_in_gib { local size - size=$(openstack image show $1 -c size -f value) + size=$(openstack --os-cloud devstack-admin image show $1 -c size -f value) echo $size | python3 -c "import math; print(int(math.ceil(float(int(input()) / 1024.0 ** 3))))" } +function set_tempest_venv_constraints { + local tmp_c + tmp_c=$1 + if [[ $TEMPEST_VENV_UPPER_CONSTRAINTS == "master" ]]; then + (cd $REQUIREMENTS_DIR && + git show master:upper-constraints.txt 2>/dev/null || + git show origin/master:upper-constraints.txt) > $tmp_c + # NOTE(gmann): we need to set the below env var pointing to master + # constraints even that is what default in tox.ini. Otherwise it can + # create the issue for grenade run where old and new devstack can have + # different tempest (old and master) to install. 
For detail problem, + # refer to the https://bugs.launchpad.net/devstack/+bug/2003993 + export UPPER_CONSTRAINTS_FILE=https://releases.openstack.org/constraints/upper/master + export TOX_CONSTRAINTS_FILE=https://releases.openstack.org/constraints/upper/master + else + echo "Using $TEMPEST_VENV_UPPER_CONSTRAINTS constraints in Tempest virtual env." + cat $TEMPEST_VENV_UPPER_CONSTRAINTS > $tmp_c + # NOTE: setting both tox env var and once Tempest start using new var + # TOX_CONSTRAINTS_FILE then we can remove the old one. + export UPPER_CONSTRAINTS_FILE=$TEMPEST_VENV_UPPER_CONSTRAINTS + export TOX_CONSTRAINTS_FILE=$TEMPEST_VENV_UPPER_CONSTRAINTS + fi +} + +# Makes a call to glance to get a list of active images, ignoring +# ramdisk and kernel images. Takes 3 arguments, an array and two +# variables. The array will contain the list of active image UUIDs; +# if an image with ``DEFAULT_IMAGE_NAME`` is found, its UUID will be +# set as the value img_id ($2) parameters. +function get_active_images { + declare -n img_array=$1 + declare -n img_id=$2 + + # start with a fresh array in case we are called multiple times + img_array=() + + # NOTE(gmaan): Most of the iso image require ssh to be enabled explicitly + # and if we set those iso images in image_ref and image_ref_alt that can + # cause test to fail because many tests using image_ref and image_ref_alt + # to boot server also perform ssh. We skip to set iso image in tempest + # unless it is requested via TEMPEST_USE_ISO_IMAGE. + while read -r IMAGE_NAME IMAGE_UUID DISK_FORMAT; do + if [[ "$DISK_FORMAT" == "iso" ]] && [[ "$TEMPEST_USE_ISO_IMAGE" == False ]]; then + continue + fi + if [ "$IMAGE_NAME" = "$DEFAULT_IMAGE_NAME" ]; then + img_id="$IMAGE_UUID" + fi + img_array+=($IMAGE_UUID) + done < <(openstack --os-cloud devstack-admin image list --long --property status=active | awk -F'|' '!/^(+--)|ID|aki|ari/ { print $3,$2,$4 }') +} + +function poll_glance_images { + declare -n image_array=$1 + declare -n image_id=$2 + local -i poll_count + + poll_count=$TEMPEST_GLANCE_IMPORT_POLL_LIMIT + while (( poll_count-- > 0 )) ; do + sleep $TEMPEST_GLANCE_IMPORT_POLL_INTERVAL + get_active_images image_array image_id + if (( ${#image_array[*]} >= $TEMPEST_GLANCE_IMAGE_COUNT )) ; then + return + fi + done + local msg + msg="Polling limit of $TEMPEST_GLANCE_IMPORT_POLL_LIMIT exceeded; " + msg+="poll interval was $TEMPEST_GLANCE_IMPORT_POLL_INTERVAL sec" + warn $LINENO "$msg" +} + # configure_tempest() - Set config files, create data dirs, etc function configure_tempest { if [[ "$INSTALL_TEMPEST" == "True" ]]; then @@ -120,6 +207,8 @@ function configure_tempest { pip_install_gr testrepository fi + local ENABLED_SERVICES=${SERVICES_FOR_TEMPEST:=$ENABLED_SERVICES} + local image_lines local images local num_images @@ -152,13 +241,21 @@ function configure_tempest { declare -a images if is_service_enabled glance; then - while read -r IMAGE_NAME IMAGE_UUID; do - if [ "$IMAGE_NAME" = "$DEFAULT_IMAGE_NAME" ]; then - image_uuid="$IMAGE_UUID" - image_uuid_alt="$IMAGE_UUID" + get_active_images images image_uuid + + if (( ${#images[*]} < $TEMPEST_GLANCE_IMAGE_COUNT )); then + # Glance image import is asynchronous and may be configured + # to do image conversion. If image import is being used, + # it's possible that this code is being executed before the + # import has completed and there may be no active images yet. 
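# With the defaults above (1s poll interval, poll limit 12, image count 1)
# the poller waits at most ~12 seconds for at least one active image before
# emitting a warning and falling through to the count check below.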
+ if [[ "$GLANCE_USE_IMPORT_WORKFLOW" == "True" ]]; then + poll_glance_images images image_uuid + if (( ${#images[*]} < $TEMPEST_GLANCE_IMAGE_COUNT )); then + echo "Only found ${#images[*]} image(s), was looking for $TEMPEST_GLANCE_IMAGE_COUNT" + exit 1 + fi fi - images+=($IMAGE_UUID) - done < <(openstack image list --property status=active | awk -F'|' '!/^(+--)|ID|aki|ari/ { print $3,$2 }') + fi case "${#images[*]}" in 0) @@ -168,13 +265,22 @@ function configure_tempest { 1) if [ -z "$image_uuid" ]; then image_uuid=${images[0]} - image_uuid_alt=${images[0]} fi + image_uuid_alt=$image_uuid ;; *) if [ -z "$image_uuid" ]; then image_uuid=${images[0]} - image_uuid_alt=${images[1]} + if [ -z "$image_uuid_alt" ]; then + image_uuid_alt=${images[1]} + fi + elif [ -z "$image_uuid_alt" ]; then + for image in ${images[@]}; do + if [[ "$image" != "$image_uuid" ]]; then + image_uuid_alt=$image + break + fi + done fi ;; esac @@ -194,23 +300,25 @@ function configure_tempest { local alt_username=${ALT_USERNAME:-alt_demo} local alt_project_name=${ALT_TENANT_NAME:-alt_demo} local admin_project_id - admin_project_id=$(openstack project list | awk "/ admin / { print \$2 }") + admin_project_id=$(openstack --os-cloud devstack-admin project list | awk "/ admin / { print \$2 }") if is_service_enabled nova; then # If ``DEFAULT_INSTANCE_TYPE`` is not declared, use the new behavior # Tempest creates its own instance types - available_flavors=$(nova flavor-list) + available_flavors=$(openstack --os-cloud devstack-admin flavor list) if [[ -z "$DEFAULT_INSTANCE_TYPE" ]]; then if [[ ! ( $available_flavors =~ 'm1.nano' ) ]]; then # Determine the flavor disk size based on the image size. disk=$(image_size_in_gib $image_uuid) - openstack flavor create --id 42 --ram 128 --disk $disk --vcpus 1 --property hw_rng:allowed=True m1.nano + ram=${TEMPEST_FLAVOR_RAM} + openstack --os-cloud devstack-admin flavor create --id 42 --ram ${ram} --disk $disk --vcpus 1 --property hw_rng:allowed=True m1.nano fi flavor_ref=42 if [[ ! ( $available_flavors =~ 'm1.micro' ) ]]; then # Determine the alt flavor disk size based on the alt image size. disk=$(image_size_in_gib $image_uuid_alt) - openstack flavor create --id 84 --ram 192 --disk $disk --vcpus 1 --property hw_rng:allowed=True m1.micro + ram=${TEMPEST_FLAVOR_ALT_RAM} + openstack --os-cloud devstack-admin flavor create --id 84 --ram ${ram} --disk $disk --vcpus 1 --property hw_rng:allowed=True m1.micro fi flavor_ref_alt=84 else @@ -236,7 +344,7 @@ function configure_tempest { fi flavor_ref=${flavors[0]} flavor_ref_alt=$flavor_ref - flavor_ref_size=$(openstack flavor show --format value --column disk "${flavor_ref}") + flavor_ref_size=$(openstack --os-cloud devstack-admin flavor show --format value --column disk "${flavor_ref}") # Ensure ``flavor_ref`` and ``flavor_ref_alt`` have different values. # Some resize instance in tempest tests depends on this. @@ -249,7 +357,7 @@ function configure_tempest { # flavor selected as default, e.g. m1.small, # we need to perform additional check. 
# - flavor_ref_alt_size=$(openstack flavor show --format value --column disk "${f}") + flavor_ref_alt_size=$(openstack --os-cloud devstack-admin flavor show --format value --column disk "${f}") if [[ "${flavor_ref_alt_size}" -lt "${flavor_ref_size}" ]]; then continue fi @@ -261,6 +369,20 @@ function configure_tempest { fi fi + if is_service_enabled glance; then + git_clone $OSTESTIMAGES_REPO $OSTESTIMAGES_DIR $OSTESTIMAGES_BRANCH + pushd $OSTESTIMAGES_DIR + tox -egenerate + popd + iniset $TEMPEST_CONFIG image images_manifest_file ${OSTESTIMAGES_DIR}/images/manifest.yaml + local image_conversion + image_conversion=$(iniget $GLANCE_IMAGE_IMPORT_CONF image_conversion output_format) + if [[ -n "$image_conversion" ]]; then + iniset $TEMPEST_CONFIG image-feature-enabled image_conversion True + fi + iniset $TEMPEST_CONFIG image-feature-enabled image_format_enforcement $GLANCE_ENFORCE_IMAGE_FORMAT + fi + iniset $TEMPEST_CONFIG network project_network_cidr $FIXED_RANGE ssh_connect_method=${TEMPEST_SSH_CONNECT_METHOD:-$ssh_connect_method} @@ -270,10 +392,10 @@ function configure_tempest { # If NEUTRON_CREATE_INITIAL_NETWORKS is not true, there is no network created # and the public_network_id should not be set. if [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" == "True" ]] && is_networking_extension_supported 'external-net'; then - public_network_id=$(openstack network show -f value -c id $PUBLIC_NETWORK_NAME) + public_network_id=$(openstack --os-cloud devstack-admin network show -f value -c id $PUBLIC_NETWORK_NAME) # make sure shared network presence does not confuses the tempest tests - openstack network create --share shared - openstack subnet create --description shared-subnet --subnet-range ${TEMPEST_SHARED_POOL:-192.168.233.0/24} --network shared shared-subnet + openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create --share shared + openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create --description shared-subnet --subnet-range ${TEMPEST_SHARED_POOL:-192.168.233.0/24} --network shared shared-subnet fi iniset $TEMPEST_CONFIG DEFAULT use_syslog $SYSLOG @@ -290,7 +412,6 @@ function configure_tempest { iniset $TEMPEST_CONFIG volume build_timeout $BUILD_TIMEOUT # Identity - iniset $TEMPEST_CONFIG identity uri "$KEYSTONE_SERVICE_URI/v2.0/" iniset $TEMPEST_CONFIG identity uri_v3 "$KEYSTONE_SERVICE_URI_V3" iniset $TEMPEST_CONFIG identity user_lockout_failure_attempts $KEYSTONE_LOCKOUT_FAILURE_ATTEMPTS iniset $TEMPEST_CONFIG identity user_lockout_duration $KEYSTONE_LOCKOUT_DURATION @@ -301,19 +422,7 @@ function configure_tempest { iniset $TEMPEST_CONFIG auth admin_project_name $admin_project_name iniset $TEMPEST_CONFIG auth admin_domain_name $admin_domain_name fi - if [ "$ENABLE_IDENTITY_V2" == "True" ]; then - # Run Identity API v2 tests ONLY if needed - iniset $TEMPEST_CONFIG identity-feature-enabled api_v2 True - else - # Skip Identity API v2 tests by default - iniset $TEMPEST_CONFIG identity-feature-enabled api_v2 False - fi iniset $TEMPEST_CONFIG identity auth_version ${TEMPEST_AUTH_VERSION:-v3} - if [[ "$TEMPEST_AUTH_VERSION" != "v2" ]]; then - # we're going to disable v2 admin unless we're using v2 by default. - iniset $TEMPEST_CONFIG identity-feature-enabled api_v2_admin False - fi - if is_service_enabled tls-proxy; then iniset $TEMPEST_CONFIG identity ca_certificates_file $SSL_BUNDLE_FILE fi @@ -347,10 +456,12 @@ function configure_tempest { if [[ ! 
-z "$TEMPEST_HTTP_IMAGE" ]]; then iniset $TEMPEST_CONFIG image http_image $TEMPEST_HTTP_IMAGE fi - if [ "$VIRT_DRIVER" = "xenserver" ]; then - iniset $TEMPEST_CONFIG image disk_formats "ami,ari,aki,vhd,raw,iso" - fi iniset $TEMPEST_CONFIG image-feature-enabled import_image $GLANCE_USE_IMPORT_WORKFLOW + iniset $TEMPEST_CONFIG image-feature-enabled os_glance_reserved True + if is_service_enabled g-api-r; then + iniset $TEMPEST_CONFIG image alternate_image_endpoint image_remote + fi + # Compute iniset $TEMPEST_CONFIG compute image_ref $image_uuid iniset $TEMPEST_CONFIG compute image_ref_alt $image_uuid_alt @@ -402,6 +513,9 @@ function configure_tempest { iniset $TEMPEST_CONFIG compute-feature-enabled live_migrate_back_and_forth ${LIVE_MIGRATE_BACK_AND_FORTH:-False} iniset $TEMPEST_CONFIG compute-feature-enabled attach_encrypted_volume ${ATTACH_ENCRYPTED_VOLUME_AVAILABLE:-True} + # Starting Wallaby, nova sanitizes instance hostnames having freeform characters with dashes + iniset $TEMPEST_CONFIG compute-feature-enabled hostname_fqdn_sanitization True + if [[ -n "$NOVA_FILTERS" ]]; then iniset $TEMPEST_CONFIG compute-feature-enabled scheduler_enabled_filters ${NOVA_FILTERS} fi @@ -410,9 +524,19 @@ function configure_tempest { iniset $TEMPEST_CONFIG compute-feature-enabled volume_multiattach True fi - if is_service_enabled n-novnc; then + if is_service_enabled n-novnc || [ "$NOVA_VNC_ENABLED" != False ]; then iniset $TEMPEST_CONFIG compute-feature-enabled vnc_console True fi + if is_service_enabled n-spice || [ "$NOVA_SPICE_ENABLED" != False ]; then + iniset $TEMPEST_CONFIG compute-feature-enabled spice_console True + fi + if is_service_enabled n-sproxy || [ "$NOVA_SERIAL_ENABLED" != False ]; then + iniset $TEMPEST_CONFIG compute-feature-enabled serial_console True + fi + + # NOTE(gmaan): Since 2025.2, 'manager' role is available in nova. 
+ local nova_policy_roles="admin,manager,member,reader,service" + iniset $TEMPEST_CONFIG compute-feature-enabled nova_policy_roles $nova_policy_roles # Network iniset $TEMPEST_CONFIG network project_networks_reachable false @@ -423,18 +547,24 @@ function configure_tempest { iniset $TEMPEST_CONFIG network-feature-enabled ipv6_subnet_attributes "$IPV6_SUBNET_ATTRIBUTES_ENABLED" iniset $TEMPEST_CONFIG network-feature-enabled port_security $NEUTRON_PORT_SECURITY + iniset $TEMPEST_CONFIG enforce_scope neutron "$NEUTRON_ENFORCE_SCOPE" + # Scenario - if [ "$VIRT_DRIVER" = "xenserver" ]; then - SCENARIO_IMAGE_DIR=${SCENARIO_IMAGE_DIR:-$FILES} - SCENARIO_IMAGE_FILE="cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.vhd.tgz" - iniset $TEMPEST_CONFIG scenario img_disk_format vhd - iniset $TEMPEST_CONFIG scenario img_container_format ovf - else - SCENARIO_IMAGE_DIR=${SCENARIO_IMAGE_DIR:-$FILES} - SCENARIO_IMAGE_FILE=$DEFAULT_IMAGE_FILE_NAME - fi + SCENARIO_IMAGE_DIR=${SCENARIO_IMAGE_DIR:-$FILES} + SCENARIO_IMAGE_FILE=$DEFAULT_IMAGE_FILE_NAME + SCENARIO_IMAGE_TYPE=${SCENARIO_IMAGE_TYPE:-cirros} iniset $TEMPEST_CONFIG scenario img_file $SCENARIO_IMAGE_DIR/$SCENARIO_IMAGE_FILE + # since version 0.6.0 cirros uses dhcpcd dhcp client by default, however, cirros, prior to the + # version 0.6.0, used udhcpc (the only available client at that time) which is also tempest's default + if [[ "$SCENARIO_IMAGE_TYPE" == "cirros" ]]; then + # the image is a cirros image + # use dhcpcd client when version greater or equal 0.6.0 + if [[ $(echo $CIRROS_VERSION | tr -d '.') -ge 060 ]]; then + iniset $TEMPEST_CONFIG scenario dhcp_client dhcpcd + fi + fi + # If using provider networking, use the physical network for validation rather than private TEMPEST_SSH_NETWORK_NAME=$PRIVATE_NETWORK_NAME if is_provider_network; then @@ -449,13 +579,6 @@ function configure_tempest { iniset $TEMPEST_CONFIG validation network_for_ssh $TEMPEST_SSH_NETWORK_NAME # Volume - # Set the service catalog entry for Tempest to run on. Typically - # used to try different Volume API version targets. The tempest - # default it to 'volumev3'(v3 APIs endpoint) , so only set this - # if you want to change it. - if [[ -n "$TEMPEST_VOLUME_TYPE" ]]; then - iniset $TEMPEST_CONFIG volume catalog_type $TEMPEST_VOLUME_TYPE - fi # Only turn on TEMPEST_VOLUME_MANAGE_SNAPSHOT by default for "lvm" backends if [[ "$CINDER_ENABLED_BACKENDS" == *"lvm"* ]]; then TEMPEST_VOLUME_MANAGE_SNAPSHOT=${TEMPEST_VOLUME_MANAGE_SNAPSHOT:-True} @@ -477,14 +600,12 @@ function configure_tempest { TEMPEST_VOLUME_REVERT_TO_SNAPSHOT=${TEMPEST_VOLUME_REVERT_TO_SNAPSHOT:-True} fi iniset $TEMPEST_CONFIG volume-feature-enabled volume_revert $(trueorfalse False TEMPEST_VOLUME_REVERT_TO_SNAPSHOT) + iniset $TEMPEST_CONFIG volume-feature-enabled extend_attached_encrypted_volume ${TEMPEST_EXTEND_ATTACHED_ENCRYPTED_VOLUME:-False} + if [[ "$CINDER_BACKUP_DRIVER" == *"swift"* ]]; then + iniset $TEMPEST_CONFIG volume backup_driver swift + fi local tempest_volume_min_microversion=${TEMPEST_VOLUME_MIN_MICROVERSION:-None} local tempest_volume_max_microversion=${TEMPEST_VOLUME_MAX_MICROVERSION:-"latest"} - # Reset microversions to None where v2 is running which does not support microversion. - # Both "None" means no microversion testing. 
- if [[ "$TEMPEST_VOLUME_TYPE" == "volumev2" ]]; then - tempest_volume_min_microversion=None - tempest_volume_max_microversion=None - fi if [ "$tempest_volume_min_microversion" == "None" ]; then inicomment $TEMPEST_CONFIG volume min_microversion else @@ -533,6 +654,10 @@ function configure_tempest { iniset $TEMPEST_CONFIG volume storage_protocol "$TEMPEST_STORAGE_PROTOCOL" fi + if [[ $ENABLE_VOLUME_MULTIATTACH == "True" ]]; then + iniset $TEMPEST_CONFIG volume volume_type_multiattach $VOLUME_TYPE_MULTIATTACH + fi + # Placement Features # Set the microversion range for placement. # Setting [None, latest] range of microversion which allow Tempest to run all microversions tests. @@ -600,6 +725,30 @@ function configure_tempest { fi done + # ``enforce_scope`` + # If services enable the enforce_scope for their policy + # we need to enable the same on Tempest side so that + # test can be run with scoped token. + if [[ "$KEYSTONE_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then + iniset $TEMPEST_CONFIG enforce_scope keystone true + fi + + if [[ "$NOVA_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then + iniset $TEMPEST_CONFIG enforce_scope nova true + fi + + if [[ "$PLACEMENT_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then + iniset $TEMPEST_CONFIG enforce_scope placement true + fi + + if [[ "$GLANCE_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then + iniset $TEMPEST_CONFIG enforce_scope glance true + fi + + if [[ "$CINDER_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then + iniset $TEMPEST_CONFIG enforce_scope cinder true + fi + if [ "$VIRT_DRIVER" = "libvirt" ] && [ "$LIBVIRT_TYPE" = "lxc" ]; then # libvirt-lxc does not support boot from volume or attaching volumes # so basically anything with cinder is out of the question. @@ -613,19 +762,17 @@ function configure_tempest { local tmp_cfg_file tmp_cfg_file=$(mktemp) cd $TEMPEST_DIR - if [[ "$OFFLINE" != "True" ]]; then - tox -revenv-tempest --notest - fi - # The requirements might be on a different branch, while tempest needs master requirements. local tmp_u_c_m tmp_u_c_m=$(mktemp -t tempest_u_c_m.XXXXXXXXXX) - (cd $REQUIREMENTS_DIR && git show origin/master:upper-constraints.txt) > $tmp_u_c_m + set_tempest_venv_constraints $tmp_u_c_m + if [[ "$OFFLINE" != "True" ]]; then + tox -revenv-tempest --notest + fi tox -evenv-tempest -- pip install -c $tmp_u_c_m -r requirements.txt rm -f $tmp_u_c_m # Auth: - iniset $TEMPEST_CONFIG auth tempest_roles "member" if [[ $TEMPEST_USE_TEST_ACCOUNTS == "True" ]]; then if [[ $TEMPEST_HAS_ADMIN == "True" ]]; then tox -evenv-tempest -- tempest account-generator -c $TEMPEST_CONFIG --os-username $admin_username --os-password "$password" --os-project-name $admin_project_name -r $TEMPEST_CONCURRENCY --with-admin etc/accounts.yaml @@ -649,12 +796,12 @@ function configure_tempest { # Neutron API Extensions # disable metering if we didn't enable the service - if ! is_service_enabled q-metering; then + if ! is_service_enabled q-metering neutron-metering; then DISABLE_NETWORK_API_EXTENSIONS+=", metering" fi # disable l3_agent_scheduler if we didn't enable L3 agent - if ! is_service_enabled q-l3; then + if ! is_service_enabled q-l3 neutron-l3; then DISABLE_NETWORK_API_EXTENSIONS+=", l3_agent_scheduler" fi @@ -695,22 +842,29 @@ function configure_tempest { # install_tempest() - Collect source and prepare function install_tempest { git_clone $TEMPEST_REPO $TEMPEST_DIR $TEMPEST_BRANCH - pip_install 'tox!=2.8.0' + # NOTE(gmann): Pinning tox<4.0.0 for stable/zed and lower. 
Tox 4.0.0 + # released after zed was released and has some incompatible changes + # and it is ok not to fix the issues caused by tox 4.0.0 in stable + # beanches jobs. We can continue testing the stable/zed and lower + # branches with tox<4.0.0 + pip_install 'tox!=2.8.0,<4.0.0' pushd $TEMPEST_DIR # NOTE(gmann): checkout the TEMPEST_BRANCH in case TEMPEST_BRANCH # is tag name not master. git_clone would not checkout tag because # TEMPEST_DIR already exist until RECLONE is true. git checkout $TEMPEST_BRANCH + local tmp_u_c_m + tmp_u_c_m=$(mktemp -t tempest_u_c_m.XXXXXXXXXX) + set_tempest_venv_constraints $tmp_u_c_m + tox -r --notest -efull - # TODO: remove the trailing pip constraint when a proper fix - # arrives for bug https://bugs.launchpad.net/devstack/+bug/1906322 - $TEMPEST_DIR/.tox/tempest/bin/pip install -U -r $RC_DIR/tools/cap-pip.txt # NOTE(mtreinish) Respect constraints in the tempest full venv, things that # are using a tox job other than full will not be respecting constraints but # running pip install -U on tempest requirements - $TEMPEST_DIR/.tox/tempest/bin/pip install -c $REQUIREMENTS_DIR/upper-constraints.txt -r requirements.txt + $TEMPEST_DIR/.tox/tempest/bin/pip install -c $tmp_u_c_m -r requirements.txt PROJECT_VENV["tempest"]=${TEMPEST_DIR}/.tox/tempest + rm -f $tmp_u_c_m popd } @@ -718,10 +872,9 @@ function install_tempest { function install_tempest_plugins { pushd $TEMPEST_DIR if [[ $TEMPEST_PLUGINS != 0 ]] ; then - # The requirements might be on a different branch, while tempest & tempest plugins needs master requirements. local tmp_u_c_m tmp_u_c_m=$(mktemp -t tempest_u_c_m.XXXXXXXXXX) - (cd $REQUIREMENTS_DIR && git show origin/master:upper-constraints.txt) > $tmp_u_c_m + set_tempest_venv_constraints $tmp_u_c_m tox -evenv-tempest -- pip install -c $tmp_u_c_m $TEMPEST_PLUGINS rm -f $tmp_u_c_m echo "Checking installed Tempest plugins:" diff --git a/lib/tls b/lib/tls index b3cc0b4159..fa0a448d7d 100644 --- a/lib/tls +++ b/lib/tls @@ -169,7 +169,7 @@ default_md = default [ req ] default_bits = 1024 -default_md = sha1 +default_md = sha256 prompt = no distinguished_name = req_distinguished_name @@ -212,9 +212,6 @@ function init_CA { if is_fedora; then sudo cp $INT_CA_DIR/ca-chain.pem /usr/share/pki/ca-trust-source/anchors/devstack-chain.pem sudo update-ca-trust - elif is_suse; then - sudo cp $INT_CA_DIR/ca-chain.pem /usr/share/pki/trust/anchors/devstack-chain.pem - sudo update-ca-certificates elif is_ubuntu; then sudo cp $INT_CA_DIR/ca-chain.pem /usr/local/share/ca-certificates/devstack-int.crt sudo cp $ROOT_CA_DIR/cacert.pem /usr/local/share/ca-certificates/devstack-root.crt @@ -261,7 +258,7 @@ function make_cert { if [ ! -r "$ca_dir/$cert_name.crt" ]; then # Generate a signing request $OPENSSL req \ - -sha1 \ + -sha256 \ -newkey rsa \ -nodes \ -keyout $ca_dir/private/$cert_name.key \ @@ -301,7 +298,7 @@ function make_int_CA { if [ ! 
-r "$ca_dir/cacert.pem" ]; then # Create a signing certificate request $OPENSSL req -config $ca_dir/ca.conf \ - -sha1 \ + -sha256 \ -newkey rsa \ -nodes \ -keyout $ca_dir/private/cacert.key \ @@ -367,8 +364,11 @@ function deploy_int_CA { function fix_system_ca_bundle_path { if is_service_enabled tls-proxy; then local capath - capath=$(python3 -c $'try:\n from requests import certs\n print (certs.where())\nexcept ImportError: pass') - + if [[ "$GLOBAL_VENV" == "True" ]] ; then + capath=$($DEVSTACK_VENV/bin/python3 -c $'try:\n from requests import certs\n print (certs.where())\nexcept ImportError: pass') + else + capath=$(python$PYTHON3_VERSION -c $'try:\n from requests import certs\n print (certs.where())\nexcept ImportError: pass') + fi if [[ ! $capath == "" && ! $capath =~ ^/etc/.* && ! -L $capath ]]; then if is_fedora; then sudo rm -f $capath @@ -376,9 +376,6 @@ function fix_system_ca_bundle_path { elif is_ubuntu; then sudo rm -f $capath sudo ln -s /etc/ssl/certs/ca-certificates.crt $capath - elif is_suse; then - sudo rm -f $capath - sudo ln -s /etc/ssl/ca-bundle.pem $capath else echo "Don't know how to set the CA bundle, expect the install to fail." fi @@ -441,9 +438,6 @@ function enable_mod_ssl { if is_ubuntu; then sudo a2enmod ssl - elif is_suse; then - sudo a2enmod ssl - sudo a2enflag SSL elif is_fedora; then # Fedora enables mod_ssl by default : @@ -458,6 +452,7 @@ function enable_mod_ssl { # =============== function tune_apache_connections { + local should_restart=$1 local tuning_file=$APACHE_SETTINGS_DIR/connection-tuning.conf if ! [ -f $tuning_file ] ; then sudo bash -c "cat > $tuning_file" << EOF @@ -500,7 +495,12 @@ ThreadLimit 64 MaxRequestsPerChild 0 EOF - restart_apache_server + if [ "$should_restart" != "norestart" ] ; then + # Only restart the apache server if we know we really want to + # do so. Too many restarts in a short period of time is treated + # as an error by systemd. + restart_apache_server + fi fi } @@ -515,7 +515,8 @@ function start_tls_proxy { # 8190 is the default apache size. local f_header_size=${6:-8190} - tune_apache_connections + # We don't restart apache here as we'll do it at the end of the function. + tune_apache_connections norestart local config_file config_file=$(apache_site_config_for $b_service) @@ -536,6 +537,7 @@ $listen_string SSLEngine On SSLCertificateFile $DEVSTACK_CERT + SSLProtocol -all +TLSv1.3 +TLSv1.2 # Disable KeepAlive to fix bug #1630664 a.k.a the # ('Connection aborted.', BadStatusLine("''",)) error @@ -549,22 +551,23 @@ $listen_string # Avoid races (at the cost of performance) to re-use a pooled connection # where the connection is closed (bug 1807518). + # Set acquire=1 to disable waiting for connection pool members so that + # we can determine when apache is overloaded (returns 503). SetEnv proxy-initial-not-pooled - ProxyPass http://$b_host:$b_port/ retry=0 nocanon + ProxyPass http://$b_host:$b_port/ retry=0 nocanon acquire=1 ProxyPassReverse http://$b_host:$b_port/ ErrorLog $APACHE_LOG_DIR/tls-proxy_error.log ErrorLogFormat "%{cu}t [%-m:%l] [pid %P:tid %T] %7F: %E: [client\ %a] [frontend\ %A] %M% ,\ referer\ %{Referer}i" LogLevel info - CustomLog $APACHE_LOG_DIR/tls-proxy_access.log "%{%Y-%m-%d}t %{%T}t.%{msec_frac}t [%l] %a \"%r\" %>s %b" + CustomLog $APACHE_LOG_DIR/tls-proxy_access.log combined EOF - if is_suse ; then - sudo a2enflag SSL - fi for mod in headers ssl proxy proxy_http; do - enable_apache_mod $mod + # We don't need to restart here as we will restart once at the end + # of the function. 
+ enable_apache_mod $mod norestart done enable_apache_site $b_service restart_apache_server diff --git a/openrc b/openrc index beeaebea42..e800abeb3d 100644 --- a/openrc +++ b/openrc @@ -7,9 +7,6 @@ # Set OS_USERNAME to override the default user name 'demo' # Set ADMIN_PASSWORD to set the password for 'admin' and 'demo' -# NOTE: support for the old NOVA_* novaclient environment variables has -# been removed. - if [[ -n "$1" ]]; then OS_USERNAME=$1 fi @@ -35,26 +32,11 @@ fi # Get some necessary configuration source $RC_DIR/lib/tls -# The OpenStack ecosystem has standardized the term **project** as the -# entity that owns resources. In some places **tenant** remains -# referenced, but in all cases this just means **project**. We will -# warn if we need to turn on legacy **tenant** support to have a -# working environment. +# Minimal configuration +export OS_AUTH_TYPE=password export OS_PROJECT_NAME=${OS_PROJECT_NAME:-demo} - -echo "WARNING: setting legacy OS_TENANT_NAME to support cli tools." -export OS_TENANT_NAME=$OS_PROJECT_NAME - -# In addition to the owning entity (project), nova stores the entity performing -# the action as the **user**. export OS_USERNAME=${OS_USERNAME:-demo} - -# With Keystone you pass the keystone password instead of an api key. -# Recent versions of novaclient use OS_PASSWORD instead of NOVA_API_KEYs -# or NOVA_PASSWORD. export OS_PASSWORD=${ADMIN_PASSWORD:-secret} - -# Region export OS_REGION_NAME=${REGION_NAME:-RegionOne} # Set the host API endpoint. This will default to HOST_IP if SERVICE_IP_VERSION @@ -73,30 +55,14 @@ else GLANCE_HOST=${GLANCE_HOST:-$HOST_IP} fi -# Identity API version -export OS_IDENTITY_API_VERSION=${IDENTITY_API_VERSION:-3} - -# Ask keystoneauth1 to use keystone -export OS_AUTH_TYPE=password - -# Authenticating against an OpenStack cloud using Keystone returns a **Token** -# and **Service Catalog**. The catalog contains the endpoints for all services -# the user/project has access to - including nova, glance, keystone, swift, ... -# We currently recommend using the version 3 *identity api*. -# - # If you don't have a working .stackenv, this is the backup position KEYSTONE_BACKUP=$SERVICE_PROTOCOL://$SERVICE_HOST:5000 KEYSTONE_SERVICE_URI=${KEYSTONE_SERVICE_URI:-$KEYSTONE_BACKUP} export OS_AUTH_URL=${OS_AUTH_URL:-$KEYSTONE_SERVICE_URI} -# Currently, in order to use openstackclient with Identity API v3, -# we need to set the domain which the user and project belong to. -if [ "$OS_IDENTITY_API_VERSION" = "3" ]; then - export OS_USER_DOMAIN_ID=${OS_USER_DOMAIN_ID:-"default"} - export OS_PROJECT_DOMAIN_ID=${OS_PROJECT_DOMAIN_ID:-"default"} -fi +export OS_USER_DOMAIN_ID=${OS_USER_DOMAIN_ID:-"default"} +export OS_PROJECT_DOMAIN_ID=${OS_PROJECT_DOMAIN_ID:-"default"} # Set OS_CACERT to a default CA certificate chain if it exists. if [[ ! -v OS_CACERT ]] ; then @@ -106,8 +72,3 @@ if [[ ! -v OS_CACERT ]] ; then export OS_CACERT=$DEFAULT_OS_CACERT fi fi - -# Currently cinderclient needs you to specify the *volume api* version. This -# needs to match the config of your catalog returned by Keystone. 
-export CINDER_VERSION=${CINDER_VERSION:-3} -export OS_VOLUME_API_VERSION=${OS_VOLUME_API_VERSION:-$CINDER_VERSION} diff --git a/playbooks/post.yaml b/playbooks/post.yaml index 9e66f20e9e..0047d78ea5 100644 --- a/playbooks/post.yaml +++ b/playbooks/post.yaml @@ -17,9 +17,18 @@ dest: "{{ stage_dir }}/verify_tempest_conf.log" state: hard when: tempest_log.stat.exists + - name: Capture most recent qemu crash dump, if any + shell: + executable: /bin/bash + cmd: | + coredumpctl -o {{ devstack_log_dir }}/qemu.coredump dump /usr/bin/qemu-system-x86_64 + ignore_errors: yes roles: - export-devstack-journal - apache-logs-conf + # This should run as early as possible to make sure we don't skew + # the post-tempest results with other activities. + - capture-performance-data - devstack-project-conf # capture-system-logs should be the last role before stage-output - capture-system-logs diff --git a/playbooks/tox/pre.yaml b/playbooks/tox/pre.yaml index d7e4670a80..68d5254251 100644 --- a/playbooks/tox/pre.yaml +++ b/playbooks/tox/pre.yaml @@ -5,4 +5,10 @@ bindep_profile: test bindep_dir: "{{ zuul_work_dir }}" - test-setup - - ensure-tox + # NOTE(gmann): Pinning tox<4.0.0 for stable/zed and lower. Tox 4.0.0 + # released after zed was released and has some incompatible changes + # and it is ok not to fix the issues caused by tox 4.0.0 in stable + # beanches jobs. We can continue testing the stable/zed and lower + # branches with tox<4.0.0 + - role: ensure-tox + ensure_tox_version: "<4" diff --git a/roles/apache-logs-conf/tasks/main.yaml b/roles/apache-logs-conf/tasks/main.yaml index bd64574c9b..6b7ea37857 100644 --- a/roles/apache-logs-conf/tasks/main.yaml +++ b/roles/apache-logs-conf/tasks/main.yaml @@ -64,6 +64,7 @@ 'Debian': '/etc/apache2/sites-enabled/' 'Suse': '/etc/apache2/conf.d/' 'RedHat': '/etc/httpd/conf.d/' + 'openEuler': '/etc/httpd/conf.d/' - name: Discover configurations find: diff --git a/roles/capture-performance-data/README.rst b/roles/capture-performance-data/README.rst new file mode 100644 index 0000000000..b7a37c223f --- /dev/null +++ b/roles/capture-performance-data/README.rst @@ -0,0 +1,25 @@ +Generate performance logs for staging + +Captures usage information from mysql, systemd, apache logs, and other +parts of the system and generates a performance.json file in the +staging directory. + +**Role Variables** + +.. zuul:rolevar:: stage_dir + :default: {{ ansible_user_dir }} + + The base stage directory + +.. zuul:rolevar:: devstack_conf_dir + :default: /opt/stack + + The base devstack destination directory + +.. zuul:rolevar:: debian_suse_apache_deref_logs + + The apache logs found in the debian/suse locations + +.. 
zuul:rolevar:: redhat_apache_deref_logs + + The apache logs found in the redhat locations diff --git a/roles/capture-performance-data/defaults/main.yaml b/roles/capture-performance-data/defaults/main.yaml new file mode 100644 index 0000000000..7bd79f4c4f --- /dev/null +++ b/roles/capture-performance-data/defaults/main.yaml @@ -0,0 +1,3 @@ +devstack_base_dir: /opt/stack +devstack_conf_dir: "{{ devstack_base_dir }}" +stage_dir: "{{ ansible_user_dir }}" diff --git a/roles/capture-performance-data/tasks/main.yaml b/roles/capture-performance-data/tasks/main.yaml new file mode 100644 index 0000000000..51a11b60bc --- /dev/null +++ b/roles/capture-performance-data/tasks/main.yaml @@ -0,0 +1,18 @@ +- name: Generate statistics + shell: + executable: /bin/bash + cmd: | + source {{ devstack_conf_dir }}/stackrc + source {{ devstack_conf_dir }}/inc/python + setup_devstack_virtualenv + $PYTHON {{ devstack_conf_dir }}/tools/get-stats.py \ + --db-user="$DATABASE_USER" \ + --db-pass="$DATABASE_PASSWORD" \ + --db-host="$DATABASE_HOST" \ + {{ apache_logs }} > {{ stage_dir }}/performance.json + vars: + apache_logs: >- + {% for i in debian_suse_apache_deref_logs.results | default([]) + redhat_apache_deref_logs.results | default([]) %} + --apache-log="{{ i.stat.path }}" + {% endfor %} + ignore_errors: yes diff --git a/roles/capture-system-logs/README.rst b/roles/capture-system-logs/README.rst index c28412457a..1376f63bfc 100644 --- a/roles/capture-system-logs/README.rst +++ b/roles/capture-system-logs/README.rst @@ -9,6 +9,7 @@ Stage a number of different logs / reports: - coredumps - dns resolver - listen53 +- services - unbound.log - deprecation messages diff --git a/roles/capture-system-logs/tasks/main.yaml b/roles/capture-system-logs/tasks/main.yaml index 905806d529..4b5ec4836b 100644 --- a/roles/capture-system-logs/tasks/main.yaml +++ b/roles/capture-system-logs/tasks/main.yaml @@ -4,7 +4,13 @@ executable: /bin/bash cmd: | sudo iptables-save > {{ stage_dir }}/iptables.txt - df -h > {{ stage_dir }}/df.txt + + # NOTE(sfernand): Run 'df' with a 60s timeout to prevent hangs from + # stale NFS mounts. + timeout -s 9 60s df -h > {{ stage_dir }}/df.txt || true + # If 'df' times out, the mount output helps debug which NFS share + # is unresponsive. + mount > {{ stage_dir }}/mount.txt for py_ver in 2 3; do if [[ `which python${py_ver}` ]]; then @@ -19,6 +25,9 @@ rpm -qa | sort > {{ stage_dir }}/rpm-qa.txt fi + # Services status + sudo systemctl status --all > services.txt 2>/dev/null + # NOTE(kchamart) The 'audit.log' can be useful in cases when QEMU # failed to start due to denials from SELinux — useful for CentOS # and Fedora machines. For Ubuntu (which runs AppArmor), DevStack diff --git a/roles/devstack-ipv6-only-deployments-verification/README.rst b/roles/devstack-ipv6-only-deployments-verification/README.rst new file mode 100644 index 0000000000..3bddf5ea60 --- /dev/null +++ b/roles/devstack-ipv6-only-deployments-verification/README.rst @@ -0,0 +1,16 @@ +Verify all addresses in IPv6-only deployments + +This role needs to be invoked from a playbook that +runs tests. This role verifies the IPv6 settings on the +devstack side and that devstack deploys with all addresses +being IPv6. This role is invoked before tests are run so that +if there is any missing IPv6 setting, deployments can fail +the job early. + + +**Role Variables** + +.. zuul:rolevar:: devstack_base_dir + :default: /opt/stack + + The devstack base directory. 
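The capture-performance-data role above boils down to a single invocation of tools/get-stats.py inside the devstack virtualenv. For debugging outside a Zuul job, a rough hand-run equivalent is sketched below; it assumes the role's default devstack_conf_dir of /opt/stack and uses a placeholder apache log path, so both may need adjusting for a local setup.

    # Hand-run sketch of what the role executes (paths are assumptions).
    source /opt/stack/stackrc
    source /opt/stack/inc/python
    setup_devstack_virtualenv
    # Pass one --apache-log option per dereferenced apache log found on the host.
    $PYTHON /opt/stack/tools/get-stats.py \
        --db-user="$DATABASE_USER" \
        --db-pass="$DATABASE_PASSWORD" \
        --db-host="$DATABASE_HOST" \
        --apache-log="/var/log/apache2/access.log" > ~/performance.json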
diff --git a/roles/devstack-ipv6-only-deployments-verification/defaults/main.yaml b/roles/devstack-ipv6-only-deployments-verification/defaults/main.yaml new file mode 100644 index 0000000000..fea05c8146 --- /dev/null +++ b/roles/devstack-ipv6-only-deployments-verification/defaults/main.yaml @@ -0,0 +1 @@ +devstack_base_dir: /opt/stack diff --git a/roles/devstack-ipv6-only-deployments-verification/tasks/main.yaml b/roles/devstack-ipv6-only-deployments-verification/tasks/main.yaml new file mode 100644 index 0000000000..59d3b79bc1 --- /dev/null +++ b/roles/devstack-ipv6-only-deployments-verification/tasks/main.yaml @@ -0,0 +1,4 @@ +- name: Verify the ipv6-only deployments + become: true + become_user: stack + shell: "{{ devstack_base_dir }}/devstack/tools/verify-ipv6-only-deployments.sh" diff --git a/roles/export-devstack-journal/tasks/main.yaml b/roles/export-devstack-journal/tasks/main.yaml index ef839edaaf..db38b10a44 100644 --- a/roles/export-devstack-journal/tasks/main.yaml +++ b/roles/export-devstack-journal/tasks/main.yaml @@ -45,7 +45,7 @@ cmd: | journalctl -o export \ --since="$(cat {{ devstack_base_dir }}/log-start-timestamp.txt)" \ - | xz --threads=0 - > {{ stage_dir }}/logs/devstack.journal.xz + | gzip > {{ stage_dir }}/logs/devstack.journal.gz - name: Save journal README become: true diff --git a/roles/export-devstack-journal/templates/devstack.journal.README.txt.j2 b/roles/export-devstack-journal/templates/devstack.journal.README.txt.j2 index fe36653102..30519f63d7 100644 --- a/roles/export-devstack-journal/templates/devstack.journal.README.txt.j2 +++ b/roles/export-devstack-journal/templates/devstack.journal.README.txt.j2 @@ -7,7 +7,7 @@ devstack run. To use it, you will need to convert it so journalctl can read it locally. After downloading the file: - $ /lib/systemd/systemd-journal-remote <(xzcat ./devstack.journal.xz) -o output.journal + $ /lib/systemd/systemd-journal-remote <(zcat ./devstack.journal.gz) -o output.journal Note this binary is not in the regular path. 
On Debian/Ubuntu platforms, you will need to have the "systemd-journal-remote" package diff --git a/roles/orchestrate-devstack/tasks/main.yaml b/roles/orchestrate-devstack/tasks/main.yaml index f747943f3c..b8ee7e35a7 100644 --- a/roles/orchestrate-devstack/tasks/main.yaml +++ b/roles/orchestrate-devstack/tasks/main.yaml @@ -4,6 +4,7 @@ when: inventory_hostname == 'controller' - name: Setup devstack on sub-nodes + any_errors_fatal: true block: - name: Distribute the build sshkey for the user "stack" @@ -18,6 +19,11 @@ name: sync-devstack-data when: devstack_services['tls-proxy']|default(false) + - name: Sync controller ceph.conf and key rings to subnode + include_role: + name: sync-controller-ceph-conf-and-keys + when: devstack_plugins is defined and 'devstack-plugin-ceph' in devstack_plugins + - name: Run devstack on the sub-nodes include_role: name: run-devstack diff --git a/roles/process-stackviz/tasks/main.yaml b/roles/process-stackviz/tasks/main.yaml index c51c66cdb3..3ba3d9c2e6 100644 --- a/roles/process-stackviz/tasks/main.yaml +++ b/roles/process-stackviz/tasks/main.yaml @@ -1,70 +1,73 @@ -- name: Devstack checks if stackviz archive exists - stat: - path: "/opt/cache/files/stackviz-latest.tar.gz" - register: stackviz_archive +- name: Process Stackviz + block: -- debug: - msg: "Stackviz archive could not be found in /opt/cache/files/stackviz-latest.tar.gz" - when: not stackviz_archive.stat.exists + - name: Devstack checks if stackviz archive exists + stat: + path: "/opt/cache/files/stackviz-latest.tar.gz" + register: stackviz_archive -- name: Check if subunit data exists - stat: - path: "{{ zuul_work_dir }}/testrepository.subunit" - register: subunit_input + - debug: + msg: "Stackviz archive could not be found in /opt/cache/files/stackviz-latest.tar.gz" + when: not stackviz_archive.stat.exists -- debug: - msg: "Subunit file could not be found at {{ zuul_work_dir }}/testrepository.subunit" - when: not subunit_input.stat.exists + - name: Check if subunit data exists + stat: + path: "{{ zuul_work_dir }}/testrepository.subunit" + register: subunit_input -- name: Install stackviz - when: - - stackviz_archive.stat.exists - - subunit_input.stat.exists - block: - - include_role: - name: ensure-pip + - debug: + msg: "Subunit file could not be found at {{ zuul_work_dir }}/testrepository.subunit" + when: not subunit_input.stat.exists + + - name: Install stackviz + when: + - stackviz_archive.stat.exists + - subunit_input.stat.exists + block: + - include_role: + name: ensure-pip + + - pip: + name: "file://{{ stackviz_archive.stat.path }}" + virtualenv: /tmp/stackviz + virtualenv_command: '{{ ensure_pip_virtualenv_command }}' + extra_args: -U - - pip: - name: "file://{{ stackviz_archive.stat.path }}" - virtualenv: /tmp/stackviz - virtualenv_command: '{{ ensure_pip_virtualenv_command }}' - extra_args: -U + - name: Deploy stackviz static html+js + command: cp -pR /tmp/stackviz/share/stackviz-html {{ stage_dir }}/stackviz + when: + - stackviz_archive.stat.exists + - subunit_input.stat.exists -- name: Deploy stackviz static html+js - command: cp -pR /tmp/stackviz/share/stackviz-html {{ stage_dir }}/stackviz - when: - - stackviz_archive.stat.exists - - subunit_input.stat.exists + - name: Check if dstat data exists + stat: + path: "{{ devstack_base_dir }}/logs/dstat-csv.log" + register: dstat_input + when: + - stackviz_archive.stat.exists + - subunit_input.stat.exists -- name: Check if dstat data exists - stat: - path: "{{ devstack_base_dir }}/logs/dstat-csv.log" - register: dstat_input - when: - - 
stackviz_archive.stat.exists - - subunit_input.stat.exists + - name: Run stackviz with dstat + shell: | + cat {{ subunit_input.stat.path }} | \ + /tmp/stackviz/bin/stackviz-export \ + --dstat "{{ devstack_base_dir }}/logs/dstat-csv.log" \ + --env --stdin \ + {{ stage_dir }}/stackviz/data + when: + - stackviz_archive.stat.exists + - subunit_input.stat.exists + - dstat_input.stat.exists -- name: Run stackviz with dstat - shell: | - cat {{ subunit_input.stat.path }} | \ - /tmp/stackviz/bin/stackviz-export \ - --dstat "{{ devstack_base_dir }}/logs/dstat-csv.log" \ - --env --stdin \ - {{ stage_dir }}/stackviz/data - when: - - stackviz_archive.stat.exists - - subunit_input.stat.exists - - dstat_input.stat.exists - failed_when: False + - name: Run stackviz without dstat + shell: | + cat {{ subunit_input.stat.path }} | \ + /tmp/stackviz/bin/stackviz-export \ + --env --stdin \ + {{ stage_dir }}/stackviz/data + when: + - stackviz_archive.stat.exists + - subunit_input.stat.exists + - not dstat_input.stat.exists -- name: Run stackviz without dstat - shell: | - cat {{ subunit_input.stat.path }} | \ - /tmp/stackviz/bin/stackviz-export \ - --env --stdin \ - {{ stage_dir }}/stackviz/data - when: - - stackviz_archive.stat.exists - - subunit_input.stat.exists - - not dstat_input.stat.exists - failed_when: False + ignore_errors: yes diff --git a/roles/setup-devstack-cache/tasks/main.yaml b/roles/setup-devstack-cache/tasks/main.yaml index 84f33f0e16..3adff17d5d 100644 --- a/roles/setup-devstack-cache/tasks/main.yaml +++ b/roles/setup-devstack-cache/tasks/main.yaml @@ -2,6 +2,7 @@ # This uses hard links to avoid using extra space. command: "find {{ devstack_cache_dir }}/files -mindepth 1 -maxdepth 1 -exec cp -l {} {{ devstack_base_dir }}/devstack/files/ ;" become: true + ignore_errors: yes - name: Set ownership of cached files file: diff --git a/roles/setup-devstack-source-dirs/tasks/main.yaml b/roles/setup-devstack-source-dirs/tasks/main.yaml index 294c29cd29..cb7c6e3af8 100644 --- a/roles/setup-devstack-source-dirs/tasks/main.yaml +++ b/roles/setup-devstack-source-dirs/tasks/main.yaml @@ -43,9 +43,9 @@ base_branch={{ devstack_sources_branch }} if git branch -a | grep "$base_branch" > /dev/null ; then git checkout $base_branch - elif [[ "$base_branch" == stable/* ]]; then + elif [[ "$base_branch" == stable/* ]] || [[ "$base_branch" == unmaintained/* ]]; then # Look for an eol tag for the stable branch. - eol_tag=${base_branch#stable/}-eol + eol_tag="${base_branch#*/}-eol" if git tag -l |grep $eol_tag >/dev/null; then git checkout $eol_tag git reset --hard $eol_tag diff --git a/roles/sync-controller-ceph-conf-and-keys/README.rst b/roles/sync-controller-ceph-conf-and-keys/README.rst new file mode 100644 index 0000000000..e3d2bb42a4 --- /dev/null +++ b/roles/sync-controller-ceph-conf-and-keys/README.rst @@ -0,0 +1,3 @@ +Sync ceph config and keys between controller and subnodes + +Simply copy the contents of /etc/ceph on the controller to subnodes. 
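In shell terms, the ceph sync performed by this new role amounts to roughly the following, run from the controller towards each subnode. This is only an illustrative sketch with a placeholder $SUBNODE address and assumes root SSH access between the nodes; the role itself uses the Ansible synchronize module, as shown in the tasks file below.

    # Illustrative only: what the ceph conf/key sync amounts to.
    ssh root@$SUBNODE 'mkdir -p /etc/ceph'          # ensure the target directory exists
    rsync -a /etc/ceph/ root@$SUBNODE:/etc/ceph/    # -a keeps owner, group and permissions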
diff --git a/roles/sync-controller-ceph-conf-and-keys/tasks/main.yaml b/roles/sync-controller-ceph-conf-and-keys/tasks/main.yaml new file mode 100644 index 0000000000..71ece579e6 --- /dev/null +++ b/roles/sync-controller-ceph-conf-and-keys/tasks/main.yaml @@ -0,0 +1,15 @@ +- name: Ensure /etc/ceph exists on subnode + become: true + file: + path: /etc/ceph + state: directory + +- name: Copy /etc/ceph from controller to subnode + become: true + synchronize: + owner: yes + group: yes + perms: yes + src: /etc/ceph/ + dest: /etc/ceph/ + delegate_to: controller diff --git a/samples/local.conf b/samples/local.conf index 8b76137c38..55b729809d 100644 --- a/samples/local.conf +++ b/samples/local.conf @@ -49,7 +49,7 @@ SERVICE_PASSWORD=$ADMIN_PASSWORD # path of the destination log file. A timestamp will be appended to the given name. LOGFILE=$DEST/logs/stack.sh.log -# Old log files are automatically removed after 7 days to keep things neat. Change +# Old log files are automatically removed after 2 days to keep things neat. Change # the number of days by setting ``LOGDAYS``. LOGDAYS=2 diff --git a/samples/local.sh b/samples/local.sh index a1c5c8143b..7e6ae70ad4 100755 --- a/samples/local.sh +++ b/samples/local.sh @@ -31,7 +31,7 @@ if is_service_enabled nova; then # ``demo``) # Get OpenStack user auth - source $TOP_DIR/openrc + export OS_CLOUD=devstack # Add first keypair found in localhost:$HOME/.ssh for i in $HOME/.ssh/id_rsa.pub $HOME/.ssh/id_dsa.pub; do diff --git a/setup.cfg b/setup.cfg deleted file mode 100644 index 146f010243..0000000000 --- a/setup.cfg +++ /dev/null @@ -1,12 +0,0 @@ -[metadata] -name = DevStack -summary = OpenStack DevStack -description-file = - README.rst -author = OpenStack -author-email = openstack-discuss@lists.openstack.org -home-page = https://docs.openstack.org/devstack/latest -classifier = - Intended Audience :: Developers - License :: OSI Approved :: Apache Software License - Operating System :: POSIX :: Linux diff --git a/setup.py b/setup.py deleted file mode 100755 index 70c2b3f32b..0000000000 --- a/setup.py +++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/env python -# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT -import setuptools - -setuptools.setup( - setup_requires=['pbr'], - pbr=True) diff --git a/stack.sh b/stack.sh index 036afd7b00..965f58007d 100755 --- a/stack.sh +++ b/stack.sh @@ -12,7 +12,7 @@ # a multi-node developer install. # To keep this script simple we assume you are running on a recent **Ubuntu** -# (Bionic or newer), **Fedora** (F24 or newer), or **CentOS/RHEL** +# (Bionic or newer) or **CentOS/RHEL/RockyLinux** # (7 or newer) machine. (It may work on other platforms but support for those # platforms is left to those who added them to DevStack.) It should work in # a VM or physical server. 
Additionally, we maintain a list of ``deb`` and @@ -67,7 +67,9 @@ unset `env | grep -E '^OS_' | cut -d = -f 1` umask 022 # Not all distros have sbin in PATH for regular users. -PATH=$PATH:/usr/local/sbin:/usr/sbin:/sbin +# osc will normally be installed at /usr/local/bin/openstack so ensure +# /usr/local/bin is also in the path +PATH=$PATH:/usr/local/bin:/usr/local/sbin:/usr/sbin:/sbin # Keep track of the DevStack directory TOP_DIR=$(cd $(dirname "$0") && pwd) @@ -96,19 +98,25 @@ fi # templates and other useful files in the ``files`` subdirectory FILES=$TOP_DIR/files if [ ! -d $FILES ]; then - die $LINENO "missing devstack/files" + set +o xtrace + echo "missing devstack/files" + exit 1 fi # ``stack.sh`` keeps function libraries here # Make sure ``$TOP_DIR/inc`` directory is present if [ ! -d $TOP_DIR/inc ]; then - die $LINENO "missing devstack/inc" + set +o xtrace + echo "missing devstack/inc" + exit 1 fi # ``stack.sh`` keeps project libraries here # Make sure ``$TOP_DIR/lib`` directory is present if [ ! -d $TOP_DIR/lib ]; then - die $LINENO "missing devstack/lib" + set +o xtrace + echo "missing devstack/lib" + exit 1 fi # Check if run in POSIX shell @@ -221,7 +229,7 @@ write_devstack_version # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -SUPPORTED_DISTROS="bionic|focal|f31|f32|opensuse-15.2|opensuse-tumbleweed|rhel8" +SUPPORTED_DISTROS="trixie|bookworm|jammy|noble|rhel9|rhel10" if [[ ! ${DISTRO} =~ $SUPPORTED_DISTROS ]]; then echo "WARNING: this script has not been tested on $DISTRO" @@ -272,7 +280,6 @@ chmod 0440 $TEMPFILE sudo chown root:root $TEMPFILE sudo mv $TEMPFILE /etc/sudoers.d/50_stack_sh - # Configure Distro Repositories # ----------------------------- @@ -294,10 +301,20 @@ function _install_epel { } function _install_rdo { - # NOTE(ianw) 2020-04-30 : when we have future branches, we - # probably want to install the relevant branch RDO release as - # well. But for now it's all master. - sudo dnf -y install https://rdoproject.org/repos/rdo-release.el8.rpm + if [[ $DISTRO =~ "rhel" ]]; then + VERSION=${DISTRO:4:2} + rdo_release=${TARGET_BRANCH#*/} + if [[ "$TARGET_BRANCH" == "master" ]]; then + # adding delorean-deps repo to provide current master rpms + sudo wget https://trunk.rdoproject.org/centos${VERSION}-master/delorean-deps.repo -O /etc/yum.repos.d/delorean-deps.repo + else + if sudo dnf provides centos-release-openstack-${rdo_release} >/dev/null 2>&1; then + sudo dnf -y install centos-release-openstack-${rdo_release} + else + sudo wget https://trunk.rdoproject.org/centos${VERSION}-${rdo_release}/delorean-deps.repo -O /etc/yum.repos.d/delorean-deps.repo + fi + fi + fi sudo dnf -y update } @@ -319,7 +336,9 @@ fi # Destination path for devstack logs if [[ -n ${LOGDIR:-} ]]; then - mkdir -p $LOGDIR + sudo mkdir -p $LOGDIR + safe_chown -R $STACK_USER $LOGDIR + safe_chmod 0755 $LOGDIR fi # Destination path for service data @@ -330,11 +349,14 @@ if [[ ! -d $DATA_DIR ]]; then safe_chmod 0755 $DATA_DIR fi +# Create and/or clean the async state directory +async_init + # Configure proper hostname # Certain services such as rabbitmq require that the local hostname resolves # correctly. Make sure it exists in /etc/hosts so that is always true. LOCAL_HOSTNAME=`hostname -s` -if ! fgrep -qwe "$LOCAL_HOSTNAME" /etc/hosts; then +if ! 
grep -Fqwe "$LOCAL_HOSTNAME" /etc/hosts; then sudo sed -i "s/\(^127.0.0.1.*\)/\1 $LOCAL_HOSTNAME/" /etc/hosts fi @@ -343,32 +365,38 @@ fi # to speed things up SKIP_EPEL_INSTALL=$(trueorfalse False SKIP_EPEL_INSTALL) -if [[ $DISTRO == "rhel8" ]]; then - # If we have /etc/ci/mirror_info.sh assume we're on a OpenStack CI - # node, where EPEL is installed (but disabled) and already - # pointing at our internal mirror - if [[ -f /etc/ci/mirror_info.sh ]]; then - SKIP_EPEL_INSTALL=True - sudo dnf config-manager --set-enabled epel - fi - - # PowerTools repo provides libyaml-devel required by devstack itself and - # EPEL packages assume that the PowerTools repository is enable. - sudo dnf config-manager --set-enabled PowerTools +if [[ $DISTRO == "rhel9" ]]; then + # for CentOS Stream 9 repository + sudo dnf config-manager --set-enabled crb + # for RHEL 9 repository + sudo dnf config-manager --set-enabled codeready-builder-for-rhel-9-x86_64-rpms + # rabbitmq and other packages are provided by RDO repositories. + _install_rdo - if [[ ${SKIP_EPEL_INSTALL} != True ]]; then - _install_epel + # Some distributions (Rocky Linux 9) provide curl-minimal instead of curl, + # which triggers a conflict when devstack wants to install "curl". + # Swap curl-minimal with curl. + if is_package_installed curl-minimal; then + sudo dnf swap -y curl-minimal curl fi - # Along with EPEL, CentOS (and a-likes) require some packages only - # available in RDO repositories (e.g. OVS, or later versions of - # kvm) to run. +elif [[ $DISTRO == "rhel10" ]]; then + # for CentOS Stream 10 repository + sudo dnf config-manager --set-enabled crb + # rabbitmq and other packages are provided by RDO repositories. _install_rdo +elif [[ $DISTRO == "openEuler-22.03" ]]; then + # There are some problems in openEuler that we have to work around here first. Some required + # packages/actions run before the fixup script, so we can't fix them there. + # + # 1. The hostname package is not installed by default + # 2. Some necessary packages are in the openstack repo, for example liberasurecode-devel + # 3. python3-pip can be uninstalled by `get_pip.py` automatically. + # 4. Ensure wget is installed before use + install_package hostname openstack-release-wallaby wget + uninstall_package python3-pip - # NOTE(cgoncalves): workaround RHBZ#1154272 - # dnf fails for non-privileged users when expired_repos.json doesn't exist. - # RHBZ: https://bugzilla.redhat.com/show_bug.cgi?id=1154272 - # Patch: https://github.com/rpm-software-management/dnf/pull/1448 - echo "[]" | sudo tee /var/cache/dnf/expired_repos.json + # Add yum repository for libvirt7.X + sudo wget https://eur.openeuler.openatom.cn/coprs/g/sig-openstack/Libvirt-7.X/repo/openeuler-22.03_LTS/group_sig-openstack-Libvirt-7.X-openeuler-22.03_LTS.repo -O /etc/yum.repos.d/libvirt7.2.0.repo fi # Ensure python is installed @@ -553,6 +581,12 @@ rm -f $SSL_BUNDLE_FILE source $TOP_DIR/lib/database source $TOP_DIR/lib/rpc_backend +# load host tuning functions and defaults +source $TOP_DIR/lib/host +# tune host memory early to ensure zswap/ksm are configured before +# doing memory-intensive operations like cloning repos or unpacking packages.
+tune_host + # Configure Projects # ================== @@ -583,8 +617,10 @@ source $TOP_DIR/lib/swift source $TOP_DIR/lib/neutron source $TOP_DIR/lib/ldap source $TOP_DIR/lib/dstat +source $TOP_DIR/lib/atop source $TOP_DIR/lib/tcpdump source $TOP_DIR/lib/etcd3 +source $TOP_DIR/lib/os-vif # Extras Source # -------------- @@ -666,6 +702,8 @@ if initialize_database_backends; then # Last chance for the database password. This must be handled here # because read_password is not a library function. read_password DATABASE_PASSWORD "ENTER A PASSWORD TO USE FOR THE DATABASE." + + define_database_baseurl else echo "No database enabled" fi @@ -706,16 +744,6 @@ if is_service_enabled keystone; then fi -# Nova -# ----- - -if is_service_enabled nova && [[ "$VIRT_DRIVER" == 'xenserver' ]]; then - # Look for the backend password here because read_password - # is not a library function. - read_password XENAPI_PASSWORD "ENTER A PASSWORD TO USE FOR XEN." -fi - - # Swift # ----- @@ -742,14 +770,28 @@ save_stackenv $LINENO # Bring down global requirements before any use of pip_install. This is # necessary to ensure that the constraints file is in place before we # attempt to apply any constraints to pip installs. -git_clone $REQUIREMENTS_REPO $REQUIREMENTS_DIR $REQUIREMENTS_BRANCH +# We always need the master branch in addition to any stable branch, so +# override GIT_DEPTH here. +GIT_DEPTH=0 git_clone $REQUIREMENTS_REPO $REQUIREMENTS_DIR $REQUIREMENTS_BRANCH # Install package requirements # Source it so the entire environment is available echo_summary "Installing package prerequisites" source $TOP_DIR/tools/install_prereqs.sh -# Configure an appropriate Python environment +# Configure an appropriate Python environment. +# +# NOTE(ianw) 2021-08-11 : We install the latest pip here because pip +# is very active and changes are not generally reflected in the LTS +# distros. This often involves important things like dependency or +# conflict resolution, and has often been required because the +# complicated constraints etc. used by openstack have tickled bugs in +# distro versions of pip. We want to find these problems as they +# happen, rather than years later when we try to update our LTS +# distro. Whilst it is clear that global installations of upstream +# pip are less and less common, with virtualenv's being the general +# approach now; there are a lot of devstack plugins that assume a +# global install environment. 
if [[ "$OFFLINE" != "True" ]]; then PYPI_ALTERNATIVE_URL=${PYPI_ALTERNATIVE_URL:-""} $TOP_DIR/tools/install_pip.sh fi @@ -758,6 +800,20 @@ fi source $TOP_DIR/tools/fixup_stuff.sh fixup_all +if [[ "$GLOBAL_VENV" == "True" ]] ; then + # TODO(frickler): find a better solution for this + sudo ln -sf /opt/stack/data/venv/bin/cinder-manage /usr/local/bin + sudo ln -sf /opt/stack/data/venv/bin/cinder-rtstool /usr/local/bin + sudo ln -sf /opt/stack/data/venv/bin/glance /usr/local/bin + sudo ln -sf /opt/stack/data/venv/bin/nova-manage /usr/local/bin + sudo ln -sf /opt/stack/data/venv/bin/openstack /usr/local/bin + sudo ln -sf /opt/stack/data/venv/bin/privsep-helper /usr/local/bin + sudo ln -sf /opt/stack/data/venv/bin/rally /usr/local/bin + sudo ln -sf /opt/stack/data/venv/bin/tox /usr/local/bin + + setup_devstack_virtualenv +fi + # Install subunit for the subunit output stream pip_install -U os-testr @@ -857,7 +913,7 @@ fi install_keystonemiddleware if is_service_enabled keystone; then - if [ "$KEYSTONE_AUTH_HOST" == "$SERVICE_HOST" ]; then + if [ "$KEYSTONE_SERVICE_HOST" == "$SERVICE_HOST" ]; then stack_install_service keystone configure_keystone fi @@ -925,6 +981,11 @@ if is_service_enabled tls-proxy; then fix_system_ca_bundle_path fi +if is_service_enabled cinder || [[ "$USE_CINDER_FOR_GLANCE" == "True" ]]; then + # os-brick setup required by glance, cinder, and nova + init_os_brick +fi + # Extras Install # -------------- @@ -937,6 +998,9 @@ if use_library_from_git "python-openstackclient"; then setup_dev_lib "python-openstackclient" else pip_install_gr python-openstackclient + if is_service_enabled openstack-cli-server; then + install_openstack_cli_server + fi fi # Installs alias for osc so that we can collect timing for all @@ -1001,22 +1065,6 @@ fi # Save configuration values save_stackenv $LINENO -# Kernel Samepage Merging (KSM) -# ----------------------------- - -# Processes that mark their memory as mergeable can share identical memory -# pages if KSM is enabled. This is particularly useful for nova + libvirt -# backends but any other setup that marks its memory as mergeable can take -# advantage. The drawback is there is higher cpu load; however, we tend to -# be memory bound not cpu bound so enable KSM by default but allow people -# to opt out if the CPU time is more important to them. 
- -if [[ $ENABLE_KSM == "True" ]] ; then - if [[ -f /sys/kernel/mm/ksm/run ]] ; then - sudo sh -c "echo 1 > /sys/kernel/mm/ksm/run" - fi -fi - # Start Services # ============== @@ -1027,6 +1075,12 @@ fi # A better kind of sysstat, with the top process per time slice start_dstat +if is_service_enabled atop; then + configure_atop + install_atop + start_atop +fi + # Run a background tcpdump for debugging # Note: must set TCPDUMP_ARGS with the enabled service if is_service_enabled tcpdump; then @@ -1044,37 +1098,18 @@ fi # Keystone # -------- -# Rather than just export these, we write them out to a -# intermediate userrc file that can also be used to debug if -# something goes wrong between here and running -# tools/create_userrc.sh (this script relies on services other -# than keystone being available, so we can't call it right now) -cat > $TOP_DIR/userrc_early <> $TOP_DIR/userrc_early start_tls_proxy http-services '*' 443 $SERVICE_HOST 80 fi -source $TOP_DIR/userrc_early +# Write a clouds.yaml file and use the devstack-admin cloud +write_clouds_yaml +export OS_CLOUD=${OS_CLOUD:-devstack-admin} if is_service_enabled keystone; then echo_summary "Starting Keystone" - if [ "$KEYSTONE_AUTH_HOST" == "$SERVICE_HOST" ]; then + if [ "$KEYSTONE_SERVICE_HOST" == "$SERVICE_HOST" ]; then init_keystone start_keystone bootstrap_keystone @@ -1082,34 +1117,33 @@ if is_service_enabled keystone; then create_keystone_accounts if is_service_enabled nova; then - create_nova_accounts + async_runfunc create_nova_accounts fi if is_service_enabled glance; then - create_glance_accounts + async_runfunc create_glance_accounts fi if is_service_enabled cinder; then - create_cinder_accounts + async_runfunc create_cinder_accounts fi if is_service_enabled neutron; then - create_neutron_accounts + async_runfunc create_neutron_accounts fi if is_service_enabled swift; then - create_swift_accounts + async_runfunc create_swift_accounts fi fi -# Write a clouds.yaml file -write_clouds_yaml - # Horizon # ------- if is_service_enabled horizon; then echo_summary "Configuring Horizon" - configure_horizon + async_runfunc configure_horizon fi +async_wait create_nova_accounts create_glance_accounts create_cinder_accounts +async_wait create_neutron_accounts create_swift_accounts configure_horizon # Glance # ------ @@ -1117,7 +1151,7 @@ fi # NOTE(yoctozepto): limited to node hosting the database which is the controller if is_service_enabled $DATABASE_BACKENDS && is_service_enabled glance; then echo_summary "Configuring Glance" - init_glance + async_runfunc init_glance fi @@ -1131,7 +1165,7 @@ if is_service_enabled neutron; then # Run init_neutron only on the node hosting the Neutron API server if is_service_enabled $DATABASE_BACKENDS && is_service_enabled neutron; then - init_neutron + async_runfunc init_neutron fi fi @@ -1140,7 +1174,8 @@ fi # ---- if is_service_enabled q-dhcp; then - # Delete traces of nova networks from prior runs + # TODO(frickler): These are remnants from n-net, check which parts are really + # still needed for Neutron. 
# Do not kill any dnsmasq instance spawned by NetworkManager netman_pid=$(pidof NetworkManager || true) if [ -z "$netman_pid" ]; then @@ -1155,13 +1190,18 @@ if is_service_enabled q-dhcp; then sudo sysctl -w net.ipv4.ip_forward=1 fi +# os-vif +# ------ +if is_service_enabled nova neutron; then + configure_os_vif +fi # Storage Service # --------------- if is_service_enabled swift; then echo_summary "Configuring Swift" - init_swift + async_runfunc init_swift fi @@ -1170,7 +1210,7 @@ fi if is_service_enabled cinder; then echo_summary "Configuring Cinder" - init_cinder + async_runfunc init_cinder fi # Placement Service @@ -1178,9 +1218,16 @@ fi if is_service_enabled placement; then echo_summary "Configuring placement" - init_placement + async_runfunc init_placement fi +# Wait for neutron and placement before starting nova +async_wait init_neutron +async_wait init_placement +async_wait init_glance +async_wait init_swift +async_wait init_cinder + # Compute Service # --------------- @@ -1188,12 +1235,7 @@ if is_service_enabled nova; then echo_summary "Configuring Nova" init_nova - # Additional Nova configuration that is dependent on other services - # TODO(stephenfin): Is it possible for neutron to *not* be enabled now? If - # not, remove the if here - if is_service_enabled neutron; then - configure_neutron_nova - fi + async_runfunc configure_neutron_nova fi @@ -1227,15 +1269,21 @@ fi # deployments. This ensures the keys match across nova and cinder across all # hosts. FIXED_KEY=${FIXED_KEY:-bae3516cc1c0eb18b05440eba8012a4a880a2ee04d584a9c1579445e675b12defdc716ec} +if is_service_enabled cinder; then + iniset $CINDER_CONF key_manager fixed_key "$FIXED_KEY" +fi + +async_wait configure_neutron_nova + +# NOTE(clarkb): This must come after async_wait configure_neutron_nova because +# configure_neutron_nova modifies $NOVA_CONF and $NOVA_CPU_CONF as well. If +# we don't wait then these two ini updates race each other and can result +# in unexpected configs.
if is_service_enabled nova; then iniset $NOVA_CONF key_manager fixed_key "$FIXED_KEY" iniset $NOVA_CPU_CONF key_manager fixed_key "$FIXED_KEY" fi -if is_service_enabled cinder; then - iniset $CINDER_CONF key_manager fixed_key "$FIXED_KEY" -fi - # Launch the nova-api and wait for it to answer before continuing if is_service_enabled n-api; then echo_summary "Starting Nova API" @@ -1247,10 +1295,7 @@ if is_service_enabled ovn-controller ovn-controller-vtep; then start_ovn_services fi -if is_service_enabled neutron-api; then - echo_summary "Starting Neutron" - start_neutron_api -elif is_service_enabled q-svc; then +if is_service_enabled q-svc neutron-api; then echo_summary "Starting Neutron" configure_neutron_after_post_config start_neutron_service_and_check @@ -1267,7 +1312,7 @@ if is_service_enabled neutron; then start_neutron fi # Once neutron agents are started setup initial network elements -if is_service_enabled q-svc && [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" == "True" ]]; then +if is_service_enabled q-svc neutron-api && [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" == "True" ]]; then echo_summary "Creating initial neutron network elements" # Here's where plugins can wire up their own networks instead # of the code in lib/neutron_plugins/services/l3 @@ -1282,7 +1327,7 @@ fi if is_service_enabled nova; then echo_summary "Starting Nova" start_nova - create_flavors + async_runfunc create_flavors fi if is_service_enabled cinder; then echo_summary "Starting Cinder" @@ -1324,6 +1369,7 @@ if is_service_enabled $DATABASE_BACKENDS && is_service_enabled glance; then done fi +async_wait create_flavors if is_service_enabled horizon; then echo_summary "Starting Horizon" @@ -1340,7 +1386,7 @@ fi # which is helpful in image bundle steps. if is_service_enabled nova && is_service_enabled keystone; then - USERRC_PARAMS="-PA --target-dir $TOP_DIR/accrc" + USERRC_PARAMS="-PA --target-dir $TOP_DIR/accrc --os-password $ADMIN_PASSWORD" if [ -f $SSL_BUNDLE_FILE ]; then USERRC_PARAMS="$USERRC_PARAMS --os-cacert $SSL_BUNDLE_FILE" @@ -1467,8 +1513,25 @@ else exec 1>&3 fi +# Make sure we didn't leak any background tasks +async_cleanup + # Dump out the time totals time_totals +async_print_timing + +if is_service_enabled mysql; then + if [[ "$MYSQL_GATHER_PERFORMANCE" == "True" && "$MYSQL_HOST" ]]; then + echo "" + echo "" + echo "Post-stack database query stats:" + mysql -u $DATABASE_USER -p$DATABASE_PASSWORD -h $MYSQL_HOST stats -e \ + 'SELECT * FROM queries' -t 2>/dev/null + mysql -u $DATABASE_USER -p$DATABASE_PASSWORD -h $MYSQL_HOST stats -e \ + 'DELETE FROM queries' 2>/dev/null + fi +fi + # Using the cloud # =============== diff --git a/stackrc b/stackrc index a36f8970e6..93f8b1cd6d 100644 --- a/stackrc +++ b/stackrc @@ -72,8 +72,10 @@ if ! isset ENABLED_SERVICES ; then ENABLED_SERVICES+=,g-api # Cinder ENABLED_SERVICES+=,c-sch,c-api,c-vol + # OVN + ENABLED_SERVICES+=,ovn-controller,ovn-northd,ovs-vswitchd,ovsdb-server # Neutron - ENABLED_SERVICES+=,q-svc,q-dhcp,q-meta,q-agt,q-l3 + ENABLED_SERVICES+=,q-svc,q-ovn-agent # Dashboard ENABLED_SERVICES+=,horizon # Additional services @@ -83,7 +85,7 @@ fi # Global toggle for enabling services under mod_wsgi. If this is set to # ``True`` all services that use HTTPD + mod_wsgi as the preferred method of # deployment, will be deployed under Apache. If this is set to ``False`` all -# services will rely on the local toggle variable (e.g. ``KEYSTONE_USE_MOD_WSGI``) +# services will rely on the local toggle variable. 
ENABLE_HTTPD_MOD_WSGI_SERVICES=True # Set the default Nova APIs to enable @@ -119,24 +121,11 @@ else SYSTEMCTL="sudo systemctl" fi - -# Whether or not to enable Kernel Samepage Merging (KSM) if available. -# This allows programs that mark their memory as mergeable to share -# memory pages if they are identical. This is particularly useful with -# libvirt backends. This reduces memory usage at the cost of CPU overhead -# to scan memory. We default to enabling it because we tend to be more -# memory constrained than CPU bound. -ENABLE_KSM=$(trueorfalse True ENABLE_KSM) - # Passwords generated by interactive devstack runs if [[ -r $RC_DIR/.localrc.password ]]; then source $RC_DIR/.localrc.password fi -# Control whether Python 3 should be used at all. -# TODO(frickler): Drop this when all consumers are fixed -export USE_PYTHON3=True - # Adding the specific version of Python 3 to this variable will install # the app using that version of the interpreter instead of just 3. _DEFAULT_PYTHON3_VERSION="$(_get_python_version python3)" @@ -173,20 +162,25 @@ else export PS4='+ $(short_source): ' fi -# Configure Identity API version: 2.0, 3 -IDENTITY_API_VERSION=${IDENTITY_API_VERSION:-3} - -# Set the option ENABLE_IDENTITY_V2 to True. It defines whether the DevStack -# deployment will be deploying the Identity v2 pipelines. If this option is set -# to ``False``, DevStack will: i) disable Identity v2; ii) configure Tempest to -# skip Identity v2 specific tests; and iii) configure Horizon to use Identity -# v3. When this option is set to ``False``, the option IDENTITY_API_VERSION -# will to be set to ``3`` in order to make DevStack register the Identity -# endpoint as v3. This flag is experimental and will be used as basis to -# identify the projects which still have issues to operate with Identity v3. -ENABLE_IDENTITY_V2=$(trueorfalse False ENABLE_IDENTITY_V2) -if [ "$ENABLE_IDENTITY_V2" == "False" ]; then - IDENTITY_API_VERSION=3 +# Global option for enforcing scope. If enabled, ENFORCE_SCOPE overrides +# each services ${SERVICE}_ENFORCE_SCOPE variables +ENFORCE_SCOPE=$(trueorfalse False ENFORCE_SCOPE) + +# Devstack supports the use of a global virtualenv. These variables enable +# and disable this functionality as well as set the path to the virtualenv. +# Note that the DATA_DIR is selected because grenade testing uses a shared +# DATA_DIR but different DEST dirs and we don't want two sets of venvs, +# instead we want one global set. +DEVSTACK_VENV=${DEVSTACK_VENV:-$DATA_DIR/venv} + +# NOTE(kopecmartin): remove this once this is fixed +# https://bugs.launchpad.net/devstack/+bug/2031639 +# This couldn't go to fixup_stuff as that's called after projects +# (e.g. certain paths) are set taking GLOBAL_VENV into account +if [[ "$os_VENDOR" =~ (CentOSStream|Rocky) ]]; then + GLOBAL_VENV=$(trueorfalse False GLOBAL_VENV) +else + GLOBAL_VENV=$(trueorfalse True GLOBAL_VENV) fi # Enable use of Python virtual environments. Individual project use of @@ -196,13 +190,23 @@ fi USE_VENV=$(trueorfalse False USE_VENV) # Add packages that need to be installed into a venv but are not in any -# requirmenets files here, in a comma-separated list -ADDITIONAL_VENV_PACKAGES=${ADITIONAL_VENV_PACKAGES:-""} +# requirements files here, in a comma-separated list. 
+# Currently only used when USE_VENV is true (individual project venvs) +ADDITIONAL_VENV_PACKAGES=${ADDITIONAL_VENV_PACKAGES:-""} # This can be used to turn database query logging on and off # (currently only implemented for MySQL backend) DATABASE_QUERY_LOGGING=$(trueorfalse False DATABASE_QUERY_LOGGING) +# This can be used to turn on various non-default items in the +# performance_schema that are of interest to us +MYSQL_GATHER_PERFORMANCE=$(trueorfalse True MYSQL_GATHER_PERFORMANCE) + +# This can be used to reduce the amount of memory mysqld uses while running. +# These are unscientifically determined, and could reduce performance or +# cause other issues. +MYSQL_REDUCE_MEMORY=$(trueorfalse True MYSQL_REDUCE_MEMORY) + # Set a timeout for git operations. If git is still running when the # timeout expires, the command will be retried up to 3 times. This is # in the format for timeout(1); @@ -219,6 +223,9 @@ GIT_TIMEOUT=${GIT_TIMEOUT:-0} # proxy uwsgi in front of it, or "mod_wsgi", which runs in # apache. mod_wsgi is deprecated, don't use it. WSGI_MODE=${WSGI_MODE:-"uwsgi"} +if [[ "$WSGI_MODE" != "uwsgi" ]]; then + die $LINENO "$WSGI_MODE is no longer a supported WSGI mode. Only uwsgi is valid." +fi # Repositories # ------------ @@ -245,7 +252,7 @@ REQUIREMENTS_DIR=${REQUIREMENTS_DIR:-$DEST/requirements} # Setting the variable to 'ALL' will activate the download for all # libraries. -DEVSTACK_SERIES="wallaby" +DEVSTACK_SERIES="2026.1" ############## # @@ -298,7 +305,11 @@ REQUIREMENTS_BRANCH=${REQUIREMENTS_BRANCH:-$TARGET_BRANCH} # Tempest test suite TEMPEST_REPO=${TEMPEST_REPO:-${GIT_BASE}/openstack/tempest.git} TEMPEST_BRANCH=${TEMPEST_BRANCH:-$BRANCHLESS_TARGET_BRANCH} +TEMPEST_VENV_UPPER_CONSTRAINTS=${TEMPEST_VENV_UPPER_CONSTRAINTS:-master} +OSTESTIMAGES_REPO=${OSTESTIMAGES_REPO:-${GIT_BASE}/openstack/os-test-images.git} +OSTESTIMAGES_BRANCH=${OSTESTIMAGES_BRANCH:-$BRANCHLESS_TARGET_BRANCH} +OSTESTIMAGES_DIR=${DEST}/os-test-images ############## # @@ -384,6 +395,10 @@ GITBRANCH["futurist"]=${FUTURIST_BRANCH:-$TARGET_BRANCH} GITREPO["debtcollector"]=${DEBTCOLLECTOR_REPO:-${GIT_BASE}/openstack/debtcollector.git} GITBRANCH["debtcollector"]=${DEBTCOLLECTOR_BRANCH:-$TARGET_BRANCH} +# etcd3gw library +GITREPO["etcd3gw"]=${ETCD3GW_REPO:-${GIT_BASE}/openstack/etcd3gw.git} +GITBRANCH["etcd3gw"]=${ETCD3GW_BRANCH:-$BRANCHLESS_TARGET_BRANCH} + # helpful state machines GITREPO["automaton"]=${AUTOMATON_REPO:-${GIT_BASE}/openstack/automaton.git} GITBRANCH["automaton"]=${AUTOMATON_BRANCH:-$TARGET_BRANCH} @@ -412,6 +427,10 @@ GITBRANCH["oslo.db"]=${OSLODB_BRANCH:-$TARGET_BRANCH} GITREPO["oslo.i18n"]=${OSLOI18N_REPO:-${GIT_BASE}/openstack/oslo.i18n.git} GITBRANCH["oslo.i18n"]=${OSLOI18N_BRANCH:-$TARGET_BRANCH} +# oslo.limit +GITREPO["oslo.limit"]=${OSLOLIMIT_REPO:-${GIT_BASE}/openstack/oslo.limit.git} +GITBRANCH["oslo.limit"]=${OSLOLIMIT_BRANCH:-$TARGET_BRANCH} + # oslo.log GITREPO["oslo.log"]=${OSLOLOG_REPO:-${GIT_BASE}/openstack/oslo.log.git} GITBRANCH["oslo.log"]=${OSLOLOG_BRANCH:-$TARGET_BRANCH} @@ -545,6 +564,10 @@ GITREPO["neutron-lib"]=${NEUTRON_LIB_REPO:-${GIT_BASE}/openstack/neutron-lib.git GITBRANCH["neutron-lib"]=${NEUTRON_LIB_BRANCH:-$TARGET_BRANCH} GITDIR["neutron-lib"]=$DEST/neutron-lib +# os-resource-classes library containing a list of standardized resource classes for OpenStack +GITREPO["os-resource-classes"]=${OS_RESOURCE_CLASSES_REPO:-${GIT_BASE}/openstack/os-resource-classes.git} +GITBRANCH["os-resource-classes"]=${OS_RESOURCE_CLASSES_BRANCH:-$TARGET_BRANCH} + # os-traits library for 
resource provider traits in the placement service GITREPO["os-traits"]=${OS_TRAITS_REPO:-${GIT_BASE}/openstack/os-traits.git} GITBRANCH["os-traits"]=${OS_TRAITS_BRANCH:-$TARGET_BRANCH} @@ -554,27 +577,10 @@ GITREPO["ovsdbapp"]=${OVSDBAPP_REPO:-${GIT_BASE}/openstack/ovsdbapp.git} GITBRANCH["ovsdbapp"]=${OVSDBAPP_BRANCH:-$TARGET_BRANCH} GITDIR["ovsdbapp"]=$DEST/ovsdbapp -################## -# -# TripleO / Heat Agent Components -# -################## - -# run-parts script required by os-refresh-config -DIB_UTILS_REPO=${DIB_UTILS_REPO:-${GIT_BASE}/openstack/dib-utils.git} -DIB_UTILS_BRANCH=${DIB_UTILS_BRANCH:-$BRANCHLESS_TARGET_BRANCH} - -# os-apply-config configuration template tool -OAC_REPO=${OAC_REPO:-${GIT_BASE}/openstack/os-apply-config.git} -OAC_BRANCH=${OAC_BRANCH:-$TRAILING_TARGET_BRANCH} - -# os-collect-config configuration agent -OCC_REPO=${OCC_REPO:-${GIT_BASE}/openstack/os-collect-config.git} -OCC_BRANCH=${OCC_BRANCH:-$TRAILING_TARGET_BRANCH} - -# os-refresh-config configuration run-parts tool -ORC_REPO=${ORC_REPO:-${GIT_BASE}/openstack/os-refresh-config.git} -ORC_BRANCH=${ORC_BRANCH:-$TRAILING_TARGET_BRANCH} +# os-ken used by neutron +GITREPO["os-ken"]=${OS_KEN_REPO:-${GIT_BASE}/openstack/os-ken.git} +GITBRANCH["os-ken"]=${OS_KEN_BRANCH:-$TARGET_BRANCH} +GITDIR["os-ken"]=$DEST/os-ken ################# @@ -590,8 +596,8 @@ IRONIC_PYTHON_AGENT_REPO=${IRONIC_PYTHON_AGENT_REPO:-${GIT_BASE}/openstack/ironi IRONIC_PYTHON_AGENT_BRANCH=${IRONIC_PYTHON_AGENT_BRANCH:-$TARGET_BRANCH} # a websockets/html5 or flash powered VNC console for vm instances -NOVNC_REPO=${NOVNC_REPO:-https://github.com/novnc/noVNC.git} -NOVNC_BRANCH=${NOVNC_BRANCH:-v1.1.0} +NOVNC_REPO=${NOVNC_REPO:-https://github.com/novnc/novnc.git} +NOVNC_BRANCH=${NOVNC_BRANCH:-v1.3.0} # a websockets/html5 or flash powered SPICE console for vm instances SPICE_REPO=${SPICE_REPO:-http://anongit.freedesktop.org/git/spice/spice-html5.git} @@ -605,21 +611,21 @@ ENABLE_VOLUME_MULTIATTACH=$(trueorfalse False ENABLE_VOLUME_MULTIATTACH) # Nova hypervisor configuration. We default to libvirt with **kvm** but will # drop back to **qemu** if we are unable to load the kvm module. ``stack.sh`` can -# also install an **LXC**, **OpenVZ** or **XenAPI** based system. If xenserver-core -# is installed, the default will be XenAPI +# also install an **LXC** or **OpenVZ** based system. DEFAULT_VIRT_DRIVER=libvirt -is_package_installed xenserver-core && DEFAULT_VIRT_DRIVER=xenserver VIRT_DRIVER=${VIRT_DRIVER:-$DEFAULT_VIRT_DRIVER} case "$VIRT_DRIVER" in ironic|libvirt) LIBVIRT_TYPE=${LIBVIRT_TYPE:-kvm} - LIBVIRT_CPU_MODE=${LIBVIRT_CPU_MODE:-none} + LIBVIRT_CPU_MODE=${LIBVIRT_CPU_MODE:-custom} + LIBVIRT_CPU_MODEL=${LIBVIRT_CPU_MODEL:-Nehalem} + + if [[ -z "$os_VENDOR" ]]; then + GetOSVersion + fi + if [[ "$os_VENDOR" =~ (Debian|Ubuntu) ]]; then - # The groups change with newer libvirt. Older Ubuntu used - # 'libvirtd', but now uses libvirt like Debian. Do a quick check - # to see if libvirtd group already exists to handle grenade's case. 
- LIBVIRT_GROUP=$(cut -d ':' -f 1 /etc/group | grep 'libvirtd$' || true) - LIBVIRT_GROUP=${LIBVIRT_GROUP:-libvirt} + LIBVIRT_GROUP=libvirt else LIBVIRT_GROUP=libvirtd fi @@ -633,14 +639,6 @@ case "$VIRT_DRIVER" in fake) NUMBER_FAKE_NOVA_COMPUTE=${NUMBER_FAKE_NOVA_COMPUTE:-1} ;; - xenserver) - # Xen config common to nova and neutron - XENAPI_USER=${XENAPI_USER:-"root"} - # This user will be used for dom0 - domU communication - # should be able to log in to dom0 without a password - # will be used to install the plugins - DOMZERO_USER=${DOMZERO_USER:-"domzero"} - ;; *) ;; esac @@ -654,21 +652,20 @@ esac # If the file ends in .tar.gz, uncompress the tarball and and select the first # .img file inside it as the image. If present, use "*-vmlinuz*" as the kernel # and "*-initrd*" as the ramdisk -# example: http://cloud-images.ubuntu.com/releases/precise/release/ubuntu-12.04-server-cloudimg-amd64.tar.gz +# example: https://cloud-images.ubuntu.com/releases/jammy/release/ubuntu-22.04-server-cloudimg-amd64.tar.gz # * disk image (*.img,*.img.gz) # if file ends in .img, then it will be uploaded and registered as a to # glance as a disk image. If it ends in .gz, it is uncompressed first. # example: -# http://cloud-images.ubuntu.com/releases/precise/release/ubuntu-12.04-server-cloudimg-armel-disk1.img -# http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs.img.gz +# https://cloud-images.ubuntu.com/releases/jammy/release/ubuntu-22.04-server-cloudimg-amd64.img +# https://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs.img.gz # * OpenVZ image: # OpenVZ uses its own format of image, and does not support UEC style images -#IMAGE_URLS="http://smoser.brickies.net/ubuntu/ttylinux-uec/ttylinux-uec-amd64-11.2_2.6.35-15_1.tar.gz" # old ttylinux-uec image -#IMAGE_URLS="http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img" # cirros full disk image +#IMAGE_URLS="https://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img" # cirros full disk image -CIRROS_VERSION=${CIRROS_VERSION:-"0.5.1"} -CIRROS_ARCH=${CIRROS_ARCH:-"x86_64"} +CIRROS_VERSION=${CIRROS_VERSION:-"0.6.3"} +CIRROS_ARCH=${CIRROS_ARCH:-$(uname -m)} # Set default image based on ``VIRT_DRIVER`` and ``LIBVIRT_TYPE``, either of # which may be set in ``local.conf``. 
Also allow ``DEFAULT_IMAGE_NAME`` and @@ -684,27 +681,22 @@ if [[ "$DOWNLOAD_DEFAULT_IMAGES" == "True" ]]; then lxc) # the cirros root disk in the uec tarball is empty, so it will not work for lxc DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs} DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_FILE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs.img.gz} - IMAGE_URLS+="http://download.cirros-cloud.net/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";; + IMAGE_URLS+="https://github.com/cirros-dev/cirros/releases/download/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";; *) # otherwise, use the qcow image DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk} DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_FILE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img} - IMAGE_URLS+="http://download.cirros-cloud.net/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";; + IMAGE_URLS+="https://github.com/cirros-dev/cirros/releases/download/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";; esac ;; vsphere) DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.2-i386-disk.vmdk} DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_FILE_NAME:-$DEFAULT_IMAGE_NAME} IMAGE_URLS+="http://partnerweb.vmware.com/programs/vmdkimage/${DEFAULT_IMAGE_FILE_NAME}";; - xenserver) - DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.5-x86_64-disk} - DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.5-x86_64-disk.vhd.tgz} - IMAGE_URLS+="http://ca.downloads.xensource.com/OpenStack/cirros-0.3.5-x86_64-disk.vhd.tgz" - IMAGE_URLS+=",http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-x86_64-uec.tar.gz";; fake) # Use the same as the default for libvirt DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk} DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_FILE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img} - IMAGE_URLS+="http://download.cirros-cloud.net/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";; + IMAGE_URLS+="https://github.com/cirros-dev/cirros/releases/download/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";; esac DOWNLOAD_DEFAULT_IMAGES=False fi @@ -718,12 +710,11 @@ fi EXTRA_CACHE_URLS="" # etcd3 defaults -ETCD_VERSION=${ETCD_VERSION:-v3.3.12} -ETCD_SHA256_AMD64=${ETCD_SHA256_AMD64:-"dc5d82df095dae0a2970e4d870b6929590689dd707ae3d33e7b86da0f7f211b6"} -ETCD_SHA256_ARM64=${ETCD_SHA256_ARM64:-"170b848ac1a071fe7d495d404a868a2c0090750b2944f8a260ef1c6125b2b4f4"} -ETCD_SHA256_PPC64=${ETCD_SHA256_PPC64:-"77f807b1b51abbf51e020bb05bdb8ce088cb58260fcd22749ea32eee710463d3"} -# etcd v3.2.x doesn't have anything for s390x -ETCD_SHA256_S390X=${ETCD_SHA256_S390X:-""} +ETCD_VERSION=${ETCD_VERSION:-v3.5.21} +ETCD_SHA256_AMD64=${ETCD_SHA256_AMD64:-"adddda4b06718e68671ffabff2f8cee48488ba61ad82900e639d108f2148501c"} +ETCD_SHA256_ARM64=${ETCD_SHA256_ARM64:-"95bf6918623a097c0385b96f139d90248614485e781ec9bee4768dbb6c79c53f"} +ETCD_SHA256_PPC64=${ETCD_SHA256_PPC64:-"6fb6ecb3d1b331eb177dc610a8efad3aceb1f836d6aeb439ba0bfac5d5c2a38c"} +ETCD_SHA256_S390X=${ETCD_SHA256_S390X:-"a211a83961ba8a7e94f7d6343ad769e699db21a715ba4f3b68cf31ea28f9c951"} # Make sure etcd3 downloads the correct architecture if is_arch "x86_64"; then ETCD_ARCH="amd64" @@ -735,15 +726,8 @@ elif is_arch "ppc64le"; then ETCD_ARCH="ppc64le" ETCD_SHA256=${ETCD_SHA256:-$ETCD_SHA256_PPC64} elif is_arch "s390x"; then - # An etcd3 binary for s390x is not available on github like it is - # for other arches. Only continue if a custom download URL was - # provided. 
- if [[ -n "${ETCD_DOWNLOAD_URL}" ]]; then - ETCD_ARCH="s390x" - ETCD_SHA256=${ETCD_SHA256:-$ETCD_SHA256_S390X} - else - exit_distro_not_supported "etcd3. No custom ETCD_DOWNLOAD_URL provided." - fi + ETCD_ARCH="s390x" + ETCD_SHA256=${ETCD_SHA256:-$ETCD_SHA256_S390X} else exit_distro_not_supported "invalid hardware type - $ETCD_ARCH" fi @@ -767,8 +751,8 @@ for image_url in ${IMAGE_URLS//,/ }; do fi done -# 24Gb default volume backing file size -VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-24G} +# 30Gb default volume backing file size +VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-30G} # Prefixes for volume and instance names VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-} @@ -802,7 +786,7 @@ NOVA_READY_TIMEOUT=${NOVA_READY_TIMEOUT:-$SERVICE_TIMEOUT} SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT=${SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT:-5} # Service graceful shutdown timeout -WORKER_TIMEOUT=${WORKER_TIMEOUT:-90} +WORKER_TIMEOUT=${WORKER_TIMEOUT:-80} # Common Configuration # -------------------- @@ -879,7 +863,31 @@ SERVICE_HOST=${SERVICE_HOST:-${DEF_SERVICE_HOST}} # This is either 127.0.0.1 for IPv4 or ::1 for IPv6 SERVICE_LOCAL_HOST=${SERVICE_LOCAL_HOST:-${DEF_SERVICE_LOCAL_HOST}} -REGION_NAME=${REGION_NAME:-RegionOne} +# TUNNEL IP version +# This is the IP version to use for tunnel endpoints +TUNNEL_IP_VERSION=${TUNNEL_IP_VERSION:-4} + +# Validate TUNNEL_IP_VERSION +if [[ $TUNNEL_IP_VERSION != "4" ]] && [[ $TUNNEL_IP_VERSION != "6" ]]; then + die $LINENO "TUNNEL_IP_VERSION must be either 4 or 6" +fi + +if [[ "$TUNNEL_IP_VERSION" == 4 ]]; then + DEF_TUNNEL_ENDPOINT_IP=$HOST_IP +fi + +if [[ "$TUNNEL_IP_VERSION" == 6 ]]; then + # Only die if the user has not over-ridden the endpoint IP + if [[ "$HOST_IPV6" == "" ]] && [[ "$TUNNEL_ENDPOINT_IP" == "" ]]; then + die $LINENO "Could not determine host IPv6 address. See local.conf for suggestions on setting HOST_IPV6." + fi + + DEF_TUNNEL_ENDPOINT_IP=$HOST_IPV6 +fi + +# Allow the use of an alternate address for tunnel endpoints. +# Default is dependent on TUNNEL_IP_VERSION above. 
+TUNNEL_ENDPOINT_IP=${TUNNEL_ENDPOINT_IP:-${DEF_TUNNEL_ENDPOINT_IP}} # Configure services to use syslog instead of writing to individual log files SYSLOG=$(trueorfalse False SYSLOG) diff --git a/tests/test_ini_config.sh b/tests/test_ini_config.sh index 6ed1647f34..fd3896d6ba 100755 --- a/tests/test_ini_config.sh +++ b/tests/test_ini_config.sh @@ -44,6 +44,12 @@ empty = multi = foo1 multi = foo2 +[fff] +ampersand = + +[ggg] +backslash = + [key_with_spaces] rgw special key = something @@ -85,7 +91,7 @@ fi # test iniget_sections VAL=$(iniget_sections "${TEST_INI}") -assert_equal "$VAL" "default aaa bbb ccc ddd eee key_with_spaces \ +assert_equal "$VAL" "default aaa bbb ccc ddd eee fff ggg key_with_spaces \ del_separate_options del_same_option del_missing_option \ del_missing_option_multi del_no_options" @@ -124,6 +130,23 @@ iniset ${SUDO_ARG} ${TEST_INI} bbb handlers "33,44" VAL=$(iniget ${TEST_INI} bbb handlers) assert_equal "$VAL" "33,44" "inset at EOF" +# Test with ampersand in values +for i in `seq 3`; do + iniset ${TEST_INI} fff ampersand '&y' +done +VAL=$(iniget ${TEST_INI} fff ampersand) +assert_equal "$VAL" "&y" "iniset ampersands in option" + +# Test with backslash in value +iniset ${TEST_INI} ggg backslash 'foo\bar' +VAL=$(iniget ${TEST_INI} ggg backslash) +assert_equal "$VAL" 'foo\bar' "iniset backslash in value" + +# Test with both ampersand and backslash +iniset ${TEST_INI} ggg backslash 'foo\bar&baz' +VAL=$(iniget ${TEST_INI} ggg backslash) +assert_equal "$VAL" 'foo\bar&baz' "iniset ampersand and backslash in value" + # test empty option if ini_has_option ${SUDO_ARG} ${TEST_INI} ddd empty; then passed "ini_has_option: ddd.empty present" diff --git a/tests/test_libs_from_pypi.sh b/tests/test_libs_from_pypi.sh index ab7583d042..9552c93c4f 100755 --- a/tests/test_libs_from_pypi.sh +++ b/tests/test_libs_from_pypi.sh @@ -44,7 +44,8 @@ ALL_LIBS+=" debtcollector os-brick os-traits automaton futurist oslo.service" ALL_LIBS+=" oslo.cache oslo.reports osprofiler cursive" ALL_LIBS+=" keystoneauth ironic-lib neutron-lib oslo.privsep" ALL_LIBS+=" diskimage-builder os-vif python-brick-cinderclient-ext" -ALL_LIBS+=" castellan python-barbicanclient ovsdbapp" +ALL_LIBS+=" castellan python-barbicanclient ovsdbapp os-ken os-resource-classes" +ALL_LIBS+=" oslo.limit etcd3gw" # Generate the above list with # echo ${!GITREPO[@]} diff --git a/tests/test_meta_config.sh b/tests/test_meta_config.sh index 087aaf468b..30479f245a 100755 --- a/tests/test_meta_config.sh +++ b/tests/test_meta_config.sh @@ -137,6 +137,9 @@ foo=bar [some] random=config +[[test12|run_tests.sh/test.conf]] +foo=bar + [[test-multi-sections|test-multi-sections.conf]] [sec-1] cfg_item1 = abcd @@ -389,13 +392,12 @@ EXPECT_VAL=0 check_result "$VAL" "$EXPECT_VAL" set -e -echo -n "merge_config_group test10 not directory: " +echo -n "merge_config_group test10 create directory: " set +e -# function is expected to fail and exit, running it -# in a subprocess to let this script proceed -(merge_config_group test.conf test10) +STACK_USER=$(id -u -n) +merge_config_group test.conf test10 VAL=$? -EXPECT_VAL=255 +EXPECT_VAL=0 check_result "$VAL" "$EXPECT_VAL" set -e @@ -414,9 +416,21 @@ random = config non = sense' check_result "$VAL" "$EXPECT_VAL" +echo -n "merge_config_group test12 directory as file: " +set +e +# function is expected to fail and exit, running it +# in a subprocess to let this script proceed +(merge_config_group test.conf test12) +VAL=$? 
+EXPECT_VAL=255 +check_result "$VAL" "$EXPECT_VAL" +set -e + rm -f test.conf test1c.conf test2a.conf \ test-space.conf test-equals.conf test-strip.conf \ test-colon.conf test-env.conf test-multiline.conf \ test-multi-sections.conf test-same.conf rm -rf test-etc +rm -rf does-not-exist-dir + diff --git a/tests/test_package_ordering.sh b/tests/test_package_ordering.sh index bfc2a1954f..f221c821a0 100755 --- a/tests/test_package_ordering.sh +++ b/tests/test_package_ordering.sh @@ -8,7 +8,7 @@ TOP=$(cd $(dirname "$0")/.. && pwd) source $TOP/tests/unittest.sh export LC_ALL=en_US.UTF-8 -PKG_FILES=$(find $TOP/files/debs $TOP/files/rpms $TOP/files/rpms-suse -type f) +PKG_FILES=$(find $TOP/files/debs $TOP/files/rpms -type f) TMPDIR=$(mktemp -d) diff --git a/tests/test_write_devstack_local_conf_role.sh b/tests/test_write_devstack_local_conf_role.sh index b2bc0a2c46..71d8d51614 100755 --- a/tests/test_write_devstack_local_conf_role.sh +++ b/tests/test_write_devstack_local_conf_role.sh @@ -6,4 +6,4 @@ TOP=$(cd $(dirname "$0")/.. && pwd) source $TOP/functions source $TOP/tests/unittest.sh -python ./roles/write-devstack-local-conf/library/test.py +${PYTHON} $TOP/roles/write-devstack-local-conf/library/test.py diff --git a/tests/unittest.sh b/tests/unittest.sh index 3703ece91d..fced2abe65 100644 --- a/tests/unittest.sh +++ b/tests/unittest.sh @@ -17,6 +17,8 @@ ERROR=0 PASS=0 FAILED_FUNCS="" +export PYTHON=$(which python3 2>/dev/null) + # pass a test, printing out MSG # usage: passed message function passed { diff --git a/tools/build_venv.sh b/tools/build_venv.sh index cfa39a82e0..a439163b5d 100755 --- a/tools/build_venv.sh +++ b/tools/build_venv.sh @@ -38,7 +38,7 @@ if [[ -z "$TOP_DIR" ]]; then fi # Build new venv -virtualenv $VENV_DEST +python$PYTHON3_VERSION -m venv --system-site-packages $VENV_DEST # Install modern pip PIP_VIRTUAL_ENV=$VENV_DEST pip_install -U pip diff --git a/tools/cap-pip.txt b/tools/cap-pip.txt deleted file mode 100644 index 8ee551b261..0000000000 --- a/tools/cap-pip.txt +++ /dev/null @@ -1 +0,0 @@ -pip<20.3 diff --git a/tools/create-stack-user.sh b/tools/create-stack-user.sh index 919cacb036..cb8d7aa328 100755 --- a/tools/create-stack-user.sh +++ b/tools/create-stack-user.sh @@ -44,6 +44,15 @@ fi if ! getent passwd $STACK_USER >/dev/null; then echo "Creating a user called $STACK_USER" useradd -g $STACK_USER -s /bin/bash -d $DEST -m $STACK_USER + # RHEL based distros create home dir with 700 permissions, + # And Ubuntu 21.04+ with 750, i.e missing executable + # permission for either group or others + # Devstack deploy will have issues with this, fix it by + # adding executable permission + if [[ $(stat -c '%A' $DEST|grep -o x|wc -l) -lt 3 ]]; then + echo "Executable permission missing for $DEST, adding it" + chmod +x $DEST + fi fi echo "Giving stack user passwordless sudo privileges" diff --git a/tools/dbcounter/dbcounter.py b/tools/dbcounter/dbcounter.py new file mode 100644 index 0000000000..86e5529c97 --- /dev/null +++ b/tools/dbcounter/dbcounter.py @@ -0,0 +1,121 @@ +import json +import logging +import os +import threading +import time +import queue + +import sqlalchemy +from sqlalchemy.engine import CreateEnginePlugin +from sqlalchemy import event + +# https://docs.sqlalchemy.org/en/14/core/connections.html? +# highlight=createengineplugin#sqlalchemy.engine.CreateEnginePlugin + +LOG = logging.getLogger(__name__) + +# The theory of operation here is that we register this plugin with +# sqlalchemy via an entry_point. 
+# It gets loaded by virtue of plugin=
+# being in the database connection URL, which gives us an opportunity
+# to hook the engines that get created.
+#
+# We opportunistically spawn a thread, which we feed "hits" to over a
+# queue, and which occasionally writes those hits to a special
+# database called 'stats'. We access that database with the same user,
+# pass, and host as the main connection URL for simplicity.
+
+
+class LogCursorEventsPlugin(CreateEnginePlugin):
+    def __init__(self, url, kwargs):
+        self.db_name = url.database
+        LOG.info('Registered counter for database %s' % self.db_name)
+        new_url = sqlalchemy.engine.URL.create(url.drivername,
+                                               url.username,
+                                               url.password,
+                                               url.host,
+                                               url.port,
+                                               'stats')
+
+        self.engine = sqlalchemy.create_engine(new_url)
+        self.queue = queue.Queue()
+        self.thread = None
+
+    def update_url(self, url):
+        return url.difference_update_query(["dbcounter"])
+
+    def engine_created(self, engine):
+        """Hook the engine creation process.
+
+        This is the plug point for the sqlalchemy plugin. Using
+        plugin=$this in the URL causes this method to be called when
+        the engine is created, giving us a chance to hook it below.
+        """
+        event.listen(engine, "before_cursor_execute", self._log_event)
+
+    def ensure_writer_thread(self):
+        self.thread = threading.Thread(target=self.stat_writer, daemon=True)
+        self.thread.start()
+
+    def _log_event(self, conn, cursor, statement, parameters, context,
+                   executemany):
+        """Queue a "hit" for this operation to be recorded.
+
+        Attempts to determine the operation by the first word of the
+        statement, or 'OTHER' if it cannot be determined.
+        """
+
+        # Start our thread if not running. If we were forked after the
+        # engine was created and this plugin was associated, our
+        # writer thread is gone, so respawn.
+        if not self.thread or not self.thread.is_alive():
+            self.ensure_writer_thread()
+
+        try:
+            op = statement.strip().split(' ', 1)[0] or 'OTHER'
+        except Exception:
+            op = 'OTHER'
+
+        self.queue.put((self.db_name, op))
+
+    def do_incr(self, db, op, count):
+        """Increment the counter for (db,op) by count."""
+
+        query = sqlalchemy.text('INSERT INTO queries (db, op, count) '
+                                ' VALUES (:db, :op, :count) '
+                                ' ON DUPLICATE KEY UPDATE count=count+:count')
+        try:
+            with self.engine.begin() as conn:
+                r = conn.execute(query, {'db': db, 'op': op, 'count': count})
+        except Exception as e:
+            LOG.error('Failed to account for access to database %r: %s',
+                      db, e)
+
+    def stat_writer(self):
+        """Consume messages from the queue and write them in batches.
+
+        This reads "hits" from a queue fed by _log_event() and
+        writes (db,op)+=count stats to the database after ten seconds
+        of no activity to avoid triggering a write for every SELECT
+        call. Write no less often than every sixty seconds to avoid
+        being starved by constant activity.
+ """ + LOG.debug('[%i] Writer thread running' % os.getpid()) + while True: + to_write = {} + last = time.time() + while time.time() - last < 60: + try: + item = self.queue.get(timeout=10) + to_write.setdefault(item, 0) + to_write[item] += 1 + except queue.Empty: + break + + if to_write: + LOG.debug('[%i] Writing DB stats %s' % ( + os.getpid(), + ','.join(['%s:%s=%i' % (db, op, count) + for (db, op), count in to_write.items()]))) + + for (db, op), count in to_write.items(): + self.do_incr(db, op, count) diff --git a/tools/dbcounter/pyproject.toml b/tools/dbcounter/pyproject.toml new file mode 100644 index 0000000000..d74d688997 --- /dev/null +++ b/tools/dbcounter/pyproject.toml @@ -0,0 +1,3 @@ +[build-system] +requires = ["sqlalchemy", "setuptools>=42"] +build-backend = "setuptools.build_meta" \ No newline at end of file diff --git a/tools/dbcounter/setup.cfg b/tools/dbcounter/setup.cfg new file mode 100644 index 0000000000..12300bf619 --- /dev/null +++ b/tools/dbcounter/setup.cfg @@ -0,0 +1,14 @@ +[metadata] +name = dbcounter +author = Dan Smith +author_email = dms@danplanet.com +version = 0.1 +description = A teeny tiny dbcounter plugin for use with devstack +url = http://github.com/openstack/devstack +license = Apache + +[options] +py_modules = dbcounter +entry_points = + [sqlalchemy.plugins] + dbcounter = dbcounter:LogCursorEventsPlugin diff --git a/tools/file_tracker.sh b/tools/file_tracker.sh new file mode 100755 index 0000000000..9c31b30a56 --- /dev/null +++ b/tools/file_tracker.sh @@ -0,0 +1,47 @@ +#!/bin/bash +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +set -o errexit + +# time to sleep between checks +SLEEP_TIME=20 + +function tracker { + echo "Number of open files | Number of open files not in use | Maximum number of files allowed to be opened" + while true; do + cat /proc/sys/fs/file-nr + sleep $SLEEP_TIME + done +} + +function usage { + echo "Usage: $0 [-x] [-s N]" 1>&2 + exit 1 +} + +while getopts ":s:x" opt; do + case $opt in + s) + SLEEP_TIME=$OPTARG + ;; + x) + set -o xtrace + ;; + *) + usage + ;; + esac +done + +tracker diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index 25f726892f..9e2818f2cc 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -26,76 +26,6 @@ if [[ -z "$TOP_DIR" ]]; then FILES=$TOP_DIR/files fi -# Keystone Port Reservation -# ------------------------- -# Reserve and prevent ``KEYSTONE_AUTH_PORT`` and ``KEYSTONE_AUTH_PORT_INT`` from -# being used as ephemeral ports by the system. The default(s) are 35357 and -# 35358 which are in the Linux defined ephemeral port range (in disagreement -# with the IANA ephemeral port range). This is a workaround for bug #1253482 -# where Keystone will try and bind to the port and the port will already be -# in use as an ephemeral port by another process. This places an explicit -# exception into the Kernel for the Keystone AUTH ports. 
-function fixup_keystone { - keystone_ports=${KEYSTONE_AUTH_PORT:-35357},${KEYSTONE_AUTH_PORT_INT:-35358} - - # Only do the reserved ports when available, on some system (like containers) - # where it's not exposed we are almost pretty sure these ports would be - # exclusive for our DevStack. - if sysctl net.ipv4.ip_local_reserved_ports >/dev/null 2>&1; then - # Get any currently reserved ports, strip off leading whitespace - reserved_ports=$(sysctl net.ipv4.ip_local_reserved_ports | awk -F'=' '{print $2;}' | sed 's/^ //') - - if [[ -z "${reserved_ports}" ]]; then - # If there are no currently reserved ports, reserve the keystone ports - sudo sysctl -w net.ipv4.ip_local_reserved_ports=${keystone_ports} - else - # If there are currently reserved ports, keep those and also reserve the - # Keystone specific ports. Duplicate reservations are merged into a single - # reservation (or range) automatically by the kernel. - sudo sysctl -w net.ipv4.ip_local_reserved_ports=${keystone_ports},${reserved_ports} - fi - else - echo_summary "WARNING: unable to reserve keystone ports" - fi -} - -# Ubuntu Repositories -#-------------------- -# Enable universe for bionic since it is missing when installing from ISO. -function fixup_ubuntu { - if [[ "$DISTRO" != "bionic" ]]; then - return - fi - - # This pulls in apt-add-repository - install_package "software-properties-common" - - # Enable universe - sudo add-apt-repository -y universe - - if [[ -f /etc/ci/mirror_info.sh ]] ; then - # If we are on a nodepool provided host and it has told us about - # where we can find local mirrors then use that mirror. - source /etc/ci/mirror_info.sh - sudo apt-add-repository -y "deb $NODEPOOL_UCA_MIRROR bionic-updates/ussuri main" - else - # Enable UCA:ussuri for updated versions of QEMU and libvirt - sudo add-apt-repository -y cloud-archive:ussuri - fi - REPOS_UPDATED=False - apt_get_update - - # Since pip10, pip will refuse to uninstall files from packages - # that were created with distutils (rather than more modern - # setuptools). This is because it technically doesn't have a - # manifest of what to remove. However, in most cases, simply - # overwriting works. So this hacks around those packages that - # have been dragged in by some other system dependency - sudo rm -rf /usr/lib/python3/dist-packages/httplib2-*.egg-info - sudo rm -rf /usr/lib/python3/dist-packages/pyasn1_modules-*.egg-info - sudo rm -rf /usr/lib/python3/dist-packages/PyYAML-*.egg-info -} - # Python Packages # --------------- @@ -106,6 +36,12 @@ function fixup_fedora { # Disable selinux to avoid configuring to allow Apache access # to Horizon files (LP#1175444) if selinuxenabled; then + #persit selinux config across reboots + cat << EOF | sudo tee /etc/selinux/config +SELINUX=permissive +SELINUXTYPE=targeted +EOF + # then disable at runtime sudo setenforce 0 fi @@ -143,30 +79,21 @@ function fixup_fedora { # overwriting works. So this hacks around those packages that # have been dragged in by some other system dependency sudo rm -rf /usr/lib64/python3*/site-packages/PyYAML-*.egg-info -} -function fixup_suse { - if ! is_suse; then - return + # After updating setuptools based on the requirements, the files from the + # python3-setuptools RPM are deleted, it breaks some tools such as semanage + # (used in diskimage-builder) that use the -s flag of the python + # interpreter, enforcing the use of the packages from /usr/lib. + # Importing setuptools in a such environment fails. + # Enforce the package re-installation to fix those applications. 
+ if is_package_installed python3-setuptools; then + sudo dnf reinstall -y python3-setuptools fi +} - # Deactivate and disable apparmor profiles in openSUSE and SLE - # distros to avoid issues with haproxy and dnsmasq. In newer - # releases, systemctl stop apparmor is actually a no-op, so we - # have to use aa-teardown to make sure we've deactivated the - # profiles: - # - # https://www.suse.com/releasenotes/x86_64/SUSE-SLES/15/#fate-325343 - # https://gitlab.com/apparmor/apparmor/merge_requests/81 - # https://build.opensuse.org/package/view_file/openSUSE:Leap:15.2/apparmor/apparmor.service?expand=1 - if sudo systemctl is-active -q apparmor; then - sudo systemctl stop apparmor - fi - if [ -x /usr/sbin/aa-teardown ]; then - sudo /usr/sbin/aa-teardown - fi - if sudo systemctl is-enabled -q apparmor; then - sudo systemctl disable apparmor +function fixup_ubuntu { + if ! is_ubuntu; then + return fi # Since pip10, pip will refuse to uninstall files from packages @@ -175,26 +102,12 @@ function fixup_suse { # manifest of what to remove. However, in most cases, simply # overwriting works. So this hacks around those packages that # have been dragged in by some other system dependency - sudo rm -rf /usr/lib/python3.6/site-packages/ply-*.egg-info - sudo rm -rf /usr/lib/python3.6/site-packages/six-*.egg-info - - # Ensure trusted CA certificates are up to date - # See https://bugzilla.suse.com/show_bug.cgi?id=1154871 - # May be removed once a new opensuse-15 image is available in nodepool - sudo zypper up -y p11-kit ca-certificates-mozilla -} - -function fixup_ovn_centos { - if [[ $os_VENDOR != "CentOS" ]]; then - return - fi - # OVN packages are part of this release for CentOS - yum_install centos-release-openstack-victoria + sudo rm -rf /usr/lib/python3/dist-packages/PyYAML-*.egg-info + sudo rm -rf /usr/lib/python3/dist-packages/pyasn1_modules-*.egg-info + sudo rm -rf /usr/lib/python3/dist-packages/simplejson-*.egg-info } function fixup_all { - fixup_keystone fixup_ubuntu fixup_fedora - fixup_suse } diff --git a/tools/generate-devstack-plugins-list.py b/tools/generate-devstack-plugins-list.py index 1cacd06bf8..bc28515a26 100644 --- a/tools/generate-devstack-plugins-list.py +++ b/tools/generate-devstack-plugins-list.py @@ -73,8 +73,11 @@ def has_devstack_plugin(session, proj): s = requests.Session() # sometimes gitea gives us a 500 error; retry sanely # https://stackoverflow.com/a/35636367 +# We need to disable raise_on_status because if any repo endup with 500 then +# propose-updates job which run this script will fail. 
retries = Retry(total=3, backoff_factor=1, - status_forcelist=[ 500 ]) + status_forcelist=[ 500 ], + raise_on_status=False) s.mount('https://', HTTPAdapter(max_retries=retries)) found_plugins = filter(functools.partial(has_devstack_plugin, s), projects) diff --git a/tools/get-stats.py b/tools/get-stats.py new file mode 100755 index 0000000000..b958af61b2 --- /dev/null +++ b/tools/get-stats.py @@ -0,0 +1,220 @@ +#!/usr/bin/python3 + +import argparse +import csv +import datetime +import glob +import itertools +import json +import logging +import os +import re +import socket +import subprocess +import sys + +try: + import psutil +except ImportError: + psutil = None + print('No psutil, process information will not be included', + file=sys.stderr) + +try: + import pymysql +except ImportError: + pymysql = None + print('No pymysql, database information will not be included', + file=sys.stderr) + +LOG = logging.getLogger('perf') + +# https://www.elastic.co/blog/found-crash-elasticsearch#mapping-explosion + + +def tryint(value): + try: + return int(value) + except (ValueError, TypeError): + return value + + +def get_service_stats(service): + stats = {'MemoryCurrent': 0} + output = subprocess.check_output(['/usr/bin/systemctl', 'show', service] + + ['-p%s' % stat for stat in stats]) + for line in output.decode().split('\n'): + if not line: + continue + stat, val = line.split('=') + stats[stat] = tryint(val) + + return stats + + +def get_services_stats(): + services = [os.path.basename(s) for s in + glob.glob('/etc/systemd/system/devstack@*.service')] + \ + ['apache2.service'] + return [dict(service=service, **get_service_stats(service)) + for service in services] + + +def get_process_stats(proc): + cmdline = proc.cmdline() + if 'python' in cmdline[0]: + cmdline = cmdline[1:] + return {'cmd': cmdline[0], + 'pid': proc.pid, + 'args': ' '.join(cmdline[1:]), + 'rss': proc.memory_info().rss} + + +def get_processes_stats(matches): + me = os.getpid() + procs = psutil.process_iter() + + def proc_matches(proc): + return me != proc.pid and any( + re.search(match, ' '.join(proc.cmdline())) + for match in matches) + + return [ + get_process_stats(proc) + for proc in procs + if proc_matches(proc)] + + +def get_db_stats(host, user, passwd): + dbs = [] + try: + db = pymysql.connect(host=host, user=user, password=passwd, + database='stats', + cursorclass=pymysql.cursors.DictCursor) + except pymysql.err.OperationalError as e: + if 'Unknown database' in str(e): + print('No stats database; assuming devstack failed', + file=sys.stderr) + return [] + raise + + with db: + with db.cursor() as cur: + cur.execute('SELECT db,op,count FROM queries') + for row in cur: + dbs.append({k: tryint(v) for k, v in row.items()}) + return dbs + + +def get_http_stats_for_log(logfile): + stats = {} + apache_fields = ('host', 'a', 'b', 'date', 'tz', 'request', 'status', + 'length', 'c', 'agent') + ignore_agents = ('curl', 'uwsgi', 'nova-status') + ignored_services = set() + for line in csv.reader(open(logfile), delimiter=' '): + fields = dict(zip(apache_fields, line)) + if len(fields) != len(apache_fields): + # Not a combined access log, so we can bail completely + return [] + try: + method, url, http = fields['request'].split(' ') + except ValueError: + method = url = http = '' + if 'HTTP' not in http: + # Not a combined access log, so we can bail completely + return [] + + # Tempest's User-Agent is unchanged, but client libraries and + # inter-service API calls use proper strings. 
So assume + # 'python-urllib' is tempest so we can tell it apart. + if 'python-urllib' in fields['agent'].lower(): + agent = 'tempest' + else: + agent = fields['agent'].split(' ')[0] + if agent.startswith('python-'): + agent = agent.replace('python-', '') + if '/' in agent: + agent = agent.split('/')[0] + + if agent in ignore_agents: + continue + + try: + service, rest = url.strip('/').split('/', 1) + except ValueError: + # Root calls like "GET /identity" + service = url.strip('/') + rest = '' + + if not service.isalpha(): + ignored_services.add(service) + continue + + method_key = '%s-%s' % (agent, method) + try: + length = int(fields['length']) + except ValueError: + LOG.warning('[%s] Failed to parse length %r from line %r' % ( + logfile, fields['length'], line)) + length = 0 + stats.setdefault(service, {'largest': 0}) + stats[service].setdefault(method_key, 0) + stats[service][method_key] += 1 + stats[service]['largest'] = max(stats[service]['largest'], + length) + + if ignored_services: + LOG.warning('Ignored services: %s' % ','.join( + sorted(ignored_services))) + + # Flatten this for ES + return [{'service': service, 'log': os.path.basename(logfile), + **vals} + for service, vals in stats.items()] + + +def get_http_stats(logfiles): + return list(itertools.chain.from_iterable(get_http_stats_for_log(log) + for log in logfiles)) + + +def get_report_info(): + return { + 'timestamp': datetime.datetime.now().isoformat(), + 'hostname': socket.gethostname(), + 'version': 2, + } + + +if __name__ == '__main__': + process_defaults = ['privsep', 'mysqld', 'erlang', 'etcd'] + parser = argparse.ArgumentParser() + parser.add_argument('--db-user', default='root', + help=('MySQL user for collecting stats ' + '(default: "root")')) + parser.add_argument('--db-pass', default=None, + help='MySQL password for db-user') + parser.add_argument('--db-host', default='localhost', + help='MySQL hostname') + parser.add_argument('--apache-log', action='append', default=[], + help='Collect API call stats from this apache log') + parser.add_argument('--process', action='append', + default=process_defaults, + help=('Include process stats for this cmdline regex ' + '(default is %s)' % ','.join(process_defaults))) + args = parser.parse_args() + + logging.basicConfig(level=logging.WARNING) + + data = { + 'services': get_services_stats(), + 'db': pymysql and args.db_pass and get_db_stats(args.db_host, + args.db_user, + args.db_pass) or [], + 'processes': psutil and get_processes_stats(args.process) or [], + 'api': get_http_stats(args.apache_log), + 'report': get_report_info(), + } + + print(json.dumps(data, indent=2)) diff --git a/tools/image_list.sh b/tools/image_list.sh index 3a27c4acfd..81231be9f3 100755 --- a/tools/image_list.sh +++ b/tools/image_list.sh @@ -22,7 +22,7 @@ source $TOP_DIR/functions # Possible virt drivers, if we have more, add them here. Always keep # dummy in the end position to trigger the fall through case. -DRIVERS="openvz ironic libvirt vsphere xenserver dummy" +DRIVERS="openvz ironic libvirt vsphere dummy" # Extra variables to trigger getting additional images. 
export ENABLED_SERVICES="h-api,tr-api" diff --git a/tools/install_pip.sh b/tools/install_pip.sh index 9afd2e53c2..027693fc0a 100755 --- a/tools/install_pip.sh +++ b/tools/install_pip.sh @@ -38,7 +38,6 @@ FILES=$TOP_DIR/files # [1] https://opendev.org/openstack/project-config/src/branch/master/nodepool/elements/cache-devstack/source-repository-pip PIP_GET_PIP_URL=${PIP_GET_PIP_URL:-"https://bootstrap.pypa.io/get-pip.py"} -LOCAL_PIP="$FILES/$(basename $PIP_GET_PIP_URL)" GetDistro echo "Distro: $DISTRO" @@ -46,25 +45,26 @@ echo "Distro: $DISTRO" function get_versions { # FIXME(dhellmann): Deal with multiple python versions here? This # is just used for reporting, so maybe not? - PIP=$(which pip 2>/dev/null || which pip-python 2>/dev/null || true) + PIP=$(which pip 2>/dev/null || which pip-python 2>/dev/null || which pip3 2>/dev/null || true) if [[ -n $PIP ]]; then PIP_VERSION=$($PIP --version | awk '{ print $2}') echo "pip: $PIP_VERSION" else echo "pip: Not Installed" fi - # Show python3 module version - python${PYTHON3_VERSION} -m pip --version } function install_get_pip { + _pip_url=$PIP_GET_PIP_URL + _local_pip="$FILES/$(basename $_pip_url)" + # If get-pip.py isn't python, delete it. This was probably an # outage on the server. - if [[ -r $LOCAL_PIP ]]; then - if ! head -1 $LOCAL_PIP | grep -q '#!/usr/bin/env python'; then - echo "WARNING: Corrupt $LOCAL_PIP found removing" - rm $LOCAL_PIP + if [[ -r $_local_pip ]]; then + if ! head -1 $_local_pip | grep -q '#!/usr/bin/env python'; then + echo "WARNING: Corrupt $_local_pip found removing" + rm $_local_pip fi fi @@ -78,22 +78,20 @@ function install_get_pip { # Thus we use curl's "-z" feature to always check the modified # since and only download if a new version is out -- but only if # it seems we downloaded the file originally. - if [[ ! -r $LOCAL_PIP || -r $LOCAL_PIP.downloaded ]]; then + if [[ ! -r $_local_pip || -r $_local_pip.downloaded ]]; then # only test freshness if LOCAL_PIP is actually there, # otherwise we generate a scary warning. local timecond="" - if [[ -r $LOCAL_PIP ]]; then - timecond="-z $LOCAL_PIP" + if [[ -r $_local_pip ]]; then + timecond="-z $_local_pip" fi curl -f --retry 6 --retry-delay 5 \ - $timecond -o $LOCAL_PIP $PIP_GET_PIP_URL || \ + $timecond -o $_local_pip $_pip_url || \ die $LINENO "Download of get-pip.py failed" - touch $LOCAL_PIP.downloaded + touch $_local_pip.downloaded fi - # TODO: remove the trailing pip constraint when a proper fix - # arrives for bug https://bugs.launchpad.net/devstack/+bug/1906322 - sudo -H -E python${PYTHON3_VERSION} $LOCAL_PIP -c $TOOLS_DIR/cap-pip.txt + sudo -H -E python${PYTHON3_VERSION} $_local_pip } @@ -115,14 +113,6 @@ function configure_pypi_alternative_url { } -# Setuptools 8 implements PEP 440, and 8.0.4 adds a warning triggered any time -# pkg_resources inspects the list of installed Python packages if there are -# non-compliant version numbers in the egg-info (for example, from distro -# system packaged Python libraries). This is off by default after 8.2 but can -# be enabled by uncommenting the lines below. -#PYTHONWARNINGS=$PYTHONWARNINGS,always::RuntimeWarning:pkg_resources -#export PYTHONWARNINGS - # Show starting versions get_versions @@ -130,28 +120,30 @@ if [[ -n $PYPI_ALTERNATIVE_URL ]]; then configure_pypi_alternative_url fi -# Just use system pkgs on Focal -if [[ "$DISTRO" == focal ]]; then - exit 0 +if is_fedora && [[ ${DISTRO} == f* || ${DISTRO} == rhel* ]]; then + # get-pip.py will not install over the python3-pip package in + # Fedora 34 any more. 
+ # https://bugzilla.redhat.com/show_bug.cgi?id=1988935 + # https://github.com/pypa/pip/issues/9904 + # You can still install using get-pip.py if python3-pip is *not* + # installed; this *should* remain separate under /usr/local and not break + # if python3-pip is later installed. + # For general sanity, we just use the packaged pip. It should be + # recent enough anyway. This is included via rpms/general + : # Simply fall through +elif is_ubuntu; then + # pip on Ubuntu 20.04 and higher is new enough, too + # drop setuptools from u-c + sed -i -e '/setuptools/d' $REQUIREMENTS_DIR/upper-constraints.txt +else + install_get_pip + + # Note setuptools is part of requirements.txt and we want to make sure + # we obey any versioning as described there. + pip_install_gr setuptools fi -# Eradicate any and all system packages - -# Python in fedora/suse depends on the python-pip package so removing it -# results in a nonfunctional system. pip on fedora installs to /usr so pip -# can safely override the system pip for all versions of fedora -if ! is_fedora && ! is_suse; then - if is_package_installed python3-pip ; then - uninstall_package python3-pip - fi -fi - -install_get_pip - set -x -# Note setuptools is part of requirements.txt and we want to make sure -# we obey any versioning as described there. -pip_install_gr setuptools get_versions diff --git a/tools/install_prereqs.sh b/tools/install_prereqs.sh index a7c03d26cd..bb470b2927 100755 --- a/tools/install_prereqs.sh +++ b/tools/install_prereqs.sh @@ -74,13 +74,13 @@ install_package $PACKAGES if [[ -n "$SYSLOG" && "$SYSLOG" != "False" ]]; then if is_ubuntu || is_fedora; then install_package rsyslog-relp - elif is_suse; then - install_package rsyslog-module-relp else exit_distro_not_supported "rsyslog-relp installation" fi fi +# TODO(clarkb) remove these once we are switched to global venv by default +export PYTHON=$(which python${PYTHON3_VERSION} 2>/dev/null || which python3 2>/dev/null) # Mark end of run # --------------- diff --git a/tools/make_cert.sh b/tools/make_cert.sh index e91464fc0f..0212d0033a 100755 --- a/tools/make_cert.sh +++ b/tools/make_cert.sh @@ -27,7 +27,7 @@ function usage { } CN=$1 -if [ -z "$CN" ]]; then +if [ -z "$CN" ]; then usage fi ORG_UNIT_NAME=${2:-$ORG_UNIT_NAME} @@ -52,5 +52,5 @@ init_CA make_cert $INT_CA_DIR $DEVSTACK_CERT_NAME $DEVSTACK_HOSTNAME # Create a cert bundle -cat $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/cacert.pem >$DEVSTACK_CERT - +cat $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key \ + $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/cacert.pem >$DEVSTACK_CERT diff --git a/tools/memory_tracker.sh b/tools/memory_tracker.sh index 6c36534f01..2f404c26fb 100755 --- a/tools/memory_tracker.sh +++ b/tools/memory_tracker.sh @@ -14,7 +14,12 @@ set -o errexit -PYTHON=${PYTHON:-python3} +# TODO(frickler): make this use stackrc variables +if [ -x /opt/stack/data/venv/bin/python ]; then + PYTHON=/opt/stack/data/venv/bin/python +else + PYTHON=${PYTHON:-python3} +fi # time to sleep between checks SLEEP_TIME=20 diff --git a/tools/mlock_report.py b/tools/mlock_report.py index b15a0bf80b..8cbda15895 100644 --- a/tools/mlock_report.py +++ b/tools/mlock_report.py @@ -6,7 +6,7 @@ LCK_SUMMARY_REGEX = re.compile( - "^VmLck:\s+(?P[\d]+)\s+kB", re.MULTILINE) + r"^VmLck:\s+(?P[\d]+)\s+kB", re.MULTILINE) def main(): @@ -24,17 +24,19 @@ def _get_report(): # iterate over the /proc/%pid/status files manually try: s = open("%s/%d/status" % (psutil.PROCFS_PATH, proc.pid), 'r') - except 
EnvironmentError: + with s: + for line in s: + result = LCK_SUMMARY_REGEX.search(line) + if result: + locked = int(result.group('locked')) + if locked: + mlock_users.append({'name': proc.name(), + 'pid': proc.pid, + 'locked': locked}) + except OSError: + # pids can disappear, we're ok with that continue - with s: - for line in s: - result = LCK_SUMMARY_REGEX.search(line) - if result: - locked = int(result.group('locked')) - if locked: - mlock_users.append({'name': proc.name(), - 'pid': proc.pid, - 'locked': locked}) + # produce a single line log message with per process mlock stats if mlock_users: diff --git a/tools/outfilter.py b/tools/outfilter.py index e910f79ff2..3955d39794 100644 --- a/tools/outfilter.py +++ b/tools/outfilter.py @@ -26,8 +26,8 @@ import re import sys -IGNORE_LINES = re.compile('(set \+o|xtrace)') -HAS_DATE = re.compile('^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{3} \|') +IGNORE_LINES = re.compile(r'(set \+o|xtrace)') +HAS_DATE = re.compile(r'^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{3} \|') def get_options(): @@ -76,7 +76,8 @@ def main(): # with zuulv3 native jobs and ansible capture it may become # clearer what to do if HAS_DATE.search(line) is None: - now = datetime.datetime.utcnow() + now = datetime.datetime.now(datetime.timezone.utc).replace( + tzinfo=None) ts_line = ("%s | %s" % ( now.strftime("%Y-%m-%d %H:%M:%S.%f")[:-3], line)) @@ -89,13 +90,10 @@ def main(): if outfile: # We've opened outfile as a binary file to get the - # non-buffered behaviour. on python3, sys.stdin was + # non-buffered behaviour. on python3, sys.stdin was # opened with the system encoding and made the line into # utf-8, so write the logfile out in utf-8 bytes. - if sys.version_info < (3,): - outfile.write(ts_line) - else: - outfile.write(ts_line.encode('utf-8')) + outfile.write(ts_line.encode('utf-8')) outfile.flush() diff --git a/tools/ping_neutron.sh b/tools/ping_neutron.sh index 73fe3f3bdf..ab8e8dfca8 100755 --- a/tools/ping_neutron.sh +++ b/tools/ping_neutron.sh @@ -30,7 +30,8 @@ ping_neutron.sh [ping args] This provides a wrapper to ping neutron guests that are on isolated tenant networks that the caller can't normally reach. It does so by -creating a network namespace probe. +using either the DHCP or Metadata network namespace to support both +ML2/OVS and OVN. It takes arguments like ping, except the first arg must be the network name. @@ -44,6 +45,12 @@ EOF exit 1 } +# BUG: with duplicate network names, this fails pretty hard since it +# will just pick the first match. +function _get_net_id { + openstack --os-cloud devstack-admin --os-region-name="$REGION_NAME" --os-project-name admin --os-username admin --os-password $ADMIN_PASSWORD network list | grep $1 | head -n 1 | awk '{print $2}' +} + NET_NAME=$1 if [[ -z "$NET_NAME" ]]; then @@ -53,12 +60,11 @@ fi REMAINING_ARGS="${@:2}" -# BUG: with duplicate network names, this fails pretty hard. 
-NET_ID=$(openstack network show -f value -c id "$NET_NAME") -PROBE_ID=$(neutron-debug probe-list -c id -c network_id | grep "$NET_ID" | awk '{print $2}' | head -n 1) +NET_ID=`_get_net_id $NET_NAME` +NET_NS=$(ip netns list | grep "$NET_ID" | head -n 1) # This runs a command inside the specific netns -NET_NS_CMD="ip netns exec qprobe-$PROBE_ID" +NET_NS_CMD="ip netns exec $NET_NS" PING_CMD="sudo $NET_NS_CMD ping $REMAINING_ARGS" echo "Running $PING_CMD" diff --git a/tools/uec/meta.py b/tools/uec/meta.py deleted file mode 100644 index 1d994a60d6..0000000000 --- a/tools/uec/meta.py +++ /dev/null @@ -1,42 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import BaseHTTPServer -import SimpleHTTPServer -import sys - - -def main(host, port, HandlerClass=SimpleHTTPServer.SimpleHTTPRequestHandler, - ServerClass=BaseHTTPServer.HTTPServer, protocol="HTTP/1.0"): - """simple http server that listens on a give address:port.""" - - server_address = (host, port) - - HandlerClass.protocol_version = protocol - httpd = ServerClass(server_address, HandlerClass) - - sa = httpd.socket.getsockname() - print("Serving HTTP on", sa[0], "port", sa[1], "...") - httpd.serve_forever() - -if __name__ == '__main__': - if sys.argv[1:]: - address = sys.argv[1] - else: - address = '0.0.0.0' - if ':' in address: - host, port = address.split(':') - else: - host = address - port = 8080 - - main(host, int(port)) diff --git a/tools/update_clouds_yaml.py b/tools/update_clouds_yaml.py index 7be995e8f3..87312d9469 100755 --- a/tools/update_clouds_yaml.py +++ b/tools/update_clouds_yaml.py @@ -14,14 +14,14 @@ # Update the clouds.yaml file. - import argparse import os.path +import sys import yaml -class UpdateCloudsYaml(object): +class UpdateCloudsYaml: def __init__(self, args): if args.file: self._clouds_path = args.file @@ -35,25 +35,26 @@ def __init__(self, args): self._cloud = args.os_cloud self._cloud_data = { 'region_name': args.os_region_name, - 'identity_api_version': args.os_identity_api_version, - 'volume_api_version': args.os_volume_api_version, 'auth': { 'auth_url': args.os_auth_url, 'username': args.os_username, + 'user_domain_id': 'default', 'password': args.os_password, }, } + if args.os_project_name and args.os_system_scope: print( - "WARNING: os_project_name and os_system_scope were both" - " given. os_system_scope will take priority.") - if args.os_project_name and not args.os_system_scope: + "WARNING: os_project_name and os_system_scope were both " + "given. os_system_scope will take priority." 
+        )
+
+        if args.os_system_scope:  # system-scoped
+            self._cloud_data['auth']['system_scope'] = args.os_system_scope
+        elif args.os_project_name:  # project-scoped
             self._cloud_data['auth']['project_name'] = args.os_project_name
-        if args.os_identity_api_version == '3' and not args.os_system_scope:
-            self._cloud_data['auth']['user_domain_id'] = 'default'
             self._cloud_data['auth']['project_domain_id'] = 'default'
-        if args.os_system_scope:
-            self._cloud_data['auth']['system_scope'] = args.os_system_scope
+
         if args.os_cacert:
             self._cloud_data['cacert'] = args.os_cacert
@@ -65,7 +66,7 @@ def run(self):
     def _read_clouds(self):
         try:
             with open(self._clouds_path) as clouds_file:
-                self._clouds = yaml.load(clouds_file)
+                self._clouds = yaml.safe_load(clouds_file)
         except IOError:
             # The user doesn't have a clouds.yaml file.
             print("The user clouds.yaml file didn't exist.")
@@ -89,8 +90,6 @@ def main():
     parser.add_argument('--file')
     parser.add_argument('--os-cloud', required=True)
     parser.add_argument('--os-region-name', default='RegionOne')
-    parser.add_argument('--os-identity-api-version', default='3')
-    parser.add_argument('--os-volume-api-version', default='3')
     parser.add_argument('--os-cacert')
     parser.add_argument('--os-auth-url', required=True)
     parser.add_argument('--os-username', required=True)
diff --git a/tools/verify-ipv6-address.py b/tools/verify-ipv6-address.py
new file mode 100644
index 0000000000..dc18fa6d8a
--- /dev/null
+++ b/tools/verify-ipv6-address.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python3
+
+import argparse
+import ipaddress
+import sys
+
+def main():
+    parser = argparse.ArgumentParser(
+        description="Check if a given string is a valid IPv6 address.",
+        formatter_class=argparse.RawTextHelpFormatter,
+    )
+    parser.add_argument(
+        "address",
+        help=(
+            "The IPv6 address string to validate.\n"
+            "Examples:\n"
+            "  2001:0db8:85a3:0000:0000:8a2e:0370:7334\n"
+            "  2001:db8::1\n"
+            "  ::1\n"
+            "  fe80::1%%eth0 (scope IDs are handled)"
+        ),
+    )
+    args = parser.parse_args()
+
+    try:
+        # Try to parse an IPv6Address: if parsing fails, or we get an
+        # IPv4Address instead, exit non-zero.
+        ip_obj = ipaddress.ip_address(args.address.strip('[]'))
+        if isinstance(ip_obj, ipaddress.IPv6Address):
+            sys.exit(0)
+        else:
+            sys.exit(1)
+    except ValueError:
+        sys.exit(1)
+    except Exception as e:
+        print(f"An unexpected error occurred during validation: {e}", file=sys.stderr)
+        sys.exit(1)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/tools/verify-ipv6-only-deployments.sh b/tools/verify-ipv6-only-deployments.sh
new file mode 100755
index 0000000000..a1acecbb3f
--- /dev/null
+++ b/tools/verify-ipv6-only-deployments.sh
@@ -0,0 +1,95 @@
+#!/bin/bash
+#
+#
+# NOTE(gmann): This script is used in the 'devstack-tempest-ipv6' zuul job to verify that
+# services are deployed on IPv6 properly. It will catch any devstack or devstack
+# plugin that is missing the settings required to listen on an IPv6 address. It runs
+# during the run phase of the zuul job, before the tests. Child jobs of
+# 'devstack-tempest-ipv6' can extend the IPv6 verification for their project by defining
+# a new post-run script which will run along with this base script.
+# If more common IPv6 verifications are needed, we can always extend this script.
+ +# Keep track of the DevStack directory +TOP_DIR=$(cd $(dirname "$0")/../../devstack && pwd) +source $TOP_DIR/stackrc +source $TOP_DIR/openrc admin admin + +function verify_devstack_ipv6_setting { + local _service_host='' + _service_host=$(echo $SERVICE_HOST | tr -d []) + local _host_ipv6='' + _host_ipv6=$(echo $HOST_IPV6 | tr -d []) + local _service_listen_address='' + _service_listen_address=$(echo $SERVICE_LISTEN_ADDRESS | tr -d []) + local _service_local_host='' + _service_local_host=$(echo $SERVICE_LOCAL_HOST | tr -d []) + local _tunnel_endpoint_ip='' + _tunnel_endpoint_ip=$(echo $TUNNEL_ENDPOINT_IP | tr -d []) + if [[ "$SERVICE_IP_VERSION" != 6 ]]; then + echo $SERVICE_IP_VERSION "SERVICE_IP_VERSION is not set to 6 which is must for devstack to deploy services with IPv6 address." + exit 1 + fi + if [[ "$TUNNEL_IP_VERSION" != 6 ]]; then + echo $TUNNEL_IP_VERSION "TUNNEL_IP_VERSION is not set to 6 so TUNNEL_ENDPOINT_IP cannot be an IPv6 address." + exit 1 + fi + if ! python3 ${TOP_DIR}/tools/verify-ipv6-address.py "$_service_host"; then + echo $SERVICE_HOST "SERVICE_HOST is not IPv6 which means devstack cannot deploy services on IPv6 addresses." + exit 1 + fi + if ! python3 ${TOP_DIR}/tools/verify-ipv6-address.py "$_host_ipv6"; then + echo $HOST_IPV6 "HOST_IPV6 is not IPv6 which means devstack cannot deploy services on IPv6 addresses." + exit 1 + fi + if ! python3 ${TOP_DIR}/tools/verify-ipv6-address.py "$_service_listen_address"; then + echo $SERVICE_LISTEN_ADDRESS "SERVICE_LISTEN_ADDRESS is not IPv6 which means devstack cannot deploy services on IPv6 addresses." + exit 1 + fi + if ! python3 ${TOP_DIR}/tools/verify-ipv6-address.py "$_service_local_host"; then + echo $SERVICE_LOCAL_HOST "SERVICE_LOCAL_HOST is not IPv6 which means devstack cannot deploy services on IPv6 addresses." + exit 1 + fi + if ! python3 ${TOP_DIR}/tools/verify-ipv6-address.py "$_tunnel_endpoint_ip"; then + echo $TUNNEL_ENDPOINT_IP "TUNNEL_ENDPOINT_IP is not IPv6 which means devstack will not deploy with an IPv6 endpoint address." + exit 1 + fi + echo "Devstack is properly configured with IPv6" + echo "SERVICE_IP_VERSION:" $SERVICE_IP_VERSION "HOST_IPV6:" $HOST_IPV6 "SERVICE_HOST:" $SERVICE_HOST "SERVICE_LISTEN_ADDRESS:" $SERVICE_LISTEN_ADDRESS "SERVICE_LOCAL_HOST:" $SERVICE_LOCAL_HOST "TUNNEL_IP_VERSION:" $TUNNEL_IP_VERSION "TUNNEL_ENDPOINT_IP:" $TUNNEL_ENDPOINT_IP +} + +function sanity_check_system_ipv6_enabled { + if [ ! -f "/proc/sys/net/ipv6/conf/default/disable_ipv6" ] || [ "$(cat /proc/sys/net/ipv6/conf/default/disable_ipv6)" -ne "0" ]; then + echo "IPv6 is disabled in system" + exit 1 + fi + echo "IPv6 is enabled in system" +} + +function verify_service_listen_address_is_ipv6 { + local endpoints_verified=False + local all_ipv6=True + endpoints=$(openstack endpoint list -f value -c URL) + for endpoint in ${endpoints}; do + local endpoint_address='' + endpoint_address=$(echo "$endpoint" | awk -F/ '{print $3}' | awk -F] '{print $1}') + endpoint_address=$(echo $endpoint_address | tr -d '[]') + if ! python3 ${TOP_DIR}/tools/verify-ipv6-address.py "$endpoint_address"; then + all_ipv6=False + echo $endpoint ": This is not an IPv6 endpoint which means corresponding service is not listening on an IPv6 address." 
+ continue + fi + endpoints_verified=True + done + if [[ "$all_ipv6" == "False" ]] || [[ "$endpoints_verified" == "False" ]]; then + exit 1 + fi + echo "All services deployed by devstack are on IPv6 endpoints" + echo $endpoints +} + +#First thing to verify if system has IPv6 enabled or not +sanity_check_system_ipv6_enabled +#Verify whether devstack is configured properly with IPv6 setting +verify_devstack_ipv6_setting +#Get all registrfed endpoints by devstack in keystone and verify that each endpoints address is IPv6. +verify_service_listen_address_is_ipv6 diff --git a/tools/worlddump.py b/tools/worlddump.py index 22770f15b6..26ced3f653 100755 --- a/tools/worlddump.py +++ b/tools/worlddump.py @@ -19,7 +19,6 @@ import argparse import datetime -from distutils import spawn import fnmatch import io import os @@ -32,7 +31,6 @@ 'nova-compute', 'neutron-dhcp-agent', 'neutron-l3-agent', - 'neutron-linuxbridge-agent', 'neutron-metadata-agent', 'neutron-openvswitch-agent', 'cinder-volume', @@ -52,7 +50,7 @@ def get_options(): def filename(dirname, name=""): - now = datetime.datetime.utcnow() + now = datetime.datetime.now(datetime.timezone.utc) fmt = "worlddump-%Y-%m-%d-%H%M%S" if name: fmt += "-" + name @@ -76,7 +74,7 @@ def _dump_cmd(cmd): def _find_cmd(cmd): - if not spawn.find_executable(cmd): + if not shutil.which(cmd): print("*** %s not found: skipping" % cmd) return False return True @@ -134,7 +132,7 @@ def disk_space(): def ebtables_dump(): - tables = ['filter', 'nat', 'broute'] + tables = ['filter', 'nat'] _header("EB Tables Dump") if not _find_cmd('ebtables'): return diff --git a/tools/xen/README.md b/tools/xen/README.md deleted file mode 100644 index 287301156e..0000000000 --- a/tools/xen/README.md +++ /dev/null @@ -1,3 +0,0 @@ -Note: XenServer relative tools have been moved to `os-xenapi`_ and be maintained there. - -.. _os-xenapi: https://opendev.org/x/os-xenapi/ diff --git a/tox.ini b/tox.ini index ed28636d3a..26cd68c031 100644 --- a/tox.ini +++ b/tox.ini @@ -1,5 +1,5 @@ [tox] -minversion = 1.6 +minversion = 3.18.0 skipsdist = True envlist = bashate @@ -12,8 +12,8 @@ basepython = python3 # against devstack, just set BASHATE_INSTALL_PATH=/path/... 
to your # modified bashate tree deps = - {env:BASHATE_INSTALL_PATH:bashate==2.0.0} -whitelist_externals = bash + {env:BASHATE_INSTALL_PATH:bashate} +allowlist_externals = bash commands = bash -c "find {toxinidir} \ -not \( -type d -name .?\* -prune \) \ -not \( -type d -name doc -prune \) \ @@ -34,8 +34,10 @@ commands = bash -c "find {toxinidir} \ -print0 | xargs -0 bashate -v -iE006 -eE005,E042" [testenv:docs] -deps = -r{toxinidir}/doc/requirements.txt -whitelist_externals = bash +deps = + -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} + -r{toxinidir}/doc/requirements.txt +allowlist_externals = bash setenv = TOP_DIR={toxinidir} commands = @@ -43,7 +45,7 @@ commands = [testenv:pdf-docs] deps = {[testenv:docs]deps} -whitelist_externals = +allowlist_externals = make commands = sphinx-build -W -b latex doc/source doc/build/pdf diff --git a/unstack.sh b/unstack.sh index 3197cf136f..8e8996c63b 100755 --- a/unstack.sh +++ b/unstack.sh @@ -61,7 +61,6 @@ source $TOP_DIR/lib/tls # Source project function libraries source $TOP_DIR/lib/infra -source $TOP_DIR/lib/oslo source $TOP_DIR/lib/lvm source $TOP_DIR/lib/horizon source $TOP_DIR/lib/keystone @@ -73,6 +72,7 @@ source $TOP_DIR/lib/swift source $TOP_DIR/lib/neutron source $TOP_DIR/lib/ldap source $TOP_DIR/lib/dstat +source $TOP_DIR/lib/atop source $TOP_DIR/lib/etcd3 # Extras Source @@ -168,19 +168,27 @@ if is_service_enabled etcd3; then cleanup_etcd3 fi -if is_service_enabled dstat; then - stop_dstat +if is_service_enabled openstack-cli-server; then + stop_service devstack@openstack-cli-server +fi + +stop_dstat + +if is_service_enabled atop; then + stop_atop fi # NOTE: Cinder automatically installs the lvm2 package, independently of the # enabled backends. So if Cinder is enabled, and installed successfully we are # sure lvm2 (lvremove, /etc/lvm/lvm.conf, etc.) is here. if is_service_enabled cinder && is_package_installed lvm2; then - # Using /bin/true here indicates a BUG - maybe the - # DEFAULT_VOLUME_GROUP_NAME doesn't exist? We should - # isolate this further down in lib/cinder cleanup. - clean_lvm_volume_group $DEFAULT_VOLUME_GROUP_NAME || /bin/true clean_lvm_filter fi clean_pyc_files +rm -Rf $DEST/async + +# Clean any safe.directory items we wrote into the global +# gitconfig. We can identify the relevant ones by checking that they +# point to somewhere in our $DEST directory. +sudo sed -i "\+directory = ${DEST}+ d" /etc/gitconfig
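
For reference, here is a minimal sketch (not part of the change itself) of how the dbcounter plugin introduced above would typically be exercised end to end. The plugin name comes from the [sqlalchemy.plugins] entry point in tools/dbcounter/setup.cfg and the stats schema from do_incr() and tools/get-stats.py; the credentials, host, and 'nova' database below are placeholders, and a MySQL/MariaDB backend is assumed since the counter relies on ON DUPLICATE KEY UPDATE.

    import sqlalchemy

    # Appending plugin=dbcounter to the connection URL makes SQLAlchemy load
    # LogCursorEventsPlugin through its entry point and hook the engine with
    # the before_cursor_execute listener.
    engine = sqlalchemy.create_engine(
        'mysql+pymysql://root:secret@127.0.0.1/nova?plugin=dbcounter')

    with engine.connect() as conn:
        # This statement is queued as a ('nova', 'SELECT') hit by _log_event().
        conn.execute(sqlalchemy.text('SELECT 1'))

    # The writer thread flushes batched hits to stats.queries after 10-60
    # seconds; get-stats.py (or any client) can then read the counts back.
    stats = sqlalchemy.create_engine(
        'mysql+pymysql://root:secret@127.0.0.1/stats')
    with stats.connect() as conn:
        for db, op, count in conn.execute(
                sqlalchemy.text('SELECT db, op, count FROM queries')):
            print(db, op, count)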