diff --git a/.gitignore b/.gitignore index e4820903d4..ad153f4a07 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,42 @@ -proto *~ +.*.sw? *.log -src +*-log +*.log.* +*-log.* +*.pem +*.pyc +.localrc.auto +.localrc.password +.prereqs +.tox +.stackenv +accrc +doc/files +doc/build +files/*.gz +files/*.vmdk +files/*.rpm +files/*.rpm.* +files/*.deb +files/*.deb.* +files/*.qcow2 +files/*.img +files/images +files/pip-* +files/get-pip.py* +files/ir-deploy* +files/ironic-inspector* +files/etcd* +/local.conf +local.sh localrc +proto +shocco +src +stack-screenrc +userrc_early +AUTHORS +ChangeLog +tools/dbcounter/build/ +tools/dbcounter/dbcounter.egg-info/ diff --git a/.gitreview b/.gitreview index 570d31a987..e1bf63ba7a 100644 --- a/.gitreview +++ b/.gitreview @@ -1,4 +1,4 @@ [gerrit] -host=review.openstack.org +host=review.opendev.org port=29418 -project=openstack-dev/devstack.git +project=openstack/devstack.git diff --git a/.mailmap b/.mailmap new file mode 100644 index 0000000000..43e4e6ec46 --- /dev/null +++ b/.mailmap @@ -0,0 +1,7 @@ +# Format is: +# +# +Jiajun Liu +Jian Wen +Joe Gordon +Sean Dague diff --git a/.zuul.yaml b/.zuul.yaml new file mode 100644 index 0000000000..2227f185dd --- /dev/null +++ b/.zuul.yaml @@ -0,0 +1,1140 @@ +- nodeset: + name: openstack-single-node-jammy + nodes: + - name: controller + label: ubuntu-jammy + groups: + - name: tempest + nodes: + - controller + +- nodeset: + name: openstack-single-node-noble + nodes: + - name: controller + label: ubuntu-noble + groups: + - name: tempest + nodes: + - controller + +- nodeset: + name: openstack-single-node-focal + nodes: + - name: controller + label: ubuntu-focal + groups: + - name: tempest + nodes: + - controller + +- nodeset: + name: openstack-single-node-bionic + nodes: + - name: controller + label: ubuntu-bionic + groups: + - name: tempest + nodes: + - controller + +- nodeset: + name: devstack-single-node-almalinux-10 + nodes: + - name: controller + label: almalinux-10-8GB + groups: + - name: tempest + nodes: + - controller + +- nodeset: + name: devstack-single-node-centos-9-stream + nodes: + - name: controller + label: centos-9-stream + groups: + - name: tempest + nodes: + - controller + +- nodeset: + name: devstack-single-node-centos-10-stream + nodes: + - name: controller + label: centos-10-stream-8GB + groups: + - name: tempest + nodes: + - controller + +- nodeset: + name: devstack-single-node-debian-trixie + nodes: + - name: controller + label: debian-trixie-8GB + groups: + - name: tempest + nodes: + - controller + +- nodeset: + name: devstack-single-node-debian-bookworm + nodes: + - name: controller + label: debian-bookworm + groups: + - name: tempest + nodes: + - controller + +# TODO(frickler): drop this dummy nodeset once all references have been removed +- nodeset: + name: devstack-single-node-opensuse-15 + nodes: [] + +- nodeset: + name: devstack-single-node-rockylinux-9 + nodes: + - name: controller + label: rockylinux-9 + groups: + - name: tempest + nodes: + - controller + +- nodeset: + name: devstack-single-node-rockylinux-10 + nodes: + - name: controller + label: rockylinux-10-8GB + groups: + - name: tempest + nodes: + - controller + +- nodeset: + name: openstack-two-node-centos-10-stream + nodes: + - name: controller + label: centos-10-stream-8GB + - name: compute1 + label: centos-10-stream-8GB + groups: + # Node where tests are executed and test results collected + - name: tempest + nodes: + - controller + # Nodes running the compute service + - name: compute + nodes: + - controller + - compute1 + # 
Nodes that are not the controller + - name: subnode + nodes: + - compute1 + # Switch node for multinode networking setup + - name: switch + nodes: + - controller + # Peer nodes for multinode networking setup + - name: peers + nodes: + - compute1 + +- nodeset: + name: openstack-two-node-centos-9-stream + nodes: + - name: controller + label: centos-9-stream + - name: compute1 + label: centos-9-stream + groups: + # Node where tests are executed and test results collected + - name: tempest + nodes: + - controller + # Nodes running the compute service + - name: compute + nodes: + - controller + - compute1 + # Nodes that are not the controller + - name: subnode + nodes: + - compute1 + # Switch node for multinode networking setup + - name: switch + nodes: + - controller + # Peer nodes for multinode networking setup + - name: peers + nodes: + - compute1 + +- nodeset: + name: openstack-two-node-jammy + nodes: + - name: controller + label: ubuntu-jammy + - name: compute1 + label: ubuntu-jammy + groups: + # Node where tests are executed and test results collected + - name: tempest + nodes: + - controller + # Nodes running the compute service + - name: compute + nodes: + - controller + - compute1 + # Nodes that are not the controller + - name: subnode + nodes: + - compute1 + # Switch node for multinode networking setup + - name: switch + nodes: + - controller + # Peer nodes for multinode networking setup + - name: peers + nodes: + - compute1 + +- nodeset: + name: openstack-two-node-noble + nodes: + - name: controller + label: ubuntu-noble + - name: compute1 + label: ubuntu-noble + groups: + # Node where tests are executed and test results collected + - name: tempest + nodes: + - controller + # Nodes running the compute service + - name: compute + nodes: + - controller + - compute1 + # Nodes that are not the controller + - name: subnode + nodes: + - compute1 + # Switch node for multinode networking setup + - name: switch + nodes: + - controller + # Peer nodes for multinode networking setup + - name: peers + nodes: + - compute1 + +- nodeset: + name: openstack-two-node-focal + nodes: + - name: controller + label: ubuntu-focal + - name: compute1 + label: ubuntu-focal + groups: + # Node where tests are executed and test results collected + - name: tempest + nodes: + - controller + # Nodes running the compute service + - name: compute + nodes: + - controller + - compute1 + # Nodes that are not the controller + - name: subnode + nodes: + - compute1 + # Switch node for multinode networking setup + - name: switch + nodes: + - controller + # Peer nodes for multinode networking setup + - name: peers + nodes: + - compute1 + +- nodeset: + name: openstack-two-node-bionic + nodes: + - name: controller + label: ubuntu-bionic + - name: compute1 + label: ubuntu-bionic + groups: + # Node where tests are executed and test results collected + - name: tempest + nodes: + - controller + # Nodes running the compute service + - name: compute + nodes: + - controller + - compute1 + # Nodes that are not the controller + - name: subnode + nodes: + - compute1 + # Switch node for multinode networking setup + - name: switch + nodes: + - controller + # Peer nodes for multinode networking setup + - name: peers + nodes: + - compute1 + +- nodeset: + name: openstack-three-node-focal + nodes: + - name: controller + label: ubuntu-focal + - name: compute1 + label: ubuntu-focal + - name: compute2 + label: ubuntu-focal + groups: + # Node where tests are executed and test results collected + - name: tempest + nodes: + - controller + # Nodes 
running the compute service + - name: compute + nodes: + - controller + - compute1 + - compute2 + # Nodes that are not the controller + - name: subnode + nodes: + - compute1 + - compute2 + # Switch node for multinode networking setup + - name: switch + nodes: + - controller + # Peer nodes for multinode networking setup + - name: peers + nodes: + - compute1 + - compute2 + +- nodeset: + name: openstack-three-node-bionic + nodes: + - name: controller + label: ubuntu-bionic + - name: compute1 + label: ubuntu-bionic + - name: compute2 + label: ubuntu-bionic + groups: + # Node where tests are executed and test results collected + - name: tempest + nodes: + - controller + # Nodes running the compute service + - name: compute + nodes: + - controller + - compute1 + - compute2 + # Nodes that are not the controller + - name: subnode + nodes: + - compute1 + - compute2 + # Switch node for multinode networking setup + - name: switch + nodes: + - controller + # Peer nodes for multinode networking setup + - name: peers + nodes: + - compute1 + - compute2 + +- nodeset: + name: devstack-two-node-debian-bookworm + nodes: + - name: controller + label: debian-bookworm + - name: compute1 + label: debian-bookworm + groups: + # Node where tests are executed and test results collected + - name: tempest + nodes: + - controller + # Nodes running the compute service + - name: compute + nodes: + - controller + - compute1 + # Nodes that are not the controller + - name: subnode + nodes: + - compute1 + # Switch node for multinode networking setup + - name: switch + nodes: + - controller + # Peer nodes for multinode networking setup + - name: peers + nodes: + - compute1 + +- nodeset: + name: devstack-two-node-debian-trixie + nodes: + - name: controller + label: debian-trixie-8GB + - name: compute1 + label: debian-trixie-8GB + groups: + # Node where tests are executed and test results collected + - name: tempest + nodes: + - controller + # Nodes running the compute service + - name: compute + nodes: + - controller + - compute1 + # Nodes that are not the controller + - name: subnode + nodes: + - compute1 + # Switch node for multinode networking setup + - name: switch + nodes: + - controller + # Peer nodes for multinode networking setup + - name: peers + nodes: + - compute1 + +- job: + name: devstack-base + parent: openstack-multinode-fips + abstract: true + description: | + Base abstract Devstack job. + + Defines plays and base variables, but it does not include any project + and it does not run any service by default. This is a common base for + all single Devstack jobs, single or multinode. + Variables are defined in job.vars, which is what is then used by single + node jobs and by multi node jobs for the controller, as well as in + job.group-vars.peers, which is what is used by multi node jobs for subnode + nodes (everything but the controller). + required-projects: + - opendev.org/openstack/devstack + # this is a workaround for a packaging bug in ubuntu + # remove when https://bugs.launchpad.net/nova/+bug/2109592 + # is resolved and oslo.config is not a dep of the novnc deb + # via the defunct python3-novnc package. 
+ - novnc/novnc + + roles: + - zuul: opendev.org/openstack/openstack-zuul-jobs + vars: + devstack_localrc: + DATABASE_PASSWORD: secretdatabase + RABBIT_PASSWORD: secretrabbit + ADMIN_PASSWORD: secretadmin + SERVICE_PASSWORD: secretservice + NETWORK_GATEWAY: 10.1.0.1 + FIXED_RANGE: 10.1.0.0/20 + IPV4_ADDRS_SAFE_TO_USE: 10.1.0.0/20 + FLOATING_RANGE: 172.24.5.0/24 + PUBLIC_NETWORK_GATEWAY: 172.24.5.1 + LOGFILE: /opt/stack/logs/devstacklog.txt + LOG_COLOR: false + VERBOSE: true + VERBOSE_NO_TIMESTAMP: true + ERROR_ON_CLONE: true + # Gate jobs can't deal with nested virt. Disable it by default. + LIBVIRT_TYPE: '{{ devstack_libvirt_type | default("qemu") }}' + devstack_services: + # Ignore any default set by devstack. Emit a "disable_all_services". + base: false + zuul_copy_output: + '{{ devstack_conf_dir }}/local.conf': logs + '{{ devstack_conf_dir }}/localrc': logs + '{{ devstack_conf_dir }}/.localrc.auto': logs + '{{ devstack_conf_dir }}/.stackenv': logs + '{{ devstack_log_dir }}/dstat-csv.log': logs + '{{ devstack_log_dir }}/atop': logs + '{{ devstack_log_dir }}/devstacklog.txt': logs + '{{ devstack_log_dir }}/devstacklog.txt.summary': logs + '{{ devstack_log_dir }}/tcpdump.pcap': logs + '{{ devstack_log_dir }}/worlddump-latest.txt': logs + '{{ devstack_log_dir }}/qemu.coredump': logs + '{{ devstack_full_log}}': logs + '{{ stage_dir }}/verify_tempest_conf.log': logs + '{{ stage_dir }}/performance.json': logs + '{{ stage_dir }}/apache': logs + '{{ stage_dir }}/apache_config': logs + '{{ stage_dir }}/etc': logs + /var/log/rabbitmq: logs + /var/log/postgresql: logs + /var/log/mysql: logs + /var/log/libvirt: logs + /etc/libvirt: logs + /etc/lvm: logs + /etc/sudoers: logs + /etc/sudoers.d: logs + '{{ stage_dir }}/iptables.txt': logs + '{{ stage_dir }}/df.txt': logs + '{{ stage_dir }}/mount.txt': logs + '{{ stage_dir }}/pip2-freeze.txt': logs + '{{ stage_dir }}/pip3-freeze.txt': logs + '{{ stage_dir }}/dpkg-l.txt': logs + '{{ stage_dir }}/rpm-qa.txt': logs + '{{ stage_dir }}/core': logs + '{{ stage_dir }}/listen53.txt': logs + '{{ stage_dir }}/services.txt': logs + '{{ stage_dir }}/deprecations.log': logs + '{{ stage_dir }}/audit.log': logs + /etc/ceph: logs + /var/log/ceph: logs + /var/log/openvswitch: logs + /var/log/glusterfs: logs + /etc/glusterfs/glusterd.vol: logs + /etc/resolv.conf: logs + /var/log/unbound.log: logs + extensions_to_txt: + conf: true + log: true + localrc: true + stackenv: true + auto: true + group-vars: + subnode: + devstack_localrc: + DATABASE_PASSWORD: secretdatabase + RABBIT_PASSWORD: secretrabbit + ADMIN_PASSWORD: secretadmin + SERVICE_PASSWORD: secretservice + NETWORK_GATEWAY: 10.1.0.1 + FIXED_RANGE: 10.1.0.0/20 + IPV4_ADDRS_SAFE_TO_USE: 10.1.0.0/20 + FLOATING_RANGE: 172.24.5.0/24 + PUBLIC_NETWORK_GATEWAY: 172.24.5.1 + LOGFILE: /opt/stack/logs/devstacklog.txt + LOG_COLOR: false + VERBOSE: true + VERBOSE_NO_TIMESTAMP: true + ERROR_ON_CLONE: true + LIBVIRT_TYPE: qemu + devstack_services: + base: false + pre-run: playbooks/pre.yaml + run: playbooks/devstack.yaml + post-run: playbooks/post.yaml + irrelevant-files: &common-irrelevant-files + # Documentation related + - ^.*\.rst$ + - ^api-ref/.*$ + - ^doc/.*$ + - ^releasenotes/.*$ + # Translations + - ^.*/locale/.*po$ + # pre-commit config + - ^.pre-commit-config.yaml$ + # gitreview config + - ^.gitreview$ + +- job: + name: devstack-minimal + parent: devstack-base + description: | + Minimal devstack base job, intended for use by jobs that need + less than the normal minimum set of required-projects. 
+ nodeset: openstack-single-node-noble + required-projects: + - opendev.org/openstack/requirements + vars: + devstack_localrc: + # Multinode specific settings + SERVICE_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}" + HOST_IP: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}" + PUBLIC_BRIDGE_MTU: '{{ external_bridge_mtu }}' + devstack_services: + # Shared services + dstat: false + etcd3: true + memory_tracker: true + file_tracker: true + mysql: true + rabbit: true + openstack-cli-server: true + group-vars: + subnode: + devstack_services: + # Shared services + dstat: false + memory_tracker: true + file_tracker: true + openstack-cli-server: true + devstack_localrc: + # Multinode specific settings + HOST_IP: "{{ hostvars[inventory_hostname]['nodepool']['private_ipv4'] }}" + SERVICE_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}" + PUBLIC_BRIDGE_MTU: '{{ external_bridge_mtu }}' + # Subnode specific settings + DATABASE_TYPE: mysql + RABBIT_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}" + DATABASE_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}" + + +- job: + name: devstack + parent: devstack-minimal + description: | + Base devstack job for integration gate. + + This base job can be used for single node and multinode devstack jobs. + + With a single node nodeset, this job sets up an "all-in-one" (aio) + devstack with the seven OpenStack services included in the devstack tree: + keystone, glance, cinder, neutron, nova, placement, and swift. + + With a two node nodeset, this job sets up an aio + compute node. + The controller can be customised using host-vars.controller, the + sub-nodes can be customised using group-vars.subnode. + + Descendent jobs can enable / disable services, add devstack configuration + options, enable devstack plugins, configure log files or directories to be + transferred to the log server. + + The job assumes that there is only one controller node. The number of + subnodes can be scaled up seamlessly by setting a custom nodeset in + job.nodeset. + + The run playbook consists of a single role, so it can be easily rewritten + and extended. + required-projects: + - opendev.org/openstack/cinder + - opendev.org/openstack/glance + - opendev.org/openstack/keystone + - opendev.org/openstack/neutron + - opendev.org/openstack/nova + - opendev.org/openstack/placement + - opendev.org/openstack/swift + - opendev.org/openstack/os-test-images + timeout: 7200 + vars: + # based on observation of the integrated gate + # tempest-integrated-compute was only using ~1.7GB of swap + # when zswap and the host turning are enabled that increase + # slightly to ~2GB. we are setting the swap size to 8GB to + # be safe and account for more complex scenarios. + # we should revisit this value after some time to see if we + # can reduce it. 
+ configure_swap_size: 8192 + devstack_localrc: + # Common OpenStack services settings + SWIFT_REPLICAS: 1 + SWIFT_START_ALL_SERVICES: false + SWIFT_HASH: 1234123412341234 + DEBUG_LIBVIRT_COREDUMPS: true + NOVA_VNC_ENABLED: true + OVN_DBS_LOG_LEVEL: dbg + # tune the host to optimize memory usage and hide io latency + # these setting will configure the kernel to treat the host page + # cache and swap with equal priority, and prefer deferring writes + # changing the default swappiness, dirty_ratio and + # the vfs_cache_pressure + ENABLE_SYSCTL_MEM_TUNING: true + # the net tuning optimizes ipv4 tcp fast open and config the default + # qdisk policy to pfifo_fast which effectively disable all qos. + # this minimizes the cpu load of the host network stack + ENABLE_SYSCTL_NET_TUNING: true + # zswap allows the kernel to compress pages in memory before swapping + # them to disk. this can reduce the amount of swap used and improve + # performance. effectively this trades a small amount of cpu for an + # increase in swap performance by reducing the amount of data + # written to disk. the overall speedup is proportional to the + # compression ratio and the speed of the swap device. + # NOTE: this option is ignored when not using nova with the libvirt + # virt driver. + NOVA_LIBVIRT_TB_CACHE_SIZE: 128 + ENABLE_ZSWAP: true + devstack_local_conf: + post-config: + $NEUTRON_CONF: + DEFAULT: + global_physnet_mtu: '{{ external_bridge_mtu }}' + devstack_services: + # Core services enabled for this branch. + # This list replaces the test-matrix. + # Shared services + dstat: false + etcd3: true + memory_tracker: true + file_tracker: true + mysql: true + rabbit: true + tls-proxy: true + # Keystone services + key: true + # Glance services + g-api: true + # Nova services + n-api: true + n-api-meta: true + n-cond: true + n-cpu: true + n-novnc: true + n-sch: true + # Placement service + placement-api: true + # OVN services + ovn-controller: true + ovn-northd: true + ovs-vswitchd: true + ovsdb-server: true + # Neutron services + q-svc: true + q-ovn-agent: true + # Swift services + s-account: true + s-container: true + s-object: true + s-proxy: true + # Cinder services + c-api: true + c-bak: true + c-sch: true + c-vol: true + # Services we don't need. + # This section is not really needed, it's for readability. + horizon: false + tempest: false + # Test matrix emits ceilometer but ceilomenter is not installed in the + # integrated gate, so specifying the services has not effect. + # ceilometer-*: false + group-vars: + subnode: + devstack_services: + # Core services enabled for this branch. + # This list replaces the test-matrix. + # Shared services + dstat: false + memory_tracker: true + file_tracker: true + tls-proxy: true + # Nova services + n-cpu: true + # Placement services + placement-client: true + # OVN services + ovn-controller: true + ovs-vswitchd: true + ovsdb-server: true + # Neutron services + q-ovn-agent: true + # Cinder services + c-bak: true + c-vol: true + # Services we don't run at all on subnode. + # This section is not really needed, it's for readability. + # keystone: false + # s-*: false + horizon: false + tempest: false + # Test matrix emits ceilometer but ceilometer is not installed in the + # integrated gate, so specifying the services has not effect. 
+          # ceilometer-*: false
+        devstack_localrc:
+          # Subnode specific settings
+          GLANCE_HOSTPORT: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}:9292"
+          Q_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}"
+          NOVA_VNC_ENABLED: true
+          ENABLE_CHASSIS_AS_GW: false
+          # tune the host to optimize memory usage and hide io latency
+          # these settings will configure the kernel to treat the host page
+          # cache and swap with equal priority, and prefer deferring writes
+          # by changing the default swappiness, dirty_ratio and
+          # the vfs_cache_pressure
+          ENABLE_SYSCTL_MEM_TUNING: true
+          # the net tuning optimizes ipv4 tcp fast open and configures the default
+          # qdisc policy to pfifo_fast, which effectively disables all qos.
+          # this minimizes the cpu load of the host network stack
+          ENABLE_SYSCTL_NET_TUNING: true
+          # zswap allows the kernel to compress pages in memory before swapping
+          # them to disk. this can reduce the amount of swap used and improve
+          # performance. effectively this trades a small amount of cpu for an
+          # increase in swap performance by reducing the amount of data
+          # written to disk. the overall speedup is proportional to the
+          # compression ratio and the speed of the swap device.
+          ENABLE_ZSWAP: true
+          # NOTE: this option is ignored when not using nova with the libvirt
+          # virt driver.
+          NOVA_LIBVIRT_TB_CACHE_SIZE: 128
+
+- job:
+    name: devstack-ipv6
+    parent: devstack
+    description: |
+      Devstack single node job for the integration gate with IPv6,
+      all services and tunnels using IPv6 addresses.
+    vars:
+      devstack_localrc:
+        SERVICE_IP_VERSION: 6
+        SERVICE_HOST: ""
+        TUNNEL_IP_VERSION: 6
+
+- job:
+    name: devstack-enforce-scope
+    parent: devstack
+    description: |
+      This job runs devstack with scope checks enabled.
+    vars:
+      devstack_localrc:
+        ENFORCE_SCOPE: true
+
+- job:
+    name: devstack-multinode
+    parent: devstack
+    nodeset: openstack-two-node-noble
+    description: |
+      Simple multinode test to verify multinode functionality on the devstack side.
+      This is not meant to be used as a parent job.
+
+# NOTE(ianw) Platform tests have traditionally been non-voting because
+# we often have to rush things through devstack to stabilise the gate,
+# and these platforms don't have the round-the-clock support to avoid
+# becoming blockers in that situation.
+- job: + name: devstack-platform-almalinux-purple-lion-ovn-source + parent: tempest-full-py3 + description: AlmaLinux 10 platform test + nodeset: devstack-single-node-almalinux-10 + timeout: 9000 + voting: false + vars: + configure_swap_size: 4096 + devstack_localrc: + OVN_BUILD_FROM_SOURCE: True + OVN_BRANCH: "branch-24.03" + OVS_BRANCH: "branch-3.3" + OVS_SYSCONFDIR: "/usr/local/etc/openvswitch" + +- job: + name: devstack-platform-centos-10-stream + parent: tempest-full-py3 + description: CentOS 10 Stream platform test + nodeset: devstack-single-node-centos-10-stream + timeout: 9000 + voting: false + +- job: + name: devstack-platform-centos-9-stream + parent: tempest-full-py3 + description: CentOS 9 Stream platform test + nodeset: devstack-single-node-centos-9-stream + vars: + devstack_localrc: + # TODO(ykarel) Remove this when moving to 10-stream + PYTHON3_VERSION: 3.11 + timeout: 9000 + voting: false + +- job: + name: devstack-platform-debian-trixie + parent: tempest-full-py3 + description: Debian Trixie platform test + nodeset: devstack-single-node-debian-trixie + timeout: 9000 + vars: + configure_swap_size: 4096 + +- job: + name: devstack-platform-debian-bookworm + parent: tempest-full-py3 + description: Debian Bookworm platform test + nodeset: devstack-single-node-debian-bookworm + timeout: 9000 + vars: + configure_swap_size: 4096 + +- job: + name: devstack-platform-rocky-blue-onyx + parent: tempest-full-py3 + description: Rocky Linux 9 Blue Onyx platform test + nodeset: devstack-single-node-rockylinux-9 + timeout: 9000 + # NOTE(danms): This has been failing lately with some repository metadata + # errors. We're marking this as non-voting until it appears to have + # stabilized: + # https://zuul.openstack.org/builds?job_name=devstack-platform-rocky-blue-onyx&skip=0 + voting: false + vars: + configure_swap_size: 4096 + devstack_localrc: + # TODO(ykarel) Remove this when moving to rocky10 + PYTHON3_VERSION: 3.11 + +- job: + name: devstack-platform-rocky-red-quartz + parent: tempest-full-py3 + description: Rocky Linux Red Quartz platform test + nodeset: devstack-single-node-rockylinux-10 + timeout: 9000 + voting: false + vars: + configure_swap_size: 4096 + +- job: + name: devstack-platform-ubuntu-jammy + parent: tempest-full-py3 + description: Ubuntu 22.04 LTS (Jammy) platform test + nodeset: openstack-single-node-jammy + timeout: 9000 + vars: + configure_swap_size: 8192 + +- job: + name: devstack-platform-ubuntu-noble-ovn-source + parent: devstack-platform-ubuntu-noble + description: Ubuntu 24.04 LTS (noble) platform test (OVN from source) + voting: false + vars: + devstack_localrc: + OVN_BUILD_FROM_SOURCE: True + OVN_BRANCH: "branch-24.03" + OVS_BRANCH: "branch-3.3" + OVS_SYSCONFDIR: "/usr/local/etc/openvswitch" + +- job: + name: devstack-platform-ubuntu-noble-ovs + parent: tempest-full-py3 + description: Ubuntu 24.04 LTS (noble) platform test (OVS) + nodeset: openstack-single-node-noble + voting: false + timeout: 9000 + vars: + configure_swap_size: 8192 + devstack_localrc: + Q_AGENT: openvswitch + Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch + Q_ML2_TENANT_NETWORK_TYPE: vxlan + devstack_services: + # Disable OVN services + ovn-northd: false + ovn-controller: false + ovs-vswitchd: false + ovsdb-server: false + # Disable Neutron ML2/OVN services + q-ovn-metadata-agent: false + # Enable Neutron ML2/OVS services + q-agt: true + q-dhcp: true + q-l3: true + q-meta: true + q-metering: true + group-vars: + subnode: + devstack_services: + # Disable OVN services + ovn-controller: false + 
ovs-vswitchd: false + ovsdb-server: false + # Disable Neutron ML2/OVN services + q-ovn-metadata-agent: false + # Enable Neutron ML2/OVS services + q-agt: true + +- job: + name: devstack-no-tls-proxy + parent: tempest-full-py3 + description: | + Tempest job with tls-proxy off. + + Some gates run devstack like this and it follows different code paths. + vars: + devstack_services: + tls-proxy: false + +- job: + name: devstack-tox-base + parent: devstack + description: | + Base job for devstack-based functional tests that use tox. + + This job is not intended to be run directly. It's just here + for organizational purposes for devstack-tox-functional and + devstack-tox-functional-consumer. + post-run: playbooks/tox/post.yaml + vars: + tox_envlist: functional + tox_install_siblings: false + +- job: + name: devstack-tox-functional + parent: devstack-tox-base + description: | + Base job for devstack-based functional tests that use tox. + + Runs devstack, then runs the tox ``functional`` environment, + then collects tox/testr build output like normal tox jobs. + + Turns off tox sibling installation. Projects may be involved + in the devstack deployment and so may be in the required-projects + list, but may not want to test against master of the other + projects in their tox env. Child jobs can set tox_install_siblings + to True to re-enable sibling processing. + run: playbooks/tox/run-both.yaml + +- job: + name: devstack-tox-functional-consumer + parent: devstack + description: | + Base job for devstack-based functional tests for projects that + consume the devstack cloud. + + This base job should only be used by projects that are not involved + in the devstack deployment step, but are instead projects that are using + devstack to get a cloud against which they can test things. + + Runs devstack in pre-run, then runs the tox ``functional`` environment, + then collects tox/testr build output like normal tox jobs. + + Turns off tox sibling installation. Projects may be involved + in the devstack deployment and so may be in the required-projects + list, but may not want to test against master of the other + projects in their tox env. Child jobs can set tox_install_siblings + to True to re-enable sibling processing. + pre-run: + - playbooks/devstack.yaml + - playbooks/tox/pre.yaml + run: playbooks/tox/run.yaml + +- job: + name: devstack-unit-tests + nodeset: ubuntu-noble + description: | + Runs unit tests on devstack project. + + It runs ``run_tests.sh``. 
+ pre-run: playbooks/unit-tests/pre.yaml + run: playbooks/unit-tests/run.yaml + +- project: + templates: + - integrated-gate-py3 + - publish-openstack-docs-pti + check: + jobs: + - devstack + - devstack-ipv6 + - devstack-enforce-scope + - devstack-platform-almalinux-purple-lion-ovn-source + - devstack-platform-centos-10-stream + - devstack-platform-centos-9-stream + - devstack-platform-debian-bookworm + - devstack-platform-debian-trixie + - devstack-platform-rocky-blue-onyx + - devstack-platform-rocky-red-quartz + - devstack-platform-ubuntu-noble-ovn-source + - devstack-platform-ubuntu-noble-ovs + - devstack-platform-ubuntu-jammy + - devstack-multinode + - devstack-unit-tests + - openstack-tox-bashate + - ironic-tempest-bios-ipmi-direct + - swift-dsvm-functional + - grenade: + irrelevant-files: *common-irrelevant-files + - neutron-ovs-grenade-multinode: + irrelevant-files: *common-irrelevant-files + - neutron-ovn-tempest-ovs-release: + voting: false + irrelevant-files: *common-irrelevant-files + - tempest-multinode-full-py3: + voting: false + irrelevant-files: *common-irrelevant-files + - openstacksdk-functional-devstack: + irrelevant-files: *common-irrelevant-files + - tempest-ipv6-only: + irrelevant-files: *common-irrelevant-files + - nova-ceph-multistore: + irrelevant-files: *common-irrelevant-files + gate: + jobs: + - devstack + - devstack-ipv6 + - devstack-platform-debian-bookworm + - devstack-platform-debian-trixie + - devstack-platform-ubuntu-noble + # NOTE(danms): Disabled due to instability, see comment in the job + # definition above. + # - devstack-platform-rocky-blue-onyx + - devstack-enforce-scope + - devstack-multinode + - devstack-unit-tests + - openstack-tox-bashate + - neutron-ovs-grenade-multinode: + irrelevant-files: *common-irrelevant-files + - ironic-tempest-bios-ipmi-direct + - swift-dsvm-functional + - grenade: + irrelevant-files: *common-irrelevant-files + - openstacksdk-functional-devstack: + irrelevant-files: *common-irrelevant-files + - tempest-ipv6-only: + irrelevant-files: *common-irrelevant-files + - nova-ceph-multistore: + irrelevant-files: *common-irrelevant-files + # Please add a note on each job and conditions for the job not + # being experimental any more, so we can keep this list somewhat + # pruned. + # + # * nova-next: maintained by nova for unreleased/undefaulted + # things, this job is not experimental but often is used to test + # things that are not yet production ready or to test what will be + # the new default after a deprecation period has ended. 
+ # * nova-multi-cell: maintained by nova and now is voting in the + # check queue for nova changes but relies on devstack configuration + + experimental: + jobs: + - nova-multi-cell + - nova-next + - devstack-plugin-ceph-tempest-py3: + irrelevant-files: *common-irrelevant-files + - neutron-ovs-tempest-dvr: + irrelevant-files: *common-irrelevant-files + - neutron-ovs-tempest-dvr-ha-multinode-full: + irrelevant-files: *common-irrelevant-files + - cinder-tempest-lvm-multibackend: + irrelevant-files: *common-irrelevant-files + - tempest-pg-full: + irrelevant-files: *common-irrelevant-files + - devstack-no-tls-proxy + periodic: + jobs: + - devstack-no-tls-proxy + periodic-weekly: + jobs: + - devstack-platform-almalinux-purple-lion-ovn-source + - devstack-platform-centos-10-stream + - devstack-platform-centos-9-stream + - devstack-platform-debian-bookworm + - devstack-platform-rocky-blue-onyx + - devstack-platform-rocky-red-quartz + - devstack-platform-ubuntu-noble-ovn-source + - devstack-platform-ubuntu-noble-ovs + - devstack-platform-ubuntu-jammy diff --git a/AUTHORS b/AUTHORS deleted file mode 100644 index 9d8366ba88..0000000000 --- a/AUTHORS +++ /dev/null @@ -1,18 +0,0 @@ -Andy Smith -Anthony Young -Brad Hall -Chmouel Boudjnah -Dean Troyer -Devin Carlen -Eddie Hebert -Jake Dahn -James E. Blair -Jason Cannavale -Jay Pipes -Jesse Andrews -Justin Shepherd -Scott Moser -Todd Willey -Tres Henry -Vishvananda Ishaya -Yun Mao diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst new file mode 100644 index 0000000000..bb511656f1 --- /dev/null +++ b/CONTRIBUTING.rst @@ -0,0 +1,19 @@ +The source repository for this project can be found at: + + https://opendev.org/openstack/devstack + +Pull requests submitted through GitHub are not monitored. + +To start contributing to OpenStack, follow the steps in the contribution guide +to set up and use Gerrit: + + https://docs.openstack.org/contributors/code-and-documentation/quick-start.html + +Bugs should be filed on Launchpad: + + https://bugs.launchpad.net/devstack + +For more specific information about contributing to this repository, see the +Devstack contributor guide: + + https://docs.openstack.org/devstack/latest/contributor/contributing.html diff --git a/FUTURE.rst b/FUTURE.rst new file mode 100644 index 0000000000..11bea30f0b --- /dev/null +++ b/FUTURE.rst @@ -0,0 +1,113 @@ +============= + Quo Vadimus +============= + +Where are we going? + +This is a document in Devstack to outline where we are headed in the +future. The future might be near or far, but this is where we'd like +to be. + +This is intended to help people contribute, because it will be a +little clearer if a contribution takes us closer to or further away to +our end game. + +================== + Default Services +================== + +Devstack is designed as a development environment first. There are a +lot of ways to compose the OpenStack services, but we do need one +default. + +That should be the Compute Layer (currently Glance + Nova + Cinder + +Neutron Core (not advanced services) + Keystone). It should be the +base building block going forward, and the introduction point of +people to OpenStack via Devstack. + +================ + Service Howtos +================ + +Starting from the base building block all services included in +OpenStack should have an overview page in the Devstack +documentation. 
That should include the following:
+
+- A helpful high-level overview of that service
+- What it depends on (both other OpenStack services and other system
+  components)
+- What new daemons need to be started, including where they
+  should live
+
+This provides a map for people doing multinode testing to understand
+what portions are control plane, which should live on worker nodes.
+
+Service how-to pages will start with an ugly "This team has provided
+no information about this service" until someone does.
+
+===================
+ Included Services
+===================
+
+Devstack doesn't need to eat the world. Given the existence of the
+external devstack plugin architecture, the future direction is to move
+the bulk of the support code out of devstack itself and into external
+plugins.
+
+This will also promote a cleaner separation between services.
+
+=============================
+ Included Backends / Drivers
+=============================
+
+Upstream Devstack should only include Open Source backends / drivers;
+its intent is Open Source development of OpenStack. Proprietary
+drivers should be supported via external plugins.
+
+Just being Open Source doesn't mean it should be in upstream Devstack
+if it's not required for base development of OpenStack
+components. When in doubt, external plugins should be used.
+
+========================================
+ OpenStack Services vs. System Services
+========================================
+
+ENABLED_SERVICES is currently entirely too overloaded. We should have
+a separation of actual OpenStack services that you have to run (n-cpu,
+g-api) and required backends like mysql and rabbitmq.
+
+===========================
+ Splitting up of Functions
+===========================
+
+The functions-common file has grown over time, and needs to be split
+up into smaller libraries that handle specific domains.
+
+======================
+ Testing of Functions
+======================
+
+Every function in a functions file should get tests. The devstack
+testing framework is young, but we do have some unit tests for the
+tree, and those should be enhanced.
+
+==============================
+ Not Co-Gating with the World
+==============================
+
+As projects spin up functional test jobs, Devstack should not be
+co-gated with every single one of those. The Devstack team has one of
+the fastest turnarounds for blocking bugs of any OpenStack
+project.
+
+Basic service validation should be included as part of Devstack
+installation to mitigate this.
+
+============================
+ Documenting all the things
+============================
+
+Devstack started off as an explanation as much as an install
+script. We would love contributions that further enhance the
+comments and explanations about what is happening, even if it seems a
+little pedantic at times.
diff --git a/HACKING.rst b/HACKING.rst
new file mode 100644
index 0000000000..6a91e0a6a8
--- /dev/null
+++ b/HACKING.rst
@@ -0,0 +1,298 @@
+Contributing to DevStack
+========================
+
+
+General
+-------
+
+DevStack is written in UNIX shell script. It uses a number of bash-isms
+and so is limited to Bash (version 4 and up) and compatible shells.
+Shell script was chosen because it best illustrates the steps used to
+set up and interact with OpenStack components.
+
+DevStack's official repository is located on opendev.org at
+https://opendev.org/openstack/devstack.
Besides the master branch that +tracks the OpenStack trunk branches a separate branch is maintained for all +OpenStack releases starting with Diablo (stable/diablo). + +Contributing code to DevStack follows the usual OpenStack process as described +in `How To Contribute`__ in the OpenStack wiki. `DevStack's LaunchPad project`__ +contains the usual links for blueprints, bugs, etc. + +__ contribute_ +.. _contribute: https://docs.openstack.org/infra/manual/developers.html + +__ lp_ +.. _lp: https://launchpad.net/devstack + +The `Gerrit review +queue `__ +is used for all commits. + +The primary script in DevStack is ``stack.sh``, which performs the bulk of the +work for DevStack's use cases. There is a subscript ``functions`` that contains +generally useful shell functions and is used by a number of the scripts in +DevStack. + +A number of additional scripts can be found in the ``tools`` directory that may +be useful in supporting DevStack installations. Of particular note are ``info.sh`` +to collect and report information about the installed system, and ``install_prereqs.sh`` +that handles installation of the prerequisite packages for DevStack. It is +suitable, for example, to pre-load a system for making a snapshot. + +Repo Layout +----------- + +The DevStack repo generally keeps all of the primary scripts at the root +level. + +``doc`` - Contains the Sphinx source for the documentation. +A complete doc build can be run with ``tox -edocs``. + +``extras.d`` - Contains the dispatch scripts called by the hooks in +``stack.sh``, ``unstack.sh`` and ``clean.sh``. See :doc:`the plugins +docs ` for more information. + +``files`` - Contains a variety of otherwise lost files used in +configuring and operating DevStack. This includes templates for +configuration files and the system dependency information. This is also +where image files are downloaded and expanded if necessary. + +``lib`` - Contains the sub-scripts specific to each project. This is +where the work of managing a project's services is located. Each +top-level project (Keystone, Nova, etc) has a file here. Additionally +there are some for system services and project plugins. These +variables and functions are also used by related projects, such as +Grenade, to manage a DevStack installation. + +``samples`` - Contains a sample of the local files not included in the +DevStack repo. + +``tests`` - the DevStack test suite is rather sparse, mostly consisting +of test of specific fragile functions in the ``functions`` and +``functions-common`` files. + +``tools`` - Contains a collection of stand-alone scripts. While these +may reference the top-level DevStack configuration they can generally be +run alone. + + +Scripts +------- + +DevStack scripts should generally begin by calling ``env(1)`` in the shebang line:: + + #!/usr/bin/env bash + +Sometimes the script needs to know the location of the DevStack install directory. +``TOP_DIR`` should always point there, even if the script itself is located in +a subdirectory:: + + # Keep track of the current DevStack directory. + TOP_DIR=$(cd $(dirname "$0") && pwd) + +Many scripts will utilize shared functions from the ``functions`` file. There are +also rc files (``stackrc`` and ``openrc``) that are often included to set the primary +configuration of the user environment:: + + # Keep track of the current DevStack directory. 
+ TOP_DIR=$(cd $(dirname "$0") && pwd) + + # Import common functions + source $TOP_DIR/functions + + # Import configuration + source $TOP_DIR/openrc + +``stack.sh`` is a rather large monolithic script that flows through from beginning +to end. It has been broken down into project-specific subscripts (as noted above) +located in ``lib`` to make ``stack.sh`` more manageable and to promote code reuse. + +These library sub-scripts have a number of fixed entry points, some of which may +just be stubs. These entry points will be called by ``stack.sh`` in the +following order:: + + install_XXXX + configure_XXXX + init_XXXX + start_XXXX + stop_XXXX + cleanup_XXXX + +There is a sub-script template in ``lib/templates`` to be used in creating new +service sub-scripts. The comments in ``<>`` are meta comments describing +how to use the template and should be removed. + +In order to show the dependencies and conditions under which project functions +are executed the top-level conditional testing for things like ``is_service_enabled`` +should be done in ``stack.sh``. There may be nested conditionals that need +to be in the sub-script, such as testing for keystone being enabled in +``configure_swift()``. + + +stackrc +------- + +``stackrc`` is the global configuration file for DevStack. It is responsible for +calling ``local.conf`` (or ``localrc`` if it exists) so local user configuration +is recognized. + +The criteria for what belongs in ``stackrc`` can be vaguely summarized as +follows: + +* All project repositories and branches handled directly in ``stack.sh`` +* Global configuration that may be referenced in ``local.conf``, i.e. ``DEST``, ``DATA_DIR`` +* Global service configuration like ``ENABLED_SERVICES`` +* Variables used by multiple services that do not have a clear owner, i.e. + ``VOLUME_BACKING_FILE_SIZE`` (nova-compute and cinder) or + ``PUBLIC_NETWORK_NAME`` (only neutron but formerly nova-network too) +* Variables that can not be cleanly declared in a project file due to + dependency ordering, i.e. the order of sourcing the project files can + not be changed for other reasons but the earlier file needs to dereference a + variable set in the later file. This should be rare. + +Also, variable declarations in ``stackrc`` before ``local.conf`` is sourced +do NOT allow overriding (the form +``FOO=${FOO:-baz}``); if they did then they can already be changed in ``local.conf`` +and can stay in the project file. + + +Documentation +------------- + +The DevStack repo now contains all of the static pages of devstack.org in +the ``doc/source`` directory. The OpenStack CI system rebuilds the docs after every +commit and updates devstack.org (now a redirect to https://docs.openstack.org/devstack/latest/). + +All of the scripts are processed with shocco_ to render them with the comments +as text describing the script below. For this reason we tend to be a little +verbose in the comments _ABOVE_ the code they pertain to. Shocco also supports +Markdown formatting in the comments; use it sparingly. Specifically, ``stack.sh`` +uses Markdown headers to divide the script into logical sections. + +.. _shocco: https://github.com/dtroyer/shocco/tree/rst_support + +The script used to drive shocco is tools/build_docs.sh. +The complete docs build is also handled with tox -edocs per the +OpenStack project standard. + + +Bash Style Guidelines +~~~~~~~~~~~~~~~~~~~~~ +DevStack defines a bash set of best practices for maintaining large +collections of bash scripts. 
These should be considered as part of the +review process. + +DevStack uses the bashate_ style checker +to enforce basic guidelines, similar to pep8 and flake8 tools for Python. The +list below is not complete for what bashate checks, nor is it all checked +by bashate. So many lines of code, so little time. + +.. _bashate: https://pypi.org/project/bashate/ + +Whitespace Rules +---------------- + +- lines should not include trailing whitespace +- there should be no hard tabs in the file +- indents are 4 spaces, and all indentation should be some multiple of + them + +Control Structure Rules +----------------------- + +- then should be on the same line as the if +- do should be on the same line as the for + +Example:: + + if [[ -r $TOP_DIR/local.conf ]]; then + LRC=$(get_meta_section_files $TOP_DIR/local.conf local) + for lfile in $LRC; do + if [[ "$lfile" == "localrc" ]]; then + if [[ -r $TOP_DIR/localrc ]]; then + warn $LINENO "localrc and local.conf:[[local]] both exist, using localrc" + else + echo "# Generated file, do not edit" >$TOP_DIR/.localrc.auto + get_meta_section $TOP_DIR/local.conf local $lfile >>$TOP_DIR/.localrc.auto + fi + fi + done + fi + +Variables and Functions +----------------------- + +- functions should be used whenever possible for clarity +- functions should use ``local`` variables as much as possible to + ensure they are isolated from the rest of the environment +- local variables should be lower case, global variables should be + upper case +- function names should_have_underscores, NotCamelCase. +- functions should be declared as per the regex ^function foo {$ + with code starting on the next line + + +Review Criteria +--------------- + +There are some broad criteria that will be followed when reviewing +your change + +* **Is it passing tests** -- your change will not be reviewed + thoroughly unless the official CI has run successfully against it. + +* **Does this belong in DevStack** -- DevStack reviewers have a + default position of "no" but are ready to be convinced by your + change. + + For very large changes, you should consider :doc:`the plugins system + ` to see if your code is better abstracted from the main + repository. + + For smaller changes, you should always consider if the change can be + encapsulated by per-user settings in ``local.conf``. A common example + is adding a simple config-option to an ``ini`` file. Specific flags + are not usually required for this, although adding documentation + about how to achieve a larger goal (which might include turning on + various settings, etc) is always welcome. + +* **Work-arounds** -- often things get broken and DevStack can be in a + position to fix them. Work-arounds are fine, but should be + presented in the context of fixing the root-cause of the problem. + This means it is well-commented in the code and the change-log and + mostly likely includes links to changes or bugs that fix the + underlying problem. + +* **Should this be upstream** -- DevStack generally does not override + default choices provided by projects and attempts to not + unexpectedly modify behavior. + +* **Context in commit messages** -- DevStack touches many different + areas and reviewers need context around changes to make good + decisions. We also always want it to be clear to someone -- perhaps + even years from now -- why we were motivated to make a change at the + time. 
+ + +Making Changes, Testing, and CI +------------------------------- + +Changes to Devstack are tested by automated continuous integration jobs +that run on a variety of Linux Distros using a handful of common +configurations. What this means is that every change to Devstack is +self testing. One major benefit of this is that developers do not +typically need to add new non voting test jobs to add features to +Devstack. Instead the features can be added, then if testing passes +with the feature enabled the change is ready to merge (pending code +review). + +A concrete example of this was the switch from screen based service +management to systemd based service management. No new jobs were +created for this. Instead the features were added to devstack, tested +locally and in CI using a change that enabled the feature, then once +the enabling change was passing and the new behavior communicated and +documented it was merged. + +Using this process has been proven to be effective and leads to +quicker implementation of desired features. diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000000..68c771a099 --- /dev/null +++ b/LICENSE @@ -0,0 +1,176 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + diff --git a/Makefile b/Makefile new file mode 100644 index 0000000000..970d8009eb --- /dev/null +++ b/Makefile @@ -0,0 +1,100 @@ +# DevStack Makefile of Sanity + +# Interesting targets: +# ds-remote - Create a Git remote for use by ds-push and ds-pull targets +# DS_REMOTE_URL must be set on the command line +# +# ds-push - Merge a list of branches taken from .ds-test and push them +# to the ds-remote repo in ds-test branch +# +# ds-pull - Pull the remote ds-test branch into a fresh local branch +# +# refresh - Performs a sequence of unstack, refresh and stack + +# Duplicated from stackrc for now +DEST=/opt/stack + +all: + @echo "This just saved you from a terrible mistake!" + +# Do Some Work +stack: + ./stack.sh + +unstack: + ./unstack.sh + +docs: + tox -edocs + +# Just run the shocco source formatting build +docs-build: + INSTALL_SHOCCO=True tools/build_docs.sh + +# Just run the Sphinx docs build +docs-rst: + python setup.py build_sphinx + +# Run the bashate test +bashate: + tox -ebashate + +# Run the function tests +test: + tests/test_ini_config.sh + tests/test_meta_config.sh + tests/test_ip.sh + tests/test_refs.sh + +# Spiff up the place a bit +clean: + ./clean.sh + rm -rf accrc doc/build test*-e *.egg-info + +# Clean out the cache too +realclean: clean + rm -rf files/cirros*.tar.gz files/Fedora*.qcow2 + +# Repo stuffs + +pull: + git pull + + +# These repo targets are used to maintain a branch in a remote repo that +# consists of one or more local branches merged and pushed to the remote. +# This is most useful for iterative testing on multiple or remote servers +# while keeping the working repo local. 
+# +# It requires: +# * a remote pointing to a remote repo, often GitHub is used for this +# * a branch name to be used on the remote +# * a local file containing the list of local branches to be merged into +# the remote branch + +GIT_REMOTE_NAME=ds-test +GIT_REMOTE_BRANCH=ds-test + +# Push the current branch to a remote named ds-test +ds-push: + git checkout master + git branch -D $(GIT_REMOTE_BRANCH) || true + git checkout -b $(GIT_REMOTE_BRANCH) + for i in $(shell cat .$(GIT_REMOTE_BRANCH) | grep -v "^#" | grep "[^ ]"); do \ + git merge --no-edit $$i; \ + done + git push -f $(GIT_REMOTE_NAME) HEAD:$(GIT_REMOTE_BRANCH) + +# Pull the ds-test branch +ds-pull: + git checkout master + git branch -D $(GIT_REMOTE_BRANCH) || true + git pull $(GIT_REMOTE_NAME) $(GIT_REMOTE_BRANCH) + git checkout $(GIT_REMOTE_BRANCH) + +# Add the remote - set DS_REMOTE_URL=https://example.com/ on the command line +ds-remote: + git remote add $(GIT_REMOTE_NAME) $(DS_REMOTE_URL) + +# Refresh the current DevStack checkout and re-initialize +refresh: unstack ds-pull stack diff --git a/README.md b/README.md deleted file mode 100644 index a185f34f96..0000000000 --- a/README.md +++ /dev/null @@ -1,42 +0,0 @@ -Devstack is a set of scripts and utilities to quickly deploy an OpenStack cloud. - -# Goals - -* To quickly build dev OpenStack environments in a clean oneiric environment -* To describe working configurations of OpenStack (which code branches work together? what do config files look like for those branches?) -* To make it easier for developers to dive into OpenStack so that they can productively contribute without having to understand every part of the system at once -* To make it easy to prototype cross-project features - -Read more at http://devstack.org (built from the gh-pages branch) - -IMPORTANT: Be sure to carefully read stack.sh and any other scripts you execute before you run them, as they install software and may alter your networking configuration. We strongly recommend that you run stack.sh in a clean and disposable vm when you are first getting started. - -# Versions - -The devstack master branch generally points to trunk versions of OpenStack components. For older, stable versions, look for branches named stable/[milestone]. For example, you can do the following to create a diablo OpenStack cloud: - - git checkout stable/diablo - ./stack.sh - -# To start a dev cloud (Installing in a dedicated, disposable vm is safer than installing on your dev machine!): - - ./stack.sh - -When the script finishes executing, you should be able to access OpenStack endpoints, like so: - -* Horizon: http://myhost/ -* Keystone: http://myhost:5000/v2.0/ - -We also provide an environment file that you can use to interact with your cloud via CLI: - - # source openrc file to load your environment with osapi and ec2 creds - . openrc - # list instances - nova list - # list instances using ec2 api - euca-describe-instances - -# Customizing - -You can override environment variables used in stack.sh by creating file name 'localrc'. It is likely that you will need to do this to tweak your networking configuration should you need to access your cloud from a different host. diff --git a/README.rst b/README.rst new file mode 100644 index 0000000000..86b85da956 --- /dev/null +++ b/README.rst @@ -0,0 +1,97 @@ +DevStack is a set of scripts and utilities to quickly deploy an OpenStack cloud +from git source trees.
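+
+A minimal sketch of how the ``ds-remote``/``ds-push``/``ds-pull`` targets in
+the Makefile above might be driven (the remote URL and branch names here are
+placeholders for the example only, not DevStack defaults)::
+
+    # one-time: register the test remote (hypothetical URL)
+    make ds-remote DS_REMOTE_URL=https://example.com/devstack-test.git
+    # list the local branches to merge into the remote ds-test branch
+    printf 'feature/foo\nbugfix/bar\n' > .ds-test
+    # merge the listed branches and force-push the result
+    make ds-push
+    # on a test host: unstack, pull the merged branch and restack
+    make refresh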
+ +Goals +===== + +* To quickly build dev OpenStack environments in a clean Ubuntu or RockyLinux + environment +* To describe working configurations of OpenStack (which code branches + work together? what do config files look like for those branches?) +* To make it easier for developers to dive into OpenStack so that they can + productively contribute without having to understand every part of the + system at once +* To make it easy to prototype cross-project features +* To provide an environment for the OpenStack CI testing on every commit + to the projects + +Read more at https://docs.openstack.org/devstack/latest + +IMPORTANT: Be sure to carefully read `stack.sh` and any other scripts you +execute before you run them, as they install software and will alter your +networking configuration. We strongly recommend that you run `stack.sh` +in a clean and disposable vm when you are first getting started. + +Versions +======== + +The DevStack master branch generally points to trunk versions of OpenStack +components. For older, stable versions, look for branches named +stable/[release] in the DevStack repo. For example, you can do the +following to create a Zed OpenStack cloud:: + + git checkout stable/zed + ./stack.sh + +You can also pick specific OpenStack project releases by setting the appropriate +`*_BRANCH` variables in the ``localrc`` section of `local.conf` (look in +`stackrc` for the default set). Usually just before a release there will be +milestone-proposed branches that need to be tested:: + + GLANCE_REPO=https://opendev.org/openstack/glance.git + GLANCE_BRANCH=milestone-proposed + +Start A Dev Cloud +================= + +Installing in a dedicated disposable VM is safer than installing on your +dev machine! Plus you can pick one of the supported Linux distros for +your VM. To start a dev cloud run the following NOT AS ROOT (see +**DevStack Execution Environment** below for more on user accounts): + + ./stack.sh + +When the script finishes executing, you should be able to access OpenStack +endpoints, like so: + +* Horizon: http://myhost/ +* Keystone: http://myhost/identity/v3/ + +We also provide an environment file that you can use to interact with your +cloud via CLI:: + + # source openrc file to load your environment with OpenStack CLI creds + . openrc + # list instances + openstack server list + +DevStack Execution Environment +============================== + +DevStack runs rampant over the system it runs on, installing things and +uninstalling other things. Running this on a system you care about is a recipe +for disappointment, or worse. Alas, we're all in the virtualization business +here, so run it in a VM. And take advantage of the snapshot capabilities +of your hypervisor of choice to reduce testing cycle times. You might even save +enough time to write one more feature before the next feature freeze... + +``stack.sh`` needs to have root access for a lot of tasks, but uses +``sudo`` for all of those tasks. However, it needs to be not-root for +most of its work and for all of the OpenStack services. ``stack.sh`` +specifically does not run if started as root. + +DevStack will not automatically create the user, but provides a helper +script in ``tools/create-stack-user.sh``. Run that (as root!) or just +check it out to see what DevStack's expectations are for the account +it runs under. Many people simply use their usual login (the default +'ubuntu' login on a UEC image for example). 
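+
+A minimal sketch of that bootstrap flow (the ``stack`` user name and paths
+follow the helper script's defaults; adjust for your distro as needed)::
+
+    # as root: create the unprivileged account DevStack expects
+    git clone https://opendev.org/openstack/devstack /tmp/devstack
+    /tmp/devstack/tools/create-stack-user.sh
+
+    # as the new 'stack' user: clone DevStack and stack
+    sudo -iu stack
+    git clone https://opendev.org/openstack/devstack
+    cd devstack
+    ./stack.sh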
+ +Customizing +=========== + +DevStack can be extensively configured via the configuration file +`local.conf`. It is likely that you will need to provide and modify +this file if you want anything other than the most basic setup. Start +by reading the `configuration guide +`_ +for details of the configuration file and the many available options. diff --git a/clean.sh b/clean.sh new file mode 100755 index 0000000000..092f557a88 --- /dev/null +++ b/clean.sh @@ -0,0 +1,148 @@ +#!/bin/bash + +# **clean.sh** + +# ``clean.sh`` does its best to eradicate traces of a Grenade +# run except for the following: +# - both base and target code repos are left alone +# - packages (system and pip) are left alone + +# This means that all data files are removed. More?? + +# Keep track of the current devstack directory. +TOP_DIR=$(cd $(dirname "$0") && pwd) + +# Import common functions +source $TOP_DIR/functions + +FILES=$TOP_DIR/files + +# Load local configuration +source $TOP_DIR/openrc + +# Get the variables that are set in stack.sh +if [[ -r $TOP_DIR/.stackenv ]]; then + source $TOP_DIR/.stackenv +fi + +# Determine what system we are running on. This provides ``os_VENDOR``, +# ``os_RELEASE``, ``os_PACKAGE``, ``os_CODENAME`` +# and ``DISTRO`` +GetDistro + +# Import apache functions +source $TOP_DIR/lib/apache +source $TOP_DIR/lib/ldap + +# Import database library +source $TOP_DIR/lib/database +source $TOP_DIR/lib/rpc_backend + +source $TOP_DIR/lib/tls + +source $TOP_DIR/lib/libraries +source $TOP_DIR/lib/lvm +source $TOP_DIR/lib/horizon +source $TOP_DIR/lib/keystone +source $TOP_DIR/lib/glance +source $TOP_DIR/lib/nova +source $TOP_DIR/lib/placement +source $TOP_DIR/lib/cinder +source $TOP_DIR/lib/swift +source $TOP_DIR/lib/neutron + +set -o xtrace + +# Extras Source +# -------------- + +# Phase: source +if [[ -d $TOP_DIR/extras.d ]]; then + for i in $TOP_DIR/extras.d/*.sh; do + [[ -r $i ]] && source $i source + done +fi + +# Let unstack.sh do its thing first +$TOP_DIR/unstack.sh --all + +# Run extras +# ========== + +# Phase: clean +load_plugin_settings +run_phase clean + +if [[ -d $TOP_DIR/extras.d ]]; then + for i in $TOP_DIR/extras.d/*.sh; do + [[ -r $i ]] && source $i clean + done +fi + +# Clean projects + +# BUG: cinder tgt doesn't exit cleanly if it's not running. +cleanup_cinder || /bin/true + +cleanup_glance +cleanup_keystone +cleanup_nova +cleanup_placement +cleanup_neutron +cleanup_swift +cleanup_horizon + +if is_service_enabled ldap; then + cleanup_ldap +fi + +# Do the hypervisor cleanup until this can be moved back into lib/nova +if is_service_enabled nova && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then + cleanup_nova_hypervisor +fi + +# Clean out /etc +sudo rm -rf /etc/keystone /etc/glance /etc/nova /etc/cinder /etc/swift /etc/neutron /etc/openstack/ + +# Clean out tgt +sudo rm -f /etc/tgt/conf.d/* + +# Clean up the message queue +cleanup_rpc_backend +cleanup_database + +# Clean out data and status +sudo rm -rf $DATA_DIR $DEST/status $DEST/async + +# Clean out the log file and log directories +if [[ -n "$LOGFILE" ]] && [[ -f "$LOGFILE" ]]; then + sudo rm -f $LOGFILE +fi +if [[ -n "$LOGDIR" ]] && [[ -d "$LOGDIR" ]]; then + sudo rm -rf $LOGDIR +fi + +# Clean out the systemd unit files. +sudo find $SYSTEMD_DIR -type f -name '*devstack@*service' -delete +# Make systemd aware of the deletion. 
+$SYSTEMCTL daemon-reload + +# Clean up venvs +DIRS_TO_CLEAN="$WHEELHOUSE ${PROJECT_VENV[@]} .config/openstack" +rm -rf $DIRS_TO_CLEAN + +# Clean up files + +FILES_TO_CLEAN=".localrc.auto .localrc.password " +FILES_TO_CLEAN+="docs/files docs/html shocco/ " +FILES_TO_CLEAN+="stack-screenrc test*.conf* test.ini* " +FILES_TO_CLEAN+=".stackenv .prereqs" + +for file in $FILES_TO_CLEAN; do + rm -rf $TOP_DIR/$file +done + +rm -rf ~/.config/openstack + +# Clear any fstab entries made +sudo sed -i '/.*comment=devstack-.*/ d' /etc/fstab diff --git a/data/devstack-plugins-registry.header b/data/devstack-plugins-registry.header new file mode 100644 index 0000000000..576dbbd35a --- /dev/null +++ b/data/devstack-plugins-registry.header @@ -0,0 +1,21 @@ +.. Note to patch submitters: + + # ============================= # + # THIS FILE IS AUTOGENERATED ! # + # ============================= # + + ** Plugins are found automatically and added to this list ** + + This file is created by a periodic proposal job. You should not + edit this file. + + You should edit the files data/devstack-plugins-registry.footer + data/devstack-plugins-registry.header to modify this text. + +========================== + DevStack Plugin Registry +========================== + +The following list is an automatically-generated collection of +available DevStack plugins. This includes, but is not limited to, +official OpenStack projects. diff --git a/doc/requirements.txt b/doc/requirements.txt new file mode 100644 index 0000000000..7980b93ed7 --- /dev/null +++ b/doc/requirements.txt @@ -0,0 +1,7 @@ +pbr>=2.0.0,!=2.1.0 + +Pygments +docutils +sphinx>=2.0.0,!=2.1.0 # BSD +openstackdocstheme>=2.2.1 # Apache-2.0 +zuul-sphinx>=0.2.0 diff --git a/doc/source/assets/images/devstack.png b/doc/source/assets/images/devstack.png new file mode 100644 index 0000000000..ca6297e127 Binary files /dev/null and b/doc/source/assets/images/devstack.png differ diff --git a/doc/source/assets/images/logo-blue.png b/doc/source/assets/images/logo-blue.png new file mode 100644 index 0000000000..6b363afeee Binary files /dev/null and b/doc/source/assets/images/logo-blue.png differ diff --git a/doc/source/assets/images/logo-blue.xcf b/doc/source/assets/images/logo-blue.xcf new file mode 100644 index 0000000000..fff75ee21d Binary files /dev/null and b/doc/source/assets/images/logo-blue.xcf differ diff --git a/doc/source/assets/images/logo.png b/doc/source/assets/images/logo.png new file mode 100644 index 0000000000..9c2087e44f Binary files /dev/null and b/doc/source/assets/images/logo.png differ diff --git a/doc/source/assets/images/neutron-network-1.png b/doc/source/assets/images/neutron-network-1.png new file mode 100644 index 0000000000..7730ca93f1 Binary files /dev/null and b/doc/source/assets/images/neutron-network-1.png differ diff --git a/doc/source/assets/images/neutron-network-2.png b/doc/source/assets/images/neutron-network-2.png new file mode 100644 index 0000000000..919935119d Binary files /dev/null and b/doc/source/assets/images/neutron-network-2.png differ diff --git a/doc/source/assets/images/neutron-network-3.png b/doc/source/assets/images/neutron-network-3.png new file mode 100644 index 0000000000..34f03ed5c9 Binary files /dev/null and b/doc/source/assets/images/neutron-network-3.png differ diff --git a/doc/source/assets/images/screen_session_1.png b/doc/source/assets/images/screen_session_1.png new file mode 100644 index 0000000000..6ad6752bb1 Binary files /dev/null and b/doc/source/assets/images/screen_session_1.png differ diff --git 
a/doc/source/assets/images/small_logo.png b/doc/source/assets/images/small_logo.png new file mode 100644 index 0000000000..181459fe45 Binary files /dev/null and b/doc/source/assets/images/small_logo.png differ diff --git a/doc/source/assets/local.conf b/doc/source/assets/local.conf new file mode 120000 index 0000000000..cfc2a4e9d8 --- /dev/null +++ b/doc/source/assets/local.conf @@ -0,0 +1 @@ +../../../samples/local.conf \ No newline at end of file diff --git a/doc/source/conf.py b/doc/source/conf.py new file mode 100644 index 0000000000..bb0357286a --- /dev/null +++ b/doc/source/conf.py @@ -0,0 +1,259 @@ +# -*- coding: utf-8 -*- +# +# Tempest documentation build configuration file, created by +# sphinx-quickstart on Tue May 21 17:43:32 2013. +# +# This file is execfile()d with the current directory set to its containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +#sys.path.insert(0, os.path.abspath('.')) + +# -- General configuration ----------------------------------------------------- + +# If your documentation needs a minimal Sphinx version, state it here. +#needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be extensions +# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. +extensions = [ + 'sphinx.ext.autodoc', + 'zuul_sphinx', + 'openstackdocstheme', +] + +# openstackdocstheme options +openstackdocs_repo_name = 'openstack/devstack' +openstackdocs_pdf_link = True +openstackdocs_bug_project = 'devstack' +openstackdocs_bug_tag = '' +openstackdocs_auto_name = False +# This repo is not tagged, so don't set versions +openstackdocs_auto_version = False +version = '' +release = '' + +todo_include_todos = True + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix of source filenames. +source_suffix = '.rst' + +# The encoding of source files. +#source_encoding = 'utf-8-sig' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. +project = u'DevStack' +copyright = u'2014, OpenStack Foundation' + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +#language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +#today = '' +# Else, today_fmt is used as the format for a strftime call. +#today_fmt = '%B %d, %Y' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +exclude_patterns = ['_build'] + +# The reST default role (used for this markup: `text`) to use for all documents. +#default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +#add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +add_module_names = False + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. 
+show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'native' + +# A list of ignored prefixes for module index sorting. +modindex_common_prefix = ['DevStack-doc.'] + +# -- Options for man page output ---------------------------------------------- +man_pages = [] + +# -- Options for HTML output --------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +html_theme = 'openstackdocs' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +#html_theme_options = {} + +# Add any paths that contain custom themes here, relative to this directory. +#html_theme_path = [] + +# The name for this set of Sphinx documents. If None, it defaults to +# " v documentation". +#html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +#html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +#html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +#html_favicon = None + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +#html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +#html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +#html_additional_pages = {} + +# If false, no module index is generated. +html_domain_indices = False + +# If false, no index is generated. +html_use_index = False + +# If true, the index is split into individual pages for each letter. +#html_split_index = False + +# If true, links to the reST sources are added to the pages. +#html_show_sourcelink = True + +# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. +#html_show_sphinx = True + +# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. +#html_show_copyright = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +#html_use_opensearch = '' + +# This is the file name suffix for HTML files (e.g. ".xhtml"). +#html_file_suffix = None + +# Output file base name for HTML help builder. +htmlhelp_basename = 'DevStack-doc' + + +# -- Options for LaTeX output -------------------------------------------------- + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, author, documentclass [howto/manual]). +latex_documents = [ + ('index', 'doc-devstack.tex', u'DevStack Docs', + u'OpenStack DevStack Team', 'manual'), +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +#latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +#latex_use_parts = False + +# If true, show page references after internal links. +#latex_show_pagerefs = False + +# If true, show URL addresses after external links. +#latex_show_urls = False + +# Documents to append as an appendix to all manuals. 
+#latex_appendices = [] + +# If false, no module index is generated. +#latex_domain_indices = True + +# -- Options for Texinfo output ------------------------------------------------ + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + ('index', 'DevStack-doc', u'DevStack Docs', + u'OpenStack DevStack Team', 'DevStack-doc', 'DevStack documentation', + 'Miscellaneous'), +] + +# Documents to append as an appendix to all manuals. +#texinfo_appendices = [] + +# If false, no module index is generated. +#texinfo_domain_indices = True + +# How to display URL addresses: 'footnote', 'no', or 'inline'. +#texinfo_show_urls = 'footnote' + + +# -- Options for Epub output --------------------------------------------------- + +# Bibliographic Dublin Core info. +epub_title = u'DevStack Documentation' +epub_author = u'OpenStack DevStack Team' +epub_publisher = u'OpenStack DevStack Team' +epub_copyright = u'2014, OpenStack DevStack Team' + +# The language of the text. It defaults to the language option +# or en if the language is not set. +#epub_language = '' + +# The scheme of the identifier. Typical schemes are ISBN or URL. +#epub_scheme = '' + +# The unique identifier of the text. This can be an ISBN number +# or the project homepage. +#epub_identifier = '' + +# A unique identification for the text. +#epub_uid = '' + +# A tuple containing the cover image and cover page html template filenames. +#epub_cover = () + +# HTML files that should be inserted before the pages created by sphinx. +# The format is a list of tuples containing the path and title. +#epub_pre_files = [] + +# HTML files that should be inserted after the pages created by sphinx. +# The format is a list of tuples containing the path and title. +#epub_post_files = [] + +# A list of files that should not be packed into the epub file. +#epub_exclude_files = [] + +# The depth of the table of contents in toc.ncx. +#epub_tocdepth = 3 + +# Allow duplicate toc entries. +#epub_tocdup = True diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst new file mode 100644 index 0000000000..3cfba716ca --- /dev/null +++ b/doc/source/configuration.rst @@ -0,0 +1,806 @@ +============= +Configuration +============= + +.. contents:: + :local: + :depth: 1 + +local.conf +========== + +DevStack configuration is modified via the file ``local.conf``. It is +a modified INI format file that introduces a meta-section header to +carry additional information regarding the configuration files to be +changed. + +A sample is provided in ``devstack/samples`` + +The new header is similar to a normal INI section header but with double +brackets (``[[ ... ]]``) and two internal fields separated by a pipe +(``|``). Note that there are no spaces between the double brackets and the +internal fields. Likewise, there are no spaces between the pipe and the +internal fields: +:: + + '[[' <phase> '|' <config-file-name> ']]' + +where ``<phase>`` is one of a set of phase names defined by ``stack.sh`` +and ``<config-file-name>`` is the configuration filename. The filename +is eval'ed in the ``stack.sh`` context so all environment variables are +available and may be used. Using the project config file variables in +the header is strongly suggested (see the ``NOVA_CONF`` example below). +If the path of the config file does not exist it is skipped.
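+
+As a quick illustrative sketch (the option shown is arbitrary, and
+``/etc/nova/nova.conf`` is merely the usual path ``$NOVA_CONF`` resolves to),
+a header can target Nova's config file without hard-coding its location::
+
+    [[post-config|$NOVA_CONF]]
+    [DEFAULT]
+    debug = True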
+ +The defined phases are: + +- **local** - extracts ``localrc`` from ``local.conf`` before + ``stackrc`` is sourced +- **post-config** - runs after the layer 2 services are configured and + before they are started +- **extra** - runs after services are started and before any files in + ``extra.d`` are executed +- **post-extra** - runs after files in ``extra.d`` are executed +- **test-config** - runs after tempest (and plugins) are configured + +The file is processed strictly in sequence; meta-sections may be +specified more than once but if any settings are duplicated the last to +appear in the file will be used. + +:: + + [[post-config|$NOVA_CONF]] + [DEFAULT] + use_syslog = True + + [osapi_v3] + enabled = False + +A specific meta-section ``local|localrc`` is used to provide a default +``localrc`` file (actually ``.localrc.auto``). This allows all custom +settings for DevStack to be contained in a single file. If ``localrc`` +exists it will be used instead to preserve backward-compatibility. + +:: + + [[local|localrc]] + IPV4_ADDRS_SAFE_TO_USE=10.254.1.0/24 + ADMIN_PASSWORD=speciale + LOGFILE=$DEST/logs/stack.sh.log + +Note that ``Q_PLUGIN_CONF_FILE`` is unique in that it is assumed to +*NOT* start with a ``/`` (slash) character. A slash will need to be +added: + +:: + + [[post-config|/$Q_PLUGIN_CONF_FILE]] + +Also note that the ``localrc`` section is sourced as a shell script +fragment and MUST conform to the shell requirements, specifically no +whitespace around ``=`` (equals). + +openrc +====== + +``openrc`` configures login credentials suitable for use with the +OpenStack command-line tools. ``openrc`` sources ``stackrc`` at the +beginning (which in turn sources the ``localrc`` section of +``local.conf``) in order to pick up ``HOST_IP`` and/or ``SERVICE_HOST`` +to use in the endpoints. The values shown below are the default values. + +OS\_PROJECT\_NAME (OS\_TENANT\_NAME) + Keystone has + standardized the term *project* as the entity that owns resources. In + some places references still exist to the previous term + *tenant* for this use. Also, *project\_name* is preferred to + *project\_id*. OS\_TENANT\_NAME remains supported for compatibility + with older tools. + + :: + + OS_PROJECT_NAME=demo + +OS\_USERNAME + In addition to the owning entity (project), OpenStack calls the entity + performing the action *user*. + + :: + + OS_USERNAME=demo + +OS\_PASSWORD + Keystone's default authentication requires a password be provided. + The usual cautions about putting passwords in environment variables + apply, for most DevStack uses this may be an acceptable tradeoff. + + :: + + OS_PASSWORD=secret + +HOST\_IP, SERVICE\_HOST + Set API endpoint host using ``HOST_IP``. ``SERVICE_HOST`` may also + be used to specify the endpoint, which is convenient for some + ``local.conf`` configurations. Typically, ``HOST_IP`` is set in the + ``localrc`` section. + + :: + + HOST_IP=127.0.0.1 + SERVICE_HOST=$HOST_IP + +OS\_AUTH\_URL + Authenticating against an OpenStack cloud using Keystone returns a + *Token* and *Service Catalog*. The catalog contains the endpoints + for all services the user/tenant has access to - including Nova, + Glance, Keystone and Swift. + + :: + + OS_AUTH_URL=http://$SERVICE_HOST:5000/v3.0 + +KEYSTONECLIENT\_DEBUG, NOVACLIENT\_DEBUG + Set command-line client log level to ``DEBUG``. These are commented + out by default. + + :: + + # export KEYSTONECLIENT_DEBUG=1 + # export NOVACLIENT_DEBUG=1 + + + +.. 
_minimal-configuration: + +Minimal Configuration +===================== + +While ``stack.sh`` is happy to run without a ``localrc`` section in +``local.conf``, devlife is better when there are a few minimal variables +set. This is an example of a minimal configuration that touches the +values that most often need to be set. + +- no logging +- pre-set the passwords to prevent interactive prompts +- move network ranges away from the local network (``IPV4_ADDRS_SAFE_TO_USE`` + and ``FLOATING_RANGE``, commented out below) +- set the host IP if detection is unreliable (``HOST_IP``, commented + out below) + +:: + + [[local|localrc]] + ADMIN_PASSWORD=secret + DATABASE_PASSWORD=$ADMIN_PASSWORD + RABBIT_PASSWORD=$ADMIN_PASSWORD + SERVICE_PASSWORD=$ADMIN_PASSWORD + #IPV4_ADDRS_SAFE_TO_USE=172.31.1.0/24 + #FLOATING_RANGE=192.168.20.0/25 + #HOST_IP=10.3.4.5 + +If the ``*_PASSWORD`` variables are not set here you will be prompted to +enter values for them by ``stack.sh``. + +.. warning:: Only use alphanumeric characters in your passwords, as some + services fail to work when using special characters. + +The network ranges must not overlap with any networks in use on the +host. Overlap is not uncommon as RFC-1918 'private' ranges are commonly +used for both the local networking and Nova's fixed and floating ranges. + +``HOST_IP`` is normally detected on the first run of ``stack.sh`` but +often is indeterminate on later runs due to the IP being moved from an +Ethernet interface to a bridge on the host. Setting it here also makes it +available for ``openrc`` to set ``OS_AUTH_URL``. ``HOST_IP`` is not set +by default. + +``HOST_IPV6`` is normally detected on the first run of ``stack.sh`` but +will not be set if there is no IPv6 address on the default Ethernet interface. +Setting it here also makes it available for ``openrc`` to set ``OS_AUTH_URL``. +``HOST_IPV6`` is not set by default. + +For architecture specific configurations which differ from the x86 default +here, see `arch-configuration`_. + +Historical Notes +================ + +Historically DevStack obtained all local configuration and +customizations from a ``localrc`` file. In Oct 2013 the +``local.conf`` configuration method was introduced (in `review 46768 +`__) to simplify this +process. + +Configuration Notes +=================== + +.. contents:: + :local: + +Service Repos +------------- + +The Git repositories used to check out the source for each service are +controlled by a pair of variables set for each service. ``*_REPO`` +points to the repository and ``*_BRANCH`` selects which branch to +check out. These may be overridden in ``local.conf`` to pull source +from a different repo for testing, such as a Gerrit branch +proposal. ``GIT_BASE`` points to the primary repository server. + +:: + + NOVA_REPO=$GIT_BASE/openstack/nova.git + NOVA_BRANCH=master + +To pull a branch directly from Gerrit, get the repo and branch from +the Gerrit review page:: + + git fetch https://review.opendev.org/openstack/nova \ + refs/changes/50/5050/1 && git checkout FETCH_HEAD + +The repo is the stanza following ``fetch`` and the branch is the +stanza following that:: + + NOVA_REPO=https://review.opendev.org/openstack/nova + NOVA_BRANCH=refs/changes/50/5050/1 + + +Installation Directory +---------------------- + +The DevStack install directory is set by the ``DEST`` variable. By +default it is ``/opt/stack``. + +By setting it early in the ``localrc`` section you can reference it in +later variables. 
It can be useful to set it even though it is not +changed from the default value. + +:: + + DEST=/opt/stack + +Logging +------- + +.. _enable_logging: + +Enable Logging +~~~~~~~~~~~~~~ + +By default ``stack.sh`` output is only written to the console where it +runs. It can be sent to a file in addition to the console by setting +``LOGFILE`` to the fully-qualified name of the destination log file. A +timestamp will be appended to the given filename for each run of +``stack.sh``. + +:: + + LOGFILE=$DEST/logs/stack.sh.log + +Old log files are cleaned automatically if ``LOGDAYS`` is set to the +number of days of old log files to keep. + +:: + + LOGDAYS=2 + +Some coloring is used during the DevStack runs to make it easier to +see what is going on. This can be disabled with:: + + LOG_COLOR=False + +When using the logfile, by default logs are sent to the console and +the file. You can set ``VERBOSE`` to ``false`` if you only wish the +logs to be sent to the file (this may avoid having double-logging in +some cases where you are capturing the script output and the log +files). If ``VERBOSE`` is ``true`` you can additionally set +``VERBOSE_NO_TIMESTAMP`` to avoid timestamps being added to each +output line sent to the console. This can be useful in some +situations where the console output is being captured by a runner or +framework (e.g. Ansible) that adds its own timestamps. Note that the +log lines sent to the ``LOGFILE`` will still be prefixed with a +timestamp. + +Logging the Service Output +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +By default, services run under ``systemd`` and are natively logging to +the systemd journal. + +To query the logs use the ``journalctl`` command, such as:: + + sudo journalctl --unit devstack@* + +More examples can be found in :ref:`journalctl-examples`. + +Example Logging Configuration +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +For example, non-interactive installs probably wish to save output to +a file, keep service logs and disable color in the stored files. + +:: + + [[local|localrc]] + DEST=/opt/stack/ + LOGFILE=$DEST/stack.sh.log + LOG_COLOR=False + +Database Backend +---------------- + +Multiple database backends are available. The available databases are defined +in the lib/databases directory. +``mysql`` is the default database; choose a different one by putting the +following in the ``localrc`` section:: + + disable_service mysql + enable_service postgresql + +RPC Backend +----------- + +Support for a RabbitMQ RPC backend is included. Additional RPC +backends may be available via external plugins. Enabling or disabling +RabbitMQ is handled via the usual service functions and +``ENABLED_SERVICES``. + +Example disabling RabbitMQ in ``local.conf``:: + + disable_service rabbit + +Apache Frontend +--------------- + +The Apache web server is enabled for services that support being run via WSGI. Today this +means HTTPD and uWSGI but historically this meant HTTPD + mod_wsgi. This +historical legacy is captured by the naming of many variables, which include +``MOD_WSGI`` rather than ``UWSGI``. + +Some services support alternative deployment strategies (e.g. eventlet). You +can enable these by setting ``ENABLE_HTTPD_MOD_WSGI_SERVICES`` to ``False`` in your +``local.conf``. In addition, each service that can be run under HTTPD + +mod_wsgi also has an override toggle available that can be set in your +``local.conf``. These are, however, slowly being removed as services have +adopted standardized deployment mechanisms and more generally moved away from +eventlet.
+ +Example (Swift):: + + SWIFT_USE_MOD_WSGI="True" + +Example (Heat):: + + HEAT_USE_MOD_WSGI="True" + +Libraries from Git +------------------ + +By default devstack installs OpenStack server components from git, +however it installs client libraries from released versions on pypi. +This is appropriate if you are working on server development, but if +you want to see how an unreleased version of the client affects the +system you can have devstack install it from upstream, or from local +git trees by specifying it in ``LIBS_FROM_GIT``. Multiple libraries +can be specified as a comma separated list. + +:: + + LIBS_FROM_GIT=python-keystoneclient,oslo.config + +Setting the variable to ``ALL`` will activate the download for all +libraries. + +Virtual Environments +-------------------- + +Enable the use of Python virtual environments by setting ``USE_VENV`` +to ``True``. This will enable the creation of venvs for each project +that is defined in the ``PROJECT_VENV`` array. + +Each entry in the ``PROJECT_VENV`` array contains the directory name +of a venv to be used for the project. The array index is the project +name. Multiple projects can use the same venv if desired. + +:: + + PROJECT_VENV["glance"]=${GLANCE_DIR}.venv + +``ADDITIONAL_VENV_PACKAGES`` is a comma-separated list of additional +packages to be installed into each venv. Often projects will not have +certain packages listed in its ``requirements.txt`` file because they +are 'optional' requirements, i.e. only needed for certain +configurations. By default, the enabled databases will have their +Python bindings added when they are enabled. + +:: + + ADDITIONAL_VENV_PACKAGES="python-foo, python-bar" + +A clean install every time +-------------------------- + +By default ``stack.sh`` only clones the project repos if they do not +exist in ``$DEST``. ``stack.sh`` will freshen each repo on each run if +``RECLONE`` is set to ``yes``. This avoids having to manually remove +repos in order to get the current branch from ``$GIT_BASE``. + +:: + + RECLONE=yes + +Upgrade packages installed by pip +--------------------------------- + +By default ``stack.sh`` only installs Python packages if no version is +currently installed or the current version does not match a specified +requirement. If ``PIP_UPGRADE`` is set to ``True`` then existing +required Python packages will be upgraded to the most recent version +that matches requirements. + +:: + + PIP_UPGRADE=True + +Guest Images +------------ + +Images provided in URLS via the comma-separated ``IMAGE_URLS`` +variable will be downloaded and uploaded to glance by DevStack. + +Default guest-images are predefined for each type of hypervisor and +their testing-requirements in ``stack.sh``. Setting +``DOWNLOAD_DEFAULT_IMAGES=False`` will prevent DevStack downloading +these default images; in that case, you will want to populate +``IMAGE_URLS`` with sufficient images to satisfy testing-requirements. + +:: + + DOWNLOAD_DEFAULT_IMAGES=False + IMAGE_URLS="http://foo.bar.com/image.qcow," + IMAGE_URLS+="http://foo.bar.com/image2.qcow" + + +Instance Type +------------- + +``DEFAULT_INSTANCE_TYPE`` can be used to configure the default instance +type. When this parameter is not specified, Devstack creates additional +micro & nano flavors for really small instances to run Tempest tests. + +For guests with larger memory requirements, ``DEFAULT_INSTANCE_TYPE`` +should be specified in the configuration file so Tempest selects the +default flavors instead. 
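+
+To check which flavors your DevStack actually created (a quick verification
+step, assuming the usual ``openrc`` admin credentials), you can run::
+
+    . openrc admin admin
+    openstack flavor list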
+ +KVM on Power with QEMU 2.4 requires 512 MB to load the firmware - +`QEMU 2.4 - PowerPC `__ so users +running instances on ppc64/ppc64le can choose one of the default +created flavors as follows: + +:: + + DEFAULT_INSTANCE_TYPE=m1.tiny + + +IP Version +---------- + +``IP_VERSION`` can be used to configure Neutron to create either an +IPv4, IPv6, or dual-stack self-service project data-network by with +either ``IP_VERSION=4``, ``IP_VERSION=6``, or ``IP_VERSION=4+6`` +respectively. + +:: + + IP_VERSION=4+6 + +The following optional variables can be used to alter the default IPv6 +behavior: + +:: + + IPV6_RA_MODE=slaac + IPV6_ADDRESS_MODE=slaac + IPV6_ADDRS_SAFE_TO_USE=fd$IPV6_GLOBAL_ID::/56 + IPV6_PRIVATE_NETWORK_GATEWAY=fd$IPV6_GLOBAL_ID::1 + +*Note*: ``IPV6_ADDRS_SAFE_TO_USE`` and ``IPV6_PRIVATE_NETWORK_GATEWAY`` +can be configured with any valid IPv6 prefix. The default values make +use of an auto-generated ``IPV6_GLOBAL_ID`` to comply with RFC4193. + +Service IP Version +~~~~~~~~~~~~~~~~~~ + +DevStack can enable service operation over either IPv4 or IPv6 by +setting ``SERVICE_IP_VERSION`` to either ``SERVICE_IP_VERSION=4`` or +``SERVICE_IP_VERSION=6`` respectively. + +When set to ``4`` devstack services will open listen sockets on +``0.0.0.0`` and service endpoints will be registered using ``HOST_IP`` +as the address. + +When set to ``6`` devstack services will open listen sockets on ``::`` +and service endpoints will be registered using ``HOST_IPV6`` as the +address. + +The default value for this setting is ``4``. Dual-mode support, for +example ``4+6`` is not currently supported. ``HOST_IPV6`` can +optionally be used to alter the default IPv6 address:: + + HOST_IPV6=${some_local_ipv6_address} + +Tunnel IP Version +~~~~~~~~~~~~~~~~~ + +DevStack can enable tunnel operation over either IPv4 or IPv6 by +setting ``TUNNEL_IP_VERSION`` to either ``TUNNEL_IP_VERSION=4`` or +``TUNNEL_IP_VERSION=6`` respectively. + +When set to ``4`` Neutron will use an IPv4 address for tunnel endpoints, +for example, ``HOST_IP``. + +When set to ``6`` Neutron will use an IPv6 address for tunnel endpoints, +for example, ``HOST_IPV6``. + +The default value for this setting is ``4``. Dual-mode support, for +example ``4+6`` is not supported, as this value must match the address +family of the local tunnel endpoint IP(v6) address. + +The value of ``TUNNEL_IP_VERSION`` has a direct relationship to the +setting of ``TUNNEL_ENDPOINT_IP``, which will default to ``HOST_IP`` +when set to ``4``, and ``HOST_IPV6`` when set to ``6``. + +Multi-node setup +~~~~~~~~~~~~~~~~ + +See the :doc:`multi-node lab guide` + +Projects +-------- + +Neutron +~~~~~~~ + +See the :doc:`neutron configuration guide` for +details on configuration of Neutron + + +Swift +~~~~~ + +Swift is disabled by default. When enabled, it is configured with +only one replica to avoid being IO/memory intensive on a small +VM. + +If you would like to enable Swift you can add this to your ``localrc`` +section: + +:: + + enable_service s-proxy s-object s-container s-account + +If you want a minimal Swift install with only Swift and Keystone you +can have this instead in your ``localrc`` section: + +:: + + disable_all_services + enable_service key mysql s-proxy s-object s-container s-account + +If you only want to do some testing of a real normal swift cluster +with multiple replicas you can do so by customizing the variable +``SWIFT_REPLICAS`` in your ``localrc`` section (usually to 3). 
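+
+A hedged sketch of such a ``localrc`` section (the hash value below is only
+an example; ``SWIFT_HASH`` should be a random string unique to your
+deployment)::
+
+    enable_service s-proxy s-object s-container s-account
+    SWIFT_HASH=66a3d6b56c1f479c8b4e70ab5c2000f5
+    SWIFT_REPLICAS=3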
+ +You can manually override the ring building to use specific storage +nodes, for example when you want to test a multinode environment. In +this case you have to set a space-separated list of IPs in +``SWIFT_STORAGE_IPS`` in your ``localrc`` section that should be used +as Swift storage nodes. +Please note that this does not create a multinode setup; it is only +used when adding nodes to the Swift rings. + +:: + + SWIFT_STORAGE_IPS="192.168.1.10 192.168.1.11 192.168.1.12" + +Swift S3 +++++++++ + +If you are enabling ``s3api`` in ``ENABLED_SERVICES``, DevStack will +install the s3api middleware emulation. Swift will be configured to +act as an S3 endpoint for Keystone, effectively replacing the +``nova-objectstore``. + +Only the Swift proxy server is launched under systemd; all other +services are started in the background and managed by the ``swift-init`` tool. + +Tempest +~~~~~~~ + +If tempest has been successfully configured, a basic set of smoke +tests can be run as follows: + +:: + + $ cd /opt/stack/tempest + $ tox -e smoke + +By default tempest is downloaded and the config file is generated, but the +tempest package is not installed in the system's global site-packages (the +package install includes installing dependencies). So tempest won't run +outside of tox. If you would like to install it, add the following to your +``localrc`` section: + +:: + + INSTALL_TEMPEST=True + + +Cinder +~~~~~~ + +The logical volume group used to hold the Cinder-managed volumes is +set by ``VOLUME_GROUP_NAME``, the logical volume name prefix is set with +``VOLUME_NAME_PREFIX`` and the size of the volume backing file is set +with ``VOLUME_BACKING_FILE_SIZE``. + +:: + + VOLUME_GROUP_NAME="stack-volumes" + VOLUME_NAME_PREFIX="volume-" + VOLUME_BACKING_FILE_SIZE=24G + +When running highly concurrent tests, the default per-project quotas +for volumes, backups, or snapshots may be too small. These can be +adjusted by setting ``CINDER_QUOTA_VOLUMES``, ``CINDER_QUOTA_BACKUPS``, +or ``CINDER_QUOTA_SNAPSHOTS`` to the desired value. (The default for +each is 10.) + +DevStack's Cinder LVM configuration module currently supports both iSCSI and +NVMe connections, and we can choose which one to use with options +``CINDER_TARGET_HELPER``, ``CINDER_TARGET_PROTOCOL``, ``CINDER_TARGET_PREFIX``, +and ``CINDER_TARGET_PORT``. + +Defaults use iSCSI with the LIO target manager:: + + CINDER_TARGET_HELPER="lioadm" + CINDER_TARGET_PROTOCOL="iscsi" + CINDER_TARGET_PREFIX="iqn.2010-10.org.openstack:" + CINDER_TARGET_PORT=3260 + +Additionally there are 3 supported transport protocols for NVMe, +``nvmet_rdma``, ``nvmet_tcp``, and ``nvmet_fc``, and when the ``nvmet`` target +is selected the protocol, prefix, and port defaults will change to more +sensible defaults for NVMe:: + + CINDER_TARGET_HELPER="nvmet" + CINDER_TARGET_PROTOCOL="nvmet_rdma" + CINDER_TARGET_PREFIX="nvme-subsystem-1" + CINDER_TARGET_PORT=4420 + +When selecting the RDMA transport protocol, DevStack will create a Software +RoCE device on the Cinder nodes on top of ``HOST_IP_IFACE``; if that is not +defined, it is created on top of the interface with IP address ``HOST_IP`` or ``HOST_IPV6``. + +This Soft-RoCE device will always be created on the Nova compute side since we +cannot tell beforehand whether there will be an RDMA connection or not. + + +Keystone +~~~~~~~~ + +Multi-Region Setup +++++++++++++++++++ + +We want to set up two devstacks (RegionOne and RegionTwo) with shared +keystone (same users and services) and horizon. Keystone and Horizon +will be located in RegionOne.
Full spec is available at: +``__. + +In RegionOne: + +:: + + REGION_NAME=RegionOne + +In RegionTwo: + +:: + + disable_service horizon + KEYSTONE_SERVICE_HOST=<KEYSTONE_IP_ADDRESS_FROM_REGION_ONE> + REGION_NAME=RegionTwo + KEYSTONE_REGION_NAME=RegionOne + +In the devstack for RegionOne, we set REGION_NAME as RegionOne, so the +services started in this devstack are registered in RegionOne. In the devstack +for RegionTwo, similarly, we set REGION_NAME as RegionTwo since we want the +services started in this devstack to be registered in RegionTwo. But the Keystone +service is started and registered in RegionOne, not RegionTwo, so we use +KEYSTONE_REGION_NAME to specify the region of the Keystone service. +KEYSTONE_REGION_NAME defaults to the same value as REGION_NAME, thus we omit +it in the configuration of RegionOne. + +Glance +++++++ + +The default image size quota of 1GiB may be too small if larger images +are to be used. Change the default at setup time with: + +:: + + GLANCE_LIMIT_IMAGE_SIZE_TOTAL=5000 + +or at runtime via: + +:: + + openstack --os-cloud devstack-system-admin registered limit set \ + --service glance --default-limit 5000 --region RegionOne image_size_total + +.. _arch-configuration: + +Architectures +------------- + +The upstream CI runs exclusively on nodes with x86 architectures, but +OpenStack supports even more architectures. Some of them require +Devstack to be configured in a certain way. + +KVM on s390x (IBM z Systems) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +KVM on s390x (IBM z Systems) has been supported since the *Kilo* release. For +an all-in-one setup, these minimal settings in the ``local.conf`` file +are needed:: + + [[local|localrc]] + ADMIN_PASSWORD=secret + DATABASE_PASSWORD=$ADMIN_PASSWORD + RABBIT_PASSWORD=$ADMIN_PASSWORD + SERVICE_PASSWORD=$ADMIN_PASSWORD + + DOWNLOAD_DEFAULT_IMAGES=False + IMAGE_URLS="https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-s390x-disk1.img" + + # Provide a custom etcd3 binary download URL and its sha256. + # The binary must be located under '<etcd-version>/etcd-<etcd-version>-linux-s390x.tar.gz' + # on this URL. + # Build instructions for etcd3: https://github.com/linux-on-ibm-z/docs/wiki/Building-etcd + ETCD_DOWNLOAD_URL=<your etcd3 download URL> + ETCD_SHA256=<your etcd3 tarball sha256> + + enable_service n-sproxy + disable_service n-novnc + + [[post-config|$NOVA_CONF]] + + [serial_console] + base_url=ws://$HOST_IP:6083/ # optional + +Reasoning: + +* The default image of Devstack is x86 only, so we deactivate the download + with ``DOWNLOAD_DEFAULT_IMAGES``. The referenced guest image + in the code above (``IMAGE_URLS``) serves as an example. The list of + possible s390x guest images is not limited to that. + +* This platform doesn't support a graphical console like VNC or SPICE. + The technical reason is the missing framebuffer on the platform. This + means we rely on the substitute feature *serial console* which needs the + proxy service ``n-sproxy``. We also disable VNC's proxy ``n-novnc`` for + that reason. The configuration in the ``post-config`` section is only + needed if you want to use the *serial console* outside of the all-in-one + setup. + +* A link to an etcd3 binary and its sha256 needs to be provided as the + binary for s390x is not hosted on github like it is for other + architectures. For more details see + https://bugs.launchpad.net/devstack/+bug/1693192. Etcd3 can easily be + built by following https://github.com/linux-on-ibm-z/docs/wiki/Building-etcd. + +.. note:: To run *Tempest* against this *Devstack* all-in-one, you'll need + to use a guest image which is smaller than 1GB when uncompressed.
+ The example image from above is bigger than that! diff --git a/doc/source/contributor/contributing.rst b/doc/source/contributor/contributing.rst new file mode 100644 index 0000000000..8b5a85b3df --- /dev/null +++ b/doc/source/contributor/contributing.rst @@ -0,0 +1,57 @@ +============================ +So You Want to Contribute... +============================ + +For general information on contributing to OpenStack, please check out the +`contributor guide `_ to get started. +It covers all the basics that are common to all OpenStack projects: the accounts +you need, the basics of interacting with our Gerrit review system, how we +communicate as a community, etc. + +Below will cover the more project specific information you need to get started +with Devstack. + +Communication +~~~~~~~~~~~~~ +* IRC channel ``#openstack-qa`` at OFTC. +* Mailing list (prefix subjects with ``[qa][devstack]`` for faster responses) + http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-discuss + +Contacting the Core Team +~~~~~~~~~~~~~~~~~~~~~~~~ +Please refer to the `Devstack Core Team +`_ contacts. + +New Feature Planning +~~~~~~~~~~~~~~~~~~~~ +If you want to propose a new feature please read `Feature Proposal Process`_ +Devstack features are tracked on `Launchpad BP `_. + +Task Tracking +~~~~~~~~~~~~~ +We track our tasks in `Launchpad `_. + +Reporting a Bug +~~~~~~~~~~~~~~~ +You found an issue and want to make sure we are aware of it? You can do so on +`Launchpad `__. +More info about Launchpad usage can be found on `OpenStack docs page +`_ + +Getting Your Patch Merged +~~~~~~~~~~~~~~~~~~~~~~~~~ +All changes proposed to the Devstack require two ``Code-Review +2`` votes from +Devstack core reviewers before one of the core reviewers can approve the patch +by giving ``Workflow +1`` vote. There are 2 exceptions, approving patches to +unblock the gate and patches that do not relate to the Devstack's core logic, +like for example old job cleanups, can be approved by single core reviewers. + +Project Team Lead Duties +~~~~~~~~~~~~~~~~~~~~~~~~ +All common PTL duties are enumerated in the `PTL guide +`_. + +The Release Process for QA is documented in `QA Release Process +`_. + +.. _Feature Proposal Process: https://wiki.openstack.org/wiki/QA#Feature_Proposal_.26_Design_discussions diff --git a/doc/source/debugging.rst b/doc/source/debugging.rst new file mode 100644 index 0000000000..3ca0ad94b4 --- /dev/null +++ b/doc/source/debugging.rst @@ -0,0 +1,52 @@ +===================== +System-wide debugging +===================== + +A lot can go wrong during a devstack run, and there are a few inbuilt +tools to help you. + +dstat +----- + +Enable the ``dstat`` service to produce performance logs during the +devstack run. These will be logged to the journal and also as a CSV +file. + +memory_tracker +-------------- + +The ``memory_tracker`` service periodically monitors RAM usage and +provides consumption output when available memory is seen to be +falling (i.e. processes are consuming memory). It also provides +output showing locked (unswappable) memory. + +file_tracker +------------ + +The ``file_tracker`` service periodically monitors the number of +open files in the system. + +tcpdump +------- + +Enable the ``tcpdump`` service to run a background tcpdump. You must +set the ``TCPDUMP_ARGS`` variable to something suitable (there is no +default). For example, to trace iSCSI communication during a job in +the OpenStack gate and copy the result into the log output, you might +use: + +.. 
code-block:: yaml + + job: + name: devstack-job + parent: devstack + vars: + devstack_services: + tcpdump: true + devstack_localrc: + TCPDUMP_ARGS: "-i any tcp port 3260" + zuul_copy_output: + '{{ devstack_log_dir }}/tcpdump.pcap': logs + + + diff --git a/doc/source/development.rst b/doc/source/development.rst new file mode 100644 index 0000000000..957de9b0e1 --- /dev/null +++ b/doc/source/development.rst @@ -0,0 +1,117 @@ +========================== + Developing with Devstack +========================== + +Now that you have your nifty DevStack up and running, what can you do +with it? + +Inspecting Services +=================== + +By default most services in DevStack are running as `systemd` units +named `devstack@$servicename.service`. You can see running services +with. + +.. code-block:: bash + + sudo systemctl status "devstack@*" + +To learn more about the basics of systemd, see :doc:`/systemd` + +Patching a Service +================== + +If you want to make a quick change to a running service the easiest +way to do that is to change the code directly in /opt/stack/$service +and then restart the affected daemons. + +.. code-block:: bash + + sudo systemctl restart devstack@n-cpu.service + +If your change impacts more than one daemon you can restart by +wildcard as well. + +.. code-block:: bash + + sudo systemctl restart "devstack@n-*" + +.. warning:: + + All changes you are making are in checked out git trees that + DevStack thinks it has full control over. Uncommitted work, or + work committed to the master branch, may be overwritten during + subsequent DevStack runs. + +Testing a Patch Series +====================== + +When testing a larger set of patches, or patches that will impact more +than one service within a project, it is often less confusing to use +custom git locations, and make all your changes in a dedicated git +tree. + +In your ``local.conf`` you can add ``**_REPO``, ``**_BRANCH`` for most projects +to use a custom git tree instead of the default upstream ones. + +For instance: + +.. code-block:: bash + + [[local|localrc]] + NOVA_REPO=/home/sdague/nova + NOVA_BRANCH=fold_disk_config + +Will use a custom git tree and branch when doing any devstack +operations, such as ``stack.sh``. + +When testing complicated changes committing to these trees, then doing +``./unstack.sh && ./stack.sh`` is often a valuable way to +iterate. This does take longer per iteration than direct patching, as +the whole devstack needs to rebuild. + +You can use this same approach to test patches that are up for review +in gerrit by using the ref name that gerrit assigns to each change. + +.. code-block:: bash + + [[local|localrc]] + NOVA_BRANCH=refs/changes/10/353710/1 + + +Testing Changes to Libraries +============================ + +When testing changes to libraries consumed by OpenStack services (such +as oslo or any of the python-fooclient libraries) things are a little +more complicated. By default we only test with released versions of +these libraries that are on pypi. + +You must first override this with the setting ``LIBS_FROM_GIT``. This +will enable your DevStack with the git version of that library instead +of the released version. + +After that point you can also specify ``**_REPO``, ``**_BRANCH`` to use +your changes instead of just upstream master. + +.. 
code-block:: bash + + [[local|localrc]] + LIBS_FROM_GIT=oslo.policy + OSLOPOLICY_REPO=/home/sdague/oslo.policy + OSLOPOLICY_BRANCH=better_exception + +As libraries are not installed `editable` by pip, after you make any +local changes you will need to: + +* cd to top of library path +* sudo pip install -U . +* restart all services you want to use the new library + +You can do that with wildcards such as + +.. code-block:: bash + + sudo systemctl restart "devstack@n-*" + +which will restart all nova services. diff --git a/doc/source/faq.rst b/doc/source/faq.rst new file mode 100644 index 0000000000..8214de0f6a --- /dev/null +++ b/doc/source/faq.rst @@ -0,0 +1,233 @@ +=== +FAQ +=== + +.. contents:: + :local: + +General Questions +================= + +Can I use DevStack for production? +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +DevStack is targeted at developers and CI systems to use the raw +upstream code. It makes many choices that are not appropriate for +production systems. + +Your best choice is probably to choose a `distribution of OpenStack +`__. + +Can I use DevStack as a development environment? +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Sure, you can. That said, there are a couple of things you should note before +doing so: + +- DevStack makes a lot of configuration changes to your system and should not + be run in your main development environment. + +- All the repositories that DevStack clones when deploying are considered + volatile by default and thus are subject to hard resets. This is necessary to + keep you in sync with the latest upstream, which is what you want in a CI + situation, but it can result in branches being overwritten and files being + removed. + + The corollary of this is that if you are working on a specific project, using + the DevStack project repository (defaulted to ``/opt/stack/``) as + the single master repository for storing all your work is not recommended. + This behavior can be overridden by setting the ``RECLONE`` config option to + ``no``. Alternatively, you can avoid running ``stack.sh`` to redeploy by + restarting services manually. In any case, you should generally ensure work + in progress is pushed to Gerrit or otherwise backed up before running + ``stack.sh``. + +- If you use DevStack within a VM, you may wish to mount a local OpenStack + directory, such as ``~/src/openstack``, inside the VM and configure DevStack + to use this as the clone location using the ``{PROJECT}_REPO`` config + variables. For example, assuming you're using Vagrant and sharing your home + directory, you should place the following in ``local.conf``: + + .. code-block:: shell + + NEUTRON_REPO=/home/vagrant/src/neutron + NOVA_REPO=/home/vagrant/src/nova + KEYSTONE_REPO=/home/vagrant/src/keystone + GLANCE_REPO=/home/vagrant/src/glance + SWIFT_REPO=/home/vagrant/src/swift + HORIZON_REPO=/home/vagrant/src/horizon + CINDER_REPO=/home/vagrant/src/cinder + HEAT_REPO=/home/vagrant/src/heat + TEMPEST_REPO=/home/vagrant/src/tempest + HEATCLIENT_REPO=/home/vagrant/src/python-heatclient + GLANCECLIENT_REPO=/home/vagrant/src/python-glanceclient + NOVACLIENT_REPO=/home/vagrant/src/python-novaclient + NEUTRONCLIENT_REPO=/home/vagrant/src/python-neutronclient + OPENSTACKCLIENT_REPO=/home/vagrant/src/python-openstackclient + HEAT_CFNTOOLS_REPO=/home/vagrant/src/heat-cfntools + HEAT_TEMPLATES_REPO=/home/vagrant/src/heat-templates + NEUTRON_FWAAS_REPO=/home/vagrant/src/neutron-fwaas + # ... + +Why a shell script, why not chef/puppet/... 
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The script is meant to be read by humans (as well as run by
+computers); it is the primary documentation after all. Using a recipe
+system requires everyone to agree on and understand chef or puppet.
+
+I'd like to help!
+~~~~~~~~~~~~~~~~~
+
+That isn't a question, but please do! The source for DevStack is at
+`opendev.org `__ and bug
+reports go to `LaunchPad
+`__. Contributions follow the
+usual process as described in the `developer guide
+`__. This
+Sphinx documentation is housed in the doc directory.
+
+Why not use packages?
+~~~~~~~~~~~~~~~~~~~~~
+
+Unlike packages, DevStack leaves your cloud ready to develop -
+checkouts of the code and services running locally under systemd,
+making it easy to hack on and test new patches. However, many people
+are doing the hard work of packaging and recipes for production
+deployments.
+
+Why isn't $MY\_FAVORITE\_DISTRO supported?
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+DevStack is meant for developers and those who want to see how
+OpenStack really works. DevStack is known to run on the distro/release
+combinations listed in ``README.md``. Releases other than those
+documented in ``README.md`` are supported only on a best-effort basis.
+
+Are there any differences between Ubuntu and CentOS/Fedora support?
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Both should work well and are tested by DevStack CI.
+
+Why can't I use another shell?
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+DevStack now uses some specific bash-isms that require Bash 4, such as
+associative arrays. Simple compatibility patches have been accepted in
+the past when they were not complex, but at this point no additional
+compatibility patches will be considered except for shells matching
+the array functionality, as it is very ingrained in the repo and
+project management.
+
+Can I test on OS/X?
+~~~~~~~~~~~~~~~~~~~
+
+Some people have success with bash 4 installed via homebrew to keep
+running tests on OS/X.
+
+Can I at least source ``openrc`` with ``zsh``?
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+People have reported success with a special function to run ``openrc``
+through bash for this:
+
+.. code-block:: bash
+
+   function sourceopenrc {
+       pushd ~/devstack >/dev/null
+       eval $(bash -c ". openrc $1 $2 >/dev/null;env|sed -n '/OS_/ { s/^/export /;p}'")
+       popd >/dev/null
+   }
+
+
+Operation and Configuration
+===========================
+
+Can DevStack handle a multi-node installation?
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Yes, see :doc:`multinode lab guide `.
+
+How can I document the environment that DevStack is using?
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+DevStack includes a script (``tools/info.sh``) that gathers the
+versions of the relevant installed apt packages, pip packages and git
+repos. This is a good way to verify what Python modules are
+installed.
+
+How do I turn off a service that is enabled by default?
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Services can be turned off by adding ``disable_service xxx`` to
+``local.conf`` (using ``c-vol`` in this example):
+
+  ::
+
+    disable_service c-vol
+
+Is enabling a service that defaults to off done with the reverse of the above?
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Of course!
+
+  ::
+
+    enable_service q-svc
+
+How do I run a specific OpenStack release?
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +DevStack master tracks the upstream master of all the projects. If you +would like to run a stable branch of OpenStack, you should use the +corresponding stable branch of DevStack as well. For instance the +``stable/ocata`` version of DevStack will already default to all the +projects running at ``stable/ocata`` levels. + +Note: it's also possible to manually adjust the ``*_BRANCH`` variables +further if you would like to test specific milestones, or even custom +out of tree branches. This is done with entries like the following in +your ``local.conf`` + +:: + + [[local|localrc]] + GLANCE_BRANCH=11.0.0.0rc1 + NOVA_BRANCH=12.0.0.0.rc1 + + +Upstream DevStack is only tested with master and stable +branches. Setting custom BRANCH definitions is not guaranteed to +produce working results. + +What can I do about RabbitMQ not wanting to start on my fresh new VM? +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +This is often caused by ``erlang`` not being happy with the hostname +resolving to a reachable IP address. Make sure your hostname resolves +to a working IP address; setting it to 127.0.0.1 in ``/etc/hosts`` is +often good enough for a single-node installation. And in an extreme +case, use ``clean.sh`` to eradicate it and try again. + +Why are my configuration changes ignored? +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +You may have run into the package prerequisite installation +timeout. ``tools/install_prereqs.sh`` has a timer that skips the +package installation checks if it was run within the last +``PREREQ_RERUN_HOURS`` hours (default is 2). To override this, set +``FORCE_PREREQ=1`` and the package checks will never be skipped. + +Miscellaneous +============= + +``tools/fixup_stuff.sh`` is broken and shouldn't 'fix' just one version of packages. +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Stuff in there is to correct problems in an environment that need to +be fixed elsewhere or may/will be fixed in a future release. In the +case of ``httplib2`` and ``prettytable`` specific problems with +specific versions are being worked around. If later releases have +those problems than we'll add them to the script. Knowing about the +broken future releases is valuable rather than polling to see if it +has been fixed. diff --git a/doc/source/guides.rst b/doc/source/guides.rst new file mode 100644 index 0000000000..e7b46b6e55 --- /dev/null +++ b/doc/source/guides.rst @@ -0,0 +1,80 @@ +Guides +====== + +.. warning:: + + The guides are point in time contributions, and may not always be + up to date with the latest work in devstack. + +Walk through various setups used by stackers + +.. toctree:: + :glob: + :hidden: + :maxdepth: 1 + + guides/single-vm + guides/single-machine + guides/lxc + guides/multinode-lab + guides/neutron + guides/devstack-with-nested-kvm + guides/nova + guides/devstack-with-octavia + guides/devstack-with-ldap + +All-In-One Single VM +-------------------- + +Run :doc:`OpenStack in a VM `. The VMs launched in your cloud will be slow as +they are running in QEMU (emulation), but it is useful if you don't have +spare hardware laying around. :doc:`[Read] ` + +All-In-One Single Machine +------------------------- + +Run :doc:`OpenStack on dedicated hardware ` This can include a +server-class machine or a laptop at home. +:doc:`[Read] ` + +All-In-One LXC Container +------------------------- + +Run :doc:`OpenStack in a LXC container `. 
Beneficial for intermediate +and advanced users. The VMs launched in this cloud will be fully accelerated but +not all OpenStack features are supported. :doc:`[Read] ` + +Multi-Node Lab +-------------- + +Setup a :doc:`multi-node cluster ` with dedicated VLANs for VMs & Management. +:doc:`[Read] ` + +DevStack with Neutron Networking +-------------------------------- + +Building a DevStack cluster with :doc:`Neutron Networking `. +This guide is meant for building lab environments with a dedicated +control node and multiple compute nodes. + +DevStack with KVM-based Nested Virtualization +--------------------------------------------- + +Procedure to setup :doc:`DevStack with KVM-based Nested Virtualization +`. With this setup, Nova instances +will be more performant than with plain QEMU emulation. + +Nova and devstack +-------------------------------- + +Guide to working with nova features :doc:`Nova and devstack `. + +Configure Octavia +----------------- + +Guide on :doc:`Configure Octavia `. + +Deploying DevStack with LDAP +---------------------------- + +Guide to setting up :doc:`DevStack with LDAP `. diff --git a/doc/source/guides/devstack-with-ldap.rst b/doc/source/guides/devstack-with-ldap.rst new file mode 100644 index 0000000000..4c54723c71 --- /dev/null +++ b/doc/source/guides/devstack-with-ldap.rst @@ -0,0 +1,174 @@ +============================ +Deploying DevStack with LDAP +============================ + +The OpenStack Identity service has the ability to integrate with LDAP. The goal +of this guide is to walk you through setting up an LDAP-backed OpenStack +development environment. + +Introduction +============ + +LDAP support in keystone is read-only. You can use it to back an entire +OpenStack deployment to a single LDAP server, or you can use it to back +separate LDAP servers to specific keystone domains. Users within those domains +can authenticate against keystone, assume role assignments, and interact with +other OpenStack services. + +Configuration +============= + +To deploy an OpenLDAP server, make sure ``ldap`` is added to the list of +``ENABLED_SERVICES`` in the ``local.conf`` file:: + + enable_service ldap + +Devstack will require a password to set up an LDAP administrator. This +administrative user is also the bind user specified in keystone's configuration +files, similar to a ``keystone`` user for MySQL databases. + +Devstack will prompt you for a password when running ``stack.sh`` if +``LDAP_PASSWORD`` is not set. You can add the following to your +``local.conf``:: + + LDAP_PASSWORD=super_secret_password + +At this point, devstack should have everything it needs to deploy OpenLDAP, +bootstrap it with a minimal set of users, and configure it to back to a domain +in keystone. You can do this by running the ``stack.sh`` script:: + + $ ./stack.sh + +Once ``stack.sh`` completes, you should have a running keystone deployment with +a basic set of users. It is important to note that not all users will live +within LDAP. Instead, keystone will back different domains to different +identity sources. For example, the ``default`` domain will be backed by MySQL. +This is usually where you'll find your administrative and services users. If +you query keystone for a list of domains, you should see a domain called +``Users``. This domain is set up by devstack and points to OpenLDAP. + +User Management +=============== + +Initially, there will only be two users in the LDAP server. The ``Manager`` +user is used by keystone to talk to OpenLDAP. 
The ``demo`` user is a generic +user that you should be able to see if you query keystone for users within the +``Users`` domain. Both of these users were added to LDAP using basic LDAP +utilities installed by devstack (e.g. ``ldap-utils``) and LDIFs. The LDIFs used +to create these users can be found in ``devstack/files/ldap/``. + +Listing Users +------------- + +To list all users in LDAP directly, you can use ``ldapsearch`` with the LDAP +user bootstrapped by devstack:: + + $ ldapsearch -x -w LDAP_PASSWORD -D cn=Manager,dc=openstack,dc=org \ + -H ldap://localhost -b dc=openstack,dc=org + +As you can see, devstack creates an OpenStack domain called ``openstack.org`` +as a container for the ``Manager`` and ``demo`` users. + +Creating Users +-------------- + +Since keystone's LDAP integration is read-only, users must be added directly to +LDAP. Users added directly to OpenLDAP will automatically be placed into the +``Users`` domain. + +LDIFs can be used to add users via the command line. The following is an +example LDIF that can be used to create a new LDAP user, let's call it +``peter.ldif.in``:: + + dn: cn=peter,ou=Users,dc=openstack,dc=org + cn: peter + displayName: Peter Quill + givenName: Peter Quill + mail: starlord@openstack.org + objectClass: inetOrgPerson + objectClass: top + sn: peter + uid: peter + userPassword: im-a-better-pilot-than-rocket + +Now, we use the ``Manager`` user to create a user for Peter in LDAP:: + + $ ldapadd -x -w LDAP_PASSWORD -D cn=Manager,dc=openstack,dc=org \ + -H ldap://localhost -c -f peter.ldif.in + +We should be able to assign Peter roles on projects. After Peter has some level +of authorization, he should be able to login to Horizon by specifying the +``Users`` domain and using his ``peter`` username and password. Authorization +can be given to Peter by creating a project within the ``Users`` domain and +giving him a role assignment on that project:: + + $ openstack project create --domain Users awesome-mix-vol-1 + +-------------+----------------------------------+ + | Field | Value | + +-------------+----------------------------------+ + | description | | + | domain_id | 61a2de23107c46bea2d758167af707b9 | + | enabled | True | + | id | 7d422396d54945cdac8fe1e8e32baec4 | + | is_domain | False | + | name | awesome-mix-vol-1 | + | parent_id | 61a2de23107c46bea2d758167af707b9 | + | tags | [] | + +-------------+----------------------------------+ + $ openstack role add --user peter --user-domain Users \ + --project awesome-mix-vol-1 --project-domain Users admin + + +Deleting Users +-------------- + +We can use the same basic steps to remove users from LDAP, but instead of using +LDIFs, we can just pass the ``dn`` of the user we want to delete:: + + $ ldapdelete -x -w LDAP_PASSWORD -D cn=Manager,dc=openstack,dc=org \ + -H ldap://localhost cn=peter,ou=Users,dc=openstack,dc=org + +Group Management +================ + +Like users, groups are considered specific identities. This means that groups +also fall under the same read-only constraints as users and they can be managed +directly with LDAP in the same way users are with LDIFs. 
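+
+Because keystone only reads from the LDAP tree, a quick way to confirm what
+the identity service actually sees is to list users and groups in the
+``Users`` domain with the openstack client (a rough sanity check, assuming
+you have admin credentials loaded in your shell)::
+
+    $ openstack user list --domain Users
+    $ openstack group list --domain Users
+
+Entries added or removed directly in OpenLDAP should show up in these
+listings without any additional keystone configuration.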
+
+Adding Groups
+-------------
+
+Let's define a specific group with the following LDIF::
+
+    dn: cn=guardians,ou=UserGroups,dc=openstack,dc=org
+    objectClass: groupOfNames
+    cn: guardians
+    description: Guardians of the Galaxy
+    member: cn=peter,dc=openstack,dc=org
+    member: cn=gamora,dc=openstack,dc=org
+    member: cn=drax,dc=openstack,dc=org
+    member: cn=rocket,dc=openstack,dc=org
+    member: cn=groot,dc=openstack,dc=org
+
+We can create the group using the same ``ldapadd`` command as we did with
+users::
+
+    $ ldapadd -x -w LDAP_PASSWORD -D cn=Manager,dc=openstack,dc=org \
+      -H ldap://localhost -c -f guardian-group.ldif.in
+
+If we check the group membership in Horizon, we'll see that only Peter is a
+member of the ``guardians`` group, despite the whole crew being specified in
+the LDIF. Once those accounts are created in LDAP, they will automatically be
+added to the ``guardians`` group. They will also assume any role assignments
+given to the ``guardians`` group.
+
+Deleting Groups
+---------------
+
+Just like users, groups can be deleted using the ``dn``::
+
+    $ ldapdelete -x -w LDAP_PASSWORD -D cn=Manager,dc=openstack,dc=org \
+      -H ldap://localhost cn=guardians,ou=UserGroups,dc=openstack,dc=org
+
+Note that this operation will not remove users within that group. It will only
+remove the group itself and the memberships any users had with that group.
diff --git a/doc/source/guides/devstack-with-nested-kvm.rst b/doc/source/guides/devstack-with-nested-kvm.rst
new file mode 100644
index 0000000000..ba483e9ec9
--- /dev/null
+++ b/doc/source/guides/devstack-with-nested-kvm.rst
@@ -0,0 +1,141 @@
+.. _kvm_nested_virt:
+
+=======================================================
+Configure DevStack with KVM-based Nested Virtualization
+=======================================================
+
+When using virtualization technologies like KVM, one can take advantage
+of "Nested VMX" (i.e. the ability to run KVM on KVM) so that the VMs in
+the cloud (Nova guests) can run relatively faster than with plain QEMU
+emulation.
+
+Kernels shipped with Linux distributions don't have this enabled by
+default. This guide outlines the configuration details to enable nested
+virtualization in KVM-based environments, and how to set up DevStack
+(which will run in a VM) to take advantage of it.
+
+
+Nested Virtualization Configuration
+===================================
+
+Configure Nested KVM for Intel-based Machines
+---------------------------------------------
+
+Procedure to enable nested KVM virtualization on Intel-based machines.
+
+Check if the nested KVM Kernel parameter is enabled:
+
+::
+
+    cat /sys/module/kvm_intel/parameters/nested
+    N
+
+Temporarily remove the KVM Intel Kernel module, enable nested
+virtualization to be persistent across reboots and add the Kernel
+module back:
+
+::
+
+    sudo rmmod kvm-intel
+    sudo sh -c "echo 'options kvm-intel nested=y' >> /etc/modprobe.d/dist.conf"
+    sudo modprobe kvm-intel
+
+Ensure the Nested KVM Kernel module parameter for Intel is enabled on
+the host:
+
+::
+
+    cat /sys/module/kvm_intel/parameters/nested
+    Y
+
+    modinfo kvm_intel | grep nested
+    parm: nested:bool
+
+Start your VM; it should now have KVM capabilities -- you can verify
+that by ensuring the ``/dev/kvm`` character device is present.
+
+
+Configure Nested KVM for AMD-based Machines
+-------------------------------------------
+
+Procedure to enable nested KVM virtualization on AMD-based machines.
+ +Check if the nested KVM Kernel parameter is enabled: + +:: + + cat /sys/module/kvm_amd/parameters/nested + 0 + + +Temporarily remove the KVM AMD Kernel module, enable nested +virtualization to be persistent across reboots and add the Kernel module +back: + +:: + + sudo rmmod kvm-amd + sudo sh -c "echo 'options kvm-amd nested=1' >> /etc/modprobe.d/dist.conf" + sudo modprobe kvm-amd + +Ensure the Nested KVM Kernel module parameter for AMD is enabled on the +host: + +:: + + cat /sys/module/kvm_amd/parameters/nested + 1 + + modinfo kvm_amd | grep -i nested + parm: nested:int + +To make the above value persistent across reboots, add an entry in +/etc/modprobe.d/dist.conf so it looks as below:: + + cat /etc/modprobe.d/dist.conf + options kvm-amd nested=y + + +Expose Virtualization Extensions to DevStack VM +----------------------------------------------- + +Edit the VM's libvirt XML configuration via ``virsh`` utility: + +:: + + sudo virsh edit devstack-vm + +Add the below snippet to expose the host CPU features to the VM: + +:: + + + + + +Ensure DevStack VM is Using KVM +------------------------------- + +Before invoking ``stack.sh`` in the VM, ensure that KVM is enabled. This +can be verified by checking for the presence of the file ``/dev/kvm`` in +your VM. If it is present, DevStack will default to using the config +attribute ``virt_type = kvm`` in ``/etc/nova.conf``; otherwise, it'll fall +back to ``virt_type=qemu``, i.e. plain QEMU emulation. + +Optionally, to explicitly set the type of virtualization, to KVM, by the +libvirt driver in nova, the below config attribute can be used in +DevStack's ``local.conf``: + +:: + + LIBVIRT_TYPE=kvm + + +Once DevStack is configured successfully, verify if the Nova instances +are using KVM by noticing the QEMU CLI invoked by Nova is using the +parameter ``accel=kvm``, e.g.: + +:: + + ps -ef | grep -i qemu + root 29773 1 0 11:24 ? 00:00:00 /usr/bin/qemu-system-x86_64 -machine accel=kvm [. . .] diff --git a/doc/source/guides/devstack-with-octavia.rst b/doc/source/guides/devstack-with-octavia.rst new file mode 100644 index 0000000000..55939f0f12 --- /dev/null +++ b/doc/source/guides/devstack-with-octavia.rst @@ -0,0 +1,144 @@ +Devstack with Octavia Load Balancing +==================================== + +Starting with the OpenStack Pike release, Octavia is now a standalone service +providing load balancing services for OpenStack. + +This guide will show you how to create a devstack with `Octavia API`_ enabled. + +.. _Octavia API: https://docs.openstack.org/api-ref/load-balancer/v2/index.html + +Phase 1: Create DevStack + 2 nova instances +-------------------------------------------- + +First, set up a VM of your choice with at least 8 GB RAM and 16 GB disk space, +make sure it is updated. Install git and any other developer tools you find +useful. + +Install devstack:: + + git clone https://opendev.org/openstack/devstack + cd devstack/tools + sudo ./create-stack-user.sh + cd ../.. + sudo mv devstack /opt/stack + sudo chown -R stack.stack /opt/stack/devstack + +This will clone the current devstack code locally, then setup the "stack" +account that devstack services will run under. Finally, it will move devstack +into its default location in /opt/stack/devstack. 
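+
+Before editing the configuration, it can be worth a quick sanity check that
+the checkout ended up where DevStack expects it and is readable by the
+``stack`` user (a minimal check, assuming the default ``/opt/stack``
+location)::
+
+    ls -ld /opt/stack/devstack
+    sudo -u stack git -C /opt/stack/devstack log --oneline -1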
+ +Edit your ``/opt/stack/devstack/local.conf`` to look like:: + + [[local|localrc]] + # ===== BEGIN localrc ===== + DATABASE_PASSWORD=password + ADMIN_PASSWORD=password + SERVICE_PASSWORD=password + SERVICE_TOKEN=password + RABBIT_PASSWORD=password + GIT_BASE=https://opendev.org + # Optional settings: + # OCTAVIA_AMP_BASE_OS=centos + # OCTAVIA_AMP_DISTRIBUTION_RELEASE_ID=9-stream + # OCTAVIA_AMP_IMAGE_SIZE=3 + # OCTAVIA_LB_TOPOLOGY=ACTIVE_STANDBY + # OCTAVIA_ENABLE_AMPHORAV2_JOBBOARD=True + # LIBS_FROM_GIT+=octavia-lib, + # Enable Logging + LOGFILE=$DEST/logs/stack.sh.log + VERBOSE=True + LOG_COLOR=True + enable_service rabbit + enable_plugin neutron $GIT_BASE/openstack/neutron + # Octavia supports using QoS policies on the VIP port: + enable_service q-qos + enable_service placement-api placement-client + # Octavia services + enable_plugin octavia $GIT_BASE/openstack/octavia master + enable_plugin octavia-dashboard $GIT_BASE/openstack/octavia-dashboard + enable_plugin ovn-octavia-provider $GIT_BASE/openstack/ovn-octavia-provider + enable_plugin octavia-tempest-plugin $GIT_BASE/openstack/octavia-tempest-plugin + enable_service octavia o-api o-cw o-hm o-hk o-da + # If you are enabling barbican for TLS offload in Octavia, include it here. + # enable_plugin barbican $GIT_BASE/openstack/barbican + # enable_service barbican + # Cinder (optional) + disable_service c-api c-vol c-sch + # Tempest + enable_service tempest + # ===== END localrc ===== + +.. note:: + For best performance it is highly recommended to use KVM + virtualization instead of QEMU. + Also make sure nested virtualization is enabled as documented in + :ref:`the respective guide `. + By adding ``LIBVIRT_CPU_MODE="host-passthrough"`` to your + ``local.conf`` you enable the guest VMs to make use of all features your + host's CPU provides. + +Run stack.sh and do some sanity checks:: + + sudo su - stack + cd /opt/stack/devstack + ./stack.sh + . ./openrc + + openstack network list # should show public and private networks + +Create two nova instances that we can use as test http servers:: + + # create nova instances on private network + openstack server create --image $(openstack image list | awk '/ cirros-.*-x86_64-.* / {print $2}') --flavor 1 --nic net-id=$(openstack network list | awk '/ private / {print $2}') node1 + openstack server create --image $(openstack image list | awk '/ cirros-.*-x86_64-.* / {print $2}') --flavor 1 --nic net-id=$(openstack network list | awk '/ private / {print $2}') node2 + openstack server list # should show the nova instances just created + + # add secgroup rules to allow ssh etc.. + openstack security group rule create default --protocol icmp + openstack security group rule create default --protocol tcp --dst-port 22:22 + openstack security group rule create default --protocol tcp --dst-port 80:80 + +Set up a simple web server on each of these instances. One possibility is to use +the `Golang test server`_ that is used by the Octavia project for CI testing +as well. +Copy the binary to your instances and start it as shown below +(username 'cirros', password 'gocubsgo'):: + + INST_IP= + scp -O test_server.bin cirros@${INST_IP}: + ssh -f cirros@${INST_IP} ./test_server.bin -id ${INST_IP} + +When started this way the test server will respond to HTTP requests with +its own IP. 
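+
+Before building the load balancer, it can help to confirm that each backend
+answers on port 80 and reports its own address (a rough check using the
+busybox ``wget`` shipped in the cirros image; adjust if you run a different
+test server)::
+
+    ssh cirros@${INST_IP} "wget -qO- http://localhost"
+
+Run this against both instances; each should print its own IP, which makes
+the round-robin behaviour easy to spot later when curling the VIP.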
+ +Phase 2: Create your load balancer +---------------------------------- + +Create your load balancer:: + + openstack loadbalancer create --wait --name lb1 --vip-subnet-id private-subnet + openstack loadbalancer listener create --wait --protocol HTTP --protocol-port 80 --name listener1 lb1 + openstack loadbalancer pool create --wait --lb-algorithm ROUND_ROBIN --listener listener1 --protocol HTTP --name pool1 + openstack loadbalancer healthmonitor create --wait --delay 5 --timeout 2 --max-retries 1 --type HTTP pool1 + openstack loadbalancer member create --wait --subnet-id private-subnet --address --protocol-port 80 pool1 + openstack loadbalancer member create --wait --subnet-id private-subnet --address --protocol-port 80 pool1 + +Please note: The fields are the IP addresses of the nova +servers created in Phase 1. +Also note, using the API directly you can do all of the above commands in one +API call. + +Phase 3: Test your load balancer +-------------------------------- + +:: + + openstack loadbalancer show lb1 # Note the vip_address + curl http:// + curl http:// + +This should show the "Welcome to " message from each member server. + + +.. _Golang test server: https://opendev.org/openstack/octavia-tempest-plugin/src/branch/master/octavia_tempest_plugin/contrib/test_server diff --git a/doc/source/guides/lxc.rst b/doc/source/guides/lxc.rst new file mode 100644 index 0000000000..dcaa4166c4 --- /dev/null +++ b/doc/source/guides/lxc.rst @@ -0,0 +1,164 @@ +================================ +All-In-One Single LXC Container +================================ + +This guide walks you through the process of deploying OpenStack using devstack +in an LXC container instead of a VM. + +The primary benefits to running devstack inside a container instead of a VM is +faster performance and lower memory overhead while still providing a suitable +level of isolation. This can be particularly useful when you want to simulate +running OpenStack on multiple nodes. + +.. Warning:: Containers do not provide the same level of isolation as a virtual + machine. + +.. Note:: Not all OpenStack features support running inside of a container. See + `Limitations`_ section below for details. :doc:`OpenStack in a VM ` + is recommended for beginners. + +Prerequisites +============== + +This guide is written for Ubuntu 14.04 but should be adaptable for any modern +Linux distribution. + +Install the LXC package:: + + sudo apt-get install lxc + +You can verify support for containerization features in your currently running +kernel using the ``lxc-checkconfig`` command. + +Container Setup +=============== + +Configuration +--------------- + +For a successful run of ``stack.sh`` and to permit use of KVM to run the VMs you +launch inside your container, we need to use the following additional +configuration options. Place the following in a file called +``devstack-lxc.conf``:: + + # Permit access to /dev/loop* + lxc.cgroup.devices.allow = b 7:* rwm + + # Setup access to /dev/net/tun and /dev/kvm + lxc.mount.entry = /dev/net/tun dev/net/tun none bind,create=file 0 0 + lxc.mount.entry = /dev/kvm dev/kvm none bind,create=file 0 0 + + # Networking + lxc.network.type = veth + lxc.network.flags = up + lxc.network.link = lxcbr0 + + +Create Container +------------------- + +The configuration and rootfs for LXC containers are created using the +``lxc-create`` command. + +We will name our container ``devstack`` and use the ``ubuntu`` template which +will use ``debootstrap`` to build a Ubuntu rootfs. 
It will default to the same
+release and architecture as the host system. We also install the additional
+packages ``bsdmainutils`` and ``git`` as we'll need them to run devstack::
+
+    sudo lxc-create -n devstack -t ubuntu -f devstack-lxc.conf -- --packages=bsdmainutils,git
+
+The first time you run this, building the rootfs will take a few minutes to
+download, unpack, and configure all the necessary packages for a minimal
+installation of Ubuntu. LXC will cache this, and subsequent containers will
+only take seconds to create.
+
+.. Note:: To speed up the initial rootfs creation, you can specify a mirror to
+   download the Ubuntu packages from by appending ``--mirror=`` and then the URL
+   of a Ubuntu mirror. To see other template options, you can run
+   ``lxc-create -t ubuntu -h``.
+
+Start Container
+----------------
+
+To start the container, run::
+
+    sudo lxc-start -n devstack
+
+A moment later you should be presented with the login prompt for your container.
+You can log in using the username ``ubuntu`` and password ``ubuntu``.
+
+You can also ssh into your container. On your host, run
+``sudo lxc-info -n devstack`` to get the IP address (e.g.
+``ssh ubuntu@$(sudo lxc-info -n devstack | awk '/IP/ { print $2 }')``).
+
+Run Devstack
+-------------
+
+You should now be logged into your container and almost ready to run devstack.
+The commands in this section should all be run inside your container.
+
+.. Tip:: You can greatly reduce the runtime of your initial devstack setup by
+   ensuring you have your apt sources.list configured to use a fast mirror.
+   Check and update ``/etc/apt/sources.list`` if necessary and then run
+   ``apt-get update``.
+
+#. Download DevStack
+
+   ::
+
+      git clone https://opendev.org/openstack/devstack
+
+#. Configure
+
+   Refer to :ref:`minimal-configuration` if you wish to configure the behaviour
+   of devstack.
+
+#. Start the install
+
+   ::
+
+      cd devstack
+      ./stack.sh
+
+Cleanup
+-------
+
+To stop the container::
+
+    lxc-stop -n devstack
+
+To delete the container::
+
+    lxc-destroy -n devstack
+
+Limitations
+============
+
+Not all OpenStack features may function correctly or at all when run from
+within a container.
+
+Cinder
+-------
+
+Unable to create LVM backed volume
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+  In our configuration, we have not whitelisted access to device-mapper or LVM
+  devices. Doing so will permit your container to have access and control of LVM
+  on the host system. To enable, add the following to your
+  ``devstack-lxc.conf`` before running ``lxc-create``::
+
+    lxc.cgroup.devices.allow = c 10:236 rwm
+    lxc.cgroup.devices.allow = b 252:* rwm
+
+  Additionally you'll need to set ``udev_rules = 0`` in the ``activation``
+  section of ``/etc/lvm/lvm.conf`` unless you mount devtmpfs in your container.
+
+Unable to attach volume to instance
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+  It is not possible to attach cinder volumes to nova instances due to parts of
+  the Linux iSCSI implementation not being network namespace aware. This can be
+  worked around by using network pass-through instead of a separate network
+  namespace but such a setup significantly reduces the isolation of the
+  container (e.g. a ``halt`` command issued in the container will cause the host
+  system to shut down).
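+
+Whichever devices you whitelist, it is worth confirming from inside the
+container that they are actually visible before running ``stack.sh`` (a quick
+sanity check, assuming the ``devstack-lxc.conf`` shown earlier)::
+
+    ls -l /dev/kvm /dev/net/tun
+
+If either device is missing, revisit the ``lxc.mount.entry`` and
+``lxc.cgroup.devices.allow`` lines and restart the container.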
diff --git a/doc/source/guides/multinode-lab.rst b/doc/source/guides/multinode-lab.rst new file mode 100644 index 0000000000..ef339f1f5c --- /dev/null +++ b/doc/source/guides/multinode-lab.rst @@ -0,0 +1,462 @@ +============== +Multi-Node Lab +============== + +Here is OpenStack in a realistic test configuration with multiple +physical servers. + +Prerequisites Linux & Network +============================= + +Minimal Install +--------------- + +You need to have a system with a fresh install of Linux. You can +download the `Minimal +CD `__ for +Ubuntu releases since DevStack will download & install all the +additional dependencies. The netinstall ISO is available for +`Fedora `__ +and +`CentOS/RHEL `__. + +Install a couple of packages to bootstrap configuration: + +:: + + apt-get install -y git sudo || dnf install -y git sudo + +Network Configuration +--------------------- + +The first iteration of the lab uses OpenStack's FlatDHCP network +controller so only a single network will be required. It should be on +its own subnet without DHCP; the host IPs and floating IP pool(s) will +come out of this block. This example uses the following: + +- Gateway: 192.168.42.1 +- Physical nodes: 192.168.42.11-192.168.42.99 +- Floating IPs: 192.168.42.128-192.168.42.254 + +Configure each node with a static IP. For Ubuntu edit +``/etc/network/interfaces``: + +:: + + auto eth0 + iface eth0 inet static + address 192.168.42.11 + netmask 255.255.255.0 + gateway 192.168.42.1 + +For Fedora and CentOS/RHEL edit +``/etc/sysconfig/network-scripts/ifcfg-eth0``: + +:: + + BOOTPROTO=static + IPADDR=192.168.42.11 + NETMASK=255.255.255.0 + GATEWAY=192.168.42.1 + +Installation shake and bake +=========================== + +Add the DevStack User +--------------------- + +OpenStack runs as a non-root user that has sudo access to root. There is +nothing special about the name, we'll use ``stack`` here. Every node +must use the same name and preferably uid. If you created a user during +the OS install you can use it and give it sudo privileges below. +Otherwise create the stack user: + +:: + + useradd -s /bin/bash -d /opt/stack -m stack + +Ensure home directory for the ``stack`` user has executable permission for all, +as RHEL based distros create it with ``700`` and Ubuntu 21.04+ with ``750`` +which can cause issues during deployment. + +:: + + chmod +x /opt/stack + +This user will be making many changes to your system during installation +and operation so it needs to have sudo privileges to root without a +password: + +:: + + echo "stack ALL=(ALL) NOPASSWD: ALL" | sudo tee /etc/sudoers.d/stack + +From here on use the ``stack`` user. **Logout** and **login** as the +``stack`` user. + +Set Up Ssh +---------- + +Set up the stack user on each node with an ssh key for access: + +:: + + mkdir ~/.ssh; chmod 700 ~/.ssh + echo "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCyYjfgyPazTvGpd8OaAvtU2utL8W6gWC4JdRS1J95GhNNfQd657yO6s1AH5KYQWktcE6FO/xNUC2reEXSGC7ezy+sGO1kj9Limv5vrvNHvF1+wts0Cmyx61D2nQw35/Qz8BvpdJANL7VwP/cFI/p3yhvx2lsnjFE3hN8xRB2LtLUopUSVdBwACOVUmH2G+2BWMJDjVINd2DPqRIA4Zhy09KJ3O1Joabr0XpQL0yt/I9x8BVHdAx6l9U0tMg9dj5+tAjZvMAFfye3PJcYwwsfJoFxC8w/SLtqlFX7Ehw++8RtvomvuipLdmWCy+T9hIkl+gHYE4cS3OIqXH7f49jdJf jesse@spacey.local" > ~/.ssh/authorized_keys + +Download DevStack +----------------- + +Grab the latest version of DevStack: + +:: + + git clone https://opendev.org/openstack/devstack + cd devstack + +Up to this point all of the steps apply to each node in the cluster. 
+From here on there are some differences between the cluster controller +(aka 'head node') and the compute nodes. + +Configure Cluster Controller +---------------------------- + +The cluster controller runs all OpenStack services. Configure the +cluster controller's DevStack in ``local.conf``: + +:: + + [[local|localrc]] + HOST_IP=192.168.42.11 + FIXED_RANGE=10.4.128.0/20 + FLOATING_RANGE=192.168.42.128/25 + LOGFILE=/opt/stack/logs/stack.sh.log + ADMIN_PASSWORD=labstack + DATABASE_PASSWORD=supersecret + RABBIT_PASSWORD=supersecret + SERVICE_PASSWORD=supersecret + +In the multi-node configuration the first 10 or so IPs in the private +subnet are usually reserved. Add this to ``local.sh`` to have it run +after every ``stack.sh`` run: + +:: + + for i in `seq 2 10`; do /opt/stack/nova/bin/nova-manage fixed reserve 10.4.128.$i; done + +Fire up OpenStack: + +:: + + ./stack.sh + +A stream of activity ensues. When complete you will see a summary of +``stack.sh``'s work, including the relevant URLs, accounts and passwords +to poke at your shiny new OpenStack. The most recent log file is +available in ``stack.sh.log``. + +Configure Compute Nodes +----------------------- + +The compute nodes only run the OpenStack worker services. For additional +machines, create a ``local.conf`` with: + +:: + + [[local|localrc]] + HOST_IP=192.168.42.12 # change this per compute node + FIXED_RANGE=10.4.128.0/20 + FLOATING_RANGE=192.168.42.128/25 + LOGFILE=/opt/stack/logs/stack.sh.log + ADMIN_PASSWORD=labstack + DATABASE_PASSWORD=supersecret + RABBIT_PASSWORD=supersecret + SERVICE_PASSWORD=supersecret + DATABASE_TYPE=mysql + SERVICE_HOST=192.168.42.11 + MYSQL_HOST=$SERVICE_HOST + RABBIT_HOST=$SERVICE_HOST + GLANCE_HOSTPORT=$SERVICE_HOST:9292 + ENABLED_SERVICES=n-cpu,c-vol,placement-client,ovn-controller,ovs-vswitchd,ovsdb-server,q-ovn-metadata-agent + NOVA_VNC_ENABLED=True + NOVNCPROXY_URL="http://$SERVICE_HOST:6080/vnc_lite.html" + VNCSERVER_LISTEN=$HOST_IP + VNCSERVER_PROXYCLIENT_ADDRESS=$VNCSERVER_LISTEN + +Fire up OpenStack: + +:: + + ./stack.sh + +A stream of activity ensues. When complete you will see a summary of +``stack.sh``'s work, including the relevant URLs, accounts and passwords +to poke at your shiny new OpenStack. The most recent log file is +available in ``stack.sh.log``. + +Starting in the Ocata release, Nova requires a `Cells v2`_ deployment. Compute +node services must be mapped to a cell before they can be used. + +After each compute node is stacked, verify it shows up in the +``nova service-list --binary nova-compute`` output. The compute service is +registered in the cell database asynchronously so this may require polling. + +Once the compute node services shows up, run the ``./tools/discover_hosts.sh`` +script from the control node to map compute hosts to the single cell. + +The compute service running on the primary control node will be +discovered automatically when the control node is stacked so this really +only needs to be performed for subnodes. + +.. _Cells v2: https://docs.openstack.org/nova/latest/user/cells.html + +Configure Tempest Node to run the Tempest tests +----------------------------------------------- + +If there is a need to execute Tempest tests against different Cluster +Controller node then it can be done by re-using the ``local.conf`` file from +the Cluster Controller node but with not enabled Controller services in +``ENABLED_SERVICES`` variable. This variable needs to contain only ``tempest`` +as a configured service. 
Then variable ``SERVICES_FOR_TEMPEST`` must be +configured to contain those services that were enabled on the Cluster +Controller node in the ``ENABLED_SERVICES`` variable. For example the +``local.conf`` file could look as follows: + +:: + + [[local|localrc]] + HOST_IP=192.168.42.12 # change this per compute node + FIXED_RANGE=10.4.128.0/20 + FLOATING_RANGE=192.168.42.128/25 + LOGFILE=/opt/stack/logs/stack.sh.log + ADMIN_PASSWORD=labstack + DATABASE_PASSWORD=supersecret + RABBIT_PASSWORD=supersecret + SERVICE_PASSWORD=supersecret + DATABASE_TYPE=mysql + SERVICE_HOST=192.168.42.11 + MYSQL_HOST=$SERVICE_HOST + RABBIT_HOST=$SERVICE_HOST + GLANCE_HOSTPORT=$SERVICE_HOST:9292 + NOVA_VNC_ENABLED=True + NOVNCPROXY_URL="http://$SERVICE_HOST:6080/vnc_lite.html" + VNCSERVER_LISTEN=$HOST_IP + VNCSERVER_PROXYCLIENT_ADDRESS=$VNCSERVER_LISTEN + ENABLED_SERVICES=tempest + SERVICES_FOR_TEMPEST=keystone,nova,neutron,glance + +Then just execute the devstack: + +:: + + ./stack.sh + + +Cleaning Up After DevStack +-------------------------- + +Shutting down OpenStack is now as simple as running the included +``unstack.sh`` script: + +:: + + ./unstack.sh + +A more aggressive cleanup can be performed using ``clean.sh``. It +removes certain troublesome packages and attempts to leave the system in +a state where changing the database or queue manager can be reliably +performed. + +:: + + ./clean.sh + +Sometimes running instances are not cleaned up. DevStack attempts to do +this when it runs but there are times it needs to still be done by hand: + +:: + + sudo rm -rf /etc/libvirt/qemu/inst* + sudo virsh list | grep inst | awk '{print $1}' | xargs -n1 virsh destroy + +Going further +============= + +Additional Users +---------------- + +DevStack creates two OpenStack users (``admin`` and ``demo``) and two +projects (also ``admin`` and ``demo``). ``admin`` is exactly what it +sounds like, a privileged administrative account that is a member of +both the ``admin`` and ``demo`` projects. ``demo`` is a normal user +account that is only a member of the ``demo`` project. Creating +additional OpenStack users can be done through the dashboard, sometimes +it is easier to do them in bulk from a script, especially since they get +blown away every time ``stack.sh`` runs. The following steps are ripe +for scripting: + +:: + + # Get admin creds + . openrc admin admin + + # List existing projects + openstack project list + + # List existing users + openstack user list + + # Add a user and project + NAME=bob + PASSWORD=BigSecret + PROJECT=$NAME + openstack project create $PROJECT + openstack user create $NAME --password=$PASSWORD --project $PROJECT + openstack role add Member --user $NAME --project $PROJECT + # The Member role is created by stack.sh + # openstack role assignment list + +Swift +----- + +Swift, OpenStack Object Storage, requires a significant amount of resources +and is disabled by default in DevStack. The support in DevStack is geared +toward a minimal installation but can be used for testing. To implement a +true multi-node test of swift, additional steps will be required. Enabling it is as +simple as enabling the ``swift`` service in ``local.conf``: + +:: + + enable_service s-proxy s-object s-container s-account + +Swift, OpenStack Object Storage, will put its data files in ``SWIFT_DATA_DIR`` (default +``/opt/stack/data/swift``). The size of the data 'partition' created +(really a loop-mounted file) is set by ``SWIFT_LOOPBACK_DISK_SIZE``. 
The +Swift config files are located in ``SWIFT_CONF_DIR`` (default +``/etc/swift``). All of these settings can be overridden in (wait for +it...) ``local.conf``. + +Volumes +------- + +DevStack will automatically use an existing LVM volume group named +``stack-volumes`` to store cloud-created volumes. If ``stack-volumes`` +doesn't exist, DevStack will set up a loop-mounted file to contain +it. If the default size is insufficient for the number and size of volumes +required, it can be overridden by setting ``VOLUME_BACKING_FILE_SIZE`` in +``local.conf`` (sizes given in ``truncate`` compatible format, e.g. ``24G``). + +``stack-volumes`` can be pre-created on any physical volume supported by +Linux's LVM. The name of the volume group can be changed by setting +``VOLUME_GROUP_NAME`` in ``localrc``. ``stack.sh`` deletes all logical +volumes in ``VOLUME_GROUP_NAME`` that begin with ``VOLUME_NAME_PREFIX`` as +part of cleaning up from previous runs. It is recommended to not use the +root volume group as ``VOLUME_GROUP_NAME``. + +The details of creating the volume group depends on the server hardware +involved but looks something like this: + +:: + + pvcreate /dev/sdc + vgcreate stack-volumes /dev/sdc + +Syslog +------ + +DevStack is capable of using ``rsyslog`` to aggregate logging across the +cluster. It is off by default; to turn it on set ``SYSLOG=True`` in +``local.conf``. ``SYSLOG_HOST`` defaults to ``HOST_IP``; on the compute +nodes it must be set to the IP of the cluster controller to send syslog +output there. In the example above, add this to the compute node +``local.conf``: + +:: + + SYSLOG_HOST=192.168.42.11 + +Using Alternate Repositories/Branches +------------------------------------- + +The git repositories for all of the OpenStack services are defined in +``stackrc``. Since this file is a part of the DevStack package changes +to it will probably be overwritten as updates are applied. Every setting +in ``stackrc`` can be redefined in ``local.conf``. + +To change the repository or branch that a particular OpenStack service +is created from, simply change the value of ``*_REPO`` or ``*_BRANCH`` +corresponding to that service. + +After making changes to the repository or branch, if ``RECLONE`` is not +set in ``localrc`` it may be necessary to remove the corresponding +directory from ``/opt/stack`` to force git to re-clone the repository. + +For example, to pull nova, OpenStack Compute, from a proposed release candidate +in the primary nova repository: + +:: + + NOVA_BRANCH=rc-proposed + +To pull glance, OpenStack Image service, from an experimental fork: + +:: + + GLANCE_BRANCH=try-something-big + GLANCE_REPO=https://github.com/mcuser/glance.git + +Notes stuff you might need to know +================================== + +Set MySQL Password +------------------ + +If you forgot to set the root password you can do this: + +:: + + mysqladmin -u root -pnova password 'supersecret' + +Live Migration +-------------- + +In order for live migration to work with the default live migration URI:: + + [libvirt] + live_migration_uri = qemu+ssh://stack@%s/system + +SSH keys need to be exchanged between each compute node: + +1. The SOURCE root user's public RSA key (likely in /root/.ssh/id_rsa.pub) + needs to be in the DESTINATION stack user's authorized_keys file + (~stack/.ssh/authorized_keys). This can be accomplished by manually + copying the contents from the file on the SOURCE to the DESTINATION. 
If + you have a password configured for the stack user, then you can use the + following command to accomplish the same thing:: + + ssh-copy-id -i /root/.ssh/id_rsa.pub stack@DESTINATION + +2. The DESTINATION host's public ECDSA key (/etc/ssh/ssh_host_ecdsa_key.pub) + needs to be in the SOURCE root user's known_hosts file + (/root/.ssh/known_hosts). This can be accomplished by running the + following on the SOURCE machine (hostname must be used):: + + ssh-keyscan -H DEST_HOSTNAME | sudo tee -a /root/.ssh/known_hosts + +3. Verify that login via ssh works without a password:: + + ssh -i /root/.ssh/id_rsa stack@DESTINATION + +In essence, this means that every compute node's root user's public RSA key +must exist in every other compute node's stack user's authorized_keys file and +every compute node's public ECDSA key needs to be in every other compute +node's root user's known_hosts file. Please note that if the root or stack +user does not have a SSH key, one can be generated using:: + + ssh-keygen -t rsa + +The above steps are necessary because libvirtd runs as root when the +live_migration_uri uses the "qemu:///system" family of URIs. For more +information, see the `libvirt documentation`_. + +.. _libvirt documentation: https://libvirt.org/drvqemu.html#securitydriver diff --git a/doc/source/guides/neutron.rst b/doc/source/guides/neutron.rst new file mode 100644 index 0000000000..a7adeeff73 --- /dev/null +++ b/doc/source/guides/neutron.rst @@ -0,0 +1,548 @@ +====================================== +Using DevStack with neutron Networking +====================================== + +This guide will walk you through using OpenStack neutron with the ML2 +plugin and the Open vSwitch mechanism driver. + + +.. _single-interface-ovs: + +Using Neutron with a Single Interface +===================================== + +In some instances, like on a developer laptop, there is only one +network interface that is available. In this scenario, the physical +interface is added to the Open vSwitch bridge, and the IP address of +the laptop is migrated onto the bridge interface. That way, the +physical interface can be used to transmit self service project +network traffic, the OpenStack API traffic, and management traffic. + + +.. warning:: + + When using a single interface networking setup, there will be a + temporary network outage as your IP address is moved from the + physical NIC of your machine, to the OVS bridge. If you are SSH'd + into the machine from another computer, there is a risk of being + disconnected from your ssh session (due to arp cache + invalidation), which would stop the stack.sh or leave it in an + unfinished state. In these cases, start stack.sh inside its own + screen session so it can continue to run. + + +Physical Network Setup +---------------------- + +In most cases where DevStack is being deployed with a single +interface, there is a hardware router that is being used for external +connectivity and DHCP. The developer machine is connected to this +network and is on a shared subnet with other machines. The +`local.conf` exhibited here assumes that 1500 is a reasonable MTU to +use on that network. + +.. image:: /assets/images/neutron-network-1.png + :alt: Network configuration for a single DevStack node + + +DevStack Configuration +---------------------- + +The following is a complete `local.conf` for the host named +`devstack-1`. It will run all the API and services, as well as +serving as a hypervisor for guest instances. 
+ +:: + + [[local|localrc]] + HOST_IP=172.18.161.6 + SERVICE_HOST=172.18.161.6 + MYSQL_HOST=172.18.161.6 + RABBIT_HOST=172.18.161.6 + GLANCE_HOSTPORT=172.18.161.6:9292 + ADMIN_PASSWORD=secret + DATABASE_PASSWORD=secret + RABBIT_PASSWORD=secret + SERVICE_PASSWORD=secret + + ## Neutron options + Q_USE_SECGROUP=True + FLOATING_RANGE="172.18.161.0/24" + IPV4_ADDRS_SAFE_TO_USE="10.0.0.0/22" + Q_FLOATING_ALLOCATION_POOL=start=172.18.161.250,end=172.18.161.254 + PUBLIC_NETWORK_GATEWAY="172.18.161.1" + PUBLIC_INTERFACE=eth0 + + # Open vSwitch provider networking configuration + Q_USE_PROVIDERNET_FOR_PUBLIC=True + OVS_PHYSICAL_BRIDGE=br-ex + PUBLIC_BRIDGE=br-ex + OVS_BRIDGE_MAPPINGS=public:br-ex + + +Adding Additional Compute Nodes +------------------------------- + +Let's suppose that after installing DevStack on the first host, you +also want to do multinode testing and networking. + +Physical Network Setup +~~~~~~~~~~~~~~~~~~~~~~ + +.. image:: /assets/images/neutron-network-2.png + :alt: Network configuration for multiple DevStack nodes + +After DevStack installs and configures Neutron, traffic from guest VMs +flows out of `devstack-2` (the compute node) and is encapsulated in a +VXLAN tunnel back to `devstack-1` (the control node) where the L3 +agent is running. + +:: + + stack@devstack-2:~/devstack$ sudo ovs-vsctl show + 8992d965-0ba0-42fd-90e9-20ecc528bc29 + Bridge br-int + fail_mode: secure + Port br-int + Interface br-int + type: internal + Port patch-tun + Interface patch-tun + type: patch + options: {peer=patch-int} + Bridge br-tun + fail_mode: secure + Port "vxlan-c0a801f6" + Interface "vxlan-c0a801f6" + type: vxlan + options: {df_default="true", in_key=flow, local_ip="172.18.161.7", out_key=flow, remote_ip="172.18.161.6"} + Port patch-int + Interface patch-int + type: patch + options: {peer=patch-tun} + Port br-tun + Interface br-tun + type: internal + ovs_version: "2.0.2" + +Open vSwitch on the control node, where the L3 agent runs, is +configured to de-encapsulate traffic from compute nodes, then forward +it over the `br-ex` bridge, where `eth0` is attached. + +:: + + stack@devstack-1:~/devstack$ sudo ovs-vsctl show + 422adeea-48d1-4a1f-98b1-8e7239077964 + Bridge br-tun + fail_mode: secure + Port br-tun + Interface br-tun + type: internal + Port patch-int + Interface patch-int + type: patch + options: {peer=patch-tun} + Port "vxlan-c0a801d8" + Interface "vxlan-c0a801d8" + type: vxlan + options: {df_default="true", in_key=flow, local_ip="172.18.161.6", out_key=flow, remote_ip="172.18.161.7"} + Bridge br-ex + Port phy-br-ex + Interface phy-br-ex + type: patch + options: {peer=int-br-ex} + Port "eth0" + Interface "eth0" + Port br-ex + Interface br-ex + type: internal + Bridge br-int + fail_mode: secure + Port "tapce66332d-ea" + tag: 1 + Interface "tapce66332d-ea" + type: internal + Port "qg-65e5a4b9-15" + tag: 2 + Interface "qg-65e5a4b9-15" + type: internal + Port "qr-33e5e471-88" + tag: 1 + Interface "qr-33e5e471-88" + type: internal + Port "qr-acbe9951-70" + tag: 1 + Interface "qr-acbe9951-70" + type: internal + Port br-int + Interface br-int + type: internal + Port patch-tun + Interface patch-tun + type: patch + options: {peer=patch-int} + Port int-br-ex + Interface int-br-ex + type: patch + options: {peer=phy-br-ex} + ovs_version: "2.0.2" + +`br-int` is a bridge that the Open vSwitch mechanism driver creates, +which is used as the "integration bridge" where ports are created, and +plugged into the virtual switching fabric. 
`br-ex` is an OVS bridge +that is used to connect physical ports (like `eth0`), so that floating +IP traffic for project networks can be received from the physical +network infrastructure (and the internet), and routed to self service +project network ports. `br-tun` is a tunnel bridge that is used to +connect OpenStack nodes (like `devstack-2`) together. This bridge is +used so that project network traffic, using the VXLAN tunneling +protocol, flows between each compute node where project instances run. + +DevStack Compute Configuration +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The host `devstack-2` has a very minimal `local.conf`. + +:: + + [[local|localrc]] + HOST_IP=172.18.161.7 + SERVICE_HOST=172.18.161.6 + MYSQL_HOST=172.18.161.6 + RABBIT_HOST=172.18.161.6 + GLANCE_HOSTPORT=172.18.161.6:9292 + ADMIN_PASSWORD=secret + MYSQL_PASSWORD=secret + RABBIT_PASSWORD=secret + SERVICE_PASSWORD=secret + + ## Neutron options + PUBLIC_INTERFACE=eth0 + ENABLED_SERVICES=n-cpu,rabbit,q-agt,placement-client + +Network traffic from `eth0` on the compute nodes is then NAT'd by the +controller node that runs Neutron's `neutron-l3-agent` and provides L3 +connectivity. + + +Neutron Networking with Open vSwitch and Provider Networks +========================================================== + +In some instances, it is desirable to use neutron's provider +networking extension, so that networks that are configured on an +external router can be utilized by neutron, and instances created via +Nova can attach to the network managed by the external router. + +For example, in some lab environments, a hardware router has been +pre-configured by another party, and an OpenStack developer has been +given a VLAN tag and IP address range, so that instances created via +DevStack will use the external router for L3 connectivity, as opposed +to the neutron L3 service. + +Physical Network Setup +---------------------- + +.. image:: /assets/images/neutron-network-3.png + :alt: Network configuration for provider networks + +On a compute node, the first interface, eth0 is used for the OpenStack +management (API, message bus, etc) as well as for ssh for an +administrator to access the machine. + +:: + + stack@compute:~$ ifconfig eth0 + eth0 Link encap:Ethernet HWaddr bc:16:65:20:af:fc + inet addr:10.0.0.3 + +eth1 is manually configured at boot to not have an IP address. +Consult your operating system documentation for the appropriate +technique. For Ubuntu, the contents of `/etc/network/interfaces` +contains: + +:: + + auto eth1 + iface eth1 inet manual + up ifconfig $IFACE 0.0.0.0 up + down ifconfig $IFACE 0.0.0.0 down + +The second physical interface, eth1 is added to a bridge (in this case +named br-ex), which is used to forward network traffic from guest VMs. + +:: + + stack@compute:~$ sudo ovs-vsctl add-br br-ex + stack@compute:~$ sudo ovs-vsctl add-port br-ex eth1 + stack@compute:~$ sudo ovs-vsctl show + 9a25c837-32ab-45f6-b9f2-1dd888abcf0f + Bridge br-ex + Port br-ex + Interface br-ex + type: internal + Port phy-br-ex + Interface phy-br-ex + type: patch + options: {peer=int-br-ex} + Port "eth1" + Interface "eth1" + + +Service Configuration +--------------------- + +**Control Node** + +In this example, the control node will run the majority of the +OpenStack API and management services (keystone, glance, +nova, neutron) + + +**Compute Nodes** + +In this example, the nodes that will host guest instances will run +the ``neutron-openvswitch-agent`` for network connectivity, as well as +the compute service ``nova-compute``. 
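+
+Once the nodes are stacked with the configuration shown below, a quick way to
+confirm this split is working is to list the registered agents and compute
+services from the controller (an illustrative check; the exact agent list
+depends on which services you enabled)::
+
+    . openrc admin admin
+    openstack network agent list
+    openstack compute service list
+
+Each compute node should report an alive Open vSwitch agent and a
+``nova-compute`` service.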
+
+DevStack Configuration
+----------------------
+
+.. _ovs-provider-network-controller:
+
+The following is a snippet of the DevStack configuration on the
+controller node.
+
+::
+
+    HOST_IP=10.0.0.2
+    SERVICE_HOST=10.0.0.2
+    MYSQL_HOST=10.0.0.2
+    RABBIT_HOST=10.0.0.2
+    GLANCE_HOSTPORT=10.0.0.2:9292
+    PUBLIC_INTERFACE=eth1
+
+    ADMIN_PASSWORD=secret
+    MYSQL_PASSWORD=secret
+    RABBIT_PASSWORD=secret
+    SERVICE_PASSWORD=secret
+
+    ## Neutron options
+    Q_USE_SECGROUP=True
+    ENABLE_TENANT_VLANS=True
+    TENANT_VLAN_RANGE=3001:4000
+    PHYSICAL_NETWORK=default
+    OVS_PHYSICAL_BRIDGE=br-ex
+
+    Q_USE_PROVIDER_NETWORKING=True
+
+    disable_service q-l3
+
+    ## Neutron Networking options used to create Neutron Subnets
+
+    IPV4_ADDRS_SAFE_TO_USE="203.0.113.0/24"
+    NETWORK_GATEWAY=203.0.113.1
+    PROVIDER_SUBNET_NAME="provider_net"
+    PROVIDER_NETWORK_TYPE="vlan"
+    SEGMENTATION_ID=2010
+    USE_SUBNETPOOL=False
+
+In this configuration we are defining IPV4_ADDRS_SAFE_TO_USE to be a
+publicly routed IPv4 subnet. In this specific instance we are using
+the special TEST-NET-3 subnet defined in `RFC 5737
+<https://datatracker.ietf.org/doc/html/rfc5737>`_, which is reserved for
+documentation. In your DevStack setup, IPV4_ADDRS_SAFE_TO_USE would be
+a public IP address range that you or your organization has allocated
+to you, so that you could access your instances from the public
+internet.
+
+The following is the DevStack configuration on compute node 1.
+
+::
+
+    HOST_IP=10.0.0.3
+    SERVICE_HOST=10.0.0.2
+    MYSQL_HOST=10.0.0.2
+    RABBIT_HOST=10.0.0.2
+    GLANCE_HOSTPORT=10.0.0.2:9292
+    ADMIN_PASSWORD=secret
+    MYSQL_PASSWORD=secret
+    RABBIT_PASSWORD=secret
+    SERVICE_PASSWORD=secret
+
+    # Services that a compute node runs
+    ENABLED_SERVICES=n-cpu,rabbit,q-agt
+
+    ## Open vSwitch provider networking options
+    PHYSICAL_NETWORK=default
+    OVS_PHYSICAL_BRIDGE=br-ex
+    PUBLIC_INTERFACE=eth1
+    Q_USE_PROVIDER_NETWORKING=True
+
+Compute node 2's configuration will be exactly the same, except
+``HOST_IP`` will be ``10.0.0.4``.
+
+When DevStack is configured to use provider networking (i.e.
+``Q_USE_PROVIDER_NETWORKING`` is set to ``True``), it will
+automatically add the network interface defined in
+``PUBLIC_INTERFACE`` to the ``OVS_PHYSICAL_BRIDGE``.
+
+For example, with the above configuration, a bridge named ``br-ex`` is
+created and managed by Open vSwitch, and the second interface on the
+compute node, ``eth1``, is attached to the bridge to forward traffic
+sent by guest VMs.
+
+Miscellaneous Tips
+==================
+
+Non-Standard MTU on the Physical Network
+----------------------------------------
+
+Neutron by default uses an MTU of 1500 bytes, which is the standard
+MTU for Ethernet.
+
+A different MTU can be specified by adding the following to the
+Neutron section of `local.conf`. For example, if you have network
+equipment that supports jumbo frames, you could set the MTU to 9000
+bytes by adding the following:
+
+::
+
+    [[post-config|/$Q_PLUGIN_CONF_FILE]]
+    global_physnet_mtu = 9000
+
+
+Disabling Next Generation Firewall Tools
+----------------------------------------
+
+DevStack does not properly operate with modern firewall tools. Specifically,
+it will appear as if the guest VM can access the external network via ICMP,
+but UDP and TCP packets will not be delivered to the guest VM. The root cause
+of the issue is that both ufw (Uncomplicated Firewall) and firewalld (Fedora's
+firewall manager) apply firewall rules to all interfaces in the system, rather
+than per device. One solution to this problem is to revert to iptables
+functionality.
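+
+Before changing anything, it can help to check which firewall manager is
+actually active on the host. Both commands below are standard, though ``ufw``
+is usually only present on Ubuntu or Debian and ``firewalld`` on
+Fedora-family distributions:
+
+::
+
+    systemctl is-active firewalld
+    sudo ufw status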
+
+To get a functional firewall configuration for Fedora, do the following:
+
+::
+
+    sudo service iptables save
+    sudo systemctl disable firewalld
+    sudo systemctl enable iptables
+    sudo systemctl stop firewalld
+    sudo systemctl start iptables
+
+
+To get a functional firewall configuration for distributions containing ufw,
+disable ufw. Note that ufw is generally not enabled by default in Ubuntu. To
+disable ufw if it was enabled, do the following:
+
+::
+
+    sudo service iptables save
+    sudo ufw disable
+
+Configuring Extension Drivers for the ML2 Plugin
+------------------------------------------------
+
+Extension drivers for the ML2 plugin are set with the variable
+``Q_ML2_PLUGIN_EXT_DRIVERS``, which includes the 'port_security' extension
+by default. If you want to remove all the extension drivers (even
+'port_security'), set ``Q_ML2_PLUGIN_EXT_DRIVERS`` to blank.
+
+
+Using MacVTap instead of Open vSwitch
+------------------------------------------
+
+Security groups are not supported by the MacVTap agent. Due to that, devstack
+configures the NoopFirewall driver on the compute node.
+
+The MacVTap agent does not support the l3, dhcp and metadata agents. Due to
+that, you can choose between the following deployment scenarios:
+
+Single node with provider networks using config drive and external l3, dhcp
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+This scenario applies if l3 and dhcp services are provided externally, or if
+you do not require them.
+
+
+::
+
+    [[local|localrc]]
+    HOST_IP=10.0.0.2
+    SERVICE_HOST=10.0.0.2
+    MYSQL_HOST=10.0.0.2
+    RABBIT_HOST=10.0.0.2
+    ADMIN_PASSWORD=secret
+    MYSQL_PASSWORD=secret
+    RABBIT_PASSWORD=secret
+    SERVICE_PASSWORD=secret
+
+    Q_ML2_PLUGIN_MECHANISM_DRIVERS=macvtap
+    Q_USE_PROVIDER_NETWORKING=True
+
+    enable_plugin neutron https://opendev.org/openstack/neutron
+
+    ## MacVTap agent options
+    Q_AGENT=macvtap
+    PHYSICAL_NETWORK=default
+
+    IPV4_ADDRS_SAFE_TO_USE="203.0.113.0/24"
+    NETWORK_GATEWAY=203.0.113.1
+    PROVIDER_SUBNET_NAME="provider_net"
+    PROVIDER_NETWORK_TYPE="vlan"
+    SEGMENTATION_ID=2010
+    USE_SUBNETPOOL=False
+
+    [[post-config|/$Q_PLUGIN_CONF_FILE]]
+    [macvtap]
+    physical_interface_mappings = $PHYSICAL_NETWORK:eth1
+
+    [[post-config|$NOVA_CONF]]
+    force_config_drive = True
+
+
+Multi node with MacVTap compute node
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+This scenario applies if you require OpenStack-provided l3, dhcp or metadata
+services. Those are hosted on a separate controller and network node, running
+some other l2 agent technology (in this example Open vSwitch). This node needs
+to be configured for VLAN tenant networks.
+
+For OVS, a configuration similar to the one described in the
+:ref:`OVS Provider Network <ovs-provider-network-controller>` section can be
+used. Just add the following line to this local.conf, which also loads
+the MacVTap mechanism driver:
+
+::
+
+    [[local|localrc]]
+    ...
+    Q_ML2_PLUGIN_MECHANISM_DRIVERS=openvswitch,macvtap
+    ...
+ +For the MacVTap compute node, use this local.conf: + +:: + + HOST_IP=10.0.0.3 + SERVICE_HOST=10.0.0.2 + MYSQL_HOST=10.0.0.2 + RABBIT_HOST=10.0.0.2 + ADMIN_PASSWORD=secret + MYSQL_PASSWORD=secret + RABBIT_PASSWORD=secret + SERVICE_PASSWORD=secret + + # Services that a compute node runs + disable_all_services + enable_plugin neutron https://opendev.org/openstack/neutron + ENABLED_SERVICES+=n-cpu,q-agt + + ## MacVTap agent options + Q_AGENT=macvtap + PHYSICAL_NETWORK=default + + [[post-config|/$Q_PLUGIN_CONF_FILE]] + [macvtap] + physical_interface_mappings = $PHYSICAL_NETWORK:eth1 diff --git a/doc/source/guides/nova.rst b/doc/source/guides/nova.rst new file mode 100644 index 0000000000..6b8aabf8db --- /dev/null +++ b/doc/source/guides/nova.rst @@ -0,0 +1,136 @@ +================= +Nova and DevStack +================= + +This is a rough guide to various configuration parameters for nova +running with DevStack. + + +nova-serialproxy +================ + +In Juno, nova implemented a `spec +`_ +to allow read/write access to the serial console of an instance via +`nova-serialproxy +`_. + +The service can be enabled by adding ``n-sproxy`` to +``ENABLED_SERVICES``. Further options can be enabled via +``local.conf``, e.g. + +:: + + [[post-config|$NOVA_CONF]] + [serial_console] + # + # Options defined in nova.cmd.serialproxy + # + + # Host on which to listen for incoming requests (string value) + #serialproxy_host=0.0.0.0 + + # Port on which to listen for incoming requests (integer + # value) + #serialproxy_port=6083 + + + # + # Options defined in nova.console.serial + # + + # Enable serial console related features (boolean value) + #enabled=false + # Do not set this manually. Instead enable the service as + # outlined above. + + # Range of TCP ports to use for serial ports on compute hosts + # (string value) + #port_range=10000:20000 + + # Location of serial console proxy. (string value) + #base_url=ws://127.0.0.1:6083/ + + # IP address on which instance serial console should listen + # (string value) + #listen=127.0.0.1 + + # The address to which proxy clients (like nova-serialproxy) + # should connect (string value) + #proxyclient_address=127.0.0.1 + + +Enabling the service is enough to be functional for a single machine DevStack. + +These config options are defined in `nova.conf.serial_console +`_. + +For more information on OpenStack configuration see the `OpenStack +Compute Service Configuration Reference +`_ + + +Fake virt driver +================ + +Nova has a `fake virt driver`_ which can be used for scale testing the control +plane services or testing "move" operations between fake compute nodes, for +example cold/live migration, evacuate and unshelve. + +The fake virt driver does not communicate with any hypervisor, it just reports +some fake resource inventory values and keeps track of the state of the +"guests" created, moved and deleted. It is not feature-complete with the +compute API but is good enough for most API testing, and is also used within +the nova functional tests themselves so is fairly robust. + +.. _fake virt driver: https://opendev.org/openstack/nova/src/branch/master/nova/virt/fake.py + +Configuration +------------- + +Set the following in your devstack ``local.conf``: + +.. code-block:: ini + + [[local|localrc]] + VIRT_DRIVER=fake + NUMBER_FAKE_NOVA_COMPUTE= + +The ``NUMBER_FAKE_NOVA_COMPUTE`` variable controls the number of fake +``nova-compute`` services to run and defaults to 1. 
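+
+After ``stack.sh`` completes, you can verify that the expected number of fake
+compute services registered themselves, for example:
+
+.. code-block:: shell
+
+    $ source openrc admin admin
+    $ openstack compute service list --service nova-compute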
+ +When ``VIRT_DRIVER=fake`` is used, devstack will disable quota checking in +nova and neutron automatically. However, other services, like cinder, will +still enforce quota limits by default. + +Scaling +------- + +The actual value to use for ``NUMBER_FAKE_NOVA_COMPUTE`` depends on factors +such as: + +* The size of the host (physical or virtualized) on which devstack is running. +* The number of API workers. By default, devstack will run ``max($nproc/2, 2)`` + workers per API service. If you are running several fake compute services on + a single host, then consider setting ``API_WORKERS=1`` in ``local.conf``. + +In addition, while quota will be disabled in neutron, there is no fake ML2 +backend for neutron so creating fake VMs will still result in real ports being +created. To create servers without networking, you can specify ``--nic=none`` +when creating the server, for example: + +.. code-block:: shell + + $ openstack --os-compute-api-version 2.37 server create --flavor cirros256 \ + --image cirros-0.6.3-x86_64-disk --nic none --wait test-server + +.. note:: ``--os-compute-api-version`` greater than or equal to 2.37 is + required to use ``--nic=none``. + +To avoid overhead from other services which you may not need, disable them in +your ``local.conf``, for example: + +.. code-block:: ini + + disable_service horizon + disable_service tempest diff --git a/doc/source/guides/single-machine.rst b/doc/source/guides/single-machine.rst new file mode 100644 index 0000000000..263fbb9d6f --- /dev/null +++ b/doc/source/guides/single-machine.rst @@ -0,0 +1,144 @@ +========================= +All-In-One Single Machine +========================= + +Things are about to get real! Using OpenStack in containers or VMs is +nice for kicking the tires, but doesn't compare to the feeling you get +with hardware. + +Prerequisites Linux & Network +============================= + +Minimal Install +--------------- + +You need to have a system with a fresh install of Linux. You can +download the `Minimal +CD `__ for +Ubuntu releases since DevStack will download & install all the +additional dependencies. The netinstall ISO is available for +`Fedora `__ +and +`CentOS/RHEL `__. +You may be tempted to use a desktop distro on a laptop, it will probably +work but you may need to tell Network Manager to keep its fingers off +the interface(s) that OpenStack uses for bridging. + +Network Configuration +--------------------- + +Determine the network configuration on the interface used to integrate +your OpenStack cloud with your existing network. For example, if the IPs +given out on your network by DHCP are 192.168.1.X - where X is between +100 and 200 you will be able to use IPs 201-254 for **floating ips**. + +To make things easier later change your host to use a static IP instead +of DHCP (i.e. 192.168.1.201). + +Installation shake and bake +=========================== + +Add your user +------------- + +We need to add a user to install DevStack. (if you created a user during +install you can skip this step and just give the user sudo privileges +below) + +.. code-block:: console + + $ sudo useradd -s /bin/bash -d /opt/stack -m stack + +Ensure home directory for the ``stack`` user has executable permission for all, +as RHEL based distros create it with ``700`` and Ubuntu 21.04+ with ``750`` +which can cause issues during deployment. + +.. code-block:: console + + $ sudo chmod +x /opt/stack + +Since this user will be making many changes to your system, it will need +to have sudo privileges: + +.. 
code-block:: console + + $ apt-get install sudo -y || dnf install -y sudo + $ echo "stack ALL=(ALL) NOPASSWD: ALL" | sudo tee /etc/sudoers.d/stack + +.. note:: On some systems you may need to use ``sudo visudo``. + +From here on you should use the user you created. **Logout** and +**login** as that user: + +.. code-block:: console + + $ sudo su stack && cd ~ + +Download DevStack +----------------- + +We'll grab the latest version of DevStack via https: + +.. code-block:: console + + $ sudo apt-get install git -y || sudo dnf install -y git + $ git clone https://opendev.org/openstack/devstack + $ cd devstack + +Run DevStack +------------ + +Now to configure ``stack.sh``. DevStack includes a sample in +``devstack/samples/local.conf``. Create ``local.conf`` as shown below to +do the following: + +- Set ``FLOATING_RANGE`` to a range not used on the local network, i.e. + 192.168.1.224/27. This configures IP addresses ending in 225-254 to + be used as floating IPs. +- Set ``FIXED_RANGE`` to configure the internal address space used by the + instances. +- Set the administrative password. This password is used for the + **admin** and **demo** accounts set up as OpenStack users. +- Set the MySQL administrative password. The default here is a random + hex string which is inconvenient if you need to look at the database + directly for anything. +- Set the RabbitMQ password. +- Set the service password. This is used by the OpenStack services + (Nova, Glance, etc) to authenticate with Keystone. + +.. warning:: Only use alphanumeric characters in your passwords, as some + services fail to work when using special characters. + +``local.conf`` should look something like this: + +.. code-block:: ini + + [[local|localrc]] + FLOATING_RANGE=192.168.1.224/27 + FIXED_RANGE=10.11.12.0/24 + ADMIN_PASSWORD=supersecret + DATABASE_PASSWORD=iheartdatabases + RABBIT_PASSWORD=flopsymopsy + SERVICE_PASSWORD=iheartksl + +.. note:: There is a sample :download:`local.conf ` file + under the *samples* directory in the devstack repository. + +Run DevStack: + +.. code-block:: console + + $ ./stack.sh + +A seemingly endless stream of activity ensues. When complete you will +see a summary of ``stack.sh``'s work, including the relevant URLs, +accounts and passwords to poke at your shiny new OpenStack. + +Using OpenStack +--------------- + +At this point you should be able to access the dashboard from other +computers on the local network. In this example that would be +http://192.168.1.201/ for the dashboard (aka Horizon). Launch VMs and if +you give them floating IPs and security group access those VMs will be +accessible from other machines on your network. diff --git a/doc/source/guides/single-vm.rst b/doc/source/guides/single-vm.rst new file mode 100644 index 0000000000..4272a4b180 --- /dev/null +++ b/doc/source/guides/single-vm.rst @@ -0,0 +1,103 @@ +==================== +All-In-One Single VM +==================== + +Use the cloud to build the cloud! Use your cloud to launch new versions +of OpenStack in about 5 minutes. If you break it, start over! The VMs +launched in the cloud will be slow as they are running in QEMU +(emulation), but their primary use is testing OpenStack development and +operation. + +Prerequisites Cloud & Image +=========================== + +Virtual Machine +--------------- + +DevStack should run in any virtual machine running a supported Linux +release. It will perform best with 4GB or more of RAM. 
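+
+For example, if the cloud you are building on is itself OpenStack, a suitable
+VM could be booted along these lines (the flavor, image and key names are
+illustrative and will differ on your cloud; ``user-data.txt`` is the
+cloud-config shown later in this guide):
+
+::
+
+    openstack server create --flavor m1.large --image ubuntu-24.04 \
+        --key-name mykey --user-data user-data.txt devstack-vm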
+ +OpenStack Deployment & cloud-init +--------------------------------- + +If the cloud service has an image with ``cloud-init`` pre-installed, use +it. You can get one from `Ubuntu's Daily +Build `__ site if necessary. This will +enable you to launch VMs with userdata that installs everything at boot +time. The userdata script below will install and run DevStack with a +minimal configuration. The use of ``cloud-init`` is outside the scope of +this document, refer to the ``cloud-init`` docs for more information. + +If you are directly using a hypervisor like Xen, kvm or VirtualBox you +can manually kick off the script below as a non-root user in a +bare-bones server installation. + +Installation shake and bake +=========================== + +Launching With Cloud-Init +------------------------- + +This cloud config grabs the latest version of DevStack via git, creates +a minimal ``local.conf`` file and kicks off ``stack.sh``. It should be +passed as the user-data file when booting the VM. + +:: + + #cloud-config + + users: + - default + - name: stack + lock_passwd: False + sudo: ["ALL=(ALL) NOPASSWD:ALL\nDefaults:stack !requiretty"] + shell: /bin/bash + + write_files: + - content: | + #!/bin/sh + DEBIAN_FRONTEND=noninteractive sudo apt-get -qqy update || sudo dnf update -qy + DEBIAN_FRONTEND=noninteractive sudo apt-get install -qqy git || sudo dnf install -qy git + sudo chown stack:stack /home/stack + cd /home/stack + git clone https://opendev.org/openstack/devstack + cd devstack + echo '[[local|localrc]]' > local.conf + echo ADMIN_PASSWORD=password >> local.conf + echo DATABASE_PASSWORD=password >> local.conf + echo RABBIT_PASSWORD=password >> local.conf + echo SERVICE_PASSWORD=password >> local.conf + ./stack.sh + path: /home/stack/start.sh + permissions: 0755 + + runcmd: + - su -l stack ./start.sh + +As DevStack will refuse to run as root, this configures ``cloud-init`` +to create a non-root user and run the ``start.sh`` script as that user. + +If you are using cloud-init and you have not +:ref:`enabled custom logging ` of the stack +output, then the stack output can be found in +``/var/log/cloud-init-output.log`` by default. + +Launching By Hand +----------------- + +Using a hypervisor directly, launch the VM and either manually perform +the steps in the embedded shell script above or copy it into the VM. + +Using OpenStack +--------------- + +At this point you should be able to access the dashboard. Launch VMs and +if you give them floating IPs, access those VMs from other machines on +your network. + +One interesting use case is for developers working on a VM on their +laptop. Once ``stack.sh`` has completed once, all of the pre-requisite +packages are installed in the VM and the source trees checked out. +Setting ``OFFLINE=True`` in ``local.conf`` enables ``stack.sh`` to run +multiple times without an Internet connection. DevStack, making hacking +at the lake possible since 2012! diff --git a/doc/source/hacking.rst b/doc/source/hacking.rst new file mode 100644 index 0000000000..a2bcf4fd67 --- /dev/null +++ b/doc/source/hacking.rst @@ -0,0 +1 @@ +.. include:: ../../HACKING.rst diff --git a/doc/source/index.rst b/doc/source/index.rst new file mode 100644 index 0000000000..a07bb84922 --- /dev/null +++ b/doc/source/index.rst @@ -0,0 +1,180 @@ +.. Documentation Architecture for the devstack docs. + + It is really easy for online docs to meander over time as people + attempt to add the small bit of additional information they think + people need, into an existing information architecture. 
In order to + prevent that we need to be a bit strict as to what's on this front + page. + + This should *only* be the quick start narrative. Which should end + with 2 sections: what you can do with devstack once it's set up, + and how to go beyond this setup. Both should be a set of quick + links to other documents to let people explore from there. + +DevStack +======== + +.. image:: assets/images/logo-blue.png + +DevStack is a series of extensible scripts used to quickly bring up a +complete OpenStack environment based on the latest versions of +everything from git master. It is used interactively as a development +environment and as the basis for much of the OpenStack project's +functional testing. + +The source is available at ``__. + +.. warning:: + + DevStack will make substantial changes to your system during + installation. Only run DevStack on servers or virtual machines that + are dedicated to this purpose. + +Quick Start ++++++++++++ + +Install Linux +------------- + +Start with a clean and minimal install of a Linux system. DevStack +attempts to support the two latest LTS releases of Ubuntu, +Rocky Linux 9 and openEuler. + +If you do not have a preference, Ubuntu 24.04 (Noble) is the +most tested, and will probably go the smoothest. + +Add Stack User (optional) +------------------------- + +DevStack should be run as a non-root user with sudo enabled +(standard logins to cloud images such as "ubuntu" or "cloud-user" +are usually fine). + +If you are not using a cloud image, you can create a separate `stack` user +to run DevStack with + +.. code-block:: console + + $ sudo useradd -s /bin/bash -d /opt/stack -m stack + +Ensure home directory for the ``stack`` user has executable permission for all, +as RHEL based distros create it with ``700`` and Ubuntu 21.04+ with ``750`` +which can cause issues during deployment. + +.. code-block:: console + + $ sudo chmod +x /opt/stack + +Since this user will be making many changes to your system, it should +have sudo privileges: + +.. code-block:: console + + $ echo "stack ALL=(ALL) NOPASSWD: ALL" | sudo tee /etc/sudoers.d/stack + $ sudo -u stack -i + +Download DevStack +----------------- + +.. code-block:: console + + $ git clone https://opendev.org/openstack/devstack + $ cd devstack + +The ``devstack`` repo contains a script that installs OpenStack and +templates for configuration files. + +Create a local.conf +------------------- + +Create a ``local.conf`` file with four passwords preset at the root of the +devstack git repo. + +.. code-block:: ini + + [[local|localrc]] + ADMIN_PASSWORD=secret + DATABASE_PASSWORD=$ADMIN_PASSWORD + RABBIT_PASSWORD=$ADMIN_PASSWORD + SERVICE_PASSWORD=$ADMIN_PASSWORD + +This is the minimum required config to get started with DevStack. + +.. note:: There is a sample :download:`local.conf ` file + under the *samples* directory in the devstack repository. + +.. warning:: Only use alphanumeric characters in your passwords, as some + services fail to work when using special characters. + +Start the install +----------------- + +.. code-block:: console + + $ ./stack.sh + +This will take 15 - 30 minutes, largely depending on the speed of +your internet connection. Many git trees and packages will be +installed during this process. + +Profit! +------- + +You now have a working DevStack! Congrats! + +Your devstack will have installed ``keystone``, ``glance``, ``nova``, +``placement``, ``cinder``, ``neutron``, and ``horizon``. Floating IPs +will be available, guests have access to the external world. 
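+
+A quick way to check that everything is up is to load the generated
+credentials and list the registered services, for example:
+
+.. code-block:: console
+
+   $ source openrc admin admin
+   $ openstack service list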
+ +You can access horizon to experience the web interface to +OpenStack, and manage vms, networks, volumes, and images from +there. + +You can ``source openrc`` in your shell, and then use the +``openstack`` command line tool to manage your devstack. + +You can :ref:`create a VM and SSH into it `. + +You can ``cd /opt/stack/tempest`` and run tempest tests that have +been configured to work with your devstack. + +You can :doc:`make code changes to OpenStack and validate them +`. + +Going further +------------- + +Learn more about our :doc:`configuration system ` to +customize devstack for your needs. Including making adjustments to the +default :doc:`networking `. + +Read :doc:`guides ` for specific setups people have (note: +guides are point in time contributions, and may not always be kept +up to date to the latest devstack). + +Enable :doc:`devstack plugins ` to support additional +services, features, and configuration not present in base devstack. + +Use devstack in your CI with :doc:`Ansible roles ` and +:doc:`Jobs ` for Zuul V3. Migrate your devstack Zuul V2 jobs to Zuul +V3 with this full migration :doc:`how-to `. + +Get :doc:`the big picture ` of what we are trying to do +with devstack, and help us by :doc:`contributing to the project +`. + +If you are a new contributor to devstack please refer: :doc:`contributor/contributing` + +.. toctree:: + :hidden: + + contributor/contributing + +Contents +++++++++ + +.. toctree:: + :glob: + :maxdepth: 2 + + * diff --git a/doc/source/networking.rst b/doc/source/networking.rst new file mode 100644 index 0000000000..10e1c3ff2c --- /dev/null +++ b/doc/source/networking.rst @@ -0,0 +1,238 @@ +===================== + DevStack Networking +===================== + +An important part of the DevStack experience is networking that works +by default for created guests. This might not be optimal for your +particular testing environment, so this document tries its best to +explain what's going on. + +Defaults +======== + +If you don't specify any configuration you will get the following: + +* neutron (including l3 with openvswitch) +* private project networks for each openstack project +* a floating ip range of 172.24.4.0/24 with the gateway of 172.24.4.1 +* the demo project configured with fixed ips on a subnet allocated from + the 10.0.0.0/22 range +* a ``br-ex`` interface controlled by neutron for all its networking + (this is not connected to any physical interfaces). +* DNS resolution for guests based on the resolv.conf for your host +* an ip masq rule that allows created guests to route out + +This creates an environment which is isolated to the single +host. Guests can get to the external network for package +updates. Tempest tests will work in this environment. + +.. note:: + + By default all OpenStack environments have security group rules + which block all inbound packets to guests. If you want to be able + to ssh / ping your created guests you should run the following. + + .. code-block:: bash + + openstack security group rule create --proto icmp --dst-port 0 default + openstack security group rule create --proto tcp --dst-port 22 default + +Locally Accessible Guests +========================= + +If you want to make your guests accessible from other machines on your +network, we have to connect ``br-ex`` to a physical interface. + +Dedicated Guest Interface +------------------------- + +If you have 2 or more interfaces on your devstack server, you can +allocate an interface to neutron to fully manage. 
This **should not** +be the same interface you use to ssh into the devstack server itself. + +This is done by setting with the ``PUBLIC_INTERFACE`` attribute. + +.. code-block:: bash + + [[local|localrc]] + PUBLIC_INTERFACE=eth1 + +That will put all layer 2 traffic from your guests onto the main +network. When running in this mode the ip masq rule is **not** added +in your devstack, you are responsible for making routing work on your +local network. + +Shared Guest Interface +---------------------- + +.. warning:: + + This is not a recommended configuration. Because of interactions + between OVS and bridging, if you reboot your box with active + networking you may lose network connectivity to your system. + +If you need your guests accessible on the network, but only have 1 +interface (using something like a NUC), you can share your one +network. But in order for this to work you need to manually set a lot +of addresses, and have them all exactly correct. + +.. code-block:: bash + + [[local|localrc]] + PUBLIC_INTERFACE=eth0 + HOST_IP=10.42.0.52 + FLOATING_RANGE=10.42.0.0/24 + PUBLIC_NETWORK_GATEWAY=10.42.0.1 + Q_FLOATING_ALLOCATION_POOL=start=10.42.0.250,end=10.42.0.254 + +In order for this scenario to work the floating ip network must match +the default networking on your server. This breaks HOST_IP detection, +as we exclude the floating range by default, so you have to specify +that manually. + +The ``PUBLIC_NETWORK_GATEWAY`` is the gateway that server would normally +use to get off the network. ``Q_FLOATING_ALLOCATION_POOL`` controls +the range of floating ips that will be handed out. As we are sharing +your existing network, you'll want to give it a slice that your local +dhcp server is not allocating. Otherwise you could easily have +conflicting ip addresses, and cause havoc with your local network. + + +Private Network Addressing +========================== + +The private networks addresses are controlled by the ``IPV4_ADDRS_SAFE_TO_USE`` +and the ``IPV6_ADDRS_SAFE_TO_USE`` variables. This allows users to specify one +single variable of safe internal IPs to use that will be referenced whether or +not subnetpools are in use. + +For IPv4, ``FIXED_RANGE`` and ``SUBNETPOOL_PREFIX_V4`` will just default to +the value of ``IPV4_ADDRS_SAFE_TO_USE`` directly. + +For IPv6, ``FIXED_RANGE_V6`` will default to the first /64 of the value of +``IPV6_ADDRS_SAFE_TO_USE``. If ``IPV6_ADDRS_SAFE_TO_USE`` is /64 or smaller, +``FIXED_RANGE_V6`` will just use the value of that directly. +``SUBNETPOOL_PREFIX_V6`` will just default to the value of +``IPV6_ADDRS_SAFE_TO_USE`` directly. + +.. _ssh: + +SSH access to instances +======================= + +To validate connectivity, you can create an instance using the +``$PRIVATE_NETWORK_NAME`` network (default: ``private``), create a floating IP +using the ``$PUBLIC_NETWORK_NAME`` network (default: ``public``), and attach +this floating IP to the instance: + +.. code-block:: shell + + openstack keypair create --public-key ~/.ssh/id_rsa.pub test-keypair + openstack server create --network private --key-name test-keypair ... test-server + fip_id=$(openstack floating ip create public -f value -c id) + openstack server add floating ip test-server ${fip_id} + +Once done, ensure you have enabled SSH and ICMP (ping) access for the security +group used for the instance. You can either create a custom security group and +specify it when creating the instance or add it after creation, or you can +modify the ``default`` security group created by default for each project. 
+Let's do the latter: + +.. code-block:: shell + + openstack security group rule create --proto icmp --dst-port 0 default + openstack security group rule create --proto tcp --dst-port 22 default + +Finally, SSH into the instance. If you used the Cirros instance uploaded by +default, then you can run the following: + +.. code-block:: shell + + openstack server ssh test-server -- -l cirros + +This will connect using the ``cirros`` user and the keypair you configured when +creating the instance. + +Remote SSH access to instances +============================== + +You can also SSH to created instances on your DevStack host from other hosts. +This can be helpful if you are e.g. deploying DevStack in a VM on an existing +cloud and wish to do development on your local machine. There are a few ways to +do this. + +.. rubric:: Configure instances to be locally accessible + +The most obvious way is to configure guests to be locally accessible, as +described `above `__. This has the advantage of +requiring no further effort on the client. However, it is more involved and +requires either support from your cloud or some inadvisable workarounds. + +.. rubric:: Use your DevStack host as a jump host + +You can choose to use your DevStack host as a jump host. To SSH to a instance +this way, pass the standard ``-J`` option to the ``openstack ssh`` / ``ssh`` +command. For example: + +.. code-block:: + + openstack server ssh test-server -- -l cirros -J username@devstack-host + +(where ``test-server`` is name of an existing instance, as described +:ref:`previously `, and ``username`` and ``devstack-host`` are the +username and hostname of your DevStack host). + +This can also be configured via your ``~/.ssh/config`` file, making it rather +effortless. However, it only allows SSH access. If you want to access e.g. a +web application on the instance, you will need to configure an SSH tunnel and +forward select ports using the ``-L`` option. For example, to forward HTTP +traffic: + +.. code-block:: + + openstack server ssh test-server -- -l cirros -L 8080:username@devstack-host:80 + +(where ``test-server`` is name of an existing instance, as described +:ref:`previously `, and ``username`` and ``devstack-host`` are the +username and hostname of your DevStack host). + +As you can imagine, this can quickly get out of hand, particularly for more +complex guest applications with multiple ports. + +.. rubric:: Use a proxy or VPN tool + +You can use a proxy or VPN tool to enable tunneling for the floating IP +address range of the ``$PUBLIC_NETWORK_NAME`` network (default: ``public``) +defined by ``$FLOATING_RANGE`` (default: ``172.24.4.0/24``). There are many +such tools available to do this. For example, we could use a useful utility +called `shuttle`__. To enable tunneling using ``shuttle``, first ensure you +have allowed SSH and HTTP(S) traffic to your DevStack host. Allowing HTTP(S) +traffic is necessary so you can use the OpenStack APIs remotely. How you do +this will depend on where your DevStack host is running. Once this is done, +install ``sshuttle`` on your localhost: + +.. code-block:: bash + + sudo apt-get install sshuttle || dnf install sshuttle + +Finally, start ``sshuttle`` on your localhost using the floating IP address +range. For example, assuming you are using the default value for +``$FLOATING_RANGE``, you can do: + +.. code-block:: bash + + sshuttle -r username@devstack-host 172.24.4.0/24 + +(where ``username`` and ``devstack-host`` are the username and hostname of your +DevStack host). 
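+
+Once an instance has a floating IP attached, addresses in the floating IP
+range are reachable directly from your local machine. For example, assuming
+the instance was assigned ``172.24.4.50`` (an illustrative address from the
+default range), you can check connectivity with:
+
+.. code-block:: bash
+
+    ping -c 3 172.24.4.50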
+ +You should now be able to create an instance and SSH into it: + +.. code-block:: bash + + openstack server ssh test-server -- -l cirros + +(where ``test-server`` is name of an existing instance, as described +:ref:`previously `) + +.. __: https://github.com/sshuttle/sshuttle diff --git a/doc/source/overview.rst b/doc/source/overview.rst new file mode 100644 index 0000000000..c978e8d2cf --- /dev/null +++ b/doc/source/overview.rst @@ -0,0 +1,71 @@ +======== +Overview +======== + +DevStack has evolved to support a large number of configuration options +and alternative platforms and support services. That evolution has grown +well beyond what was originally intended and the majority of +configuration combinations are rarely, if ever, tested. DevStack is not +a general OpenStack installer and was never meant to be everything to +everyone. + +Below is a list of what is specifically is supported (read that as +"tested") going forward. + +Supported Components +==================== + +Base OS +------- + +*The OpenStack Technical Committee (TC) has defined the current CI +strategy to include the latest Ubuntu release and the latest RHEL +release.* + +- Ubuntu: current LTS release plus current development release +- RHEL/CentOS/RockyLinux: current major release +- Other OS platforms may continue to be included but the maintenance of + those platforms shall not be assumed simply due to their presence. + Having a listed point-of-contact for each additional OS will greatly + increase its chance of being well-maintained. +- Patches for Ubuntu and/or RockyLinux will not be held up due to + side-effects on other OS platforms. + +Databases +--------- + +*As packaged by the host OS* + +- MySQL + +Queues +------ + +*As packaged by the host OS* + +- Rabbit + +Web Server +---------- + +*As packaged by the host OS* + +- Apache + +Services +-------- + +The default services configured by DevStack are Identity (keystone), +Object Storage (swift), Image Service (glance), Block Storage +(cinder), Compute (nova), Placement (placement), +Networking (neutron), Dashboard (horizon). + +Additional services not included directly in DevStack can be tied in to +``stack.sh`` using the :doc:`plugin mechanism ` to call +scripts that perform the configuration and startup of the service. + +Node Configurations +------------------- + +- single node +- multi-node configurations as are tested by the gate diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst new file mode 100644 index 0000000000..9185263443 --- /dev/null +++ b/doc/source/plugin-registry.rst @@ -0,0 +1,182 @@ +.. Note to patch submitters: + + # ============================= # + # THIS FILE IS AUTOGENERATED ! # + # ============================= # + + ** Plugins are found automatically and added to this list ** + + This file is created by a periodic proposal job. You should not + edit this file. + + You should edit the files data/devstack-plugins-registry.footer + data/devstack-plugins-registry.header to modify this text. + +========================== + DevStack Plugin Registry +========================== + +The following list is an automatically-generated collection of +available DevStack plugins. This includes, but is not limited to, +official OpenStack projects. 
+ + +======================================== === +Plugin Name URL +======================================== === +openstack/aetos `https://opendev.org/openstack/aetos `__ +openstack/aodh `https://opendev.org/openstack/aodh `__ +openstack/barbican `https://opendev.org/openstack/barbican `__ +openstack/blazar `https://opendev.org/openstack/blazar `__ +openstack/ceilometer `https://opendev.org/openstack/ceilometer `__ +openstack/cloudkitty `https://opendev.org/openstack/cloudkitty `__ +openstack/cyborg `https://opendev.org/openstack/cyborg `__ +openstack/designate `https://opendev.org/openstack/designate `__ +openstack/designate-tempest-plugin `https://opendev.org/openstack/designate-tempest-plugin `__ +openstack/devstack-plugin-amqp1 `https://opendev.org/openstack/devstack-plugin-amqp1 `__ +openstack/devstack-plugin-ceph `https://opendev.org/openstack/devstack-plugin-ceph `__ +openstack/devstack-plugin-container `https://opendev.org/openstack/devstack-plugin-container `__ +openstack/devstack-plugin-kafka `https://opendev.org/openstack/devstack-plugin-kafka `__ +openstack/devstack-plugin-nfs `https://opendev.org/openstack/devstack-plugin-nfs `__ +openstack/devstack-plugin-open-cas `https://opendev.org/openstack/devstack-plugin-open-cas `__ +openstack/devstack-plugin-prometheus `https://opendev.org/openstack/devstack-plugin-prometheus `__ +openstack/freezer `https://opendev.org/openstack/freezer `__ +openstack/freezer-api `https://opendev.org/openstack/freezer-api `__ +openstack/freezer-tempest-plugin `https://opendev.org/openstack/freezer-tempest-plugin `__ +openstack/freezer-web-ui `https://opendev.org/openstack/freezer-web-ui `__ +openstack/grian-ui `https://opendev.org/openstack/grian-ui `__ +openstack/heat `https://opendev.org/openstack/heat `__ +openstack/heat-dashboard `https://opendev.org/openstack/heat-dashboard `__ +openstack/ironic `https://opendev.org/openstack/ironic `__ +openstack/ironic-inspector `https://opendev.org/openstack/ironic-inspector `__ +openstack/ironic-prometheus-exporter `https://opendev.org/openstack/ironic-prometheus-exporter `__ +openstack/ironic-ui `https://opendev.org/openstack/ironic-ui `__ +openstack/keystone `https://opendev.org/openstack/keystone `__ +openstack/kuryr-libnetwork `https://opendev.org/openstack/kuryr-libnetwork `__ +openstack/magnum `https://opendev.org/openstack/magnum `__ +openstack/magnum-ui `https://opendev.org/openstack/magnum-ui `__ +openstack/manila `https://opendev.org/openstack/manila `__ +openstack/manila-tempest-plugin `https://opendev.org/openstack/manila-tempest-plugin `__ +openstack/manila-ui `https://opendev.org/openstack/manila-ui `__ +openstack/masakari `https://opendev.org/openstack/masakari `__ +openstack/mistral `https://opendev.org/openstack/mistral `__ +openstack/monasca-api `https://opendev.org/openstack/monasca-api `__ +openstack/monasca-events-api `https://opendev.org/openstack/monasca-events-api `__ +openstack/monasca-tempest-plugin `https://opendev.org/openstack/monasca-tempest-plugin `__ +openstack/networking-bagpipe `https://opendev.org/openstack/networking-bagpipe `__ +openstack/networking-baremetal `https://opendev.org/openstack/networking-baremetal `__ +openstack/networking-bgpvpn `https://opendev.org/openstack/networking-bgpvpn `__ +openstack/networking-generic-switch `https://opendev.org/openstack/networking-generic-switch `__ +openstack/networking-sfc `https://opendev.org/openstack/networking-sfc `__ +openstack/neutron `https://opendev.org/openstack/neutron `__ +openstack/neutron-dynamic-routing 
`https://opendev.org/openstack/neutron-dynamic-routing `__ +openstack/neutron-fwaas `https://opendev.org/openstack/neutron-fwaas `__ +openstack/neutron-fwaas-dashboard `https://opendev.org/openstack/neutron-fwaas-dashboard `__ +openstack/neutron-tempest-plugin `https://opendev.org/openstack/neutron-tempest-plugin `__ +openstack/neutron-vpnaas `https://opendev.org/openstack/neutron-vpnaas `__ +openstack/neutron-vpnaas-dashboard `https://opendev.org/openstack/neutron-vpnaas-dashboard `__ +openstack/nova `https://opendev.org/openstack/nova `__ +openstack/octavia `https://opendev.org/openstack/octavia `__ +openstack/octavia-dashboard `https://opendev.org/openstack/octavia-dashboard `__ +openstack/octavia-tempest-plugin `https://opendev.org/openstack/octavia-tempest-plugin `__ +openstack/openstacksdk `https://opendev.org/openstack/openstacksdk `__ +openstack/osprofiler `https://opendev.org/openstack/osprofiler `__ +openstack/ovn-bgp-agent `https://opendev.org/openstack/ovn-bgp-agent `__ +openstack/ovn-octavia-provider `https://opendev.org/openstack/ovn-octavia-provider `__ +openstack/rally-openstack `https://opendev.org/openstack/rally-openstack `__ +openstack/shade `https://opendev.org/openstack/shade `__ +openstack/skyline-apiserver `https://opendev.org/openstack/skyline-apiserver `__ +openstack/storlets `https://opendev.org/openstack/storlets `__ +openstack/tacker `https://opendev.org/openstack/tacker `__ +openstack/tap-as-a-service `https://opendev.org/openstack/tap-as-a-service `__ +openstack/telemetry-tempest-plugin `https://opendev.org/openstack/telemetry-tempest-plugin `__ +openstack/trove `https://opendev.org/openstack/trove `__ +openstack/trove-dashboard `https://opendev.org/openstack/trove-dashboard `__ +openstack/venus `https://opendev.org/openstack/venus `__ +openstack/venus-dashboard `https://opendev.org/openstack/venus-dashboard `__ +openstack/vitrage `https://opendev.org/openstack/vitrage `__ +openstack/vitrage-dashboard `https://opendev.org/openstack/vitrage-dashboard `__ +openstack/vitrage-tempest-plugin `https://opendev.org/openstack/vitrage-tempest-plugin `__ +openstack/watcher `https://opendev.org/openstack/watcher `__ +openstack/watcher-dashboard `https://opendev.org/openstack/watcher-dashboard `__ +openstack/whitebox-tempest-plugin `https://opendev.org/openstack/whitebox-tempest-plugin `__ +openstack/zaqar `https://opendev.org/openstack/zaqar `__ +openstack/zaqar-ui `https://opendev.org/openstack/zaqar-ui `__ +openstack/zun `https://opendev.org/openstack/zun `__ +openstack/zun-ui `https://opendev.org/openstack/zun-ui `__ +performa/os-faults `https://opendev.org/performa/os-faults `__ +starlingx/config `https://opendev.org/starlingx/config `__ +starlingx/fault `https://opendev.org/starlingx/fault `__ +starlingx/ha `https://opendev.org/starlingx/ha `__ +starlingx/integ `https://opendev.org/starlingx/integ `__ +starlingx/metal `https://opendev.org/starlingx/metal `__ +starlingx/nfv `https://opendev.org/starlingx/nfv `__ +starlingx/update `https://opendev.org/starlingx/update `__ +vexxhost/openstack-operator `https://opendev.org/vexxhost/openstack-operator `__ +x/almanach `https://opendev.org/x/almanach `__ +x/bilean `https://opendev.org/x/bilean `__ +x/broadview-collector `https://opendev.org/x/broadview-collector `__ +x/collectd-openstack-plugins `https://opendev.org/x/collectd-openstack-plugins `__ +x/devstack-plugin-additional-pkg-repos `https://opendev.org/x/devstack-plugin-additional-pkg-repos `__ +x/devstack-plugin-glusterfs 
`https://opendev.org/x/devstack-plugin-glusterfs `__ +x/devstack-plugin-hdfs `https://opendev.org/x/devstack-plugin-hdfs `__ +x/devstack-plugin-libvirt-qemu `https://opendev.org/x/devstack-plugin-libvirt-qemu `__ +x/devstack-plugin-mariadb `https://opendev.org/x/devstack-plugin-mariadb `__ +x/devstack-plugin-tobiko `https://opendev.org/x/devstack-plugin-tobiko `__ +x/devstack-plugin-vmax `https://opendev.org/x/devstack-plugin-vmax `__ +x/drbd-devstack `https://opendev.org/x/drbd-devstack `__ +x/fenix `https://opendev.org/x/fenix `__ +x/gce-api `https://opendev.org/x/gce-api `__ +x/glare `https://opendev.org/x/glare `__ +x/group-based-policy `https://opendev.org/x/group-based-policy `__ +x/gyan `https://opendev.org/x/gyan `__ +x/horizon-mellanox `https://opendev.org/x/horizon-mellanox `__ +x/ironic-staging-drivers `https://opendev.org/x/ironic-staging-drivers `__ +x/kingbird `https://opendev.org/x/kingbird `__ +x/meteos `https://opendev.org/x/meteos `__ +x/meteos-ui `https://opendev.org/x/meteos-ui `__ +x/mixmatch `https://opendev.org/x/mixmatch `__ +x/mogan `https://opendev.org/x/mogan `__ +x/mogan-ui `https://opendev.org/x/mogan-ui `__ +x/networking-6wind `https://opendev.org/x/networking-6wind `__ +x/networking-ansible `https://opendev.org/x/networking-ansible `__ +x/networking-arista `https://opendev.org/x/networking-arista `__ +x/networking-brocade `https://opendev.org/x/networking-brocade `__ +x/networking-cisco `https://opendev.org/x/networking-cisco `__ +x/networking-cumulus `https://opendev.org/x/networking-cumulus `__ +x/networking-dpm `https://opendev.org/x/networking-dpm `__ +x/networking-fortinet `https://opendev.org/x/networking-fortinet `__ +x/networking-hpe `https://opendev.org/x/networking-hpe `__ +x/networking-huawei `https://opendev.org/x/networking-huawei `__ +x/networking-infoblox `https://opendev.org/x/networking-infoblox `__ +x/networking-l2gw `https://opendev.org/x/networking-l2gw `__ +x/networking-lagopus `https://opendev.org/x/networking-lagopus `__ +x/networking-mlnx `https://opendev.org/x/networking-mlnx `__ +x/networking-nec `https://opendev.org/x/networking-nec `__ +x/networking-omnipath `https://opendev.org/x/networking-omnipath `__ +x/networking-opencontrail `https://opendev.org/x/networking-opencontrail `__ +x/networking-ovs-dpdk `https://opendev.org/x/networking-ovs-dpdk `__ +x/networking-plumgrid `https://opendev.org/x/networking-plumgrid `__ +x/networking-spp `https://opendev.org/x/networking-spp `__ +x/networking-vpp `https://opendev.org/x/networking-vpp `__ +x/networking-vsphere `https://opendev.org/x/networking-vsphere `__ +x/neutron-classifier `https://opendev.org/x/neutron-classifier `__ +x/nova-dpm `https://opendev.org/x/nova-dpm `__ +x/nova-mksproxy `https://opendev.org/x/nova-mksproxy `__ +x/oaktree `https://opendev.org/x/oaktree `__ +x/omni `https://opendev.org/x/omni `__ +x/os-xenapi `https://opendev.org/x/os-xenapi `__ +x/picasso `https://opendev.org/x/picasso `__ +x/rsd-virt-for-nova `https://opendev.org/x/rsd-virt-for-nova `__ +x/scalpels `https://opendev.org/x/scalpels `__ +x/slogging `https://opendev.org/x/slogging `__ +x/stackube `https://opendev.org/x/stackube `__ +x/tatu `https://opendev.org/x/tatu `__ +x/trio2o `https://opendev.org/x/trio2o `__ +x/valet `https://opendev.org/x/valet `__ +x/vmware-nsx `https://opendev.org/x/vmware-nsx `__ +x/vmware-vspc `https://opendev.org/x/vmware-vspc `__ +x/whitebox-neutron-tempest-plugin `https://opendev.org/x/whitebox-neutron-tempest-plugin `__ +======================================== === + + diff 
--git a/doc/source/plugins.rst b/doc/source/plugins.rst new file mode 100644 index 0000000000..fe567e2277 --- /dev/null +++ b/doc/source/plugins.rst @@ -0,0 +1,334 @@ +======= +Plugins +======= + +The OpenStack ecosystem is wide and deep, and only growing more so +every day. The value of DevStack is that it's simple enough to +understand what it's doing clearly. And yet we'd like to support as +much of the OpenStack Ecosystem as possible. We do that with plugins. + +DevStack plugins are bits of bash code that live outside the DevStack +tree. They are called through a strong contract, so these plugins can +be sure that they will continue to work in the future as DevStack +evolves. + +Prerequisites +============= + +If you are planning to create a plugin that is going to host a service in the +service catalog (that is, your plugin will use the command +``get_or_create_service``) please make sure that you apply to the `service +types authority`_ to reserve a valid service-type. This will help to make sure +that all deployments of your service use the same service-type. + +Plugin Interface +================ + +DevStack supports a standard mechanism for including plugins from +external repositories. The plugin interface assumes the following: + +An external git repository that includes a ``devstack/`` top level +directory. Inside this directory there can be 3 files. + +- ``override-defaults`` - a file containing global variables that + will be sourced before the lib/* files. This allows the plugin + to override the defaults that are otherwise set in the lib/* + files. + + For example, override-defaults may export CINDER_ENABLED_BACKENDS + to include the plugin-specific storage backend and thus be able + to override the default lvm only storage backend for Cinder. + +- ``settings`` - a file containing global variables that will be + sourced very early in the process. This is helpful if other plugins + might depend on this one, and need access to global variables to do + their work. + + Your settings should include any ``enable_service`` lines required + by your plugin. This is especially important if you are kicking off + services using ``run_process`` as it only works with enabled + services. + + Be careful to allow users to override global-variables for + customizing their environment. Usually it is best to provide a + default value only if the variable is unset or empty; e.g. in bash + syntax ``FOO=${FOO:-default}``. + + The file should include a ``define_plugin`` line to indicate the + plugin's name, which is the name that should be used by users on + "enable_plugin" lines. It should generally be the last component of + the git repo path (e.g., if the plugin's repo is + openstack/foo, then the name here should be "foo") :: + + define_plugin + + If your plugin depends on another plugin, indicate it in this file + with one or more lines like the following:: + + plugin_requires + + For a complete example, if the plugin "foo" depends on "bar", the + ``settings`` file should include:: + + define_plugin foo + plugin_requires foo bar + + Devstack does not currently use this dependency information, so it's + important that users continue to add enable_plugin lines in the + correct order in ``local.conf``, however adding this information + allows other tools to consider dependency information when + automatically generating ``local.conf`` files. + +- ``plugin.sh`` - the actual plugin. It is executed by devstack at + well defined points during a ``stack.sh`` run. 
The plugin.sh + internal structure is discussed below. + + +Plugins are registered by adding the following to the localrc section +of ``local.conf``. + +They are added in the following format:: + + [[local|localrc]] + enable_plugin [GITREF] + +- ``name`` - an arbitrary name. (ex: glusterfs, docker, zaqar, congress) +- ``giturl`` - a valid git url that can be cloned +- ``gitref`` - an optional git ref (branch / ref / tag) that will be + cloned. Defaults to master. + +An example would be as follows:: + + enable_plugin ec2-api https://opendev.org/openstack/ec2-api + +plugin.sh contract +================== + +``plugin.sh`` is a bash script that will be called at specific points +during ``stack.sh``, ``unstack.sh``, and ``clean.sh``. It will be +called in the following way:: + + source $PATH/TO/plugin.sh [phase] + +``mode`` can be thought of as the major mode being called, currently +one of: ``stack``, ``unstack``, ``clean``. ``phase`` is used by modes +which have multiple points during their run where it's necessary to +be able to execute code. All existing ``mode`` and ``phase`` points +are considered **strong contracts** and won't be removed without a +reasonable deprecation period. Additional new ``mode`` or ``phase`` +points may be added at any time if we discover we need them to support +additional kinds of plugins in devstack. + +The current full list of ``mode`` and ``phase`` are: + +- **stack** - Called by ``stack.sh`` four times for different phases + of its run: + + - **pre-install** - Called after system (OS) setup is complete and + before project source is installed. + - **install** - Called after the layer 1 and 2 projects source and + their dependencies have been installed. + - **post-config** - Called after the layer 1 and 2 services have + been configured. All configuration files for enabled services + should exist at this point. + - **extra** - Called near the end after layer 1 and 2 services have + been started. + - **test-config** - Called at the end of devstack used to configure tempest + or any other test environments + +- **unstack** - Called by ``unstack.sh`` before other services are shut + down. +- **clean** - Called by ``clean.sh`` before other services are cleaned, + but after ``unstack.sh`` has been called. + +Example plugin +==================== + +An example plugin would look something as follows. + +``devstack/settings``:: + + # settings file for template + enable_service template + + +``devstack/plugin.sh``:: + + # plugin.sh - DevStack plugin.sh dispatch script template + + function install_template { + ... + } + + function init_template { + ... + } + + function configure_template { + ... 
+ } + + # check for service enabled + if is_service_enabled template; then + + if [[ "$1" == "stack" && "$2" == "pre-install" ]]; then + # Set up system services + echo_summary "Configuring system services Template" + install_package cowsay + + elif [[ "$1" == "stack" && "$2" == "install" ]]; then + # Perform installation of service source + echo_summary "Installing Template" + install_template + + elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then + # Configure after the other layer 1 and 2 services have been configured + echo_summary "Configuring Template" + configure_template + + elif [[ "$1" == "stack" && "$2" == "extra" ]]; then + # Initialize and start the template service + echo_summary "Initializing Template" + init_template + fi + + if [[ "$1" == "unstack" ]]; then + # Shut down template services + # no-op + : + fi + + if [[ "$1" == "clean" ]]; then + # Remove state and transient data + # Remember clean.sh first calls unstack.sh + # no-op + : + fi + fi + +Plugin Execution Order +====================== + +Plugins are run after in tree services at each of the stages +above. For example, if you need something to happen before Keystone +starts, you should do that at the ``post-config`` phase. + +Multiple plugins can be specified in your ``local.conf``. When that +happens the plugins will be executed **in order** at each phase. This +allows plugins to conceptually depend on each other through +documenting to the user the order they must be declared. A formal +dependency mechanism is beyond the scope of the current work. + +System Packages +=============== + + + +Devstack based +-------------- + +Devstack provides a custom framework for getting packages installed at +an early phase of its execution. These packages may be defined in a +plugin as files that contain new-line separated lists of packages +required by the plugin + +Supported packaging systems include apt and dnf across multiple +distributions. To enable a plugin to hook into this and install +package dependencies, packages may be listed at the following +locations in the top-level of the plugin repository: + +- ``./devstack/files/debs/$plugin_name`` - Packages to install when running + on Ubuntu or Debian. + +- ``./devstack/files/rpms/$plugin_name`` - Packages to install when running + on Red Hat, Fedora, or CentOS. + +Although there a no plans to remove this method of installing +packages, plugins should consider it deprecated for ``bindep`` support +described below. + +bindep +------ + +The `bindep `__ project has +become the defacto standard for OpenStack projects to specify binary +dependencies. + +A plugin may provide a ``./devstack/files/bindep.txt`` file, which +will be called with the *default* profile to install packages. For +details on the syntax, etc. see the bindep documentation. + +It is also possible to use the ``bindep.txt`` of projects that are +being installed from source with the ``-bindep`` flag available in +install functions. For example + +.. code-block:: bash + + if use_library_from_git "diskimage-builder"; then + GITREPO["diskimage-builder"]=$DISKIMAGE_BUILDER_REPO_URL + GITDIR["diskimage-builder"]=$DEST/diskimage-builder + GITBRANCH["diskimage-builder"]=$DISKIMAGE_BUILDER_REPO_REF + git_clone_by_name "diskimage-builder" + setup_dev_lib -bindep "diskimage-builder" + fi + +will result in any packages required by the ``bindep.txt`` of the +``diskimage-builder`` project being installed. Note however that jobs +that switch projects between source and released/pypi installs +(e.g. 
with a ``foo-dsvm`` and a ``foo-dsvm-src`` test to cover both +released dependencies and master versions) will have to deal with +``bindep.txt`` being unavailable without the source directory. + + +Using Plugins in the OpenStack Gate +=================================== + +For everyday use, DevStack plugins can exist in any git tree that's +accessible on the internet. However, when using DevStack plugins in +the OpenStack gate, they must live in projects in OpenStack's +gerrit. This allows testing of the plugin as well as provides network +isolation against upstream git repository failures (which we see often +enough to be an issue). + +Ideally a plugin will be included within the ``devstack`` directory of +the project they are being tested. For example, the openstack/ec2-api +project has its plugin support in its own tree. + +However, some times a DevStack plugin might be used solely to +configure a backend service that will be used by the rest of +OpenStack, so there is no "project tree" per say. Good examples +include: integration of back end storage (e.g. ceph or glusterfs), +integration of SDN controllers (e.g. ovn, OpenDayLight), or +integration of alternate RPC systems (e.g. zmq, qpid). In these cases +the best practice is to build a dedicated +``openstack/devstack-plugin-FOO`` project. + +Legacy project-config jobs +-------------------------- + +To enable a plugin to be used in a gate job, the following lines will +be needed in your ``jenkins/jobs/.yaml`` definition in +`project-config `_:: + + # Because we are testing a non standard project, add the + # our project repository. This makes zuul do the right + # reference magic for testing changes. + export PROJECTS="openstack/ec2-api $PROJECTS" + + # note the actual url here is somewhat irrelevant because it + # caches in nodepool, however make it a valid url for + # documentation purposes. + export DEVSTACK_LOCAL_CONFIG="enable_plugin ec2-api https://opendev.org/openstack/ec2-api" + +Zuul v3 jobs +------------ + +See the ``devstack_plugins`` example in :doc:`zuul_ci_jobs_migration`. + +See Also +======== + +For additional inspiration on devstack plugins you can check out the +:doc:`Plugin Registry `. + +.. _service types authority: https://specs.openstack.org/openstack/service-types-authority/ diff --git a/doc/source/systemd.rst b/doc/source/systemd.rst new file mode 100644 index 0000000000..78535202d8 --- /dev/null +++ b/doc/source/systemd.rst @@ -0,0 +1,222 @@ +=========================== + Using Systemd in DevStack +=========================== + +By default DevStack is run with all the services as systemd unit +files. Systemd is now the default init system for nearly every Linux +distro, and systemd encodes and solves many of the problems related to +poorly running processes. + +Why this instead of screen? +=========================== + +The screen model for DevStack was invented when the number of services +that a DevStack user was going to run was typically < 10. This made +screen hot keys to jump around very easy. However, the landscape has +changed (not all services are stoppable in screen as some are under +Apache, there are typically at least 20 items) + +There is also a common developer workflow of changing code in more +than one service, and needing to restart a bunch of services for that +to take effect. + +Unit Structure +============== + +.. note:: + + Originally we actually wanted to do this as user units, however + there are issues with running this under non interactive + shells. 
For now, we'll be running as system units. Some user unit + code is left in place in case we can switch back later. + +All DevStack user units are created as a part of the DevStack slice +given the name ``devstack@$servicename.service``. This makes it easy +to understand which services are part of the devstack run, and lets us +disable / stop them in a single command. + +Manipulating Units +================== + +Assuming the unit ``n-cpu`` to make the examples more clear. + +Enable a unit (allows it to be started):: + + sudo systemctl enable devstack@n-cpu.service + +Disable a unit:: + + sudo systemctl disable devstack@n-cpu.service + +Start a unit:: + + sudo systemctl start devstack@n-cpu.service + +Stop a unit:: + + sudo systemctl stop devstack@n-cpu.service + +Restart a unit:: + + sudo systemctl restart devstack@n-cpu.service + +See status of a unit:: + + sudo systemctl status devstack@n-cpu.service + +Operating on more than one unit at a time +----------------------------------------- + +Systemd supports wildcarding for unit operations. To restart every +service in devstack you can do that following:: + + sudo systemctl restart devstack@* + +Or to see the status of all Nova processes you can do:: + + sudo systemctl status devstack@n-* + +We'll eventually make the unit names a bit more meaningful so that +it's easier to understand what you are restarting. + +.. _journalctl-examples: + +Querying Logs +============= + +One of the other major things that comes with systemd is journald, a +consolidated way to access logs (including querying through structured +metadata). This is accessed by the user via ``journalctl`` command. + + +Logs can be accessed through ``journalctl``. journalctl has powerful +query facilities. We'll start with some common options. + +Follow logs for a specific service:: + + sudo journalctl -f --unit devstack@n-cpu.service + +Following logs for multiple services simultaneously:: + + sudo journalctl -f --unit devstack@n-cpu.service --unit devstack@n-cond.service + +or you can even do wild cards to follow all the nova services:: + + sudo journalctl -f --unit devstack@n-* + +Use higher precision time stamps:: + + sudo journalctl -f -o short-precise --unit devstack@n-cpu.service + +By default, journalctl strips out "unprintable" characters, including +ASCII color codes. To keep the color codes (which can be interpreted by +an appropriate terminal/pager - e.g. ``less``, the default):: + + sudo journalctl -a --unit devstack@n-cpu.service + +When outputting to the terminal using the default pager, long lines +will be truncated, but horizontal scrolling is supported via the +left/right arrow keys. You can override this by setting the +``SYSTEMD_LESS`` environment variable to e.g. ``FRXM``. + +You can pipe the output to another tool, such as ``grep``. For +example, to find a server instance UUID in the nova logs:: + + sudo journalctl -a --unit devstack@n-* | grep 58391b5c-036f-44d5-bd68-21d3c26349e6 + +See ``man 1 journalctl`` for more. + +Debugging +========= + +Using pdb +--------- + +In order to break into a regular pdb session on a systemd-controlled +service, you need to invoke the process manually - that is, take it out +of systemd's control. 
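+
+Condensed, the whole workflow looks roughly like the sketch below. It uses
+the ``n-sch`` scheduler unit and command line from the step-by-step
+instructions that follow; substitute whatever ``ExecStart`` reports for the
+service you actually want to debug::
+
+  # how does systemd launch the service?
+  systemctl show devstack@n-sch.service -p ExecStart --no-pager
+  # take the service out of systemd's control
+  sudo systemctl stop devstack@n-sch.service
+  # add "import pdb; pdb.set_trace()" to the code, then run the reported
+  # command by hand so pdb gets an interactive terminal
+  /usr/local/bin/nova-scheduler --config-file /etc/nova/nova.conf
+
+The individual steps are described below.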
+ +Discover the command systemd is using to run the service:: + + systemctl show devstack@n-sch.service -p ExecStart --no-pager + +Stop the systemd service:: + + sudo systemctl stop devstack@n-sch.service + +Inject your breakpoint in the source, e.g.:: + + import pdb; pdb.set_trace() + +Invoke the command manually:: + + /usr/local/bin/nova-scheduler --config-file /etc/nova/nova.conf + +Some executables, such as :program:`nova-compute`, will need to be executed +with a particular group. This will be shown in the systemd unit file:: + + sudo systemctl cat devstack@n-cpu.service | grep Group + +:: + + Group = libvirt + +Use the :program:`sg` tool to execute the command as this group:: + + sg libvirt -c '/usr/local/bin/nova-compute --config-file /etc/nova/nova-cpu.conf' + +Using remote-pdb +---------------- + +`remote-pdb`_ works while the process is under systemd control. + +Make sure you have remote-pdb installed:: + + sudo pip install remote-pdb + +Inject your breakpoint in the source, e.g.:: + + import remote_pdb; remote_pdb.set_trace() + +Restart the relevant service:: + + sudo systemctl restart devstack@n-api.service + +The remote-pdb code configures the telnet port when ``set_trace()`` is +invoked. Do whatever it takes to hit the instrumented code path, and +inspect the logs for a message displaying the listening port:: + + Sep 07 16:36:12 p8-100-neo devstack@n-api.service[772]: RemotePdb session open at 127.0.0.1:46771, waiting for connection ... + +Telnet to that port to enter the pdb session:: + + telnet 127.0.0.1 46771 + +See the `remote-pdb`_ home page for more options. + +.. _`remote-pdb`: https://pypi.org/project/remote-pdb/ + +Future Work +=========== + +user units +---------- + +It would be great if we could do services as user units, so that there +is a clear separation of code being run as not root, to ensure running +as root never accidentally gets baked in as an assumption to +services. However, user units interact poorly with devstack-gate and +the way that commands are run as users with ansible and su. + +Maybe someday we can figure that out. + +References +========== + +- Arch Linux Wiki - https://wiki.archlinux.org/index.php/Systemd/User +- Python interface to journald - + https://www.freedesktop.org/software/systemd/python-systemd/journal.html +- Systemd documentation on service files - + https://www.freedesktop.org/software/systemd/man/systemd.service.html +- Systemd documentation on exec (can be used to impact service runs) - + https://www.freedesktop.org/software/systemd/man/systemd.exec.html diff --git a/doc/source/tempest.rst b/doc/source/tempest.rst new file mode 100644 index 0000000000..65dd5b16b2 --- /dev/null +++ b/doc/source/tempest.rst @@ -0,0 +1,25 @@ +======= +Tempest +======= + +`Tempest`_ is the OpenStack Integration test suite. It is installed by default +and is used to provide integration testing for many of the OpenStack services. +Just like DevStack itself, it is possible to extend Tempest with plugins. In +fact, many Tempest plugin packages also include DevStack plugin to do things +like pre-create required static resources. + +The `Tempest documentation `_ provides a thorough guide to using +Tempest. However, if you simply wish to run the standard set of Tempest tests +against an existing deployment, you can do the following: + +.. code-block:: shell + + cd /opt/stack/tempest + /opt/stack/data/venv/bin/tempest run ... 
+ +The above assumes you have installed DevStack in the default location +(configured via the ``DEST`` configuration variable) and have enabled +virtualenv-based installation in the standard location (configured via the +``USE_VENV`` and ``VENV_DEST`` configuration variables, respectively). + +.. _Tempest: https://docs.openstack.org/tempest/latest/ diff --git a/doc/source/zuul_ci_jobs_migration.rst b/doc/source/zuul_ci_jobs_migration.rst new file mode 100644 index 0000000000..c43603ea17 --- /dev/null +++ b/doc/source/zuul_ci_jobs_migration.rst @@ -0,0 +1,320 @@ +=============================== +Migrating Zuul V2 CI jobs to V3 +=============================== + +The OpenStack CI system moved from Zuul v2 to Zuul v3, and all CI jobs moved to +the new CI system. All jobs have been migrated automatically to a format +compatible with Zuul v3; the jobs produced in this way however are suboptimal +and do not use the capabilities introduced by Zuul v3, which allow for re-use of +job parts, in the form of Ansible roles, as well as inheritance between jobs. + +DevStack hosts a set of roles, plays and jobs that can be used by other +repositories to define their DevStack based jobs. To benefit from them, jobs +must be migrated from the legacy v2 ones into v3 native format. + +This document provides guidance and examples to make the migration process as +painless and smooth as possible. + +Where to host the job definitions. +================================== + +In Zuul V3 jobs can be defined in the repository that contains the code they +excercise. If you are writing CI jobs for an OpenStack service you can define +your DevStack based CI jobs in one of the repositories that host the code for +your service. If you have a branchless repo, like a Tempest plugin, that is +a convenient choice to host the job definitions since job changes do not have +to be backported. For example, see the beginning of the ``.zuul.yaml`` from the +sahara Tempest plugin repo: + +.. code:: yaml + + # In https://opendev.org/openstack/sahara-tests/src/branch/master/.zuul.yaml: + - job: + name: sahara-tests-tempest + description: | + Run Tempest tests from the Sahara plugin. + parent: devstack-tempest + +Which base job to start from +============================ + +If your job needs an OpenStack cloud deployed via DevStack, but you don't plan +on running Tempest tests, you can start from one of the base +:doc:`jobs ` defined in the DevStack repo. + +The ``devstack`` job can be used for both single-node jobs and multi-node jobs, +and it includes the list of services used in the integrated gate (keystone, +glance, nova, cinder, neutron and swift). Different topologies can be achieved +by switching the nodeset used in the child job. + +The ``devstack-base`` job is similar to ``devstack`` but it does not specify any +required repo or service to be run in DevStack. It can be useful to setup +children jobs that use a very narrow DevStack setup. + +If your job needs an OpenStack cloud deployed via DevStack, and you do plan +on running Tempest tests, you can start from one of the base jobs defined in the +Tempest repo. + +The ``devstack-tempest`` job can be used for both single-node jobs and +multi-node jobs. Different topologies can be achieved by switching the nodeset +used in the child job. 
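+
+For instance, a minimal child job that reuses ``devstack-tempest`` but runs
+on a two-node topology could look like the sketch below (the job name is
+made up, and the nodeset just has to be one that is defined in your Zuul
+environment, such as the two-node nodesets provided by DevStack):
+
+.. code:: yaml
+
+   - job:
+       name: my-project-devstack-tempest-multinode
+       parent: devstack-tempest
+       nodeset: openstack-two-node-jammy
+       description: |
+         Run Tempest against a two-node DevStack deployment of my-project.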
+ +Jobs can be customized as follows without writing any Ansible code: + +- add and/or remove DevStack services +- add or modify DevStack and services configuration +- install DevStack plugins +- extend the number of sub-nodes (multinode only) +- define extra log files and/or directories to be uploaded on logs.o.o +- define extra log file extensions to be rewritten to .txt for ease of access + +Tempest jobs can be further customized as follows: + +- define the Tempest tox environment to be used +- define the test concurrency +- define the test regular expression + +Writing Ansible code, or importing existing custom roles, jobs can be further +extended by: + +- adding pre and/or post playbooks +- overriding the run playbook, add custom roles + +The (partial) example below extends a Tempest single node base job +"devstack-tempest" in the Kuryr repository. The parent job name is defined in +job.parent. + +.. code:: yaml + + # https://opendev.org/openstack/kuryr-kubernetes/src/branch/master/.zuul.d/base.yaml: + - job: + name: kuryr-kubernetes-tempest-base + parent: devstack-tempest + description: Base kuryr-kubernetes-job + required-projects: + - openstack/devstack-plugin-container + - openstack/kuryr + - openstack/kuryr-kubernetes + - openstack/kuryr-tempest-plugin + - openstack/neutron-lbaas + vars: + tempest_test_regex: '^(kuryr_tempest_plugin.tests.)' + tox_envlist: 'all' + devstack_localrc: + KURYR_K8S_API_PORT: 8080 + devstack_services: + kubernetes-api: true + kubernetes-controller-manager: true + kubernetes-scheduler: true + kubelet: true + kuryr-kubernetes: true + (...) + devstack_plugins: + kuryr-kubernetes: https://opendev.org/openstack/kuryr + devstack-plugin-container: https://opendev.org/openstack/devstack-plugin-container + neutron-lbaas: https://opendev.org/openstack/neutron-lbaas + tempest_plugins: + - kuryr-tempest-plugin + (...) + +Job variables +============= + +Variables can be added to the job in three different places: + +- job.vars: these are global variables available to all node in the nodeset +- job.host-vars.[HOST]: these are variables available only to the specified HOST +- job.group-vars.[GROUP]: these are variables available only to the specified + GROUP + +Zuul merges dict variables through job inheritance. Host and group variables +override variables with the same name defined as global variables. + +In the example below, for the sundaes job, hosts that are not part of the +subnode group will run vanilla and chocolate. Hosts in the subnode group will +run stracciatella and strawberry. + +.. code:: yaml + + - job: + name: ice-creams + vars: + devstack_service: + vanilla: true + chocolate: false + group-vars: + subnode: + devstack_service: + pistacchio: true + stracciatella: true + + - job: + name: sundaes + parent: ice-creams + vars: + devstack_service: + chocolate: true + group-vars: + subnode: + devstack_service: + strawberry: true + pistacchio: false + + +DevStack Gate Flags +=================== + +The old CI system worked using a combination of DevStack, Tempest and +devstack-gate to setup a test environment and run tests against it. With Zuul +V3, the logic that used to live in devstack-gate is moved into different repos, +including DevStack, Tempest and grenade. + +DevStack-gate exposes an interface for job definition based on a number of +DEVSTACK_GATE_* environment variables, or flags. This guide shows how to map +DEVSTACK_GATE flags into the new +system. 
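+
+As a quick illustration, a legacy job that exported
+``DEVSTACK_GATE_TEMPEST=1``, ``DEVSTACK_GATE_TEMPEST_FULL=1`` and
+``DEVSTACK_GATE_VIRT_DRIVER=libvirt`` maps roughly to the following job
+variables (a sketch based on the mapping table below; the job name is made
+up):
+
+.. code:: yaml
+
+   - job:
+       name: my-project-tempest-full
+       parent: devstack-tempest
+       vars:
+         tox_envlist: full
+         devstack_localrc:
+           VIRT_DRIVER: libvirt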
+ +The repo column indicates in which repository is hosted the code that replaces +the devstack-gate flag. The new implementation column explains how to reproduce +the same or a similar behaviour in Zuul v3 jobs. For localrc settings, +devstack-gate defined a default value. In ansible jobs the default is either the +value defined in the parent job, or the default from DevStack, if any. + +.. list-table:: **DevStack Gate Flags** + :widths: 20 10 60 + :header-rows: 1 + + * - DevStack gate flag + - Repo + - New implementation + * - OVERRIDE_ZUUL_BRANCH + - zuul + - override-checkout: [branch] in the job definition. + * - DEVSTACK_GATE_NET_OVERLAY + - zuul-jobs + - A bridge called br-infra is set up for all jobs that inherit + from multinode with a dedicated `bridge role + `_. + * - DEVSTACK_CINDER_VOLUME_CLEAR + - devstack + - *CINDER_VOLUME_CLEAR: true/false* in devstack_localrc in the + job vars. + * - DEVSTACK_GATE_NEUTRON + - devstack + - True by default. To disable, disable all neutron services in + devstack_services in the job definition. + * - DEVSTACK_GATE_CONFIGDRIVE + - devstack + - *FORCE_CONFIG_DRIVE: true/false* in devstack_localrc in the job + vars. + * - DEVSTACK_GATE_INSTALL_TESTONLY + - devstack + - *INSTALL_TESTONLY_PACKAGES: true/false* in devstack_localrc in + the job vars. + * - DEVSTACK_GATE_VIRT_DRIVER + - devstack + - *VIRT_DRIVER: [virt driver]* in devstack_localrc in the job + vars. + * - DEVSTACK_GATE_LIBVIRT_TYPE + - devstack + - *LIBVIRT_TYPE: [libvirt type]* in devstack_localrc in the job + vars. + * - DEVSTACK_GATE_TEMPEST + - devstack and tempest + - Defined by the job that is used. The ``devstack`` job only runs + devstack. The ``devstack-tempest`` one triggers a Tempest run + as well. + * - DEVSTACK_GATE_TEMPEST_FULL + - tempest + - *tox_envlist: full* in the job vars. + * - DEVSTACK_GATE_TEMPEST_ALL + - tempest + - *tox_envlist: all* in the job vars. + * - DEVSTACK_GATE_TEMPEST_ALL_PLUGINS + - tempest + - *tox_envlist: all-plugin* in the job vars. + * - DEVSTACK_GATE_TEMPEST_SCENARIOS + - tempest + - *tox_envlist: scenario* in the job vars. + * - TEMPEST_CONCURRENCY + - tempest + - *tempest_concurrency: [value]* in the job vars. This is + available only on jobs that inherit from ``devstack-tempest`` + down. + * - DEVSTACK_GATE_TEMPEST_NOTESTS + - tempest + - *tox_envlist: venv-tempest* in the job vars. This will create + Tempest virtual environment but run no tests. + * - DEVSTACK_GATE_SMOKE_SERIAL + - tempest + - *tox_envlist: smoke-serial* in the job vars. + * - DEVSTACK_GATE_TEMPEST_DISABLE_TENANT_ISOLATION + - tempest + - *tox_envlist: full-serial* in the job vars. + *TEMPEST_ALLOW_TENANT_ISOLATION: false* in devstack_localrc in + the job vars. + + +The following flags have not been migrated yet or are legacy and won't be +migrated at all. + +.. list-table:: **Not Migrated DevStack Gate Flags** + :widths: 20 10 60 + :header-rows: 1 + + * - DevStack gate flag + - Status + - Details + * - DEVSTACK_GATE_TOPOLOGY + - WIP + - The topology depends on the base job that is used and more + specifically on the nodeset attached to it. The new job format + allows project to define the variables to be passed to every + node/node-group that exists in the topology. Named topologies + that include the nodeset and the matching variables can be + defined in the form of base jobs. + * - DEVSTACK_GATE_GRENADE + - TBD + - Grenade Zuul V3 jobs will be hosted in the grenade repo. + * - GRENADE_BASE_BRANCH + - TBD + - Grenade Zuul V3 jobs will be hosted in the grenade repo. 
+ * - DEVSTACK_GATE_NEUTRON_DVR + - TBD + - Depends on multinode support. + * - DEVSTACK_GATE_EXERCISES + - TBD + - Can be done on request. + * - DEVSTACK_GATE_IRONIC + - TBD + - This will probably be implemented on ironic side. + * - DEVSTACK_GATE_IRONIC_DRIVER + - TBD + - This will probably be implemented on ironic side. + * - DEVSTACK_GATE_IRONIC_BUILD_RAMDISK + - TBD + - This will probably be implemented on ironic side. + * - DEVSTACK_GATE_POSTGRES + - Legacy + - This flag exists in d-g but the only thing that it does is + capture postgres logs. This is already supported by the roles + in post, so the flag is useless in the new jobs. postgres + itself can be enabled via the devstack_service job variable. + * - DEVSTACK_GATE_ZEROMQ + - Legacy + - This has no effect in d-g. + * - DEVSTACK_GATE_MQ_DRIVER + - Legacy + - This has no effect in d-g. + * - DEVSTACK_GATE_TEMPEST_STRESS_ARGS + - Legacy + - Stress is not in Tempest anymore. + * - DEVSTACK_GATE_TEMPEST_HEAT_SLOW + - Legacy + - This is not used anywhere. + * - DEVSTACK_GATE_CELLS + - Legacy + - This has no effect in d-g. + * - DEVSTACK_GATE_NOVA_API_METADATA_SPLIT + - Legacy + - This has no effect in d-g. diff --git a/doc/source/zuul_jobs.rst b/doc/source/zuul_jobs.rst new file mode 100644 index 0000000000..cf203a8973 --- /dev/null +++ b/doc/source/zuul_jobs.rst @@ -0,0 +1,4 @@ +Zuul CI Jobs +============ + +.. zuul:autojobs:: diff --git a/doc/source/zuul_roles.rst b/doc/source/zuul_roles.rst new file mode 100644 index 0000000000..4939281057 --- /dev/null +++ b/doc/source/zuul_roles.rst @@ -0,0 +1,4 @@ +Zuul CI Roles +============= + +.. zuul:autoroles:: diff --git a/exercise.sh b/exercise.sh deleted file mode 100755 index dd45c5aceb..0000000000 --- a/exercise.sh +++ /dev/null @@ -1,51 +0,0 @@ -#!/usr/bin/env bash - -source ./stackrc -# Run everything in the exercises/ directory that isn't explicitly disabled - -# comma separated list of script basenames to skip -# to refrain from exercising euca.sh use SKIP_EXERCISES=euca -SKIP_EXERCISES=${SKIP_EXERCISES:-""} - -# Locate the scripts we should run -EXERCISE_DIR=$(dirname "$0")/exercises -basenames=$(for b in `ls $EXERCISE_DIR/*.sh`; do basename $b .sh; done) - -# Track the state of each script -passes="" -failures="" -skips="" - -# Loop over each possible script (by basename) -for script in $basenames; do - if [[ "$SKIP_EXERCISES" =~ $script ]] ; then - skips="$skips $script" - else - echo ========================= - echo Running $script - echo ========================= - $EXERCISE_DIR/$script.sh - if [[ $? -ne 0 ]] ; then - failures="$failures $script" - else - passes="$passes $script" - fi - fi -done - -# output status of exercise run -echo ========================= -echo ========================= -for script in $skips; do - echo SKIP $script -done -for script in $passes; do - echo PASS $script -done -for script in $failures; do - echo FAILED $script -done - -if [ -n "$failures" ] ; then - exit 1 -fi diff --git a/exercises/euca.sh b/exercises/euca.sh deleted file mode 100755 index f9996094c4..0000000000 --- a/exercises/euca.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/usr/bin/env bash - -# we will use the ``euca2ools`` cli tool that wraps the python boto -# library to test ec2 compatibility -# - -# This script exits on an error so that errors don't compound and you see -# only the first error that occured. -set -o errexit - -# Print the commands being run so that we can see the command that triggers -# an error. It is also useful for following allowing as the install occurs. 
-set -o xtrace - - -# Settings -# ======== - -# Use openrc + stackrc + localrc for settings -pushd $(cd $(dirname "$0")/.. && pwd) -source ./openrc -popd - -# find a machine image to boot -IMAGE=`euca-describe-images | grep machine | cut -f2 | head -n1` - -# launch it -INSTANCE=`euca-run-instances $IMAGE | grep INSTANCE | cut -f2` - -# assure it has booted within a reasonable time -if ! timeout $RUNNING_TIMEOUT sh -c "while euca-describe-instances $INSTANCE | grep -q running; do sleep 1; done"; then - echo "server didn't become active within $RUNNING_TIMEOUT seconds" - exit 1 -fi - -euca-terminate-instances $INSTANCE diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh deleted file mode 100755 index dca6d5be50..0000000000 --- a/exercises/floating_ips.sh +++ /dev/null @@ -1,168 +0,0 @@ -#!/usr/bin/env bash - -# **exercise.sh** - using the cloud can be fun - -# we will use the ``nova`` cli tool provided by the ``python-novaclient`` -# package -# - - -# This script exits on an error so that errors don't compound and you see -# only the first error that occured. -set -o errexit - -# Print the commands being run so that we can see the command that triggers -# an error. It is also useful for following allowing as the install occurs. -set -o xtrace - - -# Settings -# ======== - -# Use openrc + stackrc + localrc for settings -pushd $(cd $(dirname "$0")/.. && pwd) -source ./openrc -popd - -# Get a token for clients that don't support service catalog -# ========================================================== - -# manually create a token by querying keystone (sending JSON data). Keystone -# returns a token and catalog of endpoints. We use python to parse the token -# and save it. - -TOKEN=`curl -s -d "{\"auth\":{\"passwordCredentials\": {\"username\": \"$NOVA_USERNAME\", \"password\": \"$NOVA_PASSWORD\"}}}" -H "Content-type: application/json" http://$HOST_IP:5000/v2.0/tokens | python -c "import sys; import json; tok = json.loads(sys.stdin.read()); print tok['access']['token']['id'];"` - -# Launching a server -# ================== - -# List servers for tenant: -nova list - -# Images -# ------ - -# Nova has a **deprecated** way of listing images. -nova image-list - -# But we recommend using glance directly -glance -A $TOKEN index - -# Let's grab the id of the first AMI image to launch -IMAGE=`glance -A $TOKEN index | egrep ami | cut -d" " -f1` - -# Security Groups -# --------------- -SECGROUP=test_secgroup - -# List of secgroups: -nova secgroup-list - -# Create a secgroup -nova secgroup-create $SECGROUP "test_secgroup description" - -# determine flavor -# ---------------- - -# List of flavors: -nova flavor-list - -# and grab the first flavor in the list to launch -FLAVOR=`nova flavor-list | head -n 4 | tail -n 1 | cut -d"|" -f2` - -NAME="myserver" - -nova boot --flavor $FLAVOR --image $IMAGE $NAME --security_groups=$SECGROUP - -# Testing -# ======= - -# First check if it spins up (becomes active and responds to ping on -# internal ip). If you run this script from a nova node, you should -# bypass security groups and have direct access to the server. - -# Waiting for boot -# ---------------- - -# Max time to wait while vm goes from build to active state -ACTIVE_TIMEOUT=${ACTIVE_TIMEOUT:-10} - -# Max time till the vm is bootable -BOOT_TIMEOUT=${BOOT_TIMEOUT:-15} - -# Max time to wait for proper association and dis-association. -ASSOCIATE_TIMEOUT=${ASSOCIATE_TIMEOUT:-10} - -# check that the status is active within ACTIVE_TIMEOUT seconds -if ! timeout $ACTIVE_TIMEOUT sh -c "while ! 
nova show $NAME | grep status | grep -q ACTIVE; do sleep 1; done"; then - echo "server didn't become active!" - exit 1 -fi - -# get the IP of the server -IP=`nova show $NAME | grep "private network" | cut -d"|" -f3` - -# for single node deployments, we can ping private ips -MULTI_HOST=${MULTI_HOST:-0} -if [ "$MULTI_HOST" = "0" ]; then - # sometimes the first ping fails (10 seconds isn't enough time for the VM's - # network to respond?), so let's ping for a default of 15 seconds with a - # timeout of a second for each ping. - if ! timeout $BOOT_TIMEOUT sh -c "while ! ping -c1 -w1 $IP; do sleep 1; done"; then - echo "Couldn't ping server" - exit 1 - fi -else - # On a multi-host system, without vm net access, do a sleep to wait for the boot - sleep $BOOT_TIMEOUT -fi - -# Security Groups & Floating IPs -# ------------------------------ - -# allow icmp traffic (ping) -nova secgroup-add-rule $SECGROUP icmp -1 -1 0.0.0.0/0 - -# List rules for a secgroup -nova secgroup-list-rules $SECGROUP - -# allocate a floating ip -nova floating-ip-create - -# store floating address -FLOATING_IP=`nova floating-ip-list | grep None | head -1 | cut -d '|' -f2 | sed 's/ //g'` - -# add floating ip to our server -nova add-floating-ip $NAME $FLOATING_IP - -# test we can ping our floating ip within ASSOCIATE_TIMEOUT seconds -if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! ping -c1 -w1 $FLOATING_IP; do sleep 1; done"; then - echo "Couldn't ping server with floating ip" - exit 1 -fi - -# dis-allow icmp traffic (ping) -nova secgroup-delete-rule $SECGROUP icmp -1 -1 0.0.0.0/0 - -# FIXME (anthony): make xs support security groups -if [ "$VIRT_DRIVER" != "xenserver" ]; then - # test we can aren't able to ping our floating ip within ASSOCIATE_TIMEOUT seconds - if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ping -c1 -w1 $FLOATING_IP; do sleep 1; done"; then - print "Security group failure - ping should not be allowed!" - echo "Couldn't ping server with floating ip" - exit 1 - fi -fi - -# de-allocate the floating ip -nova floating-ip-delete $FLOATING_IP - -# shutdown the server -nova delete $NAME - -# Delete a secgroup -nova secgroup-delete $SECGROUP - -# FIXME: validate shutdown within 5 seconds -# (nova show $NAME returns 1 or status != ACTIVE)? - diff --git a/exercises/swift.sh b/exercises/swift.sh deleted file mode 100755 index f7be099445..0000000000 --- a/exercises/swift.sh +++ /dev/null @@ -1,40 +0,0 @@ -#!/usr/bin/env bash - -# Test swift via the command line tools that ship with it. - -# This script exits on an error so that errors don't compound and you see -# only the first error that occured. -set -o errexit - -# Print the commands being run so that we can see the command that triggers -# an error. It is also useful for following allowing as the install occurs. -set -o xtrace - - -# Settings -# ======== - -# Use openrc + stackrc + localrc for settings -pushd $(cd $(dirname "$0")/.. && pwd) -source ./openrc -popd - - -# Testing Swift -# ============= - -# Check if we have to swift via keystone -swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD stat - -# We start by creating a test container -swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD post testcontainer - -# add some files into it. 
-swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD upload testcontainer /etc/issue - -# list them -swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD list testcontainer - -# And we may want to delete them now that we have tested that -# everything works. -swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD delete testcontainer diff --git a/extras.d/80-tempest.sh b/extras.d/80-tempest.sh new file mode 100644 index 0000000000..06c73ec763 --- /dev/null +++ b/extras.d/80-tempest.sh @@ -0,0 +1,36 @@ +# tempest.sh - DevStack extras script + +if is_service_enabled tempest; then + if [[ "$1" == "source" ]]; then + # Initial source + source $TOP_DIR/lib/tempest + elif [[ "$1" == "stack" && "$2" == "install" ]]; then + echo_summary "Installing Tempest" + async_runfunc install_tempest + elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then + # Tempest config must come after layer 2 services are running + : + elif [[ "$1" == "stack" && "$2" == "extra" ]]; then + # Tempest config must come after all other plugins are run + : + elif [[ "$1" == "stack" && "$2" == "post-extra" ]]; then + # local.conf Tempest option overrides + : + elif [[ "$1" == "stack" && "$2" == "test-config" ]]; then + async_wait install_tempest + echo_summary "Initializing Tempest" + configure_tempest + echo_summary "Installing Tempest Plugins" + install_tempest_plugins + fi + + if [[ "$1" == "unstack" ]]; then + # no-op + : + fi + + if [[ "$1" == "clean" ]]; then + # no-op + : + fi +fi diff --git a/extras.d/README.md b/extras.d/README.md new file mode 100644 index 0000000000..4cec14b4e7 --- /dev/null +++ b/extras.d/README.md @@ -0,0 +1,48 @@ +# Extras Hooks + +The `extras.d` directory contains project dispatch scripts that are called +at specific times by `stack.sh`, `unstack.sh` and `clean.sh`. These hooks are +used to install, configure and start additional projects during a DevStack run +without any modifications to the base DevStack scripts. + +When `stack.sh` reaches one of the hook points it sources the scripts in `extras.d` +that end with `.sh`. To control the order that the scripts are sourced their +names start with a two digit sequence number. DevStack reserves the sequence +numbers 00 through 09 and 90 through 99 for its own use. + +The scripts are sourced at the beginning of each script that calls them. The +entire `stack.sh` variable space is available. The scripts are +sourced with one or more arguments, the first of which defines the hook phase: + + override_defaults | source | stack | unstack | clean + + override_defaults: always called first in any of the scripts, used to + override defaults (if need be) that are otherwise set in lib/* scripts + + source: called by stack.sh. Used to set the initial defaults in a lib/* + script or similar + + stack: called by stack.sh. There are four possible values for + the second arg to distinguish the phase stack.sh is in: + + arg 2: pre-install | install | post-config | extra + + unstack: called by unstack.sh + + clean: called by clean.sh. Remember, clean.sh also calls unstack.sh + so that work need not be repeated. + +The `stack` phase sub-phases are called from `stack.sh` in the following places: + + pre-install - After all system prerequisites have been installed but before any + DevStack-specific services are installed (including database and rpc). + + install - After all OpenStack services have been installed and configured + but before any OpenStack services have been started. 
Changes to OpenStack + service configurations should be done here. + + post-config - After OpenStack services have been initialized but still before + they have been started. (This is probably mis-named, think of it as post-init.) + + extra - After everything is started. + diff --git a/files/000-default.template b/files/000-default.template deleted file mode 100644 index 1d7380d95a..0000000000 --- a/files/000-default.template +++ /dev/null @@ -1,28 +0,0 @@ - - WSGIScriptAlias / %HORIZON_DIR%/openstack-dashboard/dashboard/wsgi/django.wsgi - WSGIDaemonProcess horizon user=%USER% group=%USER% processes=3 threads=10 - SetEnv APACHE_RUN_USER %USER% - SetEnv APACHE_RUN_GROUP %USER% - WSGIProcessGroup horizon - - DocumentRoot %HORIZON_DIR%/.blackhole/ - Alias /media %HORIZON_DIR%/openstack-dashboard/dashboard/static - Alias /vpn /opt/stack/vpn - - - Options FollowSymLinks - AllowOverride None - - - - Options Indexes FollowSymLinks MultiViews - AllowOverride None - Order allow,deny - allow from all - - - ErrorLog /var/log/apache2/error.log - LogLevel warn - CustomLog /var/log/apache2/access.log combined - - diff --git a/files/apache-cinder-api.template b/files/apache-cinder-api.template new file mode 100644 index 0000000000..e401803abc --- /dev/null +++ b/files/apache-cinder-api.template @@ -0,0 +1,18 @@ +Listen %PUBLICPORT% + + + WSGIDaemonProcess osapi_volume processes=%APIWORKERS% threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV% + WSGIProcessGroup osapi_volume + WSGIScriptAlias / %CINDER_BIN_DIR%/cinder-wsgi + WSGIApplicationGroup %{GLOBAL} + WSGIPassAuthorization On + ErrorLogFormat "%{cu}t %M" + ErrorLog /var/log/%APACHE_NAME%/c-api.log + %SSLENGINE% + %SSLCERTFILE% + %SSLKEYFILE% + + + Require all granted + + diff --git a/files/apache-horizon.template b/files/apache-horizon.template new file mode 100644 index 0000000000..c6c55ecf27 --- /dev/null +++ b/files/apache-horizon.template @@ -0,0 +1,33 @@ + + WSGIScriptAlias %WEBROOT% %HORIZON_DIR%/openstack_dashboard/wsgi.py + WSGIDaemonProcess horizon user=%USER% group=%GROUP% processes=3 threads=10 home=%HORIZON_DIR% display-name=%{GROUP} + WSGIApplicationGroup %{GLOBAL} + + SetEnv APACHE_RUN_USER %USER% + SetEnv APACHE_RUN_GROUP %GROUP% + WSGIProcessGroup horizon + + DocumentRoot %HORIZON_DIR%/.blackhole/ + Alias %WEBROOT%/media %HORIZON_DIR%/openstack_dashboard/static + Alias %WEBROOT%/static %HORIZON_DIR%/static + + RedirectMatch "^/$" "%WEBROOT%/" + + + Options FollowSymLinks + AllowOverride None + + + + Options Indexes FollowSymLinks MultiViews + AllowOverride None + Require all granted + + ErrorLogFormat "%{cu}t %M" + ErrorLog /var/log/%APACHE_NAME%/horizon_error.log + LogLevel warn + CustomLog /var/log/%APACHE_NAME%/horizon_access.log combined + + +%WSGIPYTHONHOME% +WSGISocketPrefix /var/run/%APACHE_NAME% diff --git a/files/apache-keystone.template b/files/apache-keystone.template new file mode 100644 index 0000000000..d99e8e6ce0 --- /dev/null +++ b/files/apache-keystone.template @@ -0,0 +1,37 @@ +Listen %PUBLICPORT% +LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\" %D(us)" keystone_combined + + + Require all granted + + + + WSGIDaemonProcess keystone-public processes=3 threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV% + WSGIProcessGroup keystone-public + WSGIScriptAlias / %KEYSTONE_BIN%/keystone-wsgi-public + WSGIApplicationGroup %{GLOBAL} + WSGIPassAuthorization On + ErrorLogFormat "%M" + ErrorLog /var/log/%APACHE_NAME%/keystone.log + CustomLog /var/log/%APACHE_NAME%/keystone_access.log 
keystone_combined + %SSLENGINE% + %SSLCERTFILE% + %SSLKEYFILE% + + +%SSLLISTEN% +%SSLLISTEN% %SSLENGINE% +%SSLLISTEN% %SSLCERTFILE% +%SSLLISTEN% %SSLKEYFILE% +%SSLLISTEN% SSLProtocol -all +TLSv1.3 +TLSv1.2 +%SSLLISTEN% + +Alias /identity %KEYSTONE_BIN%/keystone-wsgi-public + + SetHandler wsgi-script + Options +ExecCGI + + WSGIProcessGroup keystone-public + WSGIApplicationGroup %{GLOBAL} + WSGIPassAuthorization On + diff --git a/files/apache-neutron.template b/files/apache-neutron.template new file mode 100644 index 0000000000..358e87f5da --- /dev/null +++ b/files/apache-neutron.template @@ -0,0 +1,37 @@ +Listen %PUBLICPORT% +LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\" %D(us)" neutron_combined + + + Require all granted + + + + WSGIDaemonProcess neutron-server processes=%APIWORKERS% threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV% + WSGIProcessGroup neutron-server + WSGIScriptAlias / %NEUTRON_BIN%/neutron-api + WSGIApplicationGroup %{GLOBAL} + WSGIPassAuthorization On + ErrorLogFormat "%M" + ErrorLog /var/log/%APACHE_NAME%/neutron.log + CustomLog /var/log/%APACHE_NAME%/neutron_access.log neutron_combined + %SSLENGINE% + %SSLCERTFILE% + %SSLKEYFILE% + + + +%SSLLISTEN% +%SSLLISTEN% %SSLENGINE% +%SSLLISTEN% %SSLCERTFILE% +%SSLLISTEN% %SSLKEYFILE% +%SSLLISTEN% SSLProtocol -all +TLSv1.3 +TLSv1.2 +%SSLLISTEN% + +Alias /networking %NEUTRON_BIN%/neutron-api + + SetHandler wsgi-script + Options +ExecCGI + WSGIProcessGroup neutron-server + WSGIApplicationGroup %{GLOBAL} + WSGIPassAuthorization On + diff --git a/files/apache-nova-api.template b/files/apache-nova-api.template new file mode 100644 index 0000000000..66fcf73cf2 --- /dev/null +++ b/files/apache-nova-api.template @@ -0,0 +1,23 @@ +Listen %PUBLICPORT% + + + WSGIDaemonProcess nova-api processes=%APIWORKERS% threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV% + WSGIProcessGroup nova-api + WSGIScriptAlias / %PUBLICWSGI% + WSGIApplicationGroup %{GLOBAL} + WSGIPassAuthorization On + ErrorLogFormat "%M" + ErrorLog /var/log/%APACHE_NAME%/nova-api.log + %SSLENGINE% + %SSLCERTFILE% + %SSLKEYFILE% + + +Alias /compute %PUBLICWSGI% + + SetHandler wsgi-script + Options +ExecCGI + WSGIProcessGroup nova-api + WSGIApplicationGroup %{GLOBAL} + WSGIPassAuthorization On + diff --git a/files/apache-nova-metadata.template b/files/apache-nova-metadata.template new file mode 100644 index 0000000000..64be03166e --- /dev/null +++ b/files/apache-nova-metadata.template @@ -0,0 +1,23 @@ +Listen %PUBLICPORT% + + + WSGIDaemonProcess nova-metadata processes=%APIWORKERS% threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV% + WSGIProcessGroup nova-metadata + WSGIScriptAlias / %PUBLICWSGI% + WSGIApplicationGroup %{GLOBAL} + WSGIPassAuthorization On + ErrorLogFormat "%M" + ErrorLog /var/log/%APACHE_NAME%/nova-metadata.log + %SSLENGINE% + %SSLCERTFILE% + %SSLKEYFILE% + + +Alias /metadata %PUBLICWSGI% + + SetHandler wsgi-script + Options +ExecCGI + WSGIProcessGroup nova-metadata + WSGIApplicationGroup %{GLOBAL} + WSGIPassAuthorization On + diff --git a/files/apts/general b/files/apts/general deleted file mode 100644 index 31fa752714..0000000000 --- a/files/apts/general +++ /dev/null @@ -1,19 +0,0 @@ -pep8 -pylint -python-pip -screen -unzip -wget -psmisc -git-core -lsof # useful when debugging -openssh-server -vim-nox -locate # useful when debugging -python-virtualenv -python-unittest2 -iputils-ping -wget -curl -tcpdump -euca2ools # only for testing client diff --git a/files/apts/glance b/files/apts/glance deleted file mode 100644 
index 1e87d58911..0000000000 --- a/files/apts/glance +++ /dev/null @@ -1,8 +0,0 @@ -python-eventlet -python-routes -python-greenlet -python-argparse -python-sqlalchemy -python-wsgiref -python-pastedeploy -python-xattr diff --git a/files/apts/horizon b/files/apts/horizon deleted file mode 100644 index aa08a31648..0000000000 --- a/files/apts/horizon +++ /dev/null @@ -1,20 +0,0 @@ -apache2 # NOPRIME -libapache2-mod-wsgi # NOPRIME -python-dateutil -python-paste -python-pastedeploy -python-anyjson -python-routes -python-xattr -python-sqlalchemy -python-webob -python-kombu -pylint -pep8 -python-eventlet -python-nose -python-sphinx -python-mox -python-kombu -python-coverage -python-cherrypy3 # why? diff --git a/files/apts/keystone b/files/apts/keystone deleted file mode 100644 index 6e6d3d53ab..0000000000 --- a/files/apts/keystone +++ /dev/null @@ -1,15 +0,0 @@ -python-setuptools -python-dev -python-lxml -python-pastescript -python-pastedeploy -python-paste -sqlite3 -python-pysqlite2 -python-sqlalchemy -python-webob -python-greenlet -python-routes -libldap2-dev -libsasl2-dev - diff --git a/files/apts/n-vnc b/files/apts/n-vnc deleted file mode 100644 index c8722b9f66..0000000000 --- a/files/apts/n-vnc +++ /dev/null @@ -1 +0,0 @@ -python-numpy diff --git a/files/apts/nova b/files/apts/nova deleted file mode 100644 index f4fe4595d1..0000000000 --- a/files/apts/nova +++ /dev/null @@ -1,46 +0,0 @@ -dnsmasq-base -dnsmasq-utils # for dhcp_release only available in dist:oneiric -kpartx -parted -arping # used for send_arp_for_ha option in nova-network -mysql-server # NOPRIME -python-mysqldb -python-xattr # needed for glance which is needed for nova --- this shouldn't be here -python-lxml # needed for glance which is needed for nova --- this shouldn't be here -kvm -gawk -iptables -ebtables -sqlite3 -sudo -kvm -libvirt-bin # NOPRIME -vlan -curl -rabbitmq-server # NOPRIME -socat # used by ajaxterm -python-mox -python-paste -python-migrate -python-gflags -python-greenlet -python-libvirt -python-libxml2 -python-routes -python-netaddr -python-pastedeploy -python-eventlet -python-cheetah -python-carrot -python-tempita -python-sqlalchemy -python-suds -python-lockfile -python-m2crypto -python-boto -python-kombu - -# Stuff for diablo volumes -iscsitarget # NOPRIME -iscsitarget-dkms # NOPRIME -lvm2 diff --git a/files/apts/swift b/files/apts/swift deleted file mode 100644 index f29837784e..0000000000 --- a/files/apts/swift +++ /dev/null @@ -1,17 +0,0 @@ -curl -gcc -memcached # NOPRIME -python-configobj -python-coverage -python-dev -python-eventlet -python-greenlet -python-netifaces -python-nose -python-pastedeploy -python-setuptools -python-simplejson -python-webob -python-xattr -sqlite3 -xfsprogs diff --git a/files/debs/baremetal b/files/debs/baremetal new file mode 100644 index 0000000000..06ffab650c --- /dev/null +++ b/files/debs/baremetal @@ -0,0 +1,8 @@ +busybox +dnsmasq +ipmitool +make +open-iscsi +qemu-kvm +syslinux +tgt diff --git a/files/debs/ceph b/files/debs/ceph new file mode 100644 index 0000000000..69863abc34 --- /dev/null +++ b/files/debs/ceph @@ -0,0 +1,2 @@ +ceph # NOPRIME +xfsprogs diff --git a/files/debs/cinder b/files/debs/cinder new file mode 100644 index 0000000000..5d390e24bf --- /dev/null +++ b/files/debs/cinder @@ -0,0 +1,4 @@ +lvm2 +qemu-utils +tgt # NOPRIME +thin-provisioning-tools diff --git a/files/debs/dstat b/files/debs/dstat new file mode 100644 index 0000000000..40d00f4aa4 --- /dev/null +++ b/files/debs/dstat @@ -0,0 +1,2 @@ +dstat # dist:bionic +pcp diff --git a/files/debs/general 
b/files/debs/general new file mode 100644 index 0000000000..1e63e4f582 --- /dev/null +++ b/files/debs/general @@ -0,0 +1,37 @@ +apache2 +apache2-dev +bc +bsdmainutils +curl +default-jre-headless # NOPRIME +g++ +gawk +gcc +gettext # used for compiling message catalogs +git +graphviz # needed for docs +iputils-ping +libffi-dev # for pyOpenSSL +libjpeg-dev # Pillow 3.0.0 +libpq-dev # psycopg2 +libssl-dev # for pyOpenSSL +libsystemd-dev # for systemd-python +libxml2-dev # lxml +libxslt1-dev # lxml +libyaml-dev +lsof # useful when debugging +openssh-server +openssl +pkg-config +psmisc +python3-dev +python3-pip +python3-systemd +python3-venv +tar +tcpdump +unzip +uuid-runtime +wget +wget +zlib1g-dev diff --git a/files/debs/horizon b/files/debs/horizon new file mode 100644 index 0000000000..48332893b1 --- /dev/null +++ b/files/debs/horizon @@ -0,0 +1,2 @@ +apache2 # NOPRIME +libapache2-mod-wsgi # NOPRIME diff --git a/files/debs/keystone b/files/debs/keystone new file mode 100644 index 0000000000..1cfa6ffa38 --- /dev/null +++ b/files/debs/keystone @@ -0,0 +1,6 @@ +libkrb5-dev +libldap2-dev +libsasl2-dev +memcached +python3-mysqldb +sqlite3 diff --git a/files/debs/ldap b/files/debs/ldap new file mode 100644 index 0000000000..54896bb845 --- /dev/null +++ b/files/debs/ldap @@ -0,0 +1,3 @@ +ldap-utils +python3-ldap +slapd diff --git a/files/debs/n-cpu b/files/debs/n-cpu new file mode 100644 index 0000000000..54d6fa3fd1 --- /dev/null +++ b/files/debs/n-cpu @@ -0,0 +1,11 @@ +cryptsetup +dosfstools +genisoimage +gir1.2-libosinfo-1.0 +lvm2 # NOPRIME +netcat-openbsd +open-iscsi +python3-guestfs # NOPRIME +qemu-utils +sg3-utils +sysfsutils diff --git a/files/debs/neutron-agent b/files/debs/neutron-agent new file mode 100644 index 0000000000..ea8819e884 --- /dev/null +++ b/files/debs/neutron-agent @@ -0,0 +1 @@ +ipset diff --git a/files/debs/neutron-common b/files/debs/neutron-common new file mode 100644 index 0000000000..f6afc5bf55 --- /dev/null +++ b/files/debs/neutron-common @@ -0,0 +1,16 @@ +acl +dnsmasq-base +dnsmasq-utils # for dhcp_release +ebtables +haproxy # to serve as metadata proxy inside router/dhcp namespaces +iptables +iputils-arping +iputils-ping +mysql-server #NOPRIME +postgresql-server-dev-all +python3-mysqldb +rabbitmq-server # NOPRIME +radvd # NOPRIME +sqlite3 +sudo +vlan diff --git a/files/debs/neutron-l3 b/files/debs/neutron-l3 new file mode 100644 index 0000000000..106a6a35aa --- /dev/null +++ b/files/debs/neutron-l3 @@ -0,0 +1,3 @@ +conntrack +conntrackd +keepalived diff --git a/files/debs/nova b/files/debs/nova new file mode 100644 index 0000000000..5c00ad72d9 --- /dev/null +++ b/files/debs/nova @@ -0,0 +1,21 @@ +conntrack +curl +ebtables +genisoimage # required for config_drive +iptables +iputils-arping +kpartx +libjs-jquery-tablesorter # Needed for coverage html reports +libvirt-clients # NOPRIME +libvirt-daemon-system # NOPRIME +libvirt-dev # NOPRIME +mysql-server # NOPRIME +parted +pm-utils +python3-mysqldb +qemu-kvm # NOPRIME +rabbitmq-server # NOPRIME +socat # used by ajaxterm +sqlite3 +sudo +vlan diff --git a/files/debs/openvswitch b/files/debs/openvswitch new file mode 100644 index 0000000000..4c0af4ae2d --- /dev/null +++ b/files/debs/openvswitch @@ -0,0 +1,3 @@ +fakeroot +make +openvswitch-switch diff --git a/files/debs/os-brick b/files/debs/os-brick new file mode 100644 index 0000000000..4148b0c421 --- /dev/null +++ b/files/debs/os-brick @@ -0,0 +1,3 @@ +lsscsi +open-iscsi +open-iscsi-utils # Deprecated since quantal dist:precise diff --git a/files/debs/ovn b/files/debs/ovn 
new file mode 100644 index 0000000000..81eea5e633 --- /dev/null +++ b/files/debs/ovn @@ -0,0 +1,3 @@ +ovn-central +ovn-controller-vtep +ovn-host diff --git a/files/debs/q-agt b/files/debs/q-agt new file mode 120000 index 0000000000..99fe353094 --- /dev/null +++ b/files/debs/q-agt @@ -0,0 +1 @@ +neutron-agent \ No newline at end of file diff --git a/files/debs/q-l3 b/files/debs/q-l3 new file mode 120000 index 0000000000..0a5ca2a45f --- /dev/null +++ b/files/debs/q-l3 @@ -0,0 +1 @@ +neutron-l3 \ No newline at end of file diff --git a/files/debs/swift b/files/debs/swift new file mode 100644 index 0000000000..67c6c8ddb4 --- /dev/null +++ b/files/debs/swift @@ -0,0 +1,7 @@ +curl +liberasurecode-dev +make +memcached +rsync +sqlite3 +xfsprogs diff --git a/files/debs/tls-proxy b/files/debs/tls-proxy new file mode 100644 index 0000000000..5bd8e213a2 --- /dev/null +++ b/files/debs/tls-proxy @@ -0,0 +1 @@ +apache2 diff --git a/files/glance-api.conf b/files/glance-api.conf deleted file mode 100644 index 1bbd58e061..0000000000 --- a/files/glance-api.conf +++ /dev/null @@ -1,178 +0,0 @@ -[DEFAULT] -# Show more verbose log output (sets INFO log level output) -verbose = True - -# Show debugging output in logs (sets DEBUG log level output) -debug = True - -# Which backend store should Glance use by default is not specified -# in a request to add a new image to Glance? Default: 'file' -# Available choices are 'file', 'swift', and 's3' -default_store = file - -# Address to bind the API server -bind_host = 0.0.0.0 - -# Port the bind the API server to -bind_port = 9292 - -# Address to find the registry server -registry_host = 0.0.0.0 - -# Port the registry server is listening on -registry_port = 9191 - -# Log to this file. Make sure you do not set the same log -# file for both the API and registry servers! -#log_file = %DEST%/glance/api.log - -# Send logs to syslog (/dev/log) instead of to file specified by `log_file` -use_syslog = %SYSLOG% - -# ============ Notification System Options ===================== - -# Notifications can be sent when images are create, updated or deleted. -# There are three methods of sending notifications, logging (via the -# log_file directive), rabbit (via a rabbitmq queue) or noop (no -# notifications sent, the default) -notifier_strategy = noop - -# Configuration options if sending notifications via rabbitmq (these are -# the defaults) -rabbit_host = localhost -rabbit_port = 5672 -rabbit_use_ssl = false -rabbit_userid = guest -rabbit_password = guest -rabbit_virtual_host = / -rabbit_notification_topic = glance_notifications - -# ============ Filesystem Store Options ======================== - -# Directory that the Filesystem backend store -# writes image data to -filesystem_store_datadir = %DEST%/glance/images/ - -# ============ Swift Store Options ============================= - -# Address where the Swift authentication service lives -swift_store_auth_address = 127.0.0.1:8080/v1.0/ - -# User to authenticate against the Swift authentication service -swift_store_user = jdoe - -# Auth key for the user authenticating against the -# Swift authentication service -swift_store_key = a86850deb2742ec3cb41518e26aa2d89 - -# Container within the account that the account should use -# for storing images in Swift -swift_store_container = glance - -# Do we create the container if it does not exist? -swift_store_create_container_on_put = False - -# What size, in MB, should Glance start chunking image files -# and do a large object manifest in Swift? 
By default, this is -# the maximum object size in Swift, which is 5GB -swift_store_large_object_size = 5120 - -# When doing a large object manifest, what size, in MB, should -# Glance write chunks to Swift? This amount of data is written -# to a temporary disk buffer during the process of chunking -# the image file, and the default is 200MB -swift_store_large_object_chunk_size = 200 - -# Whether to use ServiceNET to communicate with the Swift storage servers. -# (If you aren't RACKSPACE, leave this False!) -# -# To use ServiceNET for authentication, prefix hostname of -# `swift_store_auth_address` with 'snet-'. -# Ex. https://example.com/v1.0/ -> https://snet-example.com/v1.0/ -swift_enable_snet = False - -# ============ S3 Store Options ============================= - -# Address where the S3 authentication service lives -s3_store_host = 127.0.0.1:8080/v1.0/ - -# User to authenticate against the S3 authentication service -s3_store_access_key = <20-char AWS access key> - -# Auth key for the user authenticating against the -# S3 authentication service -s3_store_secret_key = <40-char AWS secret key> - -# Container within the account that the account should use -# for storing images in S3. Note that S3 has a flat namespace, -# so you need a unique bucket name for your glance images. An -# easy way to do this is append your AWS access key to "glance". -# S3 buckets in AWS *must* be lowercased, so remember to lowercase -# your AWS access key if you use it in your bucket name below! -s3_store_bucket = glance - -# Do we create the bucket if it does not exist? -s3_store_create_bucket_on_put = False - -# ============ Image Cache Options ======================== - -image_cache_enabled = False - -# Directory that the Image Cache writes data to -# Make sure this is also set in glance-pruner.conf -image_cache_datadir = /var/lib/glance/image-cache/ - -# Number of seconds after which we should consider an incomplete image to be -# stalled and eligible for reaping -image_cache_stall_timeout = 86400 - -# ============ Delayed Delete Options ============================= - -# Turn on/off delayed delete -delayed_delete = False - -# Delayed delete time in seconds -scrub_time = 43200 - -# Directory that the scrubber will use to remind itself of what to delete -# Make sure this is also set in glance-scrubber.conf -scrubber_datadir = /var/lib/glance/scrubber - -[pipeline:glance-api] -#pipeline = versionnegotiation context apiv1app -# NOTE: use the following pipeline for keystone -pipeline = versionnegotiation authtoken context apiv1app - -# To enable Image Cache Management API replace pipeline with below: -# pipeline = versionnegotiation context imagecache apiv1app -# NOTE: use the following pipeline for keystone auth (with caching) -# pipeline = versionnegotiation authtoken context imagecache apiv1app - -[pipeline:versions] -pipeline = versionsapp - -[app:versionsapp] -paste.app_factory = glance.api.versions:app_factory - -[app:apiv1app] -paste.app_factory = glance.api.v1.router:app_factory - -[filter:versionnegotiation] -paste.filter_factory = glance.api.middleware.version_negotiation:filter_factory - -[filter:imagecache] -paste.filter_factory = glance.api.middleware.image_cache:filter_factory - -[filter:context] -paste.filter_factory = glance.common.context:filter_factory - -[filter:authtoken] -paste.filter_factory = keystone.middleware.auth_token:filter_factory -service_protocol = http -service_host = 127.0.0.1 -service_port = 5000 -auth_host = 127.0.0.1 -auth_port = 35357 -auth_protocol = http -auth_uri = 
http://127.0.0.1:5000/ -admin_token = %SERVICE_TOKEN% diff --git a/files/glance-registry.conf b/files/glance-registry.conf deleted file mode 100644 index 1e04186073..0000000000 --- a/files/glance-registry.conf +++ /dev/null @@ -1,70 +0,0 @@ -[DEFAULT] -# Show more verbose log output (sets INFO log level output) -verbose = True - -# Show debugging output in logs (sets DEBUG log level output) -debug = True - -# Address to bind the registry server -bind_host = 0.0.0.0 - -# Port the bind the registry server to -bind_port = 9191 - -# Log to this file. Make sure you do not set the same log -# file for both the API and registry servers! -#log_file = %DEST%/glance/registry.log - -# Where to store images -filesystem_store_datadir = %DEST%/glance/images - -# Send logs to syslog (/dev/log) instead of to file specified by `log_file` -use_syslog = %SYSLOG% - -# SQLAlchemy connection string for the reference implementation -# registry server. Any valid SQLAlchemy connection string is fine. -# See: http://www.sqlalchemy.org/docs/05/reference/sqlalchemy/connections.html#sqlalchemy.create_engine -sql_connection = %SQL_CONN% - -# Period in seconds after which SQLAlchemy should reestablish its connection -# to the database. -# -# MySQL uses a default `wait_timeout` of 8 hours, after which it will drop -# idle connections. This can result in 'MySQL Gone Away' exceptions. If you -# notice this, you can lower this value to ensure that SQLAlchemy reconnects -# before MySQL can drop the connection. -sql_idle_timeout = 3600 - -# Limit the api to return `param_limit_max` items in a call to a container. If -# a larger `limit` query param is provided, it will be reduced to this value. -api_limit_max = 1000 - -# If a `limit` query param is not provided in an api request, it will -# default to `limit_param_default` -limit_param_default = 25 - -[pipeline:glance-registry] -#pipeline = context registryapp -# NOTE: use the following pipeline for keystone -pipeline = authtoken keystone_shim context registryapp - -[app:registryapp] -paste.app_factory = glance.registry.server:app_factory - -[filter:context] -context_class = glance.registry.context.RequestContext -paste.filter_factory = glance.common.context:filter_factory - -[filter:authtoken] -paste.filter_factory = keystone.middleware.auth_token:filter_factory -service_protocol = http -service_host = 127.0.0.1 -service_port = 5000 -auth_host = 127.0.0.1 -auth_port = 35357 -auth_protocol = http -auth_uri = http://127.0.0.1:5000/ -admin_token = %SERVICE_TOKEN% - -[filter:keystone_shim] -paste.filter_factory = keystone.middleware.glance_auth_token:filter_factory diff --git a/files/horizon_settings.py b/files/horizon_settings.py deleted file mode 100644 index 05ddfe7bf9..0000000000 --- a/files/horizon_settings.py +++ /dev/null @@ -1,110 +0,0 @@ -import os - -DEBUG = True -TEMPLATE_DEBUG = DEBUG -PROD = False -USE_SSL = False - -LOCAL_PATH = os.path.dirname(os.path.abspath(__file__)) - -# FIXME: We need to change this to mysql, instead of sqlite. 
-DATABASES = { - 'default': { - 'ENGINE': 'django.db.backends.sqlite3', - 'NAME': os.path.join(LOCAL_PATH, 'dashboard_openstack.sqlite3'), - 'TEST_NAME': os.path.join(LOCAL_PATH, 'test.sqlite3'), - }, -} - -# The default values for these two settings seem to cause issues with apache -CACHE_BACKEND = 'dummy://' -SESSION_ENGINE = 'django.contrib.sessions.backends.cached_db' - -# Send email to the console by default -EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' -# Or send them to /dev/null -#EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend' - -# django-mailer uses a different settings attribute -MAILER_EMAIL_BACKEND = EMAIL_BACKEND - -# Configure these for your outgoing email host -# EMAIL_HOST = 'smtp.my-company.com' -# EMAIL_PORT = 25 -# EMAIL_HOST_USER = 'djangomail' -# EMAIL_HOST_PASSWORD = 'top-secret!' - -HORIZON_CONFIG = { - 'dashboards': ('nova', 'syspanel', 'settings',), - 'default_dashboard': 'nova', - 'user_home': 'dashboard.views.user_home', -} - -OPENSTACK_HOST = "127.0.0.1" -OPENSTACK_KEYSTONE_URL = "http://%s:5000/v2.0" % OPENSTACK_HOST -# FIXME: this is only needed until keystone fixes its GET /tenants call -# so that it doesn't return everything for admins -OPENSTACK_KEYSTONE_ADMIN_URL = "http://%s:35357/v2.0" % OPENSTACK_HOST -OPENSTACK_KEYSTONE_DEFAULT_ROLE = "Member" - -SWIFT_PAGINATE_LIMIT = 100 - -# Configure quantum connection details for networking -QUANTUM_ENABLED = False -QUANTUM_URL = '%s' % OPENSTACK_HOST -QUANTUM_PORT = '9696' -QUANTUM_TENANT = '1234' -QUANTUM_CLIENT_VERSION='0.1' - -# If you have external monitoring links, eg: -# EXTERNAL_MONITORING = [ -# ['Nagios','http://foo.com'], -# ['Ganglia','http://bar.com'], -# ] - -#LOGGING = { -# 'version': 1, -# # When set to True this will disable all logging except -# # for loggers specified in this configuration dictionary. Note that -# # if nothing is specified here and disable_existing_loggers is True, -# # django.db.backends will still log unless it is disabled explicitly. -# 'disable_existing_loggers': False, -# 'handlers': { -# 'null': { -# 'level': 'DEBUG', -# 'class': 'django.utils.log.NullHandler', -# }, -# 'console': { -# # Set the level to "DEBUG" for verbose output logging. -# 'level': 'INFO', -# 'class': 'logging.StreamHandler', -# }, -# }, -# 'loggers': { -# # Logging from django.db.backends is VERY verbose, send to null -# # by default. -# 'django.db.backends': { -# 'handlers': ['null'], -# 'propagate': False, -# }, -# 'horizon': { -# 'handlers': ['console'], -# 'propagate': False, -# }, -# 'novaclient': { -# 'handlers': ['console'], -# 'propagate': False, -# }, -# 'keystoneclient': { -# 'handlers': ['console'], -# 'propagate': False, -# }, -# 'nose.plugins.manager': { -# 'handlers': ['console'], -# 'propagate': False, -# } -# } -#} - -# How much ram on each compute host? -COMPUTE_HOST_RAM_GB = 16 diff --git a/files/keystone.conf b/files/keystone.conf deleted file mode 100644 index 0c0d0e26d3..0000000000 --- a/files/keystone.conf +++ /dev/null @@ -1,116 +0,0 @@ -[DEFAULT] -# Show more verbose log output (sets INFO log level output) -verbose = False - -# Show debugging output in logs (sets DEBUG log level output) -debug = False - -# Which backend store should Keystone use by default. -# Default: 'sqlite' -# Available choices are 'sqlite' [future will include LDAP, PAM, etc] -default_store = sqlite - -# Log to this file. Make sure you do not set the same log -# file for both the API and registry servers! 
-log_file = %DEST%/keystone/keystone.log - -# List of backends to be configured -backends = keystone.backends.sqlalchemy -#For LDAP support, add: ,keystone.backends.ldap - -# Dictionary Maps every service to a header.Missing services would get header -# X_(SERVICE_NAME) Key => Service Name, Value => Header Name -service-header-mappings = { - 'nova' : 'X-Server-Management-Url', - 'swift' : 'X-Storage-Url', - 'cdn' : 'X-CDN-Management-Url'} - -#List of extensions currently supported -extensions= osksadm,oskscatalog - -# Address to bind the API server -# TODO Properties defined within app not available via pipeline. -service_host = 0.0.0.0 - -# Port the bind the API server to -service_port = 5000 - -# SSL for API server -service_ssl = False - -# Address to bind the Admin API server -admin_host = 0.0.0.0 - -# Port the bind the Admin API server to -admin_port = 35357 - -# SSL for API Admin server -admin_ssl = False - -# Keystone certificate file (modify as needed) -# Only required if *_ssl is set to True -certfile = /etc/keystone/ssl/certs/keystone.pem - -# Keystone private key file (modify as needed) -# Only required if *_ssl is set to True -keyfile = /etc/keystone/ssl/private/keystonekey.pem - -# Keystone trusted CA certificates (modify as needed) -# Only required if *_ssl is set to True -ca_certs = /etc/keystone/ssl/certs/ca.pem - -# Client certificate required -# Only relevant if *_ssl is set to True -cert_required = True - -#Role that allows to perform admin operations. -keystone-admin-role = Admin - -#Role that allows to perform service admin operations. -keystone-service-admin-role = KeystoneServiceAdmin - -#Tells whether password user need to be hashed in the backend -hash-password = True - -[keystone.backends.sqlalchemy] -# SQLAlchemy connection string for the reference implementation registry -# server. Any valid SQLAlchemy connection string is fine. -# See: http://bit.ly/ideIpI -sql_connection = %SQL_CONN% -backend_entities = ['UserRoleAssociation', 'Endpoints', 'Role', 'Tenant', - 'User', 'Credentials', 'EndpointTemplates', 'Token', - 'Service'] - -# Period in seconds after which SQLAlchemy should reestablish its connection -# to the database. 
-sql_idle_timeout = 30 - -[pipeline:admin] -pipeline = - urlrewritefilter - admin_api - -[pipeline:keystone-legacy-auth] -pipeline = - urlrewritefilter - legacy_auth - RAX-KEY-extension - service_api - -[app:service_api] -paste.app_factory = keystone.server:service_app_factory - -[app:admin_api] -paste.app_factory = keystone.server:admin_app_factory - -[filter:urlrewritefilter] -paste.filter_factory = keystone.middleware.url:filter_factory - -[filter:legacy_auth] -paste.filter_factory = keystone.frontends.legacy_token_auth:filter_factory - -[filter:RAX-KEY-extension] -paste.filter_factory = keystone.contrib.extensions.service.raxkey.frontend:filter_factory - -[filter:debug] -paste.filter_factory = keystone.common.wsgi:debug_filter_factory diff --git a/files/keystone_data.sh b/files/keystone_data.sh deleted file mode 100755 index be2d5767cd..0000000000 --- a/files/keystone_data.sh +++ /dev/null @@ -1,52 +0,0 @@ -#!/bin/bash -BIN_DIR=${BIN_DIR:-.} -# Tenants -$BIN_DIR/keystone-manage $* tenant add admin -$BIN_DIR/keystone-manage $* tenant add demo -$BIN_DIR/keystone-manage $* tenant add invisible_to_admin - -# Users -$BIN_DIR/keystone-manage $* user add admin %ADMIN_PASSWORD% -$BIN_DIR/keystone-manage $* user add demo %ADMIN_PASSWORD% - -# Roles -$BIN_DIR/keystone-manage $* role add Admin -$BIN_DIR/keystone-manage $* role add Member -$BIN_DIR/keystone-manage $* role add KeystoneAdmin -$BIN_DIR/keystone-manage $* role add KeystoneServiceAdmin -$BIN_DIR/keystone-manage $* role add sysadmin -$BIN_DIR/keystone-manage $* role add netadmin -$BIN_DIR/keystone-manage $* role grant Admin admin admin -$BIN_DIR/keystone-manage $* role grant Member demo demo -$BIN_DIR/keystone-manage $* role grant sysadmin demo demo -$BIN_DIR/keystone-manage $* role grant netadmin demo demo -$BIN_DIR/keystone-manage $* role grant Member demo invisible_to_admin -$BIN_DIR/keystone-manage $* role grant Admin admin demo -$BIN_DIR/keystone-manage $* role grant Admin admin -$BIN_DIR/keystone-manage $* role grant KeystoneAdmin admin -$BIN_DIR/keystone-manage $* role grant KeystoneServiceAdmin admin - -# Services -$BIN_DIR/keystone-manage $* service add nova compute "Nova Compute Service" -$BIN_DIR/keystone-manage $* service add glance image "Glance Image Service" -$BIN_DIR/keystone-manage $* service add keystone identity "Keystone Identity Service" -if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then - $BIN_DIR/keystone-manage $* service add swift object-store "Swift Service" -fi - -#endpointTemplates -$BIN_DIR/keystone-manage $* endpointTemplates add RegionOne nova http://%HOST_IP%:8774/v1.1/%tenant_id% http://%HOST_IP%:8774/v1.1/%tenant_id% http://%HOST_IP%:8774/v1.1/%tenant_id% 1 1 -$BIN_DIR/keystone-manage $* endpointTemplates add RegionOne glance http://%HOST_IP%:9292/v1.1/%tenant_id% http://%HOST_IP%:9292/v1.1/%tenant_id% http://%HOST_IP%:9292/v1.1/%tenant_id% 1 1 -$BIN_DIR/keystone-manage $* endpointTemplates add RegionOne keystone http://%HOST_IP%:5000/v2.0 http://%HOST_IP%:35357/v2.0 http://%HOST_IP%:5000/v2.0 1 1 -if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then - $BIN_DIR/keystone-manage $* endpointTemplates add RegionOne swift http://%HOST_IP%:8080/v1/AUTH_%tenant_id% http://%HOST_IP%:8080/ http://%HOST_IP%:8080/v1/AUTH_%tenant_id% 1 1 -fi - -# Tokens -$BIN_DIR/keystone-manage $* token add %SERVICE_TOKEN% admin admin 2015-02-05T00:00 - -# EC2 related creds - note we are setting the secret key to ADMIN_PASSWORD -# but keystone doesn't parse them - it is just a blob from keystone's -# point of view -$BIN_DIR/keystone-manage 
$* credentials add admin EC2 'admin' '%ADMIN_PASSWORD%' admin || echo "no support for adding credentials" -$BIN_DIR/keystone-manage $* credentials add demo EC2 'demo' '%ADMIN_PASSWORD%' demo || echo "no support for adding credentials" diff --git a/files/ldap/keystone.ldif.in b/files/ldap/keystone.ldif.in new file mode 100644 index 0000000000..cf51907cf6 --- /dev/null +++ b/files/ldap/keystone.ldif.in @@ -0,0 +1,26 @@ +dn: ${BASE_DN} +objectClass: dcObject +objectClass: organizationalUnit +dc: ${BASE_DC} +ou: ${BASE_DC} + +dn: ou=UserGroups,${BASE_DN} +objectClass: organizationalUnit +ou: UserGroups + +dn: ou=Users,${BASE_DN} +objectClass: organizationalUnit +ou: Users + +dn: ou=Roles,${BASE_DN} +objectClass: organizationalUnit +ou: Roles + +dn: ou=Projects,${BASE_DN} +objectClass: organizationalUnit +ou: Projects + +dn: cn=9fe2ff9ee4384b1894a90878d3e92bab,ou=Roles,${BASE_DN} +objectClass: organizationalRole +ou: _member_ +cn: 9fe2ff9ee4384b1894a90878d3e92bab diff --git a/files/ldap/manager.ldif.in b/files/ldap/manager.ldif.in new file mode 100644 index 0000000000..d3b9be8b6e --- /dev/null +++ b/files/ldap/manager.ldif.in @@ -0,0 +1,15 @@ +dn: olcDatabase={${LDAP_OLCDB_NUMBER}}${LDAP_OLCDB_TYPE},cn=config +changetype: modify +replace: olcSuffix +olcSuffix: ${BASE_DN} +- +replace: olcRootDN +olcRootDN: ${MANAGER_DN} +- +${LDAP_ROOTPW_COMMAND}: olcRootPW +olcRootPW: ${SLAPPASS} +- +replace: olcDbIndex +olcDbIndex: objectClass eq +olcDbIndex: default pres,eq +olcDbIndex: cn,sn,givenName diff --git a/files/ldap/suse-base-config.ldif.in b/files/ldap/suse-base-config.ldif.in new file mode 100644 index 0000000000..00256ee9d8 --- /dev/null +++ b/files/ldap/suse-base-config.ldif.in @@ -0,0 +1,21 @@ +dn: cn=config +objectClass: olcGlobal +cn: config +olcArgsFile: /var/run/slapd/slapd.args +olcAuthzRegexp: {0}gidNumber=0\+uidNumber=0,cn=peercred,cn=external,cn=auth dn + :cn=config +olcPidFile: /var/run/slapd/slapd.pid +olcSizeLimit: 10000 + +dn: cn=schema,cn=config +objectClass: olcSchemaConfig +cn: schema + +include: file:///etc/openldap/schema/core.ldif +include: file:///etc/openldap/schema/cosine.ldif +include: file:///etc/openldap/schema/inetorgperson.ldif + +dn: olcDatabase={1}hdb,cn=config +objectClass: olcHdbConfig +olcDbDirectory: /var/lib/ldap +olcSuffix: ${BASE_DN} diff --git a/files/ldap/user.ldif.in b/files/ldap/user.ldif.in new file mode 100644 index 0000000000..16a980757d --- /dev/null +++ b/files/ldap/user.ldif.in @@ -0,0 +1,23 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. 
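
The new files/ldap/*.ldif.in files are templates: ${BASE_DN}, ${BASE_DC}, ${MANAGER_DN}, ${SLAPPASS} and the olcDatabase fields are placeholders that DevStack's LDAP setup code substitutes before the entries are loaded into slapd. That setup code is not part of this hunk, so the following is only a rough sketch of rendering keystone.ldif.in by hand with standard OpenLDAP tools; the suffix, manager DN and the use of envsubst are assumptions, not DevStack's actual mechanism.

    #!/bin/bash
    # Hedged sketch: render files/ldap/keystone.ldif.in and load it into a
    # running slapd. The suffix and manager DN below are made-up examples.
    set -o errexit

    export BASE_DC="openstack"
    export BASE_DN="dc=openstack,dc=org"
    MANAGER_DN="cn=Manager,${BASE_DN}"

    # Expand the ${BASE_DN}/${BASE_DC} placeholders in the template.
    envsubst < files/ldap/keystone.ldif.in > /tmp/keystone.ldif

    # Add the rendered entries, prompting for the manager password.
    ldapadd -x -D "$MANAGER_DN" -W -f /tmp/keystone.ldif
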
+ +# Demo LDAP user +dn: cn=demo,ou=Users,${BASE_DN} +cn: demo +displayName: demo +givenName: demo +mail: demo@openstack.org +objectClass: inetOrgPerson +objectClass: top +sn: demo +uid: demo +userPassword: demo diff --git a/files/lvm-backing-file.template b/files/lvm-backing-file.template new file mode 100644 index 0000000000..dc519d7745 --- /dev/null +++ b/files/lvm-backing-file.template @@ -0,0 +1,16 @@ +[Unit] +Description=Activate LVM backing file %BACKING_FILE% +DefaultDependencies=no +After=systemd-udev-settle.service +Before=lvm2-activation-early.service +Wants=systemd-udev-settle.service + +[Service] +ExecStart=/sbin/losetup --find --show %DIRECTIO% %BACKING_FILE% +ExecStop=/bin/sh -c '/sbin/losetup -d $$(/sbin/losetup --associated %BACKING_FILE% -O NAME -n)' +RemainAfterExit=yes +Type=oneshot + +[Install] +WantedBy=local-fs.target +Also=systemd-udev-settle.service diff --git a/files/nova-api-paste.ini b/files/nova-api-paste.ini deleted file mode 100644 index 7f27fdcbf9..0000000000 --- a/files/nova-api-paste.ini +++ /dev/null @@ -1,138 +0,0 @@ -############ -# Metadata # -############ -[composite:metadata] -use = egg:Paste#urlmap -/: metaversions -/latest: meta -/2007-01-19: meta -/2007-03-01: meta -/2007-08-29: meta -/2007-10-10: meta -/2007-12-15: meta -/2008-02-01: meta -/2008-09-01: meta -/2009-04-04: meta - -[pipeline:metaversions] -pipeline = ec2faultwrap logrequest metaverapp - -[pipeline:meta] -pipeline = ec2faultwrap logrequest metaapp - -[app:metaverapp] -paste.app_factory = nova.api.metadata.handler:Versions.factory - -[app:metaapp] -paste.app_factory = nova.api.metadata.handler:MetadataRequestHandler.factory - -####### -# EC2 # -####### - -[composite:ec2] -use = egg:Paste#urlmap -/services/Cloud: ec2cloud -/services/Admin: ec2admin - -[pipeline:ec2cloud] -pipeline = ec2faultwrap logrequest totoken authtoken keystonecontext cloudrequest authorizer ec2executor - -[pipeline:ec2admin] -pipeline = ec2faultwrap logrequest totoken authtoken keystonecontext adminrequest authorizer ec2executor - -[pipeline:ec2metadata] -pipeline = ec2faultwrap logrequest ec2md - -[pipeline:ec2versions] -pipeline = ec2faultwrap logrequest ec2ver - -[filter:ec2faultwrap] -paste.filter_factory = nova.api.ec2:FaultWrapper.factory - -[filter:logrequest] -paste.filter_factory = nova.api.ec2:RequestLogging.factory - -[filter:ec2lockout] -paste.filter_factory = nova.api.ec2:Lockout.factory - -[filter:totoken] -paste.filter_factory = keystone.middleware.ec2_token:EC2Token.factory - -[filter:ec2noauth] -paste.filter_factory = nova.api.ec2:NoAuth.factory - -[filter:authenticate] -paste.filter_factory = nova.api.ec2:Authenticate.factory - -[filter:cloudrequest] -controller = nova.api.ec2.cloud.CloudController -paste.filter_factory = nova.api.ec2:Requestify.factory - -[filter:adminrequest] -controller = nova.api.ec2.admin.AdminController -paste.filter_factory = nova.api.ec2:Requestify.factory - -[filter:authorizer] -paste.filter_factory = nova.api.ec2:Authorizer.factory - -[app:ec2executor] -paste.app_factory = nova.api.ec2:Executor.factory - -############# -# Openstack # -############# - -[composite:osapi] -use = call:nova.api.openstack.v2.urlmap:urlmap_factory -/: osversions -/v1.1: openstack_api_v2 -/v2: openstack_api_v2 - -[pipeline:openstack_api_v2] -pipeline = faultwrap authtoken keystonecontext ratelimit serialize extensions osapi_app_v2 - -[filter:faultwrap] -paste.filter_factory = nova.api.openstack.v2:FaultWrapper.factory - -[filter:auth] -paste.filter_factory = 
nova.api.openstack.v2.auth:AuthMiddleware.factory - -[filter:noauth] -paste.filter_factory = nova.api.openstack.v2.auth:NoAuthMiddleware.factory - -[filter:ratelimit] -paste.filter_factory = nova.api.openstack.v2.limits:RateLimitingMiddleware.factory - -[filter:serialize] -paste.filter_factory = nova.api.openstack.wsgi:LazySerializationMiddleware.factory - -[filter:extensions] -paste.filter_factory = nova.api.openstack.v2.extensions:ExtensionMiddleware.factory - -[app:osapi_app_v2] -paste.app_factory = nova.api.openstack.v2:APIRouter.factory - -[pipeline:osversions] -pipeline = faultwrap osversionapp - -[app:osversionapp] -paste.app_factory = nova.api.openstack.v2.versions:Versions.factory - -########## -# Shared # -########## - -[filter:keystonecontext] -paste.filter_factory = keystone.middleware.nova_keystone_context:NovaKeystoneContext.factory - -[filter:authtoken] -paste.filter_factory = keystone.middleware.auth_token:filter_factory -service_protocol = http -service_host = 127.0.0.1 -service_port = 5000 -auth_host = 127.0.0.1 -auth_port = 35357 -auth_protocol = http -auth_uri = http://127.0.0.1:5000/ -admin_token = %SERVICE_TOKEN% diff --git a/files/openstack-cli-server/openstack b/files/openstack-cli-server/openstack new file mode 100755 index 0000000000..47fbfc5e17 --- /dev/null +++ b/files/openstack-cli-server/openstack @@ -0,0 +1,118 @@ +#!/usr/bin/env python3 +# Copyright 2016 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import socket +import sys +import os +import os.path +import json + +server_address = "/tmp/openstack.sock" + +sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + +try: + sock.connect(server_address) +except socket.error as msg: + print(msg, file=sys.stderr) + sys.exit(1) + + +def send(sock, doc): + jdoc = json.dumps(doc) + sock.send(b'%d\n' % len(jdoc)) + sock.sendall(jdoc.encode('utf-8')) + +def recv(sock): + length_str = b'' + + char = sock.recv(1) + if len(char) == 0: + print("Unexpected end of file", file=sys.stderr) + sys.exit(1) + + while char != b'\n': + length_str += char + char = sock.recv(1) + if len(char) == 0: + print("Unexpected end of file", file=sys.stderr) + sys.exit(1) + + total = int(length_str) + + # use a memoryview to receive the data chunk by chunk efficiently + jdoc = memoryview(bytearray(total)) + next_offset = 0 + while total - next_offset > 0: + recv_size = sock.recv_into(jdoc[next_offset:], total - next_offset) + next_offset += recv_size + try: + doc = json.loads(jdoc.tobytes()) + except (TypeError, ValueError) as e: + raise Exception('Data received was not in JSON format') + return doc + +try: + env = {} + passenv = ["CINDER_VERSION", + "OS_AUTH_URL", + "OS_NO_CACHE", + "OS_PASSWORD", + "OS_PROJECT_NAME", + "OS_REGION_NAME", + "OS_TENANT_NAME", + "OS_USERNAME", + "OS_VOLUME_API_VERSION", + "OS_CLOUD"] + for name in passenv: + if name in os.environ: + env[name] = os.environ[name] + + cmd = { + "app": os.path.basename(sys.argv[0]), + "env": env, + "argv": sys.argv[1:] + } + try: + image_idx = sys.argv.index('image') + create_idx = sys.argv.index('create') + missing_file = image_idx < create_idx and \ + not any(x.startswith('--file') for x in sys.argv) + except ValueError: + missing_file = False + + if missing_file: + # This means we were called with an image create command, but were + # not provided a --file option. That likely means we're being passed + # the image data to stdin, which won't work because we do not proxy + # stdin to the server. So, we just reject the operation and ask the + # caller to provide the file with --file instead. + # We've already connected to the server, we need to send it some dummy + # data so it doesn't wait forever. + send(sock, {}) + print('Image create without --file is not allowed in server mode', + file=sys.stderr) + sys.exit(1) + else: + send(sock, cmd) + + doc = recv(sock) + if doc["stdout"] != b'': + print(doc["stdout"], end='') + if doc["stderr"] != b'': + print(doc["stderr"], file=sys.stderr) + sys.exit(doc["status"]) +finally: + sock.close() diff --git a/files/openstack-cli-server/openstack-cli-server b/files/openstack-cli-server/openstack-cli-server new file mode 100755 index 0000000000..f3d2747e52 --- /dev/null +++ b/files/openstack-cli-server/openstack-cli-server @@ -0,0 +1,118 @@ +#!/usr/bin/env python3 +# Copyright 2016 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
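
The client above and the openstack-cli-server script that follows talk over the Unix socket /tmp/openstack.sock with a small framing protocol: each message is the JSON document's length in bytes, a newline, then the JSON itself, and the reply carries stdout, stderr and the exit status. The client also rejects "image create" without --file, since stdin is not proxied. How DevStack wires this in (PATH handling, starting the server) is defined outside this patch, so the following is only a hedged sketch of exercising the pair by hand, assuming a DevStack checkout at $HOME/devstack, an installed python-openstackclient and a devstack-admin entry in clouds.yaml.

    #!/bin/bash
    # Hedged sketch: run the persistent CLI server and route "openstack"
    # calls through the thin client. Paths and cloud name are assumptions.
    set -o errexit

    DEVSTACK_DIR=${DEVSTACK_DIR:-$HOME/devstack}
    CLI_SERVER_DIR=$DEVSTACK_DIR/files/openstack-cli-server

    # Start the long-running server; it binds /tmp/openstack.sock.
    "$CLI_SERVER_DIR/openstack-cli-server" >/tmp/openstack-cli-server.log 2>&1 &

    # Put the thin client first on PATH so "openstack" no longer pays the
    # openstackclient start-up cost on every invocation.
    export PATH="$CLI_SERVER_DIR:$PATH"
    export OS_CLOUD=devstack-admin

    openstack token issue
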
+ +import socket +import sys +import os +import json + +from openstackclient import shell as osc_shell +from io import StringIO + +server_address = "/tmp/openstack.sock" + +try: + os.unlink(server_address) +except OSError: + if os.path.exists(server_address): + raise + +sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) +print('starting up on %s' % server_address, file=sys.stderr) +sock.bind(server_address) + +# Listen for incoming connections +sock.listen(1) + +def send(sock, doc): + jdoc = json.dumps(doc) + sock.send(b'%d\n' % len(jdoc)) + sock.sendall(jdoc.encode('utf-8')) + +def recv(sock): + length_str = b'' + char = sock.recv(1) + while char != b'\n': + length_str += char + char = sock.recv(1) + + total = int(length_str) + + # use a memoryview to receive the data chunk by chunk efficiently + jdoc = memoryview(bytearray(total)) + next_offset = 0 + while total - next_offset > 0: + recv_size = sock.recv_into(jdoc[next_offset:], total - next_offset) + next_offset += recv_size + try: + doc = json.loads(jdoc.tobytes()) + except (TypeError, ValueError) as e: + raise Exception('Data received was not in JSON format') + return doc + +while True: + csock, client_address = sock.accept() + try: + doc = recv(csock) + + print("%s %s" % (doc["app"], doc["argv"]), file=sys.stderr) + oldenv = {} + for name in doc["env"].keys(): + oldenv[name] = os.environ.get(name, None) + os.environ[name] = doc["env"][name] + + try: + old_stdout = sys.stdout + old_stderr = sys.stderr + my_stdout = sys.stdout = StringIO() + my_stderr = sys.stderr = StringIO() + + class Exit(BaseException): + def __init__(self, status): + self.status = status + + def noexit(stat): + raise Exit(stat) + + sys.exit = noexit + + if doc["app"] == "openstack": + sh = osc_shell.OpenStackShell() + ret = sh.run(doc["argv"]) + else: + print("Unknown application %s" % doc["app"], file=sys.stderr) + ret = 1 + except Exit as e: + ret = e.status + finally: + sys.stdout = old_stdout + sys.stderr = old_stderr + + for name in oldenv.keys(): + if oldenv[name] is None: + del os.environ[name] + else: + os.environ[name] = oldenv[name] + + send(csock, { + "stdout": my_stdout.getvalue(), + "stderr": my_stderr.getvalue(), + "status": ret, + }) + + except BaseException as e: + print(e, file=sys.stderr) + finally: + csock.close() diff --git a/files/pips/horizon b/files/pips/horizon deleted file mode 100644 index dddf01106c..0000000000 --- a/files/pips/horizon +++ /dev/null @@ -1,12 +0,0 @@ -Django==1.3 -django-mailer -django-nose==0.1.2 -django-nose-selenium -django-registration==0.7 -pycrypto==2.3 -sqlalchemy-migrate -python-cloudfiles - --e git+https://github.com/cloudbuilders/openstackx.git#egg=openstackx --e git+https://github.com/jacobian/openstack.compute.git#egg=openstack --e git+https://github.com/4P/python-keystoneclient.git#egg=python-keystoneclient diff --git a/files/pips/keystone b/files/pips/keystone deleted file mode 100644 index 09636e4991..0000000000 --- a/files/pips/keystone +++ /dev/null @@ -1 +0,0 @@ -PassLib diff --git a/files/pips/tempest b/files/pips/tempest deleted file mode 100644 index df7f4230a2..0000000000 --- a/files/pips/tempest +++ /dev/null @@ -1 +0,0 @@ -pika diff --git a/files/rpms/ceph b/files/rpms/ceph new file mode 100644 index 0000000000..19f158fd57 --- /dev/null +++ b/files/rpms/ceph @@ -0,0 +1,3 @@ +ceph # NOPRIME +redhat-lsb-core # not:rhel9,openEuler-22.03 +xfsprogs diff --git a/files/rpms/cinder b/files/rpms/cinder new file mode 100644 index 0000000000..375f93e090 --- /dev/null +++ b/files/rpms/cinder @@ -0,0 +1,3 
@@ +lvm2 +qemu-img +targetcli diff --git a/files/rpms/dstat b/files/rpms/dstat new file mode 100644 index 0000000000..6524bed607 --- /dev/null +++ b/files/rpms/dstat @@ -0,0 +1 @@ +pcp-system-tools diff --git a/files/rpms/general b/files/rpms/general new file mode 100644 index 0000000000..6f4572c708 --- /dev/null +++ b/files/rpms/general @@ -0,0 +1,42 @@ +bc +curl +dbus +gawk +gcc +gcc-c++ +gettext # used for compiling message catalogs +git-core +glibc-langpack-en # dist:rhel9 +graphviz # needed only for docs +httpd +httpd-devel +iptables-nft # dist:rhel9,rhel10 +iptables-services +java-1.8.0-openjdk-headless # not:rhel10 +java-21-openjdk-headless # dist:rhel10 +libffi-devel +libjpeg-turbo-devel # Pillow 3.0.0 +libxml2-devel # lxml +libxslt-devel # lxml +libyaml-devel +mod_ssl # required for tls-proxy on centos 9 stream computes +net-tools +openssh-server +openssl +openssl-devel # to rebuild pyOpenSSL if needed +pcre2-devel # dist:rhel10 for python-pcre2 +pcre-devel # not:rhel10 for python-pcre +pkgconfig +postgresql-devel # psycopg2 +psmisc +python3-devel +python3-pip # not:openEuler-22.03 +python3-systemd +redhat-rpm-config # not:openEuler-22.03 missing dep for gcc hardening flags, see rhbz#1217376 +tar +tcpdump +unzip +util-linux +wget +which +zlib-devel diff --git a/files/rpms/horizon b/files/rpms/horizon new file mode 100644 index 0000000000..a88552bc84 --- /dev/null +++ b/files/rpms/horizon @@ -0,0 +1,2 @@ +httpd # NOPRIME +mod_wsgi # NOPRIME diff --git a/files/rpms/keystone b/files/rpms/keystone new file mode 100644 index 0000000000..5f19c6f70c --- /dev/null +++ b/files/rpms/keystone @@ -0,0 +1,3 @@ +memcached +mod_ssl +sqlite diff --git a/files/rpms/ldap b/files/rpms/ldap new file mode 100644 index 0000000000..d5b8fa4374 --- /dev/null +++ b/files/rpms/ldap @@ -0,0 +1,2 @@ +openldap-clients +openldap-servers diff --git a/files/rpms/n-cpu b/files/rpms/n-cpu new file mode 100644 index 0000000000..3d50f3a062 --- /dev/null +++ b/files/rpms/n-cpu @@ -0,0 +1,9 @@ +cryptsetup +dosfstools +iscsi-initiator-utils +libosinfo +lvm2 +sg3_utils +# Stuff for diablo volumes +sysfsutils +xorriso diff --git a/files/rpms/neutron-agent b/files/rpms/neutron-agent new file mode 100644 index 0000000000..ea8819e884 --- /dev/null +++ b/files/rpms/neutron-agent @@ -0,0 +1 @@ +ipset diff --git a/files/rpms/neutron-common b/files/rpms/neutron-common new file mode 100644 index 0000000000..fe25f57ea6 --- /dev/null +++ b/files/rpms/neutron-common @@ -0,0 +1,12 @@ +acl +dnsmasq # for q-dhcp +dnsmasq-utils # for dhcp_release +ebtables +haproxy # to serve as metadata proxy inside router/dhcp namespaces +iptables +iputils +openvswitch # NOPRIME +rabbitmq-server # NOPRIME +radvd # NOPRIME +sqlite +sudo diff --git a/files/rpms/neutron-l3 b/files/rpms/neutron-l3 new file mode 100644 index 0000000000..a7a190c063 --- /dev/null +++ b/files/rpms/neutron-l3 @@ -0,0 +1,2 @@ +conntrack-tools +keepalived diff --git a/files/rpms/nova b/files/rpms/nova new file mode 100644 index 0000000000..d0f843bb60 --- /dev/null +++ b/files/rpms/nova @@ -0,0 +1,13 @@ +conntrack-tools +curl +ebtables +iptables +iputils +kernel-modules # not:openEuler-22.03 +kpartx +parted +polkit +rabbitmq-server # NOPRIME +sqlite +sudo +xorriso diff --git a/files/rpms/openvswitch b/files/rpms/openvswitch new file mode 100644 index 0000000000..64796f72cd --- /dev/null +++ b/files/rpms/openvswitch @@ -0,0 +1 @@ +openvswitch diff --git a/files/rpms/os-brick b/files/rpms/os-brick new file mode 100644 index 0000000000..14ff870557 --- /dev/null +++ 
b/files/rpms/os-brick @@ -0,0 +1,2 @@ +iscsi-initiator-utils +lsscsi diff --git a/files/rpms/ovn b/files/rpms/ovn new file mode 100644 index 0000000000..698e57b0de --- /dev/null +++ b/files/rpms/ovn @@ -0,0 +1,3 @@ +ovn-central +ovn-host +ovn-vtep diff --git a/files/rpms/q-agt b/files/rpms/q-agt new file mode 120000 index 0000000000..99fe353094 --- /dev/null +++ b/files/rpms/q-agt @@ -0,0 +1 @@ +neutron-agent \ No newline at end of file diff --git a/files/rpms/q-l3 b/files/rpms/q-l3 new file mode 120000 index 0000000000..0a5ca2a45f --- /dev/null +++ b/files/rpms/q-l3 @@ -0,0 +1 @@ +neutron-l3 \ No newline at end of file diff --git a/files/rpms/swift b/files/rpms/swift new file mode 100644 index 0000000000..c3921a47d4 --- /dev/null +++ b/files/rpms/swift @@ -0,0 +1,6 @@ +curl +liberasurecode-devel +memcached +rsync-daemon +sqlite +xfsprogs diff --git a/files/screenrc b/files/screenrc deleted file mode 100644 index e18db39d80..0000000000 --- a/files/screenrc +++ /dev/null @@ -1,9 +0,0 @@ -hardstatus on -hardstatus alwayslastline -hardstatus string "%{.bW}%-w%{.rW}%n %t%{-}%+w %=%{..G}%H %{..Y}%d/%m %c" - -defscrollback 10240 - -vbell off -startup_message off - diff --git a/files/sources.list b/files/sources.list deleted file mode 100644 index 77a1bfb52e..0000000000 --- a/files/sources.list +++ /dev/null @@ -1,9 +0,0 @@ -deb http://mirror.rackspace.com/ubuntu/ %DIST% main restricted -deb http://mirror.rackspace.com/ubuntu/ %DIST%-updates main restricted -deb http://mirror.rackspace.com/ubuntu/ %DIST% universe -deb http://mirror.rackspace.com/ubuntu/ %DIST%-updates universe -deb http://mirror.rackspace.com/ubuntu/ %DIST% multiverse -deb http://mirror.rackspace.com/ubuntu/ %DIST%-updates multiverse -deb http://security.ubuntu.com/ubuntu %DIST%-security main restricted -deb http://security.ubuntu.com/ubuntu %DIST%-security universe -deb http://security.ubuntu.com/ubuntu %DIST%-security multiverse diff --git a/files/sudo/nova b/files/sudo/nova deleted file mode 100644 index 0a79c210a0..0000000000 --- a/files/sudo/nova +++ /dev/null @@ -1,47 +0,0 @@ -Cmnd_Alias NOVADEVCMDS = /bin/chmod /var/lib/nova/tmp/*/root/.ssh, \ - /bin/chown /var/lib/nova/tmp/*/root/.ssh, \ - /bin/chown, \ - /bin/chmod, \ - /bin/dd, \ - /sbin/ifconfig, \ - /sbin/ip, \ - /sbin/route, \ - /sbin/iptables, \ - /sbin/iptables-save, \ - /sbin/iptables-restore, \ - /sbin/ip6tables-save, \ - /sbin/ip6tables-restore, \ - /sbin/kpartx, \ - /sbin/losetup, \ - /sbin/lvcreate, \ - /sbin/lvdisplay, \ - /sbin/lvremove, \ - /bin/mkdir, \ - /bin/mount, \ - /sbin/pvcreate, \ - /usr/bin/tee, \ - /sbin/tune2fs, \ - /bin/umount, \ - /sbin/vgcreate, \ - /usr/bin/virsh, \ - /usr/bin/qemu-nbd, \ - /usr/sbin/brctl, \ - /sbin/brctl, \ - /usr/sbin/radvd, \ - /usr/sbin/vblade-persist, \ - /sbin/pvcreate, \ - /sbin/aoe-discover, \ - /sbin/vgcreate, \ - /bin/aoe-stat, \ - /bin/kill, \ - /sbin/vconfig, \ - /usr/sbin/ietadm, \ - /sbin/vgs, \ - /sbin/iscsiadm, \ - /usr/bin/socat, \ - /sbin/parted, \ - /usr/sbin/dnsmasq, \ - /usr/sbin/arping - -%USER% ALL = (root) NOPASSWD: SETENV: NOVADEVCMDS - diff --git a/files/swift/account-server.conf b/files/swift/account-server.conf deleted file mode 100644 index db0f097fa5..0000000000 --- a/files/swift/account-server.conf +++ /dev/null @@ -1,20 +0,0 @@ -[DEFAULT] -devices = %NODE_PATH%/node -mount_check = false -bind_port = %BIND_PORT% -user = %USER% -log_facility = LOG_LOCAL%LOG_FACILITY% -swift_dir = %SWIFT_CONFIG_LOCATION% - -[pipeline:main] -pipeline = account-server - -[app:account-server] -use = 
egg:swift#account - -[account-replicator] -vm_test_mode = yes - -[account-auditor] - -[account-reaper] diff --git a/files/swift/container-server.conf b/files/swift/container-server.conf deleted file mode 100644 index bdc3e3a075..0000000000 --- a/files/swift/container-server.conf +++ /dev/null @@ -1,22 +0,0 @@ -[DEFAULT] -devices = %NODE_PATH%/node -mount_check = false -bind_port = %BIND_PORT% -user = %USER% -log_facility = LOG_LOCAL%LOG_FACILITY% -swift_dir = %SWIFT_CONFIG_LOCATION% - -[pipeline:main] -pipeline = container-server - -[app:container-server] -use = egg:swift#container - -[container-replicator] -vm_test_mode = yes - -[container-updater] - -[container-auditor] - -[container-sync] diff --git a/files/swift/object-server.conf b/files/swift/object-server.conf deleted file mode 100644 index 06fbffea77..0000000000 --- a/files/swift/object-server.conf +++ /dev/null @@ -1,20 +0,0 @@ -[DEFAULT] -devices = %NODE_PATH%/node -mount_check = false -bind_port = %BIND_PORT% -user = %USER% -log_facility = LOG_LOCAL%LOG_FACILITY% -swift_dir = %SWIFT_CONFIG_LOCATION% - -[pipeline:main] -pipeline = object-server - -[app:object-server] -use = egg:swift#object - -[object-replicator] -vm_test_mode = yes - -[object-updater] - -[object-auditor] diff --git a/files/swift/proxy-server.conf b/files/swift/proxy-server.conf deleted file mode 100644 index 2db6d322c0..0000000000 --- a/files/swift/proxy-server.conf +++ /dev/null @@ -1,33 +0,0 @@ -[DEFAULT] -bind_port = 8080 -user = %USER% -log_facility = LOG_LOCAL1 -swift_dir = %SWIFT_CONFIG_LOCATION% - -[pipeline:main] -pipeline = healthcheck cache %AUTH_SERVER% proxy-server - -[app:proxy-server] -use = egg:swift#proxy -allow_account_management = true -account_autocreate = true - -[filter:keystone] -use = egg:swiftkeystone2#keystone2 -keystone_admin_token = %SERVICE_TOKEN% -keystone_url = http://localhost:35357/v2.0 -keystone_admin_group = Member - -[filter:tempauth] -use = egg:swift#tempauth -user_admin_admin = admin .admin .reseller_admin -user_test_tester = testing .admin -user_test2_tester2 = testing2 .admin -user_test_tester3 = testing3 -bind_ip = 0.0.0.0 - -[filter:healthcheck] -use = egg:swift#healthcheck - -[filter:cache] -use = egg:swift#memcache diff --git a/files/swift/rsyncd.conf b/files/swift/rsyncd.conf index 66215c7f0f..937d6c4b9a 100644 --- a/files/swift/rsyncd.conf +++ b/files/swift/rsyncd.conf @@ -1,79 +1,79 @@ uid = %USER% gid = %GROUP% -log file = /var/log/rsyncd.log -pid file = /var/run/rsyncd.pid +log file = %SWIFT_DATA_DIR%/logs/rsyncd.log +pid file = %SWIFT_DATA_DIR%/run/rsyncd.pid address = 127.0.0.1 -[account6012] +[account6612] max connections = 25 -path = %SWIFT_DATA_LOCATION%/1/node/ +path = %SWIFT_DATA_DIR%/1/ read only = false -lock file = /var/lock/account6012.lock +lock file = %SWIFT_DATA_DIR%/run/account6612.lock -[account6022] +[account6622] max connections = 25 -path = %SWIFT_DATA_LOCATION%/2/node/ +path = %SWIFT_DATA_DIR%/2/ read only = false -lock file = /var/lock/account6022.lock +lock file = %SWIFT_DATA_DIR%/run/account6622.lock -[account6032] +[account6632] max connections = 25 -path = %SWIFT_DATA_LOCATION%/3/node/ +path = %SWIFT_DATA_DIR%/3/ read only = false -lock file = /var/lock/account6032.lock +lock file = %SWIFT_DATA_DIR%/run/account6632.lock -[account6042] +[account6642] max connections = 25 -path = %SWIFT_DATA_LOCATION%/4/node/ +path = %SWIFT_DATA_DIR%/4/ read only = false -lock file = /var/lock/account6042.lock +lock file = %SWIFT_DATA_DIR%/run/account6642.lock -[container6011] +[container6611] max connections 
= 25 -path = %SWIFT_DATA_LOCATION%/1/node/ +path = %SWIFT_DATA_DIR%/1/ read only = false -lock file = /var/lock/container6011.lock +lock file = %SWIFT_DATA_DIR%/run/container6611.lock -[container6021] +[container6621] max connections = 25 -path = %SWIFT_DATA_LOCATION%/2/node/ +path = %SWIFT_DATA_DIR%/2/ read only = false -lock file = /var/lock/container6021.lock +lock file = %SWIFT_DATA_DIR%/run/container6621.lock -[container6031] +[container6631] max connections = 25 -path = %SWIFT_DATA_LOCATION%/3/node/ +path = %SWIFT_DATA_DIR%/3/ read only = false -lock file = /var/lock/container6031.lock +lock file = %SWIFT_DATA_DIR%/run/container6631.lock -[container6041] +[container6641] max connections = 25 -path = %SWIFT_DATA_LOCATION%/4/node/ +path = %SWIFT_DATA_DIR%/4/ read only = false -lock file = /var/lock/container6041.lock +lock file = %SWIFT_DATA_DIR%/run/container6641.lock -[object6010] +[object6613] max connections = 25 -path = %SWIFT_DATA_LOCATION%/1/node/ +path = %SWIFT_DATA_DIR%/1/ read only = false -lock file = /var/lock/object6010.lock +lock file = %SWIFT_DATA_DIR%/run/object6613.lock -[object6020] +[object6623] max connections = 25 -path = %SWIFT_DATA_LOCATION%/2/node/ +path = %SWIFT_DATA_DIR%/2/ read only = false -lock file = /var/lock/object6020.lock +lock file = %SWIFT_DATA_DIR%/run/object6623.lock -[object6030] +[object6633] max connections = 25 -path = %SWIFT_DATA_LOCATION%/3/node/ +path = %SWIFT_DATA_DIR%/3/ read only = false -lock file = /var/lock/object6030.lock +lock file = %SWIFT_DATA_DIR%/run/object6633.lock -[object6040] +[object6643] max connections = 25 -path = %SWIFT_DATA_LOCATION%/4/node/ +path = %SWIFT_DATA_DIR%/4/ read only = false -lock file = /var/lock/object6040.lock +lock file = %SWIFT_DATA_DIR%/run/object6643.lock diff --git a/files/swift/rsyslog.conf b/files/swift/rsyslog.conf new file mode 100644 index 0000000000..011c893b91 --- /dev/null +++ b/files/swift/rsyslog.conf @@ -0,0 +1,26 @@ +# Uncomment the following to have a log containing all logs together +#local1,local2,local3,local4,local5.* %SWIFT_LOGDIR%/all.log + +# Uncomment the following to have hourly proxy logs for stats processing +#$template HourlyProxyLog,"%SWIFT_LOGDIR%/hourly/%$YEAR%%$MONTH%%$DAY%%$HOUR%" +#local1.*;local1.!notice ?HourlyProxyLog + +local1.*;local1.!notice %SWIFT_LOGDIR%/proxy.log +local1.notice %SWIFT_LOGDIR%/proxy.error +local1.* ~ + +local2.*;local2.!notice %SWIFT_LOGDIR%/storage1.log +local2.notice %SWIFT_LOGDIR%/storage1.error +local2.* ~ + +local3.*;local3.!notice %SWIFT_LOGDIR%/storage2.log +local3.notice %SWIFT_LOGDIR%/storage2.error +local3.* ~ + +local4.*;local4.!notice %SWIFT_LOGDIR%/storage3.log +local4.notice %SWIFT_LOGDIR%/storage3.error +local4.* ~ + +local5.*;local5.!notice %SWIFT_LOGDIR%/storage4.log +local5.notice %SWIFT_LOGDIR%/storage4.error +local5.* ~ diff --git a/files/swift/swift-remakerings b/files/swift/swift-remakerings deleted file mode 100755 index c65353ced5..0000000000 --- a/files/swift/swift-remakerings +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/bash - -cd %SWIFT_CONFIG_LOCATION% - -rm -f *.builder *.ring.gz backups/*.builder backups/*.ring.gz - -swift-ring-builder object.builder create %SWIFT_PARTITION_POWER_SIZE% 3 1 -swift-ring-builder object.builder add z1-127.0.0.1:6010/sdb1 1 -swift-ring-builder object.builder add z2-127.0.0.1:6020/sdb2 1 -swift-ring-builder object.builder add z3-127.0.0.1:6030/sdb3 1 -swift-ring-builder object.builder add z4-127.0.0.1:6040/sdb4 1 -swift-ring-builder object.builder rebalance - -swift-ring-builder 
container.builder create %SWIFT_PARTITION_POWER_SIZE% 3 1 -swift-ring-builder container.builder add z1-127.0.0.1:6011/sdb1 1 -swift-ring-builder container.builder add z2-127.0.0.1:6021/sdb2 1 -swift-ring-builder container.builder add z3-127.0.0.1:6031/sdb3 1 -swift-ring-builder container.builder add z4-127.0.0.1:6041/sdb4 1 -swift-ring-builder container.builder rebalance - -swift-ring-builder account.builder create %SWIFT_PARTITION_POWER_SIZE% 3 1 -swift-ring-builder account.builder add z1-127.0.0.1:6012/sdb1 1 -swift-ring-builder account.builder add z2-127.0.0.1:6022/sdb2 1 -swift-ring-builder account.builder add z3-127.0.0.1:6032/sdb3 1 -swift-ring-builder account.builder add z4-127.0.0.1:6042/sdb4 1 -swift-ring-builder account.builder rebalance diff --git a/files/swift/swift-startmain b/files/swift/swift-startmain deleted file mode 100755 index 69efebd90d..0000000000 --- a/files/swift/swift-startmain +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash - -swift-init all restart diff --git a/files/swift/swift.conf b/files/swift/swift.conf deleted file mode 100644 index 98df466317..0000000000 --- a/files/swift/swift.conf +++ /dev/null @@ -1,3 +0,0 @@ -[swift-hash] -# random unique string that can never change (DO NOT LOSE) -swift_hash_path_suffix = %SWIFT_HASH% diff --git a/functions b/functions new file mode 100644 index 0000000000..829fc86c55 --- /dev/null +++ b/functions @@ -0,0 +1,881 @@ +#!/bin/bash +# +# functions - DevStack-specific functions +# +# The following variables are assumed to be defined by certain functions: +# +# - ``DATABASE_BACKENDS`` +# - ``ENABLED_SERVICES`` +# - ``FILES`` +# - ``GLANCE_HOSTPORT`` +# + +# ensure we don't re-source this in the same environment +[[ -z "$_DEVSTACK_FUNCTIONS" ]] || return 0 +declare -r -g _DEVSTACK_FUNCTIONS=1 + +# Include the common functions +FUNC_DIR=$(cd $(dirname "${BASH_SOURCE:-$0}") && pwd) +source ${FUNC_DIR}/functions-common +source ${FUNC_DIR}/inc/ini-config +source ${FUNC_DIR}/inc/meta-config +source ${FUNC_DIR}/inc/python +source ${FUNC_DIR}/inc/rootwrap +source ${FUNC_DIR}/inc/async + +# Save trace setting +_XTRACE_FUNCTIONS=$(set +o | grep xtrace) +set +o xtrace + +# Check if a function already exists +function function_exists { + declare -f -F $1 > /dev/null +} + +# short_source prints out the current location of the caller in a way +# that strips redundant directories. This is useful for PS4 usage. +function short_source { + saveIFS=$IFS + IFS=" " + called=($(caller 0)) + IFS=$saveIFS + file=${called[2]} + file=${file#$RC_DIR/} + printf "%-40s " "$file:${called[1]}:${called[0]}" +} +# PS4 is exported to child shells and uses the 'short_source' function, so +# export it so child shells have access to the 'short_source' function also. +export -f short_source + +# Download a file from a URL +# +# Will check cache (in $FILES) or download given URL. +# +# Argument is the URL to the remote file +# +# Will echo the local path to the file as the output. Will die on +# failure to download. +# +# Files can be pre-cached for CI environments, see EXTRA_CACHE_URLS +# and tools/image_list.sh +function get_extra_file { + local file_url=$1 + + file_name=$(basename "$file_url") + if [[ $file_url != file* ]]; then + # If the file isn't cache, download it + if [[ ! -f $FILES/$file_name ]]; then + wget --progress=dot:giga -t 2 -c $file_url -O $FILES/$file_name + if [[ $? 
-ne 0 ]]; then + die "$file_url could not be downloaded" + fi + fi + echo "$FILES/$file_name" + return + else + # just strip the file:// bit and that's the path to the file + echo $file_url | sed 's/$file:\/\///g' + fi +} + +# Generate image property arguments for OSC +# +# Arguments: properties, one per, like propname=value +# +# Result is --property propname1=value1 --property propname2=value2 +function _image_properties_to_arg { + local result="" + for property in $*; do + result+=" --property $property" + done + echo $result +} + +# Upload an image to glance using the configured mechanism +# +# Arguments: +# image name +# container format +# disk format +# path to image file +# optional properties (format of propname=value) +# +function _upload_image { + local image_name="$1" + shift + local container="$1" + shift + local disk="$1" + shift + local image="$1" + shift + local properties + local useimport + + properties=$(_image_properties_to_arg $*) + + if [[ "$GLANCE_USE_IMPORT_WORKFLOW" == "True" ]]; then + useimport="--import" + fi + + openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name" --public --container-format "$container" --disk-format "$disk" $useimport $properties --file $(readlink -f "${image}") +} + +# Retrieve an image from a URL and upload into Glance. +# Uses the following variables: +# +# - ``FILES`` must be set to the cache dir +# - ``GLANCE_HOSTPORT`` +# +# upload_image image-url +function upload_image { + local image_url=$1 + + local image image_fname image_name + + local max_attempts=5 + + # Create a directory for the downloaded image tarballs. + mkdir -p $FILES/images + image_fname=`basename "$image_url"` + if [[ $image_url != file* ]]; then + # Downloads the image (uec ami+akistyle), then extracts it. + if [[ ! -f $FILES/$image_fname || "$(stat -c "%s" $FILES/$image_fname)" = "0" ]]; then + for attempt in `seq $max_attempts`; do + local rc=0 + wget --progress=dot:giga -c $image_url -O $FILES/$image_fname || rc=$? + if [[ $rc -ne 0 ]]; then + if [[ "$attempt" -eq "$max_attempts" ]]; then + echo "Not found: $image_url" + # Signal failure to download to the caller, so they can fail early + return 1 + fi + echo "Download failed, retrying in $attempt second, attempt: $attempt" + sleep $attempt + else + break + fi + done + fi + image="$FILES/${image_fname}" + else + # File based URL (RFC 1738): ``file://host/path`` + # Remote files are not considered here. + # unix: ``file:///home/user/path/file`` + # windows: ``file:///C:/Documents%20and%20Settings/user/path/file`` + image=$(echo $image_url | sed "s/^file:\/\///g") + if [[ ! -f $image || "$(stat -c "%s" $image)" == "0" ]]; then + echo "Not found: $image_url" + return + fi + fi + + # OpenVZ-format images are provided as .tar.gz, but not decompressed prior to loading + if [[ "$image_url" =~ 'openvz' ]]; then + image_name="${image_fname%.tar.gz}" + _upload_image "$image_name" ami ami "$image" + return + fi + + # vmdk format images + if [[ "$image_url" =~ '.vmdk' ]]; then + image_name="${image_fname%.vmdk}" + + # Before we can upload vmdk type images to glance, we need to know it's + # disk type, storage adapter, and networking adapter. These values are + # passed to glance as custom properties. + # We take these values from the vmdk file if populated. Otherwise, we use + # vmdk filename, which is expected in the following format: + # + # -;; + # + # If the filename does not follow the above format then the vsphere + # driver will supply default values. 
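
As the comment block above describes, when the vmdk descriptor does not provide them, the disk type, storage adapter and network adapter are recovered from the tail of the image filename, where they follow the last dash and are separated by ';' (or ':'). A hedged illustration with an invented filename, showing what the parsing further down extracts and which glance properties the values end up in:

    #!/bin/bash
    # Hypothetical filename following the name-disktype;adapter;net-adapter
    # convention; the parsed values become the vmware_disktype,
    # vmware_adaptertype and hw_vif_model image properties.
    image_fname="trusty-server-sparse;lsiLogic;e1000.vmdk"
    image_name="${image_fname%.vmdk}"

    # Same extraction the function performs below.
    property_string=$(echo "$image_name" | { grep -oP '(?<=-)(?!.*-).*[:;].*[:;].*$' || true; })
    IFS=':;' read -a props <<< "$property_string"

    echo "vmware_disktype=${props[0]}"      # sparse
    echo "vmware_adaptertype=${props[1]}"   # lsiLogic
    echo "hw_vif_model=${props[2]}"         # e1000
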
+ + local vmdk_disktype="" + local vmdk_net_adapter="e1000" + local path_len + + # vmdk adapter type + local vmdk_adapter_type + vmdk_adapter_type="$(head -25 $image | { grep -a -F -m 1 'ddb.adapterType =' $image || true; })" + vmdk_adapter_type="${vmdk_adapter_type#*\"}" + vmdk_adapter_type="${vmdk_adapter_type%?}" + + # vmdk disk type + local vmdk_create_type + vmdk_create_type="$(head -25 $image | { grep -a -F -m 1 'createType=' $image || true; })" + vmdk_create_type="${vmdk_create_type#*\"}" + vmdk_create_type="${vmdk_create_type%\"*}" + + descriptor_data_pair_msg="Monolithic flat and VMFS disks "` + `"should use a descriptor-data pair." + if [[ "$vmdk_create_type" = "monolithicSparse" ]]; then + vmdk_disktype="sparse" + elif [[ "$vmdk_create_type" = "monolithicFlat" || "$vmdk_create_type" = "vmfs" ]]; then + # Attempt to retrieve the ``*-flat.vmdk`` + local flat_fname + flat_fname="$(head -25 $image | { grep -G 'RW\|RDONLY [0-9]+ FLAT\|VMFS' $image || true; })" + flat_fname="${flat_fname#*\"}" + flat_fname="${flat_fname%?}" + if [[ -z "$flat_fname" ]]; then + flat_fname="$image_name-flat.vmdk" + fi + path_len=`expr ${#image_url} - ${#image_fname}` + local flat_url="${image_url:0:$path_len}$flat_fname" + warn $LINENO "$descriptor_data_pair_msg"` + `" Attempt to retrieve the *-flat.vmdk: $flat_url" + if [[ $flat_url != file* ]]; then + if [[ ! -f $FILES/$flat_fname || \ + "$(stat -c "%s" $FILES/$flat_fname)" = "0" ]]; then + wget --progress=dot:giga -c $flat_url -O $FILES/$flat_fname + fi + image="$FILES/${flat_fname}" + else + image=$(echo $flat_url | sed "s/^file:\/\///g") + if [[ ! -f $image || "$(stat -c "%s" $image)" == "0" ]]; then + echo "Flat disk not found: $flat_url" + return 1 + fi + fi + image_name="${flat_fname}" + vmdk_disktype="preallocated" + elif [[ "$vmdk_create_type" = "streamOptimized" ]]; then + vmdk_disktype="streamOptimized" + elif [[ -z "$vmdk_create_type" ]]; then + # *-flat.vmdk provided: attempt to retrieve the descriptor (*.vmdk) + # to retrieve appropriate metadata + if [[ ${image_name: -5} != "-flat" ]]; then + warn $LINENO "Expected filename suffix: '-flat'."` + `" Filename provided: ${image_name}" + else + descriptor_fname="${image_name:0:${#image_name} - 5}.vmdk" + path_len=`expr ${#image_url} - ${#image_fname}` + local flat_path="${image_url:0:$path_len}" + local descriptor_url=$flat_path$descriptor_fname + warn $LINENO "$descriptor_data_pair_msg"` + `" Attempt to retrieve the descriptor *.vmdk: $descriptor_url" + if [[ $flat_path != file* ]]; then + if [[ ! -f $FILES/$descriptor_fname || \ + "$(stat -c "%s" $FILES/$descriptor_fname)" = "0" ]]; then + wget -c $descriptor_url -O $FILES/$descriptor_fname + fi + descriptor_url="$FILES/$descriptor_fname" + else + descriptor_url=$(echo $descriptor_url | sed "s/^file:\/\///g") + if [[ ! -f $descriptor_url || \ + "$(stat -c "%s" $descriptor_url)" == "0" ]]; then + echo "Descriptor not found: $descriptor_url" + return 1 + fi + fi + vmdk_adapter_type="$(head -25 $descriptor_url | { grep -a -F -m 1 'ddb.adapterType =' $descriptor_url || true; })" + vmdk_adapter_type="${vmdk_adapter_type#*\"}" + vmdk_adapter_type="${vmdk_adapter_type%?}" + fi + vmdk_disktype="preallocated" + else + vmdk_disktype="preallocated" + fi + + # NOTE: For backwards compatibility reasons, colons may be used in place + # of semi-colons for property delimiters but they are not permitted + # characters in NTFS filesystems. 
+ property_string=`echo "$image_name" | { grep -oP '(?<=-)(?!.*-).*[:;].*[:;].*$' || true; }` + IFS=':;' read -a props <<< "$property_string" + vmdk_disktype="${props[0]:-$vmdk_disktype}" + vmdk_adapter_type="${props[1]:-$vmdk_adapter_type}" + vmdk_net_adapter="${props[2]:-$vmdk_net_adapter}" + + _upload_image "$image_name" bare vmdk "$image" vmware_disktype="$vmdk_disktype" vmware_adaptertype="$vmdk_adapter_type" hw_vif_model="$vmdk_net_adapter" + + return + fi + + if [[ "$image_url" =~ '.hds' ]]; then + image_name="${image_fname%.hds}" + vm_mode=${image_name##*-} + if [[ $vm_mode != 'exe' && $vm_mode != 'hvm' ]]; then + die $LINENO "Unknown vm_mode=${vm_mode} for Virtuozzo image" + fi + + _upload_image "$image_name" bare ploop "$image" vm_mode=$vm_mode + return + fi + + local kernel="" + local ramdisk="" + local disk_format="" + local container_format="" + local unpack="" + local img_property="" + + # NOTE(danms): If we're on libvirt/qemu or libvirt/kvm, set the hw_rng_model + # to libvirt in the image properties. + if [[ "$VIRT_DRIVER" == "libvirt" ]]; then + if [[ "$LIBVIRT_TYPE" == "qemu" || "$LIBVIRT_TYPE" == "kvm" ]]; then + img_property="hw_rng_model=virtio" + fi + fi + + case "$image_fname" in + *.tar.gz|*.tgz) + # Extract ami and aki files + [ "${image_fname%.tar.gz}" != "$image_fname" ] && + image_name="${image_fname%.tar.gz}" || + image_name="${image_fname%.tgz}" + local xdir="$FILES/images/$image_name" + rm -Rf "$xdir"; + mkdir "$xdir" + tar -zxf $image -C "$xdir" + kernel=$(for f in "$xdir/"*-vmlinuz* "$xdir/"aki-*/image; do + [ -f "$f" ] && echo "$f" && break; done; true) + ramdisk=$(for f in "$xdir/"*-initrd* "$xdir/"ari-*/image; do + [ -f "$f" ] && echo "$f" && break; done; true) + image=$(for f in "$xdir/"*.img "$xdir/"ami-*/image; do + [ -f "$f" ] && echo "$f" && break; done; true) + if [[ -z "$image_name" ]]; then + image_name=$(basename "$image" ".img") + fi + ;; + *.img) + image_name=$(basename "$image" ".img") + local format + format=$(qemu-img info ${image} | awk '/^file format/ { print $3; exit }') + if [[ ",qcow2,raw,vdi,vmdk,vpc," =~ ",$format," ]]; then + disk_format=$format + else + disk_format=raw + fi + container_format=bare + ;; + *.img.gz) + image_name=$(basename "$image" ".img.gz") + disk_format=raw + container_format=bare + unpack=zcat + ;; + *.img.bz2) + image_name=$(basename "$image" ".img.bz2") + disk_format=qcow2 + container_format=bare + unpack=bunzip2 + ;; + *.qcow2) + image_name=$(basename "$image" ".qcow2") + disk_format=qcow2 + container_format=bare + ;; + *.qcow2.xz) + image_name=$(basename "$image" ".qcow2.xz") + disk_format=qcow2 + container_format=bare + unpack=unxz + ;; + *.raw) + image_name=$(basename "$image" ".raw") + disk_format=raw + container_format=bare + ;; + *.iso) + image_name=$(basename "$image" ".iso") + disk_format=iso + container_format=bare + ;; + *.vhd|*.vhdx|*.vhd.gz|*.vhdx.gz) + local extension="${image_fname#*.}" + image_name=$(basename "$image" ".$extension") + disk_format=$(echo $image_fname | grep -oP '(?<=\.)vhdx?(?=\.|$)') + container_format=bare + if [ "${image_fname##*.}" == "gz" ]; then + unpack=zcat + fi + ;; + *) echo "Do not know what to do with $image_fname"; false;; + esac + + if is_arch "ppc64le" || is_arch "ppc64" || is_arch "ppc"; then + img_property="$img_property hw_cdrom_bus=scsi os_command_line=console=hvc0" + fi + + if is_arch "aarch64"; then + img_property="$img_property hw_machine_type=virt hw_cdrom_bus=scsi hw_scsi_model=virtio-scsi os_command_line='console=ttyAMA0'" + fi + + if [ 
"$container_format" = "bare" ]; then + if [ "$unpack" = "zcat" ]; then + _upload_image "$image_name" $container_format $disk_format <(zcat --force "$image") $img_property + elif [ "$unpack" = "bunzip2" ]; then + _upload_image "$image_name" $container_format $disk_format <(bunzip2 -cdk "$image") $img_property + elif [ "$unpack" = "unxz" ]; then + # NOTE(brtknr): unxz the file first and cleanup afterwards to + # prevent timeout while Glance tries to upload image (e.g. to Swift). + local tmp_dir + local image_path + tmp_dir=$(mktemp -d) + image_path="$tmp_dir/$image_name" + unxz -cv "${image}" > "$image_path" + _upload_image "$image_name" $container_format $disk_format "$image_path" $img_property + rm -rf $tmp_dir + else + _upload_image "$image_name" $container_format $disk_format "$image" $img_property + fi + else + # Use glance client to add the kernel the root filesystem. + # We parse the results of the first upload to get the glance ID of the + # kernel for use when uploading the root filesystem. + local kernel_id="" ramdisk_id=""; + if [ -n "$kernel" ]; then + kernel_id=$(openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name-kernel" $(_image_properties_to_arg $img_property) --public --container-format aki --disk-format aki --file $(readlink -f "$kernel") -f value -c id) + fi + if [ -n "$ramdisk" ]; then + ramdisk_id=$(openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name-ramdisk" $(_image_properties_to_arg $img_property) --public --container-format ari --disk-format ari --file $(readlink -f "$ramdisk") -f value -c id) + fi + _upload_image "${image_name%.img}" ami ami "$image" ${kernel_id:+ kernel_id=$kernel_id} ${ramdisk_id:+ ramdisk_id=$ramdisk_id} $img_property + fi +} + + +# Set the database backend to use +# When called from stackrc/localrc DATABASE_BACKENDS has not been +# initialized yet, just save the configuration selection and call back later +# to validate it. +# +# ``$1`` - the name of the database backend to use (mysql, postgresql, ...) +function use_database { + if [[ -z "$DATABASE_BACKENDS" ]]; then + # No backends registered means this is likely called from ``localrc`` + # This is now deprecated usage + DATABASE_TYPE=$1 + deprecated "The database backend needs to be properly set in ENABLED_SERVICES; use_database is deprecated localrc" + else + # This should no longer get called...here for posterity + use_exclusive_service DATABASE_BACKENDS DATABASE_TYPE $1 + fi +} + +#Macro for curl statements. curl requires -g option for literal IPv6 addresses. +CURL_GET="${CURL_GET:-curl -g}" + +# Wait for an HTTP server to start answering requests +# wait_for_service timeout url +# +# If the service we want is behind a proxy, the proxy may be available +# before the service. Compliant proxies will return a 503 in this case +# Loop until we get something else. +# Also check for the case where there is no proxy and the service just +# hasn't started yet. curl returns 7 for Failed to connect to host. +function wait_for_service { + local timeout=$1 + local url=$2 + local rval=0 + time_start "wait_for_service" + timeout $timeout bash -x < [boot-timeout] [from_net] [expected] +function ping_check { + local ip=$1 + local timeout=${2:-30} + local from_net=${3:-""} + local expected=${4:-True} + local op="!" + local failmsg="[Fail] Couldn't ping server" + local ping_cmd="ping" + + # if we don't specify a from_net we're expecting things to work + # fine from our local box. 
+ if [[ -n "$from_net" ]]; then + # TODO(stephenfin): Is there any way neutron could be disabled now? + if is_service_enabled neutron; then + ping_cmd="$TOP_DIR/tools/ping_neutron.sh $from_net" + fi + fi + + # inverse the logic if we're testing no connectivity + if [[ "$expected" != "True" ]]; then + op="" + failmsg="[Fail] Could ping server" + fi + + # Because we've transformed this command so many times, print it + # out at the end. + local check_command="while $op $ping_cmd -c1 -w1 $ip; do sleep 1; done" + echo "Checking connectivity with $check_command" + + if ! timeout $timeout sh -c "$check_command"; then + die $LINENO $failmsg + fi +} + +# Get ip of instance +function get_instance_ip { + local vm_id=$1 + local network_name=$2 + local addresses + local ip + + addresses=$(openstack server show -c addresses -f value "$vm_id") + ip=$(echo $addresses | sed -n "s/^.*$network_name=\([0-9\.]*\).*$/\1/p") + if [[ $ip = "" ]];then + echo "addresses of server $vm_id : $addresses" + die $LINENO "[Fail] Couldn't get ipaddress of VM" + fi + echo $ip +} + +# ssh check + +# ssh_check net-name key-file floating-ip default-user active-timeout +function ssh_check { + if is_service_enabled neutron; then + _ssh_check_neutron "$1" $2 $3 $4 $5 + return + fi + _ssh_check_novanet "$1" $2 $3 $4 $5 +} + +function _ssh_check_novanet { + local NET_NAME=$1 + local KEY_FILE=$2 + local FLOATING_IP=$3 + local DEFAULT_INSTANCE_USER=$4 + local ACTIVE_TIMEOUT=$5 + local probe_cmd="" + if ! timeout $ACTIVE_TIMEOUT sh -c "while ! ssh -o StrictHostKeyChecking=no -i $KEY_FILE ${DEFAULT_INSTANCE_USER}@$FLOATING_IP echo success; do sleep 1; done"; then + die $LINENO "server didn't become ssh-able!" + fi +} + + +# Get the location of the $module-rootwrap executables, where module is cinder +# or nova. +# get_rootwrap_location module +function get_rootwrap_location { + local module=$1 + + echo "$(get_python_exec_prefix)/$module-rootwrap" +} + + +# Path permissions sanity check +# check_path_perm_sanity path +function check_path_perm_sanity { + # Ensure no element of the path has 0700 permissions, which is very + # likely to cause issues for daemons. Inspired by default 0700 + # homedir permissions on RHEL and common practice of making DEST in + # the stack user's homedir. + + local real_path + real_path=$(readlink -f $1) + local rebuilt_path="" + for i in $(echo ${real_path} | tr "/" " "); do + rebuilt_path=$rebuilt_path"/"$i + + if [[ $(stat -c '%a' ${rebuilt_path}) = 700 ]]; then + echo "*** DEST path element" + echo "*** ${rebuilt_path}" + echo "*** appears to have 0700 permissions." + echo "*** This is very likely to cause fatal issues for DevStack daemons." + + if [[ -n "$SKIP_PATH_SANITY" ]]; then + return + else + echo "*** Set SKIP_PATH_SANITY to skip this check" + die $LINENO "Invalid path permissions" + fi + fi + done +} + + +# vercmp ver1 op ver2 +# Compare VER1 to VER2 +# - op is one of < <= == >= > +# - returns true if satisified +# e.g. +# if vercmp 1.0 "<" 2.0; then +# ... +# fi +function vercmp { + local v1=$1 + local op=$2 + local v2=$3 + local result + + # sort the two numbers with sort's "-V" argument. Based on if v2 + # swapped places with v1, we can determine ordering. 
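
ping_check, get_instance_ip and ssh_check above are the guest-connectivity helpers: ping_check retries a ping (optionally from inside a Neutron network via tools/ping_neutron.sh) and can also assert that a host is not reachable, get_instance_ip scrapes one network's address out of "openstack server show -c addresses -f value", and ssh_check loops until ssh to the given address succeeds. A hedged usage sketch; the server, network, key and address values are placeholders, and the script assumes it runs from a DevStack checkout so that "source functions" works:

    #!/bin/bash
    # Hedged sketch of the connectivity helpers; all names are invented.
    source functions

    vm_id=$(openstack server create --image cirros-0.6.2-x86_64-disk \
        --flavor m1.tiny --network private --key-name demo-key \
        demo-guest -f value -c id)

    fixed_ip=$(get_instance_ip "$vm_id" private)

    # Expect ping to succeed within 60 seconds from the local host ...
    ping_check "$fixed_ip" 60

    # ... and expect no reply when probing a deliberately isolated address.
    ping_check 203.0.113.250 30 "" False

    # Retry ssh as the image's default user for up to 60 seconds.
    ssh_check private ~/.ssh/id_ed25519 "$fixed_ip" cirros 60
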
+ result=$(echo -e "$v1\n$v2" | sort -V | head -1) + + case $op in + "==") + [ "$v1" = "$v2" ] + return + ;; + ">") + [ "$v1" != "$v2" ] && [ "$result" = "$v2" ] + return + ;; + "<") + [ "$v1" != "$v2" ] && [ "$result" = "$v1" ] + return + ;; + ">=") + [ "$result" = "$v2" ] + return + ;; + "<=") + [ "$result" = "$v1" ] + return + ;; + *) + die $LINENO "unrecognised op: $op" + ;; + esac +} + +# This sets up defaults we like in devstack for logging for tracking +# down issues, and makes sure everything is done the same between +# projects. +# NOTE(jh): Historically this function switched between three different +# functions: setup_systemd_logging, setup_colorized_logging and +# setup_standard_logging_identity. Since we always run with systemd now, +# this could be cleaned up, but the other functions may still be in use +# by plugins. Since deprecations haven't worked in the past, we'll just +# leave them in place. +function setup_logging { + setup_systemd_logging $1 +} + +# This function sets log formatting options for colorizing log +# output to stdout. It is meant to be called by lib modules. +function setup_colorized_logging { + local conf_file=$1 + # Add color to logging output + iniset $conf_file DEFAULT logging_context_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [%(request_id)s %(project_name)s %(user_name)s%(color)s] %(instance)s%(color)s%(message)s" + iniset $conf_file DEFAULT logging_default_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s" + iniset $conf_file DEFAULT logging_debug_format_suffix "from (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d" + iniset $conf_file DEFAULT logging_exception_prefix "%(color)s%(asctime)s.%(msecs)03d TRACE %(name)s %(instance)s" + # Enable or disable color for oslo.log + iniset $conf_file DEFAULT log_color $LOG_COLOR +} + +function setup_systemd_logging { + local conf_file=$1 + # NOTE(sdague): this is a nice to have, and means we're using the + # native systemd path, which provides for things like search on + # request-id. However, there may be an eventlet interaction here, + # so going off for now. + USE_JOURNAL=$(trueorfalse False USE_JOURNAL) + local pidstr="" + if [[ "$USE_JOURNAL" == "True" ]]; then + iniset $conf_file DEFAULT use_journal "True" + # if we are using the journal directly, our process id is already correct + else + pidstr="(pid=%(process)d) " + fi + iniset $conf_file DEFAULT logging_debug_format_suffix "{{${pidstr}%(funcName)s %(pathname)s:%(lineno)d}}" + + iniset $conf_file DEFAULT logging_context_format_string "%(color)s%(levelname)s %(name)s [%(global_request_id)s %(request_id)s %(project_name)s %(user_name)s%(color)s] %(instance)s%(color)s%(message)s" + iniset $conf_file DEFAULT logging_default_format_string "%(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s" + iniset $conf_file DEFAULT logging_exception_prefix "ERROR %(name)s %(instance)s" + + # Enable or disable color for oslo.log + iniset $conf_file DEFAULT log_color $LOG_COLOR +} + +function setup_standard_logging_identity { + local conf_file=$1 + iniset $conf_file DEFAULT logging_user_identity_format "%(project_name)s %(user_name)s" +} + +# These functions are provided for basic fall-back functionality for +# projects that include parts of DevStack (Grenade). stack.sh will +# override these with more specific versions for DevStack (with fancy +# spinners, etc). We never override an existing version +if ! 
function_exists echo_summary; then + function echo_summary { + echo $@ + } +fi +if ! function_exists echo_nolog; then + function echo_nolog { + echo $@ + } +fi + + +# create_disk - Create, configure, and mount a backing disk +function create_disk { + local node_number + local disk_image=${1} + local storage_data_dir=${2} + local loopback_disk_size=${3} + local key + + key=$(echo $disk_image | sed 's#/.##') + key="devstack-$key" + + destroy_disk $disk_image $storage_data_dir + + # Create an empty file of the correct size (and ensure the + # directory structure up to that path exists) + sudo mkdir -p $(dirname ${disk_image}) + sudo truncate -s ${loopback_disk_size} ${disk_image} + + # Make a fresh XFS filesystem. Use bigger inodes so xattr can fit in + # a single inode. Keeping the default inode size (256) will result in multiple + # inodes being used to store xattr. Retrieving the xattr will be slower + # since we have to read multiple inodes. This statement is true for both + # Swift and Ceph. + sudo mkfs.xfs -f -i size=1024 ${disk_image} + + # Install a new loopback fstab entry for this disk image, and mount it + echo "$disk_image $storage_data_dir xfs loop,noatime,nodiratime,logbufs=8,comment=$key 0 0" | sudo tee -a /etc/fstab + sudo mkdir -p $storage_data_dir + sudo mount -v $storage_data_dir +} + +# Unmount, de-configure, and destroy a backing disk +function destroy_disk { + local disk_image=$1 + local storage_data_dir=$2 + local key + + key=$(echo $disk_image | sed 's#/.##') + key="devstack-$key" + + # Unmount the target, if mounted + if egrep -q $storage_data_dir /proc/mounts; then + sudo umount $storage_data_dir + fi + + # Clear any fstab rules + sudo sed -i '/.*comment=$key.*/ d' /etc/fstab + + # Delete the file + sudo rm -f $disk_image +} + + +# set_mtu - Set MTU on a device +function set_mtu { + local dev=$1 + local mtu=$2 + sudo ip link set mtu $mtu dev $dev +} + + +# running_in_container - Returns true otherwise false +function running_in_container { + [[ $(systemd-detect-virt --container) != 'none' ]] +} + + +# enable_kernel_bridge_firewall - Enable kernel support for bridge firewalling +function enable_kernel_bridge_firewall { + # Load bridge module. This module provides access to firewall for bridged + # frames; and also on older kernels (pre-3.18) it provides sysctl knobs to + # enable/disable bridge firewalling + sudo modprobe bridge + # For newer kernels (3.18+), those sysctl settings are split into a separate + # kernel module (br_netfilter). Load it too, if present. + sudo modprobe br_netfilter 2>> /dev/null || : + # Enable bridge firewalling in case it's disabled in kernel (upstream + # default is enabled, but some distributions may decide to change it). + # This is at least needed for RHEL 7.2 and earlier releases. + for proto in ip ip6; do + sudo sysctl -w net.bridge.bridge-nf-call-${proto}tables=1 + done +} + + +# Set a systemd system override +# +# This sets a system-side override in system.conf. A per-service +# override would be /etc/systemd/system/${service}.service/override.conf +function set_systemd_override { + local key="$1" + local value="$2" + + local sysconf="/etc/systemd/system.conf" + iniset -sudo "${sysconf}" "Manager" "$key" "$value" + echo "Set systemd system override for ${key}=${value}" + + sudo systemctl daemon-reload +} + +# Get a random port from the local port range +# +# This function returns an available port in the local port range. 
The search
+# order is not truly random, but should be considered a random value by the
+# user because it depends on the state of your local system.
+function get_random_port {
+    read lower_port upper_port < /proc/sys/net/ipv4/ip_local_port_range
+    while true; do
+        for (( port = upper_port ; port >= lower_port ; port-- )); do
+            sudo lsof -i ":$port" &> /dev/null
+            if [[ $? > 0 ]] ; then
+                break 2
+            fi
+        done
+    done
+    echo $port
+}
+
+# Save some state information
+#
+# Write out various useful state information to /etc/devstack-version
+function write_devstack_version {
+    cat - <<EOF | sudo tee /etc/devstack-version >/dev/null
+DevStack Version: ${DEVSTACK_SERIES}
+Change: $(git log --format="%H %s %ci" -1)
+OS Version: ${os_VENDOR} ${os_RELEASE} ${os_CODENAME}
+EOF
+}
+
+# Restore xtrace
+$_XTRACE_FUNCTIONS
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/functions-common b/functions-common
new file mode 100644
index 0000000000..c2042c4fef
--- /dev/null
+++ b/functions-common
@@ -0,0 +1,2553 @@
+#!/bin/bash
+#
+# functions-common - Common functions used by DevStack components
+#
+# The canonical copy of this file is maintained in the DevStack repo.
+# All modifications should be made there and then sync'ed to other repos
+# as required.
+#
+# This file is sorted alphabetically within the function groups.
+#
+# - Config Functions
+# - Control Functions
+# - Distro Functions
+# - Git Functions
+# - OpenStack Functions
+# - Package Functions
+# - Process Functions
+# - Service Functions
+# - System Functions
+#
+# The following variables are assumed to be defined by certain functions:
+#
+# - ``ENABLED_SERVICES``
+# - ``ERROR_ON_CLONE``
+# - ``FILES``
+# - ``OFFLINE``
+# - ``RECLONE``
+# - ``REQUIREMENTS_DIR``
+# - ``STACK_USER``
+# - ``http_proxy``, ``https_proxy``, ``no_proxy``
+#
+
+# Save trace setting
+_XTRACE_FUNCTIONS_COMMON=$(set +o | grep xtrace)
+set +o xtrace
+
+# ensure we don't re-source this in the same environment
+[[ -z "$_DEVSTACK_FUNCTIONS_COMMON" ]] || return 0
+declare -r -g _DEVSTACK_FUNCTIONS_COMMON=1
+
+# Global Config Variables
+declare -A -g GITREPO
+declare -A -g GITBRANCH
+declare -A -g GITDIR
+
+# Systemd service file environment variables per service
+declare -A -g SYSTEMD_ENV_VARS
+
+KILL_PATH="$(which kill)"
+
+# Save these variables to .stackenv
+STACK_ENV_VARS="BASE_SQL_CONN DATA_DIR DEST ENABLED_SERVICES HOST_IP \
+    KEYSTONE_SERVICE_URI \
+    LOGFILE OS_CACERT SERVICE_HOST STACK_USER TLS_IP \
+    HOST_IPV6 SERVICE_IP_VERSION TUNNEL_ENDPOINT_IP TUNNEL_IP_VERSION"
+
+
+# Saves significant environment variables to .stackenv for later use
+# Refers to a lot of globals, only TOP_DIR and STACK_ENV_VARS are required to
+# function, the rest are simply saved and do not cause problems if they are undefined.
+# save_stackenv [tag]
+function save_stackenv {
+    local tag=${1:-""}
+    # Save some values we generated for later use
+    time_stamp=$(date "+$TIMESTAMP_FORMAT")
+    echo "# $time_stamp $tag" >$TOP_DIR/.stackenv
+    for i in $STACK_ENV_VARS; do
+        echo $i=${!i} >>$TOP_DIR/.stackenv
+    done
+}
+
+# Update/create user clouds.yaml file.
+# clouds.yaml will have
+# - A `devstack` entry for the `demo` user for the `demo` project.
+# - A `devstack-admin` entry for the `admin` user for the `admin` project.
+# write_clouds_yaml
+function write_clouds_yaml {
+    # The location is a variable to allow for easier refactoring later to make it
+    # overridable. There is currently no use case where doing so makes sense, so
+    # it's not currently configurable.
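+    # For orientation, the generated file ends up looking roughly like
+    # (values below are illustrative defaults, not literal output):
+    #   clouds:
+    #     devstack:
+    #       auth:
+    #         auth_url: http://203.0.113.10/identity
+    #         username: demo
+    #         password: secretadmin
+    #         project_name: demo
+    #       region_name: RegionOne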
+ + CLOUDS_YAML=/etc/openstack/clouds.yaml + + sudo mkdir -p $(dirname $CLOUDS_YAML) + sudo chown -R $STACK_USER /etc/openstack + + CA_CERT_ARG='' + if [ -f "$SSL_BUNDLE_FILE" ]; then + CA_CERT_ARG="--os-cacert $SSL_BUNDLE_FILE" + fi + # devstack: user with the member role on demo project + $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \ + --file $CLOUDS_YAML \ + --os-cloud devstack \ + --os-region-name $REGION_NAME \ + $CA_CERT_ARG \ + --os-auth-url $KEYSTONE_SERVICE_URI \ + --os-username demo \ + --os-password $ADMIN_PASSWORD \ + --os-project-name demo + + # devstack-admin: user with the admin role on the admin project + $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \ + --file $CLOUDS_YAML \ + --os-cloud devstack-admin \ + --os-region-name $REGION_NAME \ + $CA_CERT_ARG \ + --os-auth-url $KEYSTONE_SERVICE_URI \ + --os-username admin \ + --os-password $ADMIN_PASSWORD \ + --os-project-name admin + + # devstack-admin-demo: user with the admin role on the demo project + $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \ + --file $CLOUDS_YAML \ + --os-cloud devstack-admin-demo \ + --os-region-name $REGION_NAME \ + $CA_CERT_ARG \ + --os-auth-url $KEYSTONE_SERVICE_URI \ + --os-username admin \ + --os-password $ADMIN_PASSWORD \ + --os-project-name demo + + # devstack-alt: user with the member role on alt_demo project + $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \ + --file $CLOUDS_YAML \ + --os-cloud devstack-alt \ + --os-region-name $REGION_NAME \ + $CA_CERT_ARG \ + --os-auth-url $KEYSTONE_SERVICE_URI \ + --os-username alt_demo \ + --os-password $ADMIN_PASSWORD \ + --os-project-name alt_demo + + # devstack-alt-member: user with the member role on alt_demo project + $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \ + --file $CLOUDS_YAML \ + --os-cloud devstack-alt-member \ + --os-region-name $REGION_NAME \ + $CA_CERT_ARG \ + --os-auth-url $KEYSTONE_SERVICE_URI \ + --os-username alt_demo_member \ + --os-password $ADMIN_PASSWORD \ + --os-project-name alt_demo + + # devstack-alt-reader: user with the reader role on alt_demo project + $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \ + --file $CLOUDS_YAML \ + --os-cloud devstack-alt-reader \ + --os-region-name $REGION_NAME \ + $CA_CERT_ARG \ + --os-auth-url $KEYSTONE_SERVICE_URI \ + --os-username alt_demo_reader \ + --os-password $ADMIN_PASSWORD \ + --os-project-name alt_demo + + # devstack-reader: user with the reader role on demo project + $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \ + --file $CLOUDS_YAML \ + --os-cloud devstack-reader \ + --os-region-name $REGION_NAME \ + $CA_CERT_ARG \ + --os-auth-url $KEYSTONE_SERVICE_URI \ + --os-username demo_reader \ + --os-password $ADMIN_PASSWORD \ + --os-project-name demo + + # devstack-system-admin: user with the admin role on the system + $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \ + --file $CLOUDS_YAML \ + --os-cloud devstack-system-admin \ + --os-region-name $REGION_NAME \ + $CA_CERT_ARG \ + --os-auth-url $KEYSTONE_SERVICE_URI \ + --os-username admin \ + --os-password $ADMIN_PASSWORD \ + --os-system-scope all + + # devstack-system-member: user with the member role on the system + $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \ + --file $CLOUDS_YAML \ + --os-cloud devstack-system-member \ + --os-region-name $REGION_NAME \ + $CA_CERT_ARG \ + --os-auth-url $KEYSTONE_SERVICE_URI \ + --os-username system_member \ + --os-password $ADMIN_PASSWORD \ + --os-system-scope all + + # devstack-system-reader: user with the reader role on the system + $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \ + --file $CLOUDS_YAML \ + 
--os-cloud devstack-system-reader \ + --os-region-name $REGION_NAME \ + $CA_CERT_ARG \ + --os-auth-url $KEYSTONE_SERVICE_URI \ + --os-username system_reader \ + --os-password $ADMIN_PASSWORD \ + --os-system-scope all + + cat >> $CLOUDS_YAML < +# +# Normalize config-value provided in variable VAR to either "True" or +# "False". If VAR is unset (i.e. $VAR evaluates as empty), the value +# of the second argument will be used as the default value. +# +# Accepts as False: 0 no No NO false False FALSE +# Accepts as True: 1 yes Yes YES true True TRUE +# +# usage: +# VAL=$(trueorfalse False VAL) +function trueorfalse { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + + local default=$1 + + if [ -z $2 ]; then + die $LINENO "variable to normalize required" + fi + local testval=${!2:-} + + case "$testval" in + "1" | [yY]es | "YES" | [tT]rue | "TRUE" ) echo "True" ;; + "0" | [nN]o | "NO" | [fF]alse | "FALSE" ) echo "False" ;; + * ) echo "$default" ;; + esac + + $xtrace +} + +# bool_to_int +# +# Convert True|False to int 1 or 0 +# This function can be used to convert the output of trueorfalse +# to an int follow c conventions where false is 0 and 1 it true. +function bool_to_int { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + if [ -z $1 ]; then + die $LINENO "Bool value required" + fi + if [[ $1 == "True" ]] ; then + echo '1' + else + echo '0' + fi + $xtrace +} + + +function isset { + [[ -v "$1" ]] +} + + +# Control Functions +# ================= + +# Prints backtrace info +# filename:lineno:function +# backtrace level +function backtrace { + local level=$1 + local deep + deep=$((${#BASH_SOURCE[@]} - 1)) + echo "[Call Trace]" + while [ $level -le $deep ]; do + echo "${BASH_SOURCE[$deep]}:${BASH_LINENO[$deep-1]}:${FUNCNAME[$deep-1]}" + deep=$((deep - 1)) + done +} + +# Prints line number and "message" then exits +# die $LINENO "message" +function die { + local exitcode=$? + set +o xtrace + local line=$1; shift + if [ $exitcode == 0 ]; then + exitcode=1 + fi + backtrace 2 + err $line "$*" + # Give buffers a second to flush + sleep 1 + exit $exitcode +} + +# Checks an environment variable is not set or has length 0 OR if the +# exit code is non-zero and prints "message" and exits +# NOTE: env-var is the variable name without a '$' +# die_if_not_set $LINENO env-var "message" +function die_if_not_set { + local exitcode=$? + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + local line=$1; shift + local evar=$1; shift + if ! is_set $evar || [ $exitcode != 0 ]; then + die $line "$*" + fi + $xtrace +} + +function deprecated { + local text=$1 + DEPRECATED_TEXT+="\n$text" + echo "WARNING: $text" >&2 +} + +# Prints line number and "message" in error format +# err $LINENO "message" +function err { + local exitcode=$? + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + local msg="[ERROR] ${BASH_SOURCE[2]}:$1 $2" + echo "$msg" 1>&2; + if [[ -n ${LOGDIR} ]]; then + echo "$msg" >> "${LOGDIR}/error.log" + fi + $xtrace + return $exitcode +} + +# Checks an environment variable is not set or has length 0 OR if the +# exit code is non-zero and prints "message" +# NOTE: env-var is the variable name without a '$' +# err_if_not_set $LINENO env-var "message" +function err_if_not_set { + local exitcode=$? + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + local line=$1; shift + local evar=$1; shift + if ! 
is_set $evar || [ $exitcode != 0 ]; then + err $line "$*" + fi + $xtrace + return $exitcode +} + +# Exit after outputting a message about the distribution not being supported. +# exit_distro_not_supported [optional-string-telling-what-is-missing] +function exit_distro_not_supported { + if [[ -z "$DISTRO" ]]; then + GetDistro + fi + + if [ $# -gt 0 ]; then + die $LINENO "Support for $DISTRO is incomplete: no support for $@" + else + die $LINENO "Support for $DISTRO is incomplete." + fi +} + +# Test if the named environment variable is set and not zero length +# is_set env-var +function is_set { + local var=\$"$1" + eval "[ -n \"$var\" ]" # For ex.: sh -c "[ -n \"$var\" ]" would be better, but several exercises depends on this +} + +# Prints line number and "message" in warning format +# warn $LINENO "message" +function warn { + local exitcode=$? + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + local msg="[WARNING] ${BASH_SOURCE[2]}:$1 $2" + echo "$msg" + $xtrace + return $exitcode +} + + +# Distro Functions +# ================ + +# Determine OS Vendor, Release and Update + +# +# NOTE : For portability, you almost certainly do not want to use +# these variables directly! The "is_*" functions defined below this +# bundle up compatible platforms under larger umbrellas that we have +# determinted are compatible enough (e.g. is_ubuntu covers Ubuntu & +# Debian, is_fedora covers RPM-based distros). Higher-level functions +# such as "install_package" further abstract things in better ways. +# +# ``os_VENDOR`` - vendor name: ``Ubuntu``, ``Fedora``, etc +# ``os_RELEASE`` - major release: ``22.04`` (Ubuntu), ``23`` (Fedora) +# ``os_PACKAGE`` - package type: ``deb`` or ``rpm`` +# ``os_CODENAME`` - vendor's codename for release: ``jammy`` + +declare -g os_VENDOR os_RELEASE os_PACKAGE os_CODENAME + +# Make a *best effort* attempt to install lsb_release packages for the +# user if not available. Note can't use generic install_package* +# because they depend on this! 
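+# For example, on an Ubuntu 22.04 host the detection below would leave
+#   os_VENDOR=Ubuntu os_RELEASE=22.04 os_CODENAME=jammy os_PACKAGE=deb
+# and GetDistro would then set DISTRO=jammy. (Illustrative values only.)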
+function _ensure_lsb_release { + if [[ -x $(command -v lsb_release 2>/dev/null) ]]; then + return + fi + + if [[ -x $(command -v apt-get 2>/dev/null) ]]; then + sudo apt-get install -y lsb-release + elif [[ -x $(command -v zypper 2>/dev/null) ]]; then + sudo zypper -n install lsb-release + elif [[ -x $(command -v dnf 2>/dev/null) ]]; then + sudo dnf install -y python3-distro || sudo dnf install -y openeuler-lsb + else + die $LINENO "Unable to find or auto-install lsb_release" + fi +} + +# GetOSVersion +# Set the following variables: +# - os_RELEASE +# - os_CODENAME +# - os_VENDOR +# - os_PACKAGE +function GetOSVersion { + # CentOS Stream 9 or later and RHEL 9 or later do not provide lsb_release + source /etc/os-release + if [[ "${ID}" =~ (almalinux|centos|rocky|rhel) ]]; then + os_RELEASE=${VERSION_ID} + os_CODENAME=$(echo $VERSION | grep -oP '(?<=[(])[^)]*' || echo 'n/a') + os_VENDOR=$(echo $NAME | tr -d '[:space:]') + else + _ensure_lsb_release + + os_RELEASE=$(lsb_release -r -s) + os_CODENAME=$(lsb_release -c -s) + os_VENDOR=$(lsb_release -i -s) + fi + + if [[ $os_VENDOR =~ (Debian|Ubuntu) ]]; then + os_PACKAGE="deb" + else + os_PACKAGE="rpm" + fi + + typeset -xr os_VENDOR + typeset -xr os_RELEASE + typeset -xr os_PACKAGE + typeset -xr os_CODENAME +} + +# Translate the OS version values into common nomenclature +# Sets global ``DISTRO`` from the ``os_*`` values +declare -g DISTRO + +function GetDistro { + GetOSVersion + if [[ "$os_VENDOR" =~ (Ubuntu) || "$os_VENDOR" =~ (Debian) ]]; then + # 'Everyone' refers to Ubuntu / Debian releases by + # the code name adjective + DISTRO=$os_CODENAME + elif [[ "$os_VENDOR" =~ (Fedora) ]]; then + # For Fedora, just use 'f' and the release + DISTRO="f$os_RELEASE" + elif [[ "$os_VENDOR" =~ (Red.*Hat) || \ + "$os_VENDOR" =~ (CentOS) || \ + "$os_VENDOR" =~ (AlmaLinux) || \ + "$os_VENDOR" =~ (Scientific) || \ + "$os_VENDOR" =~ (OracleServer) || \ + "$os_VENDOR" =~ (RockyLinux) || \ + "$os_VENDOR" =~ (Virtuozzo) ]]; then + MAJOR_VERSION=$(echo $os_RELEASE | cut -d. -f1) + DISTRO="rhel${MAJOR_VERSION}" + elif [[ "$os_VENDOR" =~ (openEuler) ]]; then + DISTRO="openEuler-$os_RELEASE" + else + # We can't make a good choice here. Setting a sensible DISTRO + # is part of the problem, but not the major issue -- we really + # only use DISTRO in the code as a fine-filter. + # + # The bigger problem is categorising the system into one of + # our two big categories as Ubuntu/Debian-ish or + # Fedora/CentOS-ish. + # + # The setting of os_PACKAGE above is only set to "deb" based + # on a hard-coded list of vendor names ... thus we will + # default to thinking unknown distros are RPM based + # (ie. is_ubuntu does not match). But the platform will then + # also not match in is_fedora, because that also has a list of + # names. + # + # So, if you are reading this, getting your distro supported + # is really about making sure it matches correctly in these + # functions. Then you can choose a sensible way to construct + # DISTRO based on your distros release approach. + die $LINENO "Unable to determine DISTRO, can not continue." 
+ fi + typeset -xr DISTRO +} + +# Utility function for checking machine architecture +# is_arch arch-type +function is_arch { + [[ "$(uname -m)" == "$1" ]] +} + +# Determine if current distribution is an Oracle distribution +# is_oraclelinux +function is_oraclelinux { + if [[ -z "$os_VENDOR" ]]; then + GetOSVersion + fi + + [ "$os_VENDOR" = "OracleServer" ] +} + + +# Determine if current distribution is a Fedora-based distribution +# (Fedora, RHEL, CentOS, Rocky, etc). +# is_fedora +function is_fedora { + if [[ -z "$os_VENDOR" ]]; then + GetOSVersion + fi + + [ "$os_VENDOR" = "Fedora" ] || [ "$os_VENDOR" = "Red Hat" ] || \ + [ "$os_VENDOR" = "openEuler" ] || \ + [ "$os_VENDOR" = "RedHatEnterpriseServer" ] || \ + [ "$os_VENDOR" = "RedHatEnterprise" ] || \ + [ "$os_VENDOR" = "RedHatEnterpriseLinux" ] || \ + [ "$os_VENDOR" = "RockyLinux" ] || \ + [ "$os_VENDOR" = "CentOS" ] || [ "$os_VENDOR" = "CentOSStream" ] || \ + [ "$os_VENDOR" = "AlmaLinux" ] || \ + [ "$os_VENDOR" = "OracleServer" ] || [ "$os_VENDOR" = "Virtuozzo" ] +} + + +# Determine if current distribution is an Ubuntu-based distribution +# It will also detect non-Ubuntu but Debian-based distros +# is_ubuntu +function is_ubuntu { + if [[ -z "$os_PACKAGE" ]]; then + GetOSVersion + fi + [ "$os_PACKAGE" = "deb" ] +} + +# Determine if current distribution is an openEuler distribution +# is_openeuler +function is_openeuler { + if [[ -z "$os_PACKAGE" ]]; then + GetOSVersion + fi + [ "$os_VENDOR" = "openEuler" ] +} +# Git Functions +# ============= + +# Returns openstack release name for a given branch name +# ``get_release_name_from_branch branch-name`` +function get_release_name_from_branch { + local branch=$1 + if [[ $branch =~ "stable/" || $branch =~ "proposed/" ]]; then + echo ${branch#*/} + else + echo "master" + fi +} + +# git clone only if directory doesn't exist already. Since ``DEST`` might not +# be owned by the installation user, we create the directory and change the +# ownership to the proper user. +# Set global ``RECLONE=yes`` to simulate a clone when dest-dir exists +# Set global ``ERROR_ON_CLONE=True`` to abort execution with an error if the git repo +# does not exist (default is False, meaning the repo will be cloned). +# Uses globals ``ERROR_ON_CLONE``, ``OFFLINE``, ``RECLONE`` +# git_clone remote dest-dir branch +function git_clone { + local git_remote=$1 + local git_dest=$2 + local git_ref=$3 + local orig_dir + orig_dir=$(pwd) + local git_clone_flags="" + + RECLONE=$(trueorfalse False RECLONE) + if [[ "${GIT_DEPTH}" -gt 0 ]]; then + git_clone_flags="$git_clone_flags --depth $GIT_DEPTH" + fi + + if [[ "$OFFLINE" = "True" ]]; then + echo "Running in offline mode, clones already exist" + # print out the results so we know what change was used in the logs + cd $git_dest + git show --oneline | head -1 + cd $orig_dir + return + fi + + if echo $git_ref | egrep -q "^refs"; then + # If our branch name is a gerrit style refs/changes/... + if [[ ! -d $git_dest ]]; then + if [[ "$ERROR_ON_CLONE" = "True" ]]; then + echo "The $git_dest project was not found; if this is a gate job, add" + echo "the project to 'required-projects' in the job definition." + die $LINENO "ERROR_ON_CLONE is set to True so cloning not allowed in this configuration" + fi + git_timed clone $git_clone_flags $git_remote $git_dest + fi + cd $git_dest + git_timed fetch $git_remote $git_ref && git checkout FETCH_HEAD + else + # do a full clone only if the directory doesn't exist + if [[ ! 
-d $git_dest ]]; then + if [[ "$ERROR_ON_CLONE" = "True" ]]; then + echo "The $git_dest project was not found; if this is a gate job, add" + echo "the project to the \$PROJECTS variable in the job definition." + die $LINENO "ERROR_ON_CLONE is set to True so cloning not allowed in this configuration" + fi + git_timed clone --no-checkout $git_clone_flags $git_remote $git_dest + cd $git_dest + git_timed fetch $git_clone_flags origin $git_ref + git_timed checkout FETCH_HEAD + elif [[ "$RECLONE" = "True" ]]; then + # if it does exist then simulate what clone does if asked to RECLONE + cd $git_dest + # set the url to pull from and fetch + git remote set-url origin $git_remote + git_timed fetch origin + # remove the existing ignored files (like pyc) as they cause breakage + # (due to the py files having older timestamps than our pyc, so python + # thinks the pyc files are correct using them) + sudo find $git_dest -name '*.pyc' -delete + + # handle git_ref accordingly to type (tag, branch) + if [[ -n "`git show-ref refs/tags/$git_ref`" ]]; then + git_update_tag $git_ref + elif [[ -n "`git show-ref refs/heads/$git_ref`" ]]; then + git_update_branch $git_ref + elif [[ -n "`git show-ref refs/remotes/origin/$git_ref`" ]]; then + git_update_remote_branch $git_ref + else + die $LINENO "$git_ref is neither branch nor tag" + fi + + fi + fi + + # NOTE(ianw) 2022-04-13 : commit [1] has broken many assumptions + # about how we clone and work with repos. Mark them safe globally + # as a work-around. + # + # NOTE(danms): On bionic (and likely others) git-config may write + # ~stackuser/.gitconfig if not run with sudo -H. Using --system + # writes these changes to /etc/gitconfig which is more + # discoverable anyway. + # + # [1] https://github.com/git/git/commit/8959555cee7ec045958f9b6dd62e541affb7e7d9 + sudo git config --system --add safe.directory ${git_dest} + + # print out the results so we know what change was used in the logs + cd $git_dest + git show --oneline | head -1 + cd $orig_dir +} + +# A variation on git clone that lets us specify a project by it's +# actual name, like oslo.config. This is exceptionally useful in the +# library installation case +function git_clone_by_name { + local name=$1 + local repo=${GITREPO[$name]} + local dir=${GITDIR[$name]} + local branch=${GITBRANCH[$name]} + git_clone $repo $dir $branch +} + + +# git can sometimes get itself infinitely stuck with transient network +# errors or other issues with the remote end. This wraps git in a +# timeout/retry loop and is intended to watch over non-local git +# processes that might hang. GIT_TIMEOUT, if set, is passed directly +# to timeout(1); otherwise the default value of 0 maintains the status +# quo of waiting forever. +# usage: git_timed +function git_timed { + local count=0 + local timeout=0 + + if [[ -n "${GIT_TIMEOUT}" ]]; then + timeout=${GIT_TIMEOUT} + fi + + time_start "git_timed" + until timeout -s SIGINT ${timeout} git "$@"; do + # 124 is timeout(1)'s special return code when it reached the + # timeout; otherwise assume fatal failure + if [[ $? -ne 124 ]]; then + die $LINENO "git call failed: [git $@]" + fi + + count=$(($count + 1)) + warn $LINENO "timeout ${count} for git call: [git $@]" + if [ $count -eq 3 ]; then + die $LINENO "Maximum of 3 git retries reached" + fi + sleep 5 + done + time_stop "git_timed" +} + +# git update using reference as a branch. 
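+# For illustration, this is typically reached via git_clone with RECLONE=True,
+# e.g. (path and branch are made-up):
+#   cd /opt/stack/nova && git_update_branch stable/2024.1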
+# git_update_branch ref +function git_update_branch { + local git_branch=$1 + + git checkout -f origin/$git_branch + # a local branch might not exist + git branch -D $git_branch || true + git checkout -b $git_branch +} + +# git update using reference as a branch. +# git_update_remote_branch ref +function git_update_remote_branch { + local git_branch=$1 + + git checkout -b $git_branch -t origin/$git_branch +} + +# git update using reference as a tag. Be careful editing source at that repo +# as working copy will be in a detached mode +# git_update_tag ref +function git_update_tag { + local git_tag=$1 + + git tag -d $git_tag + # fetching given tag only + git_timed fetch origin tag $git_tag + git checkout -f $git_tag +} + + +# OpenStack Functions +# =================== + +# Get the default value for HOST_IP +# get_default_host_ip fixed_range floating_range host_ip_iface host_ip +function get_default_host_ip { + local fixed_range=$1 + local floating_range=$2 + local host_ip_iface=$3 + local host_ip=$4 + local af=$5 + + # Search for an IP unless an explicit is set by ``HOST_IP`` environment variable + if [ -z "$host_ip" -o "$host_ip" == "dhcp" ]; then + host_ip="" + # Find the interface used for the default route + host_ip_iface=${host_ip_iface:-$(ip -f $af route list match default table all | grep via | awk '/default/ {print $5}' | head -1)} + local host_ips + host_ips=$(LC_ALL=C ip -f $af addr show ${host_ip_iface} | sed /temporary/d |awk /$af'/ {split($2,parts,"/"); print parts[1]}') + local ip + for ip in $host_ips; do + # Attempt to filter out IP addresses that are part of the fixed and + # floating range. Note that this method only works if the ``netaddr`` + # python library is installed. If it is not installed, an error + # will be printed and the first IP from the interface will be used. + # If that is not correct set ``HOST_IP`` in ``localrc`` to the correct + # address. + if [[ "$af" == "inet6" ]]; then + host_ip=$ip + break; + fi + if ! (address_in_net $ip $fixed_range || address_in_net $ip $floating_range); then + host_ip=$ip + break; + fi + done + fi + echo $host_ip +} + +# Generates hex string from ``size`` byte of pseudo random data +# generate_hex_string size +function generate_hex_string { + local size=$1 + hexdump -n "$size" -v -e '/1 "%02x"' /dev/urandom +} + +# Grab a numbered field from python prettytable output +# Fields are numbered starting with 1 +# Reverse syntax is supported: -1 is the last field, -2 is second to last, etc. 
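+# e.g. (made-up prettytable row):
+#   echo "| 42 | my-server | ACTIVE |" | get_field 2   # prints "my-server"
+#   echo "| 42 | my-server | ACTIVE |" | get_field -1  # prints "ACTIVE"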
+# get_field field-number +function get_field { + local data field + while read data; do + if [ "$1" -lt 0 ]; then + field="(\$(NF$1))" + else + field="\$$(($1 + 1))" + fi + echo "$data" | awk -F'[ \t]*\\|[ \t]*' "{print $field}" + done +} + +# install default policy +# copy over a default policy.json and policy.d for projects +function install_default_policy { + local project=$1 + local project_uc + project_uc=$(echo $1|tr a-z A-Z) + local conf_dir="${project_uc}_CONF_DIR" + # eval conf dir to get the variable + conf_dir="${!conf_dir}" + local project_dir="${project_uc}_DIR" + # eval project dir to get the variable + project_dir="${!project_dir}" + local sample_conf_dir="${project_dir}/etc/${project}" + local sample_policy_dir="${project_dir}/etc/${project}/policy.d" + + # first copy any policy.json + cp -p $sample_conf_dir/policy.json $conf_dir + # then optionally copy over policy.d + if [[ -d $sample_policy_dir ]]; then + cp -r $sample_policy_dir $conf_dir/policy.d + fi +} + +# Add a policy to a policy.json file +# Do nothing if the policy already exists +# ``policy_add policy_file policy_name policy_permissions`` +function policy_add { + local policy_file=$1 + local policy_name=$2 + local policy_perm=$3 + + if grep -q ${policy_name} ${policy_file}; then + echo "Policy ${policy_name} already exists in ${policy_file}" + return + fi + + # Add a terminating comma to policy lines without one + # Remove the closing '}' and all lines following to the end-of-file + local tmpfile + tmpfile=$(mktemp) + uniq ${policy_file} | sed -e ' + s/]$/],/ + /^[}]/,$d + ' > ${tmpfile} + + # Append policy and closing brace + echo " \"${policy_name}\": ${policy_perm}" >>${tmpfile} + echo "}" >>${tmpfile} + + mv ${tmpfile} ${policy_file} +} + +# Gets or creates a domain +# Usage: get_or_create_domain +function get_or_create_domain { + local domain_id + domain_id=$( + openstack --os-cloud devstack-system-admin domain create $1 \ + --description "$2" --or-show \ + -f value -c id + ) + echo $domain_id +} + +# Gets or creates group +# Usage: get_or_create_group [] +function get_or_create_group { + local desc="${3:-}" + local group_id + # Gets group id + group_id=$( + # Creates new group with --or-show + openstack --os-cloud devstack-system-admin group create $1 \ + --domain $2 --description "$desc" --or-show \ + -f value -c id + ) + echo $group_id +} + +# Gets or creates user +# Usage: get_or_create_user [] +function get_or_create_user { + local user_id + if [[ ! 
-z "$4" ]]; then + local email="--email=$4" + else + local email="" + fi + # Gets user id + user_id=$( + # Creates new user with --or-show + openstack --os-cloud devstack-system-admin user create \ + $1 \ + --password "$2" \ + --domain=$3 \ + $email \ + --or-show \ + -f value -c id + ) + echo $user_id +} + +# Gets or creates project +# Usage: get_or_create_project +function get_or_create_project { + local project_id + project_id=$( + # Creates new project with --or-show + openstack --os-cloud devstack-system-admin project create $1 \ + --domain=$2 \ + --or-show -f value -c id + ) + echo $project_id +} + +# Gets or creates role +# Usage: get_or_create_role +function get_or_create_role { + local role_id + role_id=$( + # Creates role with --or-show + openstack --os-cloud devstack-system-admin role create $1 \ + --or-show -f value -c id + ) + echo $role_id +} + +# Returns the domain parts of a function call if present +# Usage: _get_domain_args [ ] +function _get_domain_args { + local domain + domain="" + + if [[ -n "$1" ]]; then + domain="$domain --user-domain $1" + fi + if [[ -n "$2" ]]; then + domain="$domain --project-domain $2" + fi + + echo $domain +} + +# Gets or adds user role to project +# Usage: get_or_add_user_project_role [ ] +function get_or_add_user_project_role { + local user_role_id + local domain_args + + domain_args=$(_get_domain_args $4 $5) + + # Note this is idempotent so we are safe across multiple + # duplicate calls. + openstack --os-cloud devstack-system-admin role add $1 \ + --user $2 \ + --project $3 \ + $domain_args + user_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \ + --role $1 \ + --user $2 \ + --project $3 \ + $domain_args \ + -c Role -f value) + echo $user_role_id +} + +# Gets or adds user role to domain +# Usage: get_or_add_user_domain_role +function get_or_add_user_domain_role { + local user_role_id + + # Note this is idempotent so we are safe across multiple + # duplicate calls. + openstack --os-cloud devstack-system-admin role add $1 \ + --user $2 \ + --domain $3 + user_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \ + --role $1 \ + --user $2 \ + --domain $3 \ + -c Role -f value) + + echo $user_role_id +} + +# Gets or adds user role to system +# Usage: get_or_add_user_system_role [] +function get_or_add_user_system_role { + local user_role_id + local domain_args + + domain_args=$(_get_domain_args $4) + + # Gets user role id + user_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \ + --role $1 \ + --user $2 \ + --system $3 \ + $domain_args \ + -f value -c Role) + if [[ -z "$user_role_id" ]]; then + # Adds role to user and get it + openstack --os-cloud devstack-system-admin role add $1 \ + --user $2 \ + --system $3 \ + $domain_args + user_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \ + --role $1 \ + --user $2 \ + --system $3 \ + $domain_args \ + -f value -c Role) + fi + echo $user_role_id +} + +# Gets or adds group role to project +# Usage: get_or_add_group_project_role +function get_or_add_group_project_role { + local group_role_id + + # Note this is idempotent so we are safe across multiple + # duplicate calls. 
+ openstack role add $1 \ + --group $2 \ + --project $3 + group_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \ + --role $1 \ + --group $2 \ + --project $3 \ + -f value -c Role) + + echo $group_role_id +} + +# Gets or creates service +# Usage: get_or_create_service +function get_or_create_service { + local service_id + # Gets service id + service_id=$( + # Gets service id + openstack --os-cloud devstack-system-admin service show $2 -f value -c id 2>/dev/null || + # Creates new service if not exists + openstack --os-cloud devstack-system-admin service create \ + $2 \ + --name $1 \ + --description="$3" \ + -f value -c id + ) + echo $service_id +} + +# Create an endpoint with a specific interface +# Usage: _get_or_create_endpoint_with_interface +function _get_or_create_endpoint_with_interface { + local endpoint_id + endpoint_id=$(openstack --os-cloud devstack-system-admin endpoint list \ + --service $1 \ + --interface $2 \ + --region $4 \ + -c ID -f value) + if [[ -z "$endpoint_id" ]]; then + # Creates new endpoint + endpoint_id=$(openstack --os-cloud devstack-system-admin endpoint create \ + $1 $2 $3 --region $4 -f value -c id) + fi + + echo $endpoint_id +} + +# Gets or creates endpoint +# Usage: get_or_create_endpoint [adminurl] [internalurl] +function get_or_create_endpoint { + # NOTE(jamielennnox): when converting to v3 endpoint creation we go from + # creating one endpoint with multiple urls to multiple endpoints each with + # a different interface. To maintain the existing function interface we + # create 3 endpoints and return the id of the public one. In reality + # returning the public id will not make a lot of difference as there are no + # scenarios currently that use the returned id. Ideally this behaviour + # should be pushed out to the service setups and let them create the + # endpoints they need. + local public_id + public_id=$(_get_or_create_endpoint_with_interface $1 public $3 $2) + # only create admin/internal urls if provided content for them + if [[ -n "$4" ]]; then + _get_or_create_endpoint_with_interface $1 admin $4 $2 + fi + if [[ -n "$5" ]]; then + _get_or_create_endpoint_with_interface $1 internal $5 $2 + fi + # return the public id to indicate success, and this is the endpoint most likely wanted + echo $public_id +} + +# Get a URL from the identity service +# Usage: get_endpoint_url +function get_endpoint_url { + echo $(openstack --os-cloud devstack-system-admin endpoint list \ + --service $1 --interface $2 \ + -c URL -f value) +} + +# check if we are using ironic with hardware +# TODO(jroll) this is a kludge left behind when ripping ironic code +# out of tree, as it is used by nova and neutron. 
+# figure out a way to refactor nova/neutron code to eliminate this +function is_ironic_hardware { + is_service_enabled ironic && [[ "$IRONIC_IS_HARDWARE" == "True" ]] && return 0 + return 1 +} + +function is_ironic_enforce_scope { + is_service_enabled ironic && [[ "$IRONIC_ENFORCE_SCOPE" == "True" || "$ENFORCE_SCOPE" == "True" ]] && return 0 + return 1 +} + +function is_ironic_sharded { + # todo(JayF): Support >1 shard with multiple n-cpu instances for each + is_service_enabled ironic && [[ "$IRONIC_SHARDS" == "1" ]] && return 0 + return 1 +} + + +# Package Functions +# ================= + +# _get_package_dir +function _get_package_dir { + local base_dir=$1 + local pkg_dir + + if [[ -z "$base_dir" ]]; then + base_dir=$FILES + fi + if is_ubuntu; then + pkg_dir=$base_dir/debs + elif is_fedora; then + pkg_dir=$base_dir/rpms + else + exit_distro_not_supported "list of packages" + fi + echo "$pkg_dir" +} + +# Wrapper for ``apt-get update`` to try multiple times on the update +# to address bad package mirrors (which happen all the time). +function apt_get_update { + # only do this once per run + if [[ "$REPOS_UPDATED" == "True" && "$RETRY_UPDATE" != "True" ]]; then + return + fi + + # bail if we are offline + [[ "$OFFLINE" = "True" ]] && return + + local sudo="sudo" + [[ "$(id -u)" = "0" ]] && sudo="env" + + # time all the apt operations + time_start "apt-get-update" + + local proxies="http_proxy=${http_proxy:-} https_proxy=${https_proxy:-} no_proxy=${no_proxy:-} " + local update_cmd="$sudo $proxies apt-get update" + if ! timeout 300 sh -c "while ! $update_cmd; do sleep 30; done"; then + die $LINENO "Failed to update apt repos, we're dead now" + fi + + REPOS_UPDATED=True + # stop the clock + time_stop "apt-get-update" +} + +# Wrapper for ``apt-get`` to set cache and proxy environment variables +# Uses globals ``OFFLINE``, ``*_proxy`` +# apt_get operation package [package ...] +function apt_get { + local xtrace result + xtrace=$(set +o | grep xtrace) + set +o xtrace + + [[ "$OFFLINE" = "True" || -z "$@" ]] && return + local sudo="sudo" + [[ "$(id -u)" = "0" ]] && sudo="env" + + # time all the apt operations + time_start "apt-get" + + $xtrace + + $sudo DEBIAN_FRONTEND=noninteractive \ + http_proxy=${http_proxy:-} https_proxy=${https_proxy:-} \ + no_proxy=${no_proxy:-} \ + apt-get --option "Dpkg::Options::=--force-confold" --assume-yes "$@" < /dev/null + result=$? + + # stop the clock + time_stop "apt-get" + return $result +} + +function _parse_package_files { + local files_to_parse=$@ + + if [[ -z "$DISTRO" ]]; then + GetDistro + fi + + for fname in ${files_to_parse}; do + local OIFS line package distros distro + [[ -e $fname ]] || continue + + OIFS=$IFS + IFS=$'\n' + for line in $(<${fname}); do + if [[ $line =~ "NOPRIME" ]]; then + continue + fi + + # Assume we want this package; free-form + # comments allowed after a # + package=${line%%#*} + inst_pkg=1 + + # Look for # dist:xxx in comment + if [[ $line =~ (.*)#.*dist:([^ ]*) ]]; then + # We are using BASH regexp matching feature. + package=${BASH_REMATCH[1]} + distros=${BASH_REMATCH[2]} + # In bash ${VAR,,} will lowercase VAR + # Look for a match in the distro list + if [[ ! ${distros,,} =~ ${DISTRO,,} ]]; then + # If no match then skip this package + inst_pkg=0 + fi + fi + + # Look for # not:xxx in comment + if [[ $line =~ (.*)#.*not:([^ ]*) ]]; then + # We are using BASH regexp matching feature. 
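+            # e.g. a hypothetical prerequisite line such as
+            #   libfoo-dev  # not:rhel9
+            # installs the package everywhere except on rhel9.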
+ package=${BASH_REMATCH[1]} + distros=${BASH_REMATCH[2]} + # In bash ${VAR,,} will lowercase VAR + # Look for a match in the distro list + if [[ ${distros,,} =~ ${DISTRO,,} ]]; then + # If match then skip this package + inst_pkg=0 + fi + fi + + if [[ $inst_pkg = 1 ]]; then + echo $package + fi + done + IFS=$OIFS + done +} + +# get_packages() collects a list of package names of any type from the +# prerequisite files in ``files/{debs|rpms}``. The list is intended +# to be passed to a package installer such as apt or yum. +# +# Only packages required for the services in 1st argument will be +# included. Two bits of metadata are recognized in the prerequisite files: +# +# - ``# NOPRIME`` defers installation to be performed later in `stack.sh` +# - ``# dist:DISTRO`` or ``dist:DISTRO1,DISTRO2`` limits the selection +# of the package to the distros listed. The distro names are case insensitive. +# - ``# not:DISTRO`` or ``not:DISTRO1,DISTRO2`` limits the selection +# of the package to the distros not listed. The distro names are case insensitive. +function get_packages { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + local services=$@ + local package_dir + package_dir=$(_get_package_dir) + local file_to_parse="" + local service="" + + if [ $# -ne 1 ]; then + die $LINENO "get_packages takes a single, comma-separated argument" + fi + + if [[ -z "$package_dir" ]]; then + echo "No package directory supplied" + return 1 + fi + for service in ${services//,/ }; do + # Allow individual services to specify dependencies + if [[ -e ${package_dir}/${service} ]]; then + file_to_parse="${file_to_parse} ${package_dir}/${service}" + fi + # NOTE(sdague) n-api needs glance for now because that's where + # glance client is + if [[ $service == n-api ]]; then + if [[ ! $file_to_parse =~ $package_dir/nova ]]; then + file_to_parse="${file_to_parse} ${package_dir}/nova" + fi + if [[ ! $file_to_parse =~ $package_dir/glance ]]; then + file_to_parse="${file_to_parse} ${package_dir}/glance" + fi + if [[ ! $file_to_parse =~ $package_dir/os-brick ]]; then + file_to_parse="${file_to_parse} ${package_dir}/os-brick" + fi + elif [[ $service == c-* ]]; then + if [[ ! $file_to_parse =~ $package_dir/cinder ]]; then + file_to_parse="${file_to_parse} ${package_dir}/cinder" + fi + if [[ ! $file_to_parse =~ $package_dir/os-brick ]]; then + file_to_parse="${file_to_parse} ${package_dir}/os-brick" + fi + elif [[ $service == s-* ]]; then + if [[ ! $file_to_parse =~ $package_dir/swift ]]; then + file_to_parse="${file_to_parse} ${package_dir}/swift" + fi + elif [[ $service == n-* ]]; then + if [[ ! $file_to_parse =~ $package_dir/nova ]]; then + file_to_parse="${file_to_parse} ${package_dir}/nova" + fi + if [[ ! $file_to_parse =~ $package_dir/os-brick ]]; then + file_to_parse="${file_to_parse} ${package_dir}/os-brick" + fi + elif [[ $service == g-* ]]; then + if [[ ! $file_to_parse =~ $package_dir/glance ]]; then + file_to_parse="${file_to_parse} ${package_dir}/glance" + fi + elif [[ $service == key* ]]; then + if [[ ! $file_to_parse =~ $package_dir/keystone ]]; then + file_to_parse="${file_to_parse} ${package_dir}/keystone" + fi + elif [[ $service == q-* || $service == neutron-* ]]; then + if [[ ! $file_to_parse =~ $package_dir/neutron-common ]]; then + file_to_parse="${file_to_parse} ${package_dir}/neutron-common" + fi + elif [[ $service == ir-* ]]; then + if [[ ! 
$file_to_parse =~ $package_dir/ironic ]]; then + file_to_parse="${file_to_parse} ${package_dir}/ironic" + fi + fi + done + echo "$(_parse_package_files $file_to_parse)" + $xtrace +} + +# get_plugin_packages() collects a list of package names of any type from a +# plugin's prerequisite files in ``$PLUGIN/devstack/files/{debs|rpms}``. The +# list is intended to be passed to a package installer such as apt or yum. +# +# Only packages required for enabled and collected plugins will included. +# +# The same metadata used in the main DevStack prerequisite files may be used +# in these prerequisite files, see get_packages() for more info. +function get_plugin_packages { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + local files_to_parse="" + local package_dir="" + for plugin in ${DEVSTACK_PLUGINS//,/ }; do + package_dir="$(_get_package_dir ${GITDIR[$plugin]}/devstack/files)" + files_to_parse+=" $package_dir/$plugin" + done + echo "$(_parse_package_files $files_to_parse)" + $xtrace +} + +# Search plugins for a bindep.txt file +# +# Uses globals ``BINDEP_CMD``, ``GITDIR``, ``DEVSTACK_PLUGINS`` +# +# Note this is only valid after BINDEP_CMD is setup in stack.sh, and +# is thus not really intended to be called externally. +function _get_plugin_bindep_packages { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + + local bindep_file + local packages + + for plugin in ${DEVSTACK_PLUGINS//,/ }; do + bindep_file=${GITDIR[$plugin]}/devstack/files/bindep.txt + if [[ -f ${bindep_file} ]]; then + packages+=$($BINDEP_CMD -b --file ${bindep_file} || true) + fi + done + echo "${packages}" + $xtrace +} + +# Distro-agnostic package installer +# Uses globals ``NO_UPDATE_REPOS``, ``REPOS_UPDATED``, ``RETRY_UPDATE`` +# install_package package [package ...] +function update_package_repo { + NO_UPDATE_REPOS=${NO_UPDATE_REPOS:-False} + REPOS_UPDATED=${REPOS_UPDATED:-False} + RETRY_UPDATE=${RETRY_UPDATE:-False} + + if [[ "$NO_UPDATE_REPOS" = "True" ]]; then + return 0 + fi + + if is_ubuntu; then + apt_get_update + fi +} + +function real_install_package { + if is_ubuntu; then + apt_get install "$@" + elif is_fedora; then + yum_install "$@" + else + exit_distro_not_supported "installing packages" + fi +} + +# Distro-agnostic package installer +# install_package package [package ...] +function install_package { + update_package_repo + if ! real_install_package "$@"; then + RETRY_UPDATE=True update_package_repo && real_install_package "$@" + fi +} + +# Distro-agnostic function to tell if a package is installed +# is_package_installed package [package ...] +function is_package_installed { + if [[ -z "$@" ]]; then + return 1 + fi + + if [[ -z "$os_PACKAGE" ]]; then + GetOSVersion + fi + + if [[ "$os_PACKAGE" = "deb" ]]; then + dpkg -s "$@" > /dev/null 2> /dev/null + elif [[ "$os_PACKAGE" = "rpm" ]]; then + rpm --quiet -q "$@" + else + exit_distro_not_supported "finding if a package is installed" + fi +} + +# Distro-agnostic package uninstaller +# uninstall_package package [package ...] +function uninstall_package { + if is_ubuntu; then + apt_get purge "$@" + elif is_fedora; then + sudo dnf remove -y "$@" ||: + else + exit_distro_not_supported "uninstalling packages" + fi +} + +# Wrapper for ``dnf`` to set proxy environment variables +# Uses globals ``OFFLINE``, ``*_proxy`` +# The name is kept for backwards compatability with external +# callers, despite none of our supported platforms using yum +# any more. +# yum_install package [package ...] 
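+# e.g. (package names are illustrative):
+#   yum_install libvirt-daemon qemu-kvm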
+function yum_install { + local result parse_yum_result + + [[ "$OFFLINE" = "True" ]] && return + + time_start "yum_install" + sudo_with_proxies dnf install -y "$@" + time_stop "yum_install" +} + +# zypper wrapper to set arguments correctly +# Uses globals ``OFFLINE``, ``*_proxy`` +# zypper_install package [package ...] +function zypper_install { + [[ "$OFFLINE" = "True" ]] && return + local sudo="sudo" + [[ "$(id -u)" = "0" ]] && sudo="env" + $sudo http_proxy="${http_proxy:-}" https_proxy="${https_proxy:-}" \ + no_proxy="${no_proxy:-}" \ + zypper --non-interactive install --auto-agree-with-licenses --no-recommends "$@" +} + +# Run bindep and install packages it outputs +# +# Usage: +# install_bindep [profile,profile] +# +# Note unlike the bindep command itself, profile(s) specified should +# be a single, comma-separated string, no spaces. +function install_bindep { + local file=$1 + local profiles=${2:-""} + local pkgs + + if [[ ! -f $file ]]; then + warn $LINENO "Can not find bindep file: $file" + return + fi + + # converting here makes it much easier to work with passing + # arguments + profiles=${profiles/,/ /} + + # Note bindep returns 1 when packages need to be installed, so we + # have to ignore it's return for "-e" + pkgs=$($DEST/bindep-venv/bin/bindep -b --file $file $profiles || true) + + if [[ -n "${pkgs}" ]]; then + install_package ${pkgs} + fi +} + +function write_user_unit_file { + local service=$1 + local command="$2" + local group=$3 + local user=$4 + local env_vars="$5" + local extra="" + if [[ -n "$group" ]]; then + extra="Group=$group" + fi + local unitfile="$SYSTEMD_DIR/$service" + mkdir -p $SYSTEMD_DIR + + iniset -sudo $unitfile "Unit" "Description" "Devstack $service" + iniset -sudo $unitfile "Service" "Environment" "\"PATH=$PATH\"" + iniset -sudo $unitfile "Service" "User" "$user" + iniset -sudo $unitfile "Service" "ExecStart" "$command" + iniset -sudo $unitfile "Service" "KillMode" "process" + iniset -sudo $unitfile "Service" "TimeoutStopSec" "300" + iniset -sudo $unitfile "Service" "ExecReload" "$KILL_PATH -HUP \$MAINPID" + if [[ -n "$env_vars" ]] ; then + iniset -sudo $unitfile "Service" "Environment" "$env_vars" + fi + if [[ -n "$group" ]]; then + iniset -sudo $unitfile "Service" "Group" "$group" + fi + iniset -sudo $unitfile "Install" "WantedBy" "multi-user.target" + + # changes to existing units sometimes need a refresh + $SYSTEMCTL daemon-reload +} + +function write_uwsgi_user_unit_file { + local service=$1 + local command="$2" + local group=$3 + local user=$4 + local env_vars="$5" + local unitfile="$SYSTEMD_DIR/$service" + mkdir -p $SYSTEMD_DIR + + iniset -sudo $unitfile "Unit" "Description" "Devstack $service" + iniset -sudo $unitfile "Service" "Environment" "\"PATH=$PATH\"" + iniset -sudo $unitfile "Service" "SyslogIdentifier" "$service" + iniset -sudo $unitfile "Service" "User" "$user" + iniset -sudo $unitfile "Service" "ExecStart" "$command" + iniset -sudo $unitfile "Service" "ExecReload" "$KILL_PATH -HUP \$MAINPID" + iniset -sudo $unitfile "Service" "Type" "notify" + iniset -sudo $unitfile "Service" "KillMode" "process" + iniset -sudo $unitfile "Service" "Restart" "always" + iniset -sudo $unitfile "Service" "NotifyAccess" "all" + iniset -sudo $unitfile "Service" "RestartForceExitStatus" "100" + + if [[ -n "$env_vars" ]] ; then + iniset -sudo $unitfile "Service" "Environment" "$env_vars" + fi + if [[ -n "$group" ]]; then + iniset -sudo $unitfile "Service" "Group" "$group" + fi + iniset -sudo $unitfile "Install" "WantedBy" "multi-user.target" + + # changes 
to existing units sometimes need a refresh + $SYSTEMCTL daemon-reload +} + +function _common_systemd_pitfalls { + local cmd=$1 + # do some sanity checks on $cmd to see things we don't expect to work + + if [[ "$cmd" =~ "sudo" ]]; then + read -r -d '' msg << EOF || true # read returns 1 for EOF, but it is ok here +You are trying to use run_process with sudo, this is not going to work under systemd. + +If you need to run a service as a user other than \$STACK_USER call it with: + + run_process \$name \$cmd \$group \$user +EOF + die $LINENO "$msg" + fi + + if [[ ! "$cmd" =~ ^/ ]]; then + read -r -d '' msg << EOF || true # read returns 1 for EOF, but it is ok here +The cmd="$cmd" does not start with an absolute path. It will fail to +start under systemd. + +Please update your run_process stanza to have an absolute path. +EOF + die $LINENO "$msg" + fi + +} + +# Helper function to build a basic unit file and run it under systemd. +function _run_under_systemd { + local service=$1 + local command="$2" + local cmd=$command + # sanity check the command + _common_systemd_pitfalls "$cmd" + + local systemd_service="devstack@$service.service" + local group=$3 + local user=${4:-$STACK_USER} + if [[ -z "$user" ]]; then + user=$STACK_USER + fi + local env_vars="$5" + if [[ -v SYSTEMD_ENV_VARS[$service] ]]; then + env_vars="${SYSTEMD_ENV_VARS[$service]} $env_vars" + fi + if [[ "$command" =~ "uwsgi" ]] ; then + if [[ "$GLOBAL_VENV" == "True" ]] ; then + cmd="$cmd --venv $DEVSTACK_VENV" + fi + write_uwsgi_user_unit_file $systemd_service "$cmd" "$group" "$user" "$env_vars" + else + write_user_unit_file $systemd_service "$cmd" "$group" "$user" "$env_vars" + fi + + $SYSTEMCTL enable $systemd_service + $SYSTEMCTL start $systemd_service +} + +# Find out if a process exists by partial name. +# is_running name +function is_running { + local name=$1 + ps auxw | grep -v grep | grep ${name} > /dev/null + local exitcode=$? + # some times I really hate bash reverse binary logic + return $exitcode +} + +# Run a single service under screen or directly +# If the command includes shell metachatacters (;<>*) it must be run using a shell +# If an optional group is provided sg will be used to run the +# command as that group. +# run_process service "command-line" [group] [user] [env_vars] +# env_vars must be a space separated list of variable assigments, ie: "A=1 B=2" +function run_process { + local service=$1 + local command="$2" + local group=$3 + local user=$4 + local env_vars="$5" + + local name=$service + + time_start "run_process" + if is_service_enabled $service; then + _run_under_systemd "$name" "$command" "$group" "$user" "$env_vars" + fi + time_stop "run_process" +} + +# Stop a service process +# If a PID is available use it, kill the whole process group via TERM +# If screen is being used kill the screen window; this will catch processes +# that did not leave a PID behind +# Uses globals ``SERVICE_DIR`` +# stop_process service +function stop_process { + local service=$1 + + SERVICE_DIR=${SERVICE_DIR:-${DEST}/status} + + if is_service_enabled $service; then + # Only do this for units which appear enabled, this also + # catches units that don't really exist for cases like + # keystone without a failure. 
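+        # e.g. ``stop_process n-cpu`` stops and disables the
+        # devstack@n-cpu.service unit (service name shown for illustration).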
+ if $SYSTEMCTL is-enabled devstack@$service.service; then + $SYSTEMCTL stop devstack@$service.service + $SYSTEMCTL disable devstack@$service.service + fi + fi +} + +# use systemctl to check service status +function service_check { + local service + for service in ${ENABLED_SERVICES//,/ }; do + # because some things got renamed like key => keystone + if $SYSTEMCTL is-enabled devstack@$service.service; then + # no-pager is needed because otherwise status dumps to a + # pager when in interactive mode, which will stop a manual + # devstack run. + $SYSTEMCTL status devstack@$service.service --no-pager + fi + done +} + + +# Plugin Functions +# ================= + +DEVSTACK_PLUGINS=${DEVSTACK_PLUGINS:-""} + +# enable_plugin [branch] +# +# ``name`` is an arbitrary name - (aka: glusterfs, nova-docker, zaqar) +# ``url`` is a git url +# ``branch`` is a gitref. If it's not set, defaults to master +function enable_plugin { + local name=$1 + local url=$2 + local branch=${3:-master} + if is_plugin_enabled $name; then + die $LINENO "Plugin attempted to be enabled twice: ${name} ${url} ${branch}" + fi + DEVSTACK_PLUGINS+=",$name" + GITREPO[$name]=$url + GITDIR[$name]=$DEST/$name + GITBRANCH[$name]=$branch +} + +# is_plugin_enabled +# +# Check if the plugin was enabled, e.g. using enable_plugin +# +# ``name`` The name with which the plugin was enabled +function is_plugin_enabled { + local name=$1 + if [[ ",${DEVSTACK_PLUGINS}," =~ ",${name}," ]]; then + return 0 + fi + return 1 +} + +# fetch_plugins +# +# clones all plugins +function fetch_plugins { + local plugins="${DEVSTACK_PLUGINS}" + local plugin + + # short circuit if nothing to do + if [[ -z $plugins ]]; then + return + fi + + echo "Fetching DevStack plugins" + for plugin in ${plugins//,/ }; do + git_clone_by_name $plugin + done +} + +# load_plugin_settings +# +# Load settings from plugins in the order that they were registered +function load_plugin_settings { + local plugins="${DEVSTACK_PLUGINS}" + local plugin + + # short circuit if nothing to do + if [[ -z $plugins ]]; then + return + fi + + echo "Loading plugin settings" + for plugin in ${plugins//,/ }; do + local dir=${GITDIR[$plugin]} + # source any known settings + if [[ -f $dir/devstack/settings ]]; then + source $dir/devstack/settings + fi + done +} + +# plugin_override_defaults +# +# Run an extremely early setting phase for plugins that allows default +# overriding of services. +function plugin_override_defaults { + local plugins="${DEVSTACK_PLUGINS}" + local plugin + + # short circuit if nothing to do + if [[ -z $plugins ]]; then + return + fi + + echo "Overriding Configuration Defaults" + for plugin in ${plugins//,/ }; do + local dir=${GITDIR[$plugin]} + # source any overrides + if [[ -f $dir/devstack/override-defaults ]]; then + # be really verbose that an override is happening, as it + # may not be obvious if things fail later. + echo "$plugin has overridden the following defaults" + cat $dir/devstack/override-defaults + source $dir/devstack/override-defaults + fi + done +} + +# run_plugins +# +# Run the devstack/plugin.sh in all the plugin directories. These are +# run in registration order. 
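+#
+# For example, a plugin.sh sourced here typically branches on the mode and
+# phase arguments it is passed (the plugin and package names below are
+# purely illustrative):
+#
+#   if [[ "$1" == "stack" && "$2" == "install" ]]; then
+#       install_package my-plugin-dependency
+#   fi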
+function run_plugins { + local mode=$1 + local phase=$2 + + local plugins="${DEVSTACK_PLUGINS}" + local plugin + for plugin in ${plugins//,/ }; do + local dir=${GITDIR[$plugin]} + if [[ -f $dir/devstack/plugin.sh ]]; then + source $dir/devstack/plugin.sh $mode $phase + fi + done +} + +function run_phase { + local mode=$1 + local phase=$2 + if [[ -d $TOP_DIR/extras.d ]]; then + local extra_plugin_file_name + for extra_plugin_file_name in $TOP_DIR/extras.d/*.sh; do + # NOTE(sdague): only process extras.d for the 3 explicitly + # white listed elements in tree. We want these to move out + # over time as well, but they are in tree, so we need to + # manage that. + local exceptions="80-tempest.sh" + local extra + extra=$(basename $extra_plugin_file_name) + if [[ ! ( $exceptions =~ "$extra" ) ]]; then + warn "use of extras.d is no longer supported" + warn "processing of project $extra is skipped" + else + [[ -r $extra_plugin_file_name ]] && source $extra_plugin_file_name $mode $phase + fi + done + fi + # the source phase corresponds to settings loading in plugins + if [[ "$mode" == "source" ]]; then + load_plugin_settings + verify_disabled_services + elif [[ "$mode" == "override_defaults" ]]; then + plugin_override_defaults + else + run_plugins $mode $phase + fi +} + +# define_plugin +# +# This function is a no-op. It allows a plugin to define its name So +# that other plugins may reference it by name. It should generally be +# the last component of the canonical git repo name. E.g., +# openstack/devstack-foo should use "devstack-foo" as the name here. +# +# This function is currently a noop, but the value may still be used +# by external tools (as in plugin_requires) and may be used by +# devstack in the future. +# +# ``name`` is an arbitrary name - (aka: glusterfs, nova-docker, zaqar) +function define_plugin { + : +} + +# plugin_requires +# +# This function is a no-op. It is currently used by external tools +# (such as the devstack module for Ansible) to automatically generate +# local.conf files. It is not currently used by devstack itself to +# resolve dependencies. +# +# ``name`` is an arbitrary name - (aka: glusterfs, nova-docker, zaqar) +# ``other`` is the name of another plugin +function plugin_requires { + : +} + + +# Service Functions +# ================= + +# remove extra commas from the input string (i.e. ``ENABLED_SERVICES``) +# _cleanup_service_list service-list +function _cleanup_service_list { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + + echo "$1" | sed -e ' + s/,,/,/g; + s/^,//; + s/,$// + ' + + $xtrace +} + +# disable_all_services() removes all current services +# from ``ENABLED_SERVICES`` to reset the configuration +# before a minimal installation +# Uses global ``ENABLED_SERVICES`` +# disable_all_services +function disable_all_services { + ENABLED_SERVICES="" +} + +# Remove all services starting with '-'. For example, to install all default +# services except rabbit (rabbit) set in ``localrc``: +# ENABLED_SERVICES+=",-rabbit" +# Uses global ``ENABLED_SERVICES`` +# disable_negated_services +function disable_negated_services { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + + local to_remove="" + local remaining="" + local service + + # build up list of services that should be removed; i.e. they + # begin with "-" + for service in ${ENABLED_SERVICES//,/ }; do + if [[ ${service} == -* ]]; then + to_remove+=",${service#-}" + else + remaining+=",${service}" + fi + done + + # go through the service list. 
if this service appears in the "to + # be removed" list, drop it + ENABLED_SERVICES=$(remove_disabled_services "$remaining" "$to_remove") + + $xtrace +} + +# disable_service() prepares the services passed as argument to be +# removed from the ``ENABLED_SERVICES`` list, if they are present. +# +# For example: +# disable_service rabbit +# +# Uses global ``DISABLED_SERVICES`` +# disable_service service [service ...] +function disable_service { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + + local disabled_svcs="${DISABLED_SERVICES}" + local enabled_svcs=",${ENABLED_SERVICES}," + local service + for service in $@; do + disabled_svcs+=",$service" + if is_service_enabled $service; then + enabled_svcs=${enabled_svcs//,$service,/,} + fi + done + DISABLED_SERVICES=$(_cleanup_service_list "$disabled_svcs") + ENABLED_SERVICES=$(_cleanup_service_list "$enabled_svcs") + + $xtrace +} + +# enable_service() adds the services passed as argument to the +# ``ENABLED_SERVICES`` list, if they are not already present. +# +# For example: +# enable_service q-svc +# +# This function does not know about the special cases +# for nova, glance, and neutron built into is_service_enabled(). +# Uses global ``ENABLED_SERVICES`` +# enable_service service [service ...] +function enable_service { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + + local tmpsvcs="${ENABLED_SERVICES}" + local service + for service in $@; do + if [[ ,${DISABLED_SERVICES}, =~ ,${service}, ]]; then + warn $LINENO "Attempt to enable_service ${service} when it has been disabled" + continue + fi + if ! is_service_enabled $service; then + tmpsvcs+=",$service" + fi + done + ENABLED_SERVICES=$(_cleanup_service_list "$tmpsvcs") + disable_negated_services + + $xtrace +} + +# is_service_enabled() checks if the service(s) specified as arguments are +# enabled by the user in ``ENABLED_SERVICES``. +# +# Multiple services specified as arguments are ``OR``'ed together; the test +# is a short-circuit boolean, i.e it returns on the first match. +# +# There are special cases for some 'catch-all' services:: +# **nova** returns true if any service enabled start with **n-** +# **cinder** returns true if any service enabled start with **c-** +# **glance** returns true if any service enabled start with **g-** +# **neutron** returns true if any service enabled start with **q-** +# **swift** returns true if any service enabled start with **s-** +# **trove** returns true if any service enabled start with **tr-** +# For backward compatibility if we have **swift** in ENABLED_SERVICES all the +# **s-** services will be enabled. This will be deprecated in the future. +# +# Uses global ``ENABLED_SERVICES`` +# is_service_enabled service [service ...] 
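+#
+# For example, with a hypothetical ENABLED_SERVICES="key,g-api,n-api,n-cpu",
+# both an exact service name and the legacy catch-all match succeed:
+#
+#   is_service_enabled n-api && echo "nova-api is enabled"
+#   is_service_enabled nova && echo "at least one n-* service is enabled"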
+function is_service_enabled { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + + local enabled=1 + local services=$@ + local service + for service in ${services}; do + [[ ,${ENABLED_SERVICES}, =~ ,${service}, ]] && enabled=0 + + # Look for top-level 'enabled' function for this service + if type is_${service}_enabled >/dev/null 2>&1; then + # A function exists for this service, use it + is_${service}_enabled && enabled=0 + fi + + # TODO(dtroyer): Remove these legacy special-cases after the is_XXX_enabled() + # are implemented + + [[ ${service} == n-cpu-* && ,${ENABLED_SERVICES} =~ ,"n-cpu" ]] && enabled=0 + [[ ${service} == "nova" && ,${ENABLED_SERVICES} =~ ,"n-" ]] && enabled=0 + [[ ${service} == "glance" && ,${ENABLED_SERVICES} =~ ,"g-" ]] && enabled=0 + [[ ${service} == "neutron" && ,${ENABLED_SERVICES} =~ ,"q-" ]] && enabled=0 + [[ ${service} == "trove" && ,${ENABLED_SERVICES} =~ ,"tr-" ]] && enabled=0 + [[ ${service} == "swift" && ,${ENABLED_SERVICES} =~ ,"s-" ]] && enabled=0 + [[ ${service} == s-* && ,${ENABLED_SERVICES} =~ ,"swift" ]] && enabled=0 + done + + $xtrace + return $enabled +} + +# remove specified list from the input string +# remove_disabled_services service-list remove-list +function remove_disabled_services { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + + local service_list=$1 + local remove_list=$2 + local service + local enabled="" + + for service in ${service_list//,/ }; do + local remove + local add=1 + for remove in ${remove_list//,/ }; do + if [[ ${remove} == ${service} ]]; then + add=0 + break + fi + done + if [[ $add == 1 ]]; then + enabled="${enabled},$service" + fi + done + + $xtrace + + _cleanup_service_list "$enabled" +} + +# Toggle enable/disable_service for services that must run exclusive of each other +# $1 The name of a variable containing a space-separated list of services +# $2 The name of a variable in which to store the enabled service's name +# $3 The name of the service to enable +function use_exclusive_service { + local options=${!1} + local selection=$3 + local out=$2 + [ -z $selection ] || [[ ! "$options" =~ "$selection" ]] && return 1 + local opt + for opt in $options;do + [[ "$opt" = "$selection" ]] && enable_service $opt || disable_service $opt + done + eval "$out=$selection" + return 0 +} + +# Make sure that nothing has manipulated ENABLED_SERVICES in a way +# that conflicts with prior calls to disable_service. +# Uses global ``ENABLED_SERVICES`` +function verify_disabled_services { + local service + for service in ${ENABLED_SERVICES//,/ }; do + if [[ ,${DISABLED_SERVICES}, =~ ,${service}, ]]; then + die $LINENO "ENABLED_SERVICES directly modified to overcome 'disable_service ${service}'" + fi + done +} + + +# System Functions +# ================ + +# Only run the command if the target file (the last arg) is not on an +# NFS filesystem. +function _safe_permission_operation { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + local args=( $@ ) + local last + local sudo_cmd + local dir_to_check + + let last="${#args[*]} - 1" + + local dir_to_check=${args[$last]} + if [ ! 
-d "$dir_to_check" ]; then + dir_to_check=`dirname "$dir_to_check"` + fi + + if is_nfs_directory "$dir_to_check" ; then + $xtrace + return 0 + fi + + sudo_cmd="sudo" + + $xtrace + $sudo_cmd $@ +} + +# Exit 0 if address is in network or 1 if address is not in network +# ip-range is in CIDR notation: 1.2.3.4/20 +# address_in_net ip-address ip-range +function address_in_net { + local ip=$1 + local range=$2 + local masklen=${range#*/} + local network + network=$(maskip ${range%/*} $(cidr2netmask $masklen)) + local subnet + subnet=$(maskip $ip $(cidr2netmask $masklen)) + [[ $network == $subnet ]] +} + +# Add a user to a group. +# add_user_to_group user group +function add_user_to_group { + local user=$1 + local group=$2 + + sudo usermod -a -G "$group" "$user" +} + +# Convert CIDR notation to a IPv4 netmask +# cidr2netmask cidr-bits +function cidr2netmask { + local maskpat="255 255 255 255" + local maskdgt="254 252 248 240 224 192 128" + set -- ${maskpat:0:$(( ($1 / 8) * 4 ))}${maskdgt:$(( (7 - ($1 % 8)) * 4 )):3} + echo ${1-0}.${2-0}.${3-0}.${4-0} +} + +# Check if this is a valid ipv4 address string +function is_ipv4_address { + local address=$1 + local regex='([0-9]{1,3}\.){3}[0-9]{1,3}' + # TODO(clarkb) make this more robust + if [[ "$address" =~ $regex ]] ; then + return 0 + else + return 1 + fi +} + +# Remove "[]" around urlquoted IPv6 addresses +function ipv6_unquote { + echo $1 | tr -d [] +} + +# Gracefully cp only if source file/dir exists +# cp_it source destination +function cp_it { + if [ -e $1 ] || [ -d $1 ]; then + cp -pRL $1 $2 + fi +} + +# HTTP and HTTPS proxy servers are supported via the usual environment variables [1] +# ``http_proxy``, ``https_proxy`` and ``no_proxy``. They can be set in +# ``localrc`` or on the command line if necessary:: +# +# [1] http://www.w3.org/Daemon/User/Proxies/ProxyClients.html +# +# http_proxy=http://proxy.example.com:3128/ no_proxy=repo.example.net ./stack.sh + +function export_proxy_variables { + if isset http_proxy ; then + export http_proxy=$http_proxy + fi + if isset https_proxy ; then + export https_proxy=$https_proxy + fi + if isset no_proxy ; then + export no_proxy=$no_proxy + fi +} + +# Returns true if the directory is on a filesystem mounted via NFS. +function is_nfs_directory { + local mount_type + mount_type=`stat -f -L -c %T $1` + test "$mount_type" == "nfs" +} + +# Return the network portion of the given IP address using netmask +# netmask is in the traditional dotted-quad format +# maskip ip-address netmask +function maskip { + local ip=$1 + local mask=$2 + local l="${ip%.*}"; local r="${ip#*.}"; local n="${mask%.*}"; local m="${mask#*.}" + local subnet + subnet=$((${ip%%.*}&${mask%%.*})).$((${r%%.*}&${m%%.*})).$((${l##*.}&${n##*.})).$((${ip##*.}&${mask##*.})) + echo $subnet +} + +function is_provider_network { + if [ "$Q_USE_PROVIDER_NETWORKING" == "True" ]; then + return 0 + fi + return 1 +} + + +# Return just the . for the given python interpreter +function _get_python_version { + local interp=$1 + local version + # disable erroring out here, otherwise if python 3 doesn't exist we fail hard. + if [[ -x $(which $interp 2> /dev/null) ]]; then + version=$($interp -c 'import sys; print("%s.%s" % sys.version_info[0:2])') + fi + echo ${version} +} + +# Return the current python as "python." 
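+# For example, on a host where python2 reports 2.7 this echoes "python2.7",
+# and python3_version below echoes something like "python3.10" (the exact
+# value depends on the installed interpreter):
+#
+#   PY3=$(python3_version)    # PY3 is an illustrative variable name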
+function python_version { + local python_version + python_version=$(_get_python_version python2) + echo "python${python_version}" +} + +function python3_version { + local python3_version + python3_version=$(_get_python_version python3) + echo "python${python3_version}" +} + + +# Service wrapper to restart services +# restart_service service-name +function restart_service { + if [ -x /bin/systemctl ]; then + sudo /bin/systemctl restart $1 + else + sudo service $1 restart + fi + +} + +# Only change permissions of a file or directory if it is not on an +# NFS filesystem. +function safe_chmod { + _safe_permission_operation chmod $@ +} + +# Only change ownership of a file or directory if it is not on an NFS +# filesystem. +function safe_chown { + _safe_permission_operation chown $@ +} + +# Service wrapper to start services +# start_service service-name +function start_service { + if [ -x /bin/systemctl ]; then + sudo /bin/systemctl start $1 + else + sudo service $1 start + fi +} + +# Service wrapper to stop services +# stop_service service-name +function stop_service { + if [ -x /bin/systemctl ]; then + sudo /bin/systemctl stop $1 + else + sudo service $1 stop + fi +} + +# Service wrapper to reload services +# If the service was not in running state it will start it +# reload_service service-name +function reload_service { + if [ -x /bin/systemctl ]; then + sudo /bin/systemctl reload-or-restart $1 + else + sudo service $1 reload + fi +} + +# Test with a finite retry loop. +# +function test_with_retry { + local testcmd=$1 + local failmsg=$2 + local until=${3:-10} + local sleep=${4:-0.5} + + time_start "test_with_retry" + if ! timeout $until sh -c "while ! $testcmd; do sleep $sleep; done"; then + die $LINENO "$failmsg" + fi + time_stop "test_with_retry" +} + +# Like sudo but forwarding http_proxy https_proxy no_proxy environment vars. +# If it is run as superuser then sudo is replaced by env. +# +function sudo_with_proxies { + local sudo + + [[ "$(id -u)" = "0" ]] && sudo="env" || sudo="sudo" + + $sudo http_proxy="${http_proxy:-}" https_proxy="${https_proxy:-}"\ + no_proxy="${no_proxy:-}" "$@" +} + +# Timing infrastructure - figure out where large blocks of time are +# used in DevStack +# +# The timing infrastructure for DevStack is about collecting buckets +# of time that are spend in some subtask. For instance, that might be +# 'apt', 'pip', 'osc', even database migrations. We do this by a pair +# of functions: time_start / time_stop. +# +# These take a single parameter: $name - which specifies the name of +# the bucket to be accounted against. time_totals function spits out +# the results. +# +# Resolution is only in whole seconds, so should be used for long +# running activities. + +declare -A -g _TIME_TOTAL +declare -A -g _TIME_START +declare -r -g _TIME_BEGIN=$(date +%s) + +# time_start $name +# +# starts the clock for a timer by name. Errors if that clock is +# already started. +function time_start { + local name=$1 + local start_time=${_TIME_START[$name]} + if [[ -n "$start_time" ]]; then + die $LINENO "Trying to start the clock on $name, but it's already been started" + fi + _TIME_START[$name]=$(date +%s%3N) +} + +# time_stop $name +# +# stops the clock for a timer by name, and accumulate that time in the +# global counter for that name. Errors if that clock had not +# previously been started. 
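+#
+# For example, to account a long-running step into a named bucket (the
+# bucket name and command below are illustrative placeholders):
+#
+#   time_start "dbsync"
+#   $DBSYNC_COMMAND           # the actual work being timed
+#   time_stop "dbsync"        # accumulates into _TIME_TOTAL["dbsync"]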
+function time_stop { + local name + local end_time + local elapsed_time + local total + local start_time + + name=$1 + start_time=${_TIME_START[$name]} + + if [[ -z "$start_time" ]]; then + die $LINENO "Trying to stop the clock on $name, but it was never started" + fi + end_time=$(date +%s%3N) + elapsed_time=$(($end_time - $start_time)) + total=${_TIME_TOTAL[$name]:-0} + # reset the clock so we can start it in the future + _TIME_START[$name]="" + _TIME_TOTAL[$name]=$(($total + $elapsed_time)) +} + +function install_openstack_cli_server { + export PATH=$TOP_DIR/files/openstack-cli-server:$PATH + run_process openstack-cli-server "$PYTHON $TOP_DIR/files/openstack-cli-server/openstack-cli-server" +} + +function oscwrap { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + + local out + local rc + local start + local end + # Cannot use timer_start and timer_stop as we run in subshells + # and those rely on modifying vars in the same process (which cannot + # happen from a subshell. + start=$(date +%s%3N) + out=$(command openstack "$@") + rc=$? + end=$(date +%s%3N) + echo $((end - start)) >> $OSCWRAP_TIMER_FILE + + echo "$out" + $xtrace + return $rc +} + +function install_oscwrap { + # File to accumulate our timing data + OSCWRAP_TIMER_FILE=$(mktemp) + # Bash by default doesn't expand aliases, allow it for the aliases + # we want to whitelist. + shopt -s expand_aliases + # Remove all aliases that might be expanded to preserve old unexpanded + # behavior + unalias -a + # Add only the alias we want for openstack + alias openstack=oscwrap +} + +function cleanup_oscwrap { + local total=0 + total=$(cat $OSCWRAP_TIMER_FILE | $PYTHON -c "import sys; print(sum(int(l) for l in sys.stdin))") + _TIME_TOTAL["osc"]=$total + rm $OSCWRAP_TIMER_FILE +} + +# time_totals +# Print out total time summary +function time_totals { + local elapsed_time + local end_time + local len=20 + local xtrace + local unaccounted_time + + end_time=$(date +%s) + elapsed_time=$(($end_time - $_TIME_BEGIN)) + unaccounted_time=$elapsed_time + + # pad 1st column this far + for t in ${!_TIME_TOTAL[*]}; do + if [[ ${#t} -gt $len ]]; then + len=${#t} + fi + done + + cleanup_oscwrap + + xtrace=$(set +o | grep xtrace) + set +o xtrace + + echo + echo "=========================" + echo "DevStack Component Timing" + echo " (times are in seconds) " + echo "=========================" + for t in ${!_TIME_TOTAL[*]}; do + local v=${_TIME_TOTAL[$t]} + # because we're recording in milliseconds + v=$(($v / 1000)) + printf "%-${len}s %3d\n" "$t" "$v" + unaccounted_time=$(($unaccounted_time - $v)) + done + echo "-------------------------" + printf "%-${len}s %3d\n" "Unaccounted time" "$unaccounted_time" + echo "=========================" + printf "%-${len}s %3d\n" "Total runtime" "$elapsed_time" + + $xtrace +} + +function clean_pyc_files { + # Clean up all *.pyc files + if [[ -n "$DEST" ]] && [[ -d "$DEST" ]]; then + sudo find $DEST -name "*.pyc" -delete + fi +} + +function is_fips_enabled { + fips=`cat /proc/sys/crypto/fips_enabled` + [ "$fips" == "1" ] +} + +# Restore xtrace +$_XTRACE_FUNCTIONS_COMMON + +# Local variables: +# mode: shell-script +# End: diff --git a/gate/updown.sh b/gate/updown.sh new file mode 100755 index 0000000000..f46385cfe1 --- /dev/null +++ b/gate/updown.sh @@ -0,0 +1,24 @@ +#!/bin/bash -xe +# +# An up / down test for gate functional testing +# +# Note: this is expected to start running as jenkins + +# Step 1: give back sudoers permissions to DevStack +TEMPFILE=`mktemp` +echo "stack ALL=(root) NOPASSWD:ALL" 
>$TEMPFILE +chmod 0440 $TEMPFILE +sudo chown root:root $TEMPFILE +sudo mv $TEMPFILE /etc/sudoers.d/51_stack_sh + +# TODO: do something to start a guest to create crud that should +# disappear + +# Step 2: unstack +echo "Running unstack.sh" +sudo -H -u stack stdbuf -oL -eL bash -ex ./unstack.sh + +# Step 3: clean +echo "Running clean.sh" +sudo -H -u stack stdbuf -oL -eL bash -ex ./clean.sh + diff --git a/inc/async b/inc/async new file mode 100644 index 0000000000..56338f5343 --- /dev/null +++ b/inc/async @@ -0,0 +1,256 @@ +#!/bin/bash +# +# Symbolic asynchronous tasks for devstack +# +# Usage: +# +# async_runfunc my_shell_func foo bar baz +# +# ... do other stuff ... +# +# async_wait my_shell_func +# + +DEVSTACK_PARALLEL=$(trueorfalse True DEVSTACK_PARALLEL) +_ASYNC_BG_TIME=0 + +# Keep track of how much total time was spent in background tasks +# Takes a job runtime in ms. +function _async_incr_bg_time { + local elapsed_ms="$1" + _ASYNC_BG_TIME=$(($_ASYNC_BG_TIME + $elapsed_ms)) +} + +# Get the PID of a named future to wait on +function async_pidof { + local name="$1" + local inifile="${DEST}/async/${name}.ini" + + if [ -f "$inifile" ]; then + iniget $inifile job pid + else + echo 'UNKNOWN' + return 1 + fi +} + +# Log a message about a job. If the message contains "%command" then the +# full command line of the job will be substituted in the output +function async_log { + local name="$1" + shift + local message="$*" + local inifile=${DEST}/async/${name}.ini + local pid + local command + + pid=$(iniget $inifile job pid) + command=$(iniget $inifile job command | tr '#' '-') + message=$(echo "$message" | sed "s#%command#$command#g") + + echo "[$BASHPID Async ${name}:${pid}]: $message" +} + +# Inner function that actually runs the requested task. We wrap it like this +# just so we can emit a finish message as soon as the work is done, to make +# it easier to find the tracking just before an error. +function async_inner { + local name="$1" + local rc + local fifo="${DEST}/async/${name}.fifo" + shift + set -o xtrace + if $* >${DEST}/async/${name}.log 2>&1; then + rc=0 + set +o xtrace + async_log "$name" "finished successfully" + else + rc=$? + set +o xtrace + async_log "$name" "FAILED with rc $rc" + fi + iniset ${DEST}/async/${name}.ini job end_time $(date "+%s%3N") + # Block on the fifo until we are signaled to exit by the main process + cat $fifo + return $rc +} + +# Run something async. Takes a symbolic name and a list of arguments of +# what to run. Ideally this would be rarely used and async_runfunc() would +# be used everywhere for readability. +# +# This spawns the work in a background worker, records a "future" to be +# collected by a later call to async_wait() +function async_run { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + + local name="$1" + shift + local inifile=${DEST}/async/${name}.ini + local fifo=${DEST}/async/${name}.fifo + + touch $inifile + iniset $inifile job command "$*" + iniset $inifile job start_time $(date +%s%3N) + + if [[ "$DEVSTACK_PARALLEL" = "True" ]]; then + mkfifo $fifo + async_inner $name $* & + iniset $inifile job pid $! + async_log "$name" "running: %command" + $xtrace + else + iniset $inifile job pid "self" + async_log "$name" "Running synchronously: %command" + $xtrace + $* + return $? + fi +} + +# Shortcut for running a shell function async. Uses the function name as the +# async name. 
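+#
+# For example (install_foo and install_bar are hypothetical shell functions),
+# several functions can be started in the background and collected later
+# with a single async_wait call:
+#
+#   async_runfunc install_foo /opt/stack/foo
+#   async_runfunc install_bar
+#   ... do other stuff ...
+#   async_wait install_foo install_bar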
+function async_runfunc {
+    async_run $1 $*
+}
+
+# Dump some information to help debug a failed wait
+function async_wait_dump {
+    local failpid=$1
+
+    echo "=== Wait failure dump from $BASHPID ==="
+    echo "Processes:"
+    ps -f
+    echo "Waiting jobs:"
+    for name in $(ls ${DEST}/async/*.ini); do
+        echo "Job $name :"
+        cat "$name"
+    done
+    echo "Failed PID status:"
+    sudo cat /proc/$failpid/status
+    sudo cat /proc/$failpid/cmdline
+    echo "=== End wait failure dump ==="
+}
+
+# Wait for an async future to complete. May return immediately if already
+# complete, or if the future has already been waited on (avoid this). May
+# block until the future completes.
+function async_wait {
+    local xtrace
+    xtrace=$(set +o | grep xtrace)
+    set +o xtrace
+
+    local pid rc running inifile runtime fifo
+    rc=0
+    for name in $*; do
+        running=$(ls ${DEST}/async/*.ini 2>/dev/null | wc -l)
+        inifile="${DEST}/async/${name}.ini"
+        fifo="${DEST}/async/${name}.fifo"
+
+        if pid=$(async_pidof "$name"); then
+            async_log "$name" "Waiting for completion of %command" \
+                "running on PID $pid ($running other jobs running)"
+            time_start async_wait
+            if [[ "$pid" != "self" ]]; then
+                # Signal the child to go ahead and exit since we are about to
+                # wait for it to collect its status.
+                async_log "$name" "Signaling child to exit"
+                echo WAKEUP > $fifo
+                async_log "$name" "Signaled"
+                # Do not actually call wait if we ran synchronously
+                if wait $pid; then
+                    rc=0
+                else
+                    rc=$?
+                fi
+                cat ${DEST}/async/${name}.log
+                rm -f $fifo
+            fi
+            time_stop async_wait
+            local start_time
+            local end_time
+            start_time=$(iniget $inifile job start_time)
+            end_time=$(iniget $inifile job end_time)
+            _async_incr_bg_time $(($end_time - $start_time))
+            runtime=$((($end_time - $start_time) / 1000))
+            async_log "$name" "finished %command with result" \
+                "$rc in $runtime seconds"
+            rm -f $inifile
+            if [ $rc -ne 0 ]; then
+                async_wait_dump $pid
+                echo Stopping async wait due to error: $*
+                break
+            fi
+        else
+            # This could probably be removed - it is really just here
+            # to help notice if you wait for something by the wrong
+            # name, but it also shows up for things we didn't start
+            # because they were not enabled.
+            echo Not waiting for async task $name that we never started or \
+                has already been waited for
+        fi
+    done
+
+    $xtrace
+    return $rc
+}
+
+# Check for uncollected futures and wait on them
+function async_cleanup {
+    local name
+
+    if [[ "$DEVSTACK_PARALLEL" != "True" ]]; then
+        return 0
+    fi
+
+    for inifile in $(find ${DEST}/async -name '*.ini'); do
+        name=$(basename $inifile .ini)
+        echo "WARNING: uncollected async future $name"
+        async_wait $name || true
+    done
+}
+
+# Make sure our async dir is created and clean
+function async_init {
+    local async_dir=${DEST}/async
+
+    # Clean any residue if present from previous runs
+    rm -Rf $async_dir
+
+    # Make sure we have a state directory
+    mkdir -p $async_dir
+}
+
+function async_print_timing {
+    local bg_time_minus_wait
+    local elapsed_time
+    local serial_time
+    local speedup
+
+    if [[ "$DEVSTACK_PARALLEL" != "True" ]]; then
+        return 0
+    fi
+
+    # The logic here is: All the background task time would be
+    # serialized if we did not do them in the background. So we can
+    # add that to the elapsed time for the whole run. However, time we
+    # spend waiting for async things to finish adds to the elapsed
+    # time, but is time where we're not doing anything useful. Thus,
+    # we subtract that from the would-be-serialized time.
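+    #
+    # As a worked example with made-up numbers: 90s of background task
+    # time, of which 20s were spent blocked in async_wait, on a 300s
+    # wall-clock run gives a would-be-serial time of 300 + (90 - 20) = 370s
+    # and a reported speedup of 370 / 300, roughly 1.23.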
+ + bg_time_minus_wait=$((\ + ($_ASYNC_BG_TIME - ${_TIME_TOTAL[async_wait]}) / 1000)) + elapsed_time=$(($(date "+%s") - $_TIME_BEGIN)) + serial_time=$(($elapsed_time + $bg_time_minus_wait)) + + echo + echo "=================" + echo " Async summary" + echo "=================" + echo " Time spent in the background minus waits: $bg_time_minus_wait sec" + echo " Elapsed time: $elapsed_time sec" + echo " Time if we did everything serially: $serial_time sec" + echo " Speedup: " $(echo | awk "{print $serial_time / $elapsed_time}") +} diff --git a/inc/ini-config b/inc/ini-config new file mode 100644 index 0000000000..920d4775fa --- /dev/null +++ b/inc/ini-config @@ -0,0 +1,458 @@ +#!/bin/bash +# +# **inc/ini-config** - Configuration/INI functions +# +# Support for manipulating INI-style configuration files +# +# These functions have no external dependencies and no side-effects + +# Save trace setting +INC_CONF_TRACE=$(set +o | grep xtrace) +set +o xtrace + + +# Config Functions +# ================ + +# Append a new option in an ini file without replacing the old value +# iniadd [-sudo] config-file section option value1 value2 value3 ... +function iniadd { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + local sudo="" + if [ $1 == "-sudo" ]; then + sudo="-sudo " + shift + fi + local file=$1 + local section=$2 + local option=$3 + shift 3 + + local values + values="$(iniget_multiline $file $section $option) $@" + iniset_multiline $sudo $file $section $option $values + $xtrace +} + +# Comment an option in an INI file +# inicomment [-sudo] config-file section option +function inicomment { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + local sudo="" + if [ $1 == "-sudo" ]; then + sudo="sudo " + shift + fi + local file=$1 + local section=$2 + local option=$3 + + $sudo sed -i -e "/^\[$section\]/,/^\[.*\]/ s|^\($option[ \t]*=.*$\)|#\1|" "$file" + $xtrace +} + +# Get an option from an INI file +# iniget config-file section option +function iniget { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + local file=$1 + local section=$2 + local option=$3 + local line + + line=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file") + echo ${line#*=} + $xtrace +} + +# Get a multiple line option from an INI file +# iniget_multiline config-file section option +function iniget_multiline { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + local file=$1 + local section=$2 + local option=$3 + local values + + values=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { s/^$option[ \t]*=[ \t]*//gp; }" "$file") + echo ${values} + $xtrace +} + +# Determinate is the given option present in the INI file +# ini_has_option [-sudo] config-file section option +function ini_has_option { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + local sudo="" + if [ $1 == "-sudo" ]; then + sudo="sudo " + shift + fi + local file=$1 + local section=$2 + local option=$3 + local line + + line=$($sudo sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file") + $xtrace + [ -n "$line" ] +} + +# Add another config line for a multi-line option. +# It's normally called after iniset of the same option and assumes +# that the section already exists. +# +# Note that iniset_multiline requires all the 'lines' to be supplied +# in the argument list. Doing that will cause incorrect configuration +# if spaces are used in the config values. 
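+#
+# For example (the file, option and values are illustrative), to end up
+# with two "my_opt" lines whose values contain spaces:
+#
+#   iniset /etc/sample.conf DEFAULT my_opt "first value with spaces"
+#   iniadd_literal /etc/sample.conf DEFAULT my_opt "second value with spaces"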
+# +# iniadd_literal [-sudo] config-file section option value +function iniadd_literal { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + local sudo="" + if [ $1 == "-sudo" ]; then + sudo="sudo " + shift + fi + local file=$1 + local section=$2 + local option=$3 + local value=$4 + + if [[ -z $section || -z $option ]]; then + $xtrace + return + fi + + # Add it + $sudo sed -i -e "/^\[$section\]/ a\\ +$option = $value +" "$file" + + $xtrace +} + +# Remove an option from an INI file +# inidelete [-sudo] config-file section option +function inidelete { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + local sudo="" + if [ $1 == "-sudo" ]; then + sudo="sudo " + shift + fi + local file=$1 + local section=$2 + local option=$3 + + if [[ -z $section || -z $option ]]; then + $xtrace + return + fi + + # Remove old values + $sudo sed -i -e "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ d; }" "$file" + + $xtrace +} + +# Set an option in an INI file +# iniset [-sudo] config-file section option value +# - if the file does not exist, it is created +function iniset { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + local sudo="" + local sudo_option="" + if [ $1 == "-sudo" ]; then + sudo="sudo " + sudo_option="-sudo " + shift + fi + local file=$1 + local section=$2 + local option=$3 + local value=$4 + + # Escape the ampersand (&) and backslash (\) characters for sed + # Order of substitution matters: we escape backslashes first before + # adding more backslashes to escape ampersands + value=$(echo $value | sed -e 's/\\/\\\\/g' -e 's/&/\\&/g') + + if [[ -z $section || -z $option ]]; then + $xtrace + return + fi + + if ! $sudo grep -q "^\[$section\]" "$file" 2>/dev/null; then + # Add section at the end + echo -e "\n[$section]" | $sudo tee --append "$file" > /dev/null + fi + if ! ini_has_option $sudo_option "$file" "$section" "$option"; then + # Add it + $sudo sed -i -e "/^\[$section\]/ a\\ +$option = $value +" "$file" + else + local sep + sep=$(echo -ne "\x01") + # Replace it + $sudo sed -i -e '/^\['${section}'\]/,/^\[.*\]/ s'${sep}'^\('"${option}"'[ \t]*=[ \t]*\).*$'${sep}'\1'"${value}"${sep} "$file" + fi + $xtrace +} + +# Set a multiple line option in an INI file +# iniset_multiline [-sudo] config-file section option value1 value2 value3 ... +function iniset_multiline { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + local sudo="" + if [ $1 == "-sudo" ]; then + sudo="sudo " + shift + fi + local file=$1 + local section=$2 + local option=$3 + + shift 3 + local values + for v in $@; do + # The later sed command inserts each new value in the line next to + # the section identifier, which causes the values to be inserted in + # the reverse order. Do a reverse here to keep the original order. + values="$v ${values}" + done + if ! 
$sudo grep -q "^\[$section\]" "$file"; then + # Add section at the end + echo -e "\n[$section]" | $sudo tee --append "$file" > /dev/null + else + # Remove old values + $sudo sed -i -e "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ d; }" "$file" + fi + # Add new ones + for v in $values; do + $sudo sed -i -e "/^\[$section\]/ a\\ +$option = $v +" "$file" + done + $xtrace +} + +# Uncomment an option in an INI file +# iniuncomment config-file section option +function iniuncomment { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + local sudo="" + if [ $1 == "-sudo" ]; then + sudo="sudo " + shift + fi + local file=$1 + local section=$2 + local option=$3 + $sudo sed -i -e "/^\[$section\]/,/^\[.*\]/ s|[^ \t]*#[ \t]*\($option[ \t]*=.*$\)|\1|" "$file" + $xtrace +} + +# Get list of sections from an INI file +# iniget_sections config-file +function iniget_sections { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + local file=$1 + + echo $(sed -ne "s/^\[\(.*\)\]/\1/p" "$file") + $xtrace +} + +# Set a localrc var +function localrc_set { + local file=$1 + local group="local" + local conf="localrc" + local section="" + local option=$2 + local value=$3 + localconf_set "$file" "$group" "$conf" "$section" "$option" "$value" +} + +# Check if local.conf has section. +function localconf_has_section { + local file=$1 + local group=$2 + local conf=$3 + local section=$4 + local sep + sep=$(echo -ne "\x01") + local line + line=$(sed -ne "\\${sep}^\[\[${group}|${conf}\]\]${sep},\\${sep}\[\[.*\]\]${sep}{ + /\[${section}\]/p + }" "$file") + [ -n "$line" ] +} + +# Check if local.conf has option. +function localconf_has_option { + local file=$1 + local group=$2 + local conf=$3 + local section=$4 + local option=$5 + local sep + sep=$(echo -ne "\x01") + local line + if [[ -z "$section" ]]; then + line=$(sed -ne "\\${sep}^\[\[${group}|${conf}\]\]${sep},\\${sep}\[\[.*\]\]${sep}{ + /${option}[ \t]*=.*$/p + }" "$file") + else + line=$(sed -ne "\\${sep}^\[\[${group}|${conf}\]\]${sep},\\${sep}\[\[.*\]\]${sep}{ + /\[${section}\]/,/\[\[.*\]\]\|\[.*\]/{ + /${option}[ \t]*=.*$/p} + }" "$file") + fi + [ -n "$line" ] +} + +# Update option in local.conf. +function localconf_update_option { + local sudo=$1 + local file=$2 + local group=$3 + local conf=$4 + local section=$5 + local option=$6 + local value=$7 + local sep + sep=$(echo -ne "\x01") + if [[ -z "$section" ]]; then + $sudo sed -i -e "\\${sep}^\[\[${group}|${conf}\]\]${sep},\\${sep}\[\[.*\]\]${sep}{ + s${sep}^\(${option}[ \t]*=[ \t]*\).*\$${sep}\1${value}${sep} + }" "$file" + else + $sudo sed -i -e "\\${sep}^\[\[${group}|${conf}\]\]${sep},\\${sep}\[\[.*\]\]${sep}{ + /\[${section}\]/,/\[\[.*\]\]\|\[.*\]/s${sep}^\(${option}[ \t]*=[ \t]*\).*\$${sep}\1${value}${sep} + }" "$file" + fi +} + +# Add option in local.conf. +function localconf_add_option { + local sudo=$1 + local file=$2 + local group=$3 + local conf=$4 + local section=$5 + local option=$6 + local value=$7 + local sep + sep=$(echo -ne "\x01") + if [[ -z "$section" ]]; then + $sudo sed -i -e "\\${sep}^\[\[${group}|${conf}\]\]${sep} a $option=$value" "$file" + else + $sudo sed -i -e "\\${sep}^\[\[${group}|${conf}\]\]${sep},\\${sep}\[\[.*\]\]${sep}{ + /\[${section}\]/ a $option=$value + }" "$file" + fi +} + +# Add section and option in local.conf. 
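+# For example (the config file and values are purely illustrative),
+#
+#   localconf_add_section_and_option "" local.conf post-config '$SAMPLE_CONF' DEFAULT debug True
+#
+# appends "[DEFAULT]" and "debug=True" immediately below the
+# [[post-config|$SAMPLE_CONF]] meta section header.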
+function localconf_add_section_and_option { + local sudo=$1 + local file=$2 + local group=$3 + local conf=$4 + local section=$5 + local option=$6 + local value=$7 + local sep + sep=$(echo -ne "\x01") + $sudo sed -i -e "\\${sep}^\[\[${group}|${conf}\]\]${sep} { + a [$section] + a $option=$value + }" "$file" +} + +# Set an option in a local.conf file. +# localconf_set [-sudo] config-file group conf-name section option value +# - if the file does not exist, it is created +function localconf_set { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + local sep + sep=$(echo -ne "\x01") + local sudo="" + if [ $1 == "-sudo" ]; then + sudo="sudo " + shift + fi + local file=$1 + local group=$2 + local conf=$3 + local section=$4 + local option=$5 + local value=$6 + + if [[ -z $group || -z $conf || -z $option || -z $value ]]; then + $xtrace + return + fi + + if ! grep -q "^\[\[${group}|${conf}\]\]" "$file" 2>/dev/null; then + # Add meta section at the end if it does not exist + echo -e "\n[[${group}|${conf}]]" | $sudo tee --append "$file" > /dev/null + # Add section at the end + if [[ -n "$section" ]]; then + echo -e "[$section]" | $sudo tee --append "$file" > /dev/null + fi + # Add option at the end + echo -e "$option=$value" | $sudo tee --append "$file" > /dev/null + elif [[ -z "$section" ]]; then + if ! localconf_has_option "$file" "$group" "$conf" "$section" "$option"; then + # Add option + localconf_add_option "$sudo" "$file" "$group" "$conf" "$section" "$option" "$value" + else + # Replace it + localconf_update_option "$sudo" "$file" "$group" "$conf" "$section" "$option" "$value" + fi + elif ! localconf_has_section "$file" "$group" "$conf" "$section"; then + # Add section and option in specified meta section + localconf_add_section_and_option "$sudo" "$file" "$group" "$conf" "$section" "$option" "$value" + elif ! localconf_has_option "$file" "$group" "$conf" "$section" "$option"; then + # Add option + localconf_add_option "$sudo" "$file" "$group" "$conf" "$section" "$option" "$value" + else + # Replace it + localconf_update_option "$sudo" "$file" "$group" "$conf" "$section" "$option" "$value" + fi + $xtrace +} + +# Restore xtrace +$INC_CONF_TRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/inc/meta-config b/inc/meta-config new file mode 100644 index 0000000000..1215bb8307 --- /dev/null +++ b/inc/meta-config @@ -0,0 +1,226 @@ +#!/bin/bash +# +# **lib/meta-config** - Configuration file manipulation functions +# +# Support for DevStack's local.conf meta-config sections +# +# These functions have no external dependencies and the following side-effects: +# +# CONFIG_AWK_CMD is defined, default is ``awk`` + +# Meta-config files contain multiple INI-style configuration files +# using a specific new section header to delimit them: +# +# [[group-name|file-name]] +# +# group-name refers to the group of configuration file changes to be processed +# at a particular time. These are called phases in ``stack.sh`` but +# group here as these functions are not DevStack-specific. 
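+#
+# For example, a local.conf processed by these functions might contain a
+# section like the following, where "post-config" is the group and
+# $NOVA_CONF (DevStack's variable for the nova.conf path) is the file-name:
+#
+# [[post-config|$NOVA_CONF]]
+# [DEFAULT]
+# debug = True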
+# +# file-name is the destination of the config file + +# Save trace setting +_XTRACE_INC_META=$(set +o | grep xtrace) +set +o xtrace + + +# Allow the awk command to be overridden on legacy platforms +CONFIG_AWK_CMD=${CONFIG_AWK_CMD:-awk} + +# Get the section for the specific group and config file +# get_meta_section infile group configfile +function get_meta_section { + local file=$1 + local matchgroup=$2 + local configfile=$3 + + [[ -r $file ]] || return 0 + [[ -z $configfile ]] && return 0 + + $CONFIG_AWK_CMD -v matchgroup=$matchgroup -v configfile=$configfile ' + BEGIN { group = "" } + /^\[\[.+\|.*\]\]/ { + gsub("[][]", "", $1); + split($1, a, "|"); + if (a[1] == matchgroup && a[2] == configfile) { + group=a[1] + } else { + group="" + } + next + } + { + if (group != "") + print $0 + } + ' $file +} + + +# Get a list of config files for a specific group +# get_meta_section_files infile group +function get_meta_section_files { + local file=$1 + local matchgroup=$2 + + [[ -r $file ]] || return 0 + + $CONFIG_AWK_CMD -v matchgroup=$matchgroup ' + /^\[\[.+\|.*\]\]/ { + gsub("[][]", "", $1); + split($1, a, "|"); + if (a[1] == matchgroup) + print a[2] + } + ' $file +} + + +# Merge the contents of a meta-config file into its destination config file +# If configfile does not exist it will be created. +# merge_config_file infile group configfile +function merge_config_file { + local file=$1 + local matchgroup=$2 + local configfile=$3 + + # note, configfile might be a variable (note the iniset, etc + # created in the mega-awk below is "eval"ed too, so we just leave + # it alone. + local real_configfile + real_configfile=$(eval echo $configfile) + if [ ! -f $real_configfile ]; then + touch $real_configfile || die $LINENO "could not create config file $real_configfile ($configfile)" + fi + + get_meta_section $file $matchgroup $configfile | \ + $CONFIG_AWK_CMD -v configfile=$configfile ' + BEGIN { + section = "" + last_section = "" + section_count = 0 + } + /^\[.+\]/ { + gsub("[][]", "", $1); + section=$1 + next + } + /^ *\#/ { + next + } + /^[^ \t]+/ { + # get offset of first '=' in $0 + eq_idx = index($0, "=") + # extract attr & value from $0 + attr = substr($0, 1, eq_idx - 1) + value = substr($0, eq_idx + 1) + # only need to strip trailing whitespace from attr + sub(/[ \t]*$/, "", attr) + # need to strip leading & trailing whitespace from value + sub(/^[ \t]*/, "", value) + sub(/[ \t]*$/, "", value) + + # cfg_attr_count: number of config lines per [section, attr] + # cfg_attr: three dimensional array to keep all the config lines per [section, attr] + # cfg_section: keep the section names in the same order as they appear in local.conf + # cfg_sec_attr_name: keep the attr names in the same order as they appear in local.conf + if (! (section, attr) in cfg_attr_count) { + if (section != last_section) { + cfg_section[section_count++] = section + last_section = section + } + attr_count = cfg_sec_attr_count[section_count - 1]++ + cfg_sec_attr_name[section_count - 1, attr_count] = attr + + cfg_attr[section, attr, 0] = value + cfg_attr_count[section, attr] = 1 + } else { + lno = cfg_attr_count[section, attr]++ + cfg_attr[section, attr, lno] = value + } + } + END { + # Process each section in order + for (sno = 0; sno < section_count; sno++) { + section = cfg_section[sno] + # The ini routines simply append a config item immediately + # after the section header. 
To keep the same order as defined + # in local.conf, invoke the ini routines in the reverse order + for (attr_no = cfg_sec_attr_count[sno] - 1; attr_no >=0; attr_no--) { + attr = cfg_sec_attr_name[sno, attr_no] + if (cfg_attr_count[section, attr] == 1) + print "iniset " configfile " " section " " attr " \"" cfg_attr[section, attr, 0] "\"" + else { + # For multiline, invoke the ini routines in the reverse order + count = cfg_attr_count[section, attr] + print "inidelete " configfile " " section " " attr + print "iniset " configfile " " section " " attr " \"" cfg_attr[section, attr, count - 1] "\"" + for (l = count -2; l >= 0; l--) + print "iniadd_literal " configfile " " section " " attr " \"" cfg_attr[section, attr, l] "\"" + } + } + } + } + ' | while read a; do eval "$a"; done +} + + +# Merge all of the files specified by group +# merge_config_group infile group [group ...] +function merge_config_group { + local localfile=$1; shift + local matchgroups=$@ + + [[ -r $localfile ]] || return 0 + + local configfile group + for group in $matchgroups; do + for configfile in $(get_meta_section_files $localfile $group); do + local realconfigfile + local dir + + realconfigfile=$(eval "echo $configfile") + if [[ -z $realconfigfile ]]; then + warn $LINENO "unknown config file specification: $configfile is undefined" + break + fi + dir=$(dirname $realconfigfile) + + test -e $dir && ! test -d $dir && die $LINENO "bogus config file specification $configfile ($configfile=$realconfigfile, $dir exists but it is not a directory)" + + if ! [[ -e $dir ]] ; then + sudo mkdir -p $dir || die $LINENO "could not create the directory of $real_configfile ($configfile)" + sudo chown ${STACK_USER} $dir + fi + + merge_config_file $localfile $group $configfile + done + done +} + +function extract_localrc_section { + local configfile=$1 # top_dir/local.conf + local localrcfile=$2 # top_dir/localrc + local localautofile=$3 # top_dir/.localrc.auto + + if [[ -r $configfile ]]; then + LRC=$(get_meta_section_files $configfile local) + for lfile in $LRC; do + if [[ "$lfile" == "localrc" ]]; then + if [[ -r $localrcfile ]]; then + echo "localrc and local.conf:[[local]] both exist, using localrc" + else + echo "# Generated file, do not edit" >$localautofile + get_meta_section $configfile local $lfile >>$localautofile + fi + fi + done + fi +} + +# Restore xtrace +$_XTRACE_INC_META + +# Local variables: +# mode: shell-script +# End: diff --git a/inc/python b/inc/python new file mode 100644 index 0000000000..3969c1fa82 --- /dev/null +++ b/inc/python @@ -0,0 +1,507 @@ +#!/bin/bash +# +# **inc/python** - Python-related functions +# +# Support for pip/setuptools interfaces and virtual environments +# +# External functions used: +# - GetOSVersion +# - is_fedora +# - safe_chown + +# Save trace setting +INC_PY_TRACE=$(set +o | grep xtrace) +set +o xtrace + + +# Global Config Variables + +# PROJECT_VENV contains the name of the virtual environment for each +# project. A null value installs to the system Python directories. +declare -A -g PROJECT_VENV + +# Utility Functions +# ================= + +# Joins bash array of extras with commas as expected by other functions +function join_extras { + local IFS="," + echo "$*" +} + +# Python Functions +# ================ + +# Setup the global devstack virtualenvs and the associated environment +# updates. +function setup_devstack_virtualenv { + # We run devstack out of a global virtualenv. + if [[ ! -d $DEVSTACK_VENV ]] ; then + # Using system site packages to enable nova to use libguestfs. 
+ # This package is currently installed via the distro and not + # available on pypi. + $PYTHON -m venv --system-site-packages "${DEVSTACK_VENV}" + pip_install -U pip setuptools[core] + fi + if [[ ":$PATH:" != *":$DEVSTACK_VENV/bin:"* ]] ; then + export PATH="$DEVSTACK_VENV/bin:$PATH" + export PYTHON="$DEVSTACK_VENV/bin/python3" + fi +} + +# Get the path to the pip command. +# get_pip_command +function get_pip_command { + local version="$1" + if [ -z "$version" ]; then + die $LINENO "pip python version is not set." + fi + + # NOTE(dhellmann): I don't know if we actually get a pip3.4-python + # under any circumstances. + which pip${version} || which pip${version}-python + + if [ $? -ne 0 ]; then + die $LINENO "Unable to find pip${version}; cannot continue" + fi +} + +# Get the path to the directory where python executables are installed. +# get_python_exec_prefix +function get_python_exec_prefix { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + if [[ -z "$os_PACKAGE" ]]; then + GetOSVersion + fi + $xtrace + + if [[ "$GLOBAL_VENV" == "True" ]] ; then + echo "$DEVSTACK_VENV/bin" + else + echo "/usr/local/bin" + fi +} + +# Wrapper for ``pip install`` that only installs versions of libraries +# from the global-requirements specification. +# +# Uses globals ``REQUIREMENTS_DIR`` +# +# pip_install_gr packagename +function pip_install_gr { + local name=$1 + local clean_name + clean_name=$(get_from_global_requirements $name) + pip_install $clean_name +} + +# Wrapper for ``pip install`` that only installs versions of libraries +# from the global-requirements specification with extras. +# +# Uses globals ``REQUIREMENTS_DIR`` +# +# pip_install_gr_extras packagename extra1,extra2,... +function pip_install_gr_extras { + local name=$1 + local extras=$2 + local version_constraints + version_constraints=$(get_version_constraints_from_global_requirements $name) + pip_install $name[$extras]$version_constraints +} + +# enable_python3_package() -- no-op for backwards compatibility +# +# enable_python3_package dir [dir ...] +function enable_python3_package { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + + echo "It is no longer necessary to call enable_python3_package()." + + $xtrace +} + +# disable_python3_package() -- no-op for backwards compatibility +# +# disable_python3_package dir [dir ...] +function disable_python3_package { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + + echo "It is no longer possible to call disable_python3_package()." + + $xtrace +} + +# Wrapper for ``pip install`` to set cache and proxy environment variables +# Uses globals ``OFFLINE``, ``PIP_VIRTUAL_ENV``, +# ``PIP_UPGRADE``, ``*_proxy``, +# Usage: +# pip_install pip_arguments +function pip_install { + local xtrace result + xtrace=$(set +o | grep xtrace) + set +o xtrace + local upgrade="" + local offline=${OFFLINE:-False} + if [[ "$offline" == "True" || -z "$@" ]]; then + $xtrace + return + fi + + time_start "pip_install" + + PIP_UPGRADE=$(trueorfalse False PIP_UPGRADE) + if [[ "$PIP_UPGRADE" = "True" ]] ; then + upgrade="--upgrade" + fi + + if [[ -z "$os_PACKAGE" ]]; then + GetOSVersion + fi + + # Try to extract the path of the package we are installing into + # package_dir. We need this to check for test-requirements.txt, + # at least. + # + # ${!#} expands to the last positional argument to this function. 
+ # With "extras" syntax included, our arguments might be something + # like: + # -e /path/to/fooproject[extra] + # Thus this magic line grabs just the path without extras + # + # Note that this makes no sense if this is a pypi (rather than + # local path) install; ergo you must check this path exists before + # use. Also, if we had multiple or mixed installs, we would also + # likely break. But for historical reasons, it's basically only + # the other wrapper functions in here calling this to install + # local packages, and they do so with single call per install. So + # this works (for now...) + local package_dir=${!#%\[*\]} + + if [[ -n ${PIP_VIRTUAL_ENV:=} && -d ${PIP_VIRTUAL_ENV} ]]; then + local cmd_pip=$PIP_VIRTUAL_ENV/bin/pip + local sudo_pip="env" + elif [[ "${GLOBAL_VENV}" == "True" && -d ${DEVSTACK_VENV} ]] ; then + # We have to check that the DEVSTACK_VENV exists because early + # devstack boostrapping needs to operate in a system context + # too bootstrap pip. Once pip is bootstrapped we create the + # global venv and can start to use it. + local cmd_pip=$DEVSTACK_VENV/bin/pip + local sudo_pip="env" + echo "Using python $PYTHON3_VERSION to install $package_dir" + else + local cmd_pip="python$PYTHON3_VERSION -m pip" + local sudo_pip="sudo -H LC_ALL=en_US.UTF-8" + echo "Using python $PYTHON3_VERSION to install $package_dir" + fi + + cmd_pip="$cmd_pip install" + # Always apply constraints + cmd_pip="$cmd_pip -c $REQUIREMENTS_DIR/upper-constraints.txt" + + $xtrace + + $sudo_pip \ + http_proxy="${http_proxy:-}" \ + https_proxy="${https_proxy:-}" \ + no_proxy="${no_proxy:-}" \ + PIP_FIND_LINKS=$PIP_FIND_LINKS \ + $cmd_pip $upgrade \ + $@ + result=$? + + time_stop "pip_install" + return $result +} + +function pip_uninstall { + # Skip uninstall if offline + [[ "${OFFLINE}" = "True" ]] && return + + local name=$1 + if [[ -n ${PIP_VIRTUAL_ENV:=} && -d ${PIP_VIRTUAL_ENV} ]]; then + local cmd_pip=$PIP_VIRTUAL_ENV/bin/pip + local sudo_pip="env" + else + local cmd_pip="python$PYTHON3_VERSION -m pip" + local sudo_pip="sudo -H LC_ALL=en_US.UTF-8" + fi + # don't error if we can't uninstall, it might not be there + $sudo_pip $cmd_pip uninstall -y $name || /bin/true +} + +# get version of a package from global requirements file +# get_from_global_requirements +function get_from_global_requirements { + local package=$1 + local required_pkg + required_pkg=$(grep -i -h ^${package} $REQUIREMENTS_DIR/global-requirements.txt | cut -d\# -f1) + if [[ $required_pkg == "" ]]; then + die $LINENO "Can't find package $package in requirements" + fi + echo $required_pkg +} + +# get only version constraints of a package from global requirements file +# get_version_constraints_from_global_requirements +function get_version_constraints_from_global_requirements { + local package=$1 + local required_pkg_version_constraint + # drop the package name from output (\K) + required_pkg_version_constraint=$(grep -i -h -o -P "^${package}\K.*" $REQUIREMENTS_DIR/global-requirements.txt | cut -d\# -f1) + if [[ $required_pkg_version_constraint == "" ]]; then + die $LINENO "Can't find package $package in requirements" + fi + echo $required_pkg_version_constraint +} + +# should we use this library from their git repo, or should we let it +# get pulled in via pip dependencies. 
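+#
+# For example, with a hypothetical LIBS_FROM_GIT="oslo.messaging,python-novaclient"
+# this returns success for "oslo.messaging" and failure for anything not
+# listed, while LIBS_FROM_GIT=ALL matches every library:
+#
+#   use_library_from_git "oslo.messaging" && echo "will be installed from git"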
+function use_library_from_git { + local name=$1 + local enabled=1 + [[ ${LIBS_FROM_GIT} = 'ALL' ]] || [[ ,${LIBS_FROM_GIT}, =~ ,${name}, ]] && enabled=0 + return $enabled +} + +# determine if a package was installed from git +function lib_installed_from_git { + local name=$1 + local safe_name + safe_name=$(python -c "from packaging import canonicalize_name; print(canonicalize_name('${name}'))") + # Note "pip freeze" doesn't always work here, because it tries to + # be smart about finding the remote of the git repo the package + # was installed from. This doesn't work with zuul which clones + # repos with no remote. + # + # The best option seems to be to use "pip list" which will tell + # you the path an editable install was installed from; for example + # in response to something like + # pip install -e 'git+https://opendev.org/openstack/bashate#egg=bashate' + # pip list --format columns shows + # bashate 0.5.2.dev19 /tmp/env/src/bashate + # Thus we check the third column to see if we're installed from + # some local place. + [[ -n $(pip list --format=columns 2>/dev/null | awk "/^$safe_name/ {print \$3}") ]] +} + +# setup a library by name. If we are trying to use the library from +# git, we'll do a git based install, otherwise we'll punt and the +# library should be installed by a requirements pull from another +# project. +function setup_lib { + local name=$1 + local dir=${GITDIR[$name]} + setup_install $dir +} + +# setup a library by name in editable mode. If we are trying to use +# the library from git, we'll do a git based install, otherwise we'll +# punt and the library should be installed by a requirements pull from +# another project. +# +# use this for non namespaced libraries +# +# setup_dev_lib [-bindep] [] +function setup_dev_lib { + local bindep + if [[ $1 == -bindep* ]]; then + bindep="${1}" + shift + fi + local name=$1 + local dir=${GITDIR[$name]} + local extras=$2 + setup_develop $bindep $dir $extras +} + +# this should be used if you want to install globally, all libraries should +# use this, especially *oslo* ones +# +# setup_install project_dir [extras] +# project_dir: directory of project repo (e.g., /opt/stack/keystone) +# extras: comma-separated list of optional dependencies to install +# (e.g., ldap,memcache). +# See https://docs.openstack.org/pbr/latest/user/using.html#extra-requirements +# bindep: Set "-bindep" as first argument to install bindep.txt packages +# The command is like "pip install []" +function setup_install { + local bindep + if [[ $1 == -bindep* ]]; then + bindep="${1}" + shift + fi + local project_dir=$1 + local extras=$2 + _setup_package_with_constraints_edit $bindep $project_dir "" $extras +} + +# this should be used for projects which run services, like all services +# +# setup_develop project_dir [extras] +# project_dir: directory of project repo (e.g., /opt/stack/keystone) +# extras: comma-separated list of optional dependencies to install +# (e.g., ldap,memcache). +# See https://docs.openstack.org/pbr/latest/user/using.html#extra-requirements +# The command is like "pip install -e []" +function setup_develop { + local bindep + if [[ $1 == -bindep* ]]; then + bindep="${1}" + shift + fi + local project_dir=$1 + local extras=$2 + _setup_package_with_constraints_edit $bindep $project_dir -e $extras +} + +# ``pip install -e`` the package, which processes the dependencies +# using pip before running `setup.py develop` +# +# Updates the constraints from REQUIREMENTS_DIR to reflect the +# future installed state of this package. 
This ensures when we +# install this package we get the from source version. +# +# Uses globals ``REQUIREMENTS_DIR`` +# _setup_package_with_constraints_edit project_dir flags [extras] +# project_dir: directory of project repo (e.g., /opt/stack/keystone) +# flags: pip CLI options/flags +# extras: comma-separated list of optional dependencies to install +# (e.g., ldap,memcache). +# See https://docs.openstack.org/pbr/latest/user/using.html#extra-requirements +# The command is like "pip install []" +function _setup_package_with_constraints_edit { + local bindep + if [[ $1 == -bindep* ]]; then + bindep="${1}" + shift + fi + local project_dir=$1 + local flags=$2 + local extras=$3 + + # Normalize the directory name to avoid + # "installation from path or url cannot be constrained to a version" + # error. + # REVISIT(yamamoto): Remove this when fixed in pip. + # https://github.com/pypa/pip/pull/3582 + project_dir=$(cd $project_dir && pwd) + + if [ -n "$REQUIREMENTS_DIR" ]; then + # Remove this package from constraints before we install it. + # That way, later installs won't "downgrade" the install from + # source we are about to do. + local name + name=$(awk '/^name.*=/ {print $3}' $project_dir/setup.cfg) + if [ -z $name ]; then + name=$(awk '/^name =/ {gsub(/"/, "", $3); print $3}' $project_dir/pyproject.toml) + fi + $REQUIREMENTS_DIR/.venv/bin/edit-constraints \ + $REQUIREMENTS_DIR/upper-constraints.txt -- $name + fi + + setup_package $bindep $project_dir "$flags" $extras + + # If this project is in LIBS_FROM_GIT, verify it was actually installed + # correctly. This helps catch errors caused by constraints mismatches. + if use_library_from_git "$project_dir"; then + if ! lib_installed_from_git "$project_dir"; then + die $LINENO "The following LIBS_FROM_GIT was not installed correctly: $project_dir" + fi + fi +} + +# ``pip install -e`` the package, which processes the dependencies +# using pip before running `setup.py develop`. The command is like +# "pip install []" +# +# Uses globals ``STACK_USER`` +# +# Usage: +# setup_package [-bindep[=profile,profile]] [extras] +# +# -bindep : Use bindep to install dependencies; select extra profiles +# as comma separated arguments after "=" +# project_dir : directory of project repo (e.g., /opt/stack/keystone) +# flags : pip CLI options/flags +# extras : comma-separated list of optional dependencies to install +# (e.g., ldap,memcache). +# See https://docs.openstack.org/pbr/latest/user/using.html#extra-requirements +function setup_package { + local bindep=0 + local bindep_flag="" + local bindep_profiles="" + if [[ $1 == -bindep* ]]; then + bindep=1 + IFS="=" read bindep_flag bindep_profiles <<< ${1} + shift + fi + local project_dir=$1 + local flags=$2 + local extras=$3 + + # if the flags variable exists, and it doesn't look like a flag, + # assume it's actually the extras list. + if [[ -n "$flags" && -z "$extras" && ! "$flags" =~ ^-.* ]]; then + extras=$flags + flags="" + fi + + if [[ ! 
-z "$extras" ]]; then + extras="[$extras]" + fi + + # install any bindep packages + if [[ $bindep == 1 ]]; then + install_bindep $project_dir/bindep.txt $bindep_profiles + fi + + pip_install $flags "$project_dir$extras" + # ensure that further actions can do things like setup.py sdist + if [[ "$flags" == "-e" && "$GLOBAL_VENV" == "False" ]]; then + # egg-info is not created when project have pyproject.toml + if [ -d $1/*.egg-info ]; then + safe_chown -R $STACK_USER $1/*.egg-info + fi + fi +} + +# Report whether python 3 should be used +# TODO(frickler): drop this once all legacy uses are removed +function python3_enabled { + return 0 +} + +# Provide requested python version and sets PYTHON variable +function install_python { + install_python3 + export PYTHON=$(which python${PYTHON3_VERSION} 2>/dev/null) +} + +# Install python3 packages +function install_python3 { + if is_ubuntu; then + apt_get install python${PYTHON3_VERSION} python${PYTHON3_VERSION}-dev + elif is_fedora; then + install_package python${PYTHON3_VERSION}-devel python${PYTHON3_VERSION}-pip + fi +} + +function install_devstack_tools { + # intentionally old to ensure devstack-gate has control + local dstools_version=${DSTOOLS_VERSION:-0.1.2} + install_python3 + sudo pip3 install -U devstack-tools==${dstools_version} +} + +# Restore xtrace +$INC_PY_TRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/inc/rootwrap b/inc/rootwrap new file mode 100644 index 0000000000..4c65440a4e --- /dev/null +++ b/inc/rootwrap @@ -0,0 +1,94 @@ +#!/bin/bash +# +# **inc/rootwrap** - Rootwrap functions +# +# Handle rootwrap's foibles + +# Uses: ``STACK_USER`` +# Defines: ``SUDO_SECURE_PATH_FILE`` + +# Save trace setting +INC_ROOT_TRACE=$(set +o | grep xtrace) +set +o xtrace + +# Accumulate all additions to sudo's ``secure_path`` in one file read last +# so they all work in a venv configuration +SUDO_SECURE_PATH_FILE=${SUDO_SECURE_PATH_FILE:-/etc/sudoers.d/zz-secure-path} + +# Add a directory to the common sudo ``secure_path`` +# add_sudo_secure_path dir +function add_sudo_secure_path { + local dir=$1 + local line + + # This is pretty simplistic for now - assume only the first line is used + if [[ -r $SUDO_SECURE_PATH_FILE ]]; then + line=$(head -1 $SUDO_SECURE_PATH_FILE) + else + line="Defaults:$STACK_USER secure_path=/usr/local/sbin:/usr/local/bin:/usr/sbin:/sbin:/usr/bin:/bin" + fi + + # Only add ``dir`` if it is not already present + if [[ ! 
$line =~ $dir ]]; then + echo "${line}:$dir" | sudo tee $SUDO_SECURE_PATH_FILE + sudo chmod 400 $SUDO_SECURE_PATH_FILE + sudo chown root:root $SUDO_SECURE_PATH_FILE + fi +} + +# Configure rootwrap +# Make a load of assumptions otherwise we'll have 6 arguments +# configure_rootwrap project +function configure_rootwrap { + local project=$1 + local project_uc + project_uc=$(echo $1|tr a-z A-Z) + local bin_dir="${project_uc}_BIN_DIR" + bin_dir="${!bin_dir}" + local project_dir="${project_uc}_DIR" + project_dir="${!project_dir}" + + local rootwrap_conf_src_dir="${project_dir}/etc/${project}" + local rootwrap_bin="${bin_dir}/${project}-rootwrap" + + # Start fresh with rootwrap filters + sudo rm -rf /etc/${project}/rootwrap.d + sudo install -d -o root -g root -m 755 /etc/${project}/rootwrap.d + sudo install -o root -g root -m 644 $rootwrap_conf_src_dir/rootwrap.d/*.filters /etc/${project}/rootwrap.d + + # Set up rootwrap.conf, pointing to /etc/*/rootwrap.d + sudo install -o root -g root -m 644 $rootwrap_conf_src_dir/rootwrap.conf /etc/${project}/rootwrap.conf + sudo sed -e "s:^filters_path=.*$:filters_path=/etc/${project}/rootwrap.d:" -i /etc/${project}/rootwrap.conf + + # Rely on $PATH set by devstack to determine what is safe to execute + # by rootwrap rather than use explicit whitelist of paths in + # rootwrap.conf + sudo sed -e 's/^exec_dirs=.*/#&/' -i /etc/${project}/rootwrap.conf + + # Set up the rootwrap sudoers + local tempfile + tempfile=$(mktemp) + # Specify rootwrap.conf as first parameter to rootwrap + rootwrap_sudo_cmd="${rootwrap_bin} /etc/${project}/rootwrap.conf *" + echo "$STACK_USER ALL=(root) NOPASSWD: $rootwrap_sudo_cmd" >$tempfile + if [ -f ${bin_dir}/${project}-rootwrap-daemon ]; then + # rootwrap daemon does not need any parameters + rootwrap_sudo_cmd="${rootwrap_bin}-daemon /etc/${project}/rootwrap.conf" + echo "$STACK_USER ALL=(root) NOPASSWD: $rootwrap_sudo_cmd" >>$tempfile + fi + chmod 0440 $tempfile + sudo chown root:root $tempfile + sudo mv $tempfile /etc/sudoers.d/${project}-rootwrap + + # Add bin dir to sudo's secure_path because rootwrap is being called + # without a path because BROKEN. + add_sudo_secure_path $(dirname $rootwrap_bin) +} + + +# Restore xtrace +$INC_ROOT_TRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/apache b/lib/apache new file mode 100644 index 0000000000..b3379a7cde --- /dev/null +++ b/lib/apache @@ -0,0 +1,426 @@ +#!/bin/bash +# +# lib/apache +# Functions to control configuration and operation of apache web server + +# Dependencies: +# +# - ``functions`` file +# - ``STACK_USER`` must be defined +# +# lib/apache exports the following functions: +# +# - install_apache_wsgi +# - apache_site_config_for +# - enable_apache_site +# - disable_apache_site +# - start_apache_server +# - stop_apache_server +# - restart_apache_server + +# Save trace setting +_XTRACE_LIB_APACHE=$(set +o | grep xtrace) +set +o xtrace + +# Allow overriding the default Apache user and group, default to +# current user and his default group. +APACHE_USER=${APACHE_USER:-$STACK_USER} +APACHE_GROUP=${APACHE_GROUP:-$(id -gn $APACHE_USER)} + +APACHE_LOCAL_HOST=$SERVICE_LOCAL_HOST +if [[ "$SERVICE_IP_VERSION" == 6 ]]; then + APACHE_LOCAL_HOST=[$APACHE_LOCAL_HOST] +fi + + +# Set up apache name and configuration directory +# Note that APACHE_CONF_DIR is really more accurately apache's vhost +# configuration dir but we can't just change this because public interfaces. 
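# Unless overridden, the branches below resolve to (shown for illustration):
#
#   Ubuntu:        APACHE_NAME=apache2  APACHE_CONF_DIR=/etc/apache2/sites-available
#   Fedora family: APACHE_NAME=httpd    APACHE_CONF_DIR=/etc/httpd/conf.d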
+if is_ubuntu; then + APACHE_NAME=apache2 + APACHE_CONF_DIR=${APACHE_CONF_DIR:-/etc/$APACHE_NAME/sites-available} + APACHE_SETTINGS_DIR=${APACHE_SETTINGS_DIR:-/etc/$APACHE_NAME/conf-enabled} +elif is_fedora; then + APACHE_NAME=httpd + APACHE_CONF_DIR=${APACHE_CONF_DIR:-/etc/$APACHE_NAME/conf.d} + APACHE_SETTINGS_DIR=${APACHE_SETTINGS_DIR:-/etc/$APACHE_NAME/conf.d} +fi +APACHE_LOG_DIR="/var/log/${APACHE_NAME}" + +# Functions +# --------- + +# Enable apache mod and restart apache if it isn't already enabled. +function enable_apache_mod { + local mod=$1 + local should_restart=$2 + # Apache installation, because we mark it NOPRIME + if is_ubuntu; then + # Skip mod_version as it is not a valid mod to enable + # on debuntu, instead it is built in. + if [[ "$mod" != "version" ]] && ! a2query -m $mod ; then + sudo a2enmod $mod + if [[ "$should_restart" != "norestart" ]] ; then + restart_apache_server + fi + fi + elif is_fedora; then + # pass + true + else + exit_distro_not_supported "apache enable mod" + fi +} + +# NOTE(sdague): Install uwsgi including apache module, we need to get +# to 2.0.6+ to get a working mod_proxy_uwsgi. We can probably build a +# check for that and do it differently for different platforms. +function install_apache_uwsgi { + local apxs="apxs2" + if is_fedora; then + apxs="apxs" + fi + + if is_ubuntu; then + local pkg_list="uwsgi uwsgi-plugin-python3" + install_package ${pkg_list} + # NOTE(ianw) 2022-02-03 : Fedora 35 needs to skip this and fall + # into the install-from-source because the upstream packages + # didn't fix Python 3.10 compatibility before release. Should be + # fixed in uwsgi 4.9.0; can remove this when packages available + # or we drop this release + elif is_fedora && ! is_openeuler && ! [[ $DISTRO =~ rhel9 ]]; then + # Note httpd comes with mod_proxy_uwsgi and it is loaded by + # default; the mod_proxy_uwsgi package actually conflicts now. + # See: + # https://bugzilla.redhat.com/show_bug.cgi?id=1574335 + # + # Thus there is nothing else to do after this install + install_package uwsgi \ + uwsgi-plugin-python3 + else + # Compile uwsgi from source. + local dir + dir=$(mktemp -d) + pushd $dir + pip_install uwsgi + pip download uwsgi -c $REQUIREMENTS_DIR/upper-constraints.txt + local uwsgi + uwsgi=$(ls uwsgi*) + tar xvf $uwsgi + cd uwsgi*/apache2 + sudo $apxs -i -c mod_proxy_uwsgi.c + popd + # delete the temp directory + sudo rm -rf $dir + fi + + if is_ubuntu; then + if ! a2query -m proxy || ! a2query -m proxy_uwsgi ; then + # we've got to enable proxy and proxy_uwsgi for this to work + sudo a2enmod proxy + sudo a2enmod proxy_uwsgi + restart_apache_server + fi + fi +} + +# install_apache_wsgi() - Install Apache server and wsgi module +function install_apache_wsgi { + # Apache installation, because we mark it NOPRIME + if is_ubuntu; then + # Install apache2, which is NOPRIME'd + install_package apache2 + if is_package_installed libapache2-mod-wsgi; then + uninstall_package libapache2-mod-wsgi + fi + install_package libapache2-mod-wsgi-py3 + elif is_fedora; then + sudo rm -f /etc/httpd/conf.d/000-* + install_package httpd python${PYTHON3_VERSION}-mod_wsgi + # rpm distros dont enable httpd by default so enable it to support reboots. 
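# (Debian/Ubuntu package maintainer scripts enable and start apache2
# automatically on install, which is why the Ubuntu branch above needs no
# equivalent call.)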
+ sudo systemctl enable httpd + # For consistency with Ubuntu, switch to the worker mpm, as + # the default is event + sudo sed -i '/mod_mpm_prefork.so/s/^/#/g' /etc/httpd/conf.modules.d/00-mpm.conf + sudo sed -i '/mod_mpm_event.so/s/^/#/g' /etc/httpd/conf.modules.d/00-mpm.conf + sudo sed -i '/mod_mpm_worker.so/s/^#//g' /etc/httpd/conf.modules.d/00-mpm.conf + else + exit_distro_not_supported "apache wsgi installation" + fi + # WSGI isn't enabled by default, enable it + enable_apache_mod wsgi +} + +# apache_site_config_for() - The filename of the site's configuration file. +# This function uses the global variables APACHE_NAME and APACHE_CONF_DIR. +# +# On Ubuntu 14.04+, the site configuration file must have a .conf suffix for a2ensite and a2dissite to +# recognise it. a2ensite and a2dissite ignore the .conf suffix used as parameter. The default sites' +# files are 000-default.conf and default-ssl.conf. +# +# On Fedora, any file in /etc/httpd/conf.d/ whose name ends with .conf is enabled. +# +# On RHEL and CentOS, things should hopefully work as in Fedora. +# +# The table below summarizes what should happen on each distribution: +# +----------------------+--------------------+--------------------------+--------------------------+ +# | Distribution | File name | Site enabling command | Site disabling command | +# +----------------------+--------------------+--------------------------+--------------------------+ +# | Ubuntu 14.04 | site.conf | a2ensite site | a2dissite site | +# | Fedora, RHEL, CentOS | site.conf.disabled | mv site.conf{.disabled,} | mv site.conf{,.disabled} | +# +----------------------+--------------------+--------------------------+--------------------------+ +function apache_site_config_for { + local site=$@ + if is_ubuntu; then + # Ubuntu 14.04 - Apache 2.4 + echo $APACHE_CONF_DIR/${site}.conf + elif is_fedora; then + # fedora conf.d is only imported if it ends with .conf so this is approx the same + local enabled_site_file="$APACHE_CONF_DIR/${site}.conf" + if [ -f $enabled_site_file ]; then + echo ${enabled_site_file} + else + echo ${enabled_site_file}.disabled + fi + fi +} + +# enable_apache_site() - Enable a particular apache site +function enable_apache_site { + local site=$@ + # Many of our sites use mod version. Just enable it. + enable_apache_mod version + if is_ubuntu; then + sudo a2ensite ${site} + elif is_fedora; then + local enabled_site_file="$APACHE_CONF_DIR/${site}.conf" + # Do nothing if site already enabled or no site config exists + if [[ -f ${enabled_site_file}.disabled ]] && [[ ! 
-f ${enabled_site_file} ]]; then + sudo mv ${enabled_site_file}.disabled ${enabled_site_file} + fi + fi +} + +# disable_apache_site() - Disable a particular apache site +function disable_apache_site { + local site=$@ + if is_ubuntu; then + sudo a2dissite ${site} || true + elif is_fedora; then + local enabled_site_file="$APACHE_CONF_DIR/${site}.conf" + # Do nothing if no site config exists + if [[ -f ${enabled_site_file} ]]; then + sudo mv ${enabled_site_file} ${enabled_site_file}.disabled + fi + fi +} + +# start_apache_server() - Start running apache server +function start_apache_server { + start_service $APACHE_NAME +} + +# stop_apache_server() - Stop running apache server +function stop_apache_server { + if [ -n "$APACHE_NAME" ]; then + stop_service $APACHE_NAME + else + exit_distro_not_supported "apache configuration" + fi +} + +# restart_apache_server +function restart_apache_server { + # Apache can be slow to stop, doing an explicit stop, sleep, start helps + # to mitigate issues where apache will claim a port it's listening on is + # still in use and fail to start. + restart_service $APACHE_NAME +} + +# write_uwsgi_config() - Create a new uWSGI config file +function write_uwsgi_config { + local conf=$1 + local wsgi=$2 + local url=$3 + local http=$4 + local name=$5 + + if [ -z "$name" ]; then + name=$(basename $wsgi) + fi + + # create a home for the sockets; note don't use /tmp -- apache has + # a private view of it on some platforms. + local socket_dir='/var/run/uwsgi' + + # /var/run will be empty on ubuntu after reboot, so we can use systemd-temptiles + # to automatically create $socket_dir. + sudo mkdir -p /etc/tmpfiles.d/ + echo "d $socket_dir 0755 $STACK_USER root" | sudo tee /etc/tmpfiles.d/uwsgi.conf + sudo systemd-tmpfiles --create /etc/tmpfiles.d/uwsgi.conf + + local socket="$socket_dir/${name}.socket" + + # always cleanup given that we are using iniset here + rm -rf $conf + # Set either the module path or wsgi script path depending on what we've + # been given. Note that the regex isn't exhaustive - neither Python modules + # nor Python variables can start with a number - but it's "good enough" + if [[ "$wsgi" =~ ^[a-zA-Z0-9_.]+:[a-zA-Z0-9_]+$ ]]; then + iniset "$conf" uwsgi module "$wsgi" + else + deprecated 'Configuring uWSGI with a WSGI file is deprecated, use module paths instead' + iniset "$conf" uwsgi wsgi-file "$wsgi" + fi + iniset "$conf" uwsgi processes $API_WORKERS + # This is running standalone + iniset "$conf" uwsgi master true + # Set die-on-term & exit-on-reload so that uwsgi shuts down + iniset "$conf" uwsgi die-on-term true + iniset "$conf" uwsgi exit-on-reload false + # Set worker-reload-mercy so that worker will not exit till the time + # configured after graceful shutdown + iniset "$conf" uwsgi worker-reload-mercy $WORKER_TIMEOUT + iniset "$conf" uwsgi enable-threads true + iniset "$conf" uwsgi plugins http,python3 + # uwsgi recommends this to prevent thundering herd on accept. + iniset "$conf" uwsgi thunder-lock true + # Set hook to trigger graceful shutdown on SIGTERM + iniset "$conf" uwsgi hook-master-start "unix_signal:15 gracefully_kill_them_all" + # Override the default size for headers from the 4k default. + iniset "$conf" uwsgi buffer-size 65535 + # Make sure the client doesn't try to re-use the connection. + iniset "$conf" uwsgi add-header "Connection: close" + # This ensures that file descriptors aren't shared between processes. 
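# (With lazy-apps, uWSGI loads the application in each worker after forking
# rather than once in the master, so descriptors such as database sockets
# are not inherited by every worker.)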
+ iniset "$conf" uwsgi lazy-apps true + # Starting time of the WSGi server + iniset "$conf" uwsgi start-time %t + + # If we said bind directly to http, then do that and don't start the apache proxy + if [[ -n "$http" ]]; then + iniset "$conf" uwsgi http $http + else + local apache_conf="" + apache_conf=$(apache_site_config_for $name) + iniset "$conf" uwsgi socket "$socket" + iniset "$conf" uwsgi chmod-socket 666 + echo "ProxyPass \"${url}\" \"unix:${socket}|uwsgi://uwsgi-uds-${name}\" retry=0 acquire=1 " | sudo tee -a $apache_conf + enable_apache_site $name + restart_apache_server + fi +} + +# For services using chunked encoding, the only services known to use this +# currently are Glance and Swift, we need to use an http proxy instead of +# mod_proxy_uwsgi because the chunked encoding gets dropped. See: +# https://github.com/unbit/uwsgi/issues/1540. +function write_local_uwsgi_http_config { + local conf=$1 + local wsgi=$2 + local url=$3 + local name=$4 + + if [ -z "$name" ]; then + name=$(basename $wsgi) + fi + + # create a home for the sockets; note don't use /tmp -- apache has + # a private view of it on some platforms. + + # always cleanup given that we are using iniset here + rm -rf $conf + # Set either the module path or wsgi script path depending on what we've + # been given + if [[ "$wsgi" =~ ^[a-zA-Z0-9_.]+:[a-zA-Z0-9_]+$ ]]; then + iniset "$conf" uwsgi module "$wsgi" + else + deprecated 'Configuring uWSGI with a WSGI file is deprecated, use module paths instead' + iniset "$conf" uwsgi wsgi-file "$wsgi" + fi + port=$(get_random_port) + iniset "$conf" uwsgi http-socket "$APACHE_LOCAL_HOST:$port" + iniset "$conf" uwsgi processes $API_WORKERS + # This is running standalone + iniset "$conf" uwsgi master true + # Set die-on-term & exit-on-reload so that uwsgi shuts down + iniset "$conf" uwsgi die-on-term true + iniset "$conf" uwsgi exit-on-reload false + # Set worker-reload-mercy so that worker will not exit till the time + # configured after graceful shutdown + iniset "$conf" uwsgi worker-reload-mercy $WORKER_TIMEOUT + iniset "$conf" uwsgi enable-threads true + iniset "$conf" uwsgi plugins http,python3 + # uwsgi recommends this to prevent thundering herd on accept. + iniset "$conf" uwsgi thunder-lock true + # Set hook to trigger graceful shutdown on SIGTERM + iniset "$conf" uwsgi hook-master-start "unix_signal:15 gracefully_kill_them_all" + # Override the default size for headers from the 4k default. + iniset "$conf" uwsgi buffer-size 65535 + # Make sure the client doesn't try to re-use the connection. + iniset "$conf" uwsgi add-header "Connection: close" + # This ensures that file descriptors aren't shared between processes. 
+ iniset "$conf" uwsgi lazy-apps true + iniset "$conf" uwsgi chmod-socket 666 + iniset "$conf" uwsgi http-raw-body true + iniset "$conf" uwsgi http-chunked-input true + iniset "$conf" uwsgi http-auto-chunked true + iniset "$conf" uwsgi http-keepalive false + # Increase socket timeout for slow chunked uploads + iniset "$conf" uwsgi socket-timeout 30 + # Starting time of the WSGi server + iniset "$conf" uwsgi start-time %t + + enable_apache_mod proxy + enable_apache_mod proxy_http + local apache_conf="" + apache_conf=$(apache_site_config_for $name) + echo "KeepAlive Off" | sudo tee $apache_conf + echo "SetEnv proxy-sendchunked 1" | sudo tee -a $apache_conf + echo "ProxyPass \"${url}\" \"http://$APACHE_LOCAL_HOST:$port\" retry=0 acquire=1 " | sudo tee -a $apache_conf + enable_apache_site $name + restart_apache_server +} + +# Write a straight-through proxy for a service that runs locally and just needs +# to be reachable via the main http proxy at $loc +function write_local_proxy_http_config { + local name=$1 + local url=$2 + local loc=$3 + local apache_conf + apache_conf=$(apache_site_config_for $name) + + enable_apache_mod proxy + enable_apache_mod proxy_http + + echo "KeepAlive Off" | sudo tee $apache_conf + echo "SetEnv proxy-sendchunked 1" | sudo tee -a $apache_conf + echo "ProxyPass \"${loc}\" \"$url\" retry=0 acquire=1 " | sudo tee -a $apache_conf + enable_apache_site $name + restart_apache_server +} + +function remove_uwsgi_config { + local conf=$1 + local wsgi=$2 + local name="" + # TODO(stephenfin): Remove this call when everyone is using module path + # configuration instead of file path configuration + name=$(basename $wsgi) + + if [[ "$wsgi" = /* ]]; then + deprecated "Passing a wsgi script to remove_uwsgi_config is deprecated, pass an application name instead" + fi + + rm -rf $conf + disable_apache_site $name +} + +# Restore xtrace +$_XTRACE_LIB_APACHE + +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: diff --git a/lib/atop b/lib/atop new file mode 100644 index 0000000000..25c8e9a83f --- /dev/null +++ b/lib/atop @@ -0,0 +1,49 @@ +#!/bin/bash +# +# lib/atop +# Functions to start and stop atop + +# Dependencies: +# +# - ``functions`` file + +# ``stack.sh`` calls the entry points in this order: +# +# - configure_atop +# - install_atop +# - start_atop +# - stop_atop + +# Save trace setting +_XTRACE_ATOP=$(set +o | grep xtrace) +set +o xtrace + +function configure_atop { + mkdir -p $LOGDIR/atop + cat </dev/null +# /etc/default/atop +# see man atoprc for more possibilities to configure atop execution + +LOGOPTS="-R" +LOGINTERVAL=${ATOP_LOGINTERVAL:-"30"} +LOGGENERATIONS=${ATOP_LOGGENERATIONS:-"1"} +LOGPATH=$LOGDIR/atop +EOF +} + +function install_atop { + install_package atop +} + +# start_() - Start running processes +function start_atop { + start_service atop +} + +# stop_atop() stop atop process +function stop_atop { + stop_service atop +} + +# Restore xtrace +$_XTRACE_ATOP diff --git a/lib/cinder b/lib/cinder new file mode 100644 index 0000000000..02056c20f4 --- /dev/null +++ b/lib/cinder @@ -0,0 +1,762 @@ +#!/bin/bash +# +# lib/cinder +# Install and start **Cinder** volume service + +# Dependencies: +# +# - functions +# - DEST, DATA_DIR, STACK_USER must be defined +# - SERVICE_{TENANT_NAME|PASSWORD} must be defined +# - ``KEYSTONE_TOKEN_FORMAT`` must be defined + +# stack.sh +# --------- +# - install_cinder +# - configure_cinder +# - init_cinder +# - start_cinder +# - stop_cinder +# - cleanup_cinder + +# Save trace setting 
+_XTRACE_CINDER=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + +# set up default driver +CINDER_DRIVER=${CINDER_DRIVER:-default} +CINDER_PLUGINS=$TOP_DIR/lib/cinder_plugins +CINDER_BACKENDS=$TOP_DIR/lib/cinder_backends +CINDER_BACKUPS=$TOP_DIR/lib/cinder_backups + +# grab plugin config if specified via cinder_driver +if [[ -r $CINDER_PLUGINS/$CINDER_DRIVER ]]; then + source $CINDER_PLUGINS/$CINDER_DRIVER +fi + +# set up default directories +GITDIR["python-cinderclient"]=$DEST/python-cinderclient +GITDIR["python-brick-cinderclient-ext"]=$DEST/python-brick-cinderclient-ext +CINDER_DIR=$DEST/cinder + +if [[ $SERVICE_IP_VERSION == 6 ]]; then + CINDER_MY_IP="$HOST_IPV6" +else + CINDER_MY_IP="$HOST_IP" +fi + + +# Cinder virtual environment +if [[ ${USE_VENV} = True ]]; then + PROJECT_VENV["cinder"]=${CINDER_DIR}.venv + CINDER_BIN_DIR=${PROJECT_VENV["cinder"]}/bin +else + CINDER_BIN_DIR=$(get_python_exec_prefix) +fi + +CINDER_STATE_PATH=${CINDER_STATE_PATH:=$DATA_DIR/cinder} +OS_BRICK_LOCK_PATH=${OS_BRICK_LOCK_PATH:=$DATA_DIR/os_brick} + +CINDER_CONF_DIR=/etc/cinder +CINDER_CONF=$CINDER_CONF_DIR/cinder.conf +CINDER_UWSGI=cinder.wsgi.api:application +CINDER_UWSGI_CONF=$CINDER_CONF_DIR/cinder-api-uwsgi.ini +CINDER_API_PASTE_INI=$CINDER_CONF_DIR/api-paste.ini + +# Public facing bits +if is_service_enabled tls-proxy; then + CINDER_SERVICE_PROTOCOL="https" +fi +CINDER_SERVICE_HOST=${CINDER_SERVICE_HOST:-$SERVICE_HOST} +CINDER_SERVICE_PORT=${CINDER_SERVICE_PORT:-8776} +CINDER_SERVICE_PORT_INT=${CINDER_SERVICE_PORT_INT:-18776} +CINDER_SERVICE_PROTOCOL=${CINDER_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} +CINDER_SERVICE_LISTEN_ADDRESS=${CINDER_SERVICE_LISTEN_ADDRESS:-$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)} + +# We do not need to report service status every 10s for devstack-like +# deployments. In the gate this generates extra work for the services and the +# database which are already taxed. +CINDER_SERVICE_REPORT_INTERVAL=${CINDER_SERVICE_REPORT_INTERVAL:-120} + +# What type of LVM device should Cinder use for LVM backend +# Defaults to auto, which will do thin provisioning if it's a fresh +# volume group, otherwise it will do thick. The other valid choices are +# default, which is thick, or thin, which as the name implies utilizes lvm +# thin provisioning. +CINDER_LVM_TYPE=${CINDER_LVM_TYPE:-auto} + +# ``CINDER_USE_SERVICE_TOKEN`` is a mode where service token is passed along with +# user token while communicating to external REST APIs like Glance. +CINDER_USE_SERVICE_TOKEN=$(trueorfalse True CINDER_USE_SERVICE_TOKEN) + +# Default backends +# The backend format is type:name where type is one of the supported backend +# types (lvm, nfs, etc) and name is the identifier used in the Cinder +# configuration and for the volume type name. Multiple backends are +# comma-separated. 
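# For example, a hypothetical two-backend configuration would be:
#   CINDER_ENABLED_BACKENDS=lvm:lvmdriver-1,nfs:nfsdriver-1
# giving one LVM and one NFS backend, each with a matching volume type.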
+# The old ``CINDER_MULTI_LVM_BACKEND=True`` setting had a default of: +# CINDER_ENABLED_BACKENDS=${CINDER_ENABLED_BACKENDS:-lvm:lvmdriver-1,lvm:lvmdriver-2} +CINDER_ENABLED_BACKENDS=${CINDER_ENABLED_BACKENDS:-lvm:lvmdriver-1} + +CINDER_VOLUME_CLEAR=${CINDER_VOLUME_CLEAR:-${CINDER_VOLUME_CLEAR_DEFAULT:-zero}} +CINDER_VOLUME_CLEAR=$(echo ${CINDER_VOLUME_CLEAR} | tr '[:upper:]' '[:lower:]') + +VOLUME_TYPE_MULTIATTACH=${VOLUME_TYPE_MULTIATTACH:-multiattach} + +if [[ -n "$CINDER_ISCSI_HELPER" ]]; then + if [[ -z "$CINDER_TARGET_HELPER" ]]; then + deprecated 'Using CINDER_ISCSI_HELPER is deprecated, use CINDER_TARGET_HELPER instead' + CINDER_TARGET_HELPER="$CINDER_ISCSI_HELPER" + else + deprecated 'Deprecated CINDER_ISCSI_HELPER is set, but is being overwritten by CINDER_TARGET_HELPER' + fi +fi +CINDER_TARGET_HELPER=${CINDER_TARGET_HELPER:-lioadm} + +if [[ $CINDER_TARGET_HELPER == 'nvmet' ]]; then + CINDER_TARGET_PROTOCOL=${CINDER_TARGET_PROTOCOL:-'nvmet_rdma'} + CINDER_TARGET_PREFIX=${CINDER_TARGET_PREFIX:-'nvme-subsystem-1'} + CINDER_TARGET_PORT=${CINDER_TARGET_PORT:-4420} +else + CINDER_TARGET_PROTOCOL=${CINDER_TARGET_PROTOCOL:-'iscsi'} + CINDER_TARGET_PREFIX=${CINDER_TARGET_PREFIX:-'iqn.2010-10.org.openstack:'} + CINDER_TARGET_PORT=${CINDER_TARGET_PORT:-3260} +fi + + +# EL should only use lioadm +if is_fedora; then + if [[ ${CINDER_TARGET_HELPER} != "lioadm" && ${CINDER_TARGET_HELPER} != 'nvmet' ]]; then + die "lioadm and nvmet are the only valid Cinder target_helper config on this platform" + fi +fi + +# When Cinder is used as a backend for Glance, it can be configured to clone +# the volume containing image data directly in the backend instead of +# transferring data from volume to volume. Value is a comma separated list of +# schemes (currently only 'file' and 'cinder' are supported). The default +# configuration in Cinder is empty (that is, do not use this feature). NOTE: +# to use this feature you must also enable GLANCE_SHOW_DIRECT_URL and/or +# GLANCE_SHOW_MULTIPLE_LOCATIONS for glance-api.conf. +CINDER_ALLOWED_DIRECT_URL_SCHEMES=${CINDER_ALLOWED_DIRECT_URL_SCHEMES:-} +if [[ -n "$CINDER_ALLOWED_DIRECT_URL_SCHEMES" ]]; then + if [[ "${GLANCE_SHOW_DIRECT_URL:-False}" != "True" \ + && "${GLANCE_SHOW_MULTIPLE_LOCATIONS:-False}" != "True" ]]; then + warn $LINENO "CINDER_ALLOWED_DIRECT_URL_SCHEMES is set, but neither \ +GLANCE_SHOW_DIRECT_URL nor GLANCE_SHOW_MULTIPLE_LOCATIONS is True" + fi +fi + +# For backward compatibility +# Before CINDER_BACKUP_DRIVER was introduced, ceph backup driver was configured +# along with ceph backend driver. 
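# In practice (illustrative): if CINDER_ENABLED_BACKENDS contains a ceph
# entry and CINDER_BACKUP_DRIVER is left unset, the backup driver falls back
# to ceph instead of the usual swift default below.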
+if [[ -z "${CINDER_BACKUP_DRIVER}" && "$CINDER_ENABLED_BACKENDS" =~ "ceph" ]]; then + CINDER_BACKUP_DRIVER=ceph +fi + +# Supported backup drivers are in lib/cinder_backups +CINDER_BACKUP_DRIVER=${CINDER_BACKUP_DRIVER:-swift} + +# Source the enabled backends +if is_service_enabled c-vol && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then + for be in ${CINDER_ENABLED_BACKENDS//,/ }; do + be_type=${be%%:*} + be_name=${be##*:} + if [[ -r $CINDER_BACKENDS/${be_type} ]]; then + source $CINDER_BACKENDS/${be_type} + fi + done +fi + +# Source the backup driver +if is_service_enabled c-bak && [[ -n "$CINDER_BACKUP_DRIVER" ]]; then + if [[ -r $CINDER_BACKUPS/$CINDER_BACKUP_DRIVER ]]; then + source $CINDER_BACKUPS/$CINDER_BACKUP_DRIVER + else + die "cinder backup driver $CINDER_BACKUP_DRIVER is not supported" + fi +fi + +# Environment variables to configure the image-volume cache +CINDER_IMG_CACHE_ENABLED=${CINDER_IMG_CACHE_ENABLED:-True} + +# Environment variables to configure the optimized volume upload +CINDER_UPLOAD_OPTIMIZED=${CINDER_UPLOAD_OPTIMIZED:-False} + +# Environment variables to configure the internal tenant during optimized volume upload +CINDER_UPLOAD_INTERNAL_TENANT=${CINDER_UPLOAD_INTERNAL_TENANT:-False} + +# For limits, if left unset, it will use cinder defaults of 0 for unlimited +CINDER_IMG_CACHE_SIZE_GB=${CINDER_IMG_CACHE_SIZE_GB:-} +CINDER_IMG_CACHE_SIZE_COUNT=${CINDER_IMG_CACHE_SIZE_COUNT:-} + +# Configure which cinder backends will have the image-volume cache, this takes the same +# form as the CINDER_ENABLED_BACKENDS config option. By default it will +# enable the cache for all cinder backends. +CINDER_CACHE_ENABLED_FOR_BACKENDS=${CINDER_CACHE_ENABLED_FOR_BACKENDS:-$CINDER_ENABLED_BACKENDS} + +# Configure which cinder backends will have optimized volume upload, this takes the same +# form as the CINDER_ENABLED_BACKENDS config option. By default it will +# enable the cache for all cinder backends. +CINDER_UPLOAD_OPTIMIZED_BACKENDS=${CINDER_UPLOAD_OPTIMIZED_BACKENDS:-$CINDER_ENABLED_BACKENDS} + +# Flag to set the oslo_policy.enforce_scope. This is used to switch +# the Volume API policies to start checking the scope of token. by default, +# this flag is False. +# For more detail: https://docs.openstack.org/oslo.policy/latest/configuration/index.html#oslo_policy.enforce_scope +CINDER_ENFORCE_SCOPE=$(trueorfalse False CINDER_ENFORCE_SCOPE) + +# Functions +# --------- + +# Test if any Cinder services are enabled +# is_cinder_enabled +function is_cinder_enabled { + [[ ,${DISABLED_SERVICES} =~ ,"cinder" ]] && return 1 + [[ ,${ENABLED_SERVICES} =~ ,"c-" ]] && return 0 + return 1 +} + +# _cinder_cleanup_apache_wsgi() - Remove wsgi files, disable and remove apache vhost file +function _cinder_cleanup_apache_wsgi { + sudo rm -f $(apache_site_config_for osapi-volume) +} + +# cleanup_cinder() - Remove residual data files, anything left over from previous +# runs that a clean run would need to clean up +function cleanup_cinder { + # ensure the volume group is cleared up because fails might + # leave dead volumes in the group + if [ "$CINDER_TARGET_HELPER" = "tgtadm" ]; then + local targets + targets=$(sudo tgtadm --op show --mode target) + if [ $? -ne 0 ]; then + # If tgt driver isn't running this won't work obviously + # So check the response and restart if need be + echo "tgtd seems to be in a bad state, restarting..." 
+ if is_ubuntu; then + restart_service tgt + else + restart_service tgtd + fi + targets=$(sudo tgtadm --op show --mode target) + fi + + if [[ -n "$targets" ]]; then + local iqn_list=( $(grep --no-filename -r iqn $SCSI_PERSIST_DIR | sed 's///') ) + for i in "${iqn_list[@]}"; do + echo removing iSCSI target: $i + sudo tgt-admin --delete $i + done + fi + + if is_ubuntu; then + stop_service tgt + else + stop_service tgtd + fi + elif [ "$CINDER_TARGET_HELPER" = "lioadm" ]; then + sudo cinder-rtstool get-targets | sudo xargs -rn 1 cinder-rtstool delete + elif [ "$CINDER_TARGET_HELPER" = "nvmet" ]; then + # If we don't disconnect everything vgremove will block + sudo nvme disconnect-all + sudo nvmetcli clear + else + die $LINENO "Unknown value \"$CINDER_TARGET_HELPER\" for CINDER_TARGET_HELPER" + fi + + if is_service_enabled c-vol && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then + local be be_name be_type + for be in ${CINDER_ENABLED_BACKENDS//,/ }; do + be_type=${be%%:*} + be_name=${be##*:} + if type cleanup_cinder_backend_${be_type} >/dev/null 2>&1; then + cleanup_cinder_backend_${be_type} ${be_name} + fi + done + fi + + if is_service_enabled c-bak && [[ -n "$CINDER_BACKUP_DRIVER" ]]; then + if type cleanup_cinder_backup_$CINDER_BACKUP_DRIVER >/dev/null 2>&1; then + cleanup_cinder_backup_$CINDER_BACKUP_DRIVER + fi + fi + + stop_process "c-api" + remove_uwsgi_config "$CINDER_UWSGI_CONF" "cinder-wsgi" +} + +# configure_cinder() - Set config files, create data dirs, etc +function configure_cinder { + sudo install -d -o $STACK_USER -m 755 $CINDER_CONF_DIR + + rm -f $CINDER_CONF + + configure_rootwrap cinder + + if [[ -f "$CINDER_DIR/etc/cinder/resource_filters.json" ]]; then + cp -p "$CINDER_DIR/etc/cinder/resource_filters.json" "$CINDER_CONF_DIR/resource_filters.json" + fi + + cp $CINDER_DIR/etc/cinder/api-paste.ini $CINDER_API_PASTE_INI + + inicomment $CINDER_API_PASTE_INI filter:authtoken auth_host + inicomment $CINDER_API_PASTE_INI filter:authtoken auth_port + inicomment $CINDER_API_PASTE_INI filter:authtoken auth_protocol + inicomment $CINDER_API_PASTE_INI filter:authtoken cafile + inicomment $CINDER_API_PASTE_INI filter:authtoken admin_tenant_name + inicomment $CINDER_API_PASTE_INI filter:authtoken admin_user + inicomment $CINDER_API_PASTE_INI filter:authtoken admin_password + inicomment $CINDER_API_PASTE_INI filter:authtoken signing_dir + + configure_keystone_authtoken_middleware $CINDER_CONF cinder + + iniset $CINDER_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL + + iniset $CINDER_CONF DEFAULT target_helper "$CINDER_TARGET_HELPER" + iniset $CINDER_CONF database connection `database_connection_url cinder` + iniset $CINDER_CONF DEFAULT api_paste_config $CINDER_API_PASTE_INI + iniset $CINDER_CONF DEFAULT rootwrap_config "$CINDER_CONF_DIR/rootwrap.conf" + iniset $CINDER_CONF DEFAULT osapi_volume_extension cinder.api.contrib.standard_extensions + iniset $CINDER_CONF DEFAULT osapi_volume_listen $CINDER_SERVICE_LISTEN_ADDRESS + iniset $CINDER_CONF DEFAULT state_path $CINDER_STATE_PATH + iniset $CINDER_CONF oslo_concurrency lock_path $CINDER_STATE_PATH + iniset $CINDER_CONF DEFAULT my_ip "$CINDER_MY_IP" + iniset $CINDER_CONF key_manager backend cinder.keymgr.conf_key_mgr.ConfKeyManager + iniset $CINDER_CONF key_manager fixed_key $(openssl rand -hex 16) + if [[ -n "$CINDER_ALLOWED_DIRECT_URL_SCHEMES" ]]; then + iniset $CINDER_CONF DEFAULT allowed_direct_url_schemes $CINDER_ALLOWED_DIRECT_URL_SCHEMES + fi + + # set default quotas + iniset $CINDER_CONF DEFAULT quota_volumes ${CINDER_QUOTA_VOLUMES:-10} + 
iniset $CINDER_CONF DEFAULT quota_backups ${CINDER_QUOTA_BACKUPS:-10} + iniset $CINDER_CONF DEFAULT quota_snapshots ${CINDER_QUOTA_SNAPSHOTS:-10} + + # Avoid RPC timeouts in slow CI and test environments by doubling the + # default response timeout set by RPC clients. See bug #1873234 for more + # details and example failures. + iniset $CINDER_CONF DEFAULT rpc_response_timeout 120 + + iniset $CINDER_CONF DEFAULT report_interval $CINDER_SERVICE_REPORT_INTERVAL + iniset $CINDER_CONF DEFAULT service_down_time $(($CINDER_SERVICE_REPORT_INTERVAL * 6)) + + if is_service_enabled c-vol && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then + local enabled_backends="" + local default_name="" + local be be_name be_type + for be in ${CINDER_ENABLED_BACKENDS//,/ }; do + be_type=${be%%:*} + be_name=${be##*:} + if type configure_cinder_backend_${be_type} >/dev/null 2>&1; then + configure_cinder_backend_${be_type} ${be_name} + fi + if [[ -z "$default_name" ]]; then + default_name=$be_name + fi + enabled_backends+=$be_name, + done + iniset $CINDER_CONF DEFAULT enabled_backends ${enabled_backends%,*} + if [[ -n "$default_name" ]]; then + iniset $CINDER_CONF DEFAULT default_volume_type ${default_name} + fi + configure_cinder_image_volume_cache + + # The upload optimization uses Cinder's clone volume functionality to + # clone the Image-Volume from source volume hence can only be + # performed when glance is using cinder as it's backend. + if [[ "$USE_CINDER_FOR_GLANCE" == "True" ]]; then + # Configure optimized volume upload + configure_cinder_volume_upload + fi + fi + + if is_service_enabled c-bak && [[ -n "$CINDER_BACKUP_DRIVER" ]]; then + if type configure_cinder_backup_$CINDER_BACKUP_DRIVER >/dev/null 2>&1; then + configure_cinder_backup_$CINDER_BACKUP_DRIVER + else + die "configure_cinder_backup_$CINDER_BACKUP_DRIVER doesn't exist in $CINDER_BACKUPS/$CINDER_BACKUP_DRIVER" + fi + fi + + if is_service_enabled ceilometer; then + iniset $CINDER_CONF oslo_messaging_notifications driver "messagingv2" + fi + + if is_service_enabled tls-proxy; then + if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then + # Set the service port for a proxy to take the original + iniset $CINDER_CONF DEFAULT osapi_volume_listen_port $CINDER_SERVICE_PORT_INT + iniset $CINDER_CONF oslo_middleware enable_proxy_headers_parsing True + fi + fi + + if [ "$SYSLOG" != "False" ]; then + iniset $CINDER_CONF DEFAULT use_syslog True + fi + + iniset_rpc_backend cinder $CINDER_CONF + + # Format logging + setup_logging $CINDER_CONF + + if is_service_enabled c-api; then + write_uwsgi_config "$CINDER_UWSGI_CONF" "$CINDER_UWSGI" "/volume" "" "cinder-api" + fi + + if [[ -r $CINDER_PLUGINS/$CINDER_DRIVER ]]; then + configure_cinder_driver + fi + + iniset $CINDER_CONF DEFAULT osapi_volume_workers "$API_WORKERS" + + iniset $CINDER_CONF DEFAULT glance_api_servers "$GLANCE_URL" + if is_service_enabled tls-proxy; then + iniset $CINDER_CONF DEFAULT glance_protocol https + iniset $CINDER_CONF DEFAULT glance_ca_certificates_file $SSL_BUNDLE_FILE + fi + + # Set glance credentials (used for location APIs) + configure_keystone_authtoken_middleware $CINDER_CONF glance glance + + # Set nova credentials (used for os-assisted-snapshots) + configure_keystone_authtoken_middleware $CINDER_CONF nova nova + iniset $CINDER_CONF nova region_name "$REGION_NAME" + iniset $CINDER_CONF DEFAULT graceful_shutdown_timeout "$SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT" + + if [[ ! 
-z "$CINDER_COORDINATION_URL" ]]; then + iniset $CINDER_CONF coordination backend_url "$CINDER_COORDINATION_URL" + elif is_service_enabled etcd3; then + # NOTE(jan.gutter): api_version can revert to default once tooz is + # updated with the etcd v3.4 defaults + iniset $CINDER_CONF coordination backend_url "etcd3+http://${SERVICE_HOST}:$ETCD_PORT?api_version=v3" + fi + + if [[ "$CINDER_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then + iniset $CINDER_CONF oslo_policy enforce_scope true + iniset $CINDER_CONF oslo_policy enforce_new_defaults true + else + iniset $CINDER_CONF oslo_policy enforce_scope false + iniset $CINDER_CONF oslo_policy enforce_new_defaults false + fi + + if [ "$CINDER_USE_SERVICE_TOKEN" == "True" ]; then + init_cinder_service_user_conf + fi +} + +# create_cinder_accounts() - Set up common required cinder accounts + +# Project User Roles +# ------------------------------------------------------------------ +# SERVICE_PROJECT_NAME cinder service +# SERVICE_PROJECT_NAME cinder creator (if Barbican is enabled) + +# Migrated from keystone_data.sh +function create_cinder_accounts { + # Cinder + if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then + + local extra_role="" + + # cinder needs the "creator" role in order to interact with barbican + if is_service_enabled barbican; then + extra_role=$(get_or_create_role "creator") + fi + + create_service_user "cinder" $extra_role + + local cinder_api_url + cinder_api_url="$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST/volume" + + # block-storage is the official service type + get_or_create_service "cinder" "block-storage" "Cinder Volume Service" + get_or_create_endpoint \ + "block-storage" \ + "$REGION_NAME" \ + "$cinder_api_url/v3" + configure_cinder_internal_tenant + fi +} + +# init_cinder() - Initialize database and volume group +function init_cinder { + if is_service_enabled $DATABASE_BACKENDS; then + # (Re)create cinder database + recreate_database cinder + + time_start "dbsync" + # Migrate cinder database + $CINDER_BIN_DIR/cinder-manage --config-file $CINDER_CONF db sync + time_stop "dbsync" + fi + + if is_service_enabled c-vol && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then + local be be_name be_type + for be in ${CINDER_ENABLED_BACKENDS//,/ }; do + be_type=${be%%:*} + be_name=${be##*:} + if type init_cinder_backend_${be_type} >/dev/null 2>&1; then + init_cinder_backend_${be_type} ${be_name} + fi + done + fi + + if is_service_enabled c-bak && [[ -n "$CINDER_BACKUP_DRIVER" ]]; then + if type init_cinder_backup_$CINDER_BACKUP_DRIVER >/dev/null 2>&1; then + init_cinder_backup_$CINDER_BACKUP_DRIVER + fi + fi + + mkdir -p $CINDER_STATE_PATH/volumes +} + + +function init_os_brick { + mkdir -p $OS_BRICK_LOCK_PATH + if is_service_enabled cinder; then + iniset $CINDER_CONF os_brick lock_path $OS_BRICK_LOCK_PATH + fi + if is_service_enabled nova; then + iniset $NOVA_CONF os_brick lock_path $OS_BRICK_LOCK_PATH + fi + if is_service_enabled glance; then + iniset $GLANCE_API_CONF os_brick lock_path $OS_BRICK_LOCK_PATH + iniset $GLANCE_CACHE_CONF os_brick lock_path $OS_BRICK_LOCK_PATH + fi +} + +# install_cinder() - Collect source and prepare +function install_cinder { + git_clone $CINDER_REPO $CINDER_DIR $CINDER_BRANCH + setup_develop $CINDER_DIR + if [[ "$CINDER_TARGET_HELPER" == "tgtadm" ]]; then + install_package tgt + elif [[ "$CINDER_TARGET_HELPER" == "lioadm" ]]; then + if is_ubuntu; then + # TODO(frickler): Workaround for https://launchpad.net/bugs/1819819 + sudo mkdir -p /etc/target + + install_package targetcli-fb + else + 
install_package targetcli + fi + elif [[ "$CINDER_TARGET_HELPER" == "nvmet" ]]; then + install_package nvme-cli + + # TODO: Remove manual installation of the dependency when the + # requirement is added to nvmetcli: + # http://lists.infradead.org/pipermail/linux-nvme/2022-July/033576.html + if is_ubuntu; then + install_package python3-configshell-fb + else + install_package python3-configshell + fi + # Install from source because Ubuntu doesn't have the package and some packaged versions didn't work on Python 3 + pip_install git+git://git.infradead.org/users/hch/nvmetcli.git + + sudo modprobe nvmet + sudo modprobe nvme-fabrics + + if [[ $CINDER_TARGET_PROTOCOL == 'nvmet_rdma' ]]; then + install_package rdma-core + sudo modprobe nvme-rdma + + # Create the Soft-RoCE device over the networking interface + local iface=${HOST_IP_IFACE:-`ip -br -$SERVICE_IP_VERSION a | grep $CINDER_MY_IP | awk '{print $1}'`} + if [[ -z "$iface" ]]; then + die $LINENO "Cannot find interface to bind Soft-RoCE" + fi + + if ! sudo rdma link | grep $iface ; then + sudo rdma link add rxe_$iface type rxe netdev $iface + fi + + elif [[ $CINDER_TARGET_PROTOCOL == 'nvmet_tcp' ]]; then + sudo modprobe nvme-tcp + + else # 'nvmet_fc' + sudo modprobe nvme-fc + fi + fi +} + +# install_cinderclient() - Collect source and prepare +function install_cinderclient { + if use_library_from_git "python-brick-cinderclient-ext"; then + git_clone_by_name "python-brick-cinderclient-ext" + setup_dev_lib "python-brick-cinderclient-ext" + fi + + if use_library_from_git "python-cinderclient"; then + git_clone_by_name "python-cinderclient" + setup_dev_lib "python-cinderclient" + sudo install -D -m 0644 -o $STACK_USER {${GITDIR["python-cinderclient"]}/tools/,/etc/bash_completion.d/}cinder.bash_completion + fi +} + +# apply config.d approach for cinder volumes directory +function _configure_tgt_for_config_d { + if [[ ! -d /etc/tgt/stack.d/ ]]; then + sudo ln -sf $CINDER_STATE_PATH/volumes /etc/tgt/stack.d + fi + if ! grep -q "include /etc/tgt/stack.d/*" /etc/tgt/targets.conf; then + echo "include /etc/tgt/stack.d/*" | sudo tee -a /etc/tgt/targets.conf + fi +} + +# start_cinder() - Start running processes +function start_cinder { + local service_port=$CINDER_SERVICE_PORT + local service_protocol=$CINDER_SERVICE_PROTOCOL + local cinder_url + if [ "$CINDER_TARGET_HELPER" = "tgtadm" ]; then + if is_service_enabled c-vol; then + # Delete any old stack.conf + sudo rm -f /etc/tgt/conf.d/stack.conf + _configure_tgt_for_config_d + if is_ubuntu; then + sudo service tgt restart + else + restart_service tgtd + fi + # NOTE(gfidente): ensure tgtd is running in debug mode + sudo tgtadm --mode system --op update --name debug --value on + fi + fi + + if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then + run_process "c-api" "$(which uwsgi) --procname-prefix cinder-api --ini $CINDER_UWSGI_CONF" + cinder_url=$service_protocol://$SERVICE_HOST/volume/v3 + fi + + echo "Waiting for Cinder API to start..." + if ! 
wait_for_service $SERVICE_TIMEOUT $cinder_url; then + die $LINENO "c-api did not start" + fi + + run_process c-sch "$CINDER_BIN_DIR/cinder-scheduler --config-file $CINDER_CONF" + # Tune glibc for Python Services using single malloc arena for all threads + # and disabling dynamic thresholds to reduce memory usage when using native + # threads directly or via eventlet.tpool + # https://www.gnu.org/software/libc/manual/html_node/Memory-Allocation-Tunables.html + malloc_tuning="MALLOC_ARENA_MAX=1 MALLOC_MMAP_THRESHOLD_=131072 MALLOC_TRIM_THRESHOLD_=262144" + run_process c-bak "$CINDER_BIN_DIR/cinder-backup --config-file $CINDER_CONF" "" "" "$malloc_tuning" + run_process c-vol "$CINDER_BIN_DIR/cinder-volume --config-file $CINDER_CONF" "" "" "$malloc_tuning" + + # NOTE(jdg): For cinder, startup order matters. To ensure that repor_capabilities is received + # by the scheduler start the cinder-volume service last (or restart it) after the scheduler + # has started. This is a quick fix for lp bug/1189595 +} + +# stop_cinder() - Stop running processes +function stop_cinder { + stop_process c-api + stop_process c-bak + stop_process c-sch + stop_process c-vol +} + +function create_one_type { + type_name=$1 + property_key=$2 + property_value=$3 + # NOTE (e0ne): openstack client doesn't work with cinder in noauth mode + if is_service_enabled keystone; then + openstack --os-region-name="$REGION_NAME" volume type create --property $property_key="$property_value" $type_name + else + # TODO (e0ne): use openstack client once it will support cinder in noauth mode: + # https://bugs.launchpad.net/python-cinderclient/+bug/1755279 + local cinder_url + cinder_url=$CINDER_SERVICE_PROTOCOL://$SERVICE_HOST:$CINDER_SERVICE_PORT/v3 + OS_USER_ID=$OS_USERNAME OS_PROJECT_ID=$OS_PROJECT_NAME cinder --os-auth-type noauth --os-endpoint=$cinder_url type-create $type_name + OS_USER_ID=$OS_USERNAME OS_PROJECT_ID=$OS_PROJECT_NAME cinder --os-auth-type noauth --os-endpoint=$cinder_url type-key $type_name set $property_key="$property_value" + fi +} + +# create_volume_types() - Create Cinder's configured volume types +function create_volume_types { + # Create volume types + if is_service_enabled c-api && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then + local be be_name + for be in ${CINDER_ENABLED_BACKENDS//,/ }; do + be_name=${be##*:} + create_one_type $be_name "volume_backend_name" $be_name + done + + if [[ $ENABLE_VOLUME_MULTIATTACH == "True" ]]; then + create_one_type $VOLUME_TYPE_MULTIATTACH $VOLUME_TYPE_MULTIATTACH " True" + fi + + # Increase quota for the service project if glance is using cinder, + # since it's likely to occasionally go above the default 10 in parallel + # test execution. + if [[ "$USE_CINDER_FOR_GLANCE" == "True" ]]; then + openstack --os-region-name="$REGION_NAME" \ + quota set --volumes 50 "$SERVICE_PROJECT_NAME" + fi + fi +} + +# Compatibility for Grenade + +function create_cinder_volume_group { + # During a transition period Grenade needs to have this function defined + # It is effectively a no-op in the Grenade 'target' use case + : +} + +function configure_cinder_internal_tenant { + # Re-use the Cinder service account for simplicity. 
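# (The internal tenant owns image-cache and optimized-upload volumes, so
# mapping it onto the existing service project and cinder user avoids
# creating a dedicated project just for that.)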
+ iniset $CINDER_CONF DEFAULT cinder_internal_tenant_project_id $(get_or_create_project $SERVICE_PROJECT_NAME) + iniset $CINDER_CONF DEFAULT cinder_internal_tenant_user_id $(get_or_create_user "cinder") +} + +function configure_cinder_image_volume_cache { + # Expect CINDER_CACHE_ENABLED_FOR_BACKENDS to be a list of backends + # similar to CINDER_ENABLED_BACKENDS with NAME:TYPE where NAME will + # be the backend specific configuration stanza in cinder.conf. + for be in ${CINDER_CACHE_ENABLED_FOR_BACKENDS//,/ }; do + local be_name=${be##*:} + + iniset $CINDER_CONF $be_name image_volume_cache_enabled $CINDER_IMG_CACHE_ENABLED + + if [[ -n $CINDER_IMG_CACHE_SIZE_GB ]]; then + iniset $CINDER_CONF $be_name image_volume_cache_max_size_gb $CINDER_IMG_CACHE_SIZE_GB + fi + + if [[ -n $CINDER_IMG_CACHE_SIZE_COUNT ]]; then + iniset $CINDER_CONF $be_name image_volume_cache_max_count $CINDER_IMG_CACHE_SIZE_COUNT + fi + done +} + +function configure_cinder_volume_upload { + # Expect UPLOAD_VOLUME_OPTIMIZED_FOR_BACKENDS to be a list of backends + # similar to CINDER_ENABLED_BACKENDS with NAME:TYPE where NAME will + # be the backend specific configuration stanza in cinder.conf. + local be be_name + for be in ${CINDER_UPLOAD_OPTIMIZED_BACKENDS//,/ }; do + be_name=${be##*:} + + iniset $CINDER_CONF $be_name image_upload_use_cinder_backend $CINDER_UPLOAD_OPTIMIZED + iniset $CINDER_CONF $be_name image_upload_use_internal_tenant $CINDER_UPLOAD_INTERNAL_TENANT + done +} + +function init_cinder_service_user_conf { + configure_keystone_authtoken_middleware $CINDER_CONF cinder service_user + iniset $CINDER_CONF service_user send_service_user_token True +} + +# Restore xtrace +$_XTRACE_CINDER + +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: diff --git a/lib/cinder_backends/ceph b/lib/cinder_backends/ceph new file mode 100644 index 0000000000..0b465730c0 --- /dev/null +++ b/lib/cinder_backends/ceph @@ -0,0 +1,51 @@ +#!/bin/bash +# +# lib/cinder_backends/ceph +# Configure the ceph backend + +# Enable with: +# +# CINDER_ENABLED_BACKENDS+=,ceph:ceph + +# Dependencies: +# +# - ``functions`` file +# - ``cinder`` configurations + +# configure_ceph_backend_lvm - called from configure_cinder() + + +# Save trace setting +_XTRACE_CINDER_CEPH=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + + +# Entry Points +# ------------ + +# configure_cinder_backend_ceph - Set config files, create data dirs, etc +# configure_cinder_backend_ceph $name +function configure_cinder_backend_ceph { + local be_name=$1 + + iniset $CINDER_CONF $be_name volume_backend_name $be_name + iniset $CINDER_CONF $be_name volume_driver "cinder.volume.drivers.rbd.RBDDriver" + iniset $CINDER_CONF $be_name rbd_ceph_conf "$CEPH_CONF_FILE" + iniset $CINDER_CONF $be_name rbd_pool "$CINDER_CEPH_POOL" + iniset $CINDER_CONF $be_name rbd_user "$CINDER_CEPH_USER" + iniset $CINDER_CONF $be_name rbd_secret_uuid "$CINDER_CEPH_UUID" + iniset $CINDER_CONF $be_name rbd_flatten_volume_from_snapshot False + iniset $CINDER_CONF $be_name rbd_max_clone_depth 5 + iniset $CINDER_CONF DEFAULT glance_api_version 2 +} + +# Restore xtrace +$_XTRACE_CINDER_CEPH + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/cinder_backends/ceph_iscsi b/lib/cinder_backends/ceph_iscsi new file mode 100644 index 0000000000..94412e0da6 --- /dev/null +++ b/lib/cinder_backends/ceph_iscsi @@ -0,0 +1,56 @@ +#!/bin/bash +# +# lib/cinder_backends/ceph_iscsi +# Configure the ceph_iscsi backend + +# Enable with: +# +# 
CINDER_ENABLED_BACKENDS+=,ceph_iscsi:ceph_iscsi +# +# Optional paramteters: +# CEPH_ISCSI_API_URL= +# +# Dependencies: +# +# - ``functions`` file +# - ``cinder`` configurations + +# configure_ceph_backend_ceph_iscsi - called from configure_cinder() + + +# Save trace setting +_XTRACE_CINDER_CEPH_ISCSI=$(set +o | grep xtrace) +set +o xtrace + +# Entry Points +# ------------ + +# configure_cinder_backend_ceph_iscsi - Set config files, create data dirs, etc +# configure_cinder_backend_ceph_iscsi $name +function configure_cinder_backend_ceph_iscsi { + local be_name=$1 + + CEPH_ISCSI_API_URL=${CEPH_ISCSI_API_URL:-http://$CEPH_ISCSI_API_HOST:$CEPH_ISCSI_API_PORT} + + iniset $CINDER_CONF $be_name volume_backend_name $be_name + iniset $CINDER_CONF $be_name volume_driver "cinder.volume.drivers.ceph.rbd_iscsi.RBDISCSIDriver" + iniset $CINDER_CONF $be_name rbd_ceph_conf "$CEPH_CONF_FILE" + iniset $CINDER_CONF $be_name rbd_pool "$CINDER_CEPH_POOL" + iniset $CINDER_CONF $be_name rbd_user "$CINDER_CEPH_USER" + iniset $CINDER_CONF $be_name rbd_iscsi_api_user "$CEPH_ISCSI_API_USER" + iniset $CINDER_CONF $be_name rbd_iscsi_api_password "$CEPH_ISCSI_API_PASSWORD" + iniset $CINDER_CONF $be_name rbd_iscsi_api_url "$CEPH_ISCSI_API_URL" + iniset $CINDER_CONF $be_name rbd_iscsi_target_iqn "$CEPH_ISCSI_TARGET_IQN" + iniset $CINDER_CONF $be_name rbd_flatten_volume_from_snapshot False + iniset $CINDER_CONF $be_name rbd_max_clone_depth 5 + iniset $CINDER_CONF DEFAULT glance_api_version 2 + + pip_install rbd-iscsi-client +} + +# Restore xtrace +$_XTRACE_CINDER_CEPH_ISCSI + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/cinder_backends/fake b/lib/cinder_backends/fake new file mode 100644 index 0000000000..4749aced69 --- /dev/null +++ b/lib/cinder_backends/fake @@ -0,0 +1,47 @@ +#!/bin/bash +# +# lib/cinder_backends/fake +# Configure the Fake backend + +# Enable with: +# +# CINDER_ENABLED_BACKENDS+=,fake:fake + +# Dependencies: +# +# - ``functions`` file +# - ``cinder`` configurations + +# CINDER_CONF + +# clean_cinder_backend_fake - called from clean_cinder() +# configure_cinder_backend_fake - called from configure_cinder() +# init_cinder_backend_fake - called from init_cinder() + + +# Save trace setting +_XTRACE_CINDER_FAKE=$(set +o | grep xtrace) +set +o xtrace + + +function cleanup_cinder_backend_fake { + local be_name=$1 +} + +function configure_cinder_backend_fake { + local be_name=$1 + + iniset $CINDER_CONF $be_name volume_backend_name $be_name + iniset $CINDER_CONF $be_name volume_driver "cinder.tests.fake_driver.FakeLoggingVolumeDriver" + +} + +function init_cinder_backend_fake { + local be_name=$1 +} + +# Restore xtrace +$_XTRACE_CINDER_FAKE + +# mode: shell-script +# End: diff --git a/lib/cinder_backends/fake_gate b/lib/cinder_backends/fake_gate new file mode 100644 index 0000000000..3b9f1d1164 --- /dev/null +++ b/lib/cinder_backends/fake_gate @@ -0,0 +1,74 @@ +#!/bin/bash +# +# lib/cinder_backends/lvm +# Configure the LVM backend + +# Enable with: +# +# CINDER_ENABLED_BACKENDS+=,fake_gate:lvmname + +# Dependencies: +# +# - ``functions`` file +# - ``cinder`` configurations + +# CINDER_CONF +# DATA_DIR +# VOLUME_GROUP_NAME + +# clean_cinder_backend_lvm - called from clean_cinder() +# configure_cinder_backend_lvm - called from configure_cinder() +# init_cinder_backend_lvm - called from init_cinder() + + +# Save trace setting +_XTRACE_CINDER_LVM=$(set +o | grep xtrace) +set +o xtrace + + +# TODO: resurrect backing device...need to know how to set values 
+#VOLUME_BACKING_DEVICE=${VOLUME_BACKING_DEVICE:-} + +# Entry Points +# ------------ + +# cleanup_cinder_backend_lvm - Delete volume group and remove backing file +# cleanup_cinder_backend_lvm $be_name +function cleanup_cinder_backend_lvm { + local be_name=$1 + + # Campsite rule: leave behind a volume group at least as clean as we found it + clean_lvm_volume_group $VOLUME_GROUP_NAME-$be_name + clean_lvm_filter +} + +# configure_cinder_backend_lvm - Set config files, create data dirs, etc +# configure_cinder_backend_lvm $be_name +function configure_cinder_backend_lvm { + local be_name=$1 + + iniset $CINDER_CONF $be_name volume_backend_name $be_name + iniset $CINDER_CONF $be_name volume_driver "cinder.tests.fake_driver.FakeGateDriver" + iniset $CINDER_CONF $be_name volume_group $VOLUME_GROUP_NAME-$be_name + iniset $CINDER_CONF $be_name target_helper "$CINDER_TARGET_HELPER" + iniset $CINDER_CONF $be_name lvm_type "$CINDER_LVM_TYPE" + + if [[ "$CINDER_VOLUME_CLEAR" == "non" ]]; then + iniset $CINDER_CONF $be_name volume_clear none + fi +} + +# init_cinder_backend_lvm - Initialize volume group +# init_cinder_backend_lvm $be_name +function init_cinder_backend_lvm { + local be_name=$1 + + # Start with a clean volume group + init_lvm_volume_group $VOLUME_GROUP_NAME-$be_name $VOLUME_BACKING_FILE_SIZE +} + +# Restore xtrace +$_XTRACE_CINDER_LVM + +# mode: shell-script +# End: diff --git a/lib/cinder_backends/glusterfs b/lib/cinder_backends/glusterfs new file mode 100644 index 0000000000..4e34f8ef6c --- /dev/null +++ b/lib/cinder_backends/glusterfs @@ -0,0 +1,48 @@ +#!/bin/bash +# +# lib/cinder_backends/glusterfs +# Configure the glusterfs backend + +# Enable with: +# +# CINDER_ENABLED_BACKENDS+=,glusterfs: + +# Dependencies: +# +# - ``functions`` file +# - ``cinder`` configurations + +# CINDER_CONF +# CINDER_CONF_DIR +# CINDER_GLUSTERFS_SHARES - Contents of glusterfs shares config file + +# configure_cinder_backend_glusterfs - Configure Cinder for GlusterFS backends + +# Save trace setting +_XTRACE_CINDER_GLUSTERFS=$(set +o | grep xtrace) +set +o xtrace + + +# Entry Points +# ------------ + +# configure_cinder_backend_glusterfs - Set config files, create data dirs, etc +function configure_cinder_backend_glusterfs { + local be_name=$1 + iniset $CINDER_CONF $be_name volume_backend_name $be_name + iniset $CINDER_CONF $be_name volume_driver "cinder.volume.drivers.glusterfs.GlusterfsDriver" + iniset $CINDER_CONF $be_name glusterfs_shares_config "$CINDER_CONF_DIR/glusterfs-shares-$be_name.conf" + + if [[ -n "$CINDER_GLUSTERFS_SHARES" ]]; then + CINDER_GLUSTERFS_SHARES=$(echo $CINDER_GLUSTERFS_SHARES | tr ";" "\n") + echo "$CINDER_GLUSTERFS_SHARES" | tee "$CINDER_CONF_DIR/glusterfs-shares-$be_name.conf" + fi +} + + +# Restore xtrace +$_XTRACE_CINDER_GLUSTERFS + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/cinder_backends/lvm b/lib/cinder_backends/lvm new file mode 100644 index 0000000000..42865119da --- /dev/null +++ b/lib/cinder_backends/lvm @@ -0,0 +1,74 @@ +#!/bin/bash +# +# lib/cinder_backends/lvm +# Configure the LVM backend + +# Enable with: +# +# CINDER_ENABLED_BACKENDS+=,lvm:lvmname + +# Dependencies: +# +# - ``functions`` file +# - ``cinder`` configurations + +# CINDER_CONF +# DATA_DIR +# VOLUME_GROUP_NAME + +# clean_cinder_backend_lvm - called from clean_cinder() +# configure_cinder_backend_lvm - called from configure_cinder() +# init_cinder_backend_lvm - called from init_cinder() + + +# Save trace setting +_XTRACE_CINDER_LVM=$(set +o | grep xtrace) +set +o xtrace + + +# 
TODO: resurrect backing device...need to know how to set values +#VOLUME_BACKING_DEVICE=${VOLUME_BACKING_DEVICE:-} + +# Entry Points +# ------------ + +# cleanup_cinder_backend_lvm - Delete volume group and remove backing file +# cleanup_cinder_backend_lvm $be_name +function cleanup_cinder_backend_lvm { + local be_name=$1 + + # Campsite rule: leave behind a volume group at least as clean as we found it + clean_lvm_volume_group $VOLUME_GROUP_NAME-$be_name + clean_lvm_filter +} + +# configure_cinder_backend_lvm - Set config files, create data dirs, etc +# configure_cinder_backend_lvm $be_name +function configure_cinder_backend_lvm { + local be_name=$1 + + iniset $CINDER_CONF $be_name volume_backend_name $be_name + iniset $CINDER_CONF $be_name volume_driver "cinder.volume.drivers.lvm.LVMVolumeDriver" + iniset $CINDER_CONF $be_name volume_group $VOLUME_GROUP_NAME-$be_name + iniset $CINDER_CONF $be_name target_helper "$CINDER_TARGET_HELPER" + iniset $CINDER_CONF $be_name target_protocol "$CINDER_TARGET_PROTOCOL" + iniset $CINDER_CONF $be_name target_port "$CINDER_TARGET_PORT" + iniset $CINDER_CONF $be_name target_prefix "$CINDER_TARGET_PREFIX" + iniset $CINDER_CONF $be_name lvm_type "$CINDER_LVM_TYPE" + iniset $CINDER_CONF $be_name volume_clear "$CINDER_VOLUME_CLEAR" +} + +# init_cinder_backend_lvm - Initialize volume group +# init_cinder_backend_lvm $be_name +function init_cinder_backend_lvm { + local be_name=$1 + + # Start with a clean volume group + init_lvm_volume_group $VOLUME_GROUP_NAME-$be_name $VOLUME_BACKING_FILE_SIZE +} + +# Restore xtrace +$_XTRACE_CINDER_LVM + +# mode: shell-script +# End: diff --git a/lib/cinder_backends/netapp_iscsi b/lib/cinder_backends/netapp_iscsi new file mode 100644 index 0000000000..5cce30a6d3 --- /dev/null +++ b/lib/cinder_backends/netapp_iscsi @@ -0,0 +1,66 @@ +#!/bin/bash +# +# lib/cinder_backends/netapp_iscsi +# Configure the NetApp iSCSI driver + +# Enable with: +# +# iSCSI: +# CINDER_ENABLED_BACKENDS+=,netapp_iscsi: + +# Dependencies: +# +# - ``functions`` file +# - ``cinder`` configurations + +# ``CINDER_CONF`` +# ``CINDER_CONF_DIR`` +# ``CINDER_ENABLED_BACKENDS`` + +# configure_cinder_backend_netapp_iscsi - configure iSCSI + +# Save trace setting +_XTRACE_CINDER_NETAPP=$(set +o | grep xtrace) +set +o xtrace + + +# Entry Points +# ------------ + +# configure_cinder_backend_netapp_iscsi - Set config files, create data dirs, etc +function configure_cinder_backend_netapp_iscsi { + # To use NetApp, set the following in local.conf: + # CINDER_ENABLED_BACKENDS+=,netapp_iscsi: + # NETAPP_MODE=ontap_7mode|ontap_cluster + # NETAPP_IP= + # NETAPP_LOGIN= + # NETAPP_PASSWORD= + # NETAPP_ISCSI_VOLUME_LIST= + + # In ontap_cluster mode, the following also needs to be defined: + # NETAPP_ISCSI_VSERVER= + + local be_name=$1 + iniset $CINDER_CONF $be_name volume_backend_name $be_name + iniset $CINDER_CONF $be_name volume_driver "cinder.volume.drivers.netapp.common.NetAppDriver" + iniset $CINDER_CONF $be_name netapp_storage_family ${NETAPP_MODE:-ontap_7mode} + iniset $CINDER_CONF $be_name netapp_server_hostname $NETAPP_IP + iniset $CINDER_CONF $be_name netapp_login $NETAPP_LOGIN + iniset $CINDER_CONF $be_name netapp_password $NETAPP_PASSWORD + iniset $CINDER_CONF $be_name netapp_volume_list $NETAPP_ISCSI_VOLUME_LIST + + iniset $CINDER_CONF $be_name netapp_storage_protocol iscsi + iniset $CINDER_CONF $be_name netapp_transport_type https + + if [[ "$NETAPP_MODE" == "ontap_cluster" ]]; then + iniset $CINDER_CONF $be_name netapp_vserver $NETAPP_ISCSI_VSERVER + fi +} + + +# 
Restore xtrace +$_XTRACE_CINDER_NETAPP + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/cinder_backends/netapp_nfs b/lib/cinder_backends/netapp_nfs new file mode 100644 index 0000000000..7ba36d2a3b --- /dev/null +++ b/lib/cinder_backends/netapp_nfs @@ -0,0 +1,77 @@ +#!/bin/bash +# +# lib/cinder_backends/netapp_nfs +# Configure the NetApp NFS driver + +# Enable with: +# +# NFS: +# CINDER_ENABLED_BACKENDS+=,netapp_nfs: + +# Dependencies: +# +# - ``functions`` file +# - ``cinder`` configurations + +# ``CINDER_CONF`` +# ``CINDER_CONF_DIR`` +# ``CINDER_ENABLED_BACKENDS`` + +# configure_cinder_backend_netapp_nfs - configure NFS + +# Save trace setting +_XTRACE_CINDER_NETAPP=$(set +o | grep xtrace) +set +o xtrace + + +# Entry Points +# ------------ + +# configure_cinder_backend_netapp_nfs - Set config files, create data dirs, etc +function configure_cinder_backend_netapp_nfs { + # To use NetApp, set the following in local.conf: + # CINDER_ENABLED_BACKENDS+=,netapp_nfs: + # NETAPP_MODE=ontap_7mode|ontap_cluster + # NETAPP_IP= + # NETAPP_LOGIN= + # NETAPP_PASSWORD= + # NETAPP_NFS_VOLUME_LIST= + + # In ontap_cluster mode, the following also needs to be defined: + # NETAPP_NFS_VSERVER= + + local be_name=$1 + iniset $CINDER_CONF $be_name volume_backend_name $be_name + iniset $CINDER_CONF $be_name volume_driver "cinder.volume.drivers.netapp.common.NetAppDriver" + iniset $CINDER_CONF $be_name netapp_storage_family ${NETAPP_MODE:-ontap_7mode} + iniset $CINDER_CONF $be_name netapp_server_hostname $NETAPP_IP + iniset $CINDER_CONF $be_name netapp_login $NETAPP_LOGIN + iniset $CINDER_CONF $be_name netapp_password $NETAPP_PASSWORD + + iniset $CINDER_CONF $be_name netapp_storage_protocol nfs + iniset $CINDER_CONF $be_name netapp_transport_type https + iniset $CINDER_CONF $be_name nfs_shares_config $CINDER_CONF_DIR/netapp_shares.conf + + echo "$NETAPP_NFS_VOLUME_LIST" | tee "$CINDER_CONF_DIR/netapp_shares.conf" + + if [[ "$NETAPP_MODE" == "ontap_cluster" ]]; then + iniset $CINDER_CONF $be_name netapp_vserver $NETAPP_NFS_VSERVER + fi +} + +function cleanup_cinder_backend_netapp_nfs { + # Clean up remaining NFS mounts + # Be blunt and do them all + local m + for m in $CINDER_STATE_PATH/mnt/*; do + sudo umount $m + done +} + + +# Restore xtrace +$_XTRACE_CINDER_NETAPP + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/cinder_backends/nfs b/lib/cinder_backends/nfs new file mode 100644 index 0000000000..f3fcbeff19 --- /dev/null +++ b/lib/cinder_backends/nfs @@ -0,0 +1,54 @@ +#!/bin/bash +# +# lib/cinder_backends/nfs +# Configure the nfs backend + +# Enable with: +# +# CINDER_ENABLED_BACKENDS+=,nfs: + +# Dependencies: +# +# - ``functions`` file +# - ``cinder`` configurations + +# CINDER_CONF +# CINDER_CONF_DIR +# CINDER_NFS_SERVERPATH - contents of nfs shares config file + +# configure_cinder_backend_nfs - Configure Cinder for NFS backends + +# Save trace setting +_XTRACE_CINDER_NFS=$(set +o | grep xtrace) +set +o xtrace + + +# Entry Points +# ------------ + +# configure_cinder_backend_nfs - Set config files, create data dirs, etc +function configure_cinder_backend_nfs { + local be_name=$1 + iniset $CINDER_CONF $be_name volume_backend_name $be_name + iniset $CINDER_CONF $be_name volume_driver "cinder.volume.drivers.nfs.NfsDriver" + iniset $CINDER_CONF $be_name nfs_shares_config "$CINDER_CONF_DIR/nfs-shares-$be_name.conf" + iniset $CINDER_CONF $be_name nas_host localhost + iniset $CINDER_CONF $be_name nas_share_path ${NFS_EXPORT_DIR} + iniset $CINDER_CONF $be_name 
nas_secure_file_operations \ + ${NFS_SECURE_FILE_OPERATIONS} + iniset $CINDER_CONF $be_name nas_secure_file_permissions \ + ${NFS_SECURE_FILE_PERMISSIONS} + + # NFS snapshot support is currently opt-in only. + iniset $CINDER_CONF $be_name nfs_snapshot_support True + + echo "$CINDER_NFS_SERVERPATH" | tee "$CINDER_CONF_DIR/nfs-shares-$be_name.conf" +} + + +# Restore xtrace +$_XTRACE_CINDER_NFS + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/cinder_backends/vmdk b/lib/cinder_backends/vmdk new file mode 100644 index 0000000000..3a6a5cf2ff --- /dev/null +++ b/lib/cinder_backends/vmdk @@ -0,0 +1,47 @@ +#!/bin/bash +# +# lib/cinder_backends/vmdk +# Configure the VMware vmdk backend + +# Enable with: +# +# CINDER_ENABLED_BACKENDS+=,vmdk: + +# Dependencies: +# +# - ``functions`` file +# - ``cinder`` configurations + +# configure_cinder_backend_vmdk - Configure Cinder for VMware vmdk backends + +# Save trace setting +_XTRACE_CINDER_VMDK=$(set +o | grep xtrace) +set +o xtrace + + +# Entry Points +# ------------ + +# configure_cinder_backend_vmdk - Set config files, create data dirs, etc +function configure_cinder_backend_vmdk { + # To use VMware vmdk backend, set the following in local.conf: + # CINDER_ENABLED_BACKENDS+=,vmdk: + # VMWAREAPI_IP= + # VMWAREAPI_USER= + # VMWAREAPI_PASSWORD= + + local be_name=$1 + iniset $CINDER_CONF $be_name volume_backend_name $be_name + iniset $CINDER_CONF $be_name volume_driver "cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver" + iniset $CINDER_CONF $be_name vmware_host_ip "$VMWAREAPI_IP" + iniset $CINDER_CONF $be_name vmware_host_username "$VMWAREAPI_USER" + iniset $CINDER_CONF $be_name vmware_host_password "$VMWAREAPI_PASSWORD" +} + + +# Restore xtrace +$_XTRACE_CINDER_VMDK + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/cinder_backups/ceph b/lib/cinder_backups/ceph new file mode 100644 index 0000000000..e4d6b96407 --- /dev/null +++ b/lib/cinder_backups/ceph @@ -0,0 +1,58 @@ +#!/bin/bash +# +# lib/cinder_backups/ceph +# Configure the ceph backup driver + +# Enable with: +# +# CINDER_BACKUP_DRIVER=ceph + +# Dependencies: +# +# - ``functions`` file +# - ``cinder`` configurations + +# Save trace setting +_XTRACE_CINDER_CEPH=$(set +o | grep xtrace) +set +o xtrace + +# Defaults +# -------- + +CINDER_BAK_CEPH_MAX_SNAPSHOTS=${CINDER_BAK_CEPH_MAX_SNAPSHOTS:-0} +CINDER_BAK_CEPH_POOL=${CINDER_BAK_CEPH_POOL:-backups} +CINDER_BAK_CEPH_POOL_PG=${CINDER_BAK_CEPH_POOL_PG:-8} +CINDER_BAK_CEPH_POOL_PGP=${CINDER_BAK_CEPH_POOL_PGP:-8} +CINDER_BAK_CEPH_USER=${CINDER_BAK_CEPH_USER:-cinder-bak} + + +function configure_cinder_backup_ceph { + # Execute this part only when cephadm is not used + if [[ "$CEPHADM_DEPLOY" = "False" ]]; then + sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_BAK_CEPH_POOL} ${CINDER_BAK_CEPH_POOL_PG} ${CINDER_BAK_CEPH_POOL_PGP} + if [[ "$REMOTE_CEPH" = "False" && "$CEPH_REPLICAS" -ne 1 ]]; then + sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} crush_ruleset ${RULE_ID} + fi + sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_BAK_CEPH_USER} mon "profile rbd" osd "profile rbd pool=${CINDER_BAK_CEPH_POOL}, profile rbd pool=${CINDER_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring + sudo chown $STACK_USER ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring + fi + + iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.ceph.CephBackupDriver" + iniset $CINDER_CONF DEFAULT backup_ceph_conf "$CEPH_CONF_FILE" + iniset $CINDER_CONF 
DEFAULT backup_ceph_max_snapshots "$CINDER_BAK_CEPH_MAX_SNAPSHOTS" + iniset $CINDER_CONF DEFAULT backup_ceph_pool "$CINDER_BAK_CEPH_POOL" + iniset $CINDER_CONF DEFAULT backup_ceph_user "$CINDER_BAK_CEPH_USER" + iniset $CINDER_CONF DEFAULT backup_ceph_stripe_unit 0 + iniset $CINDER_CONF DEFAULT backup_ceph_stripe_count 0 + iniset $CINDER_CONF DEFAULT restore_discard_excess_bytes True +} + +# init_cinder_backup_ceph: nothing to do +# cleanup_cinder_backup_ceph: nothing to do + +# Restore xtrace +$_XTRACE_CINDER_CEPH + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/cinder_backups/s3_swift b/lib/cinder_backups/s3_swift new file mode 100644 index 0000000000..6fb248606e --- /dev/null +++ b/lib/cinder_backups/s3_swift @@ -0,0 +1,45 @@ +#!/bin/bash +# +# lib/cinder_backups/s3_swift +# Configure the s3 backup driver with swift s3api +# +# TODO: create lib/cinder_backup/s3 for external s3 compatible storage + +# Enable with: +# +# CINDER_BACKUP_DRIVER=s3_swift +# enable_service s3api s-proxy s-object s-container s-account + +# Dependencies: +# +# - ``functions`` file +# - ``cinder`` configurations + +# Save trace setting +_XTRACE_CINDER_S3_SWIFT=$(set +o | grep xtrace) +set +o xtrace + +function configure_cinder_backup_s3_swift { + # This configuration requires swift and s3api. If we're + # on a subnode we might not know if they are enabled + iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.s3.S3BackupDriver" + iniset $CINDER_CONF DEFAULT backup_s3_endpoint_url "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$S3_SERVICE_PORT" +} + +function init_cinder_backup_s3_swift { + openstack ec2 credential create + iniset $CINDER_CONF DEFAULT backup_s3_store_access_key "$(openstack ec2 credential list -c Access -f value)" + iniset $CINDER_CONF DEFAULT backup_s3_store_secret_key "$(openstack ec2 credential list -c Secret -f value)" + if is_service_enabled tls-proxy; then + iniset $CINDER_CONF DEFAULT backup_s3_ca_cert_file "$SSL_BUNDLE_FILE" + fi +} + +# cleanup_cinder_backup_s3_swift: nothing to do + +# Restore xtrace +$_XTRACE_CINDER_S3_SWIFT + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/cinder_backups/swift b/lib/cinder_backups/swift new file mode 100644 index 0000000000..c7ec306246 --- /dev/null +++ b/lib/cinder_backups/swift @@ -0,0 +1,41 @@ +#!/bin/bash +# +# lib/cinder_backups/swift +# Configure the swift backup driver + +# Enable with: +# +# CINDER_BACKUP_DRIVER=swift + +# Dependencies: +# +# - ``functions`` file +# - ``cinder`` configurations + +# Save trace setting +_XTRACE_CINDER_SWIFT=$(set +o | grep xtrace) +set +o xtrace + + +function configure_cinder_backup_swift { + # NOTE(mriedem): The default backup driver uses swift and if we're + # on a subnode we might not know if swift is enabled, but chances are + # good that it is on the controller so configure the backup service + # to use it. 
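    # A hedged single-node local.conf sketch for this path (the service names
    # are the usual devstack ones; adjust to your layout):
    #
    #   enable_service c-bak s-proxy s-container s-object s-account
    #   CINDER_BACKUP_DRIVER=swift
    #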
+ iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.swift.SwiftBackupDriver" + iniset $CINDER_CONF DEFAULT backup_swift_url "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$SWIFT_DEFAULT_BIND_PORT/v1/AUTH_" + if is_service_enabled tls-proxy; then + iniset $CINDER_CONF DEFAULT backup_swift_ca_cert_file $SSL_BUNDLE_FILE + fi +} + +# init_cinder_backup_swift: nothing to do +# cleanup_cinder_backup_swift: nothing to do + + +# Restore xtrace +$_XTRACE_CINDER_SWIFT + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/cinder_plugins/glusterfs b/lib/cinder_plugins/glusterfs new file mode 100644 index 0000000000..329dd6c649 --- /dev/null +++ b/lib/cinder_plugins/glusterfs @@ -0,0 +1,52 @@ +#!/bin/bash +# +# lib/cinder_plugins/glusterfs +# Configure the glusterfs driver + +# Enable with: +# +# CINDER_DRIVER=glusterfs + +# Dependencies: +# +# - ``functions`` file +# - ``cinder`` configurations + +# configure_cinder_driver - make configuration changes, including those to other services + +# Save trace setting +_XTRACE_CINDER_GLUSTERFS=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + +# Set up default directories + + +# Entry Points +# ------------ + +# configure_cinder_driver - Set config files, create data dirs, etc +function configure_cinder_driver { + # To use glusterfs, set the following in localrc: + # CINDER_DRIVER=glusterfs + # CINDER_GLUSTERFS_SHARES="127.0.0.1:/vol1;127.0.0.1:/vol2" + # Shares are : and separated by semicolons. + + iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.glusterfs.GlusterfsDriver" + iniset $CINDER_CONF DEFAULT glusterfs_shares_config "$CINDER_CONF_DIR/glusterfs_shares" + touch $CINDER_CONF_DIR/glusterfs_shares + if [ ! -z "$CINDER_GLUSTERFS_SHARES" ]; then + CINDER_GLUSTERFS_SHARES=$(echo $CINDER_GLUSTERFS_SHARES | tr ";" "\n") + echo "$CINDER_GLUSTERFS_SHARES" > $CINDER_CONF_DIR/glusterfs_shares + fi +} + +# Restore xtrace +$_XTRACE_CINDER_GLUSTERFS + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/cinder_plugins/nfs b/lib/cinder_plugins/nfs new file mode 100644 index 0000000000..6e4ffe068e --- /dev/null +++ b/lib/cinder_plugins/nfs @@ -0,0 +1,43 @@ +#!/bin/bash +# +# lib/cinder_plugins/nfs +# Configure the nfs driver + +# Enable with: +# +# CINDER_DRIVER=nfs + +# Dependencies: +# +# - ``functions`` file +# - ``cinder`` configurations + +# configure_cinder_driver - make configuration changes, including those to other services + +# Save trace setting +_XTRACE_CINDER_NFS=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + +# Set up default directories + + +# Entry Points +# ------------ + +# configure_cinder_driver - Set config files, create data dirs, etc +function configure_cinder_driver { + iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.nfs.NfsDriver" + iniset $CINDER_CONF DEFAULT nfs_shares_config "$CINDER_CONF_DIR/nfs_shares.conf" + echo "$CINDER_NFS_SERVERPATH" | tee "$CINDER_CONF_DIR/nfs_shares.conf" +} + +# Restore xtrace +$_XTRACE_CINDER_NFS + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/cinder_plugins/vsphere b/lib/cinder_plugins/vsphere new file mode 100644 index 0000000000..1b28ffe602 --- /dev/null +++ b/lib/cinder_plugins/vsphere @@ -0,0 +1,44 @@ +#!/bin/bash +# +# lib/cinder_plugins/vsphere +# Configure the vsphere driver + +# Enable with: +# +# CINDER_DRIVER=vsphere + +# Dependencies: +# +# - ``functions`` file +# - ``cinder`` configurations + +# configure_cinder_driver - make configuration changes, including those to 
other services + +# Save trace setting +_XTRACE_CINDER_VSPHERE=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + +# Set up default directories + + +# Entry Points +# ------------ + +# configure_cinder_driver - Set config files, create data dirs, etc +function configure_cinder_driver { + iniset $CINDER_CONF DEFAULT vmware_host_ip "$VMWAREAPI_IP" + iniset $CINDER_CONF DEFAULT vmware_host_username "$VMWAREAPI_USER" + iniset $CINDER_CONF DEFAULT vmware_host_password "$VMWAREAPI_PASSWORD" + iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver" +} + +# Restore xtrace +$_XTRACE_CINDER_VSPHERE + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/database b/lib/database new file mode 100644 index 0000000000..78563f6f6d --- /dev/null +++ b/lib/database @@ -0,0 +1,145 @@ +#!/bin/bash +# +# lib/database +# Interface for interacting with different database backends + +# Dependencies: +# ``ENABLED_SERVICES`` must be defined + +# ``DATABASE_BACKENDS`` will contain a list of available database backends +# after sourcing this file. + +# This is a wrapper for the specific database backends available. +# Each database must implement four functions: +# +# - recreate_database_$DATABASE_TYPE +# - install_database_$DATABASE_TYPE +# - configure_database_$DATABASE_TYPE +# - database_connection_url_$DATABASE_TYPE +# +# and call register_database $DATABASE_TYPE + +# Save trace setting +_XTRACE_LIB_DB=$(set +o | grep xtrace) +set +o xtrace + +DATABASE_BACKENDS="" + +# Register a database backend +# +# $1 The name of the database backend +# +# This is required to be defined before the specific database scripts are sourced +function register_database { + DATABASE_BACKENDS+=" $1" +} + +# Sourcing the database libs sets DATABASE_BACKENDS with the available list +for f in $TOP_DIR/lib/databases/*; do + source $f; +done + +# ``DATABASE_BACKENDS`` now contains a list of the supported databases +# Look in ``ENABLED_SERVICES`` to see if one has been selected +for db in $DATABASE_BACKENDS; do + # Set the type for the rest of the backend to use + if is_service_enabled $db; then + # Set this now for the rest of the database functions + DATABASE_TYPE=$db + fi +done +# If ``DATABASE_TYPE`` is unset here no database was selected +# This is not an error as multi-node installs will do this on the compute nodes + + +# Functions +# --------- + +# Get rid of everything enough to cleanly change database backends +function cleanup_database { + cleanup_database_$DATABASE_TYPE +} + +# Set the database type based on the configuration +function initialize_database_backends { + for backend in $DATABASE_BACKENDS; do + is_service_enabled $backend && DATABASE_TYPE=$backend + done + + [ -z "$DATABASE_TYPE" ] && return 1 + + # For backward-compatibility, read in the MYSQL_HOST/USER variables and use + # them as the default values for the DATABASE_HOST/USER variables. + MYSQL_HOST=${MYSQL_HOST:-$SERVICE_LOCAL_HOST} + MYSQL_USER=${MYSQL_USER:-root} + + # Set DATABASE_HOST equal to MYSQL_HOST. If SERVICE_IP_VERSION is equal to 6, + # set DATABASE_HOST equal to [MYSQL_HOST]. MYSQL_HOST cannot use brackets due + # to mysql not using bracketing for IPv6 addresses. DATABASE_HOST must have brackets + # due to sqlalchemy only reading IPv6 addresses with brackets. 
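    # For example (addresses and password purely illustrative): with
    # SERVICE_IP_VERSION=6 and MYSQL_HOST=fd00::10, DATABASE_HOST becomes
    # [fd00::10], so SQLAlchemy URLs come out like
    # mysql+pymysql://root:secret@[fd00::10]/nova?charset=utf8 while the mysql
    # CLI keeps using the unbracketed fd00::10.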
+ if [[ "$SERVICE_IP_VERSION" == 6 ]]; then + DATABASE_HOST=${DATABASE_HOST:-[$MYSQL_HOST]} + else + DATABASE_HOST=${DATABASE_HOST:-${MYSQL_HOST}} + fi + + DATABASE_USER=${DATABASE_USER:-${MYSQL_USER}} + + if [ -n "$MYSQL_PASSWORD" ]; then + DATABASE_PASSWORD=$MYSQL_PASSWORD + fi + + return 0 +} + +function define_database_baseurl { + # We configure Nova, Horizon, Glance and Keystone to use MySQL as their + # database server. While they share a single server, each has their own + # database and tables. + + # By default this script will install and configure MySQL. If you want to + # use an existing server, you can pass in the user/password/host parameters. + # You will need to send the same ``DATABASE_PASSWORD`` to every host if you are doing + # a multi-node DevStack installation. + + # NOTE: Don't specify ``/db`` in this string so we can use it for multiple services + BASE_SQL_CONN=${BASE_SQL_CONN:-$(get_database_type_$DATABASE_TYPE)://$DATABASE_USER:$DATABASE_PASSWORD@$DATABASE_HOST} +} + +# Recreate a given database +# $1 The name of the database +function recreate_database { + local db=$1 + recreate_database_$DATABASE_TYPE $db +} + +# Install the database +function install_database { + install_database_$DATABASE_TYPE +} + +# Install the database Python packages +function install_database_python { + install_database_python_$DATABASE_TYPE +} + +# Configure and start the database +function configure_database { + configure_database_$DATABASE_TYPE +} + +# Generate an SQLAlchemy connection URL and output it using echo +# $1 The name of the database +function database_connection_url { + local db=$1 + database_connection_url_$DATABASE_TYPE $db +} + + +# Restore xtrace +$_XTRACE_LIB_DB + +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: diff --git a/lib/databases/mysql b/lib/databases/mysql new file mode 100644 index 0000000000..a47580ca3d --- /dev/null +++ b/lib/databases/mysql @@ -0,0 +1,273 @@ +#!/bin/bash +# +# lib/databases/mysql +# Functions to control the configuration and operation of the **MySQL** database backend + +# Dependencies: +# +# - DATABASE_{HOST,USER,PASSWORD} must be defined + +# Save trace setting +_XTRACE_DB_MYSQL=$(set +o | grep xtrace) +set +o xtrace + +MYSQL_DRIVER=${MYSQL_DRIVER:-PyMySQL} +INSTALL_DATABASE_SERVER_PACKAGES=$(trueorfalse True INSTALL_DATABASE_SERVER_PACKAGES) + +register_database mysql + +if [[ -z "$MYSQL_SERVICE_NAME" ]]; then + MYSQL_SERVICE_NAME=mysql + if is_fedora && ! 
is_oraclelinux; then + MYSQL_SERVICE_NAME=mariadb + elif [[ "$DISTRO" =~ trixie|bookworm|bullseye ]]; then + MYSQL_SERVICE_NAME=mariadb + fi +fi + +# Functions +# --------- + +function get_database_type_mysql { + if [[ "$MYSQL_DRIVER" == "PyMySQL" ]]; then + echo mysql+pymysql + else + echo mysql + fi +} + +# Get rid of everything enough to cleanly change database backends +function cleanup_database_mysql { + stop_service $MYSQL_SERVICE_NAME + if is_ubuntu; then + # Get ruthless with mysql + apt_get purge -y mysql* mariadb* + sudo rm -rf /var/lib/mysql + sudo rm -rf /etc/mysql + return + elif is_oraclelinux; then + uninstall_package mysql-community-server + sudo rm -rf /var/lib/mysql + elif is_fedora; then + uninstall_package mariadb-server + sudo rm -rf /var/lib/mysql + else + return + fi +} + +function recreate_database_mysql { + local db=$1 + mysql -u$DATABASE_USER -p$DATABASE_PASSWORD -h$MYSQL_HOST -e "DROP DATABASE IF EXISTS $db;" + mysql -u$DATABASE_USER -p$DATABASE_PASSWORD -h$MYSQL_HOST -e "CREATE DATABASE $db CHARACTER SET utf8;" +} + +function configure_database_mysql { + local my_conf mysql slow_log my_client_conf + echo_summary "Configuring and starting MySQL" + + if is_ubuntu; then + my_conf=/etc/mysql/my.cnf + elif is_oraclelinux; then + my_conf=/etc/my.cnf + elif is_fedora; then + my_conf=/etc/my.cnf + local cracklib_conf=/etc/my.cnf.d/cracklib_password_check.cnf + if [ -f "$cracklib_conf" ]; then + inicomment -sudo "$cracklib_conf" "mariadb" "plugin-load-add" + fi + else + exit_distro_not_supported "mysql configuration" + fi + + # Set fips mode on + if is_ubuntu; then + if is_fips_enabled; then + my_client_conf=/etc/mysql/mysql.conf.d/mysql.cnf + iniset -sudo $my_client_conf mysql ssl-fips-mode "on" + iniset -sudo $my_conf mysqld ssl-fips-mode "on" + fi + fi + + # Change bind-address from localhost (127.0.0.1) to any (::) + iniset -sudo $my_conf mysqld bind-address "$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)" + + # (Re)Start mysql-server + if is_fedora; then + # service is not started by default + start_service $MYSQL_SERVICE_NAME + elif is_ubuntu; then + # required since bind-address could have changed above + restart_service $MYSQL_SERVICE_NAME + fi + + # Set the root password - only works the first time. For Ubuntu, we already + # did that with debconf before installing the package, but we still try, + # because the package might have been installed already. We don't do this + # for Ubuntu 22.04+ because the authorization model change in + # version 10.4 of mariadb. See + # https://mariadb.org/authentication-in-mariadb-10-4/ + if ! (is_ubuntu && [[ ! 
"$DISTRO" =~ trixie|bookworm|bullseye ]] && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]); then + sudo mysqladmin -u root password $DATABASE_PASSWORD || true + fi + + # In case of Mariadb, giving hostname in arguments causes permission + # problems as it expects connection through socket + if is_ubuntu && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]; then + local cmd_args="-uroot -p$DATABASE_PASSWORD " + else + local cmd_args="-uroot -p$DATABASE_PASSWORD -h$SERVICE_LOCAL_HOST " + fi + + # Workaround for mariadb > 11.6.2, + # see https://bugs.launchpad.net/nova/+bug/2116186/comments/3 + min_db_ver="11.6.2" + db_version=$(sudo mysql ${cmd_args} -e "select version();" -sN | cut -d '-' -f 1) + max_db_ver=$(printf '%s\n' ${min_db_ver} ${db_version} | sort -V | tail -n 1) + if [[ "${min_db_ver}" != "${max_db_ver}" ]]; then + iniset -sudo $my_conf mysqld innodb_snapshot_isolation OFF + restart_service $MYSQL_SERVICE_NAME + fi + + # In mariadb e.g. on Ubuntu socket plugin is used for authentication + # as root so it works only as sudo. To restore old "mysql like" behaviour, + # we need to change auth plugin for root user + # TODO(frickler): simplify this logic + if is_ubuntu && [[ ! "$DISTRO" =~ bookworm|bullseye ]] && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]; then + # For Ubuntu 22.04+ we follow the model outlined in + # https://mariadb.org/authentication-in-mariadb-10-4/ + sudo mysql -e "ALTER USER $DATABASE_USER@localhost IDENTIFIED VIA mysql_native_password USING PASSWORD('$DATABASE_PASSWORD');" + fi + if ! (is_ubuntu && [[ ! "$DISTRO" =~ bookworm|bullseye ]] && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]); then + # Create DB user if it does not already exist + sudo mysql $cmd_args -e "CREATE USER IF NOT EXISTS '$DATABASE_USER'@'%' identified by '$DATABASE_PASSWORD';" + # Update the DB to give user '$DATABASE_USER'@'%' full control of the all databases: + sudo mysql $cmd_args -e "GRANT ALL PRIVILEGES ON *.* TO '$DATABASE_USER'@'%';" + fi + + # Now update ``my.cnf`` for some local needs and restart the mysql service + + # Set default db type to InnoDB + iniset -sudo $my_conf mysqld sql_mode TRADITIONAL + iniset -sudo $my_conf mysqld default-storage-engine InnoDB + iniset -sudo $my_conf mysqld max_connections 1024 + + if [[ "$DATABASE_QUERY_LOGGING" == "True" ]]; then + echo_summary "Enabling MySQL query logging" + if is_fedora; then + slow_log=/var/log/mariadb/mariadb-slow.log + else + slow_log=/var/log/mysql/mysql-slow.log + fi + sudo sed -e '/log.slow.queries/d' \ + -e '/long.query.time/d' \ + -e '/log.queries.not.using.indexes/d' \ + -i $my_conf + + # Turn on slow query log, log all queries (any query taking longer than + # 0 seconds) and log all non-indexed queries + iniset -sudo $my_conf mysqld slow-query-log 1 + iniset -sudo $my_conf mysqld slow-query-log-file $slow_log + iniset -sudo $my_conf mysqld long-query-time 0 + iniset -sudo $my_conf mysqld log-queries-not-using-indexes 1 + fi + + if [[ "$MYSQL_GATHER_PERFORMANCE" == "True" ]]; then + echo "enabling MySQL performance counting" + + # Install our sqlalchemy plugin + pip_install ${TOP_DIR}/tools/dbcounter + + # Create our stats database for accounting + recreate_database stats + mysql -u $DATABASE_USER -p$DATABASE_PASSWORD -h $MYSQL_HOST -e \ + "CREATE TABLE queries (db VARCHAR(32), op VARCHAR(32), + count INT, PRIMARY KEY (db, op)) ENGINE MEMORY" stats + fi + + if [[ "$MYSQL_REDUCE_MEMORY" == "True" ]]; then + iniset -sudo $my_conf mysqld read_buffer_size 64K + iniset -sudo $my_conf mysqld innodb_buffer_pool_size 16M + iniset -sudo $my_conf mysqld 
thread_stack 192K + iniset -sudo $my_conf mysqld thread_cache_size 8 + iniset -sudo $my_conf mysqld tmp_table_size 8M + iniset -sudo $my_conf mysqld sort_buffer_size 8M + iniset -sudo $my_conf mysqld max_allowed_packet 8M + fi + + restart_service $MYSQL_SERVICE_NAME +} + +function install_database_mysql { + if is_ubuntu; then + # Seed configuration with mysql password so that apt-get install doesn't + # prompt us for a password upon install. + sudo debconf-set-selections <$HOME/.my.cnf +[client] +user=$DATABASE_USER +password=$DATABASE_PASSWORD +EOF + + if ! is_ubuntu || [ "$MYSQL_SERVICE_NAME" != "mariadb" ]; then + echo "host=$MYSQL_HOST" >> $HOME/.my.cnf + fi + chmod 0600 $HOME/.my.cnf + fi + # Install mysql-server + if [[ "$INSTALL_DATABASE_SERVER_PACKAGES" == "True" ]]; then + if is_oraclelinux; then + install_package mysql-community-server + elif is_fedora; then + install_package mariadb-server mariadb-devel mariadb + sudo systemctl enable $MYSQL_SERVICE_NAME + elif is_ubuntu; then + install_package $MYSQL_SERVICE_NAME-server + else + exit_distro_not_supported "mysql installation" + fi + fi +} + +function install_database_python_mysql { + # Install Python client module + pip_install_gr $MYSQL_DRIVER + if [[ "$MYSQL_DRIVER" == "MySQL-python" ]]; then + ADDITIONAL_VENV_PACKAGES+=",MySQL-python" + elif [[ "$MYSQL_DRIVER" == "PyMySQL" ]]; then + ADDITIONAL_VENV_PACKAGES+=",PyMySQL" + fi +} + +function database_connection_url_mysql { + local db=$1 + local plugin + + # NOTE(danms): We don't enable perf on subnodes yet because the + # plugin is not installed there + if [[ "$MYSQL_GATHER_PERFORMANCE" == "True" ]]; then + if is_service_enabled mysql; then + plugin="&plugin=dbcounter" + fi + fi + + echo "$BASE_SQL_CONN/$db?charset=utf8$plugin" +} + + +# Restore xtrace +$_XTRACE_DB_MYSQL + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/databases/postgresql b/lib/databases/postgresql new file mode 100644 index 0000000000..2aa38ccf76 --- /dev/null +++ b/lib/databases/postgresql @@ -0,0 +1,139 @@ +#!/bin/bash +# +# lib/databases/postgresql +# Functions to control the configuration and operation of the **PostgreSQL** database backend + +# Dependencies: +# +# - DATABASE_{HOST,USER,PASSWORD} must be defined + +# Save trace setting +_XTRACE_PG=$(set +o | grep xtrace) +set +o xtrace + + +MAX_DB_CONNECTIONS=${MAX_DB_CONNECTIONS:-200} +INSTALL_DATABASE_SERVER_PACKAGES=$(trueorfalse True INSTALL_DATABASE_SERVER_PACKAGES) + +register_database postgresql + + +# Functions +# --------- + +function get_database_type_postgresql { + echo postgresql +} + +# Get rid of everything enough to cleanly change database backends +function cleanup_database_postgresql { + stop_service postgresql + if is_ubuntu; then + # Get ruthless with mysql + apt_get purge -y postgresql* + return + elif is_fedora; then + uninstall_package postgresql-server + else + return + fi +} + +function recreate_database_postgresql { + local db=$1 + # Avoid unsightly error when calling dropdb when the database doesn't exist + psql -h$DATABASE_HOST -U$DATABASE_USER -dtemplate1 -c "DROP DATABASE IF EXISTS $db" + createdb -h $DATABASE_HOST -U$DATABASE_USER -l C -T template0 -E utf8 $db +} + +function _exit_pg_init { + sudo cat /var/lib/pgsql/initdb_postgresql.log +} + +function configure_database_postgresql { + local pg_conf pg_dir pg_hba check_role version + echo_summary "Configuring and starting PostgreSQL" + if is_fedora; then + pg_hba=/var/lib/pgsql/data/pg_hba.conf + pg_conf=/var/lib/pgsql/data/postgresql.conf + if ! 
sudo [ -e $pg_hba ]; then + trap _exit_pg_init EXIT + sudo postgresql-setup initdb + trap - EXIT + fi + elif is_ubuntu; then + version=`psql --version | cut -d ' ' -f3 | cut -d. -f1-2` + if vercmp $version '>=' 9.3; then + if [ -z "`pg_lsclusters -h`" ]; then + echo 'No PostgreSQL clusters exist; will create one' + sudo pg_createcluster $version main --start + fi + fi + pg_dir=`find /etc/postgresql -name pg_hba.conf|xargs dirname` + pg_hba=$pg_dir/pg_hba.conf + pg_conf=$pg_dir/postgresql.conf + else + exit_distro_not_supported "postgresql configuration" + fi + # Listen on all addresses + sudo sed -i "/listen_addresses/s/.*/listen_addresses = '*'/" $pg_conf + # Set max_connections + sudo sed -i "/max_connections/s/.*/max_connections = $MAX_DB_CONNECTIONS/" $pg_conf + # Do password auth from all IPv4 clients + sudo sed -i "/^host/s/all\s\+127.0.0.1\/32\s\+ident/$DATABASE_USER\t0.0.0.0\/0\tpassword/" $pg_hba + # Do password auth for all IPv6 clients + sudo sed -i "/^host/s/all\s\+::1\/128\s\+ident/$DATABASE_USER\t::0\/0\tpassword/" $pg_hba + restart_service postgresql + + # Create the role if it's not here or else alter it. + check_role=$(sudo -u root sudo -u postgres -i psql -t -c "SELECT 'HERE' from pg_roles where rolname='$DATABASE_USER'") + if [[ ${check_role} == *HERE ]];then + sudo -u root sudo -u postgres -i psql -c "ALTER ROLE $DATABASE_USER WITH SUPERUSER LOGIN PASSWORD '$DATABASE_PASSWORD'" + else + sudo -u root sudo -u postgres -i psql -c "CREATE ROLE $DATABASE_USER WITH SUPERUSER LOGIN PASSWORD '$DATABASE_PASSWORD'" + fi +} + +function install_database_postgresql { + echo_summary "Installing postgresql" + local pgpass=$HOME/.pgpass + if [[ ! -e $pgpass ]]; then + cat < $pgpass +*:*:*:$DATABASE_USER:$DATABASE_PASSWORD +EOF + chmod 0600 $pgpass + else + sed -i "s/:root:\w\+/:root:$DATABASE_PASSWORD/" $pgpass + fi + if [[ "$INSTALL_DATABASE_SERVER_PACKAGES" == "True" ]]; then + if is_ubuntu; then + install_package postgresql + elif is_fedora; then + install_package postgresql-server + if is_fedora; then + sudo systemctl enable postgresql + fi + else + exit_distro_not_supported "postgresql installation" + fi + fi +} + +function install_database_python_postgresql { + # Install Python client module + pip_install_gr psycopg2 + ADDITIONAL_VENV_PACKAGES+=",psycopg2" +} + +function database_connection_url_postgresql { + local db=$1 + echo "$BASE_SQL_CONN/$db?client_encoding=utf8" +} + + +# Restore xtrace +$_XTRACE_PG + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/dstat b/lib/dstat new file mode 100644 index 0000000000..9bd0370847 --- /dev/null +++ b/lib/dstat @@ -0,0 +1,58 @@ +#!/bin/bash +# +# lib/dstat +# Functions to start and stop dstat + +# Dependencies: +# +# - ``functions`` file + +# ``stack.sh`` calls the entry points in this order: +# +# - install_dstat +# - start_dstat +# - stop_dstat + +# Save trace setting +_XTRACE_DSTAT=$(set +o | grep xtrace) +set +o xtrace + +# install_dstat() - Install prerequisites for dstat services +function install_dstat { + if is_service_enabled memory_tracker; then + # Install python libraries required by tools/mlock_report.py + pip_install_gr psutil + fi +} + +# start_dstat() - Start running processes +function start_dstat { + # A better kind of sysstat, with the top process per time slice + run_process dstat "$TOP_DIR/tools/dstat.sh $LOGDIR" + + # To enable memory_tracker add: + # enable_service memory_tracker + # to your localrc + run_process memory_tracker "$TOP_DIR/tools/memory_tracker.sh" "" "root" 
"PYTHON=python${PYTHON3_VERSION}" + + # TODO(jh): Fail when using the old service name otherwise consumers might + # never notice that is has been removed. + if is_service_enabled peakmem_tracker; then + die $LINENO "The peakmem_tracker service has been removed, use memory_tracker instead" + fi + + # To enable file_tracker add: + # enable_service file_tracker + # to your localrc + run_process file_tracker "$TOP_DIR/tools/file_tracker.sh" +} + +# stop_dstat() stop dstat process +function stop_dstat { + stop_process dstat + stop_process memory_tracker + stop_process file_tracker +} + +# Restore xtrace +$_XTRACE_DSTAT diff --git a/lib/etcd3 b/lib/etcd3 new file mode 100644 index 0000000000..0d22de8c73 --- /dev/null +++ b/lib/etcd3 @@ -0,0 +1,136 @@ +#!/bin/bash +# +# lib/etcd3 +# +# Functions to control the installation and configuration of etcd 3.x +# that provides a key-value store (and possibly other functions). + +# Dependencies: +# +# - ``functions`` file + +# ``stack.sh`` calls the entry points in this order: +# +# - start_etcd3 +# - stop_etcd3 +# - cleanup_etcd3 + +# Save trace setting +_XTRACE_ETCD3=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + +# Set up default values for etcd +ETCD_DATA_DIR="$DATA_DIR/etcd" +ETCD_SYSTEMD_SERVICE="devstack@etcd.service" +ETCD_BIN_DIR="$DEST/bin" +# Option below will mount ETCD_DATA_DIR as ramdisk, which is useful to run +# etcd-heavy services in the gate VM's, e.g. Kubernetes. +ETCD_USE_RAMDISK=$(trueorfalse True ETCD_USE_RAMDISK) +ETCD_RAMDISK_MB=${ETCD_RAMDISK_MB:-512} + +if is_ubuntu ; then + UBUNTU_RELEASE_BASE_NUM=`lsb_release -r | awk '{print $2}' | cut -d '.' -f 1` +fi + +# start_etcd3() - Starts to run the etcd process +function start_etcd3 { + local cmd="$ETCD_BIN_DIR/etcd" + cmd+=" --name $HOSTNAME --data-dir $ETCD_DATA_DIR" + cmd+=" --initial-cluster-state new --initial-cluster-token etcd-cluster-01" + cmd+=" --initial-cluster $HOSTNAME=http://$SERVICE_HOST:$ETCD_PEER_PORT" + cmd+=" --initial-advertise-peer-urls http://$SERVICE_HOST:$ETCD_PEER_PORT" + cmd+=" --advertise-client-urls http://$SERVICE_HOST:$ETCD_PORT" + if [ "$SERVICE_LISTEN_ADDRESS" == "::" ]; then + cmd+=" --listen-peer-urls http://[::]:$ETCD_PEER_PORT " + else + cmd+=" --listen-peer-urls http://0.0.0.0:$ETCD_PEER_PORT " + fi + cmd+=" --listen-client-urls http://$SERVICE_HOST:$ETCD_PORT" + if [ "$ENABLE_DEBUG_LOG_LEVEL" == "True" ]; then + cmd+=" --log-level=debug" + fi + + local unitfile="$SYSTEMD_DIR/$ETCD_SYSTEMD_SERVICE" + write_user_unit_file $ETCD_SYSTEMD_SERVICE "$cmd" "" "root" + + iniset -sudo $unitfile "Unit" "After" "network.target" + iniset -sudo $unitfile "Service" "Type" "notify" + iniset -sudo $unitfile "Service" "Restart" "on-failure" + iniset -sudo $unitfile "Service" "LimitNOFILE" "65536" + if is_arch "aarch64"; then + iniset -sudo $unitfile "Service" "Environment" "ETCD_UNSUPPORTED_ARCH=arm64" + fi + + $SYSTEMCTL daemon-reload + $SYSTEMCTL enable $ETCD_SYSTEMD_SERVICE + $SYSTEMCTL start $ETCD_SYSTEMD_SERVICE +} + +# stop_etcd3() stops the etcd3 process +function stop_etcd3 { + # Don't install in sub nodes (multinode scenario) + if [ "$SERVICE_HOST" != "$HOST_IP" ]; then + return + fi + + $SYSTEMCTL stop $ETCD_SYSTEMD_SERVICE +} + +function cleanup_etcd3 { + # Don't install in sub nodes (multinode scenario) + if [ "$SERVICE_HOST" != "$HOST_IP" ]; then + return + fi + + $SYSTEMCTL disable $ETCD_SYSTEMD_SERVICE + + local unitfile="$SYSTEMD_DIR/$ETCD_SYSTEMD_SERVICE" + sudo rm -f $unitfile + + $SYSTEMCTL daemon-reload + + if [[ 
"$ETCD_USE_RAMDISK" == "True" ]]; then + sudo umount $ETCD_DATA_DIR + fi + sudo rm -rf $ETCD_DATA_DIR +} + +function install_etcd3 { + echo "Installing etcd" + + # Create the necessary directories + sudo mkdir -p $ETCD_BIN_DIR + sudo mkdir -p $ETCD_DATA_DIR + if [[ "$ETCD_USE_RAMDISK" == "True" ]]; then + sudo mount -t tmpfs -o nodev,nosuid,size=${ETCD_RAMDISK_MB}M tmpfs $ETCD_DATA_DIR + fi + + # Download and cache the etcd tgz for subsequent use + local etcd_file + etcd_file="$(get_extra_file $ETCD_DOWNLOAD_LOCATION)" + if [ ! -f "$FILES/etcd-$ETCD_VERSION-linux-$ETCD_ARCH/etcd" ]; then + echo "${ETCD_SHA256} $etcd_file" > $FILES/etcd.sha256sum + # NOTE(yuanke wei): rm the damaged file when checksum fails + sha256sum -c $FILES/etcd.sha256sum || (sudo rm -f $etcd_file; exit 1) + + tar xzvf $etcd_file -C $FILES + sudo cp $FILES/$ETCD_NAME/etcd $ETCD_BIN_DIR/etcd + sudo cp $FILES/$ETCD_NAME/etcdctl $ETCD_BIN_DIR/etcdctl + fi + if [ ! -f "$ETCD_BIN_DIR/etcd" ]; then + sudo cp $FILES/$ETCD_NAME/etcd $ETCD_BIN_DIR/etcd + sudo cp $FILES/$ETCD_NAME/etcdctl $ETCD_BIN_DIR/etcdctl + fi +} + +# Restore xtrace +$_XTRACE_ETCD3 + +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: diff --git a/lib/glance b/lib/glance new file mode 100644 index 0000000000..9422c22141 --- /dev/null +++ b/lib/glance @@ -0,0 +1,676 @@ +#!/bin/bash +# +# lib/glance +# Functions to control the configuration and operation of the **Glance** service + +# Dependencies: +# +# - ``functions`` file +# - ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined +# - ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined +# - ``SERVICE_HOST`` +# - ``KEYSTONE_TOKEN_FORMAT`` must be defined + +# ``stack.sh`` calls the entry points in this order: +# +# - install_glance +# - configure_glance +# - init_glance +# - start_glance +# - stop_glance +# - cleanup_glance + +# Save trace setting +_XTRACE_GLANCE=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + +# Set up default directories +GITDIR["python-glanceclient"]=$DEST/python-glanceclient +GITDIR["glance_store"]=$DEST/glance_store +GLANCE_DIR=$DEST/glance + +# Glance virtual environment +if [[ ${USE_VENV} = True ]]; then + PROJECT_VENV["glance"]=${GLANCE_DIR}.venv + GLANCE_BIN_DIR=${PROJECT_VENV["glance"]}/bin +else + GLANCE_BIN_DIR=$(get_python_exec_prefix) +fi + +#S3 for Glance +GLANCE_USE_S3=$(trueorfalse False GLANCE_USE_S3) +GLANCE_S3_DEFAULT_BACKEND=${GLANCE_S3_DEFAULT_BACKEND:-s3_fast} +GLANCE_S3_BUCKET_ON_PUT=$(trueorfalse True GLANCE_S3_BUCKET_ON_PUT) +GLANCE_S3_BUCKET_NAME=${GLANCE_S3_BUCKET_NAME:-images} + +# Cinder for Glance +USE_CINDER_FOR_GLANCE=$(trueorfalse False USE_CINDER_FOR_GLANCE) +# GLANCE_CINDER_DEFAULT_BACKEND should be one of the values +# from CINDER_ENABLED_BACKENDS +GLANCE_CINDER_DEFAULT_BACKEND=${GLANCE_CINDER_DEFAULT_BACKEND:-lvmdriver-1} +GLANCE_STORE_ROOTWRAP_BASE_DIR=/usr/local/etc/glance +if [[ "$GLOBAL_VENV" == "True" ]] ; then + GLANCE_STORE_ROOTWRAP_BASE_DIR=${DEVSTACK_VENV}/etc/glance +fi +# When Cinder is used as a glance store, you can optionally configure cinder to +# optimize bootable volume creation by allowing volumes to be cloned directly +# in the backend instead of transferring data via Glance. To use this feature, +# set CINDER_ALLOWED_DIRECT_URL_SCHEMES for cinder.conf and enable +# GLANCE_SHOW_DIRECT_URL and/or GLANCE_SHOW_MULTIPLE_LOCATIONS for Glance. 
The +# default value for both of these is False, because for some backends they +# present a grave security risk (though not for Cinder, because all that's +# exposed is the volume_id where the image data is stored.) See OSSN-0065 for +# more information: https://wiki.openstack.org/wiki/OSSN/OSSN-0065 +GLANCE_SHOW_DIRECT_URL=$(trueorfalse False GLANCE_SHOW_DIRECT_URL) +GLANCE_SHOW_MULTIPLE_LOCATIONS=$(trueorfalse False GLANCE_SHOW_MULTIPLE_LOCATIONS) + +# Glance multi-store configuration +# Boolean flag to enable multiple store configuration for glance +GLANCE_ENABLE_MULTIPLE_STORES=$(trueorfalse False GLANCE_ENABLE_MULTIPLE_STORES) + +# Comma separated list for configuring multiple file stores of glance, +# for example; GLANCE_MULTIPLE_FILE_STORES = fast,cheap,slow +GLANCE_MULTIPLE_FILE_STORES=${GLANCE_MULTIPLE_FILE_STORES:-fast} + +# Default store/backend for glance, must be one of the store specified +# in GLANCE_MULTIPLE_FILE_STORES option. +GLANCE_DEFAULT_BACKEND=${GLANCE_DEFAULT_BACKEND:-fast} + +GLANCE_CACHE_DIR=${GLANCE_CACHE_DIR:=$DATA_DIR/glance/cache} +GLANCE_CACHE_DRIVER=${GLANCE_CACHE_DRIVER:-centralized_db} + +# File path for each store specified in GLANCE_MULTIPLE_FILE_STORES, the store +# identifier will be appended to this path at runtime. If GLANCE_MULTIPLE_FILE_STORES +# has fast,cheap specified then filepath will be generated like $DATA_DIR/glance/fast +# and $DATA_DIR/glance/cheap. +GLANCE_MULTISTORE_FILE_IMAGE_DIR=${GLANCE_MULTISTORE_FILE_IMAGE_DIR:=$DATA_DIR/glance} +GLANCE_IMAGE_DIR=${GLANCE_IMAGE_DIR:=$DATA_DIR/glance/images} +GLANCE_NFS_MOUNTPOINT=$GLANCE_IMAGE_DIR/mnt +GLANCE_LOCK_DIR=${GLANCE_LOCK_DIR:=$DATA_DIR/glance/locks} +GLANCE_STAGING_DIR=${GLANCE_MULTISTORE_FILE_IMAGE_DIR:=$DATA_DIR/os_glance_staging_store} +GLANCE_TASKS_DIR=${GLANCE_MULTISTORE_FILE_IMAGE_DIR:=$DATA_DIR/os_glance_tasks_store} + +GLANCE_USE_IMPORT_WORKFLOW=$(trueorfalse False GLANCE_USE_IMPORT_WORKFLOW) +GLANCE_ENABLE_QUOTAS=$(trueorfalse True GLANCE_ENABLE_QUOTAS) + +# Flag to set the oslo_policy.enforce_scope. This is used to switch +# This is used to disable the Image API policies scope and new defaults. +# By Default, it is True. +# For more detail: https://docs.openstack.org/oslo.policy/latest/configuration/index.html#oslo_policy.enforce_scope +GLANCE_ENFORCE_SCOPE=$(trueorfalse True GLANCE_ENFORCE_SCOPE) + +# Flag to disable image format inspection on upload +GLANCE_ENFORCE_IMAGE_FORMAT=$(trueorfalse True GLANCE_ENFORCE_IMAGE_FORMAT) + +GLANCE_CONF_DIR=${GLANCE_CONF_DIR:-/etc/glance} +GLANCE_METADEF_DIR=$GLANCE_CONF_DIR/metadefs +GLANCE_API_CONF=$GLANCE_CONF_DIR/glance-api.conf +GLANCE_API_PASTE_INI=$GLANCE_CONF_DIR/glance-api-paste.ini +GLANCE_CACHE_CONF=$GLANCE_CONF_DIR/glance-cache.conf +GLANCE_SCHEMA_JSON=$GLANCE_CONF_DIR/schema-image.json +GLANCE_SWIFT_STORE_CONF=$GLANCE_CONF_DIR/glance-swift-store.conf +GLANCE_IMAGE_IMPORT_CONF=$GLANCE_CONF_DIR/glance-image-import.conf + +if is_service_enabled tls-proxy; then + GLANCE_SERVICE_PROTOCOL="https" +fi + +# Glance connection info. Note the port must be specified. 
+GLANCE_SERVICE_HOST=${GLANCE_SERVICE_HOST:-$SERVICE_HOST} +GLANCE_SERVICE_LISTEN_ADDRESS=${GLANCE_SERVICE_LISTEN_ADDRESS:-$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)} +GLANCE_SERVICE_PORT=${GLANCE_SERVICE_PORT:-9292} +GLANCE_SERVICE_PORT_INT=${GLANCE_SERVICE_PORT_INT:-19292} +GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$GLANCE_SERVICE_HOST:$GLANCE_SERVICE_PORT} +GLANCE_SERVICE_PROTOCOL=${GLANCE_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} +GLANCE_UWSGI=glance.wsgi.api:application +GLANCE_UWSGI_CONF=$GLANCE_CONF_DIR/glance-uwsgi.ini + +# Glance default limit for Devstack +GLANCE_LIMIT_IMAGE_SIZE_TOTAL=${GLANCE_LIMIT_IMAGE_SIZE_TOTAL:-2000} + +GLANCE_URL="$GLANCE_SERVICE_PROTOCOL://$GLANCE_SERVICE_HOST/image" + +# Functions +# --------- + +# Test if any Glance services are enabled +# is_glance_enabled +function is_glance_enabled { + [[ ,${DISABLED_SERVICES} =~ ,"glance" ]] && return 1 + [[ ,${ENABLED_SERVICES} =~ ,"g-" ]] && return 0 + return 1 +} + +# cleanup_glance() - Remove residual data files, anything left over from previous +# runs that a clean run would need to clean up +function cleanup_glance { + # delete image files (glance) and all of the glance-remote temporary + # storage + sudo rm -rf $GLANCE_CACHE_DIR $GLANCE_IMAGE_DIR "${DATA_DIR}/glance-remote" + + # Cleanup multiple stores directories + if [[ "$GLANCE_ENABLE_MULTIPLE_STORES" == "True" ]]; then + local store file_dir + for store in $(echo $GLANCE_MULTIPLE_FILE_STORES | tr "," "\n"); do + file_dir="${GLANCE_MULTISTORE_FILE_IMAGE_DIR}/${store}/" + sudo rm -rf $file_dir + done + + # Cleanup reserved stores directories + sudo rm -rf $GLANCE_STAGING_DIR $GLANCE_TASKS_DIR + fi + remove_uwsgi_config "$GLANCE_UWSGI_CONF" "glance-wsgi-api" +} + +# Set multiple s3 store related config options +# +function configure_multiple_s3_stores { + enabled_backends="${GLANCE_S3_DEFAULT_BACKEND}:s3" + + iniset $GLANCE_API_CONF DEFAULT enabled_backends ${enabled_backends} + iniset $GLANCE_API_CONF glance_store default_backend $GLANCE_S3_DEFAULT_BACKEND +} + +# Set common S3 store options to given config section +# +# Arguments: +# config_section +# +function set_common_s3_store_params { + local config_section="$1" + openstack ec2 credential create + iniset $GLANCE_API_CONF $config_section s3_store_host "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$S3_SERVICE_PORT" + iniset $GLANCE_API_CONF $config_section s3_store_access_key "$(openstack ec2 credential list -c Access -f value)" + iniset $GLANCE_API_CONF $config_section s3_store_secret_key "$(openstack ec2 credential list -c Secret -f value)" + iniset $GLANCE_API_CONF $config_section s3_store_create_bucket_on_put $GLANCE_S3_BUCKET_ON_PUT + iniset $GLANCE_API_CONF $config_section s3_store_bucket $GLANCE_S3_BUCKET_NAME + iniset $GLANCE_API_CONF $config_section s3_store_bucket_url_format "path" + if is_service_enabled tls-proxy; then + iniset $GLANCE_API_CONF $config_section s3_store_cacert $SSL_BUNDLE_FILE + fi +} + +# Set multiple cinder store related config options for each of the cinder store +# +function configure_multiple_cinder_stores { + + local be be_name be_type enabled_backends + for be in ${CINDER_ENABLED_BACKENDS//,/ }; do + be_type=${be%%:*} + be_name=${be##*:} + enabled_backends+="${be_name}:cinder," + + set_common_cinder_store_params $be_name + iniset $GLANCE_API_CONF $be_name cinder_volume_type ${be_name} + if [[ "$be_type" == "nfs" ]]; then + mkdir -p "$GLANCE_NFS_MOUNTPOINT" + iniset $GLANCE_API_CONF $be_name cinder_mount_point_base "$GLANCE_NFS_MOUNTPOINT" + fi + done + iniset $GLANCE_API_CONF DEFAULT 
enabled_backends ${enabled_backends::-1} + iniset $GLANCE_API_CONF glance_store default_backend $GLANCE_CINDER_DEFAULT_BACKEND +} + +# Set common cinder store options to given config section +# +# Arguments: +# config_section +# +function set_common_cinder_store_params { + local config_section="$1" + iniset $GLANCE_API_CONF $config_section cinder_store_auth_address $KEYSTONE_SERVICE_URI_V3 + iniset $GLANCE_API_CONF $config_section cinder_store_user_name glance + iniset $GLANCE_API_CONF $config_section cinder_store_password $SERVICE_PASSWORD + iniset $GLANCE_API_CONF $config_section cinder_store_project_name $SERVICE_PROJECT_NAME +} + +# Configure multiple file stores options for each file store +# +# Arguments: +# +function configure_multiple_file_stores { + local store enabled_backends + enabled_backends="" + for store in $(echo $GLANCE_MULTIPLE_FILE_STORES | tr "," "\n"); do + enabled_backends+="${store}:file," + done + iniset $GLANCE_API_CONF DEFAULT enabled_backends ${enabled_backends::-1} + + # Glance multiple store Store specific configs + iniset $GLANCE_API_CONF glance_store default_backend $GLANCE_DEFAULT_BACKEND + local store + for store in $(echo $glance_multiple_file_stores | tr "," "\n"); do + iniset $GLANCE_API_CONF $store filesystem_store_datadir "${GLANCE_MULTISTORE_FILE_IMAGE_DIR}/${store}/" + done +} + +# Set reserved stores for glance +function configure_reserved_stores { + iniset $GLANCE_API_CONF os_glance_staging_store filesystem_store_datadir "${GLANCE_MULTISTORE_FILE_IMAGE_DIR}/os_glance_staging_store/" + iniset $GLANCE_API_CONF os_glance_tasks_store filesystem_store_datadir "${GLANCE_MULTISTORE_FILE_IMAGE_DIR}/os_glance_tasks_store/" +} + +# Copy rootwrap file from glance_store/etc/glance to /etc/glance +# +# Arguments: +# source_path Source path to copy rootwrap files from +# +function copy_rootwrap { + local source_path="$1" + # Make glance configuration directory if it is not exists + sudo install -d -o $STACK_USER $GLANCE_CONF_DIR + cp -r $source_path/rootwrap.* $GLANCE_CONF_DIR/ +} + +# Set glance_store related config options +# +# Arguments: +# USE_CINDER_FOR_GLANCE +# GLANCE_ENABLE_MULTIPLE_STORES +# +function configure_glance_store { + local use_cinder_for_glance="$1" + local glance_enable_multiple_stores="$2" + local be + + if [[ "$glance_enable_multiple_stores" == "False" ]]; then + if [[ "$use_cinder_for_glance" == "True" ]]; then + # set common glance_store parameters + iniset $GLANCE_API_CONF glance_store stores "cinder,file,http" + iniset $GLANCE_API_CONF glance_store default_store cinder + + # set cinder related store parameters + set_common_cinder_store_params glance_store + # set nfs mount_point dir + for be in ${CINDER_ENABLED_BACKENDS//,/ }; do + local be_name=${be##*:} + if [[ "$be_name" == "nfs" ]]; then + mkdir -p $GLANCE_NFS_MOUNTPOINT + iniset $GLANCE_API_CONF glance_store cinder_mount_point_base $GLANCE_NFS_MOUNTPOINT + fi + done + fi + # Store specific configs + iniset $GLANCE_API_CONF glance_store filesystem_store_datadir $GLANCE_IMAGE_DIR/ + else + if [[ "$use_cinder_for_glance" == "True" ]]; then + # Configure multiple cinder stores for glance + configure_multiple_cinder_stores + elif ! is_service_enabled s-proxy && [[ "$GLANCE_USE_S3" == "False" ]]; then + # Configure multiple file stores for glance + configure_multiple_file_stores + fi + # Configure reserved stores + configure_reserved_stores + fi +} + +function configure_glance_quotas { + + # Registered limit resources in keystone are system-specific resources. 
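    # They act as service-wide defaults: the registered limits created below
    # apply to every project unless an operator later adds a project-scoped
    # override with "openstack limit create".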
+ # Make sure we use a system-scoped token to interact with this API. + + openstack --os-cloud devstack-system-admin registered limit create --service glance \ + --default-limit $GLANCE_LIMIT_IMAGE_SIZE_TOTAL --region $REGION_NAME image_size_total + openstack --os-cloud devstack-system-admin registered limit create --service glance \ + --default-limit $GLANCE_LIMIT_IMAGE_SIZE_TOTAL --region $REGION_NAME image_stage_total + openstack --os-cloud devstack-system-admin registered limit create --service glance \ + --default-limit 100 --region $REGION_NAME image_count_total + openstack --os-cloud devstack-system-admin registered limit create --service glance \ + --default-limit 100 --region $REGION_NAME image_count_uploading + + # Tell glance to use these limits + iniset $GLANCE_API_CONF DEFAULT use_keystone_limits True + + # Configure oslo_limit so it can talk to keystone + iniset $GLANCE_API_CONF oslo_limit user_domain_name $SERVICE_DOMAIN_NAME + iniset $GLANCE_API_CONF oslo_limit password $SERVICE_PASSWORD + iniset $GLANCE_API_CONF oslo_limit username glance + iniset $GLANCE_API_CONF oslo_limit auth_type password + iniset $GLANCE_API_CONF oslo_limit auth_url $KEYSTONE_SERVICE_URI + iniset $GLANCE_API_CONF oslo_limit system_scope all + iniset $GLANCE_API_CONF oslo_limit endpoint_id \ + $(openstack --os-cloud devstack-system-admin endpoint list --service glance -f value -c ID) + + # Allow the glance service user to read quotas + openstack --os-cloud devstack-system-admin role add --user glance \ + --user-domain $SERVICE_DOMAIN_NAME --system all reader +} + +# configure_glance() - Set config files, create data dirs, etc +function configure_glance { + sudo install -d -o $STACK_USER $GLANCE_CONF_DIR $GLANCE_METADEF_DIR + + # Set non-default configuration options for the API server + local dburl + dburl=`database_connection_url glance` + + iniset $GLANCE_API_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL + iniset $GLANCE_API_CONF database connection $dburl + iniset $GLANCE_API_CONF DEFAULT use_syslog $SYSLOG + iniset $GLANCE_API_CONF DEFAULT image_cache_dir $GLANCE_CACHE_DIR/ + iniset $GLANCE_API_CONF DEFAULT image_cache_driver $GLANCE_CACHE_DRIVER + iniset $GLANCE_API_CONF oslo_concurrency lock_path $GLANCE_LOCK_DIR + iniset $GLANCE_API_CONF paste_deploy flavor keystone+cachemanagement + configure_keystone_authtoken_middleware $GLANCE_API_CONF glance + iniset $GLANCE_API_CONF oslo_messaging_notifications driver messagingv2 + iniset_rpc_backend glance $GLANCE_API_CONF + if [ "$VIRT_DRIVER" = 'libvirt' ] && [ "$LIBVIRT_TYPE" = 'parallels' ]; then + iniset $GLANCE_API_CONF DEFAULT disk_formats "ami,ari,aki,vhd,vmdk,raw,qcow2,vdi,iso,ploop" + fi + # Only use these if you know what you are doing! 
See OSSN-0065 + iniset $GLANCE_API_CONF DEFAULT show_image_direct_url $GLANCE_SHOW_DIRECT_URL + iniset $GLANCE_API_CONF DEFAULT show_multiple_locations $GLANCE_SHOW_MULTIPLE_LOCATIONS + iniset $GLANCE_API_CONF image_format require_image_format_match $GLANCE_ENFORCE_IMAGE_FORMAT + + # Configure glance_store + configure_glance_store $USE_CINDER_FOR_GLANCE $GLANCE_ENABLE_MULTIPLE_STORES + + # CORS feature support - to allow calls from Horizon by default + if [ -n "$GLANCE_CORS_ALLOWED_ORIGIN" ]; then + iniset $GLANCE_API_CONF cors allowed_origin "$GLANCE_CORS_ALLOWED_ORIGIN" + else + iniset $GLANCE_API_CONF cors allowed_origin "http://$SERVICE_HOST" + fi + + # No multiple stores for swift yet + if [[ "$GLANCE_ENABLE_MULTIPLE_STORES" == "False" ]]; then + # Return if s3api is enabled for glance + if [[ "$GLANCE_USE_S3" == "True" ]]; then + if is_service_enabled s3api; then + # set common glance_store parameters + iniset $GLANCE_API_CONF glance_store stores "s3,file,http" + iniset $GLANCE_API_CONF glance_store default_store s3 + fi + elif is_service_enabled s-proxy; then + # Store the images in swift if enabled. + iniset $GLANCE_API_CONF glance_store default_store swift + iniset $GLANCE_API_CONF glance_store swift_store_create_container_on_put True + + iniset $GLANCE_API_CONF glance_store swift_store_config_file $GLANCE_SWIFT_STORE_CONF + iniset $GLANCE_API_CONF glance_store default_swift_reference ref1 + iniset $GLANCE_API_CONF glance_store stores "file, http, swift" + if is_service_enabled tls-proxy; then + iniset $GLANCE_API_CONF glance_store swift_store_cacert $SSL_BUNDLE_FILE + fi + iniset $GLANCE_API_CONF DEFAULT graceful_shutdown_timeout "$SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT" + + iniset $GLANCE_SWIFT_STORE_CONF ref1 user $SERVICE_PROJECT_NAME:glance-swift + + iniset $GLANCE_SWIFT_STORE_CONF ref1 key $SERVICE_PASSWORD + iniset $GLANCE_SWIFT_STORE_CONF ref1 auth_address $KEYSTONE_SERVICE_URI/v3 + iniset $GLANCE_SWIFT_STORE_CONF ref1 auth_version 3 + fi + else + if [[ "$GLANCE_USE_S3" == "True" ]]; then + if is_service_enabled s3api; then + configure_multiple_s3_stores + fi + fi + fi + + # We need to tell glance what it's public endpoint is so that the version + # discovery document will be correct + iniset $GLANCE_API_CONF DEFAULT public_endpoint $GLANCE_URL + + if is_service_enabled tls-proxy; then + iniset $GLANCE_API_CONF DEFAULT bind_port $GLANCE_SERVICE_PORT_INT + iniset $GLANCE_API_CONF keystone_authtoken identity_uri $KEYSTONE_SERVICE_URI + fi + + # Format logging + setup_logging $GLANCE_API_CONF + + cp -p $GLANCE_DIR/etc/glance-api-paste.ini $GLANCE_API_PASTE_INI + + # Set non-default configuration options for the glance-cache + iniset $GLANCE_CACHE_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL + iniset $GLANCE_CACHE_CONF DEFAULT use_syslog $SYSLOG + iniset $GLANCE_CACHE_CONF DEFAULT image_cache_dir $GLANCE_CACHE_DIR/ + iniset $GLANCE_CACHE_CONF DEFAULT image_cache_driver $GLANCE_CACHE_DRIVER + + # Store specific confs + iniset $GLANCE_CACHE_CONF glance_store filesystem_store_datadir $GLANCE_IMAGE_DIR/ + + # Set default configuration options for the glance-image-import + iniset $GLANCE_IMAGE_IMPORT_CONF image_import_opts image_import_plugins "[]" + iniset $GLANCE_IMAGE_IMPORT_CONF inject_metadata_properties ignore_user_roles admin + iniset $GLANCE_IMAGE_IMPORT_CONF inject_metadata_properties inject + + cp -p $GLANCE_DIR/etc/schema-image.json $GLANCE_SCHEMA_JSON + cp -p $GLANCE_DIR/etc/metadefs/*.json $GLANCE_METADEF_DIR + + if is_service_enabled tls-proxy; then + 
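        # With the defaults below this renders an endpoint template along the
        # lines of https://203.0.113.10:8776/v3/%(project_id)s (host address is
        # illustrative), so glance-api and glance-cache reach cinder through
        # the TLS proxy rather than the plain-HTTP port.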
CINDER_SERVICE_HOST=${CINDER_SERVICE_HOST:-$SERVICE_HOST} + CINDER_SERVICE_PORT=${CINDER_SERVICE_PORT:-8776} + + iniset $GLANCE_API_CONF DEFAULT cinder_endpoint_template "https://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v3/%(project_id)s" + iniset $GLANCE_CACHE_CONF DEFAULT cinder_endpoint_template "https://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v3/%(project_id)s" + fi + + write_local_uwsgi_http_config "$GLANCE_UWSGI_CONF" "$GLANCE_UWSGI" "/image" "glance-api" + + # Grab our uwsgi listen address and use that to fill out our + # worker_self_reference_url config + iniset $GLANCE_API_CONF DEFAULT worker_self_reference_url $(awk '-F= ' '/^http-socket/ { print "http://"$2}' $GLANCE_UWSGI_CONF) + + # Configure the Python binary used for "import" plugins. If unset, these + # will attempt the uwsgi binary instead. + iniset $GLANCE_API_CONF wsgi python_interpreter $PYTHON + + if [[ "$GLANCE_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then + iniset $GLANCE_API_CONF oslo_policy enforce_scope true + iniset $GLANCE_API_CONF oslo_policy enforce_new_defaults true + iniset $GLANCE_API_CONF DEFAULT enforce_secure_rbac true + else + iniset $GLANCE_API_CONF oslo_policy enforce_scope false + iniset $GLANCE_API_CONF oslo_policy enforce_new_defaults false + iniset $GLANCE_API_CONF DEFAULT enforce_secure_rbac false + fi +} + +# create_glance_accounts() - Set up common required glance accounts + +# Project User Roles +# --------------------------------------------------------------------- +# SERVICE_PROJECT_NAME glance service +# SERVICE_PROJECT_NAME glance-swift ResellerAdmin (if Swift is enabled) +# SERVICE_PROJECT_NAME glance-search search (if Search is enabled) + +function create_glance_accounts { + if is_service_enabled g-api; then + + # When cinder talk to glance service APIs user needs service + # role for RBAC checks and admin role for cinder to access images. + create_service_user "glance" "admin" + + # required for swift access + if is_service_enabled s-proxy; then + create_service_user "glance-swift" "ResellerAdmin" + fi + + get_or_create_service "glance" "image" "Glance Image Service" + get_or_create_endpoint \ + "image" \ + "$REGION_NAME" \ + "$GLANCE_URL" + + # Note(frickler): Crude workaround for https://bugs.launchpad.net/glance-store/+bug/1620999 + service_domain_id=$(get_or_create_domain $SERVICE_DOMAIN_NAME) + iniset $GLANCE_SWIFT_STORE_CONF ref1 project_domain_id $service_domain_id + iniset $GLANCE_SWIFT_STORE_CONF ref1 user_domain_id $service_domain_id + + if [[ "$GLANCE_ENABLE_QUOTAS" = True ]]; then + configure_glance_quotas + fi + + if is_service_enabled s3api && [[ "$GLANCE_USE_S3" == "True" ]]; then + if [[ "$GLANCE_ENABLE_MULTIPLE_STORES" == "False" ]]; then + set_common_s3_store_params glance_store + else + set_common_s3_store_params $GLANCE_S3_DEFAULT_BACKEND + fi + fi + fi +} + +# init_glance() - Initialize databases, etc. 
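+#
+# The steps below are roughly equivalent to running the following by hand
+# (a sketch only, assuming the default GLANCE_CONF_DIR of /etc/glance and
+# that configure_glance has already written glance-api.conf):
+#
+#   recreate_database glance
+#   glance-manage --config-file /etc/glance/glance-api.conf db_sync
+#   glance-manage --config-file /etc/glance/glance-api.conf db_load_metadefs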
+function init_glance { + # Delete existing images + rm -rf $GLANCE_IMAGE_DIR + mkdir -p $GLANCE_IMAGE_DIR + + # (Re)create glance database + recreate_database glance + + time_start "dbsync" + # Migrate glance database + $GLANCE_BIN_DIR/glance-manage --config-file $GLANCE_CONF_DIR/glance-api.conf db_sync + + # Load metadata definitions + $GLANCE_BIN_DIR/glance-manage --config-file $GLANCE_CONF_DIR/glance-api.conf db_load_metadefs + time_stop "dbsync" +} + +# install_glanceclient() - Collect source and prepare +function install_glanceclient { + if use_library_from_git "python-glanceclient"; then + git_clone_by_name "python-glanceclient" + setup_dev_lib "python-glanceclient" + sudo install -D -m 0644 -o $STACK_USER {${GITDIR["python-glanceclient"]}/tools/,/etc/bash_completion.d/}glance.bash_completion + fi +} + +# install_glance() - Collect source and prepare +function install_glance { + local glance_store_extras=() + + if is_service_enabled cinder; then + glance_store_extras=("cinder" "${glance_store_extras[@]}") + fi + + if is_service_enabled swift; then + glance_store_extras=("swift" "${glance_store_extras[@]}") + fi + + # Install glance_store from git so we make sure we're testing + # the latest code. + if use_library_from_git "glance_store"; then + git_clone_by_name "glance_store" + setup_dev_lib "glance_store" $(join_extras "${glance_store_extras[@]}") + copy_rootwrap ${DEST}/glance_store/etc/glance + else + # we still need to pass extras + pip_install_gr_extras glance-store $(join_extras "${glance_store_extras[@]}") + copy_rootwrap $GLANCE_STORE_ROOTWRAP_BASE_DIR + fi + + git_clone $GLANCE_REPO $GLANCE_DIR $GLANCE_BRANCH + + setup_develop $GLANCE_DIR +} + +# glance_remote_conf() - Return the path to an alternate config file for +# the remote glance clone +function glance_remote_conf { + echo $(dirname "${GLANCE_CONF_DIR}")/glance-remote/$(basename "$1") +} + +# start_glance_remote_clone() - Clone the regular glance api worker +function start_glance_remote_clone { + local glance_remote_conf_dir glance_remote_port remote_data + local glance_remote_uwsgi venv + + glance_remote_conf_dir="$(glance_remote_conf "")" + glance_remote_port=$(get_random_port) + glance_remote_uwsgi="$(glance_remote_conf $GLANCE_UWSGI_CONF)" + + # Clone the existing ready-to-go glance-api setup + sudo rm -Rf "$glance_remote_conf_dir" + sudo cp -r "$GLANCE_CONF_DIR" "$glance_remote_conf_dir" + sudo chown $STACK_USER -R "$glance_remote_conf_dir" + + # Point this worker at different data dirs + remote_data="${DATA_DIR}/glance-remote" + mkdir -p $remote_data/os_glance_tasks_store \ + "${remote_data}/os_glance_staging_store" + iniset $(glance_remote_conf "$GLANCE_API_CONF") os_glance_staging_store \ + filesystem_store_datadir "${remote_data}/os_glance_staging_store" + iniset $(glance_remote_conf "$GLANCE_API_CONF") os_glance_tasks_store \ + filesystem_store_datadir "${remote_data}/os_glance_tasks_store" + + # Point this worker to use different cache dir + mkdir -p "$remote_data/cache" + iniset $(glance_remote_conf "$GLANCE_API_CONF") DEFAULT \ + image_cache_dir "${remote_data}/cache" + + # Change our uwsgi to our new port + sed -ri "s/^(http-socket.*):[0-9]+/\1:$glance_remote_port/" \ + "$glance_remote_uwsgi" + + # Update the self-reference url with our new port + iniset $(glance_remote_conf $GLANCE_API_CONF) DEFAULT \ + worker_self_reference_url \ + $(awk '-F= ' '/^http-socket/ { print "http://"$2 }' \ + "$glance_remote_uwsgi") + + # We need to create the systemd service for the clone, but then + # change it to 
include an Environment line to point the WSGI app + # at the alternate config directory. + if [[ "$GLOBAL_VENV" == True ]]; then + venv="--venv $DEVSTACK_VENV" + fi + write_uwsgi_user_unit_file devstack@g-api-r.service "$(which uwsgi) \ + --procname-prefix \ + glance-api-remote \ + --ini $glance_remote_uwsgi \ + $venv" \ + "" "$STACK_USER" + iniadd -sudo ${SYSTEMD_DIR}/devstack@g-api-r.service \ + "Service" "Environment" \ + "OS_GLANCE_CONFIG_DIR=$glance_remote_conf_dir" + + # Reload and restart with the new config + $SYSTEMCTL daemon-reload + $SYSTEMCTL restart devstack@g-api-r + + get_or_create_service glance_remote image_remote "Alternate glance" + get_or_create_endpoint image_remote $REGION_NAME \ + $(awk '-F= ' '/^http-socket/ { print "http://"$2 }' \ + $glance_remote_uwsgi) +} + +# start_glance() - Start running processes +function start_glance { + local service_protocol=$GLANCE_SERVICE_PROTOCOL + + run_process g-api "$(which uwsgi) --procname-prefix glance-api --ini $GLANCE_UWSGI_CONF" + + if is_service_enabled g-api-r; then + echo "Starting the g-api-r clone service..." + start_glance_remote_clone + fi + + echo "Waiting for g-api ($GLANCE_SERVICE_HOST) to start..." + if ! wait_for_service $SERVICE_TIMEOUT $GLANCE_URL; then + die $LINENO "g-api did not start" + fi +} + +# stop_glance() - Stop running processes +function stop_glance { + stop_process g-api + stop_process g-api-r +} + +# Restore xtrace +$_XTRACE_GLANCE + +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: diff --git a/lib/horizon b/lib/horizon new file mode 100644 index 0000000000..7c0d443aa6 --- /dev/null +++ b/lib/horizon @@ -0,0 +1,199 @@ +#!/bin/bash +# +# lib/horizon +# Functions to control the configuration and operation of the horizon service + +# Dependencies: +# +# - ``functions`` file +# - ``apache`` file +# - ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined + +# ``stack.sh`` calls the entry points in this order: +# +# - install_horizon +# - configure_horizon +# - init_horizon +# - start_horizon +# - stop_horizon +# - cleanup_horizon + +# Save trace setting +_XTRACE_HORIZON=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + +HORIZON_DIR=$DEST/horizon + +# local_settings.py is used to customize Dashboard settings. +# The example file in Horizon repo is used by default. +HORIZON_SETTINGS=${HORIZON_SETTINGS:-$HORIZON_DIR/openstack_dashboard/local/local_settings.py.example} + +# Functions +# --------- + +# utility method of setting python option +function _horizon_config_set { + local file=$1 + local section=$2 + local option=$3 + local value=$4 + + if [ -z "$section" ]; then + sed -e "/^$option/d" -i $file + echo "$option = $value" >> $file + elif grep -q "^$section" $file; then + local line + line=$(sed -ne "/^$section/,/^}/ { /^ *'$option':/ p; }" $file) + if [ -n "$line" ]; then + sed -i -e "/^$section/,/^}/ s/^\( *'$option'\) *:.*$/\1: $value,/" $file + else + sed -i -e "/^$section/a\ '$option': $value," $file + fi + else + echo -e "\n\n$section = {\n '$option': $value,\n}" >> $file + fi +} + + + +# Entry Points +# ------------ + +# cleanup_horizon() - Remove residual data files, anything left over from previous +# runs that a clean run would need to clean up +function cleanup_horizon { + disable_apache_site horizon + sudo rm -f $(apache_site_config_for horizon) +} + +# configure_horizon() - Set config files, create data dirs, etc +function configure_horizon { + setup_develop $HORIZON_DIR + + # Compile message catalogs. 
+ # Horizon is installed as develop mode, so we can compile here. + # Message catalog compilation is handled by Django admin script, + # so compiling them after the installation avoids Django installation twice. + (cd $HORIZON_DIR; $PYTHON manage.py compilemessages) + + # ``local_settings.py`` is used to override horizon default settings. + local local_settings=$HORIZON_DIR/openstack_dashboard/local/local_settings.py + cp $HORIZON_SETTINGS $local_settings + + # Ensure local_setting.py file ends with EOL (newline) + echo >> $local_settings + + _horizon_config_set $local_settings "" WEBROOT \"$HORIZON_APACHE_ROOT/\" + + _horizon_config_set $local_settings "" COMPRESS_OFFLINE True + _horizon_config_set $local_settings "" OPENSTACK_KEYSTONE_DEFAULT_ROLE \"member\" + + _horizon_config_set $local_settings "" OPENSTACK_HOST \"${KEYSTONE_SERVICE_HOST}\" + + _horizon_config_set $local_settings "" OPENSTACK_KEYSTONE_URL "\"${KEYSTONE_SERVICE_URI}/v3\"" + + # note(trebskit): if HOST_IP points at non-localhost ip address, horizon cannot be accessed + # from outside the virtual machine. This fixes is meant primarily for local development + # purpose + _horizon_config_set $local_settings "" ALLOWED_HOSTS [\"*\"] + + if [ -f $SSL_BUNDLE_FILE ]; then + _horizon_config_set $local_settings "" OPENSTACK_SSL_CACERT \"${SSL_BUNDLE_FILE}\" + fi + + if is_service_enabled ldap; then + _horizon_config_set $local_settings "" OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT "True" + fi + + if is_service_enabled c-bak; then + _horizon_config_set $local_settings OPENSTACK_CINDER_FEATURES enable_backup "True" + fi + + # Create an empty directory that apache uses as docroot + sudo mkdir -p $HORIZON_DIR/.blackhole + + local horizon_conf + horizon_conf=$(apache_site_config_for horizon) + + local wsgi_venv_config="" + if [[ "$GLOBAL_VENV" == "True" ]] ; then + wsgi_venv_config="WSGIPythonHome $DEVSTACK_VENV" + fi + + # Configure apache to run horizon + # Set up the django horizon application to serve via apache/wsgi + sudo sh -c "sed -e \" + s,%USER%,$APACHE_USER,g; + s,%GROUP%,$APACHE_GROUP,g; + s,%HORIZON_DIR%,$HORIZON_DIR,g; + s,%APACHE_NAME%,$APACHE_NAME,g; + s,%DEST%,$DEST,g; + s,%WEBROOT%,$HORIZON_APACHE_ROOT,g; + s,%WSGIPYTHONHOME%,$wsgi_venv_config,g; + \" $FILES/apache-horizon.template >$horizon_conf" + + if is_ubuntu; then + disable_apache_site 000-default + sudo touch $horizon_conf + elif is_fedora; then + : # nothing to do + else + exit_distro_not_supported "horizon apache configuration" + fi + enable_apache_site horizon +} + +# init_horizon() - Initialize databases, etc. +function init_horizon { + # Remove old log files that could mess with how DevStack detects whether Horizon + # has been successfully started (see start_horizon() and functions::screen_it()) + # and run_process + sudo rm -f /var/log/$APACHE_NAME/horizon_* + + # Setup alias for django-admin which could be different depending on distro + local django_admin + if type -p django-admin > /dev/null; then + django_admin=django-admin + else + django_admin=django-admin.py + fi + + # These need to be run after horizon plugins are configured. 
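+    # (Collecting and compressing static assets any earlier could miss
+    # content contributed by horizon plugins through their ``enabled``
+    # files, hence the ordering note above.)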
+ DJANGO_SETTINGS_MODULE=openstack_dashboard.settings $django_admin collectstatic --noinput + DJANGO_SETTINGS_MODULE=openstack_dashboard.settings $django_admin compress --force + +} + +# install_horizon() - Collect source and prepare +function install_horizon { + # Apache installation, because we mark it NOPRIME + install_apache_wsgi + + # Install the memcache library so that horizon can use memcached as its + # cache backend + pip_install_gr pymemcache + + git_clone $HORIZON_REPO $HORIZON_DIR $HORIZON_BRANCH +} + +# start_horizon() - Start running processes +function start_horizon { + restart_apache_server +} + +# stop_horizon() - Stop running processes +function stop_horizon { + stop_apache_server +} + +# Restore xtrace +$_XTRACE_HORIZON + +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: diff --git a/lib/host b/lib/host new file mode 100644 index 0000000000..58062eff6b --- /dev/null +++ b/lib/host @@ -0,0 +1,98 @@ +#!/bin/bash + +# Kernel Samepage Merging (KSM) +# ----------------------------- + +# Processes that mark their memory as mergeable can share identical memory +# pages if KSM is enabled. This is particularly useful for nova + libvirt +# backends but any other setup that marks its memory as mergeable can take +# advantage. The drawback is there is higher cpu load; however, we tend to +# be memory bound not cpu bound so enable KSM by default but allow people +# to opt out if the CPU time is more important to them. +ENABLE_KSM=$(trueorfalse True ENABLE_KSM) +ENABLE_KSMTUNED=$(trueorfalse True ENABLE_KSMTUNED) +function configure_ksm { + if [[ $ENABLE_KSMTUNED == "True" ]] ; then + install_package "ksmtuned" + fi + if [[ -f /sys/kernel/mm/ksm/run ]] ; then + echo $(bool_to_int ENABLE_KSM) | sudo tee /sys/kernel/mm/ksm/run + fi +} + +# Compressed swap (ZSWAP) +#------------------------ + +# as noted in the kernel docs https://docs.kernel.org/admin-guide/mm/zswap.html +# Zswap is a lightweight compressed cache for swap pages. +# It takes pages that are in the process of being swapped out and attempts +# to compress them into a dynamically allocated RAM-based memory pool. +# zswap basically trades CPU cycles for potentially reduced swap I/O. +# This trade-off can also result in a significant performance improvement +# if reads from the compressed cache are faster than reads from a swap device. + +ENABLE_ZSWAP=$(trueorfalse False ENABLE_ZSWAP) +# lz4 is very fast although it does not have the best compression +# zstd has much better compression but more latency +ZSWAP_COMPRESSOR=${ZSWAP_COMPRESSOR:="lz4"} +ZSWAP_ZPOOL=${ZSWAP_ZPOOL:="zsmalloc"} +function configure_zswap { + if [[ $ENABLE_ZSWAP == "True" ]] ; then + # Centos 9 stream seems to only support enabling but not run time + # tuning so dont try to choose better default on centos + if is_ubuntu; then + echo ${ZSWAP_COMPRESSOR} | sudo tee /sys/module/zswap/parameters/compressor + echo ${ZSWAP_ZPOOL} | sudo tee /sys/module/zswap/parameters/zpool + fi + echo 1 | sudo tee /sys/module/zswap/parameters/enabled + # print curent zswap kernel config + sudo grep -R . 
/sys/module/zswap/parameters || /bin/true + fi +} + +ENABLE_SYSCTL_MEM_TUNING=$(trueorfalse False ENABLE_SYSCTL_MEM_TUNING) +function configure_sysctl_mem_parmaters { + if [[ $ENABLE_SYSCTL_MEM_TUNING == "True" ]] ; then + # defer write when memory is available + sudo sysctl -w vm.dirty_ratio=60 + sudo sysctl -w vm.dirty_background_ratio=10 + sudo sysctl -w vm.vfs_cache_pressure=50 + # assume swap is compressed so on new kernels + # give it equal priority as page cache which is + # uncompressed. on kernels < 5.8 the max is 100 + # not 200 so it will strongly prefer swapping. + sudo sysctl -w vm.swappiness=100 + sudo grep -R . /proc/sys/vm/ || /bin/true + fi +} + +function configure_host_mem { + configure_zswap + configure_ksm + configure_sysctl_mem_parmaters +} + +ENABLE_SYSCTL_NET_TUNING=$(trueorfalse False ENABLE_SYSCTL_NET_TUNING) +function configure_sysctl_net_parmaters { + if [[ $ENABLE_SYSCTL_NET_TUNING == "True" ]] ; then + # detect dead TCP connections after 120 seconds + sudo sysctl -w net.ipv4.tcp_keepalive_time=60 + sudo sysctl -w net.ipv4.tcp_keepalive_intvl=10 + sudo sysctl -w net.ipv4.tcp_keepalive_probes=6 + # reudce network latency for new connections + sudo sysctl -w net.ipv4.tcp_fastopen=3 + # print tcp options + sudo grep -R . /proc/sys/net/ipv4/tcp* || /bin/true + # disable qos by default + sudo sysctl -w net.core.default_qdisc=pfifo_fast + fi +} + +function configure_host_net { + configure_sysctl_net_parmaters +} + +function tune_host { + configure_host_mem + configure_host_net +} diff --git a/lib/infra b/lib/infra new file mode 100644 index 0000000000..f4760c352c --- /dev/null +++ b/lib/infra @@ -0,0 +1,58 @@ +#!/bin/bash +# +# lib/infra +# +# Functions to install infrastructure projects needed by other projects +# early in the cycle. We need this so we can do things like gate on +# requirements as a global list + +# Dependencies: +# +# - ``functions`` file + +# ``stack.sh`` calls the entry points in this order: +# +# - install_infra + +# Save trace setting +_XTRACE_INFRA=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- +GITDIR["pbr"]=$DEST/pbr + +# Entry Points +# ------------ + +# install_infra() - Collect source and prepare +function install_infra { + local PIP_VIRTUAL_ENV="$REQUIREMENTS_DIR/.venv" + [ ! -d $PIP_VIRTUAL_ENV ] && ${VIRTUALENV_CMD} $PIP_VIRTUAL_ENV + # We don't care about testing git pbr in the requirements venv. + PIP_VIRTUAL_ENV=$PIP_VIRTUAL_ENV pip_install -U pbr setuptools[core] + PIP_VIRTUAL_ENV=$PIP_VIRTUAL_ENV pip_install $REQUIREMENTS_DIR + + # Unset the PIP_VIRTUAL_ENV so that PBR does not end up trapped + # down the VENV well + unset PIP_VIRTUAL_ENV + + # Install pbr + if use_library_from_git "pbr"; then + git_clone_by_name "pbr" + setup_dev_lib "pbr" + else + # Always upgrade pbr to latest version as we may have pulled it + # in via system packages. 
+ pip_install "-U" "pbr" + fi +} + +# Restore xtrace +$_XTRACE_INFRA + +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: diff --git a/lib/keystone b/lib/keystone new file mode 100644 index 0000000000..840103b9f4 --- /dev/null +++ b/lib/keystone @@ -0,0 +1,658 @@ +#!/bin/bash +# +# lib/keystone +# Functions to control the configuration and operation of **Keystone** + +# Dependencies: +# +# - ``functions`` file +# - ``tls`` file +# - ``DEST``, ``STACK_USER`` +# - ``FILES`` +# - ``BASE_SQL_CONN`` +# - ``SERVICE_HOST``, ``SERVICE_PROTOCOL`` +# - ``S3_SERVICE_PORT`` (template backend only) + +# ``stack.sh`` calls the entry points in this order: +# +# - install_keystone +# - configure_keystone +# - _config_keystone_apache_wsgi +# - init_keystone +# - start_keystone +# - bootstrap_keystone +# - create_keystone_accounts +# - stop_keystone +# - cleanup_keystone + +# Save trace setting +_XTRACE_KEYSTONE=$(set +o | grep xtrace) +set +o xtrace + +# Defaults +# -------- + +# Set up default directories +GITDIR["keystoneauth"]=$DEST/keystoneauth +GITDIR["python-keystoneclient"]=$DEST/python-keystoneclient +GITDIR["keystonemiddleware"]=$DEST/keystonemiddleware +KEYSTONE_DIR=$DEST/keystone + +# Keystone virtual environment +if [[ ${USE_VENV} = True ]]; then + PROJECT_VENV["keystone"]=${KEYSTONE_DIR}.venv + KEYSTONE_BIN_DIR=${PROJECT_VENV["keystone"]}/bin +else + KEYSTONE_BIN_DIR=$(get_python_exec_prefix) +fi + +KEYSTONE_CONF_DIR=${KEYSTONE_CONF_DIR:-/etc/keystone} +KEYSTONE_CONF=$KEYSTONE_CONF_DIR/keystone.conf +KEYSTONE_PUBLIC_UWSGI_CONF=$KEYSTONE_CONF_DIR/keystone-uwsgi-public.ini +KEYSTONE_PUBLIC_UWSGI=keystone.wsgi.api:application + +# Select the Identity backend driver +KEYSTONE_IDENTITY_BACKEND=${KEYSTONE_IDENTITY_BACKEND:-sql} + +# Select the Assignment backend driver +KEYSTONE_ASSIGNMENT_BACKEND=${KEYSTONE_ASSIGNMENT_BACKEND:-sql} + +# Select the Role backend driver +KEYSTONE_ROLE_BACKEND=${KEYSTONE_ROLE_BACKEND:-sql} + +# Select the Resource backend driver +KEYSTONE_RESOURCE_BACKEND=${KEYSTONE_RESOURCE_BACKEND:-sql} + +# Select Keystone's token provider (and format) +# Refer keystone doc for supported token provider: +# https://docs.openstack.org/keystone/latest/admin/token-provider.html +KEYSTONE_TOKEN_FORMAT=${KEYSTONE_TOKEN_FORMAT:-fernet} +KEYSTONE_TOKEN_FORMAT=$(echo ${KEYSTONE_TOKEN_FORMAT} | tr '[:upper:]' '[:lower:]') + +# Public facing bits +KEYSTONE_SERVICE_HOST=${KEYSTONE_SERVICE_HOST:-$SERVICE_HOST} +KEYSTONE_SERVICE_PORT=${KEYSTONE_SERVICE_PORT:-5000} +KEYSTONE_SERVICE_PORT_INT=${KEYSTONE_SERVICE_PORT_INT:-5001} +KEYSTONE_SERVICE_PROTOCOL=${KEYSTONE_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} + +# Set the project for service accounts in Keystone +SERVICE_DOMAIN_NAME=${SERVICE_DOMAIN_NAME:-Default} +SERVICE_PROJECT_NAME=${SERVICE_PROJECT_NAME:-service} + +# Note 2016-03 : SERVICE_TENANT_NAME is kept for backwards +# compatibility; we should be using SERVICE_PROJECT_NAME now +SERVICE_TENANT_NAME=${SERVICE_PROJECT_NAME:-service} + +# if we are running with SSL use https protocols +if is_service_enabled tls-proxy; then + KEYSTONE_SERVICE_PROTOCOL="https" +fi + +KEYSTONE_SERVICE_URI=${KEYSTONE_SERVICE_PROTOCOL}://${KEYSTONE_SERVICE_HOST}/identity +# for compat +KEYSTONE_AUTH_URI=$KEYSTONE_SERVICE_URI + +# V3 URIs +KEYSTONE_AUTH_URI_V3=$KEYSTONE_SERVICE_URI/v3 +KEYSTONE_SERVICE_URI_V3=$KEYSTONE_SERVICE_URI/v3 + +# Security compliance +KEYSTONE_SECURITY_COMPLIANCE_ENABLED=${KEYSTONE_SECURITY_COMPLIANCE_ENABLED:-True} 
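+# Example: set ``KEYSTONE_SECURITY_COMPLIANCE_ENABLED=False`` in ``local.conf``
+# to skip the lockout and password-history settings below.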
+KEYSTONE_LOCKOUT_FAILURE_ATTEMPTS=${KEYSTONE_LOCKOUT_FAILURE_ATTEMPTS:-2} +KEYSTONE_LOCKOUT_DURATION=${KEYSTONE_LOCKOUT_DURATION:-10} +KEYSTONE_UNIQUE_LAST_PASSWORD_COUNT=${KEYSTONE_UNIQUE_LAST_PASSWORD_COUNT:-2} + +# Number of bcrypt hashing rounds, increasing number exponentially increases required +# resources to generate password hash. This is very effective way to protect from +# bruteforce attacks. 4 is minimal value that can be specified for bcrypt and +# it works way faster than default 12. Minimal value is great for CI and development +# however may not be suitable for real production. +KEYSTONE_PASSWORD_HASH_ROUNDS=${KEYSTONE_PASSWORD_HASH_ROUNDS:-4} + +# Cache settings +KEYSTONE_ENABLE_CACHE=${KEYSTONE_ENABLE_CACHE:-True} + +# Whether to create a keystone admin endpoint for legacy applications +KEYSTONE_ADMIN_ENDPOINT=$(trueorfalse False KEYSTONE_ADMIN_ENDPOINT) + +# Flag to set the oslo_policy.enforce_scope. This is used to switch +# the Identity API policies to start checking the scope of token. By Default, +# this flag is False. +# For more detail: https://docs.openstack.org/oslo.policy/latest/configuration/index.html#oslo_policy.enforce_scope +KEYSTONE_ENFORCE_SCOPE=$(trueorfalse False KEYSTONE_ENFORCE_SCOPE) + +# Functions +# --------- + +# Test if Keystone is enabled +# is_keystone_enabled +function is_keystone_enabled { + [[ ,${DISABLED_SERVICES} =~ ,"keystone" ]] && return 1 + [[ ,${ENABLED_SERVICES}, =~ ,"key", ]] && return 0 + return 1 +} + +# cleanup_keystone() - Remove residual data files, anything left over from previous +# runs that a clean run would need to clean up +function cleanup_keystone { + stop_process "keystone" + remove_uwsgi_config "$KEYSTONE_PUBLIC_UWSGI_CONF" "keystone-wsgi-public" + sudo rm -f $(apache_site_config_for keystone-wsgi-public) +} + +# _config_keystone_apache_wsgi() - Set WSGI config files of Keystone +function _config_keystone_apache_wsgi { + local keystone_apache_conf + keystone_apache_conf=$(apache_site_config_for keystone) + keystone_ssl_listen="#" + local keystone_ssl="" + local keystone_certfile="" + local keystone_keyfile="" + local keystone_service_port=$KEYSTONE_SERVICE_PORT + local venv_path="" + + if is_service_enabled tls-proxy; then + keystone_service_port=$KEYSTONE_SERVICE_PORT_INT + fi + if [[ ${USE_VENV} = True ]]; then + venv_path="python-path=${PROJECT_VENV["keystone"]}/lib/$(python_version)/site-packages" + fi + + sudo cp $FILES/apache-keystone.template $keystone_apache_conf + sudo sed -e " + s|%PUBLICPORT%|$keystone_service_port|g; + s|%APACHE_NAME%|$APACHE_NAME|g; + s|%SSLLISTEN%|$keystone_ssl_listen|g; + s|%SSLENGINE%|$keystone_ssl|g; + s|%SSLCERTFILE%|$keystone_certfile|g; + s|%SSLKEYFILE%|$keystone_keyfile|g; + s|%USER%|$STACK_USER|g; + s|%VIRTUALENV%|$venv_path|g + s|%KEYSTONE_BIN%|$KEYSTONE_BIN_DIR|g + " -i $keystone_apache_conf +} + +# configure_keystone() - Set config files, create data dirs, etc +function configure_keystone { + sudo install -d -o $STACK_USER $KEYSTONE_CONF_DIR + + if [[ "$KEYSTONE_CONF_DIR" != "$KEYSTONE_DIR/etc" ]]; then + install -m 600 /dev/null $KEYSTONE_CONF + fi + # Populate ``keystone.conf`` + if is_service_enabled ldap; then + iniset $KEYSTONE_CONF identity domain_config_dir "$KEYSTONE_CONF_DIR/domains" + iniset $KEYSTONE_CONF identity domain_specific_drivers_enabled "True" + fi + iniset $KEYSTONE_CONF identity driver "$KEYSTONE_IDENTITY_BACKEND" + iniset $KEYSTONE_CONF identity password_hash_rounds $KEYSTONE_PASSWORD_HASH_ROUNDS + iniset $KEYSTONE_CONF assignment driver 
"$KEYSTONE_ASSIGNMENT_BACKEND" + iniset $KEYSTONE_CONF role driver "$KEYSTONE_ROLE_BACKEND" + iniset $KEYSTONE_CONF resource driver "$KEYSTONE_RESOURCE_BACKEND" + + # Enable caching + iniset $KEYSTONE_CONF cache enabled $KEYSTONE_ENABLE_CACHE + iniset $KEYSTONE_CONF cache backend $CACHE_BACKEND + iniset $KEYSTONE_CONF cache memcache_servers $MEMCACHE_SERVERS + + # Enable errors if response validation fails. We want this enabled in CI + # and development contexts to highlights bugs in our response schemas. + iniset $KEYSTONE_CONF api response_validation error + + iniset_rpc_backend keystone $KEYSTONE_CONF oslo_messaging_notifications + + local service_port=$KEYSTONE_SERVICE_PORT + + if is_service_enabled tls-proxy; then + # Set the service ports for a proxy to take the originals + service_port=$KEYSTONE_SERVICE_PORT_INT + fi + + # Override the endpoints advertised by keystone so that clients use the correct + # endpoint. By default, the keystone server uses the public_port which isn't + # going to work when you want to use a different port (in the case of proxy), + # or you don't want the port (in the case of putting keystone on a path in apache). + iniset $KEYSTONE_CONF DEFAULT public_endpoint $KEYSTONE_SERVICE_URI + + if [[ "$KEYSTONE_TOKEN_FORMAT" != "" ]]; then + iniset $KEYSTONE_CONF token provider $KEYSTONE_TOKEN_FORMAT + fi + + iniset $KEYSTONE_CONF database connection `database_connection_url keystone` + + # Set up logging + if [ "$SYSLOG" != "False" ]; then + iniset $KEYSTONE_CONF DEFAULT use_syslog "True" + fi + + # Format logging + setup_logging $KEYSTONE_CONF + + iniset $KEYSTONE_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL + + write_uwsgi_config "$KEYSTONE_PUBLIC_UWSGI_CONF" "$KEYSTONE_PUBLIC_UWSGI" "/identity" "" "keystone-api" + + iniset $KEYSTONE_CONF DEFAULT max_token_size 16384 + + iniset $KEYSTONE_CONF fernet_tokens key_repository "$KEYSTONE_CONF_DIR/fernet-keys/" + + iniset $KEYSTONE_CONF credential key_repository "$KEYSTONE_CONF_DIR/credential-keys/" + + # Configure the project created by the 'keystone-manage bootstrap' as the cloud-admin project. + # The users from this project are globally admin as before, but it also + # allows policy changes in order to clarify the adminess scope. 
+ #iniset $KEYSTONE_CONF resource admin_project_domain_name Default + #iniset $KEYSTONE_CONF resource admin_project_name admin + + if [[ "$KEYSTONE_SECURITY_COMPLIANCE_ENABLED" = True ]]; then + iniset $KEYSTONE_CONF security_compliance lockout_failure_attempts $KEYSTONE_LOCKOUT_FAILURE_ATTEMPTS + iniset $KEYSTONE_CONF security_compliance lockout_duration $KEYSTONE_LOCKOUT_DURATION + iniset $KEYSTONE_CONF security_compliance unique_last_password_count $KEYSTONE_UNIQUE_LAST_PASSWORD_COUNT + fi + + iniset $KEYSTONE_CONF oslo_policy policy_file policy.yaml + + if [[ "$KEYSTONE_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then + iniset $KEYSTONE_CONF oslo_policy enforce_scope true + iniset $KEYSTONE_CONF oslo_policy enforce_new_defaults true + else + iniset $KEYSTONE_CONF oslo_policy enforce_scope false + iniset $KEYSTONE_CONF oslo_policy enforce_new_defaults false + fi +} + +# create_keystone_accounts() - Sets up common required keystone accounts + +# Project User Roles +# ------------------------------------------------------------------ +# admin admin admin +# service -- -- +# -- -- service +# -- -- ResellerAdmin +# -- -- member +# demo admin admin +# demo demo member, anotherrole +# alt_demo admin admin +# alt_demo alt_demo member, anotherrole +# invisible_to_admin demo member + +# Group Users Roles Project +# ------------------------------------------------------------------ +# admins admin admin admin +# nonadmins demo, alt_demo member, anotherrole demo, alt_demo + +# System User Roles +# ------------------------------------------------------------------ +# all admin admin +# all system_reader reader +# all system_member member + + +# Migrated from keystone_data.sh +function create_keystone_accounts { + + # The keystone bootstrapping process (performed via keystone-manage + # bootstrap) creates an admin user and an admin + # project. As a sanity check we exercise the CLI to retrieve the IDs for + # these values. + local admin_project + admin_project=$(openstack project show "admin" -f value -c id) + local admin_user + admin_user=$(openstack user show "admin" -f value -c id) + # These roles are also created during bootstrap but we don't need their IDs + local admin_role="admin" + local member_role="member" + local reader_role="reader" + + async_run ks-domain-role get_or_add_user_domain_role $admin_role $admin_user default + + # Create service project/role + get_or_create_domain "$SERVICE_DOMAIN_NAME" + async_run ks-project get_or_create_project "$SERVICE_PROJECT_NAME" "$SERVICE_DOMAIN_NAME" + + # Service role, so service users do not have to be admins + async_run ks-service get_or_create_role service + + # The ResellerAdmin role is used by Nova and Ceilometer so we need to keep it. + # The admin role in swift allows a user to act as an admin for their project, + # but ResellerAdmin is needed for a user to act as any project. The name of this + # role is also configurable in swift-proxy.conf + async_run ks-reseller get_or_create_role ResellerAdmin + + # another_role demonstrates that an arbitrary role may be created and used + # TODO(sleepsonthefloor): show how this can be used for rbac in the future! 
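+    # (For illustration only: a custom role like this could be referenced
+    # from a service's policy.yaml, e.g. "get_foo": "role:anotherrole", to
+    # exercise checks beyond the stock admin/member/reader roles.)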
+ local another_role="anotherrole" + async_run ks-anotherrole get_or_create_role $another_role + + # invisible project - admin can't see this one + local invis_project + invis_project=$(get_or_create_project "invisible_to_admin" default) + + # demo + local demo_project + demo_project=$(get_or_create_project "demo" default) + local demo_user + demo_user=$(get_or_create_user "demo" \ + "$ADMIN_PASSWORD" "default" "demo@example.com") + + async_wait ks-{domain-role,domain,project,service,reseller,anotherrole} + + async_run ks-demo-member get_or_add_user_project_role $member_role $demo_user $demo_project + + async_run ks-demo-admin get_or_add_user_project_role $admin_role $admin_user $demo_project + async_run ks-demo-another get_or_add_user_project_role $another_role $demo_user $demo_project + async_run ks-demo-invis get_or_add_user_project_role $member_role $demo_user $invis_project + + # Create a user to act as a reader on project demo + local demo_reader + demo_reader=$(get_or_create_user "demo_reader" \ + "$ADMIN_PASSWORD" "default" "demo_reader@example.com") + + async_run ks-demo-reader get_or_add_user_project_role $reader_role $demo_reader $demo_project + + # Create a different project called alt_demo + local alt_demo_project + alt_demo_project=$(get_or_create_project "alt_demo" default) + # Create a user to act as member, admin and anotherrole on project alt_demo + local alt_demo_user + alt_demo_user=$(get_or_create_user "alt_demo" \ + "$ADMIN_PASSWORD" "default" "alt_demo@example.com") + + async_run ks-alt-admin get_or_add_user_project_role $admin_role $alt_demo_user $alt_demo_project + async_run ks-alt-another get_or_add_user_project_role $another_role $alt_demo_user $alt_demo_project + + # Create another user to act as a member on project alt_demo + local alt_demo_member + alt_demo_member=$(get_or_create_user "alt_demo_member" \ + "$ADMIN_PASSWORD" "default" "alt_demo_member@example.com") + async_run ks-alt-member-user get_or_add_user_project_role $member_role $alt_demo_member $alt_demo_project + + # Create another user to act as a reader on project alt_demo + local alt_demo_reader + alt_demo_reader=$(get_or_create_user "alt_demo_reader" \ + "$ADMIN_PASSWORD" "default" "alt_demo_reader@example.com") + async_run ks-alt-reader-user get_or_add_user_project_role $reader_role $alt_demo_reader $alt_demo_project + + # Create two users, give one the member role on the system and the other the + # reader role on the system. These two users model system-member and + # system-reader personas. The admin user already has the admin role on the + # system and we can re-use this user as a system-admin. 
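+    # (For illustration, a system-scoped token for one of these personas can
+    # be requested with something like:
+    #   openstack --os-auth-url $KEYSTONE_SERVICE_URI \
+    #       --os-username system_reader --os-user-domain-id default \
+    #       --os-password $ADMIN_PASSWORD --os-system-scope all token issue
+    # -- a sketch only, not something the scripted setup relies on.)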
+ system_member_user=$(get_or_create_user "system_member" \ + "$ADMIN_PASSWORD" "default" "system_member@example.com") + async_run ks-system-member get_or_add_user_system_role $member_role $system_member_user "all" + + system_reader_user=$(get_or_create_user "system_reader" \ + "$ADMIN_PASSWORD" "default" "system_reader@example.com") + async_run ks-system-reader get_or_add_user_system_role $reader_role $system_reader_user "all" + + # groups + local admin_group + admin_group=$(get_or_create_group "admins" \ + "default" "openstack admin group") + local non_admin_group + non_admin_group=$(get_or_create_group "nonadmins" \ + "default" "non-admin group") + + async_run ks-group-memberdemo get_or_add_group_project_role $member_role $non_admin_group $demo_project + async_run ks-group-anotherdemo get_or_add_group_project_role $another_role $non_admin_group $demo_project + async_run ks-group-memberalt get_or_add_group_project_role $member_role $non_admin_group $alt_demo_project + async_run ks-group-anotheralt get_or_add_group_project_role $another_role $non_admin_group $alt_demo_project + async_run ks-group-admin get_or_add_group_project_role $admin_role $admin_group $admin_project + + async_wait ks-demo-{member,admin,another,invis,reader} + async_wait ks-alt-{admin,another,member-user,reader-user} + async_wait ks-system-{member,reader} + async_wait ks-group-{memberdemo,anotherdemo,memberalt,anotheralt,admin} + + if is_service_enabled ldap; then + create_ldap_domain + fi +} + +# Create a user that is capable of verifying keystone tokens for use with auth_token middleware. +# +# create_service_user [role] +# +# We always add the service role, other roles are also allowed to be added as historically +# a lot of projects have configured themselves with the admin or other role here if they are +# using this user for other purposes beyond simply auth_token middleware. +function create_service_user { + get_or_create_user "$1" "$SERVICE_PASSWORD" "$SERVICE_DOMAIN_NAME" + get_or_add_user_project_role service "$1" "$SERVICE_PROJECT_NAME" "$SERVICE_DOMAIN_NAME" "$SERVICE_DOMAIN_NAME" + + if [[ -n "$2" ]]; then + get_or_add_user_project_role "$2" "$1" "$SERVICE_PROJECT_NAME" "$SERVICE_DOMAIN_NAME" "$SERVICE_DOMAIN_NAME" + fi +} + +# Configure a service to use the auth token middleware. +# +# configure_keystone_authtoken_middleware conf_file admin_user IGNORED [section] +# +# section defaults to keystone_authtoken, which is where auth_token looks in +# the .conf file. If the paste config file is used (api-paste.ini) then +# provide the section name for the auth_token filter. 
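+#
+# Example (a sketch; the exact arguments depend on the calling service, see
+# e.g. configure_glance above):
+#
+#   configure_keystone_authtoken_middleware $GLANCE_API_CONF glance
+#
+# which fills in auth_type, auth_url, username, password and the project and
+# domain options in the [keystone_authtoken] section of glance-api.conf.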
+function configure_keystone_authtoken_middleware { + local conf_file=$1 + local admin_user=$2 + local section=${3:-keystone_authtoken} + local service_type=$4 + + iniset $conf_file $section auth_type password + iniset $conf_file $section interface public + iniset $conf_file $section auth_url $KEYSTONE_SERVICE_URI + iniset $conf_file $section username $admin_user + iniset $conf_file $section password $SERVICE_PASSWORD + iniset $conf_file $section user_domain_name "$SERVICE_DOMAIN_NAME" + iniset $conf_file $section project_name $SERVICE_PROJECT_NAME + iniset $conf_file $section project_domain_name "$SERVICE_DOMAIN_NAME" + + iniset $conf_file $section cafile $SSL_BUNDLE_FILE + iniset $conf_file $section memcached_servers $MEMCACHE_SERVERS + if [[ -n "$service_type" ]]; then + iniset $conf_file $section service_type $service_type + fi +} + +# configure_auth_token_middleware conf_file admin_user IGNORED [section] +# TODO(frickler): old function for backwards compatibility, remove in U cycle +function configure_auth_token_middleware { + echo "WARNING: configure_auth_token_middleware is deprecated, use configure_keystone_authtoken_middleware instead" + configure_keystone_authtoken_middleware $1 $2 $4 +} + +# init_keystone() - Initialize databases, etc. +function init_keystone { + if is_service_enabled ldap; then + init_ldap + fi + + if [[ "$RECREATE_KEYSTONE_DB" == True ]]; then + # (Re)create keystone database + recreate_database keystone + fi + + time_start "dbsync" + # Initialize keystone database + $KEYSTONE_BIN_DIR/keystone-manage --config-file $KEYSTONE_CONF db_sync + time_stop "dbsync" + + if [[ "$KEYSTONE_TOKEN_FORMAT" == "fernet" ]]; then + rm -rf "$KEYSTONE_CONF_DIR/fernet-keys/" + $KEYSTONE_BIN_DIR/keystone-manage --config-file $KEYSTONE_CONF fernet_setup + fi + rm -rf "$KEYSTONE_CONF_DIR/credential-keys/" + $KEYSTONE_BIN_DIR/keystone-manage --config-file $KEYSTONE_CONF credential_setup + +} + +# install_keystoneauth() - Collect source and prepare +function install_keystoneauth { + if use_library_from_git "keystoneauth"; then + git_clone_by_name "keystoneauth" + setup_dev_lib "keystoneauth" + fi +} + +# install_keystoneclient() - Collect source and prepare +function install_keystoneclient { + if use_library_from_git "python-keystoneclient"; then + git_clone_by_name "python-keystoneclient" + setup_dev_lib "python-keystoneclient" + fi +} + +# install_keystonemiddleware() - Collect source and prepare +function install_keystonemiddleware { + # install_keystonemiddleware() is called when keystonemiddleware is needed + # to provide an opportunity to install it from the source repo + if use_library_from_git "keystonemiddleware"; then + git_clone_by_name "keystonemiddleware" + setup_dev_lib "keystonemiddleware" + else + # When not installing from repo, keystonemiddleware is still needed... + pip_install_gr keystonemiddleware + fi + # Install the memcache library so keystonemiddleware can cache tokens in a + # shared location. 
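+    # (Token caching itself is enabled per service by pointing
+    # memcached_servers at $MEMCACHE_SERVERS in its [keystone_authtoken]
+    # section, which configure_keystone_authtoken_middleware above does.)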
+ pip_install_gr python-memcached +} + +# install_keystone() - Collect source and prepare +function install_keystone { + # only install ldap if the service has been enabled + if is_service_enabled ldap; then + install_ldap + fi + + git_clone $KEYSTONE_REPO $KEYSTONE_DIR $KEYSTONE_BRANCH + setup_develop $KEYSTONE_DIR + + if is_service_enabled ldap; then + setup_develop $KEYSTONE_DIR ldap + fi +} + +# start_keystone() - Start running processes +function start_keystone { + # Get right service port for testing + local service_port=$KEYSTONE_SERVICE_PORT + local auth_protocol=$KEYSTONE_SERVICE_PROTOCOL + if is_service_enabled tls-proxy; then + service_port=$KEYSTONE_SERVICE_PORT_INT + auth_protocol="http" + fi + + run_process keystone "$(which uwsgi) --procname-prefix keystone --ini $KEYSTONE_PUBLIC_UWSGI_CONF" "" + + echo "Waiting for keystone to start..." + # Check that the keystone service is running. Even if the tls tunnel + # should be enabled, make sure the internal port is checked using + # unencryted traffic at this point. + # If running in Apache, use the path rather than port. + + local service_uri=$auth_protocol://$KEYSTONE_SERVICE_HOST/identity/v3/ + + if ! wait_for_service $SERVICE_TIMEOUT $service_uri; then + die $LINENO "keystone did not start" + fi + + # Start proxies if enabled + if is_service_enabled tls-proxy; then + start_tls_proxy keystone-service '*' $KEYSTONE_SERVICE_PORT $KEYSTONE_SERVICE_HOST $KEYSTONE_SERVICE_PORT_INT + fi + + # (re)start memcached to make sure we have a clean memcache. + restart_service memcached +} + +# stop_keystone() - Stop running processes +function stop_keystone { + stop_process keystone +} + +# bootstrap_keystone() - Initialize user, role and project +# This function uses the following GLOBAL variables: +# - ``KEYSTONE_BIN_DIR`` +# - ``ADMIN_PASSWORD`` +# - ``REGION_NAME`` +# - ``KEYSTONE_SERVICE_URI`` +function bootstrap_keystone { + $KEYSTONE_BIN_DIR/keystone-manage bootstrap \ + --bootstrap-username admin \ + --bootstrap-password "$ADMIN_PASSWORD" \ + --bootstrap-project-name admin \ + --bootstrap-role-name admin \ + --bootstrap-service-name keystone \ + --bootstrap-region-id "$REGION_NAME" \ + --bootstrap-public-url "$KEYSTONE_SERVICE_URI" + if [ "$KEYSTONE_ADMIN_ENDPOINT" == "True" ]; then + openstack endpoint create --region "$REGION_NAME" \ + --os-username admin \ + --os-user-domain-id default \ + --os-password "$ADMIN_PASSWORD" \ + --os-project-name admin \ + --os-project-domain-id default \ + keystone admin "$KEYSTONE_SERVICE_URI" + fi +} + +# create_ldap_domain() - Create domain file and initialize domain with a user +function create_ldap_domain { + # Creates domain Users + openstack domain create --description "LDAP domain" Users + + # Create domain file inside etc/keystone/domains + KEYSTONE_LDAP_DOMAIN_FILE=$KEYSTONE_CONF_DIR/domains/keystone.Users.conf + mkdir -p "$KEYSTONE_CONF_DIR/domains" + touch "$KEYSTONE_LDAP_DOMAIN_FILE" + + # Set identity driver 'ldap' + iniset $KEYSTONE_LDAP_DOMAIN_FILE identity driver "ldap" + + # LDAP settings for Users domain + iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap user_tree_dn "ou=Users,$LDAP_BASE_DN" + iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap user_objectclass "inetOrgPerson" + iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap user_name_attribute "cn" + iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap user_mail_attribute "mail" + iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap user_id_attribute "uid" + iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap user_enabled_emulation "True" + iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap user 
"cn=Manager,dc=openstack,dc=org" + iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap url "ldap://localhost" + iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap suffix $LDAP_BASE_DN + iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap password $LDAP_PASSWORD + iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap group_tree_dn "ou=Groups,$LDAP_BASE_DN" + iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap group_objectclass "groupOfNames" + iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap group_name_attribute "cn" + iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap group_id_attribute "cn" + + # Restart apache and identity services to associate domain and conf file + sudo service apache2 reload + sudo systemctl restart devstack@keystone + + # Create LDAP user.ldif and add user to LDAP backend + local tmp_ldap_dir + tmp_ldap_dir=$(mktemp -d -t ldap.$$.XXXXXXXXXX) + + _ldap_varsubst $FILES/ldap/user.ldif.in $slappass >$tmp_ldap_dir/user.ldif + sudo ldapadd -x -w $LDAP_PASSWORD -D "$LDAP_MANAGER_DN" -H $LDAP_URL -c -f $tmp_ldap_dir/user.ldif + rm -rf $tmp_ldap_dir + + local admin_project + admin_project=$(get_or_create_project "admin" default) + local ldap_user + ldap_user=$(openstack user show --domain=Users demo -f value -c id) + local admin_role="admin" + get_or_create_role $admin_role + + # Grant demo LDAP user access to project and role + get_or_add_user_project_role $admin_role $ldap_user $admin_project +} + +# Restore xtrace +$_XTRACE_KEYSTONE + +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: diff --git a/lib/ldap b/lib/ldap new file mode 100644 index 0000000000..66c2afc4d5 --- /dev/null +++ b/lib/ldap @@ -0,0 +1,190 @@ +#!/bin/bash +# +# lib/ldap +# Functions to control the installation and configuration of **ldap** + +# ``lib/keystone`` calls the entry points in this order: +# +# - install_ldap() + +# Save trace setting +_XTRACE_LDAP=$(set +o | grep xtrace) +set +o xtrace + + +LDAP_DOMAIN=${LDAP_DOMAIN:-openstack.org} +# Make an array of domain components +DC=(${LDAP_DOMAIN/./ }) + +# Leftmost domain component used in top-level entry +LDAP_BASE_DC=${DC[0]} + +# Build the base DN +dn="" +for dc in ${DC[*]}; do + dn="$dn,dc=$dc" +done +LDAP_BASE_DN=${dn#,} + +LDAP_MANAGER_DN="${LDAP_MANAGER_DN:-cn=Manager,${LDAP_BASE_DN}}" +LDAP_URL=${LDAP_URL:-ldap://localhost} + +LDAP_SERVICE_NAME=slapd + +if is_ubuntu; then + LDAP_OLCDB_NUMBER=1 + LDAP_OLCDB_TYPE=mdb + LDAP_ROOTPW_COMMAND=replace +elif is_fedora; then + LDAP_OLCDB_NUMBER=2 + LDAP_OLCDB_TYPE=hdb + LDAP_ROOTPW_COMMAND=add +fi + + +# Functions +# --------- + +# Perform common variable substitutions on the data files +# _ldap_varsubst file +function _ldap_varsubst { + local infile=$1 + local slappass=$2 + sed -e " + s|\${LDAP_OLCDB_NUMBER}|$LDAP_OLCDB_NUMBER| + s|\${LDAP_OLCDB_TYPE}|$LDAP_OLCDB_TYPE| + s|\${SLAPPASS}|$slappass| + s|\${LDAP_ROOTPW_COMMAND}|$LDAP_ROOTPW_COMMAND| + s|\${BASE_DC}|$LDAP_BASE_DC| + s|\${BASE_DN}|$LDAP_BASE_DN| + s|\${MANAGER_DN}|$LDAP_MANAGER_DN| + " $infile +} + +# clean_ldap() - Remove ldap server +function cleanup_ldap { + uninstall_package $(get_packages ldap) + if is_ubuntu; then + uninstall_package slapd ldap-utils libslp1 + sudo rm -rf /etc/ldap/ldap.conf /var/lib/ldap + elif is_fedora; then + sudo rm -rf /etc/openldap /var/lib/ldap + fi +} + +# init_ldap +# init_ldap() - Initialize databases, etc. 
+function init_ldap { + local keystone_ldif + + local tmp_ldap_dir + tmp_ldap_dir=$(mktemp -d -t ldap.$$.XXXXXXXXXX) + + # Remove data but not schemas + clear_ldap_state + if is_ubuntu; then + # a bug in OpenLDAP 2.6.7+ + # (https://bugs.openldap.org/show_bug.cgi?id=10336) causes slapd crash + # after deleting nonexisting tree. It is fixed upstream, but Ubuntu is + # still not having a fix in Noble. Try temporarily simly restarting the + # process. + sudo service $LDAP_SERVICE_NAME restart + fi + + # Add our top level ldap nodes + if ldapsearch -x -w $LDAP_PASSWORD -D "$LDAP_MANAGER_DN" -H $LDAP_URL -b "$LDAP_BASE_DN" | grep -q "Success"; then + printf "LDAP already configured for $LDAP_BASE_DC\n" + else + printf "Configuring LDAP for $LDAP_BASE_DC\n" + # If BASE_DN is changed, the user may override the default file + if [[ -r $FILES/ldap/${LDAP_BASE_DC}.ldif.in ]]; then + local keystone_ldif=${LDAP_BASE_DC}.ldif + else + local keystone_ldif=keystone.ldif + fi + _ldap_varsubst $FILES/ldap/${keystone_ldif}.in >$tmp_ldap_dir/${keystone_ldif} + if [[ -r $tmp_ldap_dir/${keystone_ldif} ]]; then + ldapadd -x -w $LDAP_PASSWORD -D "$LDAP_MANAGER_DN" -H $LDAP_URL -c -f $tmp_ldap_dir/${keystone_ldif} + fi + fi + + rm -rf $tmp_ldap_dir +} + +# install_ldap +# install_ldap() - Collect source and prepare +function install_ldap { + echo "Installing LDAP inside function" + echo "os_VENDOR is $os_VENDOR" + + local tmp_ldap_dir + tmp_ldap_dir=$(mktemp -d -t ldap.$$.XXXXXXXXXX) + + printf "installing OpenLDAP" + if is_ubuntu; then + configure_ldap + elif is_fedora; then + start_ldap + fi + + echo "LDAP_PASSWORD is $LDAP_PASSWORD" + local slappass + slappass=$(slappasswd -s $LDAP_PASSWORD) + printf "LDAP secret is $slappass\n" + + # Create manager.ldif and add to olcdb + _ldap_varsubst $FILES/ldap/manager.ldif.in $slappass >$tmp_ldap_dir/manager.ldif + sudo ldapmodify -Y EXTERNAL -H ldapi:/// -f $tmp_ldap_dir/manager.ldif + + # On fedora we need to manually add cosine and inetorgperson schemas + if is_fedora; then + sudo ldapadd -Y EXTERNAL -H ldapi:/// -f /etc/openldap/schema/cosine.ldif + sudo ldapadd -Y EXTERNAL -H ldapi:/// -f /etc/openldap/schema/inetorgperson.ldif + fi + + rm -rf $tmp_ldap_dir +} + +# configure_ldap() - Configure LDAP - reconfigure slapd +function configure_ldap { + sudo debconf-set-selections </dev/null)" ]]; then + local backing_file=$DATA_DIR/$vg$BACKING_FILE_SUFFIX + + if [[ -n "$vg$BACKING_FILE_SUFFIX" ]] && \ + [[ -e "/etc/systemd/system/$vg$BACKING_FILE_SUFFIX.service" ]]; then + sudo systemctl disable --now $vg$BACKING_FILE_SUFFIX.service + sudo rm -f /etc/systemd/system/$vg$BACKING_FILE_SUFFIX.service + sudo systemctl daemon-reload + fi + + # If the backing physical device is a loop device, it was probably setup by DevStack + if [[ -n "$backing_file" ]] && [[ -e "$backing_file" ]]; then + rm -f $backing_file + fi + fi +} + +# _create_lvm_volume_group creates default volume group +# +# Usage: _create_lvm_volume_group() $vg $size +function _create_lvm_volume_group { + local vg=$1 + local size=$2 + + local backing_file=$DATA_DIR/$vg$BACKING_FILE_SUFFIX + if ! sudo vgs $vg; then + # Only create if the file doesn't already exists + [[ -f $backing_file ]] || truncate -s $size $backing_file + + local directio="" + # Check to see if we can do direct-io + if losetup -h | grep -q direct-io; then + directio="--direct-io=on" + fi + + # Only create systemd service if it doesn't already exists + if [[ ! 
-e "/etc/systemd/system/$vg$BACKING_FILE_SUFFIX.service" ]]; then + sed -e " + s|%DIRECTIO%|${directio}|g; + s|%BACKING_FILE%|${backing_file}|g; + " $FILES/lvm-backing-file.template | sudo tee \ + /etc/systemd/system/$vg$BACKING_FILE_SUFFIX.service + + sudo systemctl daemon-reload + sudo systemctl enable --now $vg$BACKING_FILE_SUFFIX.service + fi + + local vg_dev + vg_dev=$(sudo losetup --associated $backing_file -O NAME -n) + + # Only create volume group if it doesn't already exist + if ! sudo vgs $vg; then + sudo vgcreate $vg $vg_dev + fi + fi +} + +# init_lvm_volume_group() initializes the volume group creating the backing +# file if necessary +# +# Usage: init_lvm_volume_group() $vg $size +function init_lvm_volume_group { + local vg=$1 + local size=$2 + + # Start the tgtd service on Fedora if tgtadm is used + if is_fedora; then + start_service tgtd + fi + + # Start with a clean volume group + _create_lvm_volume_group $vg $size + + if is_service_enabled cinder; then + # Remove iscsi targets + if [ "$CINDER_TARGET_HELPER" = "lioadm" ]; then + sudo cinder-rtstool get-targets | sudo xargs -rn 1 cinder-rtstool delete + elif [ "$CINDER_TARGET_HELPER" = "tgtadm" ]; then + sudo tgtadm --op show --mode target | awk '/Target/ {print $3}' | sudo xargs -r -n1 tgt-admin --delete + elif [ "$CINDER_TARGET_HELPER" = "nvmet" ]; then + # If we don't disconnect everything vgremove will block + sudo nvme disconnect-all + sudo nvmetcli clear + fi + fi + _clean_lvm_volume_group $vg +} + +# Sentinal value to ensure that init of default lvm volume group is +# only performed once across calls of init_default_lvm_volume_group. +_DEFAULT_LVM_INIT=${_DEFAULT_LVM_INIT:-0} + +# init_default_lvm_volume_group() initializes a default volume group +# intended to be shared between cinder and nova. It is idempotent; +# the init of the default volume group is guaranteed to be performed +# only once so that either or both of the dependent services can +# safely call this function. +# +# Usage: init_default_lvm_volume_group() +function init_default_lvm_volume_group { + if [[ "$_DEFAULT_LVM_INIT" = "0" ]]; then + init_lvm_volume_group $DEFAULT_VOLUME_GROUP_NAME $VOLUME_BACKING_FILE_SIZE + _DEFAULT_LVM_INIT=1 + fi +} + +# clean_lvm_filter() Remove the filter rule set in set_lvm_filter() +# +# Usage: clean_lvm_filter() +function clean_lvm_filter { + sudo sed -i "s/^.*# from devstack$//" /etc/lvm/lvm.conf +} + +# set_lvm_filter() Gather all devices configured for LVM and +# use them to build a global device filter +# set_lvm_filter() Create a device filter +# and add to /etc/lvm.conf. Note this uses +# all current PV's in use by LVM on the +# system to build it's filter. 
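+# The generated line looks roughly like the following (a sketch assuming a
+# single PV backed by /dev/loop0):
+#
+#   global_filter = [ "a|loop0|", "r|.*|" ] # from devstack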
+# +# Usage: set_lvm_filter() +function set_lvm_filter { + local filter_suffix='"r|.*|" ] # from devstack' + local filter_string="global_filter = [ " + local pv + local vg + local line + + for pv_info in $(sudo pvs --noheadings -o name); do + pv=$(echo -e "${pv_info}" | sed 's/ //g' | sed 's/\/dev\///g') + new="\"a|$pv|\", " + filter_string=$filter_string$new + done + filter_string=$filter_string$filter_suffix + + clean_lvm_filter + sudo sed -i "/# global_filter = \[.*\]/a\ $filter_string" /etc/lvm/lvm.conf + echo_summary "set lvm.conf device global_filter to: $filter_string" +} + +# Restore xtrace +$_XTRACE_LVM + +# mode: shell-script +# End: diff --git a/lib/neutron b/lib/neutron new file mode 100644 index 0000000000..dec15fb782 --- /dev/null +++ b/lib/neutron @@ -0,0 +1,1158 @@ +#!/bin/bash +# +# lib/neutron +# functions - functions specific to neutron + +# Dependencies: +# ``functions`` file +# ``DEST`` must be defined +# ``STACK_USER`` must be defined + +# ``stack.sh`` calls the entry points in this order: +# +# - install_neutron_agent_packages +# - install_neutronclient +# - install_neutron +# - install_neutron_third_party +# - configure_neutron +# - init_neutron +# - configure_neutron_third_party +# - init_neutron_third_party +# - start_neutron_third_party +# - create_nova_conf_neutron +# - configure_neutron_after_post_config +# - start_neutron_service_and_check +# - check_neutron_third_party_integration +# - start_neutron_agents +# - create_neutron_initial_network +# +# ``unstack.sh`` calls the entry points in this order: +# +# - stop_neutron +# - stop_neutron_third_party +# - cleanup_neutron + +# Functions in lib/neutron are classified into the following categories: +# +# - entry points (called from stack.sh or unstack.sh) +# - internal functions +# - neutron exercises +# - 3rd party programs + + +# Neutron Networking +# ------------------ + +# Make sure that neutron is enabled in ``ENABLED_SERVICES``. If you want +# to run Neutron on this host, make sure that q-svc is also in +# ``ENABLED_SERVICES``. +# +# See "Neutron Network Configuration" below for additional variables +# that must be set in localrc for connectivity across hosts with +# Neutron. + +# Settings +# -------- + + +# Neutron Network Configuration +# ----------------------------- + +if is_service_enabled tls-proxy; then + Q_PROTOCOL="https" +fi + +# Set up default directories +GITDIR["python-neutronclient"]=$DEST/python-neutronclient + +NEUTRON_DIR=$DEST/neutron +NEUTRON_FWAAS_DIR=$DEST/neutron-fwaas + +# Support entry points installation of console scripts +if [[ -d $NEUTRON_DIR/bin/neutron-server ]]; then + NEUTRON_BIN_DIR=$NEUTRON_DIR/bin +else + NEUTRON_BIN_DIR=$(get_python_exec_prefix) +fi + +NEUTRON_CONF_DIR=/etc/neutron +NEUTRON_CONF=$NEUTRON_CONF_DIR/neutron.conf +export NEUTRON_TEST_CONFIG_FILE=${NEUTRON_TEST_CONFIG_FILE:-"$NEUTRON_CONF_DIR/debug.ini"} + +NEUTRON_UWSGI=neutron.wsgi.api:application +NEUTRON_UWSGI_CONF=$NEUTRON_CONF_DIR/neutron-api-uwsgi.ini + +# If NEUTRON_ENFORCE_SCOPE == True, it will set "enforce_scope" +# and "enforce_new_defaults" to True in the Neutron's config to enforce usage +# of the new RBAC policies and scopes. Set it to False if you do not +# want to run Neutron with new RBAC. +NEUTRON_ENFORCE_SCOPE=$(trueorfalse True NEUTRON_ENFORCE_SCOPE) + +# Agent binaries. 
Note, binary paths for other agents are set in per-service +# scripts in lib/neutron_plugins/services/ +AGENT_DHCP_BINARY="$NEUTRON_BIN_DIR/neutron-dhcp-agent" +AGENT_L3_BINARY=${AGENT_L3_BINARY:-"$NEUTRON_BIN_DIR/neutron-l3-agent"} +AGENT_META_BINARY="$NEUTRON_BIN_DIR/neutron-metadata-agent" + +# Agent config files. Note, plugin-specific Q_PLUGIN_CONF_FILE is set and +# loaded from per-plugin scripts in lib/neutron_plugins/ +Q_DHCP_CONF_FILE=$NEUTRON_CONF_DIR/dhcp_agent.ini +# NOTE(slaweq): NEUTRON_DHCP_CONF is used e.g. in neutron repository, +# it was previously defined in the lib/neutron module which is now deleted. +NEUTRON_DHCP_CONF=$Q_DHCP_CONF_FILE +Q_L3_CONF_FILE=$NEUTRON_CONF_DIR/l3_agent.ini +# NOTE(slaweq): NEUTRON_L3_CONF is used e.g. in neutron repository, +# it was previously defined in the lib/neutron module which is now deleted. +NEUTRON_L3_CONF=$Q_L3_CONF_FILE +Q_META_CONF_FILE=$NEUTRON_CONF_DIR/metadata_agent.ini + +# Default name for Neutron database +Q_DB_NAME=${Q_DB_NAME:-neutron} +# Default Neutron Plugin +Q_PLUGIN=${Q_PLUGIN:-ml2} +# Default Neutron Port +Q_PORT=${Q_PORT:-9696} +# Default Neutron Internal Port when using TLS proxy +Q_PORT_INT=${Q_PORT_INT:-19696} +# Default Neutron Host +Q_HOST=${Q_HOST:-$SERVICE_HOST} +# Default protocol +Q_PROTOCOL=${Q_PROTOCOL:-$SERVICE_PROTOCOL} +# Default listen address +Q_LISTEN_ADDRESS=${Q_LISTEN_ADDRESS:-$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)} +# Default admin username +Q_ADMIN_USERNAME=${Q_ADMIN_USERNAME:-neutron} +# Default auth strategy +Q_AUTH_STRATEGY=${Q_AUTH_STRATEGY:-keystone} +# RHEL's support for namespaces requires using veths with ovs +Q_OVS_USE_VETH=${Q_OVS_USE_VETH:-False} +Q_USE_ROOTWRAP=${Q_USE_ROOTWRAP:-True} +Q_USE_ROOTWRAP_DAEMON=$(trueorfalse True Q_USE_ROOTWRAP_DAEMON) +# Meta data IP +Q_META_DATA_IP=${Q_META_DATA_IP:-$(ipv6_unquote $SERVICE_HOST)} +# Allow Overlapping IP among subnets +Q_ALLOW_OVERLAPPING_IP=${Q_ALLOW_OVERLAPPING_IP:-True} +Q_NOTIFY_NOVA_PORT_STATUS_CHANGES=${Q_NOTIFY_NOVA_PORT_STATUS_CHANGES:-True} +Q_NOTIFY_NOVA_PORT_DATA_CHANGES=${Q_NOTIFY_NOVA_PORT_DATA_CHANGES:-True} +VIF_PLUGGING_IS_FATAL=${VIF_PLUGGING_IS_FATAL:-True} +VIF_PLUGGING_TIMEOUT=${VIF_PLUGGING_TIMEOUT:-300} + +# Allow to skip stopping of OVN services +SKIP_STOP_OVN=${SKIP_STOP_OVN:-False} + +# The directory which contains files for Q_PLUGIN_EXTRA_CONF_FILES. +# /etc/neutron is assumed by many of devstack plugins. Do not change. +_Q_PLUGIN_EXTRA_CONF_PATH=/etc/neutron + +# The name of the service in the endpoint URL +NEUTRON_ENDPOINT_SERVICE_NAME=${NEUTRON_ENDPOINT_SERVICE_NAME-"networking"} +if [[ -z "$NEUTRON_ENDPOINT_SERVICE_NAME" ]]; then + NEUTRON_ENDPOINT_SERVICE_NAME="networking" +fi + +# Source install libraries +ALEMBIC_REPO=${ALEMBIC_REPO:-https://github.com/sqlalchemy/alembic.git} +ALEMBIC_DIR=${ALEMBIC_DIR:-$DEST/alembic} +ALEMBIC_BRANCH=${ALEMBIC_BRANCH:-main} +SQLALCHEMY_REPO=${SQLALCHEMY_REPO:-https://github.com/sqlalchemy/sqlalchemy.git} +SQLALCHEMY_DIR=${SQLALCHEMY_DIR:-$DEST/sqlalchemy} +SQLALCHEMY_BRANCH=${SQLALCHEMY_BRANCH:-main} + +# List of config file names in addition to the main plugin config file +# To add additional plugin config files, use ``neutron_server_config_add`` +# utility function. For example: +# +# ``neutron_server_config_add file1`` +# +# These config files are relative to ``/etc/neutron``. The above +# example would specify ``--config-file /etc/neutron/file1`` for +# neutron server. 
+declare -a -g Q_PLUGIN_EXTRA_CONF_FILES + +# same as Q_PLUGIN_EXTRA_CONF_FILES, but with absolute path. +declare -a -g _Q_PLUGIN_EXTRA_CONF_FILES_ABS + + +Q_RR_CONF_FILE=$NEUTRON_CONF_DIR/rootwrap.conf +if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then + Q_RR_COMMAND="sudo" +else + NEUTRON_ROOTWRAP=$(get_rootwrap_location neutron) + Q_RR_COMMAND="sudo $NEUTRON_ROOTWRAP $Q_RR_CONF_FILE" + if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then + Q_RR_DAEMON_COMMAND="sudo $NEUTRON_ROOTWRAP-daemon $Q_RR_CONF_FILE" + fi +fi + + +# Distributed Virtual Router (DVR) configuration +# Can be: +# - ``legacy`` - No DVR functionality +# - ``dvr_snat`` - Controller or single node DVR +# - ``dvr`` - Compute node in multi-node DVR +# - ``dvr_no_external`` - Compute node in multi-node DVR, no external network +# +Q_DVR_MODE=${Q_DVR_MODE:-legacy} +if [[ "$Q_DVR_MODE" != "legacy" ]]; then + Q_ML2_PLUGIN_MECHANISM_DRIVERS=openvswitch,l2population +fi + +# Provider Network Configurations +# -------------------------------- + +# The following variables control the Neutron ML2 plugins' allocation +# of tenant networks and availability of provider networks. If these +# are not configured in ``localrc``, tenant networks will be local to +# the host (with no remote connectivity), and no physical resources +# will be available for the allocation of provider networks. + +# To disable tunnels (GRE or VXLAN) for tenant networks, +# set to False in ``local.conf``. +# GRE tunnels are only supported by the openvswitch. +ENABLE_TENANT_TUNNELS=${ENABLE_TENANT_TUNNELS:-True} + +# If using GRE, VXLAN or GENEVE tunnels for tenant networks, +# specify the range of IDs from which tenant networks are +# allocated. Can be overridden in ``localrc`` if necessary. +TENANT_TUNNEL_RANGES=${TENANT_TUNNEL_RANGES:-1:1000} + +# To use VLANs for tenant networks, set to True in localrc. VLANs +# are supported by the ML2 plugins, requiring additional configuration +# described below. +ENABLE_TENANT_VLANS=${ENABLE_TENANT_VLANS:-False} + +# If using VLANs for tenant networks, set in ``localrc`` to specify +# the range of VLAN VIDs from which tenant networks are +# allocated. An external network switch must be configured to +# trunk these VLANs between hosts for multi-host connectivity. +# +# Example: ``TENANT_VLAN_RANGE=1000:1999`` +TENANT_VLAN_RANGE=${TENANT_VLAN_RANGE:-} + +# If using VLANs for tenant networks, or if using flat or VLAN +# provider networks, set in ``localrc`` to the name of the physical +# network, and also configure ``OVS_PHYSICAL_BRIDGE`` for the +# openvswitch agent, as described below. +# +# Example: ``PHYSICAL_NETWORK=default`` +PHYSICAL_NETWORK=${PHYSICAL_NETWORK:-public} + +# With the openvswitch agent, if using VLANs for tenant networks, +# or if using flat or VLAN provider networks, set in ``localrc`` to +# the name of the OVS bridge to use for the physical network. The +# bridge will be created if it does not already exist, but a +# physical interface must be manually added to the bridge as a +# port for external connectivity. +# +# Example: ``OVS_PHYSICAL_BRIDGE=br-eth1`` +OVS_PHYSICAL_BRIDGE=${OVS_PHYSICAL_BRIDGE:-br-ex} + +# With the openvswitch plugin, set to True in ``localrc`` to enable +# provider GRE tunnels when ``ENABLE_TENANT_TUNNELS`` is False. +# +# Example: ``OVS_ENABLE_TUNNELING=True`` +OVS_ENABLE_TUNNELING=${OVS_ENABLE_TUNNELING:-$ENABLE_TENANT_TUNNELS} + +# Use DHCP agent for providing metadata service in the case of +# without L3 agent (No Route Agent), set to True in localrc. 
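+#
+# Example (``local.conf``): ``ENABLE_ISOLATED_METADATA=True``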
+ENABLE_ISOLATED_METADATA=${ENABLE_ISOLATED_METADATA:-False} + +# Add a static route as dhcp option, so the request to 169.254.169.254 +# will be able to reach through a route(DHCP agent) +# This option require ENABLE_ISOLATED_METADATA = True +ENABLE_METADATA_NETWORK=${ENABLE_METADATA_NETWORK:-False} +# Neutron plugin specific functions +# --------------------------------- + +# Please refer to ``lib/neutron_plugins/README.md`` for details. +if [ -f $TOP_DIR/lib/neutron_plugins/$Q_PLUGIN ]; then + source $TOP_DIR/lib/neutron_plugins/$Q_PLUGIN +fi + +# Agent metering service plugin functions +# ------------------------------------------- + +# Hardcoding for 1 service plugin for now +source $TOP_DIR/lib/neutron_plugins/services/metering + +# L3 Service functions +source $TOP_DIR/lib/neutron_plugins/services/l3 + +# Additional Neutron service plugins +source $TOP_DIR/lib/neutron_plugins/services/placement +source $TOP_DIR/lib/neutron_plugins/services/trunk +source $TOP_DIR/lib/neutron_plugins/services/qos +source $TOP_DIR/lib/neutron_plugins/services/segments + +# Use security group or not +if has_neutron_plugin_security_group; then + Q_USE_SECGROUP=${Q_USE_SECGROUP:-True} +else + Q_USE_SECGROUP=False +fi + +# OVN_BRIDGE_MAPPINGS - ovn-bridge-mappings +# NOTE(hjensas): Initialize after sourcing neutron_plugins/services/l3 +# which initialize PUBLIC_BRIDGE. +OVN_BRIDGE_MAPPINGS=${OVN_BRIDGE_MAPPINGS:-$PHYSICAL_NETWORK:$PUBLIC_BRIDGE} + +# Save trace setting +_XTRACE_NEUTRON=$(set +o | grep xtrace) +set +o xtrace + + +# Functions +# --------- + +# Test if any Neutron services are enabled +# is_neutron_enabled +function is_neutron_enabled { + [[ ,${DISABLED_SERVICES} =~ ,"neutron" ]] && return 1 + [[ ,${ENABLED_SERVICES} =~ ,"neutron-" || ,${ENABLED_SERVICES} =~ ,"q-" ]] && return 0 + return 1 +} + +# Test if any Neutron services are enabled +# TODO(slaweq): this is not really needed now and we should remove it as soon +# as it will not be called from any other Devstack plugins, like e.g. Neutron +# plugin +function is_neutron_legacy_enabled { + return 0 +} + +function _determine_config_server { + if [[ "$Q_PLUGIN_EXTRA_CONF_PATH" != '' ]]; then + if [[ "$Q_PLUGIN_EXTRA_CONF_PATH" = "$_Q_PLUGIN_EXTRA_CONF_PATH" ]]; then + deprecated "Q_PLUGIN_EXTRA_CONF_PATH is deprecated" + else + die $LINENO "Q_PLUGIN_EXTRA_CONF_PATH is deprecated" + fi + fi + if [[ ${#Q_PLUGIN_EXTRA_CONF_FILES[@]} > 0 ]]; then + deprecated "Q_PLUGIN_EXTRA_CONF_FILES is deprecated. Use neutron_server_config_add instead." 
+ fi + for cfg_file in ${Q_PLUGIN_EXTRA_CONF_FILES[@]}; do + _Q_PLUGIN_EXTRA_CONF_FILES_ABS+=($_Q_PLUGIN_EXTRA_CONF_PATH/$cfg_file) + done + + local cfg_file + local opts="--config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE" + for cfg_file in ${_Q_PLUGIN_EXTRA_CONF_FILES_ABS[@]}; do + opts+=" --config-file $cfg_file" + done + echo "$opts" +} + +function _determine_config_l3 { + local opts="--config-file $NEUTRON_CONF --config-file $Q_L3_CONF_FILE" + echo "$opts" +} + +function _enable_ovn_maintenance { + if [[ $Q_AGENT == "ovn" ]]; then + enable_service neutron-ovn-maintenance-worker + fi +} + +function _run_ovn_maintenance { + if [[ $Q_AGENT == "ovn" ]]; then + run_process neutron-ovn-maintenance-worker "$NEUTRON_BIN_DIR/neutron-ovn-maintenance-worker $cfg_file_options" + fi +} + +function _stop_ovn_maintenance { + if [[ $Q_AGENT == "ovn" ]]; then + stop_process neutron-ovn-maintenance-worker + fi +} + +# For services and agents that require it, dynamically construct a list of +# --config-file arguments that are passed to the binary. +function determine_config_files { + local opts="" + case "$1" in + "neutron-server") opts="$(_determine_config_server)" ;; + "neutron-l3-agent") opts="$(_determine_config_l3)" ;; + esac + if [ -z "$opts" ] ; then + die $LINENO "Could not determine config files for $1." + fi + echo "$opts" +} + +# configure_neutron() +# Set common config for all neutron server and agents. +function configure_neutron { + _configure_neutron_common + iniset_rpc_backend neutron $NEUTRON_CONF + + if is_service_enabled q-metering neutron-metering; then + _configure_neutron_metering + fi + if is_service_enabled q-agt neutron-agent; then + _configure_neutron_plugin_agent + fi + if is_service_enabled q-dhcp neutron-dhcp; then + _configure_neutron_dhcp_agent + fi + if is_service_enabled q-l3 neutron-l3; then + _configure_neutron_l3_agent + fi + if is_service_enabled q-meta neutron-metadata-agent; then + _configure_neutron_metadata_agent + fi + + if [[ "$Q_DVR_MODE" != "legacy" ]]; then + _configure_dvr + fi + if is_service_enabled ceilometer; then + _configure_neutron_ceilometer_notifications + fi + + if [[ $Q_AGENT == "ovn" ]]; then + configure_ovn + configure_ovn_plugin + fi + + # Configure Neutron's advanced services + if is_service_enabled q-placement neutron-placement; then + configure_placement_extension + fi + if is_service_enabled q-trunk neutron-trunk; then + configure_trunk_extension + fi + if is_service_enabled q-qos neutron-qos; then + configure_qos + if is_service_enabled q-l3 neutron-l3; then + configure_l3_agent_extension_fip_qos + configure_l3_agent_extension_gateway_ip_qos + fi + fi + if is_service_enabled neutron-segments; then + configure_placement_neutron + configure_segments_extension + fi + + # Finally configure Neutron server and core plugin + if is_service_enabled q-agt neutron-agent q-svc neutron-api; then + _configure_neutron_service + fi + + iniset $NEUTRON_CONF DEFAULT api_workers "$API_WORKERS" + # devstack is not a tool for running uber scale OpenStack + # clouds, therefore running without a dedicated RPC worker + # for state reports is more than adequate. 
+ iniset $NEUTRON_CONF DEFAULT rpc_state_report_workers 0 + + write_uwsgi_config "$NEUTRON_UWSGI_CONF" "$NEUTRON_UWSGI" "/networking" "" "neutron-api" +} + +function configure_neutron_nova { + create_nova_conf_neutron $NOVA_CONF + if [[ "${CELLSV2_SETUP}" == "superconductor" ]]; then + for i in $(seq 1 $NOVA_NUM_CELLS); do + local conf + conf=$(conductor_conf $i) + create_nova_conf_neutron $conf + done + fi +} + +function create_nova_conf_neutron { + local conf=${1:-$NOVA_CONF} + iniset $conf neutron auth_type "password" + iniset $conf neutron auth_url "$KEYSTONE_SERVICE_URI" + iniset $conf neutron username nova + iniset $conf neutron password "$SERVICE_PASSWORD" + iniset $conf neutron user_domain_name "$SERVICE_DOMAIN_NAME" + iniset $conf neutron project_name "$SERVICE_PROJECT_NAME" + iniset $conf neutron project_domain_name "$SERVICE_DOMAIN_NAME" + iniset $conf neutron auth_strategy "$Q_AUTH_STRATEGY" + iniset $conf neutron region_name "$REGION_NAME" + + # optionally set options in nova_conf + neutron_plugin_create_nova_conf $conf + + if is_service_enabled q-meta neutron-metadata-agent; then + iniset $conf neutron service_metadata_proxy "True" + fi + + iniset $conf DEFAULT vif_plugging_is_fatal "$VIF_PLUGGING_IS_FATAL" + iniset $conf DEFAULT vif_plugging_timeout "$VIF_PLUGGING_TIMEOUT" +} + +# create_neutron_accounts() - Set up common required neutron accounts + +# Tenant User Roles +# ------------------------------------------------------------------ +# service neutron admin # if enabled + +# Migrated from keystone_data.sh +function create_neutron_accounts { + local neutron_url + neutron_url=$Q_PROTOCOL://$SERVICE_HOST/ + if [ ! -z "$NEUTRON_ENDPOINT_SERVICE_NAME" ]; then + neutron_url=$neutron_url$NEUTRON_ENDPOINT_SERVICE_NAME + fi + + if is_service_enabled q-svc neutron-api; then + + create_service_user "neutron" + + get_or_create_service "neutron" "network" "Neutron Service" + get_or_create_endpoint \ + "network" \ + "$REGION_NAME" "$neutron_url" + fi +} + +# init_neutron() - Initialize databases, etc. +function init_neutron { + recreate_database $Q_DB_NAME + time_start "dbsync" + # Run Neutron db migrations + $NEUTRON_BIN_DIR/neutron-db-manage --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE upgrade head + time_stop "dbsync" +} + +# install_neutron() - Collect source and prepare +function install_neutron { + # Install neutron-lib from git so we make sure we're testing + # the latest code. + if use_library_from_git "neutron-lib"; then + git_clone_by_name "neutron-lib" + setup_dev_lib "neutron-lib" + fi + + # Install SQLAlchemy and alembic from git when these are required + # see https://bugs.launchpad.net/neutron/+bug/2042941 + if use_library_from_git "sqlalchemy"; then + git_clone $SQLALCHEMY_REPO $SQLALCHEMY_DIR $SQLALCHEMY_BRANCH + setup_develop $SQLALCHEMY_DIR + fi + if use_library_from_git "alembic"; then + git_clone $ALEMBIC_REPO $ALEMBIC_DIR $ALEMBIC_BRANCH + setup_develop $ALEMBIC_DIR + fi + + git_clone $NEUTRON_REPO $NEUTRON_DIR $NEUTRON_BRANCH + setup_develop $NEUTRON_DIR + + if [[ $Q_AGENT == "ovn" ]]; then + install_ovn + fi +} + +# install_neutronclient() - Collect source and prepare +function install_neutronclient { + if use_library_from_git "python-neutronclient"; then + git_clone_by_name "python-neutronclient" + setup_dev_lib "python-neutronclient" + fi +} + +# install_neutron_agent_packages() - Collect source and prepare +function install_neutron_agent_packages { + # radvd doesn't come with the OS. Install it if the l3 service is enabled. 
+ if is_service_enabled q-l3 neutron-l3; then + install_package radvd + fi + # install packages that are specific to plugin agent(s) + if is_service_enabled q-agt neutron-agent q-dhcp neutron-dhcp q-l3 neutron-l3; then + neutron_plugin_install_agent_packages + fi +} + +# Finish neutron configuration +function configure_neutron_after_post_config { + if [[ $Q_SERVICE_PLUGIN_CLASSES != '' ]]; then + iniset $NEUTRON_CONF DEFAULT service_plugins $Q_SERVICE_PLUGIN_CLASSES + fi + configure_rbac_policies +} + +# configure_rbac_policies() - Configure Neutron to enforce new RBAC +# policies and scopes if NEUTRON_ENFORCE_SCOPE == True +function configure_rbac_policies { + if [[ "$NEUTRON_ENFORCE_SCOPE" == "True" || "$ENFORCE_SCOPE" == True ]]; then + iniset $NEUTRON_CONF oslo_policy enforce_new_defaults True + iniset $NEUTRON_CONF oslo_policy enforce_scope True + else + iniset $NEUTRON_CONF oslo_policy enforce_new_defaults False + iniset $NEUTRON_CONF oslo_policy enforce_scope False + fi +} + +# Start running OVN processes +function start_ovn_services { + if [[ $Q_AGENT == "ovn" ]]; then + if [ "$VIRT_DRIVER" != 'ironic' ]; then + # NOTE(TheJulia): Ironic's devstack plugin needs to perform + # additional networking configuration to setup a working test + # environment with test virtual machines to emulate baremetal, + # which requires OVN to be up and running earlier to complete + # that base configuration. + init_ovn + start_ovn + fi + if [[ "$OVN_L3_CREATE_PUBLIC_NETWORK" == "True" ]]; then + if [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" != "True" ]]; then + echo "OVN_L3_CREATE_PUBLIC_NETWORK=True is being ignored " + echo "because NEUTRON_CREATE_INITIAL_NETWORKS is set to False" + else + create_public_bridge + fi + fi + fi +} + +# Start running processes +function start_neutron_service_and_check { + local service_port=$Q_PORT + local service_protocol=$Q_PROTOCOL + local cfg_file_options + local neutron_url + + cfg_file_options="$(determine_config_files neutron-server)" + + if is_service_enabled tls-proxy; then + service_port=$Q_PORT_INT + service_protocol="http" + fi + + # Start the Neutron service + # The default value of "rpc_workers" is None (not defined). If + # "rpc_workers" is explicitly set to 0, the RPC workers process + # should not be executed. + local rpc_workers + rpc_workers=$(iniget_multiline $NEUTRON_CONF DEFAULT rpc_workers) + + enable_service neutron-api + run_process neutron-api "$(which uwsgi) --procname-prefix neutron-api --ini $NEUTRON_UWSGI_CONF" + neutron_url=$Q_PROTOCOL://$Q_HOST/ + if [ "$rpc_workers" != "0" ]; then + enable_service neutron-rpc-server + fi + enable_service neutron-periodic-workers + _enable_ovn_maintenance + if [ "$rpc_workers" != "0" ]; then + run_process neutron-rpc-server "$NEUTRON_BIN_DIR/neutron-rpc-server $cfg_file_options" + fi + run_process neutron-periodic-workers "$NEUTRON_BIN_DIR/neutron-periodic-workers $cfg_file_options" + _run_ovn_maintenance + if [ ! -z "$NEUTRON_ENDPOINT_SERVICE_NAME" ]; then + neutron_url=$neutron_url$NEUTRON_ENDPOINT_SERVICE_NAME + fi + echo "Waiting for Neutron to start..." 
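+    # Readiness check: poll the Neutron API root URL with wget until it
+    # answers or $SERVICE_TIMEOUT expires. ${ssl_ca} is assumed to carry any
+    # extra wget TLS options (for example a CA bundle) and expands to nothing
+    # for plain HTTP.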
+ + local testcmd="wget ${ssl_ca} --no-proxy -q -O- $neutron_url" + test_with_retry "$testcmd" "Neutron did not start" $SERVICE_TIMEOUT +} + +function start_neutron { + start_l2_agent "$@" + start_other_agents "$@" +} + +# Control of the l2 agent is separated out to make it easier to test partial +# upgrades (everything upgraded except the L2 agent) +function start_l2_agent { + run_process q-agt "$AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE" + + if is_provider_network && [[ $Q_AGENT == "openvswitch" ]]; then + sudo ovs-vsctl --no-wait -- --may-exist add-port $OVS_PHYSICAL_BRIDGE $PUBLIC_INTERFACE + sudo ip link set $OVS_PHYSICAL_BRIDGE up + sudo ip link set br-int up + sudo ip link set $PUBLIC_INTERFACE up + if is_ironic_hardware; then + for IP in $(ip addr show dev $PUBLIC_INTERFACE | grep ' inet ' | awk '{print $2}'); do + sudo ip addr del $IP dev $PUBLIC_INTERFACE + sudo ip addr add $IP dev $OVS_PHYSICAL_BRIDGE + done + sudo ip route replace $FIXED_RANGE via $NETWORK_GATEWAY dev $OVS_PHYSICAL_BRIDGE + fi + fi +} + +function start_other_agents { + run_process q-dhcp "$AGENT_DHCP_BINARY --config-file $NEUTRON_CONF --config-file $Q_DHCP_CONF_FILE" + + run_process q-l3 "$AGENT_L3_BINARY $(determine_config_files neutron-l3-agent)" + + run_process q-meta "$AGENT_META_BINARY --config-file $NEUTRON_CONF --config-file $Q_META_CONF_FILE" + run_process q-metering "$AGENT_METERING_BINARY --config-file $NEUTRON_CONF --config-file $METERING_AGENT_CONF_FILENAME" +} + +# Start running processes, including screen +function start_neutron_agents { + # NOTE(slaweq): it's now just a wrapper for start_neutron function + start_neutron "$@" +} + +function stop_l2_agent { + stop_process q-agt +} + +# stop_other() - Stop running processes +function stop_other { + if is_service_enabled q-dhcp neutron-dhcp; then + stop_process q-dhcp + pid=$(ps aux | awk '/[d]nsmasq.+interface=(tap|ns-)/ { print $2 }') + [ ! -z "$pid" ] && sudo kill -9 $pid + fi + + stop_process neutron-rpc-server + stop_process neutron-periodic-workers + stop_process neutron-api + _stop_ovn_maintenance + + if is_service_enabled q-l3 neutron-l3; then + sudo pkill -f "radvd -C $DATA_DIR/neutron/ra" + stop_process q-l3 + fi + + if is_service_enabled q-meta neutron-metadata-agent; then + stop_process q-meta + fi + + if is_service_enabled q-metering neutron-metering; then + neutron_metering_stop + fi + + if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then + # pkill takes care not to kill itself, but it may kill its parent + # sudo unless we use the "ps | grep [f]oo" trick + sudo pkill -9 -f "$NEUTRON_ROOTWRAP-[d]aemon" || : + fi +} + +# stop_neutron() - Stop running processes (non-screen) +function stop_neutron { + stop_other + stop_l2_agent + + if [[ $Q_AGENT == "ovn" && $SKIP_STOP_OVN != "True" ]]; then + stop_ovn + fi +} + +# _move_neutron_addresses_route() - Move the primary IP to the OVS bridge +# on startup, or back to the public interface on cleanup. If no IP is +# configured on the interface, just add it as a port to the OVS bridge. +function _move_neutron_addresses_route { + local from_intf=$1 + local to_intf=$2 + local add_ovs_port=$3 + local del_ovs_port=$4 + local af=$5 + + if [[ -n "$from_intf" && -n "$to_intf" ]]; then + # Remove the primary IP address from $from_intf and add it to $to_intf, + # along with the default route, if it exists. Also, when called + # on configure we will also add $from_intf as a port on $to_intf, + # assuming it is an OVS bridge. 
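+        #
+        # Rough sketch of the net effect for a hypothetical IPv4 case with
+        # $from_intf=eth1 and $to_intf=br-ex (addresses are examples only):
+        #
+        #   sudo ip addr del 192.0.2.10/24 dev eth1
+        #   sudo ip addr replace 192.0.2.10/24 dev br-ex
+        #   sudo ip link set br-ex up
+        #   sudo ovs-vsctl --may-exist add-port br-ex eth1
+        #   sudo ip route replace default via 192.0.2.1 dev br-ex
+        #
+        # The statements below only assemble these command strings; they are
+        # executed together at the end of the function.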
+ + local IP_REPLACE="" + local IP_DEL="" + local IP_UP="" + local DEFAULT_ROUTE_GW + DEFAULT_ROUTE_GW=$(ip -f $af r | awk "/default.+$from_intf\s/ { print \$3; exit }") + local ADD_OVS_PORT="" + local DEL_OVS_PORT="" + local ARP_CMD="" + + IP_BRD=$(ip -f $af a s dev $from_intf scope global primary | grep inet | awk '{ print $2, $3, $4; exit }') + + if [ "$DEFAULT_ROUTE_GW" != "" ]; then + ADD_DEFAULT_ROUTE="sudo ip -f $af r replace default via $DEFAULT_ROUTE_GW dev $to_intf" + fi + + if [[ "$add_ovs_port" == "True" ]]; then + ADD_OVS_PORT="sudo ovs-vsctl --may-exist add-port $to_intf $from_intf" + fi + + if [[ "$del_ovs_port" == "True" ]]; then + DEL_OVS_PORT="sudo ovs-vsctl --if-exists del-port $from_intf $to_intf" + fi + + if [[ "$IP_BRD" != "" ]]; then + IP_DEL="sudo ip addr del $IP_BRD dev $from_intf" + IP_REPLACE="sudo ip addr replace $IP_BRD dev $to_intf" + IP_UP="sudo ip link set $to_intf up" + if [[ "$af" == "inet" ]]; then + IP=$(echo $IP_BRD | awk '{ print $1; exit }' | grep -o -E '(.*)/' | cut -d "/" -f1) + ARP_CMD="sudo arping -A -c 3 -w 5 -I $to_intf $IP " + fi + fi + + # The add/del OVS port calls have to happen either before or + # after the address is moved in order to not leave it orphaned. + $DEL_OVS_PORT; $IP_DEL; $IP_REPLACE; $IP_UP; $ADD_OVS_PORT; $ADD_DEFAULT_ROUTE; $ARP_CMD + fi +} + +# _configure_public_network_connectivity() - Configures connectivity to the +# external network using $PUBLIC_INTERFACE or NAT on the single interface +# machines +function _configure_public_network_connectivity { + # If we've given a PUBLIC_INTERFACE to take over, then we assume + # that we can own the whole thing, and privot it into the OVS + # bridge. If we are not, we're probably on a single interface + # machine, and we just setup NAT so that fixed guests can get out. 
+ if [[ -n "$PUBLIC_INTERFACE" ]]; then + _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" True False "inet" + + if [[ $(ip -f inet6 a s dev "$PUBLIC_INTERFACE" | grep -c 'global') != 0 ]]; then + _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" False False "inet6" + fi + else + for d in $default_v4_route_devs; do + sudo iptables -t nat -A POSTROUTING -o $d -s $FLOATING_RANGE -j MASQUERADE + done + fi +} + +# cleanup_neutron() - Remove residual data files, anything left over from previous +# runs that a clean run would need to clean up +function cleanup_neutron { + stop_process neutron-api + stop_process neutron-rpc-server + stop_process neutron-periodic-workers + _stop_ovn_maintenance + remove_uwsgi_config "$NEUTRON_UWSGI_CONF" "neutron-api" + sudo rm -f $(apache_site_config_for neutron-api) + + if [[ -n "$OVS_PHYSICAL_BRIDGE" ]]; then + _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False True "inet" + + if [[ $(ip -f inet6 a s dev "$OVS_PHYSICAL_BRIDGE" | grep -c 'global') != 0 ]]; then + # ip(8) wants the prefix length when deleting + local v6_gateway + v6_gateway=$(ip -6 a s dev $OVS_PHYSICAL_BRIDGE | grep $IPV6_PUBLIC_NETWORK_GATEWAY | awk '{ print $2 }') + sudo ip -6 addr del $v6_gateway dev $OVS_PHYSICAL_BRIDGE + _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False False "inet6" + fi + + if is_provider_network && is_ironic_hardware; then + for IP in $(ip addr show dev $OVS_PHYSICAL_BRIDGE | grep ' inet ' | awk '{print $2}'); do + sudo ip addr del $IP dev $OVS_PHYSICAL_BRIDGE + sudo ip addr add $IP dev $PUBLIC_INTERFACE + done + sudo route del -net $FIXED_RANGE gw $NETWORK_GATEWAY dev $OVS_PHYSICAL_BRIDGE + fi + fi + + if is_neutron_ovs_base_plugin; then + neutron_ovs_base_cleanup + fi + + # delete all namespaces created by neutron + for ns in $(sudo ip netns list | grep -o -E '(qdhcp|qrouter|fip|snat)-[0-9a-f-]*'); do + sudo ip netns delete ${ns} + done + + if [[ $Q_AGENT == "ovn" ]]; then + cleanup_ovn + fi +} + + +function _create_neutron_conf_dir { + # Put config files in ``NEUTRON_CONF_DIR`` for everyone to find + sudo install -d -o $STACK_USER $NEUTRON_CONF_DIR +} + +# _configure_neutron_common() +# Set common config for all neutron server and agents. +# This MUST be called before other ``_configure_neutron_*`` functions. +function _configure_neutron_common { + _create_neutron_conf_dir + + # Uses oslo config generator to generate core sample configuration files + (cd $NEUTRON_DIR && exec ./tools/generate_config_file_samples.sh) + + cp $NEUTRON_DIR/etc/neutron.conf.sample $NEUTRON_CONF + + Q_POLICY_FILE=$NEUTRON_CONF_DIR/policy.json + + # allow neutron user to administer neutron to match neutron account + # NOTE(amotoki): This is required for nova works correctly with neutron. + if [ -f $NEUTRON_DIR/etc/policy.json ]; then + cp $NEUTRON_DIR/etc/policy.json $Q_POLICY_FILE + sed -i 's/"context_is_admin": "role:admin"/"context_is_admin": "role:admin or user_name:neutron"/g' $Q_POLICY_FILE + else + echo '{"context_is_admin": "role:admin or user_name:neutron"}' > $Q_POLICY_FILE + fi + + # Set plugin-specific variables ``Q_DB_NAME``, ``Q_PLUGIN_CLASS``. + # For main plugin config file, set ``Q_PLUGIN_CONF_PATH``, ``Q_PLUGIN_CONF_FILENAME``. + neutron_plugin_configure_common + + if [[ "$Q_PLUGIN_CONF_PATH" == '' || "$Q_PLUGIN_CONF_FILENAME" == '' || "$Q_PLUGIN_CLASS" == '' ]]; then + die $LINENO "Neutron plugin not set.. 
exiting" + fi + + # If needed, move config file from ``$NEUTRON_DIR/etc/neutron`` to ``NEUTRON_CONF_DIR`` + mkdir -p /$Q_PLUGIN_CONF_PATH + Q_PLUGIN_CONF_FILE=$Q_PLUGIN_CONF_PATH/$Q_PLUGIN_CONF_FILENAME + # NOTE(slaweq): NEUTRON_CORE_PLUGIN_CONF is used e.g. in neutron repository, + # it was previously defined in the lib/neutron module which is now deleted. + NEUTRON_CORE_PLUGIN_CONF=$Q_PLUGIN_CONF_FILE + # NOTE(hichihara): Some neutron vendor plugins were already decomposed and + # there is no config file in Neutron tree. They should prepare the file in each plugin. + if [ -f "$NEUTRON_DIR/$Q_PLUGIN_CONF_FILE.sample" ]; then + cp "$NEUTRON_DIR/$Q_PLUGIN_CONF_FILE.sample" /$Q_PLUGIN_CONF_FILE + elif [ -f $NEUTRON_DIR/$Q_PLUGIN_CONF_FILE ]; then + cp $NEUTRON_DIR/$Q_PLUGIN_CONF_FILE /$Q_PLUGIN_CONF_FILE + fi + + iniset $NEUTRON_CONF database connection `database_connection_url $Q_DB_NAME` + iniset $NEUTRON_CONF DEFAULT state_path $DATA_DIR/neutron + iniset $NEUTRON_CONF DEFAULT use_syslog $SYSLOG + iniset $NEUTRON_CONF DEFAULT bind_host $Q_LISTEN_ADDRESS + iniset $NEUTRON_CONF oslo_concurrency lock_path $DATA_DIR/neutron/lock + + # NOTE(freerunner): Need to adjust Region Name for nova in multiregion installation + iniset $NEUTRON_CONF nova region_name $REGION_NAME + + if [ "$VIRT_DRIVER" = 'fake' ]; then + # Disable arbitrary limits + iniset $NEUTRON_CONF quotas quota_network -1 + iniset $NEUTRON_CONF quotas quota_subnet -1 + iniset $NEUTRON_CONF quotas quota_port -1 + iniset $NEUTRON_CONF quotas quota_security_group -1 + iniset $NEUTRON_CONF quotas quota_security_group_rule -1 + fi + + # Format logging + setup_logging $NEUTRON_CONF + + _neutron_setup_rootwrap +} + +function _configure_neutron_dhcp_agent { + + cp $NEUTRON_DIR/etc/dhcp_agent.ini.sample $Q_DHCP_CONF_FILE + + iniset $Q_DHCP_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL + # make it so we have working DNS from guests + iniset $Q_DHCP_CONF_FILE DEFAULT dnsmasq_local_resolv True + configure_root_helper_options $Q_DHCP_CONF_FILE + + if ! is_service_enabled q-l3 neutron-l3; then + if [[ "$ENABLE_ISOLATED_METADATA" = "True" ]]; then + iniset $Q_DHCP_CONF_FILE DEFAULT enable_isolated_metadata $ENABLE_ISOLATED_METADATA + iniset $Q_DHCP_CONF_FILE DEFAULT enable_metadata_network $ENABLE_METADATA_NETWORK + else + if [[ "$ENABLE_METADATA_NETWORK" = "True" ]]; then + die "$LINENO" "Enable isolated metadata is a must for metadata network" + fi + fi + fi + + _neutron_setup_interface_driver $Q_DHCP_CONF_FILE + + neutron_plugin_configure_dhcp_agent $Q_DHCP_CONF_FILE +} + + +function _configure_neutron_metadata_agent { + cp $NEUTRON_DIR/etc/metadata_agent.ini.sample $Q_META_CONF_FILE + + iniset $Q_META_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL + iniset $Q_META_CONF_FILE DEFAULT nova_metadata_host $Q_META_DATA_IP + iniset $Q_META_CONF_FILE DEFAULT metadata_workers $API_WORKERS + configure_root_helper_options $Q_META_CONF_FILE +} + +function _configure_neutron_ceilometer_notifications { + iniset $NEUTRON_CONF oslo_messaging_notifications driver messagingv2 +} + +function _configure_neutron_metering { + neutron_agent_metering_configure_common + neutron_agent_metering_configure_agent +} + +function _configure_dvr { + iniset $NEUTRON_CONF DEFAULT router_distributed True + iniset $Q_L3_CONF_FILE DEFAULT agent_mode $Q_DVR_MODE +} + + +# _configure_neutron_plugin_agent() - Set config files for neutron plugin agent +# It is called when q-agt is enabled. 
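+# With the default ML2 settings in lib/neutron_plugins/ml2, the agent options
+# written here end up in /$Q_PLUGIN_CONF_FILE, i.e.
+# /etc/neutron/plugins/ml2/ml2_conf.ini.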
+function _configure_neutron_plugin_agent { + # Specify the default root helper prior to agent configuration to + # ensure that an agent's configuration can override the default + configure_root_helper_options /$Q_PLUGIN_CONF_FILE + iniset $NEUTRON_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL + + # Configure agent for plugin + neutron_plugin_configure_plugin_agent +} + +# _configure_neutron_service() - Set config files for neutron service +# It is called when q-svc is enabled. +function _configure_neutron_service { + Q_API_PASTE_FILE=$NEUTRON_CONF_DIR/api-paste.ini + if test -r $NEUTRON_DIR/etc/neutron/api-paste.ini; then + cp $NEUTRON_DIR/etc/neutron/api-paste.ini $Q_API_PASTE_FILE + else + # TODO(stephenfin): Remove this branch once [1] merges + # [1] https://review.opendev.org/c/openstack/neutron/+/961130 + cp $NEUTRON_DIR/etc/api-paste.ini $Q_API_PASTE_FILE + fi + + # Update either configuration file with plugin + iniset $NEUTRON_CONF DEFAULT core_plugin $Q_PLUGIN_CLASS + + iniset $NEUTRON_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL + iniset $NEUTRON_CONF oslo_policy policy_file $Q_POLICY_FILE + + iniset $NEUTRON_CONF DEFAULT auth_strategy $Q_AUTH_STRATEGY + configure_keystone_authtoken_middleware $NEUTRON_CONF $Q_ADMIN_USERNAME + + # Configuration for neutron notifications to nova. + iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_status_changes $Q_NOTIFY_NOVA_PORT_STATUS_CHANGES + iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_data_changes $Q_NOTIFY_NOVA_PORT_DATA_CHANGES + + configure_keystone_authtoken_middleware $NEUTRON_CONF nova nova + + # Configuration for placement client + configure_keystone_authtoken_middleware $NEUTRON_CONF placement placement + + # Configure plugin + neutron_plugin_configure_service +} + +# Utility Functions +#------------------ + +# neutron_service_plugin_class_add() - add service plugin class +function neutron_service_plugin_class_add { + local service_plugin_class=$1 + if [[ $Q_SERVICE_PLUGIN_CLASSES == '' ]]; then + Q_SERVICE_PLUGIN_CLASSES=$service_plugin_class + elif [[ ! ,${Q_SERVICE_PLUGIN_CLASSES}, =~ ,${service_plugin_class}, ]]; then + Q_SERVICE_PLUGIN_CLASSES="$Q_SERVICE_PLUGIN_CLASSES,$service_plugin_class" + fi +} + +# neutron_ml2_extension_driver_add() - add ML2 extension driver +function neutron_ml2_extension_driver_add { + local extension=$1 + if [[ $Q_ML2_PLUGIN_EXT_DRIVERS == '' ]]; then + Q_ML2_PLUGIN_EXT_DRIVERS=$extension + elif [[ ! ,${Q_ML2_PLUGIN_EXT_DRIVERS}, =~ ,${extension}, ]]; then + Q_ML2_PLUGIN_EXT_DRIVERS="$Q_ML2_PLUGIN_EXT_DRIVERS,$extension" + fi +} + +# neutron_server_config_add() - add server config file +function neutron_server_config_add { + _Q_PLUGIN_EXTRA_CONF_FILES_ABS+=($1) +} + +# neutron_deploy_rootwrap_filters() - deploy rootwrap filters to $Q_CONF_ROOTWRAP_D (owned by root). 
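+# Devstack plugins that ship their own filters under etc/neutron/rootwrap.d/
+# can call this with their source directory, for example (hypothetical path):
+#
+#   neutron_deploy_rootwrap_filters $DEST/networking-example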
+function neutron_deploy_rootwrap_filters { + if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then + return + fi + local srcdir=$1 + sudo install -d -o root -m 755 $Q_CONF_ROOTWRAP_D + sudo install -o root -m 644 $srcdir/etc/neutron/rootwrap.d/* $Q_CONF_ROOTWRAP_D/ +} + +# _neutron_setup_rootwrap() - configure Neutron's rootwrap +function _neutron_setup_rootwrap { + if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then + return + fi + # Wipe any existing ``rootwrap.d`` files first + Q_CONF_ROOTWRAP_D=$NEUTRON_CONF_DIR/rootwrap.d + if [[ -d $Q_CONF_ROOTWRAP_D ]]; then + sudo rm -rf $Q_CONF_ROOTWRAP_D + fi + + neutron_deploy_rootwrap_filters $NEUTRON_DIR + + # Set up ``rootwrap.conf``, pointing to ``$NEUTRON_CONF_DIR/rootwrap.d`` + # location moved in newer versions, prefer new location + if test -r $NEUTRON_DIR/etc/neutron/rootwrap.conf; then + sudo install -o root -g root -m 644 $NEUTRON_DIR/etc/neutron/rootwrap.conf $Q_RR_CONF_FILE + else + # TODO(stephenfin): Remove this branch once [1] merges + # [1] https://review.opendev.org/c/openstack/neutron/+/961130 + sudo install -o root -g root -m 644 $NEUTRON_DIR/etc/rootwrap.conf $Q_RR_CONF_FILE + fi + sudo sed -e "s:^filters_path=.*$:filters_path=$Q_CONF_ROOTWRAP_D:" -i $Q_RR_CONF_FILE + # Rely on $PATH set by devstack to determine what is safe to execute + # by rootwrap rather than use explicit whitelist of paths in + # rootwrap.conf + sudo sed -e 's/^exec_dirs=.*/#&/' -i $Q_RR_CONF_FILE + + # Specify ``rootwrap.conf`` as first parameter to neutron-rootwrap + ROOTWRAP_SUDOER_CMD="$NEUTRON_ROOTWRAP $Q_RR_CONF_FILE *" + ROOTWRAP_DAEMON_SUDOER_CMD="$NEUTRON_ROOTWRAP-daemon $Q_RR_CONF_FILE" + + # Set up the rootwrap sudoers for neutron + TEMPFILE=`mktemp` + echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_SUDOER_CMD" >$TEMPFILE + echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_DAEMON_SUDOER_CMD" >>$TEMPFILE + chmod 0440 $TEMPFILE + sudo chown root:root $TEMPFILE + sudo mv $TEMPFILE /etc/sudoers.d/neutron-rootwrap + + # Update the root_helper + configure_root_helper_options $NEUTRON_CONF +} + +function configure_root_helper_options { + local conffile=$1 + iniset $conffile agent root_helper "$Q_RR_COMMAND" + if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then + iniset $conffile agent root_helper_daemon "$Q_RR_DAEMON_COMMAND" + fi +} + +function _neutron_setup_interface_driver { + + # ovs_use_veth needs to be set before the plugin configuration + # occurs to allow plugins to override the setting. + iniset $1 DEFAULT ovs_use_veth $Q_OVS_USE_VETH + + neutron_plugin_setup_interface_driver $1 +} +# Functions for Neutron Exercises +#-------------------------------- + +# ssh check +function _ssh_check_neutron { + local from_net=$1 + local key_file=$2 + local ip=$3 + local user=$4 + local timeout_sec=$5 + local probe_cmd = "" + probe_cmd=`_get_probe_cmd_prefix $from_net` + local testcmd="$probe_cmd ssh -o StrictHostKeyChecking=no -i $key_file ${user}@$ip echo success" + test_with_retry "$testcmd" "server $ip didn't become ssh-able" $timeout_sec +} + +function plugin_agent_add_l2_agent_extension { + local l2_agent_extension=$1 + if [[ -z "$L2_AGENT_EXTENSIONS" ]]; then + L2_AGENT_EXTENSIONS=$l2_agent_extension + elif [[ ! 
,${L2_AGENT_EXTENSIONS}, =~ ,${l2_agent_extension}, ]]; then + L2_AGENT_EXTENSIONS+=",$l2_agent_extension" + fi +} + +# Restore xtrace +$_XTRACE_NEUTRON + +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: diff --git a/lib/neutron-legacy b/lib/neutron-legacy new file mode 100644 index 0000000000..e90400fec1 --- /dev/null +++ b/lib/neutron-legacy @@ -0,0 +1,6 @@ +#!/bin/bash + +# TODO(slaweq): remove that file when other projects, like e.g. Grenade will +# be using lib/neutron + +source $TOP_DIR/lib/neutron diff --git a/lib/neutron_plugins/README.md b/lib/neutron_plugins/README.md new file mode 100644 index 0000000000..728aaee85f --- /dev/null +++ b/lib/neutron_plugins/README.md @@ -0,0 +1,35 @@ +Neutron plugin specific files +============================= +Neutron plugins require plugin specific behavior. +The files under the directory, ``lib/neutron_plugins/``, will be used +when their service is enabled. +Each plugin has ``lib/neutron_plugins/$Q_PLUGIN`` and define the following +functions. +Plugin specific configuration variables should be in this file. + +* filename: ``$Q_PLUGIN`` + * The corresponding file name MUST be the same to plugin name ``$Q_PLUGIN``. + Plugin specific configuration variables should be in this file. + +functions +--------- +``lib/neutron`` calls the following functions when the ``$Q_PLUGIN`` is enabled + +* ``neutron_plugin_create_nova_conf`` : + optionally set options in nova_conf +* ``neutron_plugin_install_agent_packages`` : + install packages that is specific to plugin agent + e.g. + install_package bridge-utils +* ``neutron_plugin_configure_common`` : + set plugin-specific variables, ``Q_PLUGIN_CONF_PATH``, ``Q_PLUGIN_CONF_FILENAME``, + ``Q_PLUGIN_CLASS`` +* ``neutron_plugin_configure_dhcp_agent`` +* ``neutron_plugin_configure_l3_agent`` +* ``neutron_plugin_configure_plugin_agent`` +* ``neutron_plugin_configure_service`` +* ``neutron_plugin_setup_interface_driver`` +* ``has_neutron_plugin_security_group``: + return 0 if the plugin support neutron security group otherwise return 1 +* ``neutron_plugin_check_adv_test_requirements``: + return 0 if requirements are satisfied otherwise return 1 diff --git a/lib/neutron_plugins/bigswitch_floodlight b/lib/neutron_plugins/bigswitch_floodlight new file mode 100644 index 0000000000..84ca7ec42c --- /dev/null +++ b/lib/neutron_plugins/bigswitch_floodlight @@ -0,0 +1,74 @@ +#!/bin/bash +# +# Neutron Big Switch/FloodLight plugin +# ------------------------------------ + +# Save trace setting +_XTRACE_NEUTRON_BIGSWITCH=$(set +o | grep xtrace) +set +o xtrace + +source $TOP_DIR/lib/neutron_plugins/ovs_base +source $TOP_DIR/lib/neutron_thirdparty/bigswitch_floodlight # for third party service specific configuration values + +function neutron_plugin_create_nova_conf { + : +} + +function neutron_plugin_install_agent_packages { + _neutron_ovs_base_install_agent_packages +} + +function neutron_plugin_configure_common { + Q_PLUGIN_CONF_PATH=etc/neutron/plugins/bigswitch + Q_PLUGIN_CONF_FILENAME=restproxy.ini + Q_PLUGIN_CLASS="neutron.plugins.bigswitch.plugin.NeutronRestProxyV2" + BS_FL_CONTROLLERS_PORT=${BS_FL_CONTROLLERS_PORT:-localhost:80} + BS_FL_CONTROLLER_TIMEOUT=${BS_FL_CONTROLLER_TIMEOUT:-10} +} + +function neutron_plugin_configure_dhcp_agent { + : +} + +function neutron_plugin_configure_l3_agent { + _neutron_ovs_base_configure_l3_agent +} + +function neutron_plugin_configure_plugin_agent { + # Set up integration bridge + _neutron_ovs_base_setup_bridge $OVS_BRIDGE + iniset 
/$Q_PLUGIN_CONF_FILE restproxyagent integration_bridge $OVS_BRIDGE + AGENT_BINARY="$NEUTRON_DIR/neutron/plugins/bigswitch/agent/restproxy_agent.py" + + _neutron_ovs_base_configure_firewall_driver +} + +function neutron_plugin_configure_service { + iniset /$Q_PLUGIN_CONF_FILE restproxy servers $BS_FL_CONTROLLERS_PORT + iniset /$Q_PLUGIN_CONF_FILE restproxy servertimeout $BS_FL_CONTROLLER_TIMEOUT + if [ "$BS_FL_VIF_DRIVER" = "ivs" ]; then + iniset /$Q_PLUGIN_CONF_FILE nova vif_type ivs + fi +} + +function neutron_plugin_setup_interface_driver { + local conf_file=$1 + if [ "$BS_FL_VIF_DRIVER" = "ivs" ]; then + iniset $conf_file DEFAULT interface_driver ivs + else + iniset $conf_file DEFAULT interface_driver openvswitch + fi +} + + +function has_neutron_plugin_security_group { + # 1 means False here + return 0 +} + +function neutron_plugin_check_adv_test_requirements { + is_service_enabled q-agt neutron-agent && is_service_enabled q-dhcp neutron-dhcp && return 0 +} + +# Restore xtrace +$_XTRACE_NEUTRON_BIGSWITCH diff --git a/lib/neutron_plugins/brocade b/lib/neutron_plugins/brocade new file mode 100644 index 0000000000..96400634af --- /dev/null +++ b/lib/neutron_plugins/brocade @@ -0,0 +1,79 @@ +#!/bin/bash +# +# Brocade Neutron Plugin +# ---------------------- + +# Save trace setting +_XTRACE_NEUTRON_BROCADE=$(set +o | grep xtrace) +set +o xtrace + +function is_neutron_ovs_base_plugin { + return 1 +} + +function neutron_plugin_create_nova_conf { + : +} + +function neutron_plugin_install_agent_packages { + install_package bridge-utils +} + +function neutron_plugin_configure_common { + Q_PLUGIN_CONF_PATH=etc/neutron/plugins/brocade + Q_PLUGIN_CONF_FILENAME=brocade.ini + Q_PLUGIN_CLASS="neutron.plugins.brocade.NeutronPlugin.BrocadePluginV2" +} + +function neutron_plugin_configure_service { + + if [[ "$BROCADE_SWITCH_OS_VERSION" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE switch osversion $BROCADE_SWITCH_OS_VERSION + fi + + if [[ "$BROCADE_SWITCH_OS_TYPE" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE switch ostype $BROCADE_SWITCH_OS_TYPE + fi + + if [[ "$BROCADE_SWITCH_PASSWORD" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE switch password $BROCADE_SWITCH_PASSWORD + fi + + if [[ "$BROCADE_SWITCH_USERNAME" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE switch username $BROCADE_SWITCH_USERNAME + fi + + if [[ "$BROCADE_SWITCH_IPADDR" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE switch address $BROCADE_SWITCH_IPADDR + fi + +} + +function neutron_plugin_configure_dhcp_agent { + iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_agent_manager neutron.agent.dhcp_agent.DhcpAgentWithStateReport +} + +function neutron_plugin_configure_l3_agent { + iniset $Q_L3_CONF_FILE DEFAULT l3_agent_manager neutron.agent.l3_agent.L3NATAgentWithStateReport +} + +function neutron_plugin_configure_plugin_agent { + AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-linuxbridge-agent" +} + +function neutron_plugin_setup_interface_driver { + local conf_file=$1 + iniset $conf_file DEFAULT interface_driver linuxbridge +} + +function has_neutron_plugin_security_group { + # 0 means True here + return 0 +} + +function neutron_plugin_check_adv_test_requirements { + is_service_enabled q-agt neutron-agent && is_service_enabled q-dhcp neutron-dhcp && return 0 +} + +# Restore xtrace +$_XTRACE_NEUTRON_BROCADE diff --git a/lib/neutron_plugins/cisco b/lib/neutron_plugins/cisco new file mode 100644 index 0000000000..b397169b59 --- /dev/null +++ b/lib/neutron_plugins/cisco @@ -0,0 +1,152 @@ +#!/bin/bash +# +# Neutron Cisco plugin +# --------------------------- + +# 
Save trace setting +_XTRACE_NEUTRON_CISCO=$(set +o | grep xtrace) +set +o xtrace + +# Scecify the VSM parameters +Q_CISCO_PLUGIN_VSM_IP=${Q_CISCO_PLUGIN_VSM_IP:-} +# Specify the VSM username +Q_CISCO_PLUGIN_VSM_USERNAME=${Q_CISCO_PLUGIN_VSM_USERNAME:-admin} +# Specify the VSM passward for above username +Q_CISCO_PLUGIN_VSM_PASSWORD=${Q_CISCO_PLUGIN_VSM_PASSWORD:-} +# Specify the uVEM integration bridge name +Q_CISCO_PLUGIN_INTEGRATION_BRIDGE=${Q_CISCO_PLUGIN_INTEGRATION_BRIDGE:-br-int} +# Specify if tunneling is enabled +Q_CISCO_PLUGIN_ENABLE_TUNNELING=${Q_CISCO_PLUGIN_ENABLE_TUNNELING:-True} +# Specify the VXLAN range +Q_CISCO_PLUGIN_VXLAN_ID_RANGES=${Q_CISCO_PLUGIN_VXLAN_ID_RANGES:-5000:10000} +# Specify the VLAN range +Q_CISCO_PLUGIN_VLAN_RANGES=${Q_CISCO_PLUGIN_VLAN_RANGES:-vlan:1:4094} + +# This routine put a prefix on an existing function name +function _prefix_function { + declare -F $1 > /dev/null || die "$1 doesn't exist" + eval "$(echo "${2}_${1}()"; declare -f ${1} | tail -n +2)" +} + +function _has_n1kv_subplugin { + local subplugin + for subplugin in ${Q_CISCO_PLUGIN_SUBPLUGINS[@]}; do + if [[ "$subplugin" == "n1kv" ]]; then + return 0 + fi + done + return 1 +} + +# Prefix openvswitch plugin routines with "ovs" in order to differentiate from +# cisco plugin routines. This means, ovs plugin routines will coexist with cisco +# plugin routines in this script. +source $TOP_DIR/lib/neutron_plugins/openvswitch +_prefix_function neutron_plugin_create_nova_conf ovs +_prefix_function neutron_plugin_install_agent_packages ovs +_prefix_function neutron_plugin_configure_common ovs +_prefix_function neutron_plugin_configure_dhcp_agent ovs +_prefix_function neutron_plugin_configure_l3_agent ovs +_prefix_function neutron_plugin_configure_plugin_agent ovs +_prefix_function neutron_plugin_configure_service ovs +_prefix_function neutron_plugin_setup_interface_driver ovs +_prefix_function has_neutron_plugin_security_group ovs + +function has_neutron_plugin_security_group { + return 1 +} + +function is_neutron_ovs_base_plugin { + return +} + +# populate required nova configuration parameters +function neutron_plugin_create_nova_conf { + _neutron_ovs_base_configure_nova_vif_driver +} + +function neutron_plugin_install_agent_packages { + # Cisco plugin uses openvswitch to operate in one of its configurations + ovs_neutron_plugin_install_agent_packages +} + +# Configure common parameters +function neutron_plugin_configure_common { + # setup default subplugins + if [ ! 
-v Q_CISCO_PLUGIN_SUBPLUGINS ]; then + declare -ga Q_CISCO_PLUGIN_SUBPLUGINS + Q_CISCO_PLUGIN_SUBPLUGINS=(n1kv) + fi + Q_PLUGIN_CONF_PATH=etc/neutron/plugins/cisco + Q_PLUGIN_CONF_FILENAME=cisco_plugins.ini + Q_PLUGIN_CLASS="neutron.plugins.cisco.network_plugin.PluginV2" +} + +function neutron_plugin_configure_dhcp_agent { + iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_agent_manager neutron.agent.dhcp_agent.DhcpAgentWithStateReport +} + +function neutron_plugin_configure_l3_agent { + : +} + +# Configure n1kv plugin +function _configure_n1kv_subplugin { + local cisco_cfg_file=$1 + + # populate the cisco plugin cfg file with the VSM information + echo "Configuring n1kv in $cisco_cfg_file-- $Q_CISCO_PLUGIN_VSM_IP $Q_CISCO_PLUGIN_VSM_USERNAME $Q_CISCO_PLUGIN_VSM_PASSWORD" + iniset $cisco_cfg_file N1KV:$Q_CISCO_PLUGIN_VSM_IP username $Q_CISCO_PLUGIN_VSM_USERNAME + iniset $cisco_cfg_file N1KV:$Q_CISCO_PLUGIN_VSM_IP password $Q_CISCO_PLUGIN_VSM_PASSWORD + + iniset $cisco_cfg_file CISCO_N1K integration_bridge $Q_CISCO_PLUGIN_INTEGRATION_BRIDGE + iniset $cisco_cfg_file CISCO_N1K enable_tunneling $Q_CISCO_PLUGIN_ENABLE_TUNNELING + iniset $cisco_cfg_file CISCO_N1K vxlan_id_ranges $Q_CISCO_PLUGIN_VXLAN_ID_RANGES + iniset $cisco_cfg_file CISCO_N1K network_vlan_ranges $Q_CISCO_PLUGIN_VLAN_RANGES + + # Setup the integration bridge by calling the ovs_base + OVS_BRIDGE=$Q_CISCO_PLUGIN_INTEGRATION_BRIDGE + _neutron_ovs_base_setup_bridge $OVS_BRIDGE +} + +function neutron_plugin_configure_plugin_agent { + : +} + +function neutron_plugin_configure_service { + local subplugin + local cisco_cfg_file + + cisco_cfg_file=/$Q_PLUGIN_CONF_FILE + + # Setup the [CISCO_PLUGINS] section + if [[ ${#Q_CISCO_PLUGIN_SUBPLUGINS[@]} > 2 ]]; then + die $LINENO "At most two subplugins are supported." 
+ fi + + # Setup the subplugins + inicomment $cisco_cfg_file CISCO_PLUGINS vswitch_plugin + inicomment $cisco_cfg_file CISCO_TEST host + for subplugin in ${Q_CISCO_PLUGIN_SUBPLUGINS[@]}; do + case $subplugin in + n1kv) iniset $cisco_cfg_file CISCO_PLUGINS vswitch_plugin neutron.plugins.cisco.n1kv.n1kv_neutron_plugin.N1kvNeutronPluginV2;; + *) die $LINENO "Unsupported cisco subplugin: $subplugin";; + esac + done + + if _has_n1kv_subplugin; then + _configure_n1kv_subplugin $cisco_cfg_file + fi +} + +function neutron_plugin_create_initial_network_profile { + neutron cisco-network-profile-create default_network_profile vlan --segment_range 1-3000 --physical_network "$1" +} + +function neutron_plugin_setup_interface_driver { + local conf_file=$1 + iniset $conf_file DEFAULT interface_driver openvswitch +} + +# Restore xtrace +$_XTRACE_NEUTRON_CISCO diff --git a/lib/neutron_plugins/embrane b/lib/neutron_plugins/embrane new file mode 100644 index 0000000000..385dab8354 --- /dev/null +++ b/lib/neutron_plugins/embrane @@ -0,0 +1,43 @@ +#!/bin/bash +# +# Neutron Embrane plugin +# --------------------------- + +# Save trace setting +_XTRACE_NEUTRON_EMBR=$(set +o | grep xtrace) +set +o xtrace + +source $TOP_DIR/lib/neutron_plugins/openvswitch + +function save_function { + local ORIG_FUNC + ORIG_FUNC=$(declare -f $1) + local NEW_FUNC="$2${ORIG_FUNC#$1}" + eval "$NEW_FUNC" +} + +save_function neutron_plugin_configure_service _neutron_plugin_configure_service + +function neutron_plugin_configure_common { + Q_PLUGIN_CONF_PATH=etc/neutron/plugins/embrane + Q_PLUGIN_CONF_FILENAME=heleos_conf.ini + Q_PLUGIN_CLASS="neutron.plugins.embrane.plugins.embrane_ovs_plugin.EmbraneOvsPlugin" +} + +function neutron_plugin_configure_service { + _neutron_plugin_configure_service + iniset /$Q_PLUGIN_CONF_FILE heleos esm_mgmt $HELEOS_ESM_MGMT + iniset /$Q_PLUGIN_CONF_FILE heleos admin_username $HELEOS_ADMIN_USERNAME + iniset /$Q_PLUGIN_CONF_FILE heleos admin_password $HELEOS_ADMIN_PASSWORD + iniset /$Q_PLUGIN_CONF_FILE heleos router_image $HELEOS_ROUTER_IMAGE + iniset /$Q_PLUGIN_CONF_FILE heleos mgmt_id $HELEOS_MGMT_ID + iniset /$Q_PLUGIN_CONF_FILE heleos inband_id $HELEOS_INBAND_ID + iniset /$Q_PLUGIN_CONF_FILE heleos oob_id $HELEOS_OOB_ID + iniset /$Q_PLUGIN_CONF_FILE heleos dummy_utif_id $HELEOS_DUMMY_UTIF_ID + iniset /$Q_PLUGIN_CONF_FILE heleos resource_pool_id $HELEOS_RESOURCE_POOL_ID + iniset /$Q_PLUGIN_CONF_FILE heleos async_requests $HELEOS_ASYNC_REQUESTS +} + +# Restore xtrace +$_XTRACE_NEUTRON_EMBR + diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2 new file mode 100644 index 0000000000..687167bf79 --- /dev/null +++ b/lib/neutron_plugins/ml2 @@ -0,0 +1,154 @@ +#!/bin/bash +# +# Neutron Modular Layer 2 plugin +# ------------------------------ + +# Save trace setting +_XTRACE_NEUTRON_ML2=$(set +o | grep xtrace) +set +o xtrace + +# Default OVN L2 agent +Q_AGENT=${Q_AGENT:-ovn} +if [ -f $TOP_DIR/lib/neutron_plugins/${Q_AGENT}_agent ]; then + source $TOP_DIR/lib/neutron_plugins/${Q_AGENT}_agent +fi + +# Enable this to simply and quickly enable tunneling with ML2. +# For ML2/OVS select either 'gre', 'vxlan', or 'gre,vxlan'. +# For ML2/OVN use 'geneve'. 
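+#
+# Example (``local.conf``), assuming the other ML2/OVS settings such as
+# ``Q_AGENT=openvswitch`` are selected as well:
+#
+#   Q_ML2_TENANT_NETWORK_TYPE=vxlan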
+Q_ML2_TENANT_NETWORK_TYPE=${Q_ML2_TENANT_NETWORK_TYPE:-"geneve"} +# This has to be set here since the agent will set this in the config file +if [[ "$Q_ML2_TENANT_NETWORK_TYPE" == "gre" || "$Q_ML2_TENANT_NETWORK_TYPE" == "vxlan" ]]; then + Q_TUNNEL_TYPES=$Q_ML2_TENANT_NETWORK_TYPE +elif [[ "$ENABLE_TENANT_TUNNELS" == "True" ]]; then + Q_TUNNEL_TYPES=gre +fi + +# List of MechanismDrivers to load +Q_ML2_PLUGIN_MECHANISM_DRIVERS=${Q_ML2_PLUGIN_MECHANISM_DRIVERS:-ovn} +# Default GRE TypeDriver options +Q_ML2_PLUGIN_GRE_TYPE_OPTIONS=${Q_ML2_PLUGIN_GRE_TYPE_OPTIONS:-tunnel_id_ranges=$TENANT_TUNNEL_RANGES} +# Default VXLAN TypeDriver options +Q_ML2_PLUGIN_VXLAN_TYPE_OPTIONS=${Q_ML2_PLUGIN_VXLAN_TYPE_OPTIONS:-vni_ranges=$TENANT_TUNNEL_RANGES} +# Default VLAN TypeDriver options +Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS=${Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS:-} +# Default GENEVE TypeDriver options +Q_ML2_PLUGIN_GENEVE_TYPE_OPTIONS=${Q_ML2_PLUGIN_GENEVE_TYPE_OPTIONS:-vni_ranges=$TENANT_TUNNEL_RANGES} +# List of extension drivers to load, use '-' instead of ':-' to allow people to +# explicitly override this to blank +if [[ "$NEUTRON_PORT_SECURITY" = "True" ]]; then + Q_ML2_PLUGIN_EXT_DRIVERS=${Q_ML2_PLUGIN_EXT_DRIVERS-port_security} +else + Q_ML2_PLUGIN_EXT_DRIVERS=${Q_ML2_PLUGIN_EXT_DRIVERS:-} +fi + +# L3 Plugin to load for ML2 +# For some flat network environment, they not want to extend L3 plugin. +# Make sure it is able to set empty to ML2_L3_PLUGIN. +ML2_L3_PLUGIN=${ML2_L3_PLUGIN-router} + +function populate_ml2_config { + CONF=$1 + SECTION=$2 + OPTS=$3 + + if [ -z "$OPTS" ]; then + return + fi + for I in "${OPTS[@]}"; do + # Replace the first '=' with ' ' for iniset syntax + iniset $CONF $SECTION ${I/=/ } + done +} + +function neutron_plugin_configure_common { + Q_PLUGIN_CONF_PATH=etc/neutron/plugins/ml2 + Q_PLUGIN_CONF_FILENAME=ml2_conf.ini + Q_PLUGIN_CLASS="ml2" + # The ML2 plugin delegates L3 routing/NAT functionality to + # the L3 service plugin which must therefore be specified. + neutron_service_plugin_class_add $ML2_L3_PLUGIN +} + +function neutron_plugin_configure_service { + if [[ "$Q_ML2_TENANT_NETWORK_TYPE" != "local" ]]; then + Q_SRV_EXTRA_OPTS+=(tenant_network_types=$Q_ML2_TENANT_NETWORK_TYPE) + elif [[ "$ENABLE_TENANT_TUNNELS" == "True" ]]; then + # This assumes you want a simple configuration, and will overwrite + # Q_SRV_EXTRA_OPTS if set in addition to ENABLE_TENANT_TUNNELS. + Q_SRV_EXTRA_OPTS+=(tenant_network_types=gre) + Q_ML2_PLUGIN_GRE_TYPE_OPTIONS=(tunnel_id_ranges=$TENANT_TUNNEL_RANGES) + elif [[ "$ENABLE_TENANT_VLANS" == "True" ]]; then + Q_SRV_EXTRA_OPTS+=(tenant_network_types=vlan) + else + echo "WARNING - The ml2 plugin is using local tenant networks, with no connectivity between hosts." + fi + + # Allow for overrding VLAN configuration (for example, to configure provider + # VLANs) by first checking if Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS is set. 
+ if [ "$Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS" == "" ]; then + if [[ "$ML2_VLAN_RANGES" == "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]]; then + ML2_VLAN_RANGES=$PHYSICAL_NETWORK + if [[ "$TENANT_VLAN_RANGE" != "" ]]; then + ML2_VLAN_RANGES=$ML2_VLAN_RANGES:$TENANT_VLAN_RANGE + fi + fi + if [[ "$ML2_VLAN_RANGES" != "" ]]; then + Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS=(network_vlan_ranges=$ML2_VLAN_RANGES) + fi + fi + + + # Allow for setup the flat type network + if [[ -z "$Q_ML2_PLUGIN_FLAT_TYPE_OPTIONS" ]]; then + if [[ -n "$PHYSICAL_NETWORK" || -n "$PUBLIC_PHYSICAL_NETWORK" ]]; then + Q_ML2_PLUGIN_FLAT_TYPE_OPTIONS="flat_networks=" + if [[ -n "$PHYSICAL_NETWORK" ]]; then + Q_ML2_PLUGIN_FLAT_TYPE_OPTIONS+="${PHYSICAL_NETWORK}," + fi + if [[ -n "$PUBLIC_PHYSICAL_NETWORK" ]] && [[ "${PHYSICAL_NETWORK}" != "$PUBLIC_PHYSICAL_NETWORK" ]]; then + Q_ML2_PLUGIN_FLAT_TYPE_OPTIONS+="${PUBLIC_PHYSICAL_NETWORK}," + fi + fi + fi + populate_ml2_config /$Q_PLUGIN_CONF_FILE securitygroup enable_security_group=$Q_USE_SECGROUP + + populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 mechanism_drivers=$Q_ML2_PLUGIN_MECHANISM_DRIVERS + populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 overlay_ip_version=$TUNNEL_IP_VERSION + + if [[ -n "$Q_ML2_PLUGIN_TYPE_DRIVERS" ]]; then + populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 type_drivers=$Q_ML2_PLUGIN_TYPE_DRIVERS + fi + + populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 extension_drivers=$Q_ML2_PLUGIN_EXT_DRIVERS + + populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 $Q_SRV_EXTRA_OPTS + + populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_type_gre $Q_ML2_PLUGIN_GRE_TYPE_OPTIONS + + populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_type_vxlan $Q_ML2_PLUGIN_VXLAN_TYPE_OPTIONS + + populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_type_flat $Q_ML2_PLUGIN_FLAT_TYPE_OPTIONS + + populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_type_vlan $Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS + + populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_type_geneve $Q_ML2_PLUGIN_GENEVE_TYPE_OPTIONS + + if [[ "$Q_DVR_MODE" != "legacy" ]]; then + populate_ml2_config /$Q_PLUGIN_CONF_FILE agent l2_population=True + populate_ml2_config /$Q_PLUGIN_CONF_FILE agent tunnel_types=vxlan + populate_ml2_config /$Q_PLUGIN_CONF_FILE agent enable_distributed_routing=True + populate_ml2_config /$Q_PLUGIN_CONF_FILE agent arp_responder=True + fi +} + +function has_neutron_plugin_security_group { + return 0 +} + +function configure_qos_ml2 { + neutron_ml2_extension_driver_add "qos" +} + +# Restore xtrace +$_XTRACE_NEUTRON_ML2 diff --git a/lib/neutron_plugins/nuage b/lib/neutron_plugins/nuage new file mode 100644 index 0000000000..8c75e15048 --- /dev/null +++ b/lib/neutron_plugins/nuage @@ -0,0 +1,64 @@ +#!/bin/bash +# +# Nuage Neutron Plugin +# ---------------------- + +# Save trace setting +_XTRACE_NEUTRON_NU=$(set +o | grep xtrace) +set +o xtrace + +function neutron_plugin_create_nova_conf { + local conf="$1" + NOVA_OVS_BRIDGE=${NOVA_OVS_BRIDGE:-"br-int"} + iniset $conf neutron ovs_bridge $NOVA_OVS_BRIDGE +} + +function neutron_plugin_install_agent_packages { + : +} + +function neutron_plugin_configure_common { + Q_PLUGIN_CONF_PATH=etc/neutron/plugins/nuage + Q_PLUGIN_CONF_FILENAME=nuage_plugin.ini + Q_PLUGIN_CLASS="neutron.plugins.nuage.plugin.NuagePlugin" + Q_PLUGIN_EXTENSIONS_PATH=neutron/plugins/nuage/extensions + #Nuage specific Neutron defaults. 
Actual value must be set and sourced + NUAGE_CNA_SERVERS=${NUAGE_CNA_SERVERS:-'localhost:8443'} + NUAGE_CNA_SERVER_AUTH=${NUAGE_CNA_SERVER_AUTH:-'username:password'} + NUAGE_CNA_ORGANIZATION=${NUAGE_CNA_ORGANIZATION:-'org'} + NUAGE_CNA_SERVER_SSL=${NUAGE_CNA_SERVER_SSL:-'True'} + NUAGE_CNA_BASE_URI=${NUAGE_CNA_BASE_URI:-'/'} + NUAGE_CNA_AUTH_RESOURCE=${NUAGE_CNA_AUTH_RESOURCE:-'/'} + NUAGE_CNA_DEF_NETPART_NAME=${NUAGE_CNA_DEF_NETPART_NAME:-''} +} + +function neutron_plugin_configure_dhcp_agent { + : +} + +function neutron_plugin_configure_l3_agent { + : +} + +function neutron_plugin_configure_plugin_agent { + : +} + +function neutron_plugin_configure_service { + iniset $NEUTRON_CONF DEFAULT api_extensions_path neutron/plugins/nuage/extensions/ + iniset /$Q_PLUGIN_CONF_FILE restproxy base_uri $NUAGE_CNA_BASE_URI + iniset /$Q_PLUGIN_CONF_FILE restproxy serverssl $NUAGE_CNA_SERVER_SSL + iniset /$Q_PLUGIN_CONF_FILE restproxy serverauth $NUAGE_CNA_SERVER_AUTH + iniset /$Q_PLUGIN_CONF_FILE restproxy organization $NUAGE_CNA_ORGANIZATION + iniset /$Q_PLUGIN_CONF_FILE restproxy server $NUAGE_CNA_SERVERS + iniset /$Q_PLUGIN_CONF_FILE restproxy auth_resource $NUAGE_CNA_AUTH_RESOURCE + iniset /$Q_PLUGIN_CONF_FILE restproxy default_net_partition_name $NUAGE_CNA_DEF_NETPART_NAME +} + +function has_neutron_plugin_security_group { + # 1 means False here + return 1 +} + +# Restore xtrace +$_XTRACE_NEUTRON_NU diff --git a/lib/neutron_plugins/openvswitch b/lib/neutron_plugins/openvswitch new file mode 100644 index 0000000000..130eaacab3 --- /dev/null +++ b/lib/neutron_plugins/openvswitch @@ -0,0 +1,60 @@ +#!/bin/bash +# +# Common code used by cisco and embrane plugins +# --------------------------------------------- + +# This module used to be for Open vSwitch monolithic plugin, +# which has been removed in Juno. + +# Save trace setting +_XTRACE_NEUTRON_OVS=$(set +o | grep xtrace) +set +o xtrace + +source $TOP_DIR/lib/neutron_plugins/openvswitch_agent + +function neutron_plugin_configure_common { + Q_PLUGIN_CONF_PATH=etc/neutron/plugins/openvswitch + Q_PLUGIN_CONF_FILENAME=ovs_neutron_plugin.ini + Q_PLUGIN_CLASS="neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2" +} + +function neutron_plugin_configure_service { + if [[ "$ENABLE_TENANT_TUNNELS" == "True" ]]; then + iniset /$Q_PLUGIN_CONF_FILE ovs tenant_network_type gre + iniset /$Q_PLUGIN_CONF_FILE ovs tunnel_id_ranges $TENANT_TUNNEL_RANGES + elif [[ "$ENABLE_TENANT_VLANS" == "True" ]]; then + iniset /$Q_PLUGIN_CONF_FILE ovs tenant_network_type vlan + else + echo "WARNING - The openvswitch plugin is using local tenant networks, with no connectivity between hosts." + fi + + # Override ``OVS_VLAN_RANGES`` and ``OVS_BRIDGE_MAPPINGS`` in ``localrc`` + # for more complex physical network configurations. + if [[ "$OVS_VLAN_RANGES" == "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]]; then + OVS_VLAN_RANGES=$PHYSICAL_NETWORK + if [[ "$TENANT_VLAN_RANGE" != "" ]]; then + OVS_VLAN_RANGES=$OVS_VLAN_RANGES:$TENANT_VLAN_RANGE + fi + fi + if [[ "$OVS_VLAN_RANGES" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE ovs network_vlan_ranges $OVS_VLAN_RANGES + fi + + _neutron_ovs_base_configure_firewall_driver + + # Define extra "OVS" configuration options when q-svc is configured by defining + # the array ``Q_SRV_EXTRA_OPTS``. 
+ # For Example: ``Q_SRV_EXTRA_OPTS=(foo=true bar=2)`` + for I in "${Q_SRV_EXTRA_OPTS[@]}"; do + # Replace the first '=' with ' ' for iniset syntax + iniset /$Q_PLUGIN_CONF_FILE ovs ${I/=/ } + done +} + +function has_neutron_plugin_security_group { + return 0 +} + +# Restore xtrace +$_XTRACE_NEUTRON_OVS + diff --git a/lib/neutron_plugins/openvswitch_agent b/lib/neutron_plugins/openvswitch_agent new file mode 100644 index 0000000000..6e79984e9b --- /dev/null +++ b/lib/neutron_plugins/openvswitch_agent @@ -0,0 +1,75 @@ +#!/bin/bash +# +# Neutron Open vSwitch L2 agent +# ----------------------------- + +# Save trace setting +_XTRACE_NEUTRON_OVSL2=$(set +o | grep xtrace) +set +o xtrace + +source $TOP_DIR/lib/neutron_plugins/ovs_base + +function neutron_plugin_create_nova_conf { + _neutron_ovs_base_configure_nova_vif_driver +} + +function neutron_plugin_install_agent_packages { + _neutron_ovs_base_install_agent_packages + if use_library_from_git "os-ken"; then + git_clone_by_name "os-ken" + setup_dev_lib "os-ken" + fi +} + +function neutron_plugin_configure_dhcp_agent { + local conf_file=$1 + : +} + +function neutron_plugin_configure_l3_agent { + local conf_file=$1 + _neutron_ovs_base_configure_l3_agent +} + +function neutron_plugin_configure_plugin_agent { + # Setup integration bridge + _neutron_ovs_base_setup_bridge $OVS_BRIDGE + _neutron_ovs_base_configure_firewall_driver + + # Setup agent for tunneling + if [[ "$OVS_ENABLE_TUNNELING" == "True" ]]; then + iniset /$Q_PLUGIN_CONF_FILE ovs local_ip $TUNNEL_ENDPOINT_IP + iniset /$Q_PLUGIN_CONF_FILE ovs tunnel_bridge $OVS_TUNNEL_BRIDGE + fi + + # Setup physical network bridge mappings. Override + # ``OVS_VLAN_RANGES`` and ``OVS_BRIDGE_MAPPINGS`` in ``localrc`` for more + # complex physical network configurations. + if [[ "$PHYSICAL_NETWORK" != "" ]] && [[ "$OVS_PHYSICAL_BRIDGE" != "" ]]; then + if [[ "$OVS_BRIDGE_MAPPINGS" == "" ]]; then + OVS_BRIDGE_MAPPINGS=$PHYSICAL_NETWORK:$OVS_PHYSICAL_BRIDGE + fi + + # Configure bridge manually with physical interface as port for multi-node + _neutron_ovs_base_add_bridge $OVS_PHYSICAL_BRIDGE + fi + if [[ "$OVS_BRIDGE_MAPPINGS" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE ovs bridge_mappings $OVS_BRIDGE_MAPPINGS + fi + AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-openvswitch-agent" + + iniset /$Q_PLUGIN_CONF_FILE agent tunnel_types $Q_TUNNEL_TYPES + iniset /$Q_PLUGIN_CONF_FILE ovs datapath_type $OVS_DATAPATH_TYPE +} + +function neutron_plugin_setup_interface_driver { + local conf_file=$1 + iniset $conf_file DEFAULT interface_driver openvswitch +} + +function neutron_plugin_check_adv_test_requirements { + is_service_enabled q-agt neutron-agent && is_service_enabled q-dhcp neutron-dhcp && return 0 +} + +# Restore xtrace +$_XTRACE_NEUTRON_OVSL2 diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent new file mode 100644 index 0000000000..48e92a1782 --- /dev/null +++ b/lib/neutron_plugins/ovn_agent @@ -0,0 +1,867 @@ +#!/bin/bash +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+#
+
+# Global Sources
+# --------------
+
+# There are some ovs functions OVN depends on that must be sourced from
+# the ovs neutron plugins.
+source ${TOP_DIR}/lib/neutron_plugins/ovs_base
+source ${TOP_DIR}/lib/neutron_plugins/openvswitch_agent
+
+# Load devstack ovs compilation and loading functions
+source ${TOP_DIR}/lib/neutron_plugins/ovs_source
+
+# Set variables for building OVN from source
+OVN_REPO=${OVN_REPO:-https://github.com/ovn-org/ovn.git}
+OVN_REPO_NAME=$(basename ${OVN_REPO} | cut -f1 -d'.')
+OVN_REPO_NAME=${OVN_REPO_NAME:-ovn}
+OVN_BRANCH=${OVN_BRANCH:-branch-24.03}
+# The commit that removed the OVN bits from the OVS tree; it is not present
+# in the OVN tree and is used to distinguish whether OVN is part of OVS or not.
+# https://github.com/openvswitch/ovs/commit/05bf1dbb98b0635a51f75e268ef8aed27601401d
+OVN_SPLIT_HASH=05bf1dbb98b0635a51f75e268ef8aed27601401d
+
+if is_service_enabled tls-proxy; then
+    OVN_PROTO=ssl
+else
+    OVN_PROTO=tcp
+fi
+
+# How to connect to ovsdb-server hosting the OVN SB database.
+OVN_SB_REMOTE=${OVN_SB_REMOTE:-$OVN_PROTO:$SERVICE_HOST:6642}
+
+# How to connect to ovsdb-server hosting the OVN NB database
+OVN_NB_REMOTE=${OVN_NB_REMOTE:-$OVN_PROTO:$SERVICE_HOST:6641}
+
+# ml2/config for neutron_sync_mode
+OVN_NEUTRON_SYNC_MODE=${OVN_NEUTRON_SYNC_MODE:-log}
+
+# Configured DNS servers to be used with internal_dns extension, only
+# if the subnet DNS is not configured.
+OVN_DNS_SERVERS=${OVN_DNS_SERVERS:-8.8.8.8}
+
+# The type of OVN L3 Scheduler to use. The OVN L3 Scheduler determines the
+# hypervisor/chassis where a router's gateway should be hosted in OVN. The
+# default OVN L3 scheduler is leastloaded.
+OVN_L3_SCHEDULER=${OVN_L3_SCHEDULER:-leastloaded}
+
+# A UUID to uniquely identify this system. If one is not specified, a random
+# one will be generated. A randomly generated UUID will be saved in a file
+# $OVS_SYSCONFDIR/system-id.conf (typically /etc/openvswitch/system-id.conf)
+# so that the same one will be re-used if you re-run DevStack or restart
+# the Open vSwitch service.
+OVN_UUID=${OVN_UUID:-}
+
+# Whether or not to build the openvswitch kernel module from ovs. This is required
+# unless the distro kernel includes ovs+conntrack support.
+OVN_BUILD_MODULES=$(trueorfalse False OVN_BUILD_MODULES)
+OVN_BUILD_FROM_SOURCE=$(trueorfalse False OVN_BUILD_FROM_SOURCE)
+if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then
+    Q_BUILD_OVS_FROM_GIT=True
+fi
+
+# Whether or not to install the ovs python module from ovs source. This can be
+# used to test and validate new ovs python features. This should only be used
+# for development purposes since the ovs python version is controlled by OpenStack
+# requirements.
+OVN_INSTALL_OVS_PYTHON_MODULE=$(trueorfalse False OVN_INSTALL_OVS_PYTHON_MODULE)
+
+# GENEVE overlay protocol overhead. Defaults to 38 bytes plus the IP version
+# overhead (20 bytes for IPv4 (default) or 40 bytes for IPv6) which is determined
+# based on the ML2 overlay_ip_version option. The ML2 framework will use this to
+# configure the MTU DHCP option.
+OVN_GENEVE_OVERHEAD=${OVN_GENEVE_OVERHEAD:-38}
+
+# The log level of the OVN databases (north and south).
+# Supported log levels are: off, emer, err, warn, info or dbg.
+# More information about log levels can be found at +# http://www.openvswitch.org/support/dist-docs/ovs-appctl.8.txt +OVN_DBS_LOG_LEVEL=${OVN_DBS_LOG_LEVEL:-info} + +# OVN metadata agent configuration +OVN_META_CONF=$NEUTRON_CONF_DIR/neutron_ovn_metadata_agent.ini +OVN_META_DATA_HOST=${OVN_META_DATA_HOST:-$(ipv6_unquote $SERVICE_HOST)} + +# OVN agent configuration +# The OVN agent is configured, by default, with the "metadata" extension. +OVN_AGENT_CONF=$NEUTRON_CONF_DIR/plugins/ml2/ovn_agent.ini +OVN_AGENT_EXTENSIONS=${OVN_AGENT_EXTENSIONS:-metadata} +# The variable TARGET_ENABLE_OVN_AGENT, if True, overrides the OVN Metadata +# agent service (q-ovn-metadata-agent neutron-ovn-metadata-agent) and the OVN +# agent service (q-ovn-agent neutron-ovn-agent) configuration, always disabling +# the first one (OVN Metadata agent) and enabling the second (OVN agent). +# This variable will be removed in 2026.2, along with the OVN Metadata agent +# removal. +TARGET_ENABLE_OVN_AGENT=$(trueorfalse False TARGET_ENABLE_OVN_AGENT) + +# If True (default) the node will be considered a gateway node. +ENABLE_CHASSIS_AS_GW=$(trueorfalse True ENABLE_CHASSIS_AS_GW) +OVN_L3_CREATE_PUBLIC_NETWORK=$(trueorfalse True OVN_L3_CREATE_PUBLIC_NETWORK) + +export OVSDB_SERVER_LOCAL_HOST=$SERVICE_LOCAL_HOST +TUNNEL_IP=$TUNNEL_ENDPOINT_IP +if [[ "$SERVICE_IP_VERSION" == 6 ]]; then + OVSDB_SERVER_LOCAL_HOST=[$OVSDB_SERVER_LOCAL_HOST] + TUNNEL_IP=[$TUNNEL_IP] +fi + +OVN_IGMP_SNOOPING_ENABLE=$(trueorfalse False OVN_IGMP_SNOOPING_ENABLE) + +OVS_PREFIX= +if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then + OVS_PREFIX=/usr/local +fi +OVS_SBINDIR=$OVS_PREFIX/sbin +OVS_BINDIR=$OVS_PREFIX/bin +OVS_RUNDIR=$OVS_PREFIX/var/run/openvswitch +OVS_SHAREDIR=$OVS_PREFIX/share/openvswitch +OVS_SCRIPTDIR=$OVS_SHAREDIR/scripts +OVS_DATADIR=$DATA_DIR/ovs +OVS_SYSCONFDIR=${OVS_SYSCONFDIR:-$OVS_PREFIX/etc/openvswitch} + +if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then + OVN_DATADIR=$DATA_DIR/ovn +else + # When using OVN from packages, the data dir for OVN DBs is + # /var/lib/ovn + OVN_DATADIR=/var/lib/ovn +fi +OVN_SHAREDIR=$OVS_PREFIX/share/ovn +OVN_SCRIPTDIR=$OVN_SHAREDIR/scripts +OVN_RUNDIR=$OVS_PREFIX/var/run/ovn + +NEUTRON_OVN_BIN_DIR=$(get_python_exec_prefix) +NEUTRON_OVN_METADATA_BINARY="neutron-ovn-metadata-agent" +NEUTRON_OVN_AGENT_BINARY="neutron-ovn-agent" + +STACK_GROUP="$( id --group --name "$STACK_USER" )" + +OVN_NORTHD_SERVICE=ovn-northd.service +if is_ubuntu; then + # The ovn-central.service file on Ubuntu is responsible for starting + # ovn-northd and the OVN DBs (on CentOS this is done by ovn-northd.service) + OVN_NORTHD_SERVICE=ovn-central.service +fi +OVSDB_SERVER_SERVICE=ovsdb-server.service +OVS_VSWITCHD_SERVICE=ovs-vswitchd.service +OVN_CONTROLLER_SERVICE=ovn-controller.service +OVN_CONTROLLER_VTEP_SERVICE=ovn-controller-vtep.service +if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then + OVSDB_SERVER_SERVICE=devstack@ovsdb-server.service + OVS_VSWITCHD_SERVICE=devstack@ovs-vswitchd.service + OVN_NORTHD_SERVICE=devstack@ovn-northd.service + OVN_CONTROLLER_SERVICE=devstack@ovn-controller.service + OVN_CONTROLLER_VTEP_SERVICE=devstack@ovn-controller-vtep.service +fi + +# Defaults Overwrite +# ------------------ +# NOTE(ralonsoh): during the eventlet removal, the "logger" mech +# driver has been removed from this list. Re-add it once the removal +# is finished or the mech driver does not call monkey_patch(). 
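+# NOTE: illustrative example only (not part of this change): each of the
+# defaults below can be overridden from local.conf before this file is
+# sourced, e.g.:
+#   Q_ML2_PLUGIN_GENEVE_TYPE_OPTIONS="vni_ranges=1:2000"
+#   Q_LOG_DRIVER_RATE_LIMIT=200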
+Q_ML2_PLUGIN_MECHANISM_DRIVERS=${Q_ML2_PLUGIN_MECHANISM_DRIVERS:-ovn} +Q_ML2_PLUGIN_TYPE_DRIVERS=${Q_ML2_PLUGIN_TYPE_DRIVERS:-local,flat,vlan,geneve} +Q_ML2_TENANT_NETWORK_TYPE=${Q_ML2_TENANT_NETWORK_TYPE:-"geneve"} +Q_ML2_PLUGIN_GENEVE_TYPE_OPTIONS=${Q_ML2_PLUGIN_GENEVE_TYPE_OPTIONS:-"vni_ranges=1:65536"} +Q_ML2_PLUGIN_EXT_DRIVERS=${Q_ML2_PLUGIN_EXT_DRIVERS:-port_security,qos} +# this one allows empty: +ML2_L3_PLUGIN=${ML2_L3_PLUGIN-"ovn-router"} + +Q_LOG_DRIVER_RATE_LIMIT=${Q_LOG_DRIVER_RATE_LIMIT:-100} +Q_LOG_DRIVER_BURST_LIMIT=${Q_LOG_DRIVER_BURST_LIMIT:-25} +Q_LOG_DRIVER_LOG_BASE=${Q_LOG_DRIVER_LOG_BASE:-acl_log_meter} + +# Utility Functions +# ----------------- + +function wait_for_db_file { + local count=0 + while [ ! -f $1 ]; do + sleep 1 + count=$((count+1)) + if [ "$count" -gt 40 ]; then + die $LINENO "DB File $1 not found" + fi + done +} + +function wait_for_sock_file { + local count=0 + while [ ! -S $1 ]; do + sleep 1 + count=$((count+1)) + if [ "$count" -gt 40 ]; then + die $LINENO "Socket $1 not found" + fi + done +} + +function use_new_ovn_repository { + if [[ "$OVN_BUILD_FROM_SOURCE" == "False" ]]; then + return 0 + fi + if [ -z "$is_new_ovn" ]; then + local ovs_repo_dir=$DEST/$OVS_REPO_NAME + if [ ! -d $ovs_repo_dir ]; then + git_timed clone $OVS_REPO $ovs_repo_dir + pushd $ovs_repo_dir + git checkout $OVS_BRANCH + popd + else + clone_repository $OVS_REPO $ovs_repo_dir $OVS_BRANCH + fi + # Check the split commit exists in the current branch + pushd $ovs_repo_dir + git log $OVS_BRANCH --pretty=format:"%H" | grep -q $OVN_SPLIT_HASH + is_new_ovn=$? + popd + fi + return $is_new_ovn +} + +# NOTE(rtheis): Function copied from DevStack _neutron_ovs_base_setup_bridge +# and _neutron_ovs_base_add_bridge with the call to neutron-ovs-cleanup +# removed. The call is not relevant for OVN, as it is specific to the use +# of Neutron's OVS agent and hangs when running stack.sh because +# neutron-ovs-cleanup uses the OVSDB native interface. 
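+# Illustrative usage (assumption, not exercised directly by this change):
+#   ovn_base_setup_bridge br-ex
+# creates the bridge if it does not already exist and tags it with the
+# external-id bridge-id=br-ex.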
+function ovn_base_setup_bridge { + local bridge=$1 + local addbr_cmd="sudo ovs-vsctl --no-wait -- --may-exist add-br $bridge -- set bridge $bridge protocols=OpenFlow13,OpenFlow15" + + if [ "$OVS_DATAPATH_TYPE" != "system" ] ; then + addbr_cmd="$addbr_cmd -- set Bridge $bridge datapath_type=${OVS_DATAPATH_TYPE}" + fi + + $addbr_cmd + sudo ovs-vsctl --no-wait br-set-external-id $bridge bridge-id $bridge +} + +function _start_process { + $SYSTEMCTL daemon-reload + $SYSTEMCTL enable $1 + $SYSTEMCTL restart $1 +} + +function _run_process { + local service=$1 + local cmd="$2" + local stop_cmd="$3" + local group=$4 + local user=$5 + local rundir=${6:-$OVS_RUNDIR} + + local systemd_service="devstack@$service.service" + local unit_file="$SYSTEMD_DIR/$systemd_service" + local environment="OVN_RUNDIR=$OVN_RUNDIR OVN_DBDIR=$OVN_DATADIR OVN_LOGDIR=$LOGDIR OVS_RUNDIR=$OVS_RUNDIR OVS_DBDIR=$OVS_DATADIR OVS_LOGDIR=$LOGDIR" + + echo "Starting $service executed command": $cmd + + write_user_unit_file $systemd_service "$cmd" "$group" "$user" + iniset -sudo $unit_file "Service" "Type" "forking" + iniset -sudo $unit_file "Service" "RemainAfterExit" "yes" + iniset -sudo $unit_file "Service" "KillMode" "mixed" + iniset -sudo $unit_file "Service" "LimitNOFILE" "65536" + iniset -sudo $unit_file "Service" "Environment" "$environment" + if [ -n "$stop_cmd" ]; then + iniset -sudo $unit_file "Service" "ExecStop" "$stop_cmd" + fi + + _start_process $systemd_service + + local testcmd="test -e $rundir/$service.pid" + test_with_retry "$testcmd" "$service did not start" $SERVICE_TIMEOUT 1 + local service_ctl_file + service_ctl_file=$(ls $rundir | grep $service | grep ctl) + if [ -z "$service_ctl_file" ]; then + die $LINENO "ctl file for service $service is not present." + fi + sudo ovs-appctl -t $rundir/$service_ctl_file vlog/set console:off syslog:info file:info +} + +function clone_repository { + local repo=$1 + local dir=$2 + local branch=$3 + # Set ERROR_ON_CLONE to false to avoid the need of having the + # repositories like OVN and OVS in the required_projects of the job + # definition. + ERROR_ON_CLONE=false git_clone $repo $dir $branch +} + +function create_public_bridge { + # Create the public bridge that OVN will use + sudo ovs-vsctl --may-exist add-br $PUBLIC_BRIDGE -- set bridge $PUBLIC_BRIDGE protocols=OpenFlow13,OpenFlow15 + sudo ovs-vsctl set open . external-ids:ovn-bridge-mappings=${OVN_BRIDGE_MAPPINGS} + _configure_public_network_connectivity +} + +function is_ovn_metadata_agent_enabled { + if is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent && [[ "$TARGET_ENABLE_OVN_AGENT" == "False" ]]; then + return 0 + fi + return 1 +} + +function is_ovn_agent_enabled { + if is_service_enabled q-ovn-agent neutron-ovn-agent || [[ "$TARGET_ENABLE_OVN_AGENT" == "True" ]]; then + enable_service q-ovn-agent + return 0 + fi + return 1 + +} + +# OVN compilation functions +# ------------------------- + + +# compile_ovn() - Compile OVN from source and load needed modules +# Accepts three parameters: +# - first optional parameter defines prefix for +# ovn compilation +# - second optional parameter defines localstatedir for +# ovn single machine runtime +function compile_ovn { + local prefix=$1 + local localstatedir=$2 + + if [ -n "$prefix" ]; then + prefix="--prefix=$prefix" + fi + + if [ -n "$localstatedir" ]; then + localstatedir="--localstatedir=$localstatedir" + fi + + clone_repository $OVN_REPO $DEST/$OVN_REPO_NAME $OVN_BRANCH + pushd $DEST/$OVN_REPO_NAME + + if [ ! 
-f configure ] ; then + ./boot.sh + fi + + # NOTE(mnaser): OVN requires that you build using the OVS from the + # submodule. + # + # https://github.com/ovn-org/ovn/blob/3fb397b63663297acbcbf794e1233951222ae5af/Documentation/intro/install/general.rst#bootstrapping + # https://github.com/ovn-org/ovn/issues/128 + git submodule update --init + pushd ovs + if [ ! -f configure ] ; then + ./boot.sh + fi + if [ ! -f config.status ] || [ configure -nt config.status ] ; then + ./configure + fi + make -j$(($(nproc) + 1)) + popd + + if [ ! -f config.status ] || [ configure -nt config.status ] ; then + ./configure $prefix $localstatedir + fi + make -j$(($(nproc) + 1)) + sudo make install + popd +} + + +# OVN Neutron driver functions +# ---------------------------- + +# OVN service sanity check +function ovn_sanity_check { + if is_service_enabled q-agt neutron-agent; then + die $LINENO "The q-agt/neutron-agt service must be disabled with OVN." + elif is_service_enabled q-l3 neutron-l3; then + die $LINENO "The q-l3/neutron-l3 service must be disabled with OVN." + elif is_service_enabled q-svc neutron-api && [[ ! $Q_ML2_PLUGIN_MECHANISM_DRIVERS =~ "ovn" ]]; then + die $LINENO "OVN needs to be enabled in \$Q_ML2_PLUGIN_MECHANISM_DRIVERS" + elif is_service_enabled q-svc neutron-api && [[ ! $Q_ML2_PLUGIN_TYPE_DRIVERS =~ "geneve" ]]; then + die $LINENO "Geneve needs to be enabled in \$Q_ML2_PLUGIN_TYPE_DRIVERS to be used with OVN" + fi +} + +# install_ovn() - Collect source and prepare +function install_ovn { + echo "Installing OVN and dependent packages" + + # Check the OVN configuration + ovn_sanity_check + + # Install tox, used to generate the config (see devstack/override-defaults) + pip_install tox + + sudo mkdir -p $OVS_RUNDIR + sudo chown $(whoami) $OVS_RUNDIR + + if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then + # If OVS is already installed, remove it, because we're about to + # re-install it from source. + for package in openvswitch openvswitch-switch openvswitch-common; do + if is_package_installed $package ; then + uninstall_package $package + fi + done + + remove_ovs_packages + sudo rm -f $OVS_RUNDIR/* + + compile_ovs $OVN_BUILD_MODULES + if use_new_ovn_repository; then + compile_ovn + fi + + sudo mkdir -p $OVS_PREFIX/var/log/openvswitch + sudo chown $(whoami) $OVS_PREFIX/var/log/openvswitch + sudo mkdir -p $OVS_PREFIX/var/log/ovn + sudo chown $(whoami) $OVS_PREFIX/var/log/ovn + else + install_package $(get_packages openvswitch) + install_package $(get_packages ovn) + fi + + # Ensure that the OVS commands are accessible in the PATH + export PATH=$OVS_BINDIR:$PATH + + # Archive log files and create new + local log_archive_dir=$LOGDIR/archive + mkdir -p $log_archive_dir + for logfile in ovs-vswitchd.log ovn-northd.log ovn-controller.log ovn-controller-vtep.log ovs-vtep.log ovsdb-server.log ovsdb-server-nb.log ovsdb-server-sb.log; do + if [ -f "$LOGDIR/$logfile" ] ; then + mv "$LOGDIR/$logfile" "$log_archive_dir/$logfile.${CURRENT_LOG_TIME}" + fi + done + + # Install ovsdbapp from source if requested + if use_library_from_git "ovsdbapp"; then + git_clone_by_name "ovsdbapp" + setup_dev_lib "ovsdbapp" + fi + + # Install ovs python module from ovs source. 
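+    # Illustrative opt-in (not a default): set OVN_INSTALL_OVS_PYTHON_MODULE=True
+    # in local.conf to exercise the branch below.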
+ if [[ "$OVN_INSTALL_OVS_PYTHON_MODULE" == "True" ]]; then + sudo pip uninstall -y ovs + # Clone the OVS repository if it's not yet present + clone_repository $OVS_REPO $DEST/$OVS_REPO_NAME $OVS_BRANCH + sudo pip install -e $DEST/$OVS_REPO_NAME/python + fi +} + +# filter_network_api_extensions() - Remove non-supported API extensions by +# the OVN driver from the list of enabled API extensions +function filter_network_api_extensions { + SUPPORTED_NETWORK_API_EXTENSIONS=$($PYTHON -c \ + 'from neutron.common.ovn import extensions ;\ + print(",".join(extensions.ML2_SUPPORTED_API_EXTENSIONS))') + SUPPORTED_NETWORK_API_EXTENSIONS=$SUPPORTED_NETWORK_API_EXTENSIONS,$($PYTHON -c \ + 'from neutron.common.ovn import extensions ;\ + print(",".join(extensions.ML2_SUPPORTED_API_EXTENSIONS_OVN_L3))') + if is_service_enabled q-qos neutron-qos ; then + SUPPORTED_NETWORK_API_EXTENSIONS="$SUPPORTED_NETWORK_API_EXTENSIONS,qos" + fi + NETWORK_API_EXTENSIONS=${NETWORK_API_EXTENSIONS:-$SUPPORTED_NETWORK_API_EXTENSIONS} + extensions=$(echo $NETWORK_API_EXTENSIONS | tr ', ' '\n' | sort -u) + supported_ext=$(echo $SUPPORTED_NETWORK_API_EXTENSIONS | tr ', ' '\n' | sort -u) + enabled_ext=$(comm -12 <(echo -e "$extensions") <(echo -e "$supported_ext")) + disabled_ext=$(comm -3 <(echo -e "$extensions") <(echo -e "$enabled_ext")) + + # Log a message in case some extensions had to be disabled because + # they are not supported by the OVN driver + if [ ! -z "$disabled_ext" ]; then + _disabled=$(echo $disabled_ext | tr ' ' ',') + echo "The folling network API extensions have been disabled because they are not supported by OVN: $_disabled" + fi + + # Export the final list of extensions that have been enabled and are + # supported by OVN + export NETWORK_API_EXTENSIONS=$(echo $enabled_ext | tr ' ' ',') +} + +function configure_ovn_plugin { + echo "Configuring Neutron for OVN" + + if is_service_enabled q-svc neutron-api; then + filter_network_api_extensions + populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_type_geneve max_header_size=$OVN_GENEVE_OVERHEAD + populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_nb_connection="$OVN_NB_REMOTE" + populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_sb_connection="$OVN_SB_REMOTE" + if is_service_enabled tls-proxy; then + populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_sb_ca_cert="$INT_CA_DIR/ca-chain.pem" + populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_sb_certificate="$INT_CA_DIR/$DEVSTACK_CERT_NAME.crt" + populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_sb_private_key="$INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key" + populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_nb_ca_cert="$INT_CA_DIR/ca-chain.pem" + populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_nb_certificate="$INT_CA_DIR/$DEVSTACK_CERT_NAME.crt" + populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_nb_private_key="$INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key" + fi + populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn neutron_sync_mode="$OVN_NEUTRON_SYNC_MODE" + populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_l3_scheduler="$OVN_L3_SCHEDULER" + populate_ml2_config /$Q_PLUGIN_CONF_FILE securitygroup enable_security_group="$Q_USE_SECGROUP" + inicomment /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver + + if is_service_enabled q-log neutron-log; then + populate_ml2_config /$Q_PLUGIN_CONF_FILE network_log rate_limit="$Q_LOG_DRIVER_RATE_LIMIT" + populate_ml2_config /$Q_PLUGIN_CONF_FILE network_log burst_limit="$Q_LOG_DRIVER_BURST_LIMIT" + inicomment /$Q_PLUGIN_CONF_FILE network_log local_output_log_base="$Q_LOG_DRIVER_LOG_BASE" + fi + + if 
is_ovn_metadata_agent_enabled; then + populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_metadata_enabled=True + elif is_ovn_agent_enabled && [[ "$OVN_AGENT_EXTENSIONS" =~ 'metadata' ]]; then + populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_metadata_enabled=True + else + populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_metadata_enabled=False + fi + + if is_service_enabled q-dns neutron-dns ; then + iniset $NEUTRON_CONF DEFAULT dns_domain openstackgate.local + populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn dns_servers="$OVN_DNS_SERVERS" + fi + + iniset $NEUTRON_CONF ovs igmp_snooping_enable $OVN_IGMP_SNOOPING_ENABLE + fi + + if is_service_enabled q-dhcp neutron-dhcp ; then + iniset $NEUTRON_CONF DEFAULT dhcp_agent_notification True + else + iniset $NEUTRON_CONF DEFAULT dhcp_agent_notification False + fi + + if is_service_enabled n-api-meta ; then + if is_ovn_metadata_agent_enabled; then + iniset $NOVA_CONF neutron service_metadata_proxy True + elif is_ovn_agent_enabled && [[ "$OVN_AGENT_EXTENSIONS" =~ 'metadata' ]]; then + iniset $NOVA_CONF neutron service_metadata_proxy True + fi + fi +} + +function configure_ovn { + echo "Configuring OVN" + + if [ -z "$OVN_UUID" ] ; then + if [ -f $OVS_SYSCONFDIR/system-id.conf ]; then + OVN_UUID=$(cat $OVS_SYSCONFDIR/system-id.conf) + else + OVN_UUID=$(uuidgen) + echo $OVN_UUID | sudo tee $OVS_SYSCONFDIR/system-id.conf + fi + else + local ovs_uuid + ovs_uuid=$(cat $OVS_SYSCONFDIR/system-id.conf) + if [ "$ovs_uuid" != $OVN_UUID ]; then + echo $OVN_UUID | sudo tee $OVS_SYSCONFDIR/system-id.conf + fi + fi + + # Erase the pre-set configurations from packages. DevStack will + # configure OVS and OVN accordingly for its use. + if [[ "$OVN_BUILD_FROM_SOURCE" == "False" ]] && is_fedora; then + sudo truncate -s 0 /etc/openvswitch/default.conf + sudo truncate -s 0 /etc/sysconfig/openvswitch + sudo truncate -s 0 /etc/sysconfig/ovn + fi + + # Metadata + local sample_file="" + local config_file="" + if is_ovn_agent_enabled && [[ "$OVN_AGENT_EXTENSIONS" =~ 'metadata' ]] && is_service_enabled ovn-controller; then + sample_file=$NEUTRON_DIR/etc/neutron/plugins/ml2/ovn_agent.ini.sample + config_file=$OVN_AGENT_CONF + elif is_ovn_metadata_agent_enabled && is_service_enabled ovn-controller; then + sample_file=$NEUTRON_DIR/etc/neutron_ovn_metadata_agent.ini.sample + config_file=$OVN_META_CONF + fi + if [ -n "$config_file" ]; then + sudo install -d -o $STACK_USER $NEUTRON_CONF_DIR + + mkdir -p $NEUTRON_DIR/etc/neutron/plugins/ml2 + (cd $NEUTRON_DIR && exec ./tools/generate_config_file_samples.sh) + + cp $sample_file $config_file + configure_root_helper_options $config_file + + iniset $config_file DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL + iniset $config_file DEFAULT nova_metadata_host $OVN_META_DATA_HOST + iniset $config_file DEFAULT metadata_workers $API_WORKERS + iniset $config_file DEFAULT state_path $DATA_DIR/neutron + iniset $config_file ovs ovsdb_connection tcp:$OVSDB_SERVER_LOCAL_HOST:6640 + iniset $config_file ovn ovn_sb_connection $OVN_SB_REMOTE + if is_service_enabled tls-proxy; then + iniset $config_file ovn \ + ovn_sb_ca_cert $INT_CA_DIR/ca-chain.pem + iniset $config_file ovn \ + ovn_sb_certificate $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt + iniset $config_file ovn \ + ovn_sb_private_key $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key + fi + if [[ $config_file == $OVN_AGENT_CONF ]]; then + iniset $config_file agent extensions $OVN_AGENT_EXTENSIONS + iniset $config_file ovn ovn_nb_connection $OVN_NB_REMOTE + fi + fi +} + +function init_ovn { + # clean up from previous (possibly 
aborted) runs + # create required data files + + # Assumption: this is a dedicated test system and there is nothing important + # in the ovn, ovn-nb, or ovs databases. We're going to trash them and + # create new ones on each devstack run. + + local mkdir_cmd="mkdir -p ${OVN_DATADIR}" + + if [[ "$OVN_BUILD_FROM_SOURCE" == "False" ]]; then + mkdir_cmd="sudo ${mkdir_cmd}" + fi + + $mkdir_cmd + mkdir -p $OVS_DATADIR + + rm -f $OVS_DATADIR/*.db + rm -f $OVS_DATADIR/.*.db.~lock~ + sudo rm -f $OVN_DATADIR/*.db + sudo rm -f $OVN_DATADIR/.*.db.~lock~ + sudo rm -f $OVN_RUNDIR/*.sock +} + +function _start_ovs { + echo "Starting OVS" + if is_service_enabled ovn-controller ovn-controller-vtep ovn-northd; then + # ovsdb-server and ovs-vswitchd are used privately in OVN as openvswitch service names. + enable_service ovsdb-server + enable_service ovs-vswitchd + + if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then + if [ ! -f $OVS_DATADIR/conf.db ]; then + ovsdb-tool create $OVS_DATADIR/conf.db $OVS_SHAREDIR/vswitch.ovsschema + fi + + if is_service_enabled ovn-controller-vtep; then + if [ ! -f $OVS_DATADIR/vtep.db ]; then + ovsdb-tool create $OVS_DATADIR/vtep.db $OVS_SHAREDIR/vtep.ovsschema + fi + fi + + local dbcmd="$OVS_SBINDIR/ovsdb-server --remote=punix:$OVS_RUNDIR/db.sock --remote=ptcp:6640:$OVSDB_SERVER_LOCAL_HOST --pidfile --detach --log-file" + dbcmd+=" --remote=db:Open_vSwitch,Open_vSwitch,manager_options" + if is_service_enabled ovn-controller-vtep; then + dbcmd+=" --remote=db:hardware_vtep,Global,managers $OVS_DATADIR/vtep.db" + fi + dbcmd+=" $OVS_DATADIR/conf.db" + _run_process ovsdb-server "$dbcmd" "" "$STACK_GROUP" "root" "$OVS_RUNDIR" + + # Note: ovn-controller will create and configure br-int once it is started. + # So, no need to create it now because nothing depends on that bridge here. + local ovscmd="$OVS_SBINDIR/ovs-vswitchd --log-file --pidfile --detach" + _run_process ovs-vswitchd "$ovscmd" "" "$STACK_GROUP" "root" "$OVS_RUNDIR" + else + _start_process "$OVSDB_SERVER_SERVICE" + _start_process "$OVS_VSWITCHD_SERVICE" + fi + + echo "Configuring OVSDB" + if is_service_enabled tls-proxy; then + sudo ovs-vsctl --no-wait set-ssl \ + $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key \ + $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt \ + $INT_CA_DIR/ca-chain.pem + fi + + sudo ovs-vsctl --no-wait set-manager ptcp:6640:$OVSDB_SERVER_LOCAL_HOST + sudo ovs-vsctl --no-wait set open_vswitch . system-type="devstack" + sudo ovs-vsctl --no-wait set open_vswitch . external-ids:system-id="$OVN_UUID" + sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-remote="$OVN_SB_REMOTE" + sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-bridge="br-int" + sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-encap-type="geneve" + sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-encap-ip="$TUNNEL_IP" + sudo ovs-vsctl --no-wait set open_vswitch . external-ids:hostname=$(hostname) + # Select this chassis to host gateway routers + if [[ "$ENABLE_CHASSIS_AS_GW" == "True" ]]; then + sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-cms-options="enable-chassis-as-gw" + fi + + if is_provider_network || [[ $Q_USE_PROVIDERNET_FOR_PUBLIC == "True" ]]; then + ovn_base_setup_bridge $OVS_PHYSICAL_BRIDGE + sudo ovs-vsctl set open . 
external-ids:ovn-bridge-mappings=${PHYSICAL_NETWORK}:${OVS_PHYSICAL_BRIDGE} + fi + + if is_service_enabled ovn-controller-vtep ; then + ovn_base_setup_bridge br-v + vtep-ctl add-ps br-v + vtep-ctl set Physical_Switch br-v tunnel_ips=$TUNNEL_IP + + enable_service ovs-vtep + local vtepcmd="$OVS_SCRIPTDIR/ovs-vtep --log-file --pidfile --detach br-v" + _run_process ovs-vtep "$vtepcmd" "" "$STACK_GROUP" "root" "$OVS_RUNDIR" + + vtep-ctl set-manager tcp:$HOST_IP:6640 + fi + fi +} + +function _wait_for_ovn_and_set_custom_config { + # Wait for the service to be ready + # Check for socket and db files for both OVN NB and SB + wait_for_sock_file $OVN_RUNDIR/ovnnb_db.sock + wait_for_sock_file $OVN_RUNDIR/ovnsb_db.sock + wait_for_db_file $OVN_DATADIR/ovnnb_db.db + wait_for_db_file $OVN_DATADIR/ovnsb_db.db + + if is_service_enabled tls-proxy; then + sudo ovn-nbctl --db=unix:$OVN_RUNDIR/ovnnb_db.sock set-ssl $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/ca-chain.pem + sudo ovn-sbctl --db=unix:$OVN_RUNDIR/ovnsb_db.sock set-ssl $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/ca-chain.pem + fi + + sudo ovn-nbctl --db=unix:$OVN_RUNDIR/ovnnb_db.sock set-connection p${OVN_PROTO}:6641:$SERVICE_LISTEN_ADDRESS -- set connection . inactivity_probe=60000 + sudo ovn-sbctl --db=unix:$OVN_RUNDIR/ovnsb_db.sock set-connection p${OVN_PROTO}:6642:$SERVICE_LISTEN_ADDRESS -- set connection . inactivity_probe=60000 + sudo ovs-appctl -t $OVN_RUNDIR/ovnnb_db.ctl vlog/set console:off syslog:$OVN_DBS_LOG_LEVEL file:$OVN_DBS_LOG_LEVEL + sudo ovs-appctl -t $OVN_RUNDIR/ovnsb_db.ctl vlog/set console:off syslog:$OVN_DBS_LOG_LEVEL file:$OVN_DBS_LOG_LEVEL +} + +# start_ovn() - Start running processes, including screen +function start_ovn { + echo "Starting OVN" + + _start_ovs + + local SCRIPTDIR=$OVN_SCRIPTDIR + if ! 
use_new_ovn_repository; then + SCRIPTDIR=$OVS_SCRIPTDIR + fi + + if is_service_enabled ovn-northd ; then + if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then + local cmd="/bin/bash $SCRIPTDIR/ovn-ctl --no-monitor start_northd" + local stop_cmd="/bin/bash $SCRIPTDIR/ovn-ctl stop_northd" + + _run_process ovn-northd "$cmd" "$stop_cmd" "$STACK_GROUP" "root" "$OVN_RUNDIR" + else + _start_process "$OVN_NORTHD_SERVICE" + fi + + _wait_for_ovn_and_set_custom_config + + fi + + if is_service_enabled ovn-controller ; then + if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then + local cmd="/bin/bash $SCRIPTDIR/ovn-ctl --no-monitor start_controller" + local stop_cmd="/bin/bash $SCRIPTDIR/ovn-ctl stop_controller" + + _run_process ovn-controller "$cmd" "$stop_cmd" "$STACK_GROUP" "root" "$OVN_RUNDIR" + else + _start_process "$OVN_CONTROLLER_SERVICE" + fi + fi + + if is_service_enabled ovn-controller-vtep ; then + if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then + local cmd="$OVS_BINDIR/ovn-controller-vtep --log-file --pidfile --detach --ovnsb-db=$OVN_SB_REMOTE" + _run_process ovn-controller-vtep "$cmd" "" "$STACK_GROUP" "root" "$OVN_RUNDIR" + else + _start_process "$OVN_CONTROLLER_VTEP_SERVICE" + fi + fi + + if is_ovn_metadata_agent_enabled; then + run_process q-ovn-metadata-agent "$NEUTRON_OVN_BIN_DIR/$NEUTRON_OVN_METADATA_BINARY --config-file $OVN_META_CONF" + # Format logging + setup_logging $OVN_META_CONF + fi + + if is_ovn_agent_enabled; then + run_process q-ovn-agent "$NEUTRON_OVN_BIN_DIR/$NEUTRON_OVN_AGENT_BINARY --config-file $OVN_AGENT_CONF" + # Format logging + setup_logging $OVN_AGENT_CONF + fi +} + +function _stop_ovs_dp { + sudo ovs-dpctl dump-dps | sudo xargs -n1 ovs-dpctl del-dp + modprobe -q -r vport_geneve vport_vxlan openvswitch || true +} + +function _stop_process { + local service=$1 + echo "Stopping process $service" + if $SYSTEMCTL is-enabled $service; then + $SYSTEMCTL stop $service + $SYSTEMCTL disable $service + fi +} + +function stop_ovn { + # NOTE(ralonsoh): this check doesn't use "is_ovn_metadata_agent_enabled", + # instead it relies only in the configured services, disregarding the + # flag "TARGET_ENABLE_OVN_AGENT". It is needed to force the OVN Metadata + # agent stop in case the flag "TARGET_ENABLE_OVN_AGENT" is set. 
+ if is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent; then + # pkill takes care not to kill itself, but it may kill its parent + # sudo unless we use the "ps | grep [f]oo" trick + sudo pkill -9 -f "[h]aproxy" || : + _stop_process "devstack@q-ovn-metadata-agent.service" + fi + if is_ovn_agent_enabled; then + # pkill takes care not to kill itself, but it may kill its parent + # sudo unless we use the "ps | grep [f]oo" trick + sudo pkill -9 -f "[h]aproxy" || : + _stop_process "devstack@q-ovn-agent.service" + fi + if is_service_enabled ovn-controller-vtep ; then + _stop_process "$OVN_CONTROLLER_VTEP_SERVICE" + fi + if is_service_enabled ovn-controller ; then + _stop_process "$OVN_CONTROLLER_SERVICE" + fi + if is_service_enabled ovn-northd ; then + _stop_process "$OVN_NORTHD_SERVICE" + fi + if is_service_enabled ovs-vtep ; then + _stop_process "devstack@ovs-vtep.service" + fi + + _stop_process "$OVS_VSWITCHD_SERVICE" + _stop_process "$OVSDB_SERVER_SERVICE" + + _stop_ovs_dp +} + +function _cleanup { + local path=${1:-$DEST/$OVN_REPO_NAME} + pushd $path + cd $path + sudo make uninstall + sudo make distclean + popd +} + +# cleanup_ovn() - Remove residual data files, anything left over from previous +# runs that a clean run would need to clean up +function cleanup_ovn { + local ovn_path=$DEST/$OVN_REPO_NAME + local ovs_path=$DEST/$OVS_REPO_NAME + + if [ -d $ovn_path ]; then + _cleanup $ovn_path + fi + + if [ -d $ovs_path ]; then + _cleanup $ovs_path + fi + + sudo rm -rf $OVN_RUNDIR +} diff --git a/lib/neutron_plugins/ovs_base b/lib/neutron_plugins/ovs_base new file mode 100644 index 0000000000..adabc56412 --- /dev/null +++ b/lib/neutron_plugins/ovs_base @@ -0,0 +1,123 @@ +#!/bin/bash +# +# common functions for ovs based plugin +# ------------------------------------- + +# Save trace setting +_XTRACE_NEUTRON_OVS_BASE=$(set +o | grep xtrace) +set +o xtrace + +# Load devstack ovs compliation and loading functions +source ${TOP_DIR}/lib/neutron_plugins/ovs_source + +# Defaults +# -------- + +OVS_BRIDGE=${OVS_BRIDGE:-br-int} +# OVS recognize default 'system' datapath or 'netdev' for userspace datapath +OVS_DATAPATH_TYPE=${OVS_DATAPATH_TYPE:-system} +OVS_TUNNEL_BRIDGE=${OVS_TUNNEL_BRIDGE:-br-tun} + +function is_neutron_ovs_base_plugin { + # Yes, we use OVS. + return 0 +} + +function _neutron_ovs_base_add_bridge { + local bridge=$1 + local addbr_cmd="sudo ovs-vsctl -- --may-exist add-br $bridge" + + if [ "$OVS_DATAPATH_TYPE" != "system" ] ; then + addbr_cmd="$addbr_cmd -- set Bridge $bridge datapath_type=${OVS_DATAPATH_TYPE}" + fi + + $addbr_cmd +} + +function _neutron_ovs_base_setup_bridge { + local bridge=$1 + neutron-ovs-cleanup --config-file $NEUTRON_CONF + _neutron_ovs_base_add_bridge $bridge + sudo ovs-vsctl --no-wait br-set-external-id $bridge bridge-id $bridge +} + +function neutron_ovs_base_cleanup { + # remove all OVS ports that look like Neutron created ports + for port in $(sudo ovs-vsctl list port | grep -o -e [a-zA-Z\-]*tap[0-9a-f\-]* -e q[rg]-[0-9a-f\-]*); do + sudo ovs-vsctl del-port ${port} + done + + # remove all OVS bridges created by Neutron + for bridge in $(sudo ovs-vsctl list-br | grep -o -e ${OVS_BRIDGE} -e ${PUBLIC_BRIDGE} -e ${OVS_TUNNEL_BRIDGE}); do + sudo ovs-vsctl del-br ${bridge} + done +} + +function _neutron_ovs_base_install_ubuntu_dkms { + # install Dynamic Kernel Module Support packages if needed + local kernel_version + kernel_version=$(uname -r) + local kernel_major_minor + kernel_major_minor=`echo $kernel_version | cut -d. 
-f1-2` + # From kernel 3.13 on, openvswitch-datapath-dkms is not needed + if vercmp "$kernel_major_minor" "<" "3.13" ; then + install_package "dkms openvswitch-datapath-dkms linux-headers-$kernel_version" + fi +} + +function _neutron_ovs_base_install_agent_packages { + if [ "$Q_BUILD_OVS_FROM_GIT" == "True" ]; then + remove_ovs_packages + compile_ovs False /usr/local /var + load_conntrack_gre_module + start_new_ovs + else + # Install deps + install_package $(get_packages "openvswitch") + if is_ubuntu; then + _neutron_ovs_base_install_ubuntu_dkms + restart_service openvswitch-switch + elif is_fedora; then + restart_service openvswitch + sudo systemctl enable openvswitch + fi + fi +} + +function _neutron_ovs_base_configure_firewall_driver { + if [[ "$Q_USE_SECGROUP" == "True" ]]; then + iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver openvswitch + if ! running_in_container; then + enable_kernel_bridge_firewall + fi + else + iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver noop + fi +} + +function _neutron_ovs_base_configure_l3_agent { + neutron-ovs-cleanup --config-file $NEUTRON_CONF + if [[ "$Q_USE_PUBLIC_VETH" = "True" ]]; then + ip link show $Q_PUBLIC_VETH_INT > /dev/null 2>&1 || + sudo ip link add $Q_PUBLIC_VETH_INT type veth \ + peer name $Q_PUBLIC_VETH_EX + sudo ip link set $Q_PUBLIC_VETH_INT up + sudo ip link set $Q_PUBLIC_VETH_EX up + sudo ip addr flush dev $Q_PUBLIC_VETH_EX + else + _neutron_ovs_base_add_public_bridge + sudo ovs-vsctl br-set-external-id $PUBLIC_BRIDGE bridge-id $PUBLIC_BRIDGE + fi +} + +function _neutron_ovs_base_add_public_bridge { + _neutron_ovs_base_add_bridge $PUBLIC_BRIDGE + set_mtu $PUBLIC_BRIDGE $PUBLIC_BRIDGE_MTU +} + +function _neutron_ovs_base_configure_nova_vif_driver { + : +} + +# Restore xtrace +$_XTRACE_NEUTRON_OVS_BASE diff --git a/lib/neutron_plugins/ovs_source b/lib/neutron_plugins/ovs_source new file mode 100644 index 0000000000..6b6f531a01 --- /dev/null +++ b/lib/neutron_plugins/ovs_source @@ -0,0 +1,214 @@ +#!/bin/bash +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# Defaults +# -------- +Q_BUILD_OVS_FROM_GIT=$(trueorfalse False Q_BUILD_OVS_FROM_GIT) + +# Set variables for building OVS from source +OVS_REPO=${OVS_REPO:-https://github.com/openvswitch/ovs.git} +OVS_REPO_NAME=$(basename ${OVS_REPO} | cut -f1 -d'.') +OVS_REPO_NAME=${OVS_REPO_NAME:-ovs} +OVS_BRANCH=${OVS_BRANCH:-branch-3.3} + +# Functions + +# load_module() - Load module using modprobe module given by argument and dies +# on failure +# - fatal argument is optional and says whether function should +# exit if module can't be loaded +function load_module { + local module=$1 + local fatal=$2 + + if [ "$(trueorfalse True fatal)" == "True" ]; then + sudo modprobe $module || (sudo dmesg && die $LINENO "FAILED TO LOAD $module") + else + sudo modprobe $module || (echo "FAILED TO LOAD $module" && sudo dmesg) + fi +} + +# prepare_for_compilation() - Fetch ovs git repository and install packages needed for +# compilation. 
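+# Illustrative usage (assumption, not part of this change):
+#   prepare_for_ovs_compilation True   # also install kernel headers for module builds
+#   prepare_for_ovs_compilation        # userspace-only build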
+function prepare_for_ovs_compilation { + local build_modules=${1:-False} + OVS_DIR=$DEST/$OVS_REPO_NAME + + if [ ! -d $OVS_DIR ] ; then + # We can't use git_clone here because we want to ignore ERROR_ON_CLONE + git_timed clone $OVS_REPO $OVS_DIR + cd $OVS_DIR + git checkout $OVS_BRANCH + else + # Even though the directory already exists, call git_clone to update it + # if needed based on the RECLONE option + git_clone $OVS_REPO $OVS_DIR $OVS_BRANCH + cd $OVS_DIR + fi + + # TODO: Can you create package list files like you can inside devstack? + install_package autoconf automake libtool gcc patch make + + # If build_modules is False, we don't need to install the kernel-* + # packages. Just return. + if [[ "$build_modules" == "False" ]]; then + return + fi + + KERNEL_VERSION=`uname -r` + if is_fedora ; then + # is_fedora covers Fedora, RHEL, CentOS, etc... + if [[ "$os_VENDOR" == "Fedora" ]]; then + install_package elfutils-libelf-devel + KERNEL_VERSION=`echo $KERNEL_VERSION | cut --delimiter='-' --field 1` + elif [[ ${KERNEL_VERSION:0:2} != "3." ]]; then + # dash is illegal character in rpm version so replace + # them with underscore like it is done in the kernel + # https://github.com/torvalds/linux/blob/master/scripts/package/mkspec#L25 + # but only for latest series of the kernel, not 3.x + + KERNEL_VERSION=`echo $KERNEL_VERSION | tr - _` + fi + + echo NOTE: if kernel-devel-$KERNEL_VERSION or kernel-headers-$KERNEL_VERSION installation + echo failed, please, provide a repository with the package, or yum update / reboot + echo your machine to get the latest kernel. + + install_package kernel-devel-$KERNEL_VERSION + install_package kernel-headers-$KERNEL_VERSION + if is_service_enabled tls-proxy; then + install_package openssl-devel + fi + + elif is_ubuntu ; then + install_package linux-headers-$KERNEL_VERSION + if is_service_enabled tls-proxy; then + install_package libssl-dev + fi + fi +} + +# load_ovs_kernel_modules() - load openvswitch kernel module +function load_ovs_kernel_modules { + load_module openvswitch + load_module vport-geneve False + sudo dmesg | tail +} + +# reload_ovs_kernel_modules() - reload openvswitch kernel module +function reload_ovs_kernel_modules { + set +e + ovs_system=$(sudo ovs-dpctl dump-dps | grep ovs-system) + if [ -n "$ovs_system" ]; then + sudo ovs-dpctl del-dp ovs-system + fi + set -e + sudo modprobe -r vport_geneve + sudo modprobe -r openvswitch + load_ovs_kernel_modules +} + +# compile_ovs() - Compile OVS from source and load needed modules. +# Accepts two parameters: +# - first one is False by default and means that modules are not built and installed. +# - second optional parameter defines prefix for ovs compilation +# - third optional parameter defines localstatedir for ovs single machine runtime +# Env variables OVS_REPO_NAME, OVS_REPO and OVS_BRANCH must be set +function compile_ovs { + local _pwd=$PWD + local build_modules=${1:-False} + local prefix=$2 + local localstatedir=$3 + + if [ -n "$prefix" ]; then + prefix="--prefix=$prefix" + fi + + if [ -n "$localstatedir" ]; then + localstatedir="--localstatedir=$localstatedir" + fi + + prepare_for_ovs_compilation $build_modules + + KERNEL_VERSION=$(uname -r) + major_version=$(echo "${KERNEL_VERSION}" | cut -d '.' -f1) + patch_level=$(echo "${KERNEL_VERSION}" | cut -d '.' 
-f2) + if [ "${major_version}" -gt 5 ] || [ "${major_version}" == 5 ] && [ "${patch_level}" -gt 5 ]; then + echo "NOTE: KERNEL VERSION is ${KERNEL_VERSION} and OVS doesn't support compiling " + echo "Kernel module for version higher than 5.5. Skipping module compilation..." + build_modules="False" + fi + + if [ ! -f configure ] ; then + ./boot.sh + fi + if [ ! -f config.status ] || [ configure -nt config.status ] ; then + if [[ "$build_modules" == "True" ]]; then + ./configure $prefix $localstatedir --with-linux=/lib/modules/$(uname -r)/build + else + ./configure $prefix $localstatedir + fi + fi + make -j$(($(nproc) + 1)) + sudo make install + if [[ "$build_modules" == "True" ]]; then + sudo make INSTALL_MOD_DIR=kernel/net/openvswitch modules_install + fi + reload_ovs_kernel_modules + + cd $_pwd +} + +# action_service - call an action over openvswitch service +# Accepts one parameter that can be either +# 'start', 'restart' and 'stop'. +function action_openvswitch { + local action=$1 + + if is_ubuntu; then + ${action}_service openvswitch-switch + elif is_fedora; then + ${action}_service openvswitch + fi +} + +# start_new_ovs() - removes old ovs database, creates a new one and starts ovs +function start_new_ovs { + sudo rm -f /etc/openvswitch/conf.db /etc/openvswitch/.conf.db~lock~ + sudo /usr/local/share/openvswitch/scripts/ovs-ctl start +} + +# stop_new_ovs() - stops ovs +function stop_new_ovs { + local ovs_ctl='/usr/local/share/openvswitch/scripts/ovs-ctl' + + if [ -x $ovs_ctl ] ; then + sudo $ovs_ctl stop + fi +} + +# remove_ovs_packages() - removes old ovs packages from the system +function remove_ovs_packages { + for package in openvswitch openvswitch-switch openvswitch-common; do + if is_package_installed $package; then + uninstall_package $package + fi + done +} + + +# load_conntrack_gre_module() - loads nf_conntrack_proto_gre kernel module +function load_conntrack_gre_module { + load_module nf_conntrack_proto_gre False +} diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3 new file mode 100644 index 0000000000..bbedc57a44 --- /dev/null +++ b/lib/neutron_plugins/services/l3 @@ -0,0 +1,436 @@ +#!/bin/bash +# Subnet IP version +IP_VERSION=${IP_VERSION:-"4+6"} +# Validate IP_VERSION +if [[ $IP_VERSION != "4" ]] && [[ $IP_VERSION != "6" ]] && [[ $IP_VERSION != "4+6" ]]; then + die $LINENO "IP_VERSION must be either 4, 6, or 4+6" +fi +# Specify if the initial private and external networks should be created +NEUTRON_CREATE_INITIAL_NETWORKS=${NEUTRON_CREATE_INITIAL_NETWORKS:-True} + +## Provider Network Information +PROVIDER_SUBNET_NAME=${PROVIDER_SUBNET_NAME:-"provider_net"} +IPV6_PROVIDER_SUBNET_NAME=${IPV6_PROVIDER_SUBNET_NAME:-"provider_net_v6"} +IPV6_PROVIDER_FIXED_RANGE=${IPV6_PROVIDER_FIXED_RANGE:-} +IPV6_PROVIDER_NETWORK_GATEWAY=${IPV6_PROVIDER_NETWORK_GATEWAY:-} + +PUBLIC_BRIDGE=${PUBLIC_BRIDGE:-br-ex} +PUBLIC_BRIDGE_MTU=${PUBLIC_BRIDGE_MTU:-1500} + +# If Q_ASSIGN_GATEWAY_TO_PUBLIC_BRIDGE=True, assign the gateway IP of the public +# subnet to the public bridge interface even if Q_USE_PROVIDERNET_FOR_PUBLIC is +# used. +Q_ASSIGN_GATEWAY_TO_PUBLIC_BRIDGE=${Q_ASSIGN_GATEWAY_TO_PUBLIC_BRIDGE:-True} + +# The name of the default router +Q_ROUTER_NAME=${Q_ROUTER_NAME:-router1} + +# If Q_USE_PUBLIC_VETH=True, create and use a veth pair instead of +# PUBLIC_BRIDGE. This is intended to be used with +# Q_USE_PROVIDERNET_FOR_PUBLIC=True. 
+Q_USE_PUBLIC_VETH=${Q_USE_PUBLIC_VETH:-False}
+Q_PUBLIC_VETH_EX=${Q_PUBLIC_VETH_EX:-veth-pub-ex}
+Q_PUBLIC_VETH_INT=${Q_PUBLIC_VETH_INT:-veth-pub-int}
+
+# The next variable is configured by plugin
+# e.g. _configure_neutron_l3_agent or lib/neutron_plugins/*
+#
+# L3 routers exist per tenant
+Q_L3_ROUTER_PER_TENANT=${Q_L3_ROUTER_PER_TENANT:-True}
+
+
+# Use providernet for public network
+#
+# If Q_USE_PROVIDERNET_FOR_PUBLIC=True, use a provider network
+# for external interface of neutron l3-agent. In that case,
+# PUBLIC_PHYSICAL_NETWORK specifies provider:physical_network value
+# used for the network. In case of ofagent, you should add the
+# corresponding entry to your OFAGENT_PHYSICAL_INTERFACE_MAPPINGS.
+# For openvswitch agent, you should add the corresponding entry to
+# your OVS_BRIDGE_MAPPINGS and for OVN add the corresponding entry
+# to your OVN_BRIDGE_MAPPINGS.
+#
+# eg. (ofagent)
+# Q_USE_PROVIDERNET_FOR_PUBLIC=True
+# Q_USE_PUBLIC_VETH=True
+# PUBLIC_PHYSICAL_NETWORK=public
+# OFAGENT_PHYSICAL_INTERFACE_MAPPINGS=public:veth-pub-int
+#
+# eg. (openvswitch agent)
+# Q_USE_PROVIDERNET_FOR_PUBLIC=True
+# PUBLIC_PHYSICAL_NETWORK=public
+# OVS_BRIDGE_MAPPINGS=public:br-ex
+#
+# eg. (ovn agent)
+# Q_USE_PROVIDERNET_FOR_PUBLIC=True
+# PUBLIC_PHYSICAL_NETWORK=public
+# OVN_BRIDGE_MAPPINGS=public:br-ex
+#
+# The provider-network-type defaults to flat, however, the values
+# PUBLIC_PROVIDERNET_TYPE and PUBLIC_PROVIDERNET_SEGMENTATION_ID could
+# be set to specify the parameters for an alternate network type.
+Q_USE_PROVIDERNET_FOR_PUBLIC=${Q_USE_PROVIDERNET_FOR_PUBLIC:-True}
+PUBLIC_PHYSICAL_NETWORK=${PUBLIC_PHYSICAL_NETWORK:-public}
+
+# Generate 40-bit IPv6 Global ID to comply with RFC 4193
+IPV6_GLOBAL_ID=`uuidgen | sed s/-//g | cut -c 23- | sed -e "s/\(..\)\(....\)\(....\)/\1:\2:\3/"`
+
+# IPv6 gateway and subnet defaults, in case they are not customized in localrc
+IPV6_RA_MODE=${IPV6_RA_MODE:-slaac}
+IPV6_ADDRESS_MODE=${IPV6_ADDRESS_MODE:-slaac}
+IPV6_PUBLIC_SUBNET_NAME=${IPV6_PUBLIC_SUBNET_NAME:-ipv6-public-subnet}
+IPV6_PRIVATE_SUBNET_NAME=${IPV6_PRIVATE_SUBNET_NAME:-ipv6-private-subnet}
+IPV6_ADDRS_SAFE_TO_USE=${IPV6_ADDRS_SAFE_TO_USE:-fd$IPV6_GLOBAL_ID::/56}
+# if we got larger than a /64 safe to use, we only use the first /64 to
+# avoid side effects outlined in rfc7421
+FIXED_RANGE_V6=${FIXED_RANGE_V6:-$(echo $IPV6_ADDRS_SAFE_TO_USE | awk -F '/' '{ print $1"/"($2>63 ?
$2 : 64) }')} +IPV6_PRIVATE_NETWORK_GATEWAY=${IPV6_PRIVATE_NETWORK_GATEWAY:-} +IPV6_PUBLIC_RANGE=${IPV6_PUBLIC_RANGE:-2001:db8::/64} +IPV6_PUBLIC_NETWORK_GATEWAY=${IPV6_PUBLIC_NETWORK_GATEWAY:-2001:db8::2} +IPV6_ROUTER_GW_IP=${IPV6_ROUTER_GW_IP:-2001:db8::1} + +# Gateway and subnet defaults, in case they are not customized in localrc +NETWORK_GATEWAY=${NETWORK_GATEWAY:-} +PUBLIC_NETWORK_GATEWAY=${PUBLIC_NETWORK_GATEWAY:-} +PRIVATE_SUBNET_NAME=${PRIVATE_SUBNET_NAME:-"private-subnet"} +PUBLIC_SUBNET_NAME=${PUBLIC_SUBNET_NAME:-"public-subnet"} + +# Subnetpool defaults +USE_SUBNETPOOL=${USE_SUBNETPOOL:-True} +SUBNETPOOL_NAME_V4=${SUBNETPOOL_NAME:-"shared-default-subnetpool-v4"} +SUBNETPOOL_NAME_V6=${SUBNETPOOL_NAME:-"shared-default-subnetpool-v6"} + +SUBNETPOOL_PREFIX_V4=${SUBNETPOOL_PREFIX_V4:-$IPV4_ADDRS_SAFE_TO_USE} +SUBNETPOOL_PREFIX_V6=${SUBNETPOOL_PREFIX_V6:-$IPV6_ADDRS_SAFE_TO_USE} + +SUBNETPOOL_SIZE_V4=${SUBNETPOOL_SIZE_V4:-26} +SUBNETPOOL_SIZE_V6=${SUBNETPOOL_SIZE_V6:-64} + +default_v4_route_devs=$(ip -4 route | grep ^default | awk '{print $5}') + +default_v6_route_devs=$(ip -6 route list match default table all | grep via | awk '{print $5}') + +function _determine_config_l3 { + local opts="--config-file $NEUTRON_CONF --config-file $Q_L3_CONF_FILE" + echo "$opts" +} + +function _configure_neutron_l3_agent { + + cp $NEUTRON_DIR/etc/l3_agent.ini.sample $Q_L3_CONF_FILE + + iniset $Q_L3_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL + iniset $Q_L3_CONF_FILE AGENT root_helper "$Q_RR_COMMAND" + if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then + iniset $Q_L3_CONF_FILE AGENT root_helper_daemon "$Q_RR_DAEMON_COMMAND" + fi + + _neutron_setup_interface_driver $Q_L3_CONF_FILE + + neutron_plugin_configure_l3_agent $Q_L3_CONF_FILE + + _configure_public_network_connectivity +} + +# Explicitly set router id in l3 agent configuration +function _neutron_set_router_id { + if [[ "$Q_L3_ROUTER_PER_TENANT" == "False" ]]; then + iniset $Q_L3_CONF_FILE DEFAULT router_id $ROUTER_ID + fi +} + +# Get ext_gw_interface depending on value of Q_USE_PUBLIC_VETH +function _neutron_get_ext_gw_interface { + if [[ "$Q_USE_PUBLIC_VETH" == "True" ]]; then + echo $Q_PUBLIC_VETH_EX + else + # Disable in-band as we are going to use local port + # to communicate with VMs + sudo ovs-vsctl set Bridge $PUBLIC_BRIDGE \ + other_config:disable-in-band=true + echo $PUBLIC_BRIDGE + fi +} + +function create_neutron_initial_network { + # Allow drivers that need to create an initial network to do so here + if type -p neutron_plugin_create_initial_network_profile > /dev/null; then + neutron_plugin_create_initial_network_profile $PHYSICAL_NETWORK + fi + + if is_networking_extension_supported "auto-allocated-topology"; then + if [[ "$USE_SUBNETPOOL" == "True" ]]; then + if [[ "$IP_VERSION" =~ 4.* ]]; then + SUBNETPOOL_V4_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet pool create $SUBNETPOOL_NAME_V4 --default-prefix-length $SUBNETPOOL_SIZE_V4 --pool-prefix $SUBNETPOOL_PREFIX_V4 --share --default -f value -c id) + fi + if [[ "$IP_VERSION" =~ .*6 ]]; then + SUBNETPOOL_V6_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet pool create $SUBNETPOOL_NAME_V6 --default-prefix-length $SUBNETPOOL_SIZE_V6 --pool-prefix $SUBNETPOOL_PREFIX_V6 --share --default -f value -c id) + fi + fi + fi + + if is_provider_network; then + die_if_not_set $LINENO PHYSICAL_NETWORK "You must specify the PHYSICAL_NETWORK" + die_if_not_set $LINENO PROVIDER_NETWORK_TYPE "You must specify the PROVIDER_NETWORK_TYPE" + 
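+        # Illustrative local.conf settings that lead into this branch
+        # (example values, not defaults):
+        #   Q_USE_PROVIDER_NETWORKING=True
+        #   PHYSICAL_NETWORK=physnet1
+        #   PROVIDER_NETWORK_TYPE=vlan
+        #   SEGMENTATION_ID=2010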
NET_ID=$(openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" network create $PHYSICAL_NETWORK --provider-network-type $PROVIDER_NETWORK_TYPE --provider-physical-network "$PHYSICAL_NETWORK" ${SEGMENTATION_ID:+--provider-segment $SEGMENTATION_ID} --share -f value -c id) + die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PHYSICAL_NETWORK" + + if [[ "$IP_VERSION" =~ 4.* ]]; then + if [ -z $SUBNETPOOL_V4_ID ]; then + fixed_range_v4=$FIXED_RANGE + fi + SUBNET_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" subnet create --ip-version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} $PROVIDER_SUBNET_NAME --gateway $NETWORK_GATEWAY ${SUBNETPOOL_V4_ID:+--subnet-pool $SUBNETPOOL_V4_ID} --network $NET_ID ${fixed_range_v4:+--subnet-range $fixed_range_v4} -f value -c id) + die_if_not_set $LINENO SUBNET_ID "Failure creating SUBNET_ID for $PROVIDER_SUBNET_NAME" + fi + + if [[ "$IP_VERSION" =~ .*6 ]]; then + die_if_not_set $LINENO IPV6_PROVIDER_FIXED_RANGE "IPV6_PROVIDER_FIXED_RANGE has not been set, but Q_USE_PROVIDER_NETWORKING is true and IP_VERSION includes 6" + die_if_not_set $LINENO IPV6_PROVIDER_NETWORK_GATEWAY "IPV6_PROVIDER_NETWORK_GATEWAY has not been set, but Q_USE_PROVIDER_NETWORKING is true and IP_VERSION includes 6" + if [ -z $SUBNETPOOL_V6_ID ]; then + fixed_range_v6=$IPV6_PROVIDER_FIXED_RANGE + fi + IPV6_SUBNET_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" subnet create --ip-version 6 --gateway $IPV6_PROVIDER_NETWORK_GATEWAY $IPV6_PROVIDER_SUBNET_NAME ${SUBNETPOOL_V6_ID:+--subnet-pool $SUBNETPOOL_V6_ID} --network $NET_ID ${fixed_range_v6:+--subnet-range $fixed_range_v6} -f value -c id) + die_if_not_set $LINENO IPV6_SUBNET_ID "Failure creating IPV6_SUBNET_ID for $IPV6_PROVIDER_SUBNET_NAME" + fi + + if [[ $Q_AGENT == "openvswitch" ]]; then + sudo ip link set $OVS_PHYSICAL_BRIDGE up + sudo ip link set br-int up + sudo ip link set $PUBLIC_INTERFACE up + fi + else + NET_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" network create "$PRIVATE_NETWORK_NAME" -f value -c id) + die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PRIVATE_NETWORK_NAME" + + if [[ "$IP_VERSION" =~ 4.* ]]; then + # Create IPv4 private subnet + SUBNET_ID=$(_neutron_create_private_subnet_v4) + fi + + if [[ "$IP_VERSION" =~ .*6 ]]; then + # Create IPv6 private subnet + IPV6_SUBNET_ID=$(_neutron_create_private_subnet_v6) + fi + fi + + if is_networking_extension_supported "router" && is_networking_extension_supported "external-net"; then + # Create a router, and add the private subnet as one of its interfaces + if [[ "$Q_L3_ROUTER_PER_TENANT" == "True" ]]; then + # create a tenant-owned router. + ROUTER_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" router create $Q_ROUTER_NAME -f value -c id) + die_if_not_set $LINENO ROUTER_ID "Failure creating router $Q_ROUTER_NAME" + else + # Plugin only supports creating a single router, which should be admin owned. + ROUTER_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router create $Q_ROUTER_NAME -f value -c id) + die_if_not_set $LINENO ROUTER_ID "Failure creating router $Q_ROUTER_NAME" + fi + + EXTERNAL_NETWORK_FLAGS="--external" + if is_networking_extension_supported "auto-allocated-topology"; then + EXTERNAL_NETWORK_FLAGS="$EXTERNAL_NETWORK_FLAGS --default" + fi + # Create an external network, and a subnet. 
Configure the external network as router gw + if [ "$Q_USE_PROVIDERNET_FOR_PUBLIC" = "True" ]; then + EXT_NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create "$PUBLIC_NETWORK_NAME" $EXTERNAL_NETWORK_FLAGS --provider-network-type ${PUBLIC_PROVIDERNET_TYPE:-flat} ${PUBLIC_PROVIDERNET_SEGMENTATION_ID:+--provider-segment $PUBLIC_PROVIDERNET_SEGMENTATION_ID} --provider-physical-network ${PUBLIC_PHYSICAL_NETWORK} -f value -c id) + else + EXT_NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create "$PUBLIC_NETWORK_NAME" $EXTERNAL_NETWORK_FLAGS -f value -c id) + fi + die_if_not_set $LINENO EXT_NET_ID "Failure creating EXT_NET_ID for $PUBLIC_NETWORK_NAME" + + if [[ "$IP_VERSION" =~ 4.* ]]; then + # Configure router for IPv4 public access + _neutron_configure_router_v4 + fi + + if [[ "$IP_VERSION" =~ .*6 ]]; then + # Configure router for IPv6 public access + _neutron_configure_router_v6 + fi + fi +} + +# Create private IPv4 subnet +function _neutron_create_private_subnet_v4 { + if [ -z $SUBNETPOOL_V4_ID ]; then + fixed_range_v4=$FIXED_RANGE + fi + local subnet_params="--ip-version 4 " + if [[ -n "$NETWORK_GATEWAY" ]]; then + subnet_params+="--gateway $NETWORK_GATEWAY " + fi + + subnet_params+="${SUBNETPOOL_V4_ID:+--subnet-pool $SUBNETPOOL_V4_ID} " + subnet_params+="${fixed_range_v4:+--subnet-range $fixed_range_v4} " + subnet_params+="--network $NET_ID $PRIVATE_SUBNET_NAME" + local subnet_id + subnet_id=$(openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" subnet create $subnet_params -f value -c id) + die_if_not_set $LINENO subnet_id "Failure creating private IPv4 subnet" + echo $subnet_id +} + +# Create private IPv6 subnet +function _neutron_create_private_subnet_v6 { + die_if_not_set $LINENO IPV6_RA_MODE "IPV6 RA Mode not set" + die_if_not_set $LINENO IPV6_ADDRESS_MODE "IPV6 Address Mode not set" + local ipv6_modes="--ipv6-ra-mode $IPV6_RA_MODE --ipv6-address-mode $IPV6_ADDRESS_MODE" + if [ -z $SUBNETPOOL_V6_ID ]; then + fixed_range_v6=$FIXED_RANGE_V6 + fi + local subnet_params="--ip-version 6 " + if [[ -n "$IPV6_PRIVATE_NETWORK_GATEWAY" ]]; then + subnet_params+="--gateway $IPV6_PRIVATE_NETWORK_GATEWAY " + fi + subnet_params+="${SUBNETPOOL_V6_ID:+--subnet-pool $SUBNETPOOL_V6_ID} " + subnet_params+="${fixed_range_v6:+--subnet-range $fixed_range_v6} " + subnet_params+="$ipv6_modes --network $NET_ID $IPV6_PRIVATE_SUBNET_NAME " + local ipv6_subnet_id + ipv6_subnet_id=$(openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" subnet create $subnet_params -f value -c id) + die_if_not_set $LINENO ipv6_subnet_id "Failure creating private IPv6 subnet" + echo $ipv6_subnet_id +} + +# Create public IPv4 subnet +function _neutron_create_public_subnet_v4 { + local subnet_params="--ip-version 4 " + subnet_params+="${Q_FLOATING_ALLOCATION_POOL:+--allocation-pool $Q_FLOATING_ALLOCATION_POOL} " + if [[ -n "$PUBLIC_NETWORK_GATEWAY" ]]; then + subnet_params+="--gateway $PUBLIC_NETWORK_GATEWAY " + fi + subnet_params+="--network $EXT_NET_ID --subnet-range $FLOATING_RANGE --no-dhcp " + subnet_params+="$PUBLIC_SUBNET_NAME" + local id_and_ext_gw_ip + id_and_ext_gw_ip=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create $subnet_params | grep -e 'gateway_ip' -e ' id ') + die_if_not_set $LINENO id_and_ext_gw_ip "Failure creating public IPv4 subnet" + echo $id_and_ext_gw_ip +} + +# Create public IPv6 subnet +function _neutron_create_public_subnet_v6 { + local subnet_params="--ip-version 6 " + 
subnet_params+="--gateway $IPV6_PUBLIC_NETWORK_GATEWAY " + subnet_params+="--network $EXT_NET_ID --subnet-range $IPV6_PUBLIC_RANGE --no-dhcp " + subnet_params+="$IPV6_PUBLIC_SUBNET_NAME" + local ipv6_id_and_ext_gw_ip + ipv6_id_and_ext_gw_ip=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create $subnet_params | grep -e 'gateway_ip' -e ' id ') + die_if_not_set $LINENO ipv6_id_and_ext_gw_ip "Failure creating an IPv6 public subnet" + echo $ipv6_id_and_ext_gw_ip +} + +# Configure neutron router for IPv4 public access +function _neutron_configure_router_v4 { + openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" router add subnet $ROUTER_ID $SUBNET_ID + # Create a public subnet on the external network + local id_and_ext_gw_ip + id_and_ext_gw_ip=$(_neutron_create_public_subnet_v4 $EXT_NET_ID) + local ext_gw_ip + ext_gw_ip=$(echo $id_and_ext_gw_ip | get_field 2) + PUB_SUBNET_ID=$(echo $id_and_ext_gw_ip | get_field 5) + # Configure the external network as the default router gateway + openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" router set --external-gateway $EXT_NET_ID $ROUTER_ID + + # This logic is specific to using OVN or the l3-agent for layer 3 + if ([[ $Q_AGENT == "ovn" ]] && [[ "$OVN_L3_CREATE_PUBLIC_NETWORK" == "True" ]] && is_service_enabled q-svc neutron-api) || is_service_enabled q-l3 neutron-l3; then + # Configure and enable public bridge + local ext_gw_interface="none" + if is_neutron_ovs_base_plugin; then + ext_gw_interface=$(_neutron_get_ext_gw_interface) + fi + if [[ "$ext_gw_interface" != "none" ]]; then + local cidr_len=${FLOATING_RANGE#*/} + local testcmd="ip -o link | grep -q $ext_gw_interface" + test_with_retry "$testcmd" "$ext_gw_interface creation failed" + if [[ $(ip addr show dev $ext_gw_interface | grep -c $ext_gw_ip) == 0 && ( $Q_USE_PROVIDERNET_FOR_PUBLIC == "False" || $Q_USE_PUBLIC_VETH == "True" || $Q_ASSIGN_GATEWAY_TO_PUBLIC_BRIDGE == "True" ) ]]; then + sudo ip addr add $ext_gw_ip/$cidr_len dev $ext_gw_interface + sudo ip link set $ext_gw_interface up + fi + ROUTER_GW_IP=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" port list -c 'Fixed IP Addresses' --device-owner network:router_gateway | awk -F'ip_address' '{ print $2 }' | cut -f2 -d\' | tr '\n' ' ') + die_if_not_set $LINENO ROUTER_GW_IP "Failure retrieving ROUTER_GW_IP" + fi + _neutron_set_router_id + fi +} + +# Configure neutron router for IPv6 public access +function _neutron_configure_router_v6 { + openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" router add subnet $ROUTER_ID $IPV6_SUBNET_ID + # Create a public subnet on the external network + local ipv6_id_and_ext_gw_ip + ipv6_id_and_ext_gw_ip=$(_neutron_create_public_subnet_v6 $EXT_NET_ID) + local ipv6_ext_gw_ip + ipv6_ext_gw_ip=$(echo $ipv6_id_and_ext_gw_ip | get_field 2) + local ipv6_pub_subnet_id + ipv6_pub_subnet_id=$(echo $ipv6_id_and_ext_gw_ip | get_field 5) + + # If the external network has not already been set as the default router + # gateway when configuring an IPv4 public subnet, do so now + if [[ "$IP_VERSION" == "6" ]]; then + openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" router set --external-gateway $EXT_NET_ID $ROUTER_ID + fi + + # This logic is specific to using OVN or the l3-agent for layer 3 + if ([[ $Q_AGENT == "ovn" ]] && [[ "$OVN_L3_CREATE_PUBLIC_NETWORK" == "True" ]] && is_service_enabled q-svc neutron-api) || is_service_enabled q-l3 neutron-l3; then + # if the Linux host considers itself to be a router then it will + 
# ignore all router advertisements + # Ensure IPv6 RAs are accepted on interfaces with a default route. + # This is needed for neutron-based devstack clouds to work in + # IPv6-only clouds in the gate. Please do not remove this without + # talking to folks in Infra. + for d in $default_v6_route_devs; do + # Slashes must be used in this sysctl command because route devices + # can have dots in their names. If dots were used, dots in the + # device name would be reinterpreted as a slash, causing an error. + sudo sysctl -w net/ipv6/conf/$d/accept_ra=2 + done + # Ensure IPv6 forwarding is enabled on the host + sudo sysctl -w net.ipv6.conf.all.forwarding=1 + # Configure and enable public bridge + # Override global IPV6_ROUTER_GW_IP with the true value from neutron + # NOTE(slaweq): when enforce scopes is enabled in Neutron, router's + # gateway ports aren't visible in API because such ports don't belongs + # to any tenant. Because of that, at least temporary we need to find + # IPv6 address of the router's gateway in a bit different way. + # It can be reverted when bug + # https://bugs.launchpad.net/neutron/+bug/1959332 will be fixed + IPV6_ROUTER_GW_IP=$(openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" router show $ROUTER_ID -c external_gateway_info -f json | grep -C 1 $ipv6_pub_subnet_id | grep ip_address | awk '{print $2}' | tr -d '"') + die_if_not_set $LINENO IPV6_ROUTER_GW_IP "Failure retrieving IPV6_ROUTER_GW_IP" + + if is_neutron_ovs_base_plugin; then + local ext_gw_interface + ext_gw_interface=$(_neutron_get_ext_gw_interface) + local ipv6_cidr_len=${IPV6_PUBLIC_RANGE#*/} + + # Configure interface for public bridge by setting the interface + # to "up" in case the job is running entirely private network based + # testing. + sudo ip link set $ext_gw_interface up + sudo ip -6 addr replace $ipv6_ext_gw_ip/$ipv6_cidr_len dev $ext_gw_interface + # Any IPv6 private subnet that uses the default IPV6 subnet pool + # and that is plugged into the default router (Q_ROUTER_NAME) will + # be reachable from the devstack node (ex: ipv6-private-subnet). + # Some scenario tests (such as octavia-tempest-plugin) rely heavily + # on this feature. + local replace_range=${SUBNETPOOL_PREFIX_V6} + if [[ -z "${SUBNETPOOL_V6_ID}" ]]; then + replace_range=${FIXED_RANGE_V6} + fi + sudo ip -6 route replace $replace_range via $IPV6_ROUTER_GW_IP dev $ext_gw_interface + fi + _neutron_set_router_id + fi +} + +function is_networking_extension_supported { + local extension=$1 + # TODO(sc68cal) cache this instead of calling every time + EXT_LIST=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" extension list --network -c Alias -f value) + [[ $EXT_LIST =~ $extension ]] && return 0 +} + +function plugin_agent_add_l3_agent_extension { + local l3_agent_extension=$1 + if [[ -z "$L3_AGENT_EXTENSIONS" ]]; then + L3_AGENT_EXTENSIONS=$l3_agent_extension + elif [[ ! 
,${L3_AGENT_EXTENSIONS}, =~ ,${l3_agent_extension}, ]]; then + L3_AGENT_EXTENSIONS+=",$l3_agent_extension" + fi +} diff --git a/lib/neutron_plugins/services/metering b/lib/neutron_plugins/services/metering new file mode 100644 index 0000000000..757a562ee6 --- /dev/null +++ b/lib/neutron_plugins/services/metering @@ -0,0 +1,33 @@ +#!/bin/bash + +# Neutron metering plugin +# --------------------------- + +# Save trace setting +_XTRACE_NETURON_METER=$(set +o | grep xtrace) +set +o xtrace + + +AGENT_METERING_BINARY="$NEUTRON_BIN_DIR/neutron-metering-agent" +METERING_PLUGIN="neutron.services.metering.metering_plugin.MeteringPlugin" + +function neutron_agent_metering_configure_common { + neutron_service_plugin_class_add $METERING_PLUGIN +} + +function neutron_agent_metering_configure_agent { + METERING_AGENT_CONF_PATH=/etc/neutron/services/metering + mkdir -p $METERING_AGENT_CONF_PATH + + METERING_AGENT_CONF_FILENAME="$METERING_AGENT_CONF_PATH/metering_agent.ini" + + cp $NEUTRON_DIR/etc/metering_agent.ini.sample $METERING_AGENT_CONF_FILENAME +} + +function neutron_metering_stop { + stop_process q-metering +} + +# Restore xtrace +$_XTRACE_NETURON_METER + diff --git a/lib/neutron_plugins/services/placement b/lib/neutron_plugins/services/placement new file mode 100644 index 0000000000..3ec185bae6 --- /dev/null +++ b/lib/neutron_plugins/services/placement @@ -0,0 +1,21 @@ +#!/bin/bash + +function configure_placement_service_plugin { + neutron_service_plugin_class_add "placement" +} + +function configure_placement_neutron { + iniset $NEUTRON_CONF placement auth_type "$NEUTRON_PLACEMENT_AUTH_TYPE" + iniset $NEUTRON_CONF placement auth_url "$KEYSTONE_SERVICE_URI" + iniset $NEUTRON_CONF placement username "$NEUTRON_PLACEMENT_USERNAME" + iniset $NEUTRON_CONF placement password "$SERVICE_PASSWORD" + iniset $NEUTRON_CONF placement user_domain_name "$SERVICE_DOMAIN_NAME" + iniset $NEUTRON_CONF placement project_name "$SERVICE_TENANT_NAME" + iniset $NEUTRON_CONF placement project_domain_name "$SERVICE_DOMAIN_NAME" + iniset $NEUTRON_CONF placement region_name "$REGION_NAME" +} + +function configure_placement_extension { + configure_placement_service_plugin + configure_placement_neutron +} diff --git a/lib/neutron_plugins/services/qos b/lib/neutron_plugins/services/qos new file mode 100644 index 0000000000..c11c315586 --- /dev/null +++ b/lib/neutron_plugins/services/qos @@ -0,0 +1,30 @@ +#!/bin/bash + +function configure_qos_service_plugin { + neutron_service_plugin_class_add "qos" +} + + +function configure_qos_core_plugin { + configure_qos_$Q_PLUGIN +} + + +function configure_qos_l2_agent { + plugin_agent_add_l2_agent_extension "qos" +} + + +function configure_qos { + configure_qos_service_plugin + configure_qos_core_plugin + configure_qos_l2_agent +} + +function configure_l3_agent_extension_fip_qos { + plugin_agent_add_l3_agent_extension "fip_qos" +} + +function configure_l3_agent_extension_gateway_ip_qos { + plugin_agent_add_l3_agent_extension "gateway_ip_qos" +} diff --git a/lib/neutron_plugins/services/segments b/lib/neutron_plugins/services/segments new file mode 100644 index 0000000000..08936bae49 --- /dev/null +++ b/lib/neutron_plugins/services/segments @@ -0,0 +1,10 @@ +#!/bin/bash + +function configure_segments_service_plugin { + neutron_service_plugin_class_add segments +} + +function configure_segments_extension { + configure_segments_service_plugin +} + diff --git a/lib/neutron_plugins/services/trunk b/lib/neutron_plugins/services/trunk new file mode 100644 index 0000000000..8e0f6944cf --- 
/dev/null +++ b/lib/neutron_plugins/services/trunk @@ -0,0 +1,5 @@ +#!/bin/bash + +function configure_trunk_extension { + neutron_service_plugin_class_add "trunk" +} diff --git a/lib/nova b/lib/nova new file mode 100644 index 0000000000..460b4adc85 --- /dev/null +++ b/lib/nova @@ -0,0 +1,1300 @@ +#!/bin/bash +# +# lib/nova +# Functions to control the configuration and operation of the **Nova** service + +# Dependencies: +# +# - ``functions`` file +# - ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined +# - ``FILES`` +# - ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined +# - ``LIBVIRT_TYPE`` must be defined +# - ``INSTANCE_NAME_PREFIX``, ``VOLUME_NAME_PREFIX`` must be defined +# - ``KEYSTONE_TOKEN_FORMAT`` must be defined + +# ``stack.sh`` calls the entry points in this order: +# +# - install_nova +# - configure_nova +# - create_nova_conf +# - init_nova +# - start_nova +# - stop_nova +# - cleanup_nova + +# Save trace setting +_XTRACE_LIB_NOVA=$(set +o | grep xtrace) +set +o xtrace + +# Defaults +# -------- + +# Set up default directories +GITDIR["python-novaclient"]=$DEST/python-novaclient +GITDIR["os-vif"]=$DEST/os-vif +NOVA_DIR=$DEST/nova + +# Nova virtual environment +if [[ ${USE_VENV} = True ]]; then + PROJECT_VENV["nova"]=${NOVA_DIR}.venv + NOVA_BIN_DIR=${PROJECT_VENV["nova"]}/bin +else + NOVA_BIN_DIR=$(get_python_exec_prefix) +fi + +NOVA_STATE_PATH=${NOVA_STATE_PATH:=$DATA_DIR/nova} +# INSTANCES_PATH is the previous name for this +NOVA_INSTANCES_PATH=${NOVA_INSTANCES_PATH:=${INSTANCES_PATH:=$NOVA_STATE_PATH/instances}} + +NOVA_CONF_DIR=/etc/nova +NOVA_CONF=$NOVA_CONF_DIR/nova.conf +NOVA_COND_CONF=$NOVA_CONF_DIR/nova.conf +NOVA_CPU_CONF=$NOVA_CONF_DIR/nova-cpu.conf +NOVA_FAKE_CONF=$NOVA_CONF_DIR/nova-fake.conf +NOVA_API_DB=${NOVA_API_DB:-nova_api} +NOVA_UWSGI=nova.wsgi.osapi_compute:application +NOVA_METADATA_UWSGI=nova.wsgi.metadata:application +NOVA_UWSGI_CONF=$NOVA_CONF_DIR/nova-api-uwsgi.ini +NOVA_METADATA_UWSGI_CONF=$NOVA_CONF_DIR/nova-metadata-uwsgi.ini + +# Allow forcing the stable compute uuid to something specific. This would be +# done by deployment tools that pre-allocate the UUIDs, but it is also handy +# for developers that need to re-stack a compute-only deployment multiple +# times. Since the DB is non-local and not erased on an unstack, making it +# stay the same each time is what developers want. Set to a uuid here or +# leave it blank for default allocate-on-start behavior. +NOVA_CPU_UUID="" + +# The total number of cells we expect. Must be greater than one and doesn't +# count cell0. +NOVA_NUM_CELLS=${NOVA_NUM_CELLS:-1} +# Our cell index, so we know what rabbit vhost to connect to. +# This should be in the range of 1-$NOVA_NUM_CELLS +NOVA_CPU_CELL=${NOVA_CPU_CELL:-1} + +NOVA_API_PASTE_INI=${NOVA_API_PASTE_INI:-$NOVA_CONF_DIR/api-paste.ini} + +# We do not need to report service status every 10s for devstack-like +# deployments. In the gate this generates extra work for the services and the +# database which are already taxed. +NOVA_SERVICE_REPORT_INTERVAL=${NOVA_SERVICE_REPORT_INTERVAL:-120} + +if is_service_enabled tls-proxy; then + NOVA_SERVICE_PROTOCOL="https" +fi + +# Whether to use TLS for comms between the VNC/SPICE/serial proxy +# services and the compute node +NOVA_CONSOLE_PROXY_COMPUTE_TLS=${NOVA_CONSOLE_PROXY_COMPUTE_TLS:-False} + +# Validate configuration +if ! 
is_service_enabled tls-proxy && [ "$NOVA_CONSOLE_PROXY_COMPUTE_TLS" == "True" ]; then + die $LINENO "enabling TLS for the console proxy requires the tls-proxy service" +fi + +# Public facing bits +NOVA_SERVICE_HOST=${NOVA_SERVICE_HOST:-$SERVICE_HOST} +NOVA_SERVICE_PORT=${NOVA_SERVICE_PORT:-8774} +NOVA_SERVICE_PORT_INT=${NOVA_SERVICE_PORT_INT:-18774} +NOVA_SERVICE_PROTOCOL=${NOVA_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} +NOVA_SERVICE_LISTEN_ADDRESS=${NOVA_SERVICE_LISTEN_ADDRESS:-$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)} +METADATA_SERVICE_PORT=${METADATA_SERVICE_PORT:-8775} +NOVA_ENABLE_CACHE=${NOVA_ENABLE_CACHE:-True} + +# Flag to set the oslo_policy.enforce_scope and oslo_policy.enforce_new_defaults. +# This is used to disable the compute API policies scope and new defaults. +# By Default, it is True. +# For more detail: https://docs.openstack.org/oslo.policy/latest/configuration/index.html#oslo_policy.enforce_scope +NOVA_ENFORCE_SCOPE=$(trueorfalse True NOVA_ENFORCE_SCOPE) + +if [[ $SERVICE_IP_VERSION == 6 ]]; then + NOVA_MY_IP="$HOST_IPV6" +else + NOVA_MY_IP="$HOST_IP" +fi + +# Option to enable/disable config drive +# NOTE: Set ``FORCE_CONFIG_DRIVE="False"`` to turn OFF config drive +FORCE_CONFIG_DRIVE=${FORCE_CONFIG_DRIVE:-"False"} + +# The following NOVA_FILTERS contains SameHostFilter and DifferentHostFilter with +# the default filters. +NOVA_FILTERS="ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,SameHostFilter,DifferentHostFilter" + +QEMU_CONF=/etc/libvirt/qemu.conf + +# ``NOVA_VNC_ENABLED`` can be used to forcibly enable VNC configuration. +# In multi-node setups allows compute hosts to not run ``n-novnc``. +NOVA_VNC_ENABLED=$(trueorfalse False NOVA_VNC_ENABLED) +# same as ``NOVA_VNC_ENABLED`` but for Spice and serial console respectively. +NOVA_SPICE_ENABLED=$(trueorfalse False NOVA_SPICE_ENABLED) +NOVA_SERIAL_ENABLED=$(trueorfalse False NOVA_SERIAL_ENABLED) + +# Get hypervisor configuration +# ---------------------------- + +NOVA_PLUGINS=$TOP_DIR/lib/nova_plugins +if is_service_enabled nova && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then + # Load plugin + source $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER +fi + +# Other Nova configurations +# ---------------------------- + +# ``NOVA_USE_SERVICE_TOKEN`` is a mode where service token is passed along with +# user token while communicating to external RESP API's like Neutron, Cinder +# and Glance. +NOVA_USE_SERVICE_TOKEN=$(trueorfalse True NOVA_USE_SERVICE_TOKEN) + +# ``NOVA_ALLOW_MOVE_TO_SAME_HOST`` can be set to False in multi node DevStack, +# where there are at least two nova-computes. +NOVA_ALLOW_MOVE_TO_SAME_HOST=$(trueorfalse True NOVA_ALLOW_MOVE_TO_SAME_HOST) + +# Enable debugging levels for iscsid service (goes from 0-8) +ISCSID_DEBUG=$(trueorfalse False ISCSID_DEBUG) +ISCSID_DEBUG_LEVEL=${ISCSID_DEBUG_LEVEL:-4} + +# Format for notifications. Nova defaults to "unversioned" since Train. +# Other options include "versioned" and "both". +NOVA_NOTIFICATION_FORMAT=${NOVA_NOTIFICATION_FORMAT:-unversioned} + +# Timeout for servers to gracefully shutdown the OS during operations +# like shelve, rescue, stop, rebuild. Defaults to 0 since the default +# image in devstack is CirrOS. +NOVA_SHUTDOWN_TIMEOUT=${NOVA_SHUTDOWN_TIMEOUT:-0} + +# Whether to use Keystone unified limits instead of legacy quota limits. 
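Nearly every default above follows the same shell idiom: honor a value the operator already exported (typically via local.conf), otherwise fall back. A toy, self-contained sketch of that pattern and of the boolean normalization trueorfalse performs (the variable names are made up); the NOVA_USE_UNIFIED_LIMITS flag the last comment introduces follows right after this sketch.

    # Toy illustration of the ${VAR:-default} override idiom used above.
    MY_SERVICE_PORT=${MY_SERVICE_PORT:-8774}   # keep an exported value, else 8774
    MY_FEATURE=${MY_FEATURE:-False}

    # Simplified version of the normalization trueorfalse applies:
    case "${MY_FEATURE,,}" in
        1|yes|true|on) MY_FEATURE=True ;;
        *)             MY_FEATURE=False ;;
    esac
    echo "port=$MY_SERVICE_PORT feature=$MY_FEATURE"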
+NOVA_USE_UNIFIED_LIMITS=$(trueorfalse False NOVA_USE_UNIFIED_LIMITS) + +# TB Cache Size in MiB for qemu guests +NOVA_LIBVIRT_TB_CACHE_SIZE=${NOVA_LIBVIRT_TB_CACHE_SIZE:-0} + +# Functions +# --------- + +# Test if any Nova services are enabled +# is_nova_enabled +function is_nova_enabled { + [[ ,${DISABLED_SERVICES} =~ ,"nova" ]] && return 1 + [[ ,${ENABLED_SERVICES} =~ ,"n-" ]] && return 0 + return 1 +} + +# is_nova_console_proxy_compute_tls_enabled() - Test if the Nova Console Proxy +# service has TLS enabled +function is_nova_console_proxy_compute_tls_enabled { + [[ ${NOVA_CONSOLE_PROXY_COMPUTE_TLS} = "True" ]] && return 0 + return 1 +} + +# Helper to clean iptables rules +function clean_iptables { + # Delete rules + sudo iptables -S -v | sed "s/-c [0-9]* [0-9]* //g" | grep "nova" | grep "\-A" | sed "s/-A/-D/g" | awk '{print "sudo iptables",$0}' | bash + # Delete nat rules + sudo iptables -S -v -t nat | sed "s/-c [0-9]* [0-9]* //g" | grep "nova" | grep "\-A" | sed "s/-A/-D/g" | awk '{print "sudo iptables -t nat",$0}' | bash + # Delete chains + sudo iptables -S -v | sed "s/-c [0-9]* [0-9]* //g" | grep "nova" | grep "\-N" | sed "s/-N/-X/g" | awk '{print "sudo iptables",$0}' | bash + # Delete nat chains + sudo iptables -S -v -t nat | sed "s/-c [0-9]* [0-9]* //g" | grep "nova" | grep "\-N" | sed "s/-N/-X/g" | awk '{print "sudo iptables -t nat",$0}' | bash +} + +# cleanup_nova() - Remove residual data files, anything left over from previous +# runs that a clean run would need to clean up +function cleanup_nova { + if is_service_enabled n-cpu; then + # Clean iptables from previous runs + clean_iptables + + # Destroy old instances + local instances + instances=`sudo virsh list --all | grep $INSTANCE_NAME_PREFIX | sed "s/.*\($INSTANCE_NAME_PREFIX[0-9a-fA-F]*\).*/\1/g"` + if [ ! "$instances" = "" ]; then + echo $instances | xargs -n1 sudo virsh destroy || true + if ! xargs -n1 sudo virsh undefine --managed-save --nvram <<< $instances; then + # Can't delete with nvram flags, then just try without this flag + xargs -n1 sudo virsh undefine --managed-save <<< $instances + fi + fi + + # Logout and delete iscsi sessions + local tgts + tgts=$(sudo iscsiadm --mode node | grep $VOLUME_NAME_PREFIX | cut -d ' ' -f2) + local target + for target in $tgts; do + sudo iscsiadm --mode node -T $target --logout || true + done + sudo iscsiadm --mode node --op delete || true + + # Disconnect all nvmeof connections + sudo nvme disconnect-all || true + + # Clean out the instances directory. + sudo rm -rf $NOVA_INSTANCES_PATH/* + fi + + sudo rm -rf $NOVA_STATE_PATH + + # NOTE(dtroyer): This really should be called from here but due to the way + # nova abuses the _cleanup() function we're moving it + # directly into cleanup.sh until this can be fixed. 
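The instance cleanup near the top of cleanup_nova condenses the destroy-and-undefine dance into xargs pipelines; the same logic reads more plainly as a loop. This is a sketch with an example domain-name prefix, not necessarily $INSTANCE_NAME_PREFIX. The commented-out hypervisor cleanup the note above refers to follows directly below.

    # Destroy and undefine every libvirt domain matching a prefix.
    prefix=instance-    # example prefix for illustration
    for dom in $(sudo virsh list --all --name | grep "^${prefix}" || true); do
        sudo virsh destroy "$dom" || true                  # stop it if still running
        sudo virsh undefine --managed-save --nvram "$dom" \
            || sudo virsh undefine --managed-save "$dom"   # retry without --nvram
    done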
+ #if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then + # cleanup_nova_hypervisor + #fi + + stop_process "n-api" + stop_process "n-api-meta" + remove_uwsgi_config "$NOVA_UWSGI_CONF" "nova-api" + remove_uwsgi_config "$NOVA_METADATA_UWSGI_CONF" "nova-metadata" + + if [[ "$NOVA_BACKEND" == "LVM" ]]; then + clean_lvm_volume_group $DEFAULT_VOLUME_GROUP_NAME + fi +} + +# configure_nova() - Set config files, create data dirs, etc +function configure_nova { + # Put config files in ``/etc/nova`` for everyone to find + sudo install -d -o $STACK_USER $NOVA_CONF_DIR + + configure_rootwrap nova + + if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then + # Get the sample configuration file in place + cp $NOVA_DIR/etc/nova/api-paste.ini $NOVA_CONF_DIR + fi + + if is_service_enabled n-cpu; then + # Force IP forwarding on, just on case + sudo sysctl -w net.ipv4.ip_forward=1 + + if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then + # Check for kvm (hardware based virtualization). If unable to initialize + # kvm, we drop back to the slower emulation mode (qemu). Note: many systems + # come with hardware virtualization disabled in BIOS. + if [[ "$LIBVIRT_TYPE" == "kvm" ]]; then + sudo modprobe kvm || true + if [ ! -e /dev/kvm ]; then + echo "WARNING: Switching to QEMU" + LIBVIRT_TYPE=qemu + LIBVIRT_CPU_MODE=custom + LIBVIRT_CPU_MODEL=Nehalem + if which selinuxenabled >/dev/null 2>&1 && selinuxenabled; then + # https://bugzilla.redhat.com/show_bug.cgi?id=753589 + sudo setsebool virt_use_execmem on + fi + fi + fi + + # Install and configure **LXC** if specified. LXC is another approach to + # splitting a system into many smaller parts. LXC uses cgroups and chroot + # to simulate multiple systems. + if [[ "$LIBVIRT_TYPE" == "lxc" ]]; then + if is_ubuntu; then + # enable nbd for lxc unless you're using an lvm backend + # otherwise you can't boot instances + if [[ "$NOVA_BACKEND" != "LVM" ]]; then + sudo modprobe nbd + fi + fi + fi + fi + + # Instance Storage + # ---------------- + + # Nova stores each instance in its own directory. + sudo install -d -o $STACK_USER $NOVA_INSTANCES_PATH + + # You can specify a different disk to be mounted and used for backing the + # virtual machines. If there is a partition labeled nova-instances we + # mount it (ext filesystems can be labeled via e2label). + if [ -L /dev/disk/by-label/nova-instances ]; then + if ! mount -n | grep -q $NOVA_INSTANCES_PATH; then + sudo mount -L nova-instances $NOVA_INSTANCES_PATH + sudo chown -R $STACK_USER $NOVA_INSTANCES_PATH + fi + fi + + # Due to cinder bug #1966513 we ALWAYS need an initiator name for LVM + # Ensure each compute host uses a unique iSCSI initiator + echo InitiatorName=$(iscsi-iname) | sudo tee /etc/iscsi/initiatorname.iscsi + + if [[ ${ISCSID_DEBUG} == "True" ]]; then + # Install an override that starts iscsid with debugging + # enabled. + cat > /tmp/iscsid.override <=v1.0.0 from source. + NOVNCPROXY_URL=${NOVNCPROXY_URL:-"http://$SERVICE_HOST:$((6080 + offset))/vnc_lite.html"} + fi + iniset $NOVA_CPU_CONF vnc novncproxy_base_url "$NOVNCPROXY_URL" + SPICEHTML5PROXY_URL=${SPICEHTML5PROXY_URL:-"http://$SERVICE_HOST:$((6081 + offset))/spice_auto.html"} + iniset $NOVA_CPU_CONF spice html5proxy_base_url "$SPICEHTML5PROXY_URL" + fi + + if is_service_enabled n-novnc || [ "$NOVA_VNC_ENABLED" != False ]; then + # Address on which instance vncservers will listen on compute hosts. + # For multi-host, this should be the management ip of the compute host. 
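Back near the top of configure_nova, the hardware-virtualization probe is worth seeing in isolation: load the kvm module, and if /dev/kvm still does not exist, fall back to plain QEMU emulation (a sketch; the CPU-model adjustments DevStack also makes are omitted). The VNC listen-address assignments the preceding comment describes pick up again right below.

    # Minimal sketch of the KVM-availability check.
    LIBVIRT_TYPE=kvm
    sudo modprobe kvm || true
    if [ ! -e /dev/kvm ]; then
        echo "WARNING: /dev/kvm is missing, switching to QEMU emulation" >&2
        LIBVIRT_TYPE=qemu
    fi
    echo "virt_type will be $LIBVIRT_TYPE"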
+ VNCSERVER_LISTEN=${VNCSERVER_LISTEN:-$NOVA_SERVICE_LISTEN_ADDRESS} + VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS:-$default_proxyclient_addr} + iniset $NOVA_CPU_CONF vnc server_listen "$VNCSERVER_LISTEN" + iniset $NOVA_CPU_CONF vnc server_proxyclient_address "$VNCSERVER_PROXYCLIENT_ADDRESS" + else + iniset $NOVA_CPU_CONF vnc enabled false + fi + + if is_service_enabled n-spice || [ "$NOVA_SPICE_ENABLED" != False ]; then + # Address on which instance spiceservers will listen on compute hosts. + # For multi-host, this should be the management ip of the compute host. + SPICESERVER_PROXYCLIENT_ADDRESS=${SPICESERVER_PROXYCLIENT_ADDRESS:-$default_proxyclient_addr} + SPICESERVER_LISTEN=${SPICESERVER_LISTEN:-$NOVA_SERVICE_LISTEN_ADDRESS} + iniset $NOVA_CPU_CONF spice enabled true + iniset $NOVA_CPU_CONF spice server_listen "$SPICESERVER_LISTEN" + iniset $NOVA_CPU_CONF spice server_proxyclient_address "$SPICESERVER_PROXYCLIENT_ADDRESS" + fi + + if is_service_enabled n-sproxy || [ "$NOVA_SERIAL_ENABLED" != False ]; then + iniset $NOVA_CPU_CONF serial_console enabled True + iniset $NOVA_CPU_CONF serial_console base_url "ws://$SERVICE_HOST:$((6082 + offset))/" + fi +} + +function configure_console_proxies { + # Use the provided config file path or default to $NOVA_CONF. + local conf=${1:-$NOVA_CONF} + local offset=${2:-0} + # Stagger the offset based on the total number of possible console proxies + # (novnc, spice, serial) so that their ports will not collide if + # all are enabled. + offset=$((offset * 3)) + + if is_service_enabled n-novnc || [ "$NOVA_VNC_ENABLED" != False ]; then + iniset $conf vnc novncproxy_host "$NOVA_SERVICE_LISTEN_ADDRESS" + iniset $conf vnc novncproxy_port $((6080 + offset)) + + if is_nova_console_proxy_compute_tls_enabled ; then + iniset $conf vnc auth_schemes "vencrypt" + iniset $conf vnc vencrypt_client_key "/etc/pki/nova-novnc/client-key.pem" + iniset $conf vnc vencrypt_client_cert "/etc/pki/nova-novnc/client-cert.pem" + iniset $conf vnc vencrypt_ca_certs "/etc/pki/nova-novnc/ca-cert.pem" + + sudo mkdir -p /etc/pki/nova-novnc + deploy_int_CA /etc/pki/nova-novnc/ca-cert.pem + deploy_int_cert /etc/pki/nova-novnc/client-cert.pem /etc/pki/nova-novnc/client-key.pem + # OpenSSL 1.1.0 generates the key file with permissions: 600, by + # default, and the deploy_int* methods use 'sudo cp' to copy the + # files, making them owned by root:root. + # Change ownership of everything under /etc/pki/nova-novnc to + # $STACK_USER:$(id -g ${STACK_USER}) so that $STACK_USER can read + # the key file. + sudo chown -R $STACK_USER:$(id -g ${STACK_USER}) /etc/pki/nova-novnc + # This is needed to enable TLS in the proxy itself, example log: + # WebSocket server settings: + # - Listen on 0.0.0.0:6080 + # - Flash security policy server + # - Web server (no directory listings). Web root: /usr/share/novnc + # - SSL/TLS support + # - proxying from 0.0.0.0:6080 to None:None + iniset $conf DEFAULT key "/etc/pki/nova-novnc/client-key.pem" + iniset $conf DEFAULT cert "/etc/pki/nova-novnc/client-cert.pem" + fi + fi + + if is_service_enabled n-spice; then + iniset $conf spice html5proxy_host "$NOVA_SERVICE_LISTEN_ADDRESS" + iniset $conf spice html5proxy_port $((6081 + offset)) + fi + + if is_service_enabled n-sproxy; then + iniset $conf serial_console serialproxy_host "$NOVA_SERVICE_LISTEN_ADDRESS" + iniset $conf serial_console serialproxy_port $((6082 + offset)) + fi +} + +function configure_nova_unified_limits { + # Registered limit resources in keystone are system-specific resources. 
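The port arithmetic in configure_console_proxies is the part most people trip over when enabling extra cells: each cell claims three consecutive ports. This tiny loop only prints where the ports land for a few illustrative offsets. The registered-limits comment above continues just below this sketch.

    # Show the staggered console-proxy ports for a few example cells.
    for cell in 0 1 2; do
        offset=$((cell * 3))
        echo "cell${cell}: novnc=$((6080 + offset)) spice=$((6081 + offset)) serial=$((6082 + offset))"
    done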
+ # Make sure we use a system-scoped token to interact with this API. + + # Default limits here mirror the legacy config-based default values. + # Note: disk quota is new in nova as of unified limits. + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 10 --region $REGION_NAME servers + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 20 --region $REGION_NAME class:VCPU + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit $((50 * 1024)) --region $REGION_NAME class:MEMORY_MB + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 20 --region $REGION_NAME class:DISK_GB + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 128 --region $REGION_NAME server_metadata_items + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 5 --region $REGION_NAME server_injected_files + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 10240 --region $REGION_NAME server_injected_file_content_bytes + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 255 --region $REGION_NAME server_injected_file_path_bytes + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 100 --region $REGION_NAME server_key_pairs + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 10 --region $REGION_NAME server_groups + openstack --os-cloud devstack-system-admin registered limit create \ + --service nova --default-limit 10 --region $REGION_NAME server_group_members + + # Tell nova to use these limits + iniset $NOVA_CONF quota driver "nova.quota.UnifiedLimitsDriver" + + # Configure oslo_limit so it can talk to keystone + iniset $NOVA_CONF oslo_limit user_domain_name $SERVICE_DOMAIN_NAME + iniset $NOVA_CONF oslo_limit password $SERVICE_PASSWORD + iniset $NOVA_CONF oslo_limit username nova + iniset $NOVA_CONF oslo_limit auth_type password + iniset $NOVA_CONF oslo_limit auth_url $KEYSTONE_SERVICE_URI + iniset $NOVA_CONF oslo_limit system_scope all + iniset $NOVA_CONF oslo_limit endpoint_id \ + $(openstack endpoint list --service nova -f value -c ID) + + # Allow the nova service user to read quotas + openstack --os-cloud devstack-system-admin role add --user nova \ + --user-domain $SERVICE_DOMAIN_NAME --system all reader +} + +function init_nova_service_user_conf { + iniset $NOVA_CONF service_user send_service_user_token True + iniset $NOVA_CONF service_user auth_type password + iniset $NOVA_CONF service_user auth_url "$KEYSTONE_SERVICE_URI" + iniset $NOVA_CONF service_user username nova + iniset $NOVA_CONF service_user password "$SERVICE_PASSWORD" + iniset $NOVA_CONF service_user user_domain_name "$SERVICE_DOMAIN_NAME" + iniset $NOVA_CONF service_user project_name "$SERVICE_PROJECT_NAME" + iniset $NOVA_CONF service_user project_domain_name "$SERVICE_DOMAIN_NAME" +} + +function conductor_conf { + local cell="$1" + echo "${NOVA_CONF_DIR}/nova_cell${cell}.conf" +} + +# create_nova_keys_dir() - Part of the init_nova() process +function create_nova_keys_dir { + # Create keys dir + sudo install -d -o $STACK_USER ${NOVA_STATE_PATH} ${NOVA_STATE_PATH}/keys +} + +function init_nova_db { + local dbname="$1" + local conffile="$2" + recreate_database 
$dbname + $NOVA_BIN_DIR/nova-manage --config-file $conffile db sync --local_cell +} + +# init_nova() - Initialize databases, etc. +function init_nova { + # All nova components talk to a central database. + # Only do this step once on the API node for an entire cluster. + if is_service_enabled $DATABASE_BACKENDS && is_service_enabled n-api; then + # (Re)create nova databases + if [[ "$CELLSV2_SETUP" == "singleconductor" ]]; then + # If we are doing singleconductor mode, we have some strange + # interdependencies. in that the main config refers to cell1 + # instead of cell0. In that case, just make sure the cell0 database + # is created before we need it below, but don't db_sync it until + # after the cellN databases are there. + recreate_database nova_cell0 + else + async_run nova-cell-0 init_nova_db nova_cell0 $NOVA_CONF + fi + + for i in $(seq 1 $NOVA_NUM_CELLS); do + async_run nova-cell-$i init_nova_db nova_cell${i} $(conductor_conf $i) + done + + recreate_database $NOVA_API_DB + $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF api_db sync + + # map_cell0 will create the cell mapping record in the nova_api DB so + # this needs to come after the api_db sync happens. + $NOVA_BIN_DIR/nova-manage cell_v2 map_cell0 --database_connection `database_connection_url nova_cell0` + + # Wait for DBs to finish from above + for i in $(seq 0 $NOVA_NUM_CELLS); do + async_wait nova-cell-$i + done + + if [[ "$CELLSV2_SETUP" == "singleconductor" ]]; then + # We didn't db sync cell0 above, so run it now + $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF db sync + fi + + # Run online migrations on the new databases + # Needed for flavor conversion + $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF db online_data_migrations + + # create the cell1 cell for the main nova db where the hosts live + for i in $(seq 1 $NOVA_NUM_CELLS); do + $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF --config-file $(conductor_conf $i) cell_v2 create_cell --name "cell$i" + done + fi + + create_nova_keys_dir + + if [[ "$NOVA_BACKEND" == "LVM" ]]; then + init_default_lvm_volume_group + fi +} + +# install_novaclient() - Collect source and prepare +function install_novaclient { + if use_library_from_git "python-novaclient"; then + git_clone_by_name "python-novaclient" + setup_dev_lib "python-novaclient" + sudo install -D -m 0644 -o $STACK_USER {${GITDIR["python-novaclient"]}/tools/,/etc/bash_completion.d/}nova.bash_completion + fi +} + +# install_nova() - Collect source and prepare +function install_nova { + + # Install os-vif + if use_library_from_git "os-vif"; then + git_clone_by_name "os-vif" + setup_dev_lib "os-vif" + fi + + if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then + install_nova_hypervisor + fi + + if is_service_enabled n-novnc; then + # a websockets/html5 or flash powered VNC console for vm instances + NOVNC_FROM_PACKAGE=$(trueorfalse False NOVNC_FROM_PACKAGE) + if [ "$NOVNC_FROM_PACKAGE" = "True" ]; then + # Installing novnc on Debian bullseye breaks the global pip + # install. This happens because novnc pulls in distro cryptography + # which will be prefered by distro pip, but if anything has + # installed pyOpenSSL from pypi (keystone) that is not compatible + # with distro cryptography. Fix this by installing + # python3-openssl (pyOpenSSL) from the distro which pip will prefer + # on Debian. Ubuntu has inverse problems so we only do this for + # Debian. 
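For orientation, the database bootstrap init_nova performs above boils down to the following ordering (a rough single-cell sketch; DevStack additionally recreates the databases, uses per-cell conductor config files, and runs the cell syncs asynchronously). The noVNC package selection the preceding comment describes resumes right below.

    # Rough single-cell summary of the cells v2 bootstrap order.
    CONF=/etc/nova/nova.conf    # illustrative path
    nova-manage --config-file "$CONF" api_db sync                  # API DB schema first
    nova-manage --config-file "$CONF" cell_v2 map_cell0            # needs the API DB
    nova-manage --config-file "$CONF" db sync                      # cell DB schema
    nova-manage --config-file "$CONF" db online_data_migrations
    nova-manage --config-file "$CONF" cell_v2 create_cell --name cell1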
+ local novnc_packages + novnc_packages="novnc" + GetOSVersion + if [[ "$os_VENDOR" = "Debian" ]] ; then + novnc_packages="$novnc_packages python3-openssl" + fi + + NOVNC_WEB_DIR=/usr/share/novnc + install_package $novnc_packages + else + NOVNC_WEB_DIR=$DEST/novnc + git_clone $NOVNC_REPO $NOVNC_WEB_DIR $NOVNC_BRANCH + fi + fi + + if is_service_enabled n-spice; then + # a websockets/html5 or flash powered SPICE console for vm instances + SPICE_FROM_PACKAGE=$(trueorfalse True SPICE_FROM_PACKAGE) + if [ "$SPICE_FROM_PACKAGE" = "True" ]; then + SPICE_WEB_DIR=/usr/share/spice-html5 + install_package spice-html5 + else + SPICE_WEB_DIR=$DEST/spice-html5 + git_clone $SPICE_REPO $SPICE_WEB_DIR $SPICE_BRANCH + fi + fi + + git_clone $NOVA_REPO $NOVA_DIR $NOVA_BRANCH + setup_develop $NOVA_DIR + sudo install -D -m 0644 -o $STACK_USER {$NOVA_DIR/tools/,/etc/bash_completion.d/}nova-manage.bash_completion +} + +# start_nova_api() - Start the API process ahead of other things +function start_nova_api { + # Get right service port for testing + local service_port=$NOVA_SERVICE_PORT + local service_protocol=$NOVA_SERVICE_PROTOCOL + local nova_url + if is_service_enabled tls-proxy; then + service_port=$NOVA_SERVICE_PORT_INT + service_protocol="http" + fi + + # Hack to set the path for rootwrap + local old_path=$PATH + export PATH=$NOVA_BIN_DIR:$PATH + + run_process "n-api" "$(which uwsgi) --procname-prefix nova-api --ini $NOVA_UWSGI_CONF" + nova_url=$service_protocol://$SERVICE_HOST/compute/v2.1/ + + echo "Waiting for nova-api to start..." + if ! wait_for_service $SERVICE_TIMEOUT $nova_url; then + die $LINENO "nova-api did not start" + fi + + export PATH=$old_path +} + + +# start_nova_compute() - Start the compute process +function start_nova_compute { + # Hack to set the path for rootwrap + local old_path=$PATH + export PATH=$NOVA_BIN_DIR:$PATH + + local compute_cell_conf=$NOVA_CONF + + # Bug #1802143: $NOVA_CPU_CONF is constructed by first copying $NOVA_CONF... + cp $compute_cell_conf $NOVA_CPU_CONF + # ...and then adding/overriding anything explicitly set in $NOVA_CPU_CONF + merge_config_file $TOP_DIR/local.conf post-config '$NOVA_CPU_CONF' + + if [[ "${CELLSV2_SETUP}" == "singleconductor" ]]; then + # NOTE(danms): Grenade doesn't setup multi-cell rabbit, so + # skip these bits and use the normal config. + echo "Skipping multi-cell conductor fleet setup" + else + # "${CELLSV2_SETUP}" is "superconductor" + # FIXME(danms): Should this be configurable? + iniset $NOVA_CPU_CONF workarounds disable_group_policy_check_upcall True + # Since the nova-compute service cannot reach nova-scheduler over + # RPC, we also disable track_instance_changes. + iniset $NOVA_CPU_CONF filter_scheduler track_instance_changes False + iniset_rpc_backend nova $NOVA_CPU_CONF DEFAULT "nova_cell${NOVA_CPU_CELL}" + fi + + # Make sure we nuke any database config + inidelete $NOVA_CPU_CONF database connection + inidelete $NOVA_CPU_CONF api_database connection + + # Console proxies were configured earlier in create_nova_conf. Now that the + # nova-cpu.conf has been created, configure the console settings required + # by the compute process. + configure_console_compute + + # Set rebuild timeout longer for BFV instances because we likely have + # slower disk than expected. 
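start_nova_api refuses to return until the endpoint answers; here is a self-contained sketch of that readiness loop using plain curl (the URL and timeout are placeholders, and DevStack's wait_for_service helper is somewhat more elaborate). The rebuild-timeout comment above finishes immediately below.

    # Wait until the API URL responds at all; any HTTP status counts as "up".
    url=http://127.0.0.1/compute/v2.1/   # example endpoint
    timeout=60
    start=$(date +%s)
    until curl -g -s -o /dev/null "$url"; do
        if (( $(date +%s) - start >= timeout )); then
            echo "nova-api did not start within ${timeout}s" >&2
            exit 1
        fi
        sleep 1
    done
    echo "nova-api is answering at $url"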
Default is 20s/GB + iniset $NOVA_CPU_CONF DEFAULT reimage_timeout_per_gb 180 + + # Configure the OVSDB connection for os-vif + if [ -n "$OVSDB_SERVER_LOCAL_HOST" ]; then + iniset $NOVA_CPU_CONF os_vif_ovs ovsdb_connection "tcp:$OVSDB_SERVER_LOCAL_HOST:6640" + fi + + # Workaround bug #1939108 + if [[ "$VIRT_DRIVER" == "libvirt" && "$LIBVIRT_TYPE" == "qemu" ]]; then + iniset $NOVA_CPU_CONF workarounds libvirt_disable_apic True + fi + + if [[ "$NOVA_CPU_UUID" ]]; then + echo -n $NOVA_CPU_UUID > $NOVA_CONF_DIR/compute_id + fi + + if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then + if [ ${NOVA_LIBVIRT_TB_CACHE_SIZE} -gt 0 ]; then + iniset $NOVA_CPU_CONF libvirt tb_cache_size ${NOVA_LIBVIRT_TB_CACHE_SIZE} + fi + # The group **$LIBVIRT_GROUP** is added to the current user in this script. + # ``sg`` is used in run_process to execute nova-compute as a member of the + # **$LIBVIRT_GROUP** group. + run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $NOVA_CPU_CONF" $LIBVIRT_GROUP + elif [[ "$VIRT_DRIVER" = 'lxd' ]]; then + run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $NOVA_CPU_CONF" $LXD_GROUP + elif [[ "$VIRT_DRIVER" = 'docker' || "$VIRT_DRIVER" = 'zun' ]]; then + run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $NOVA_CPU_CONF" $DOCKER_GROUP + elif [[ "$VIRT_DRIVER" = 'fake' ]]; then + local i + for i in `seq 1 $NUMBER_FAKE_NOVA_COMPUTE`; do + # Avoid process redirection of fake host configurations by + # creating or modifying real configurations. Each fake + # gets its own configuration and own log file. + local fake_conf="${NOVA_FAKE_CONF}-${i}" + iniset $fake_conf DEFAULT host "${HOSTNAME}${i}" + # Ensure that each fake compute has its own state path so that it + # can have its own compute_id file + local state_path + state_path="$NOVA_STATE_PATH/${HOSTNAME}${i}" + COMPUTE_ID=$(uuidgen) + sudo mkdir -p "$state_path" + iniset $fake_conf DEFAULT state_path "$state_path" + # use the generated UUID as the stable compute node UUID + echo "$COMPUTE_ID" | sudo tee "$state_path/compute_id" + run_process "n-cpu-${i}" "$NOVA_BIN_DIR/nova-compute --config-file $NOVA_CPU_CONF --config-file $fake_conf" + done + else + if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then + start_nova_hypervisor + fi + run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $NOVA_CPU_CONF" + fi + + export PATH=$old_path +} + +# start_nova() - Start running processes +function start_nova_rest { + # Hack to set the path for rootwrap + local old_path=$PATH + export PATH=$NOVA_BIN_DIR:$PATH + + local compute_cell_conf=$NOVA_CONF + + run_process n-sch "$NOVA_BIN_DIR/nova-scheduler --config-file $compute_cell_conf" + run_process n-api-meta "$(which uwsgi) --procname-prefix nova-api-meta --ini $NOVA_METADATA_UWSGI_CONF" + + export PATH=$old_path +} + +function enable_nova_console_proxies { + for i in $(seq 1 $NOVA_NUM_CELLS); do + for srv in n-novnc n-spice n-sproxy; do + if is_service_enabled $srv; then + enable_service ${srv}-cell${i} + fi + done + done +} + +function start_nova_console_proxies { + # Hack to set the path for rootwrap + local old_path=$PATH + # This is needed to find the nova conf + export PATH=$NOVA_BIN_DIR:$PATH + + local api_cell_conf=$NOVA_CONF + # console proxies run globally for singleconductor, else they run per cell + if [[ "${CELLSV2_SETUP}" == "singleconductor" ]]; then + run_process n-novnc "$NOVA_BIN_DIR/nova-novncproxy --config-file $api_cell_conf --web $NOVNC_WEB_DIR" + run_process n-spice "$NOVA_BIN_DIR/nova-spicehtml5proxy --config-file 
$api_cell_conf --web $SPICE_WEB_DIR" + run_process n-sproxy "$NOVA_BIN_DIR/nova-serialproxy --config-file $api_cell_conf" + else + enable_nova_console_proxies + for i in $(seq 1 $NOVA_NUM_CELLS); do + local conf + conf=$(conductor_conf $i) + run_process n-novnc-cell${i} "$NOVA_BIN_DIR/nova-novncproxy --config-file $conf --web $NOVNC_WEB_DIR" + run_process n-spice-cell${i} "$NOVA_BIN_DIR/nova-spicehtml5proxy --config-file $conf --web $SPICE_WEB_DIR" + run_process n-sproxy-cell${i} "$NOVA_BIN_DIR/nova-serialproxy --config-file $conf" + done + fi + + export PATH=$old_path +} + +function enable_nova_fleet { + if is_service_enabled n-cond; then + enable_service n-super-cond + for i in $(seq 1 $NOVA_NUM_CELLS); do + enable_service n-cond-cell${i} + done + fi +} + +function start_nova_conductor { + if [[ "${CELLSV2_SETUP}" == "singleconductor" ]]; then + echo "Starting nova-conductor in a cellsv1-compatible way" + run_process n-cond "$NOVA_BIN_DIR/nova-conductor --config-file $NOVA_COND_CONF" + return + fi + + enable_nova_fleet + if is_service_enabled n-super-cond; then + run_process n-super-cond "$NOVA_BIN_DIR/nova-conductor --config-file $NOVA_COND_CONF" + fi + for i in $(seq 1 $NOVA_NUM_CELLS); do + if is_service_enabled n-cond-cell${i}; then + local conf + conf=$(conductor_conf $i) + run_process n-cond-cell${i} "$NOVA_BIN_DIR/nova-conductor --config-file $conf" + fi + done +} + +function is_nova_ready { + # NOTE(sdague): with cells v2 all the compute services must be up + # and checked into the database before discover_hosts is run. This + # happens in all in one installs by accident, because > 30 seconds + # happen between here and the script ending. However, in multinode + # tests this can very often not be the case. So ensure that the + # compute is up before we move on. + wait_for_compute $NOVA_READY_TIMEOUT +} + +function start_nova { + start_nova_rest + start_nova_console_proxies + start_nova_conductor + start_nova_compute + if is_service_enabled n-api; then + # dump the cell mapping to ensure life is good + echo "Dumping cells_v2 mapping" + $NOVA_BIN_DIR/nova-manage cell_v2 list_cells --verbose + fi +} + +function stop_nova_compute { + if [ "$VIRT_DRIVER" == "fake" ]; then + local i + for i in `seq 1 $NUMBER_FAKE_NOVA_COMPUTE`; do + stop_process n-cpu-${i} + done + else + stop_process n-cpu + fi + if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then + stop_nova_hypervisor + fi +} + +function stop_nova_rest { + # Kill the non-compute nova processes + for serv in n-api n-api-meta n-sch; do + stop_process $serv + done +} + +function stop_nova_console_proxies { + if [[ "${CELLSV2_SETUP}" == "singleconductor" ]]; then + for srv in n-novnc n-spice n-sproxy; do + stop_process $srv + done + else + enable_nova_console_proxies + for i in $(seq 1 $NOVA_NUM_CELLS); do + for srv in n-novnc n-spice n-sproxy; do + stop_process ${srv}-cell${i} + done + done + fi +} + +function stop_nova_conductor { + if [[ "${CELLSV2_SETUP}" == "singleconductor" ]]; then + stop_process n-cond + return + fi + + enable_nova_fleet + for srv in n-super-cond $(seq -f n-cond-cell%0.f 1 $NOVA_NUM_CELLS); do + if is_service_enabled $srv; then + stop_process $srv + fi + done +} + +# stop_nova() - Stop running processes +function stop_nova { + stop_nova_rest + stop_nova_console_proxies + stop_nova_conductor + stop_nova_compute +} + +# create_instance_types(): Create default flavors +function create_flavors { + if is_service_enabled n-api; then + if ! 
openstack --os-region-name="$REGION_NAME" flavor list | grep -q ds512M; then + # Note that danms hates these flavors and apologizes for sdague + openstack --os-region-name="$REGION_NAME" flavor create --id c1 --ram 256 --disk 1 --vcpus 1 --property hw_rng:allowed=True cirros256 + openstack --os-region-name="$REGION_NAME" flavor create --id d1 --ram 512 --disk 5 --vcpus 1 --property hw_rng:allowed=True ds512M + openstack --os-region-name="$REGION_NAME" flavor create --id d2 --ram 1024 --disk 10 --vcpus 1 --property hw_rng:allowed=True ds1G + openstack --os-region-name="$REGION_NAME" flavor create --id d3 --ram 2048 --disk 10 --vcpus 2 --property hw_rng:allowed=True ds2G + openstack --os-region-name="$REGION_NAME" flavor create --id d4 --ram 4096 --disk 20 --vcpus 4 --property hw_rng:allowed=True ds4G + fi + + if ! openstack --os-region-name="$REGION_NAME" flavor list | grep -q m1.tiny; then + openstack --os-region-name="$REGION_NAME" flavor create --id 1 --ram 512 --disk 1 --vcpus 1 --property hw_rng:allowed=True m1.tiny + openstack --os-region-name="$REGION_NAME" flavor create --id 2 --ram 2048 --disk 20 --vcpus 1 --property hw_rng:allowed=True m1.small + openstack --os-region-name="$REGION_NAME" flavor create --id 3 --ram 4096 --disk 40 --vcpus 2 --property hw_rng:allowed=True m1.medium + openstack --os-region-name="$REGION_NAME" flavor create --id 4 --ram 8192 --disk 80 --vcpus 4 --property hw_rng:allowed=True m1.large + openstack --os-region-name="$REGION_NAME" flavor create --id 5 --ram 16384 --disk 160 --vcpus 8 --property hw_rng:allowed=True m1.xlarge + fi + fi +} + +# Restore xtrace +$_XTRACE_LIB_NOVA + +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt new file mode 100644 index 0000000000..c0713f9953 --- /dev/null +++ b/lib/nova_plugins/functions-libvirt @@ -0,0 +1,207 @@ +#!/bin/bash +# +# lib/nova_plugins/functions-libvirt +# Common libvirt configuration functions + +# Dependencies: +# ``functions`` file +# ``STACK_USER`` has to be defined + +# Save trace setting +_XTRACE_NOVA_FN_LIBVIRT=$(set +o | grep xtrace) +set +o xtrace + +# Defaults +# -------- + +# Turn on selective debug log filters for libvirt. +# (NOTE: Enabling this by default, because the log filters enabled in +# 'configure_libvirt' function further below are _selective_ and not +# extremely verbose.) +DEBUG_LIBVIRT=$(trueorfalse True DEBUG_LIBVIRT) + +# Try to enable coredumps for libvirt +# Currently fairly specific to OpenStackCI hosts +DEBUG_LIBVIRT_COREDUMPS=$(trueorfalse False DEBUG_LIBVIRT_COREDUMPS) + +# Enable the Fedora Virtualization Preview Copr repo that provides the latest +# rawhide builds of QEMU, Libvirt and other virt tools. 
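create_flavors above only creates each batch when a probe flavor is missing, which keeps repeated stack.sh runs idempotent; the same guard for a single flavor looks like this (the flavor name and sizing are examples). The virt-preview repo flag the preceding comment introduces follows right below.

    # Create a flavor only if it does not already exist.
    name=m1.nano    # illustrative flavor name
    if ! openstack flavor list -f value -c Name | grep -qx "$name"; then
        openstack flavor create --id auto --ram 128 --disk 1 --vcpus 1 \
            --property hw_rng:allowed=True "$name"
    fi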
+ENABLE_FEDORA_VIRT_PREVIEW_REPO=$(trueorfalse False ENABLE_FEDORA_VIRT_PREVIEW_REPO) + +# Enable coredumps for libvirt +# Bug: https://bugs.launchpad.net/nova/+bug/1643911 +function _enable_coredump { + local confdir=/etc/systemd/system/libvirtd.service.d + local conffile=${confdir}/coredump.conf + + # Create a coredump directory, and instruct the kernel to save to + # here + sudo mkdir -p /var/core + sudo chmod a+wrx /var/core + echo '/var/core/core.%e.%p.%h.%t' | \ + sudo tee /proc/sys/kernel/core_pattern + + # Drop a config file to up the core ulimit + sudo mkdir -p ${confdir} + sudo tee ${conffile} < + elif is_fedora; then + + # Optionally enable the virt-preview repo when on Fedora + if [[ $DISTRO =~ f[0-9][0-9] ]] && [[ ${ENABLE_FEDORA_VIRT_PREVIEW_REPO} == "True" ]]; then + # https://copr.fedorainfracloud.org/coprs/g/virtmaint-sig/virt-preview/ + sudo dnf copr enable -y @virtmaint-sig/virt-preview + fi + + if is_openeuler; then + qemu_package=qemu + else + qemu_package=qemu-kvm + fi + + # Note that in CentOS/RHEL this needs to come from the RDO + # repositories (qemu-kvm-ev ... which provides this package) + # as the base system version is too old. We should have + # pre-installed these + install_package $qemu_package + install_package libvirt libvirt-devel + + if [[ $DISTRO =~ rhel9 ]]; then + pip_install_gr libvirt-python + else + install_package python3-libvirt + fi + + if is_arch "aarch64"; then + install_package edk2-aarch64 + fi + fi + + if [[ $DEBUG_LIBVIRT_COREDUMPS == True ]]; then + _enable_coredump + fi +} + +# Configures the installed libvirt system so that is accessible by +# STACK_USER via qemu:///system with management capabilities. +function configure_libvirt { + if is_service_enabled neutron && ! sudo grep -q '^cgroup_device_acl' $QEMU_CONF; then + # Add /dev/net/tun to cgroup_device_acls, needed for type=ethernet interfaces + cat </dev/null; then + sudo groupadd $LIBVIRT_GROUP + fi + add_user_to_group $STACK_USER $LIBVIRT_GROUP + + # Enable server side traces for libvirtd + if [[ "$DEBUG_LIBVIRT" = "True" ]] ; then + if is_ubuntu; then + # Unexpectedly binary package builds in ubuntu get fully qualified + # source file paths, not relative paths. This screws with the matching + # of '1:libvirt' making everything turn on. So use libvirt.c for now. + # This will have to be re-visited when Ubuntu ships libvirt >= 1.2.3 + local log_filters="1:libvirt.c 1:qemu 1:conf 1:security 3:object 3:event 3:json 3:file 1:util 1:cpu" + else + local log_filters="1:libvirt 1:qemu 1:conf 1:security 3:object 3:event 3:json 3:file 1:util 1:cpu" + fi + local log_outputs="1:file:/var/log/libvirt/libvirtd.log" + if ! sudo grep -q "^log_filters=\"$log_filters\"" /etc/libvirt/libvirtd.conf; then + echo "log_filters=\"$log_filters\"" | sudo tee -a /etc/libvirt/libvirtd.conf + fi + if ! sudo grep -q "^log_outputs=\"$log_outputs\"" /etc/libvirt/libvirtd.conf; then + echo "log_outputs=\"$log_outputs\"" | sudo tee -a /etc/libvirt/libvirtd.conf + fi + fi + + if is_nova_console_proxy_compute_tls_enabled ; then + echo "vnc_tls = 1" | sudo tee -a $QEMU_CONF + echo "vnc_tls_x509_verify = 1" | sudo tee -a $QEMU_CONF + + sudo mkdir -p /etc/pki/libvirt-vnc + deploy_int_CA /etc/pki/libvirt-vnc/ca-cert.pem + deploy_int_cert /etc/pki/libvirt-vnc/server-cert.pem /etc/pki/libvirt-vnc/server-key.pem + # OpenSSL 1.1.0 generates the key file with permissions: 600, by + # default and the deploy_int* methods use 'sudo cp' to copy the + # files, making them owned by root:root. 
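configure_libvirt above appends log settings to libvirtd.conf only when they are not already present, so re-running is harmless; here is that append-once pattern on its own, against a throwaway file so the sketch is safe to run. The certificate-ownership comment above carries on right below.

    # Append a config line only if the exact line is not already there.
    conf=/tmp/example-libvirtd.conf    # illustrative file, not the real libvirt path
    setting='log_outputs="1:file:/var/log/libvirt/libvirtd.log"'
    touch "$conf"
    if ! grep -qxF "$setting" "$conf"; then
        echo "$setting" >> "$conf"     # append once; re-running is a no-op
    fi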
+ # Change ownership of everything under /etc/pki/libvirt-vnc to + # libvirt-qemu:libvirt-qemu so that libvirt-qemu can read the key + # file. + sudo chown -R libvirt-qemu:libvirt-qemu /etc/pki/libvirt-vnc + fi + + # Service needs to be started on redhat/fedora -- do a restart for + # sanity after fiddling the config. + restart_service libvirtd + + # Restart virtlogd companion service to ensure it is running properly + # https://bugs.launchpad.net/ubuntu/+source/libvirt/+bug/1577455 + # https://bugzilla.redhat.com/show_bug.cgi?id=1290357 + # (not all platforms have it; libvirt 1.3+ only, thus the ignore) + restart_service virtlogd || true +} + + +# Restore xtrace +$_XTRACE_NOVA_FN_LIBVIRT + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/nova_plugins/hypervisor-fake b/lib/nova_plugins/hypervisor-fake new file mode 100644 index 0000000000..39cb45ca67 --- /dev/null +++ b/lib/nova_plugins/hypervisor-fake @@ -0,0 +1,68 @@ +#!/bin/bash +# +# lib/nova_plugins/hypervisor-fake +# Configure the fake hypervisor + +# Enable with: +# VIRT_DRIVER=fake + +# Dependencies: +# ``functions`` file +# ``nova`` configuration + +# install_nova_hypervisor - install any external requirements +# configure_nova_hypervisor - make configuration changes, including those to other services +# start_nova_hypervisor - start any external services +# stop_nova_hypervisor - stop any external services +# cleanup_nova_hypervisor - remove transient data and cache + +# Save trace setting +_XTRACE_VIRTFAKE=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + + +# Entry Points +# ------------ + +# clean_nova_hypervisor - Clean up an installation +function cleanup_nova_hypervisor { + # This function intentionally left blank + : +} + +# configure_nova_hypervisor - Set config files, create data dirs, etc +function configure_nova_hypervisor { + iniset $NOVA_CONF DEFAULT compute_driver "fake.FakeDriverWithoutFakeNodes" + # Disable arbitrary limits + iniset $NOVA_CONF quota driver nova.quota.NoopQuotaDriver +} + +# install_nova_hypervisor() - Install external components +function install_nova_hypervisor { + # This function intentionally left blank + : +} + +# start_nova_hypervisor - Start any required external services +function start_nova_hypervisor { + # This function intentionally left blank + : +} + +# stop_nova_hypervisor - Stop any external services +function stop_nova_hypervisor { + # This function intentionally left blank + : +} + + +# Restore xtrace +$_XTRACE_VIRTFAKE + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/nova_plugins/hypervisor-ironic b/lib/nova_plugins/hypervisor-ironic new file mode 100644 index 0000000000..9a39c798a8 --- /dev/null +++ b/lib/nova_plugins/hypervisor-ironic @@ -0,0 +1,98 @@ +#!/bin/bash +# +# lib/nova_plugins/hypervisor-ironic +# Configure the ironic hypervisor + +# Enable with: +# VIRT_DRIVER=ironic + +# Dependencies: +# ``functions`` file +# ``nova`` configuration + +# install_nova_hypervisor - install any external requirements +# configure_nova_hypervisor - make configuration changes, including those to other services +# start_nova_hypervisor - start any external services +# stop_nova_hypervisor - stop any external services +# cleanup_nova_hypervisor - remove transient data and cache + +# Save trace setting +_XTRACE_HYP_IRONIC=$(set +o | grep xtrace) +set +o xtrace + +source $TOP_DIR/lib/nova_plugins/functions-libvirt + +# Defaults +# -------- + +# Entry Points +# ------------ + +# clean_nova_hypervisor - Clean up an installation +function 
cleanup_nova_hypervisor { + # This function intentionally left blank + : +} + +# configure_nova_hypervisor - Set config files, create data dirs, etc +function configure_nova_hypervisor { + if ! is_ironic_hardware; then + configure_libvirt + fi + + iniset $NOVA_CONF DEFAULT compute_driver ironic.IronicDriver + + # ironic section + iniset $NOVA_CONF ironic auth_type password + iniset $NOVA_CONF ironic username admin + iniset $NOVA_CONF ironic password $ADMIN_PASSWORD + iniset $NOVA_CONF ironic auth_url $KEYSTONE_SERVICE_URI + if is_ironic_enforce_scope; then + iniset $NOVA_CONF ironic system_scope all + else + iniset $NOVA_CONF ironic project_domain_id default + iniset $NOVA_CONF ironic project_name demo + fi + if is_ironic_sharded; then + iniset $NOVA_CONF ironic shard $IRONIC_SHARD_1_NAME + fi + + iniset $NOVA_CONF ironic user_domain_id default + iniset $NOVA_CONF ironic region_name $REGION_NAME + + # These are used with crufty legacy ironicclient + iniset $NOVA_CONF ironic api_max_retries 300 + iniset $NOVA_CONF ironic api_retry_interval 5 + # These are used with shiny new openstacksdk + iniset $NOVA_CONF ironic connect_retries 300 + iniset $NOVA_CONF ironic connect_retry_delay 5 + iniset $NOVA_CONF ironic status_code_retries 300 + iniset $NOVA_CONF ironic status_code_retry_delay 5 +} + +# install_nova_hypervisor() - Install external components +function install_nova_hypervisor { + if is_ironic_hardware; then + return + fi + install_libvirt +} + +# start_nova_hypervisor - Start any required external services +function start_nova_hypervisor { + # This function intentionally left blank + : +} + +# stop_nova_hypervisor - Stop any external services +function stop_nova_hypervisor { + # This function intentionally left blank + : +} + +# Restore xtrace +$_XTRACE_HYP_IRONIC + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt new file mode 100644 index 0000000000..4b44c1f86f --- /dev/null +++ b/lib/nova_plugins/hypervisor-libvirt @@ -0,0 +1,145 @@ +#!/bin/bash +# +# lib/nova_plugins/hypervisor-libvirt +# Configure the libvirt hypervisor + +# Enable with: +# VIRT_DRIVER=libvirt + +# Dependencies: +# ``functions`` file +# ``nova`` configuration + +# install_nova_hypervisor - install any external requirements +# configure_nova_hypervisor - make configuration changes, including those to other services +# start_nova_hypervisor - start any external services +# stop_nova_hypervisor - stop any external services +# cleanup_nova_hypervisor - remove transient data and cache + +# Save trace setting +_XTRACE_NOVA_LIBVIRT=$(set +o | grep xtrace) +set +o xtrace + +source $TOP_DIR/lib/nova_plugins/functions-libvirt + +# Defaults +# -------- + + +# Entry Points +# ------------ + +# clean_nova_hypervisor - Clean up an installation +function cleanup_nova_hypervisor { + # This function intentionally left blank + : +} + +# configure_nova_hypervisor - Set config files, create data dirs, etc +function configure_nova_hypervisor { + configure_libvirt + iniset $NOVA_CONF libvirt virt_type "$LIBVIRT_TYPE" + iniset $NOVA_CONF libvirt cpu_mode "$LIBVIRT_CPU_MODE" + if [ "$LIBVIRT_CPU_MODE" == "custom" ] ; then + iniset $NOVA_CONF libvirt cpu_model "$LIBVIRT_CPU_MODEL" + fi + # Do not enable USB tablet input devices to avoid QEMU CPU overhead. 
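To make the effect of configure_nova_hypervisor concrete, this is roughly the nova.conf fragment produced by the iniset calls here and immediately below, assuming kvm and the stack user (a sketch written to a dummy path; the pointer_model line the comment above refers to appears right after this block).

    # Illustrative output only; real settings are written with iniset into $NOVA_CONF.
    cat > /tmp/nova-libvirt-example.conf <<'EOF'
    [DEFAULT]
    compute_driver = libvirt.LibvirtDriver
    pointer_model = ps2mouse
    default_ephemeral_format = ext4

    [libvirt]
    virt_type = kvm
    live_migration_uri = qemu+ssh://stack@%s/system
    EOF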
+ iniset $NOVA_CONF DEFAULT pointer_model "ps2mouse" + iniset $NOVA_CONF libvirt live_migration_uri "qemu+ssh://$STACK_USER@%s/system" + iniset $NOVA_CONF DEFAULT default_ephemeral_format "ext4" + iniset $NOVA_CONF DEFAULT compute_driver "libvirt.LibvirtDriver" + # Power architecture currently does not support graphical consoles. + if is_arch "ppc64"; then + iniset $NOVA_CONF vnc enabled "false" + fi + + # arm64-specific configuration + if is_arch "aarch64"; then + iniset $NOVA_CONF libvirt cpu_mode "host-passthrough" + # NOTE(mnaser): We cannot have `cpu_model` set if the `cpu_mode` is + # set to `host-passthrough`, or `nova-compute` refuses to + # start. + inidelete $NOVA_CONF libvirt cpu_model + fi + + if isset ENABLE_FILE_INJECTION; then + if [ "$ENABLE_FILE_INJECTION" == "True" ]; then + # -1 means use libguestfs to inspect the guest OS image for the + # root partition to use for file injection. + iniset $NOVA_CONF libvirt inject_partition '-1' + fi + fi + + if [[ "$LIBVIRT_TYPE" = "parallels" ]]; then + iniset $NOVA_CONF libvirt connection_uri "parallels+unix:///system" + iniset $NOVA_CONF libvirt images_type "ploop" + iniset $NOVA_CONF DEFAULT force_raw_images "False" + iniset $NOVA_CONF vnc server_proxyclient_address $HOST_IP + iniset $NOVA_CONF vnc server_listen $HOST_IP + iniset $NOVA_CONF vnc keymap + elif [[ "$NOVA_BACKEND" == "LVM" ]]; then + iniset $NOVA_CONF libvirt images_type "lvm" + iniset $NOVA_CONF libvirt images_volume_group $DEFAULT_VOLUME_GROUP_NAME + if isset LVM_VOLUME_CLEAR; then + iniset $NOVA_CONF libvirt volume_clear "$LVM_VOLUME_CLEAR" + fi + fi +} + +# install_nova_hypervisor() - Install external components +function install_nova_hypervisor { + install_libvirt + + # Install and configure **LXC** if specified. LXC is another approach to + # splitting a system into many smaller parts. LXC uses cgroups and chroot + # to simulate multiple systems. + if [[ "$LIBVIRT_TYPE" == "lxc" ]]; then + if is_ubuntu; then + if [[ "$DISTRO" > natty ]]; then + install_package cgroup-lite + fi + else + ### FIXME(dtroyer): figure this out + echo "RPM-based cgroup not implemented yet" + yum_install libcgroup-tools + fi + fi + + if [[ "$ENABLE_FILE_INJECTION" == "True" ]] ; then + if is_ubuntu; then + install_package python3-guestfs + # NOTE(andreaf) Ubuntu kernel can only be read by root, which breaks libguestfs: + # https://bugs.launchpad.net/ubuntu/+source/linux/+bug/759725) + INSTALLED_KERNELS="$(ls /boot/vmlinuz-*)" + for kernel in $INSTALLED_KERNELS; do + STAT_OVERRIDE="root root 644 ${kernel}" + # unstack won't remove the statoverride, so make this idempotent + if [[ ! 
$(dpkg-statoverride --list | grep "$STAT_OVERRIDE") ]]; then + sudo dpkg-statoverride --add --update $STAT_OVERRIDE + fi + done + elif is_fedora; then + install_package python3-libguestfs + fi + fi +} + +# start_nova_hypervisor - Start any required external services +function start_nova_hypervisor { + # This function intentionally left blank + : +} + +# stop_nova_hypervisor - Stop any external services +function stop_nova_hypervisor { + # This function intentionally left blank + : +} + + +# Restore xtrace +$_XTRACE_NOVA_LIBVIRT + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/nova_plugins/hypervisor-openvz b/lib/nova_plugins/hypervisor-openvz new file mode 100644 index 0000000000..57dc45c1c5 --- /dev/null +++ b/lib/nova_plugins/hypervisor-openvz @@ -0,0 +1,67 @@ +#!/bin/bash +# +# lib/nova_plugins/hypervisor-openvz +# Configure the openvz hypervisor + +# Enable with: +# VIRT_DRIVER=openvz + +# Dependencies: +# ``functions`` file +# ``nova`` configuration + +# install_nova_hypervisor - install any external requirements +# configure_nova_hypervisor - make configuration changes, including those to other services +# start_nova_hypervisor - start any external services +# stop_nova_hypervisor - stop any external services +# cleanup_nova_hypervisor - remove transient data and cache + +# Save trace setting +_XTRACE_OPENVZ=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + + +# Entry Points +# ------------ + +# clean_nova_hypervisor - Clean up an installation +function cleanup_nova_hypervisor { + # This function intentionally left blank + : +} + +# configure_nova_hypervisor - Set config files, create data dirs, etc +function configure_nova_hypervisor { + iniset $NOVA_CONF DEFAULT compute_driver "openvz.OpenVzDriver" + iniset $NOVA_CONF DEFAULT connection_type "openvz" +} + +# install_nova_hypervisor() - Install external components +function install_nova_hypervisor { + # This function intentionally left blank + : +} + +# start_nova_hypervisor - Start any required external services +function start_nova_hypervisor { + # This function intentionally left blank + : +} + +# stop_nova_hypervisor - Stop any external services +function stop_nova_hypervisor { + # This function intentionally left blank + : +} + + +# Restore xtrace +$_XTRACE_OPENVZ + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/nova_plugins/hypervisor-vsphere b/lib/nova_plugins/hypervisor-vsphere new file mode 100644 index 0000000000..7c08bc945b --- /dev/null +++ b/lib/nova_plugins/hypervisor-vsphere @@ -0,0 +1,71 @@ +#!/bin/bash +# +# lib/nova_plugins/hypervisor-vsphere +# Configure the vSphere hypervisor + +# Enable with: +# VIRT_DRIVER=vsphere + +# Dependencies: +# ``functions`` file +# ``nova`` configuration + +# install_nova_hypervisor - install any external requirements +# configure_nova_hypervisor - make configuration changes, including those to other services +# start_nova_hypervisor - start any external services +# stop_nova_hypervisor - stop any external services +# cleanup_nova_hypervisor - remove transient data and cache + +# Save trace setting +_XTRACE_NOVA_VSPHERE=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + + +# Entry Points +# ------------ + +# clean_nova_hypervisor - Clean up an installation +function cleanup_nova_hypervisor { + # This function intentionally left blank + : +} + +# configure_nova_hypervisor - Set config files, create data dirs, etc +function configure_nova_hypervisor { + iniset $NOVA_CONF DEFAULT compute_driver 
"vmwareapi.VMwareVCDriver" + VMWAREAPI_USER=${VMWAREAPI_USER:-"root"} + iniset $NOVA_CONF vmware host_ip "$VMWAREAPI_IP" + iniset $NOVA_CONF vmware host_username "$VMWAREAPI_USER" + iniset $NOVA_CONF vmware host_password "$VMWAREAPI_PASSWORD" + iniset_multiline $NOVA_CONF vmware cluster_name "$VMWAREAPI_CLUSTER" +} + +# install_nova_hypervisor() - Install external components +function install_nova_hypervisor { + # This function intentionally left blank + : +} + +# start_nova_hypervisor - Start any required external services +function start_nova_hypervisor { + # This function intentionally left blank + : +} + +# stop_nova_hypervisor - Stop any external services +function stop_nova_hypervisor { + # This function intentionally left blank + : +} + + +# Restore xtrace +$_XTRACE_NOVA_VSPHERE + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/os-vif b/lib/os-vif new file mode 100644 index 0000000000..7c8bee3744 --- /dev/null +++ b/lib/os-vif @@ -0,0 +1,22 @@ +#!/bin/bash + +function is_ml2_ovs { + if [[ "${Q_AGENT}" == "openvswitch" ]]; then + echo "True" + fi + echo "False" +} + +# This should be true for any ml2/ovs job but should be set to false for +# all other ovs based jobs e.g. ml2/ovn +OS_VIF_OVS_ISOLATE_VIF=${OS_VIF_OVS_ISOLATE_VIF:=$(is_ml2_ovs)} +OS_VIF_OVS_ISOLATE_VIF=$(trueorfalse False OS_VIF_OVS_ISOLATE_VIF) + +function configure_os_vif { + if [[ -e ${NOVA_CONF} ]]; then + iniset ${NOVA_CONF} os_vif_ovs isolate_vif ${OS_VIF_OVS_ISOLATE_VIF} + fi + if [[ -e ${NEUTRON_CONF} ]]; then + iniset ${NEUTRON_CONF} os_vif_ovs isolate_vif ${OS_VIF_OVS_ISOLATE_VIF} + fi +} diff --git a/lib/placement b/lib/placement new file mode 100644 index 0000000000..03aaa0344b --- /dev/null +++ b/lib/placement @@ -0,0 +1,151 @@ +#!/bin/bash +# +# lib/placement +# Functions to control the configuration and operation of the **Placement** service +# + +# Dependencies: +# +# - ``functions`` file +# - ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined +# - ``FILES`` + +# ``stack.sh`` calls the entry points in this order: +# +# - install_placement +# - cleanup_placement +# - configure_placement +# - init_placement +# - start_placement +# - stop_placement + +# Save trace setting +_XTRACE_LIB_PLACEMENT=$(set +o | grep xtrace) +set +o xtrace + +# Defaults +# -------- + +PLACEMENT_DIR=$DEST/placement +PLACEMENT_CONF_DIR=/etc/placement +PLACEMENT_CONF=$PLACEMENT_CONF_DIR/placement.conf +PLACEMENT_AUTH_STRATEGY=${PLACEMENT_AUTH_STRATEGY:-keystone} +# Placement virtual environment +if [[ ${USE_VENV} = True ]]; then + PROJECT_VENV["placement"]=${PLACEMENT_DIR}.venv + PLACEMENT_BIN_DIR=${PROJECT_VENV["placement"]}/bin +else + PLACEMENT_BIN_DIR=$(get_python_exec_prefix) +fi +PLACEMENT_UWSGI=placement.wsgi.api:application +PLACEMENT_UWSGI_CONF=$PLACEMENT_CONF_DIR/placement-uwsgi.ini + +if is_service_enabled tls-proxy; then + PLACEMENT_SERVICE_PROTOCOL="https" +fi + +# Public facing bits +PLACEMENT_SERVICE_PROTOCOL=${PLACEMENT_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} +PLACEMENT_SERVICE_HOST=${PLACEMENT_SERVICE_HOST:-$SERVICE_HOST} + +# Flag to set the oslo_policy.enforce_scope and oslo_policy.enforce_new_defaults. +# This is used to switch the Placement API policies scope and new defaults. +# By Default, these flag are False. 
+# For more detail: https://docs.openstack.org/oslo.policy/latest/configuration/index.html#oslo_policy.enforce_scope +PLACEMENT_ENFORCE_SCOPE=$(trueorfalse False PLACEMENT_ENFORCE_SCOPE) + +# Functions +# --------- + +# Test if any placement services are enabled +# is_placement_enabled +function is_placement_enabled { + [[ ,${ENABLED_SERVICES} =~ ,"placement-api" ]] && return 0 + return 1 +} + +# cleanup_placement() - Remove residual data files, anything left over from previous +# runs that a clean run would need to clean up +function cleanup_placement { + sudo rm -f $(apache_site_config_for placement-api) + remove_uwsgi_config "$PLACEMENT_UWSGI_CONF" "placement-api" +} + +# create_placement_conf() - Write config +function create_placement_conf { + rm -f $PLACEMENT_CONF + iniset $PLACEMENT_CONF placement_database connection `database_connection_url placement` + iniset $PLACEMENT_CONF DEFAULT debug "$ENABLE_DEBUG_LOG_LEVEL" + iniset $PLACEMENT_CONF api auth_strategy $PLACEMENT_AUTH_STRATEGY + configure_keystone_authtoken_middleware $PLACEMENT_CONF placement + setup_logging $PLACEMENT_CONF +} + +# configure_placement() - Set config files, create data dirs, etc +function configure_placement { + sudo install -d -o $STACK_USER $PLACEMENT_CONF_DIR + create_placement_conf + + write_uwsgi_config "$PLACEMENT_UWSGI_CONF" "$PLACEMENT_UWSGI" "/placement" "" "placement-api" + if [[ "$PLACEMENT_ENFORCE_SCOPE" == "True" || "$ENFORCE_SCOPE" == "True" ]]; then + iniset $PLACEMENT_CONF oslo_policy enforce_new_defaults True + iniset $PLACEMENT_CONF oslo_policy enforce_scope True + else + iniset $PLACEMENT_CONF oslo_policy enforce_new_defaults False + iniset $PLACEMENT_CONF oslo_policy enforce_scope False + fi +} + +# create_placement_accounts() - Set up required placement accounts +# and service and endpoints. +function create_placement_accounts { + create_service_user "placement" "admin" + local placement_api_url="$PLACEMENT_SERVICE_PROTOCOL://$PLACEMENT_SERVICE_HOST/placement" + get_or_create_service "placement" "placement" "Placement Service" + get_or_create_endpoint \ + "placement" \ + "$REGION_NAME" \ + "$placement_api_url" +} + +# init_placement() - Create service user and endpoints +function init_placement { + recreate_database placement + $PLACEMENT_BIN_DIR/placement-manage db sync + create_placement_accounts +} + +# install_placement() - Collect source and prepare +function install_placement { + # Install the openstackclient placement client plugin for CLI + pip_install_gr osc-placement + git_clone $PLACEMENT_REPO $PLACEMENT_DIR $PLACEMENT_BRANCH + setup_develop $PLACEMENT_DIR +} + +# start_placement_api() - Start the API processes ahead of other things +function start_placement_api { + run_process "placement-api" "$(which uwsgi) --procname-prefix placement --ini $PLACEMENT_UWSGI_CONF" + + echo "Waiting for placement-api to start..." + if ! wait_for_service $SERVICE_TIMEOUT $PLACEMENT_SERVICE_PROTOCOL://$PLACEMENT_SERVICE_HOST/placement; then + die $LINENO "placement-api did not start" + fi +} + +function start_placement { + start_placement_api +} + +# stop_placement() - Disable the api service and stop it. 
+function stop_placement { + stop_process "placement-api" +} + +# Restore xtrace +$_XTRACE_LIB_PLACEMENT + +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: diff --git a/lib/rpc_backend b/lib/rpc_backend new file mode 100644 index 0000000000..bbb41499be --- /dev/null +++ b/lib/rpc_backend @@ -0,0 +1,180 @@ +#!/bin/bash +# +# lib/rpc_backend +# Interface for installing RabbitMQ on the system + +# Dependencies: +# +# - ``functions`` file +# - ``RABBIT_{HOST|PASSWORD|USERID}`` must be defined when RabbitMQ is used + +# ``stack.sh`` calls the entry points in this order: +# +# - check_rpc_backend +# - install_rpc_backend +# - restart_rpc_backend +# - iniset_rpc_backend (stable interface) +# +# Note: if implementing an out of tree plugin for an RPC backend, you +# should install all services through normal plugin methods, then +# redefine ``iniset_rpc_backend`` in your code. That's the one portion +# of this file which is a standard interface. + +# Save trace setting +_XTRACE_RPC_BACKEND=$(set +o | grep xtrace) +set +o xtrace + +RABBIT_USERID=${RABBIT_USERID:-stackrabbit} +if is_service_enabled rabbit; then + RABBIT_HOST=${RABBIT_HOST:-$SERVICE_HOST} +fi + +# Functions +# --------- + +# clean up after rpc backend - eradicate all traces so changing backends +# produces a clean switch +function cleanup_rpc_backend { + if is_service_enabled rabbit; then + # Obliterate rabbitmq-server + uninstall_package rabbitmq-server + # in case it's not actually running, /bin/true at the end + sudo killall epmd || sudo killall -9 epmd || /bin/true + if is_ubuntu; then + # And the Erlang runtime too + apt_get purge -y erlang* + fi + fi +} + +# install rpc backend +function install_rpc_backend { + if is_service_enabled rabbit; then + # Install rabbitmq-server + install_package rabbitmq-server + if is_fedora; then + # NOTE(jangutter): If rabbitmq is not running (as in a fresh + # install) then rabbit_setuser triggers epmd@0.0.0.0.socket with + # socket activation. This fails the first time and does not get + # cleared. It is benign, but the workaround is to start rabbitmq a + # bit earlier for RPM based distros. + sudo systemctl --now enable rabbitmq-server + fi + fi +} + +# restart the rpc backend +function restart_rpc_backend { + if is_service_enabled rabbit; then + # Start rabbitmq-server + echo_summary "Starting RabbitMQ" + # NOTE(bnemec): Retry initial rabbitmq configuration to deal with + # the fact that sometimes it fails to start properly. + # Reference: https://bugzilla.redhat.com/show_bug.cgi?id=1144100 + # NOTE(tonyb): Extend the original retry logic to only restart rabbitmq + # every second time around the loop. + # See: https://bugs.launchpad.net/devstack/+bug/1449056 for details on + # why this is needed. This can bee seen on vivid and Debian unstable + # (May 2015) + # TODO(tonyb): Remove this when Debian and Ubuntu have a fixed systemd + # service file. + local i + for i in `seq 20`; do + local rc=0 + + [[ $i -eq "20" ]] && die $LINENO "Failed to set rabbitmq password" + + if [[ $(( i % 2 )) == "0" ]] ; then + restart_service rabbitmq-server + fi + + rabbit_setuser "$RABBIT_USERID" "$RABBIT_PASSWORD" || rc=$? + if [ $rc -ne 0 ]; then + continue + fi + + # change the rabbit password since the default is "guest" + sudo rabbitmqctl change_password \ + $RABBIT_USERID $RABBIT_PASSWORD || rc=$? 
+ if [ $rc -ne 0 ]; then + continue; + fi + + break + done + # NOTE(frickler): Remove the default guest user + sudo rabbitmqctl delete_user guest || true + fi +} + +# adds a vhost to the rpc backend +function rpc_backend_add_vhost { + local vhost="$1" + if is_service_enabled rabbit; then + if [ -z `sudo rabbitmqctl list_vhosts | grep $vhost` ]; then + sudo rabbitmqctl add_vhost $vhost + sudo rabbitmqctl set_permissions -p $vhost $RABBIT_USERID ".*" ".*" ".*" + fi + else + echo 'RPC backend does not support vhosts' + return 1 + fi +} + +# Returns the address of the RPC backend in URL format. +function get_transport_url { + local virtual_host=$1 + if is_service_enabled rabbit || { [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; }; then + echo "rabbit://$RABBIT_USERID:$RABBIT_PASSWORD@$RABBIT_HOST:5672/$virtual_host" + fi +} + +# Returns the address of the Notification backend in URL format. This +# should be used to set the transport_url option in the +# oslo_messaging_notifications group. +function get_notification_url { + local virtual_host=$1 + if is_service_enabled rabbit || { [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; }; then + echo "rabbit://$RABBIT_USERID:$RABBIT_PASSWORD@$RABBIT_HOST:5672/$virtual_host" + fi +} + +# iniset configuration +function iniset_rpc_backend { + local package=$1 + local file=$2 + local section=${3:-DEFAULT} + local virtual_host=$4 + if is_service_enabled rabbit || { [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; }; then + iniset $file $section transport_url $(get_transport_url "$virtual_host") + if [ -n "$RABBIT_HEARTBEAT_TIMEOUT_THRESHOLD" ]; then + iniset $file oslo_messaging_rabbit heartbeat_timeout_threshold $RABBIT_HEARTBEAT_TIMEOUT_THRESHOLD + fi + if [ -n "$RABBIT_HEARTBEAT_RATE" ]; then + iniset $file oslo_messaging_rabbit heartbeat_rate $RABBIT_HEARTBEAT_RATE + fi + fi +} + +function rabbit_setuser { + local user="$1" pass="$2" found="" out="" + out=$(sudo rabbitmqctl list_users) || + { echo "failed to list users" 1>&2; return 1; } + found=$(echo "$out" | awk '$1 == user { print $1 }' "user=$user") + if [ "$found" = "$user" ]; then + sudo rabbitmqctl change_password "$user" "$pass" || + { echo "failed changing pass for '$user'" 1>&2; return 1; } + else + sudo rabbitmqctl add_user "$user" "$pass" || + { echo "failed changing pass for $user"; return 1; } + fi + sudo rabbitmqctl set_permissions "$user" ".*" ".*" ".*" +} + +# Restore xtrace +$_XTRACE_RPC_BACKEND + +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: diff --git a/lib/stack b/lib/stack new file mode 100644 index 0000000000..bada26f1c2 --- /dev/null +++ b/lib/stack @@ -0,0 +1,40 @@ +#!/bin/bash +# +# lib/stack +# +# These functions are code snippets pulled out of ``stack.sh`` for easier +# re-use by Grenade. They can assume the same environment is available +# as in the lower part of ``stack.sh``, namely a valid stackrc has been sourced +# as well as all of the ``lib/*`` files for the services have been sourced. +# +# For clarity, all functions declared here that came from ``stack.sh`` +# shall be named with the prefix ``stack_``. + + +# Functions +# --------- + +# Generic service install handles venv creation if configured for service +# stack_install_service service +function stack_install_service { + local service=$1 + if type install_${service} >/dev/null 2>&1; then + # FIXME(dhellmann): Needs to be python3-aware at some point. 
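+        # (For reference: a service opts in to its own venv by registering
+        # an entry in PROJECT_VENV, as lib/placement does with
+        #   PROJECT_VENV["placement"]=${PLACEMENT_DIR}.venv
+        # so the branch below is only taken for services that did so.)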
+ if [[ ${USE_VENV} = True && -n ${PROJECT_VENV[$service]:-} ]]; then + rm -rf ${PROJECT_VENV[$service]} + source $TOP_DIR/tools/build_venv.sh ${PROJECT_VENV[$service]} ${ADDITIONAL_VENV_PACKAGES//,/ } + export PIP_VIRTUAL_ENV=${PROJECT_VENV[$service]:-} + + # Install other OpenStack prereqs that might come from source repos + install_oslo + install_keystonemiddleware + fi + install_${service} + if [[ ${USE_VENV} = True && -n ${PROJECT_VENV[$service]:-} ]]; then + unset PIP_VIRTUAL_ENV + fi + else + echo "No function declared with name 'install_${service}'." + exit 1 + fi +} diff --git a/lib/swift b/lib/swift new file mode 100644 index 0000000000..862927437d --- /dev/null +++ b/lib/swift @@ -0,0 +1,865 @@ +#!/bin/bash +# +# lib/swift +# Functions to control the configuration and operation of the **Swift** service + +# Dependencies: +# +# - ``functions`` file +# - ``apache`` file +# - ``DEST``, `SWIFT_HASH` must be defined +# - ``STACK_USER`` must be defined +# - ``SWIFT_DATA_DIR`` or ``DATA_DIR`` must be defined +# - ``lib/keystone`` file +# +# ``stack.sh`` calls the entry points in this order: +# +# - install_swift +# - _config_swift_apache_wsgi +# - configure_swift +# - init_swift +# - start_swift +# - stop_swift +# - cleanup_swift +# - _cleanup_swift_apache_wsgi + +# Save trace setting +_XTRACE_LIB_SWIFT=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + +if is_service_enabled tls-proxy; then + SWIFT_SERVICE_PROTOCOL="https" +fi + +# Set up default directories +GITDIR["python-swiftclient"]=$DEST/python-swiftclient +SWIFT_DIR=$DEST/swift + +# Swift virtual environment +if [[ ${USE_VENV} = True ]]; then + PROJECT_VENV["swift"]=${SWIFT_DIR}.venv + SWIFT_BIN_DIR=${PROJECT_VENV["swift"]}/bin +else + SWIFT_BIN_DIR=$(get_python_exec_prefix) +fi + +SWIFT_APACHE_WSGI_DIR=${SWIFT_APACHE_WSGI_DIR:-/var/www/swift} + +SWIFT_SERVICE_PROTOCOL=${SWIFT_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} +SWIFT_DEFAULT_BIND_PORT=${SWIFT_DEFAULT_BIND_PORT:-8080} +SWIFT_DEFAULT_BIND_PORT_INT=${SWIFT_DEFAULT_BIND_PORT_INT:-8081} +SWIFT_SERVICE_LOCAL_HOST=${SWIFT_SERVICE_LOCAL_HOST:-$SERVICE_LOCAL_HOST} +SWIFT_SERVICE_LISTEN_ADDRESS=${SWIFT_SERVICE_LISTEN_ADDRESS:-$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)} + +# TODO: add logging to different location. + +# Set ``SWIFT_DATA_DIR`` to the location of swift drives and objects. +# Default is the common DevStack data directory. +SWIFT_DATA_DIR=${SWIFT_DATA_DIR:-${DATA_DIR}/swift} +SWIFT_DISK_IMAGE=${SWIFT_DATA_DIR}/drives/images/swift.img + +# Set ``SWIFT_CONF_DIR`` to the location of the configuration files. +# Default is ``/etc/swift``. +SWIFT_CONF_DIR=${SWIFT_CONF_DIR:-/etc/swift} + +if is_service_enabled s-proxy && is_service_enabled s3api; then + # If we are using ``s3api``, we can default the S3 port to swift instead + # of nova-objectstore + S3_SERVICE_PORT=${S3_SERVICE_PORT:-$SWIFT_DEFAULT_BIND_PORT} +fi + +if is_service_enabled g-api; then + # Minimum Cinder volume size is 1G so if Swift backend for Glance is + # only 1G we can not upload volume to image. + # Increase Swift disk size up to 2G + SWIFT_LOOPBACK_DISK_SIZE_DEFAULT=2G + SWIFT_MAX_FILE_SIZE_DEFAULT=1073741824 # 1G +else + # DevStack will create a loop-back disk formatted as XFS to store the + # swift data. Set ``SWIFT_LOOPBACK_DISK_SIZE`` to the disk size in + # kilobytes. + # Default is 1 gigabyte. + SWIFT_LOOPBACK_DISK_SIZE_DEFAULT=1G + SWIFT_MAX_FILE_SIZE_DEFAULT=536870912 # 512M +fi + +# if tempest enabled the default size is 6 Gigabyte. 
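+# Whichever default applies, the size can still be pinned explicitly from
+# local.conf; an illustrative override would be:
+#   SWIFT_LOOPBACK_DISK_SIZE=10G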
+if is_service_enabled tempest; then + SWIFT_LOOPBACK_DISK_SIZE_DEFAULT=${SWIFT_LOOPBACK_DISK_SIZE:-6G} + SWIFT_MAX_FILE_SIZE_DEFAULT=5368709122 # Swift default 5G +fi + +SWIFT_LOOPBACK_DISK_SIZE=${SWIFT_LOOPBACK_DISK_SIZE:-$SWIFT_LOOPBACK_DISK_SIZE_DEFAULT} + +# Set ``SWIFT_EXTRAS_MIDDLEWARE`` to extras middlewares. +# Default is ``staticweb, formpost`` +SWIFT_EXTRAS_MIDDLEWARE=${SWIFT_EXTRAS_MIDDLEWARE:-formpost staticweb} + +# Set ``SWIFT_EXTRAS_MIDDLEWARE_LAST`` to extras middlewares that need to be at +# the end of the pipeline. +SWIFT_EXTRAS_MIDDLEWARE_LAST=${SWIFT_EXTRAS_MIDDLEWARE_LAST:-} + +# Set ``SWIFT_EXTRAS_MIDDLEWARE_NO_AUTH`` to extras middlewares that need to be at +# the beginning of the pipeline, before authentication middlewares. +SWIFT_EXTRAS_MIDDLEWARE_NO_AUTH=${SWIFT_EXTRAS_MIDDLEWARE_NO_AUTH:-crossdomain} + +# The ring uses a configurable number of bits from a path's MD5 hash as +# a partition index that designates a device. The number of bits kept +# from the hash is known as the partition power, and 2 to the partition +# power indicates the partition count. Partitioning the full MD5 hash +# ring allows other parts of the cluster to work in batches of items at +# once which ends up either more efficient or at least less complex than +# working with each item separately or the entire cluster all at once. +# By default we define 9 for the partition count (which mean 512). +SWIFT_PARTITION_POWER_SIZE=${SWIFT_PARTITION_POWER_SIZE:-9} + +# Set ``SWIFT_REPLICAS`` to configure how many replicas are to be +# configured for your Swift cluster. By default we are configuring +# only one replica since this is way less CPU and memory intensive. If +# you are planning to test swift replication you may want to set this +# up to 3. +SWIFT_REPLICAS=${SWIFT_REPLICAS:-1} +SWIFT_REPLICAS_SEQ=$(seq ${SWIFT_REPLICAS}) + +# Set ``SWIFT_START_ALL_SERVICES`` to control whether all Swift +# services (including the *-auditor, *-replicator, *-reconstructor, etc. +# daemons) should be started. +SWIFT_START_ALL_SERVICES=$(trueorfalse True SWIFT_START_ALL_SERVICES) + +# Set ``SWIFT_LOG_TOKEN_LENGTH`` to configure how many characters of an auth +# token should be placed in the logs. When keystone is used with PKI tokens, +# the token values can be huge, seemingly larger the 2K, at the least. We +# restrict it here to a default of 12 characters, which should be enough to +# trace through the logs when looking for its use. 
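+# An illustrative local.conf override to reveal more of each token:
+#   SWIFT_LOG_TOKEN_LENGTH=20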
+SWIFT_LOG_TOKEN_LENGTH=${SWIFT_LOG_TOKEN_LENGTH:-12} + +# Set ``SWIFT_MAX_HEADER_SIZE`` to configure the maximum length of headers in +# Swift API +SWIFT_MAX_HEADER_SIZE=${SWIFT_MAX_HEADER_SIZE:-16384} + +# Set ``SWIFT_MAX_FILE_SIZE`` to configure the maximum file size in Swift API +# Default 500MB because the loopback file used for swift could be 1 or 2 GB +SWIFT_MAX_FILE_SIZE=${SWIFT_MAX_FILE_SIZE:-$SWIFT_MAX_FILE_SIZE_DEFAULT} + +# Set ``OBJECT_PORT_BASE``, ``CONTAINER_PORT_BASE``, ``ACCOUNT_PORT_BASE`` +# Port bases used in port number calculation for the service "nodes" +# The specified port number will be used, the additional ports calculated by +# base_port + node_num * 10 +OBJECT_PORT_BASE=${OBJECT_PORT_BASE:-6613} +CONTAINER_PORT_BASE=${CONTAINER_PORT_BASE:-6611} +ACCOUNT_PORT_BASE=${ACCOUNT_PORT_BASE:-6612} + +# Enable tempurl feature +SWIFT_ENABLE_TEMPURLS=${SWIFT_ENABLE_TEMPURLS:-False} +SWIFT_TEMPURL_KEY=${SWIFT_TEMPURL_KEY:-} + +# Toggle for deploying Swift under HTTPD + mod_wsgi +SWIFT_USE_MOD_WSGI=${SWIFT_USE_MOD_WSGI:-False} + +# A space-separated list of storage node IPs that +# should be used to create the Swift rings +SWIFT_STORAGE_IPS=${SWIFT_STORAGE_IPS:-} + + +# Functions +# --------- + +# Test if any Swift services are enabled +# is_swift_enabled +function is_swift_enabled { + [[ ,${DISABLED_SERVICES} =~ ,"swift" ]] && return 1 + [[ ,${ENABLED_SERVICES} =~ ,"s-" ]] && return 0 + return 1 +} + +# cleanup_swift() - Remove residual data files +function cleanup_swift { + rm -f ${SWIFT_CONF_DIR}{*.builder,*.ring.gz,backups/*.builder,backups/*.ring.gz} + + destroy_disk ${SWIFT_DISK_IMAGE} ${SWIFT_DATA_DIR}/drives/sdb1 + + rm -rf ${SWIFT_DATA_DIR}/run/ + if [ "$SWIFT_USE_MOD_WSGI" == "True" ]; then + _cleanup_swift_apache_wsgi + fi +} + +# _cleanup_swift_apache_wsgi() - Remove wsgi files, disable and remove apache vhost file +function _cleanup_swift_apache_wsgi { + sudo rm -f $SWIFT_APACHE_WSGI_DIR/*.wsgi + disable_apache_site proxy-server + local node_number type + for node_number in ${SWIFT_REPLICAS_SEQ}; do + for type in object container account; do + local site_name=${type}-server-${node_number} + disable_apache_site ${site_name} + sudo rm -f $(apache_site_config_for ${site_name}) + done + done +} + +# _config_swift_apache_wsgi() - Set WSGI config files of Swift +function _config_swift_apache_wsgi { + sudo mkdir -p ${SWIFT_APACHE_WSGI_DIR} + local proxy_port=${SWIFT_DEFAULT_BIND_PORT} + + # copy proxy vhost and wsgi file + sudo cp ${SWIFT_DIR}/examples/apache2/proxy-server.template $(apache_site_config_for proxy-server) + sudo sed -e " + /^#/d;/^$/d; + s/%PORT%/$proxy_port/g; + s/%SERVICENAME%/proxy-server/g; + s/%APACHE_NAME%/${APACHE_NAME}/g; + s/%USER%/${STACK_USER}/g; + " -i $(apache_site_config_for proxy-server) + enable_apache_site proxy-server + + sudo cp ${SWIFT_DIR}/examples/wsgi/proxy-server.wsgi.template ${SWIFT_APACHE_WSGI_DIR}/proxy-server.wsgi + sudo sed -e " + /^#/d;/^$/d; + s/%SERVICECONF%/proxy-server.conf/g; + " -i ${SWIFT_APACHE_WSGI_DIR}/proxy-server.wsgi + + # copy apache vhost file and set name and port + local node_number + for node_number in ${SWIFT_REPLICAS_SEQ}; do + local object_port + object_port=$(( OBJECT_PORT_BASE + 10 * (node_number - 1) )) + local container_port + container_port=$(( CONTAINER_PORT_BASE + 10 * (node_number - 1) )) + local account_port + account_port=$(( ACCOUNT_PORT_BASE + 10 * (node_number - 1) )) + + sudo cp ${SWIFT_DIR}/examples/apache2/object-server.template $(apache_site_config_for object-server-${node_number}) + 
sudo sed -e " + s/%PORT%/$object_port/g; + s/%SERVICENAME%/object-server-${node_number}/g; + s/%APACHE_NAME%/${APACHE_NAME}/g; + s/%USER%/${STACK_USER}/g; + " -i $(apache_site_config_for object-server-${node_number}) + enable_apache_site object-server-${node_number} + + sudo cp ${SWIFT_DIR}/examples/wsgi/object-server.wsgi.template ${SWIFT_APACHE_WSGI_DIR}/object-server-${node_number}.wsgi + sudo sed -e " + /^#/d;/^$/d; + s/%SERVICECONF%/object-server\/${node_number}.conf/g; + " -i ${SWIFT_APACHE_WSGI_DIR}/object-server-${node_number}.wsgi + + sudo cp ${SWIFT_DIR}/examples/apache2/container-server.template $(apache_site_config_for container-server-${node_number}) + sudo sed -e " + /^#/d;/^$/d; + s/%PORT%/$container_port/g; + s/%SERVICENAME%/container-server-${node_number}/g; + s/%APACHE_NAME%/${APACHE_NAME}/g; + s/%USER%/${STACK_USER}/g; + " -i $(apache_site_config_for container-server-${node_number}) + enable_apache_site container-server-${node_number} + + sudo cp ${SWIFT_DIR}/examples/wsgi/container-server.wsgi.template ${SWIFT_APACHE_WSGI_DIR}/container-server-${node_number}.wsgi + sudo sed -e " + /^#/d;/^$/d; + s/%SERVICECONF%/container-server\/${node_number}.conf/g; + " -i ${SWIFT_APACHE_WSGI_DIR}/container-server-${node_number}.wsgi + + sudo cp ${SWIFT_DIR}/examples/apache2/account-server.template $(apache_site_config_for account-server-${node_number}) + sudo sed -e " + /^#/d;/^$/d; + s/%PORT%/$account_port/g; + s/%SERVICENAME%/account-server-${node_number}/g; + s/%APACHE_NAME%/${APACHE_NAME}/g; + s/%USER%/${STACK_USER}/g; + " -i $(apache_site_config_for account-server-${node_number}) + enable_apache_site account-server-${node_number} + + sudo cp ${SWIFT_DIR}/examples/wsgi/account-server.wsgi.template ${SWIFT_APACHE_WSGI_DIR}/account-server-${node_number}.wsgi + sudo sed -e " + /^#/d;/^$/d; + s/%SERVICECONF%/account-server\/${node_number}.conf/g; + " -i ${SWIFT_APACHE_WSGI_DIR}/account-server-${node_number}.wsgi + done +} + +# This function generates an object/container/account configuration +# emulating 4 nodes on different ports +function generate_swift_config_services { + local swift_node_config=$1 + local node_id=$2 + local bind_port=$3 + local server_type=$4 + + log_facility=$(( node_id - 1 )) + local node_path=${SWIFT_DATA_DIR}/${node_number} + + iniuncomment ${swift_node_config} DEFAULT user + iniset ${swift_node_config} DEFAULT user ${STACK_USER} + + iniuncomment ${swift_node_config} DEFAULT bind_port + iniset ${swift_node_config} DEFAULT bind_port ${bind_port} + + iniuncomment ${swift_node_config} DEFAULT swift_dir + iniset ${swift_node_config} DEFAULT swift_dir ${SWIFT_CONF_DIR} + + iniuncomment ${swift_node_config} DEFAULT devices + iniset ${swift_node_config} DEFAULT devices ${node_path} + + iniuncomment ${swift_node_config} DEFAULT log_facility + iniset ${swift_node_config} DEFAULT log_facility LOG_LOCAL${log_facility} + + iniuncomment ${swift_node_config} DEFAULT workers + iniset ${swift_node_config} DEFAULT workers ${API_WORKERS:-1} + + iniuncomment ${swift_node_config} DEFAULT disable_fallocate + iniset ${swift_node_config} DEFAULT disable_fallocate true + + iniuncomment ${swift_node_config} DEFAULT mount_check + iniset ${swift_node_config} DEFAULT mount_check false + + iniuncomment ${swift_node_config} ${server_type}-replicator rsync_module + iniset ${swift_node_config} ${server_type}-replicator rsync_module "{replication_ip}::${server_type}{replication_port}" + + # Using a sed and not iniset/iniuncomment because we want to a global + # modification and make sure it 
works for new sections. + sed -i -e "s,#[ ]*recon_cache_path .*,recon_cache_path = ${SWIFT_DATA_DIR}/cache," ${swift_node_config} +} + +# configure_swift() - Set config files, create data dirs and loop image +function configure_swift { + local swift_pipeline="${SWIFT_EXTRAS_MIDDLEWARE_NO_AUTH}" + local node_number + local swift_node_config + local swift_log_dir + + # Make sure to kill all swift processes first + $SWIFT_BIN_DIR/swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true + + sudo install -d -o ${STACK_USER} ${SWIFT_CONF_DIR} + sudo install -d -o ${STACK_USER} ${SWIFT_CONF_DIR}/{object,container,account}-server + + if [[ "$SWIFT_CONF_DIR" != "/etc/swift" ]]; then + # Some swift tools are hard-coded to use ``/etc/swift`` and are apparently not going to be fixed. + # Create a symlink if the config dir is moved + sudo ln -sf ${SWIFT_CONF_DIR} /etc/swift + fi + + # Swift use rsync to synchronize between all the different + # partitions (which make more sense when you have a multi-node + # setup) we configure it with our version of rsync. + sed -e " + s/%GROUP%/$(id -g -n ${STACK_USER})/; + s/%USER%/${STACK_USER}/; + s,%SWIFT_DATA_DIR%,$SWIFT_DATA_DIR,; + " $FILES/swift/rsyncd.conf | sudo tee /etc/rsyncd.conf + # rsyncd.conf just prepared for 4 nodes + if is_ubuntu; then + sudo sed -i '/^RSYNC_ENABLE=false/ { s/false/true/ }' /etc/default/rsync + elif [ -e /etc/xinetd.d/rsync ]; then + sudo sed -i '/disable *= *yes/ { s/yes/no/ }' /etc/xinetd.d/rsync + fi + + SWIFT_CONFIG_PROXY_SERVER=${SWIFT_CONF_DIR}/proxy-server.conf + cp ${SWIFT_DIR}/etc/proxy-server.conf-sample ${SWIFT_CONFIG_PROXY_SERVER} + cp ${SWIFT_DIR}/etc/internal-client.conf-sample ${SWIFT_CONF_DIR}/internal-client.conf + + # To run container sync feature introduced in Swift ver 1.12.0, + # container sync "realm" is added in container-sync-realms.conf + local csyncfile=${SWIFT_CONF_DIR}/container-sync-realms.conf + cp ${SWIFT_DIR}/etc/container-sync-realms.conf-sample ${csyncfile} + iniset ${csyncfile} realm1 key realm1key + iniset ${csyncfile} realm1 cluster_name1 "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$SWIFT_DEFAULT_BIND_PORT/v1/" + + iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT user + iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT user ${STACK_USER} + + iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT swift_dir + iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT swift_dir ${SWIFT_CONF_DIR} + + iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT workers + iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT workers 1 + + iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT log_level + iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT log_level DEBUG + + iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_ip + iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_ip ${SWIFT_SERVICE_LISTEN_ADDRESS} + + iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port + if is_service_enabled tls-proxy; then + iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port ${SWIFT_DEFAULT_BIND_PORT_INT} + else + iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port ${SWIFT_DEFAULT_BIND_PORT} + fi + + # DevStack is commonly run in a small slow environment, so bump the timeouts up. 
+ # ``node_timeout`` is the node read operation response time to the proxy server + # ``conn_timeout`` is how long it takes a connect() system call to return + iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server node_timeout 120 + iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server conn_timeout 20 + + # Versioned Writes + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:versioned_writes allow_versioned_writes true + + # Add sha1 temporary https://storyboard.openstack.org/#!/story/2010068 + if [[ "$SWIFT_ENABLE_TEMPURLS" == "True" ]]; then + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:tempurl allowed_digests "sha1 sha256 sha512" + fi + + # Configure Ceilometer + if is_service_enabled ceilometer; then + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer "set log_level" "WARN" + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer paste.filter_factory "ceilometermiddleware.swift:filter_factory" + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer control_exchange "swift" + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer url $(get_notification_url) + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer driver "messaging" + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer topic "notifications" + SWIFT_EXTRAS_MIDDLEWARE_LAST="${SWIFT_EXTRAS_MIDDLEWARE_LAST} ceilometer" + fi + + # Restrict the length of auth tokens in the Swift ``proxy-server`` logs. + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:proxy-logging reveal_sensitive_prefix ${SWIFT_LOG_TOKEN_LENGTH} + + # By default Swift will be installed with Keystone and tempauth middleware + # and add the s3api middleware if its configured for it. The token for + # tempauth would be prefixed with the reseller_prefix setting `TEMPAUTH_` the + # token for keystoneauth would have the standard reseller_prefix `AUTH_` + if is_service_enabled s3api;then + swift_pipeline+=" s3api" + fi + if is_service_enabled keystone; then + swift_pipeline+=" authtoken" + if is_service_enabled s3api;then + swift_pipeline+=" s3token" + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:s3token auth_uri ${KEYSTONE_SERVICE_URI_V3} + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:s3token delay_auth_decision true + fi + swift_pipeline+=" keystoneauth" + fi + + swift_pipeline+=" tempauth " + + sed -i "/^pipeline/ { s/tempauth/${swift_pipeline} ${SWIFT_EXTRAS_MIDDLEWARE}/ ;}" ${SWIFT_CONFIG_PROXY_SERVER} + sed -i "/^pipeline/ { s/proxy-server/${SWIFT_EXTRAS_MIDDLEWARE_LAST} proxy-server/ ; }" ${SWIFT_CONFIG_PROXY_SERVER} + + iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server account_autocreate true + iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server allow_account_management true + + # Configure Crossdomain + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:crossdomain use "egg:swift#crossdomain" + + # Configure authtoken middleware to use the same Python logging + # adapter provided by the Swift ``proxy-server``, so that request transaction + # IDs will included in all of its log messages. 
+ iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken log_name swift + + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken paste.filter_factory keystonemiddleware.auth_token:filter_factory + configure_keystone_authtoken_middleware $SWIFT_CONFIG_PROXY_SERVER swift filter:authtoken + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken delay_auth_decision 1 + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken cache swift.cache + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken include_service_catalog False + + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:keystoneauth use "egg:swift#keystoneauth" + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:keystoneauth operator_roles "Member, admin" + + # Configure Tempauth. In the sample config file Keystoneauth is commented + # out. Make sure we uncomment Tempauth after we uncomment Keystoneauth + # otherwise, this code also sets the reseller_prefix for Keystoneauth. + iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} filter:tempauth account_autocreate + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:tempauth reseller_prefix "TEMPAUTH" + + # Allow both reseller prefixes to be used with domain_remap + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:domain_remap reseller_prefixes "AUTH, TEMPAUTH" + + cp ${SWIFT_DIR}/etc/swift.conf-sample ${SWIFT_CONF_DIR}/swift.conf + iniset ${SWIFT_CONF_DIR}/swift.conf swift-hash swift_hash_path_suffix ${SWIFT_HASH} + iniset ${SWIFT_CONF_DIR}/swift.conf swift-constraints max_header_size ${SWIFT_MAX_HEADER_SIZE} + iniset ${SWIFT_CONF_DIR}/swift.conf swift-constraints max_file_size ${SWIFT_MAX_FILE_SIZE} + + local node_number + for node_number in ${SWIFT_REPLICAS_SEQ}; do + local swift_node_config=${SWIFT_CONF_DIR}/object-server/${node_number}.conf + cp ${SWIFT_DIR}/etc/object-server.conf-sample ${swift_node_config} + generate_swift_config_services ${swift_node_config} ${node_number} $(( OBJECT_PORT_BASE + 10 * (node_number - 1) )) object + iniuncomment ${swift_node_config} DEFAULT bind_ip + iniset ${swift_node_config} DEFAULT bind_ip ${SWIFT_SERVICE_LISTEN_ADDRESS} + iniset ${swift_node_config} filter:recon recon_cache_path ${SWIFT_DATA_DIR}/cache + + swift_node_config=${SWIFT_CONF_DIR}/container-server/${node_number}.conf + cp ${SWIFT_DIR}/etc/container-server.conf-sample ${swift_node_config} + generate_swift_config_services ${swift_node_config} ${node_number} $(( CONTAINER_PORT_BASE + 10 * (node_number - 1) )) container + iniuncomment ${swift_node_config} DEFAULT bind_ip + iniset ${swift_node_config} DEFAULT bind_ip ${SWIFT_SERVICE_LISTEN_ADDRESS} + + swift_node_config=${SWIFT_CONF_DIR}/account-server/${node_number}.conf + cp ${SWIFT_DIR}/etc/account-server.conf-sample ${swift_node_config} + generate_swift_config_services ${swift_node_config} ${node_number} $(( ACCOUNT_PORT_BASE + 10 * (node_number - 1) )) account + iniuncomment ${swift_node_config} DEFAULT bind_ip + iniset ${swift_node_config} DEFAULT bind_ip ${SWIFT_SERVICE_LISTEN_ADDRESS} + done + + # Set new accounts in tempauth to match keystone project/user (to make testing easier) + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:tempauth user_swiftprojecttest1_swiftusertest1 "testing .admin" + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:tempauth user_swiftprojecttest2_swiftusertest2 "testing2 .admin" + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:tempauth user_swiftprojecttest1_swiftusertest3 "testing3 .admin" + + testfile=${SWIFT_CONF_DIR}/test.conf + cp ${SWIFT_DIR}/test/sample.conf ${testfile} + + # Set accounts for functional tests + iniset ${testfile} func_test account 
swiftprojecttest1 + iniset ${testfile} func_test username swiftusertest1 + iniset ${testfile} func_test username3 swiftusertest3 + iniset ${testfile} func_test account2 swiftprojecttest2 + iniset ${testfile} func_test username2 swiftusertest2 + iniset ${testfile} func_test account4 swiftprojecttest4 + iniset ${testfile} func_test username4 swiftusertest4 + iniset ${testfile} func_test password4 testing4 + iniset ${testfile} func_test domain4 swift_test + + if is_service_enabled keystone; then + iniuncomment ${testfile} func_test auth_version + local auth_vers + auth_vers=$(iniget ${testfile} func_test auth_version) + iniset ${testfile} func_test auth_host ${KEYSTONE_SERVICE_HOST} + if [[ "$KEYSTONE_SERVICE_PROTOCOL" == "https" ]]; then + iniset ${testfile} func_test auth_port 443 + else + iniset ${testfile} func_test auth_port 80 + fi + iniset ${testfile} func_test auth_uri ${KEYSTONE_SERVICE_URI} + if [[ "$auth_vers" == "3" ]]; then + iniset ${testfile} func_test auth_prefix /identity/v3/ + else + iniset ${testfile} func_test auth_prefix /identity/v2.0/ + fi + if is_service_enabled tls-proxy; then + iniset ${testfile} func_test cafile ${SSL_BUNDLE_FILE} + iniset ${testfile} func_test web_front_end apache2 + fi + fi + + local user_group + user_group=$(id -g ${STACK_USER}) + sudo install -d -o ${STACK_USER} -g ${user_group} ${SWIFT_DATA_DIR} + + local swift_log_dir=${SWIFT_DATA_DIR}/logs + sudo rm -rf ${swift_log_dir} + local swift_log_group=adm + sudo install -d -o ${STACK_USER} -g ${swift_log_group} ${swift_log_dir}/hourly + + if [[ $SYSLOG != "False" ]]; then + sed "s,%SWIFT_LOGDIR%,${swift_log_dir}," $FILES/swift/rsyslog.conf | sudo \ + tee /etc/rsyslog.d/10-swift.conf + echo "MaxMessageSize 6k" | sudo tee /etc/rsyslog.d/99-maxsize.conf + # restart syslog to take the changes + sudo killall -HUP rsyslogd + fi + + if [ "$SWIFT_USE_MOD_WSGI" == "True" ]; then + _config_swift_apache_wsgi + fi +} + +# create_swift_disk - Create Swift backing disk +function create_swift_disk { + local node_number + + # First do a bit of setup by creating the directories and + # changing the permissions so we can run it as our user. + + local user_group + user_group=$(id -g ${STACK_USER}) + sudo install -d -o ${STACK_USER} -g ${user_group} ${SWIFT_DATA_DIR}/{drives,cache,run,logs} + + # Create a loopback disk and format it to XFS. + create_disk ${SWIFT_DISK_IMAGE} ${SWIFT_DATA_DIR}/drives/sdb1 ${SWIFT_LOOPBACK_DISK_SIZE} + + # Create a link to the above mount and + # create all of the directories needed to emulate a few different servers + local node_number + for node_number in ${SWIFT_REPLICAS_SEQ}; do + # node_devices must match *.conf devices option + local node_devices=${SWIFT_DATA_DIR}/${node_number} + local real_devices=${SWIFT_DATA_DIR}/drives/sdb1/$node_number + sudo ln -sf $real_devices $node_devices; + local device=${real_devices}/sdb1 + [[ -d $device ]] && continue + sudo install -o ${STACK_USER} -g $user_group -d $device + done +} + +# create_swift_accounts() - Set up standard Swift accounts and extra +# one for tests we do this by attaching all words in the account name +# since we want to make it compatible with tempauth which use +# underscores for separators. 
+ +# Project User Roles Domain +# ------------------------------------------------------------------- +# service swift service default +# swiftprojecttest1 swiftusertest1 admin default +# swiftprojecttest1 swiftusertest3 anotherrole default +# swiftprojecttest2 swiftusertest2 admin default +# swiftprojecttest4 swiftusertest4 admin swift_test + +function create_swift_accounts { + # Defines specific passwords used by ``tools/create_userrc.sh`` + # As these variables are used by ``create_userrc.sh,`` they must be exported + # The _password suffix is expected by ``create_userrc.sh``. + export swiftusertest1_password=testing + export swiftusertest2_password=testing2 + export swiftusertest3_password=testing3 + export swiftusertest4_password=testing4 + + local another_role + another_role=$(get_or_create_role "anotherrole") + + # NOTE(jroll): Swift doesn't need the admin role here, however Ironic uses + # temp urls, which break when uploaded by a non-admin role + create_service_user "swift" "admin" + + get_or_create_service "swift" "object-store" "Swift Service" + get_or_create_endpoint \ + "object-store" \ + "$REGION_NAME" \ + "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$SWIFT_DEFAULT_BIND_PORT/v1/AUTH_\$(project_id)s" \ + "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$SWIFT_DEFAULT_BIND_PORT" + + local swift_project_test1 + swift_project_test1=$(get_or_create_project swiftprojecttest1 default) + die_if_not_set $LINENO swift_project_test1 "Failure creating swift_project_test1" + SWIFT_USER_TEST1=$(get_or_create_user swiftusertest1 $swiftusertest1_password \ + "default" "test@example.com") + die_if_not_set $LINENO SWIFT_USER_TEST1 "Failure creating SWIFT_USER_TEST1" + get_or_add_user_project_role admin $SWIFT_USER_TEST1 $swift_project_test1 + + local swift_user_test3 + swift_user_test3=$(get_or_create_user swiftusertest3 $swiftusertest3_password \ + "default" "test3@example.com") + die_if_not_set $LINENO swift_user_test3 "Failure creating swift_user_test3" + get_or_add_user_project_role $another_role $swift_user_test3 $swift_project_test1 + + local swift_project_test2 + swift_project_test2=$(get_or_create_project swiftprojecttest2 default) + die_if_not_set $LINENO swift_project_test2 "Failure creating swift_project_test2" + + local swift_user_test2 + swift_user_test2=$(get_or_create_user swiftusertest2 $swiftusertest2_password \ + "default" "test2@example.com") + die_if_not_set $LINENO swift_user_test2 "Failure creating swift_user_test2" + get_or_add_user_project_role admin $swift_user_test2 $swift_project_test2 + + local swift_domain + swift_domain=$(get_or_create_domain swift_test 'Used for swift functional testing') + die_if_not_set $LINENO swift_domain "Failure creating swift_test domain" + + local swift_project_test4 + swift_project_test4=$(get_or_create_project swiftprojecttest4 $swift_domain) + die_if_not_set $LINENO swift_project_test4 "Failure creating swift_project_test4" + + local swift_user_test4 + swift_user_test4=$(get_or_create_user swiftusertest4 $swiftusertest4_password \ + $swift_domain "test4@example.com") + die_if_not_set $LINENO swift_user_test4 "Failure creating swift_user_test4" + get_or_add_user_project_role admin $swift_user_test4 $swift_project_test4 +} + +# init_swift() - Initialize rings +function init_swift { + local node_number + # Make sure to kill all swift processes first + $SWIFT_BIN_DIR/swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true + + # Forcibly re-create the backing filesystem + create_swift_disk + + # This is where we create three different rings for 
swift with + # different object servers binding on different ports. + pushd ${SWIFT_CONF_DIR} >/dev/null && { + + rm -f *.builder *.ring.gz backups/*.builder backups/*.ring.gz + + $SWIFT_BIN_DIR/swift-ring-builder object.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1 + $SWIFT_BIN_DIR/swift-ring-builder container.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1 + $SWIFT_BIN_DIR/swift-ring-builder account.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1 + + # The ring will be created on each node, and because the order of + # nodes is identical we can use a seed for rebalancing, making it + # possible to get a ring on each node that uses the same partition + # assignment. + if [[ -n $SWIFT_STORAGE_IPS ]]; then + local node_number + node_number=1 + + for node in ${SWIFT_STORAGE_IPS}; do + $SWIFT_BIN_DIR/swift-ring-builder object.builder add z${node_number}-${node}:${OBJECT_PORT_BASE}/sdb1 1 + $SWIFT_BIN_DIR/swift-ring-builder container.builder add z${node_number}-${node}:${CONTAINER_PORT_BASE}/sdb1 1 + $SWIFT_BIN_DIR/swift-ring-builder account.builder add z${node_number}-${node}:${ACCOUNT_PORT_BASE}/sdb1 1 + let "node_number=node_number+1" + done + + else + + for node_number in ${SWIFT_REPLICAS_SEQ}; do + $SWIFT_BIN_DIR/swift-ring-builder object.builder add z${node_number}-${SWIFT_SERVICE_LOCAL_HOST}:$(( OBJECT_PORT_BASE + 10 * (node_number - 1) ))/sdb1 1 + $SWIFT_BIN_DIR/swift-ring-builder container.builder add z${node_number}-${SWIFT_SERVICE_LOCAL_HOST}:$(( CONTAINER_PORT_BASE + 10 * (node_number - 1) ))/sdb1 1 + $SWIFT_BIN_DIR/swift-ring-builder account.builder add z${node_number}-${SWIFT_SERVICE_LOCAL_HOST}:$(( ACCOUNT_PORT_BASE + 10 * (node_number - 1) ))/sdb1 1 + done + fi + + # We use a seed for rebalancing. Doing this allows us to create + # identical rings on multiple nodes if SWIFT_STORAGE_IPS is the same + $SWIFT_BIN_DIR/swift-ring-builder object.builder rebalance 42 + $SWIFT_BIN_DIR/swift-ring-builder container.builder rebalance 42 + $SWIFT_BIN_DIR/swift-ring-builder account.builder rebalance 42 + } && popd >/dev/null +} + +function install_swift { + git_clone $SWIFT_REPO $SWIFT_DIR $SWIFT_BRANCH + # keystonemiddleware needs to be installed via keystone extras as defined + # in setup.cfg, see bug #1909018 for more details. + setup_develop $SWIFT_DIR keystone + if [ "$SWIFT_USE_MOD_WSGI" == "True" ]; then + install_apache_wsgi + fi +} + +function install_swiftclient { + if use_library_from_git "python-swiftclient"; then + git_clone_by_name "python-swiftclient" + setup_dev_lib "python-swiftclient" + fi +} + +# install_ceilometermiddleware() - Collect source and prepare +# note that this doesn't really have anything to do with ceilometer; +# though ceilometermiddleware has ceilometer in its name as an +# artifact of history, it is not a ceilometer specific tool. It +# simply generates pycadf-based notifications about requests and +# responses on the swift proxy +function install_ceilometermiddleware { + if use_library_from_git "ceilometermiddleware"; then + git_clone_by_name "ceilometermiddleware" + setup_dev_lib "ceilometermiddleware" + else + pip_install_gr ceilometermiddleware + fi +} + +# start_swift() - Start running processes +function start_swift { + # (re)start memcached to make sure we have a clean memcache. 
+ restart_service memcached + + # Start rsync + if is_ubuntu; then + sudo /etc/init.d/rsync restart || : + elif [ -e /etc/xinetd.d/rsync ]; then + start_service xinetd + else + start_service rsyncd + fi + + if [ "$SWIFT_USE_MOD_WSGI" == "True" ]; then + # Apache should serve the "PACO" a.k.a "main" services + restart_apache_server + # The rest of the services should be started in backgroud + $SWIFT_BIN_DIR/swift-init --run-dir=${SWIFT_DATA_DIR}/run rest start + return 0 + fi + + + # By default with only one replica we are launching the proxy, container + # account and object server in screen in foreground. Then, the rest of + # the services is optionally started. + # + # If we have ``SWIFT_REPLICAS`` set to something greater than one + # we first spawn *all* the Swift services then kill the proxy service + # so we can run it in foreground in screen. + # + # ``swift-init ... {stop|restart}`` exits with '1' if no servers are + # running, ignore it just in case + if [[ ${SWIFT_REPLICAS} == 1 ]]; then + local foreground_services type + + foreground_services="object container account" + for type in ${foreground_services}; do + run_process s-${type} "$SWIFT_BIN_DIR/swift-${type}-server ${SWIFT_CONF_DIR}/${type}-server/1.conf -v" + done + + if [[ "$SWIFT_START_ALL_SERVICES" == "True" ]]; then + $SWIFT_BIN_DIR/swift-init --run-dir=${SWIFT_DATA_DIR}/run rest start + else + # The container-sync daemon is strictly needed to pass the container + # sync Tempest tests. + enable_service s-container-sync + run_process s-container-sync "$SWIFT_BIN_DIR/swift-container-sync ${SWIFT_CONF_DIR}/container-server/1.conf" + fi + else + $SWIFT_BIN_DIR/swift-init --run-dir=${SWIFT_DATA_DIR}/run all restart || true + $SWIFT_BIN_DIR/swift-init --run-dir=${SWIFT_DATA_DIR}/run proxy stop || true + fi + + if is_service_enabled tls-proxy; then + local proxy_port=${SWIFT_DEFAULT_BIND_PORT} + start_tls_proxy swift '*' $proxy_port $SERVICE_HOST $SWIFT_DEFAULT_BIND_PORT_INT $SWIFT_MAX_HEADER_SIZE + fi + run_process s-proxy "$SWIFT_BIN_DIR/swift-proxy-server ${SWIFT_CONF_DIR}/proxy-server.conf -v" + + # We also started the storage services, but proxy started last and + # will take the longest to start, so by the time it comes up, we're + # probably fine. + echo "Waiting for swift proxy to start..." + if ! wait_for_service $SERVICE_TIMEOUT $SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$SWIFT_DEFAULT_BIND_PORT/info; then + die $LINENO "swift proxy did not start" + fi + + if [[ "$SWIFT_ENABLE_TEMPURLS" == "True" ]]; then + swift_configure_tempurls + fi +} + +# stop_swift() - Stop running processes +function stop_swift { + local type + + if [ "$SWIFT_USE_MOD_WSGI" == "True" ]; then + $SWIFT_BIN_DIR/swift-init --run-dir=${SWIFT_DATA_DIR}/run rest stop && return 0 + fi + + # screen normally killed by ``unstack.sh`` + if type -p $SWIFT_BIN_DIR/swift-init >/dev/null; then + $SWIFT_BIN_DIR/swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true + fi + # Dump all of the servers + # Maintain the iteration as stop_process() has some desirable side-effects + for type in proxy object container account; do + stop_process s-${type} + done + # Blast out any stragglers + pkill -f swift- || true +} + +function swift_configure_tempurls { + # note we are using swift credentials! 
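+    # (Illustrative usage once the key is set below: a client could mint a
+    # time-limited URL with python-swiftclient, e.g.
+    #   swift tempurl GET 3600 /v1/AUTH_<project_id>/<container>/<object> "$SWIFT_TEMPURL_KEY"
+    # where the project id, container and object names are placeholders.)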
+ openstack --os-cloud="" \ + --os-region-name="$REGION_NAME" \ + --os-auth-url="$KEYSTONE_SERVICE_URI" \ + --os-username="swift" \ + --os-password="$SERVICE_PASSWORD" \ + --os-user-domain-name="$SERVICE_DOMAIN_NAME" \ + --os-project-name="$SERVICE_PROJECT_NAME" \ + --os-project-domain-name="$SERVICE_DOMAIN_NAME" \ + object store account \ + set --property "Temp-URL-Key=$SWIFT_TEMPURL_KEY" +} + +# Restore xtrace +$_XTRACE_LIB_SWIFT + +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: diff --git a/lib/tcpdump b/lib/tcpdump new file mode 100644 index 0000000000..16e8269d02 --- /dev/null +++ b/lib/tcpdump @@ -0,0 +1,43 @@ +#!/bin/bash +# +# lib/tcpdump +# Functions to start and stop a tcpdump + +# Dependencies: +# +# - ``functions`` file + +# ``stack.sh`` calls the entry points in this order: +# +# - start_tcpdump +# - stop_tcpdump + +# Save trace setting +_XTRACE_TCPDUMP=$(set +o | grep xtrace) +set +o xtrace + +TCPDUMP_OUTPUT=${TCPDUMP_OUTPUT:-$LOGDIR/tcpdump.pcap} + +# e.g. for iscsi +# "-i any tcp port 3260" +TCPDUMP_ARGS=${TCPDUMP_ARGS:-""} + +# start_tcpdump() - Start running processes +function start_tcpdump { + # Run a tcpdump with given arguments and save the packet capture + if is_service_enabled tcpdump; then + if [[ -z "${TCPDUMP_ARGS}" ]]; then + die $LINENO "The tcpdump service requires TCPDUMP_ARGS to be set" + fi + touch ${TCPDUMP_OUTPUT} + run_process tcpdump "/usr/sbin/tcpdump -w $TCPDUMP_OUTPUT $TCPDUMP_ARGS" root root + fi +} + +# stop_tcpdump() stop tcpdump process +function stop_tcpdump { + stop_process tcpdump +} + +# Restore xtrace +$_XTRACE_TCPDUMP diff --git a/lib/tempest b/lib/tempest new file mode 100644 index 0000000000..1ebe9c5f1f --- /dev/null +++ b/lib/tempest @@ -0,0 +1,892 @@ +#!/bin/bash +# +# lib/tempest +# Install and configure Tempest + +# Dependencies: +# +# - ``functions`` file +# - ``lib/nova`` service is running +# - Global vars that are assumed to be defined: +# - ``DEST``, ``FILES`` +# - ``ADMIN_PASSWORD`` +# - ``DEFAULT_IMAGE_NAME`` +# - ``DEFAULT_IMAGE_FILE_NAME`` +# - ``S3_SERVICE_PORT`` +# - ``SERVICE_HOST`` +# - ``BASE_SQL_CONN`` ``lib/database`` declares +# - ``PUBLIC_NETWORK_NAME`` +# - ``VIRT_DRIVER`` +# - ``LIBVIRT_TYPE`` +# - ``KEYSTONE_SERVICE_URI_V3`` from lib/keystone +# +# Optional Dependencies: +# +# - ``ALT_*`` +# - ``LIVE_MIGRATION_AVAILABLE`` +# - ``USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION`` +# - ``DEFAULT_INSTANCE_TYPE`` +# - ``DEFAULT_INSTANCE_USER`` +# - ``DEFAULT_INSTANCE_ALT_USER`` +# - ``CINDER_ENABLED_BACKENDS`` +# - ``CINDER_BACKUP_DRIVER`` +# - ``NOVA_ALLOW_DUPLICATE_NETWORKS`` +# +# ``stack.sh`` calls the entry points in this order: +# +# - install_tempest +# - configure_tempest + +# Save trace setting +_XTRACE_TEMPEST=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + +# Set up default directories +TEMPEST_DIR=$DEST/tempest +TEMPEST_CONFIG_DIR=${TEMPEST_CONFIG_DIR:-$TEMPEST_DIR/etc} +TEMPEST_CONFIG=$TEMPEST_CONFIG_DIR/tempest.conf +TEMPEST_STATE_PATH=${TEMPEST_STATE_PATH:=$DATA_DIR/tempest} + +# This is the timeout that tempest will wait for a VM to change state, +# spawn, delete, etc. +# The default is set to 196 seconds. +BUILD_TIMEOUT=${BUILD_TIMEOUT:-196} + +# This must be False on stable branches, as master tempest +# deps do not match stable branch deps. Set this to True to +# have tempest installed in DevStack by default. 
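+# Illustrative local.conf setting for a stable-branch deployment:
+#   INSTALL_TEMPEST=False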
+INSTALL_TEMPEST=${INSTALL_TEMPEST:-"True"} + +# This variable is passed directly to pip install inside the common tox venv +# that is created +TEMPEST_PLUGINS=${TEMPEST_PLUGINS:-0} + +# Cinder/Volume variables +TEMPEST_VOLUME_DRIVER=${TEMPEST_VOLUME_DRIVER:-default} +TEMPEST_DEFAULT_VOLUME_VENDOR="Open Source" +TEMPEST_VOLUME_VENDOR=${TEMPEST_VOLUME_VENDOR:-$TEMPEST_DEFAULT_VOLUME_VENDOR} +TEMPEST_DEFAULT_STORAGE_PROTOCOL="iSCSI" +TEMPEST_STORAGE_PROTOCOL=${TEMPEST_STORAGE_PROTOCOL:-$TEMPEST_DEFAULT_STORAGE_PROTOCOL} + +# Glance/Image variables +# When Glance image import is enabled, image creation is asynchronous and images +# may not yet be active when tempest looks for them. In that case, we poll +# Glance every TEMPEST_GLANCE_IMPORT_POLL_INTERVAL seconds for the number of +# times specified by TEMPEST_GLANCE_IMPORT_POLL_LIMIT. If you are importing +# multiple images, set TEMPEST_GLANCE_IMAGE_COUNT so the poller does not quit +# too early (though it will not exceed the polling limit). +TEMPEST_GLANCE_IMPORT_POLL_INTERVAL=${TEMPEST_GLANCE_IMPORT_POLL_INTERVAL:-1} +TEMPEST_GLANCE_IMPORT_POLL_LIMIT=${TEMPEST_GLANCE_IMPORT_POLL_LIMIT:-12} +TEMPEST_GLANCE_IMAGE_COUNT=${TEMPEST_GLANCE_IMAGE_COUNT:-1} + +# Neutron/Network variables +IPV6_ENABLED=$(trueorfalse True IPV6_ENABLED) +IPV6_SUBNET_ATTRIBUTES_ENABLED=$(trueorfalse True IPV6_SUBNET_ATTRIBUTES_ENABLED) + +# Do we want to make a configuration where Tempest has admin on +# the cloud. We don't always want to so that we can ensure Tempest +# would work on a public cloud. +TEMPEST_HAS_ADMIN=$(trueorfalse True TEMPEST_HAS_ADMIN) + +# Credential provider configuration option variables +TEMPEST_ALLOW_TENANT_ISOLATION=${TEMPEST_ALLOW_TENANT_ISOLATION:-$TEMPEST_HAS_ADMIN} +TEMPEST_USE_TEST_ACCOUNTS=$(trueorfalse False TEMPEST_USE_TEST_ACCOUNTS) + +# The number of workers tempest is expected to be run with. This is used for +# generating a accounts.yaml for running with test-accounts. This is also the +# same variable that devstack-gate uses to specify the number of workers that +# it will run tempest with +TEMPEST_CONCURRENCY=${TEMPEST_CONCURRENCY:-$(nproc)} + +TEMPEST_FLAVOR_RAM=${TEMPEST_FLAVOR_RAM:-192} +TEMPEST_FLAVOR_ALT_RAM=${TEMPEST_FLAVOR_ALT_RAM:-256} + +TEMPEST_USE_ISO_IMAGE=$(trueorfalse False TEMPEST_USE_ISO_IMAGE) + +# Functions +# --------- + +# remove_disabled_extension - removes disabled extensions from the list of extensions +# to test for a given service +function remove_disabled_extensions { + local extensions_list=$1 + shift + local disabled_exts=$* + remove_disabled_services "$extensions_list" "$disabled_exts" +} + +# image_size_in_gib - converts an image size from bytes to GiB, rounded up +# Takes an image ID parameter as input +function image_size_in_gib { + local size + size=$(openstack --os-cloud devstack-admin image show $1 -c size -f value) + echo $size | python3 -c "import math; print(int(math.ceil(float(int(input()) / 1024.0 ** 3))))" +} + +function set_tempest_venv_constraints { + local tmp_c + tmp_c=$1 + if [[ $TEMPEST_VENV_UPPER_CONSTRAINTS == "master" ]]; then + (cd $REQUIREMENTS_DIR && + git show master:upper-constraints.txt 2>/dev/null || + git show origin/master:upper-constraints.txt) > $tmp_c + # NOTE(gmann): we need to set the below env var pointing to master + # constraints even that is what default in tox.ini. Otherwise it can + # create the issue for grenade run where old and new devstack can have + # different tempest (old and master) to install. 
For details,
+        # refer to https://bugs.launchpad.net/devstack/+bug/2003993
+        export UPPER_CONSTRAINTS_FILE=https://releases.openstack.org/constraints/upper/master
+        export TOX_CONSTRAINTS_FILE=https://releases.openstack.org/constraints/upper/master
+    else
+        echo "Using $TEMPEST_VENV_UPPER_CONSTRAINTS constraints in Tempest virtual env."
+        cat $TEMPEST_VENV_UPPER_CONSTRAINTS > $tmp_c
+        # NOTE: we set both env vars for now; once Tempest starts using the new
+        # TOX_CONSTRAINTS_FILE variable we can remove the old one.
+        export UPPER_CONSTRAINTS_FILE=$TEMPEST_VENV_UPPER_CONSTRAINTS
+        export TOX_CONSTRAINTS_FILE=$TEMPEST_VENV_UPPER_CONSTRAINTS
+    fi
+}
+
+# Makes a call to glance to get a list of active images, ignoring
+# ramdisk and kernel images. Takes two arguments, an array name and a
+# variable name. The array will contain the list of active image UUIDs;
+# if an image with ``DEFAULT_IMAGE_NAME`` is found, its UUID will be
+# set as the value of the img_id ($2) parameter.
+function get_active_images {
+    declare -n img_array=$1
+    declare -n img_id=$2
+
+    # start with a fresh array in case we are called multiple times
+    img_array=()
+
+    # NOTE(gmaan): Most ISO images require ssh to be enabled explicitly,
+    # and setting such images as image_ref or image_ref_alt can cause
+    # tests to fail because many tests that boot a server from image_ref
+    # or image_ref_alt also ssh into it. We skip ISO images in tempest
+    # unless they are requested via TEMPEST_USE_ISO_IMAGE.
+    while read -r IMAGE_NAME IMAGE_UUID DISK_FORMAT; do
+        if [[ "$DISK_FORMAT" == "iso" ]] && [[ "$TEMPEST_USE_ISO_IMAGE" == False ]]; then
+            continue
+        fi
+        if [ "$IMAGE_NAME" = "$DEFAULT_IMAGE_NAME" ]; then
+            img_id="$IMAGE_UUID"
+        fi
+        img_array+=($IMAGE_UUID)
+    done < <(openstack --os-cloud devstack-admin image list --long --property status=active | awk -F'|' '!/^(+--)|ID|aki|ari/ { print $3,$2,$4 }')
+}
+
+function poll_glance_images {
+    declare -n image_array=$1
+    declare -n image_id=$2
+    local -i poll_count
+
+    poll_count=$TEMPEST_GLANCE_IMPORT_POLL_LIMIT
+    while (( poll_count-- > 0 )) ; do
+        sleep $TEMPEST_GLANCE_IMPORT_POLL_INTERVAL
+        get_active_images image_array image_id
+        if (( ${#image_array[*]} >= $TEMPEST_GLANCE_IMAGE_COUNT )) ; then
+            return
+        fi
+    done
+    local msg
+    msg="Polling limit of $TEMPEST_GLANCE_IMPORT_POLL_LIMIT exceeded; "
+    msg+="poll interval was $TEMPEST_GLANCE_IMPORT_POLL_INTERVAL sec"
+    warn $LINENO "$msg"
+}
+
+# configure_tempest() - Set config files, create data dirs, etc
+function configure_tempest {
+    if [[ "$INSTALL_TEMPEST" == "True" ]]; then
+        setup_develop $TEMPEST_DIR
+    else
+        # install testr since it's used to process tempest logs
+        pip_install_gr testrepository
+    fi
+
+    local ENABLED_SERVICES=${SERVICES_FOR_TEMPEST:=$ENABLED_SERVICES}
+
+    local image_lines
+    local images
+    local num_images
+    local image_uuid
+    local image_uuid_alt
+    local password
+    local line
+    local flavors
+    local available_flavors
+    local flavors_ref
+    local flavor_lines
+    local flavor_ref_size
+    local flavor_ref_alt_size
+    local public_network_id
+    local public_router_id
+    local ssh_connect_method="floating"
+    local disk
+
+    # Save IFS
+    ifs=$IFS
+
+    # Glance should already contain images to be used in tempest
+    # testing. Here we simply look for images stored in Glance
+    # and set the appropriate variables for use in the tempest config.
+    # We ignore ramdisk and kernel images and look for the default image
+    # ``DEFAULT_IMAGE_NAME``.
If not found, we set the ``image_uuid`` to the + # first image returned and set ``image_uuid_alt`` to the second, + # if there is more than one returned... + # ... Also ensure we only take active images, so we don't get snapshots in process + declare -a images + + if is_service_enabled glance; then + get_active_images images image_uuid + + if (( ${#images[*]} < $TEMPEST_GLANCE_IMAGE_COUNT )); then + # Glance image import is asynchronous and may be configured + # to do image conversion. If image import is being used, + # it's possible that this code is being executed before the + # import has completed and there may be no active images yet. + if [[ "$GLANCE_USE_IMPORT_WORKFLOW" == "True" ]]; then + poll_glance_images images image_uuid + if (( ${#images[*]} < $TEMPEST_GLANCE_IMAGE_COUNT )); then + echo "Only found ${#images[*]} image(s), was looking for $TEMPEST_GLANCE_IMAGE_COUNT" + exit 1 + fi + fi + fi + + case "${#images[*]}" in + 0) + echo "Found no valid images to use!" + exit 1 + ;; + 1) + if [ -z "$image_uuid" ]; then + image_uuid=${images[0]} + fi + image_uuid_alt=$image_uuid + ;; + *) + if [ -z "$image_uuid" ]; then + image_uuid=${images[0]} + if [ -z "$image_uuid_alt" ]; then + image_uuid_alt=${images[1]} + fi + elif [ -z "$image_uuid_alt" ]; then + for image in ${images[@]}; do + if [[ "$image" != "$image_uuid" ]]; then + image_uuid_alt=$image + break + fi + done + fi + ;; + esac + fi + + # (Re)create ``tempest.conf`` + # Create every time because the image UUIDS are going to change + sudo install -d -o $STACK_USER $TEMPEST_CONFIG_DIR + rm -f $TEMPEST_CONFIG + + local password=${ADMIN_PASSWORD:-secret} + + # See ``lib/keystone`` where these users and tenants are set up + local admin_username=${ADMIN_USERNAME:-admin} + local admin_project_name=${ADMIN_TENANT_NAME:-admin} + local admin_domain_name=${ADMIN_DOMAIN_NAME:-Default} + local alt_username=${ALT_USERNAME:-alt_demo} + local alt_project_name=${ALT_TENANT_NAME:-alt_demo} + local admin_project_id + admin_project_id=$(openstack --os-cloud devstack-admin project list | awk "/ admin / { print \$2 }") + + if is_service_enabled nova; then + # If ``DEFAULT_INSTANCE_TYPE`` is not declared, use the new behavior + # Tempest creates its own instance types + available_flavors=$(openstack --os-cloud devstack-admin flavor list) + if [[ -z "$DEFAULT_INSTANCE_TYPE" ]]; then + if [[ ! ( $available_flavors =~ 'm1.nano' ) ]]; then + # Determine the flavor disk size based on the image size. + disk=$(image_size_in_gib $image_uuid) + ram=${TEMPEST_FLAVOR_RAM} + openstack --os-cloud devstack-admin flavor create --id 42 --ram ${ram} --disk $disk --vcpus 1 --property hw_rng:allowed=True m1.nano + fi + flavor_ref=42 + if [[ ! ( $available_flavors =~ 'm1.micro' ) ]]; then + # Determine the alt flavor disk size based on the alt image size. + disk=$(image_size_in_gib $image_uuid_alt) + ram=${TEMPEST_FLAVOR_ALT_RAM} + openstack --os-cloud devstack-admin flavor create --id 84 --ram ${ram} --disk $disk --vcpus 1 --property hw_rng:allowed=True m1.micro + fi + flavor_ref_alt=84 + else + # Check Nova for existing flavors, if ``DEFAULT_INSTANCE_TYPE`` is set use it. 
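+            # For example (illustrative), DEFAULT_INSTANCE_TYPE=m1.small in
+            # local.conf makes the loop below pick that flavor's ID as flavor_ref.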
+ IFS=$'\r\n' + flavors="" + for line in $available_flavors; do + f=$(echo $line | awk "/ $DEFAULT_INSTANCE_TYPE / { print \$2 }") + flavors="$flavors $f" + done + + for line in $available_flavors; do + flavors="$flavors `echo $line | grep -v "^\(|\s*ID\|+--\)" | cut -d' ' -f2`" + done + + IFS=" " + flavors=($flavors) + num_flavors=${#flavors[*]} + echo "Found $num_flavors flavors" + if [[ $num_flavors -eq 0 ]]; then + echo "Found no valid flavors to use!" + exit 1 + fi + flavor_ref=${flavors[0]} + flavor_ref_alt=$flavor_ref + flavor_ref_size=$(openstack --os-cloud devstack-admin flavor show --format value --column disk "${flavor_ref}") + + # Ensure ``flavor_ref`` and ``flavor_ref_alt`` have different values. + # Some resize instance in tempest tests depends on this. + for f in ${flavors[@]:1}; do + if [[ "$f" != "$flavor_ref" ]]; then + # + # NOTE(sdatko): Resize is only possible when target flavor + # is not smaller than the original one. For + # Tempest tests, in case there was a bigger + # flavor selected as default, e.g. m1.small, + # we need to perform additional check. + # + flavor_ref_alt_size=$(openstack --os-cloud devstack-admin flavor show --format value --column disk "${f}") + if [[ "${flavor_ref_alt_size}" -lt "${flavor_ref_size}" ]]; then + continue + fi + + flavor_ref_alt=$f + break + fi + done + fi + fi + + if is_service_enabled glance; then + git_clone $OSTESTIMAGES_REPO $OSTESTIMAGES_DIR $OSTESTIMAGES_BRANCH + pushd $OSTESTIMAGES_DIR + tox -egenerate + popd + iniset $TEMPEST_CONFIG image images_manifest_file ${OSTESTIMAGES_DIR}/images/manifest.yaml + local image_conversion + image_conversion=$(iniget $GLANCE_IMAGE_IMPORT_CONF image_conversion output_format) + if [[ -n "$image_conversion" ]]; then + iniset $TEMPEST_CONFIG image-feature-enabled image_conversion True + fi + iniset $TEMPEST_CONFIG image-feature-enabled image_format_enforcement $GLANCE_ENFORCE_IMAGE_FORMAT + fi + + iniset $TEMPEST_CONFIG network project_network_cidr $FIXED_RANGE + + ssh_connect_method=${TEMPEST_SSH_CONNECT_METHOD:-$ssh_connect_method} + + # the public network (for floating ip access) is only available + # if the extension is enabled. + # If NEUTRON_CREATE_INITIAL_NETWORKS is not true, there is no network created + # and the public_network_id should not be set. 
+ if [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" == "True" ]] && is_networking_extension_supported 'external-net'; then + public_network_id=$(openstack --os-cloud devstack-admin network show -f value -c id $PUBLIC_NETWORK_NAME) + # make sure shared network presence does not confuses the tempest tests + openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create --share shared + openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create --description shared-subnet --subnet-range ${TEMPEST_SHARED_POOL:-192.168.233.0/24} --network shared shared-subnet + fi + + iniset $TEMPEST_CONFIG DEFAULT use_syslog $SYSLOG + + # Oslo + iniset $TEMPEST_CONFIG oslo_concurrency lock_path $TEMPEST_STATE_PATH + mkdir -p $TEMPEST_STATE_PATH + iniset $TEMPEST_CONFIG DEFAULT use_stderr False + iniset $TEMPEST_CONFIG DEFAULT log_file tempest.log + iniset $TEMPEST_CONFIG DEFAULT debug True + + # Timeouts + iniset $TEMPEST_CONFIG compute build_timeout $BUILD_TIMEOUT + iniset $TEMPEST_CONFIG volume build_timeout $BUILD_TIMEOUT + + # Identity + iniset $TEMPEST_CONFIG identity uri_v3 "$KEYSTONE_SERVICE_URI_V3" + iniset $TEMPEST_CONFIG identity user_lockout_failure_attempts $KEYSTONE_LOCKOUT_FAILURE_ATTEMPTS + iniset $TEMPEST_CONFIG identity user_lockout_duration $KEYSTONE_LOCKOUT_DURATION + iniset $TEMPEST_CONFIG identity user_unique_last_password_count $KEYSTONE_UNIQUE_LAST_PASSWORD_COUNT + if [[ "$TEMPEST_HAS_ADMIN" == "True" ]]; then + iniset $TEMPEST_CONFIG auth admin_username $admin_username + iniset $TEMPEST_CONFIG auth admin_password "$password" + iniset $TEMPEST_CONFIG auth admin_project_name $admin_project_name + iniset $TEMPEST_CONFIG auth admin_domain_name $admin_domain_name + fi + iniset $TEMPEST_CONFIG identity auth_version ${TEMPEST_AUTH_VERSION:-v3} + if is_service_enabled tls-proxy; then + iniset $TEMPEST_CONFIG identity ca_certificates_file $SSL_BUNDLE_FILE + fi + + # Identity Features + if [[ "$KEYSTONE_SECURITY_COMPLIANCE_ENABLED" = True ]]; then + iniset $TEMPEST_CONFIG identity-feature-enabled security_compliance True + fi + + # When LDAP is enabled domain specific drivers are also enabled and the users + # and groups identity tests must adapt to this scenario + if is_service_enabled ldap; then + iniset $TEMPEST_CONFIG identity-feature-enabled domain_specific_drivers True + fi + + # TODO(felipemonteiro): Remove this once Tempest no longer supports Pike + # as this is supported in Queens and beyond. + iniset $TEMPEST_CONFIG identity-feature-enabled project_tags True + + # In Queens and later, application credentials are enabled by default + # so remove this once Tempest no longer supports Pike. + iniset $TEMPEST_CONFIG identity-feature-enabled application_credentials True + + # In Train and later, access rules for application credentials are enabled + # by default so remove this once Tempest no longer supports Stein. + iniset $TEMPEST_CONFIG identity-feature-enabled access_rules True + + # Image + # We want to be able to override this variable in the gate to avoid + # doing an external HTTP fetch for this test. + if [[ ! 
-z "$TEMPEST_HTTP_IMAGE" ]]; then
+        iniset $TEMPEST_CONFIG image http_image $TEMPEST_HTTP_IMAGE
+    fi
+    iniset $TEMPEST_CONFIG image-feature-enabled import_image $GLANCE_USE_IMPORT_WORKFLOW
+    iniset $TEMPEST_CONFIG image-feature-enabled os_glance_reserved True
+    if is_service_enabled g-api-r; then
+        iniset $TEMPEST_CONFIG image alternate_image_endpoint image_remote
+    fi
+
+    # Compute
+    iniset $TEMPEST_CONFIG compute image_ref $image_uuid
+    iniset $TEMPEST_CONFIG compute image_ref_alt $image_uuid_alt
+    iniset $TEMPEST_CONFIG compute flavor_ref $flavor_ref
+    iniset $TEMPEST_CONFIG compute flavor_ref_alt $flavor_ref_alt
+    iniset $TEMPEST_CONFIG validation connect_method $ssh_connect_method
+    if ! is_service_enabled neutron; then
+        iniset $TEMPEST_CONFIG compute fixed_network_name $PRIVATE_NETWORK_NAME
+    fi
+
+    # Set the service catalog entry for Tempest to run on. Typically
+    # used to try different compute API version targets. The tempest
+    # default is 'compute', which is typically valid, so only set this
+    # if you want to change it.
+    if [[ -n "$TEMPEST_COMPUTE_TYPE" ]]; then
+        iniset $TEMPEST_CONFIG compute catalog_type $TEMPEST_COMPUTE_TYPE
+    fi
+
+    # Compute Features
+    # Set the microversion range for compute tests.
+    # This is used to run the Nova microversion tests.
+    # Setting the range to [None, latest] allows Tempest to run all microversion tests.
+    # NOTE: To avoid microversion test failures on stable branches, "tempest_compute_max_microversion"
+    # should be changed on each release from "latest" to the maximum version supported by that release.
+    local tempest_compute_min_microversion=${TEMPEST_COMPUTE_MIN_MICROVERSION:-None}
+    local tempest_compute_max_microversion=${TEMPEST_COMPUTE_MAX_MICROVERSION:-"latest"}
+    # Reset the microversions to None where the legacy v2.0 API is running, as it does not support microversions.
+    # Setting both to "None" means no microversion testing.
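+    # For example (illustrative value), a stable branch might set
+    # TEMPEST_COMPUTE_MAX_MICROVERSION=2.96 instead of "latest".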
+ if [[ "$TEMPEST_COMPUTE_TYPE" == "compute_legacy" ]]; then + tempest_compute_min_microversion=None + tempest_compute_max_microversion=None + fi + if [ "$tempest_compute_min_microversion" == "None" ]; then + inicomment $TEMPEST_CONFIG compute min_microversion + else + iniset $TEMPEST_CONFIG compute min_microversion $tempest_compute_min_microversion + fi + if [ "$tempest_compute_max_microversion" == "None" ]; then + inicomment $TEMPEST_CONFIG compute max_microversion + else + iniset $TEMPEST_CONFIG compute max_microversion $tempest_compute_max_microversion + fi + + iniset $TEMPEST_CONFIG compute-feature-enabled personality ${ENABLE_FILE_INJECTION:-False} + iniset $TEMPEST_CONFIG compute-feature-enabled resize True + iniset $TEMPEST_CONFIG compute-feature-enabled live_migration ${LIVE_MIGRATION_AVAILABLE:-False} + iniset $TEMPEST_CONFIG compute-feature-enabled change_password False + iniset $TEMPEST_CONFIG compute-feature-enabled block_migration_for_live_migration ${USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION:-False} + iniset $TEMPEST_CONFIG compute-feature-enabled live_migrate_back_and_forth ${LIVE_MIGRATE_BACK_AND_FORTH:-False} + iniset $TEMPEST_CONFIG compute-feature-enabled attach_encrypted_volume ${ATTACH_ENCRYPTED_VOLUME_AVAILABLE:-True} + + # Starting Wallaby, nova sanitizes instance hostnames having freeform characters with dashes + iniset $TEMPEST_CONFIG compute-feature-enabled hostname_fqdn_sanitization True + + if [[ -n "$NOVA_FILTERS" ]]; then + iniset $TEMPEST_CONFIG compute-feature-enabled scheduler_enabled_filters ${NOVA_FILTERS} + fi + + if [[ $ENABLE_VOLUME_MULTIATTACH == "True" ]]; then + iniset $TEMPEST_CONFIG compute-feature-enabled volume_multiattach True + fi + + if is_service_enabled n-novnc || [ "$NOVA_VNC_ENABLED" != False ]; then + iniset $TEMPEST_CONFIG compute-feature-enabled vnc_console True + fi + if is_service_enabled n-spice || [ "$NOVA_SPICE_ENABLED" != False ]; then + iniset $TEMPEST_CONFIG compute-feature-enabled spice_console True + fi + if is_service_enabled n-sproxy || [ "$NOVA_SERIAL_ENABLED" != False ]; then + iniset $TEMPEST_CONFIG compute-feature-enabled serial_console True + fi + + # NOTE(gmaan): Since 2025.2, 'manager' role is available in nova. 
+ local nova_policy_roles="admin,manager,member,reader,service" + iniset $TEMPEST_CONFIG compute-feature-enabled nova_policy_roles $nova_policy_roles + + # Network + iniset $TEMPEST_CONFIG network project_networks_reachable false + iniset $TEMPEST_CONFIG network public_network_id "$public_network_id" + iniset $TEMPEST_CONFIG network public_router_id "$public_router_id" + iniset $TEMPEST_CONFIG network default_network "$FIXED_RANGE" + iniset $TEMPEST_CONFIG network-feature-enabled ipv6 "$IPV6_ENABLED" + iniset $TEMPEST_CONFIG network-feature-enabled ipv6_subnet_attributes "$IPV6_SUBNET_ATTRIBUTES_ENABLED" + iniset $TEMPEST_CONFIG network-feature-enabled port_security $NEUTRON_PORT_SECURITY + + iniset $TEMPEST_CONFIG enforce_scope neutron "$NEUTRON_ENFORCE_SCOPE" + + # Scenario + SCENARIO_IMAGE_DIR=${SCENARIO_IMAGE_DIR:-$FILES} + SCENARIO_IMAGE_FILE=$DEFAULT_IMAGE_FILE_NAME + SCENARIO_IMAGE_TYPE=${SCENARIO_IMAGE_TYPE:-cirros} + iniset $TEMPEST_CONFIG scenario img_file $SCENARIO_IMAGE_DIR/$SCENARIO_IMAGE_FILE + + # since version 0.6.0 cirros uses dhcpcd dhcp client by default, however, cirros, prior to the + # version 0.6.0, used udhcpc (the only available client at that time) which is also tempest's default + if [[ "$SCENARIO_IMAGE_TYPE" == "cirros" ]]; then + # the image is a cirros image + # use dhcpcd client when version greater or equal 0.6.0 + if [[ $(echo $CIRROS_VERSION | tr -d '.') -ge 060 ]]; then + iniset $TEMPEST_CONFIG scenario dhcp_client dhcpcd + fi + fi + + # If using provider networking, use the physical network for validation rather than private + TEMPEST_SSH_NETWORK_NAME=$PRIVATE_NETWORK_NAME + if is_provider_network; then + TEMPEST_SSH_NETWORK_NAME=$PHYSICAL_NETWORK + fi + # Validation + iniset $TEMPEST_CONFIG validation run_validation ${TEMPEST_RUN_VALIDATION:-True} + iniset $TEMPEST_CONFIG validation ip_version_for_ssh 4 + iniset $TEMPEST_CONFIG validation ssh_timeout $BUILD_TIMEOUT + iniset $TEMPEST_CONFIG validation image_ssh_user ${DEFAULT_INSTANCE_USER:=cirros} + iniset $TEMPEST_CONFIG validation image_alt_ssh_user ${DEFAULT_INSTANCE_ALT_USER:-$DEFAULT_INSTANCE_USER} + iniset $TEMPEST_CONFIG validation network_for_ssh $TEMPEST_SSH_NETWORK_NAME + + # Volume + # Only turn on TEMPEST_VOLUME_MANAGE_SNAPSHOT by default for "lvm" backends + if [[ "$CINDER_ENABLED_BACKENDS" == *"lvm"* ]]; then + TEMPEST_VOLUME_MANAGE_SNAPSHOT=${TEMPEST_VOLUME_MANAGE_SNAPSHOT:-True} + fi + iniset $TEMPEST_CONFIG volume-feature-enabled manage_snapshot $(trueorfalse False TEMPEST_VOLUME_MANAGE_SNAPSHOT) + # Only turn on TEMPEST_VOLUME_MANAGE_VOLUME by default for "lvm" backends + if [[ "$CINDER_ENABLED_BACKENDS" == *"lvm"* ]]; then + TEMPEST_VOLUME_MANAGE_VOLUME=${TEMPEST_VOLUME_MANAGE_VOLUME:-True} + fi + iniset $TEMPEST_CONFIG volume-feature-enabled manage_volume $(trueorfalse False TEMPEST_VOLUME_MANAGE_VOLUME) + # Only turn on TEMPEST_EXTEND_ATTACHED_VOLUME by default for "lvm" backends + # in Cinder and the libvirt driver in Nova. 
+ if [[ "$CINDER_ENABLED_BACKENDS" == *"lvm"* ]] && [ "$VIRT_DRIVER" = "libvirt" ]; then + TEMPEST_EXTEND_ATTACHED_VOLUME=${TEMPEST_EXTEND_ATTACHED_VOLUME:-True} + fi + iniset $TEMPEST_CONFIG volume-feature-enabled extend_attached_volume $(trueorfalse False TEMPEST_EXTEND_ATTACHED_VOLUME) + # Only turn on TEMPEST_VOLUME_REVERT_TO_SNAPSHOT by default for "lvm" backends + if [[ "$CINDER_ENABLED_BACKENDS" == *"lvm"* ]]; then + TEMPEST_VOLUME_REVERT_TO_SNAPSHOT=${TEMPEST_VOLUME_REVERT_TO_SNAPSHOT:-True} + fi + iniset $TEMPEST_CONFIG volume-feature-enabled volume_revert $(trueorfalse False TEMPEST_VOLUME_REVERT_TO_SNAPSHOT) + iniset $TEMPEST_CONFIG volume-feature-enabled extend_attached_encrypted_volume ${TEMPEST_EXTEND_ATTACHED_ENCRYPTED_VOLUME:-False} + if [[ "$CINDER_BACKUP_DRIVER" == *"swift"* ]]; then + iniset $TEMPEST_CONFIG volume backup_driver swift + fi + local tempest_volume_min_microversion=${TEMPEST_VOLUME_MIN_MICROVERSION:-None} + local tempest_volume_max_microversion=${TEMPEST_VOLUME_MAX_MICROVERSION:-"latest"} + if [ "$tempest_volume_min_microversion" == "None" ]; then + inicomment $TEMPEST_CONFIG volume min_microversion + else + iniset $TEMPEST_CONFIG volume min_microversion $tempest_volume_min_microversion + fi + + if [ "$tempest_volume_max_microversion" == "None" ]; then + inicomment $TEMPEST_CONFIG volume max_microversion + else + iniset $TEMPEST_CONFIG volume max_microversion $tempest_volume_max_microversion + fi + + if ! is_service_enabled c-bak; then + iniset $TEMPEST_CONFIG volume-feature-enabled backup False + fi + + # Using ``CINDER_ENABLED_BACKENDS`` + # Cinder uses a comma separated list with "type:backend_name": + # CINDER_ENABLED_BACKENDS = ceph:cephBE1,lvm:lvmBE2,foo:my_foo + if [[ -n "$CINDER_ENABLED_BACKENDS" ]] && [[ $CINDER_ENABLED_BACKENDS =~ .*,.* ]]; then + # We have at least 2 backends + iniset $TEMPEST_CONFIG volume-feature-enabled multi_backend "True" + local add_comma_seperator=0 + local backends_list='' + local be + # Tempest uses a comma separated list of backend_names: + # backend_names = BACKEND_1,BACKEND_2 + for be in ${CINDER_ENABLED_BACKENDS//,/ }; do + if [ "$add_comma_seperator" -eq "1" ]; then + backends_list+=,${be##*:} + else + # first element in the list + backends_list+=${be##*:} + add_comma_seperator=1 + fi + done + iniset $TEMPEST_CONFIG volume "backend_names" "$backends_list" + fi + + if [ $TEMPEST_VOLUME_DRIVER != "default" -o \ + "$TEMPEST_VOLUME_VENDOR" != "$TEMPEST_DEFAULT_VOLUME_VENDOR" ]; then + iniset $TEMPEST_CONFIG volume vendor_name "$TEMPEST_VOLUME_VENDOR" + fi + if [ $TEMPEST_VOLUME_DRIVER != "default" -o \ + "$TEMPEST_STORAGE_PROTOCOL" != "$TEMPEST_DEFAULT_STORAGE_PROTOCOL" ]; then + iniset $TEMPEST_CONFIG volume storage_protocol "$TEMPEST_STORAGE_PROTOCOL" + fi + + if [[ $ENABLE_VOLUME_MULTIATTACH == "True" ]]; then + iniset $TEMPEST_CONFIG volume volume_type_multiattach $VOLUME_TYPE_MULTIATTACH + fi + + # Placement Features + # Set the microversion range for placement. + # Setting [None, latest] range of microversion which allow Tempest to run all microversions tests. + # NOTE- To avoid microversion tests failure on stable branch, we need to change "tempest_placement_max_microversion" + # for stable branch on each release which should be changed from "latest" to max supported version of that release. 
+    local tempest_placement_min_microversion=${TEMPEST_PLACEMENT_MIN_MICROVERSION:-None}
+    local tempest_placement_max_microversion=${TEMPEST_PLACEMENT_MAX_MICROVERSION:-"latest"}
+    if [ "$tempest_placement_min_microversion" == "None" ]; then
+        inicomment $TEMPEST_CONFIG placement min_microversion
+    else
+        iniset $TEMPEST_CONFIG placement min_microversion $tempest_placement_min_microversion
+    fi
+    if [ "$tempest_placement_max_microversion" == "None" ]; then
+        inicomment $TEMPEST_CONFIG placement max_microversion
+    else
+        iniset $TEMPEST_CONFIG placement max_microversion $tempest_placement_max_microversion
+    fi
+
+    # Baremetal
+    if [ "$VIRT_DRIVER" = "ironic" ] ; then
+        iniset $TEMPEST_CONFIG compute-feature-enabled change_password False
+        iniset $TEMPEST_CONFIG compute-feature-enabled console_output False
+        iniset $TEMPEST_CONFIG compute-feature-enabled interface_attach False
+        iniset $TEMPEST_CONFIG compute-feature-enabled live_migration False
+        iniset $TEMPEST_CONFIG compute-feature-enabled pause False
+        iniset $TEMPEST_CONFIG compute-feature-enabled rescue False
+        iniset $TEMPEST_CONFIG compute-feature-enabled resize False
+        iniset $TEMPEST_CONFIG compute-feature-enabled shelve False
+        iniset $TEMPEST_CONFIG compute-feature-enabled snapshot False
+        iniset $TEMPEST_CONFIG compute-feature-enabled suspend False
+    fi
+
+    # Libvirt
+    if [ "$VIRT_DRIVER" = "libvirt" ]; then
+        # Libvirt-LXC
+        if [ "$LIBVIRT_TYPE" = "lxc" ]; then
+            iniset $TEMPEST_CONFIG compute-feature-enabled rescue False
+            iniset $TEMPEST_CONFIG compute-feature-enabled resize False
+            iniset $TEMPEST_CONFIG compute-feature-enabled shelve False
+            iniset $TEMPEST_CONFIG compute-feature-enabled snapshot False
+            iniset $TEMPEST_CONFIG compute-feature-enabled suspend False
+        else
+            iniset $TEMPEST_CONFIG compute-feature-enabled shelve_migrate True
+            iniset $TEMPEST_CONFIG compute-feature-enabled stable_rescue True
+            iniset $TEMPEST_CONFIG compute-feature-enabled swap_volume True
+        fi
+    fi
+
+    # ``service_available``
+    #
+    # This tempest service list needs to contain only the services that
+    # tempest itself owns, otherwise we can end up with an erroneous set of
+    # defaults (something defaulting to true in Tempest, but not listed here).
+    # Services tested by tempest plugins need to be set on the service's
+    # devstack plugin side, as devstack cannot keep track of all the tempest
+    # plugin services. Refer to Bug#1743688 for more details.
+    # 'horizon' is also kept here as there is no devstack plugin for horizon.
+    local service
+    local tempest_services="key,glance,nova,neutron,cinder,swift,horizon"
+    for service in ${tempest_services//,/ }; do
+        if is_service_enabled $service ; then
+            iniset $TEMPEST_CONFIG service_available $service "True"
+        else
+            iniset $TEMPEST_CONFIG service_available $service "False"
+        fi
+    done
+
+    # ``enforce_scope``
+    # If services enable enforce_scope for their policy,
+    # we need to enable the same on the Tempest side so that
+    # tests can be run with scoped tokens.
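+    # For example (illustrative), setting ENFORCE_SCOPE=True in local.conf
+    # turns on every per-service block below at once.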
+ if [[ "$KEYSTONE_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then + iniset $TEMPEST_CONFIG enforce_scope keystone true + fi + + if [[ "$NOVA_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then + iniset $TEMPEST_CONFIG enforce_scope nova true + fi + + if [[ "$PLACEMENT_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then + iniset $TEMPEST_CONFIG enforce_scope placement true + fi + + if [[ "$GLANCE_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then + iniset $TEMPEST_CONFIG enforce_scope glance true + fi + + if [[ "$CINDER_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then + iniset $TEMPEST_CONFIG enforce_scope cinder true + fi + + if [ "$VIRT_DRIVER" = "libvirt" ] && [ "$LIBVIRT_TYPE" = "lxc" ]; then + # libvirt-lxc does not support boot from volume or attaching volumes + # so basically anything with cinder is out of the question. + iniset $TEMPEST_CONFIG service_available cinder "False" + fi + + # Run tempest configuration utilities. This must be done last during configuration to + # ensure as complete a config as possible already exists + + # NOTE(mtreinish): Respect constraints on tempest verify-config venv + local tmp_cfg_file + tmp_cfg_file=$(mktemp) + cd $TEMPEST_DIR + + local tmp_u_c_m + tmp_u_c_m=$(mktemp -t tempest_u_c_m.XXXXXXXXXX) + set_tempest_venv_constraints $tmp_u_c_m + if [[ "$OFFLINE" != "True" ]]; then + tox -revenv-tempest --notest + fi + tox -evenv-tempest -- pip install -c $tmp_u_c_m -r requirements.txt + rm -f $tmp_u_c_m + + # Auth: + if [[ $TEMPEST_USE_TEST_ACCOUNTS == "True" ]]; then + if [[ $TEMPEST_HAS_ADMIN == "True" ]]; then + tox -evenv-tempest -- tempest account-generator -c $TEMPEST_CONFIG --os-username $admin_username --os-password "$password" --os-project-name $admin_project_name -r $TEMPEST_CONCURRENCY --with-admin etc/accounts.yaml + else + tox -evenv-tempest -- tempest account-generator -c $TEMPEST_CONFIG --os-username $admin_username --os-password "$password" --os-project-name $admin_project_name -r $TEMPEST_CONCURRENCY etc/accounts.yaml + fi + iniset $TEMPEST_CONFIG auth use_dynamic_credentials False + iniset $TEMPEST_CONFIG auth test_accounts_file "etc/accounts.yaml" + elif [[ $TEMPEST_HAS_ADMIN == "False" ]]; then + iniset $TEMPEST_CONFIG auth use_dynamic_credentials ${TEMPEST_ALLOW_TENANT_ISOLATION:-False} + + else + iniset $TEMPEST_CONFIG auth use_dynamic_credentials ${TEMPEST_ALLOW_TENANT_ISOLATION:-True} + fi + + # API Extensions + # Run ``verify_tempest_config -ur`` to retrieve enabled extensions on API endpoints + # NOTE(mtreinish): This must be done after auth settings are added to the tempest config + tox -evenv -- tempest verify-config -uro $tmp_cfg_file + + # Neutron API Extensions + + # disable metering if we didn't enable the service + if ! is_service_enabled q-metering neutron-metering; then + DISABLE_NETWORK_API_EXTENSIONS+=", metering" + fi + + # disable l3_agent_scheduler if we didn't enable L3 agent + if ! is_service_enabled q-l3 neutron-l3; then + DISABLE_NETWORK_API_EXTENSIONS+=", l3_agent_scheduler" + fi + + local network_api_extensions=${NETWORK_API_EXTENSIONS:-"all"} + if [[ ! 
-z "$DISABLE_NETWORK_API_EXTENSIONS" ]]; then
+        # Enabled extensions are either the ones explicitly specified or those available on the API endpoint
+        network_api_extensions=${NETWORK_API_EXTENSIONS:-$(iniget $tmp_cfg_file network-feature-enabled api_extensions | tr -d " ")}
+        # Remove disabled extensions
+        network_api_extensions=$(remove_disabled_extensions $network_api_extensions $DISABLE_NETWORK_API_EXTENSIONS)
+    fi
+    if [[ -n "$ADDITIONAL_NETWORK_API_EXTENSIONS" ]] && [[ "$network_api_extensions" != "all" ]]; then
+        network_api_extensions+=",$ADDITIONAL_NETWORK_API_EXTENSIONS"
+    fi
+    iniset $TEMPEST_CONFIG network-feature-enabled api_extensions $network_api_extensions
+    # Swift API Extensions
+    local object_storage_api_extensions=${OBJECT_STORAGE_API_EXTENSIONS:-"all"}
+    if [[ ! -z "$DISABLE_OBJECT_STORAGE_API_EXTENSIONS" ]]; then
+        # Enabled extensions are either the ones explicitly specified or those available on the API endpoint
+        object_storage_api_extensions=${OBJECT_STORAGE_API_EXTENSIONS:-$(iniget $tmp_cfg_file object-storage-feature-enabled discoverable_apis | tr -d " ")}
+        # Remove disabled extensions
+        object_storage_api_extensions=$(remove_disabled_extensions $object_storage_api_extensions $DISABLE_STORAGE_API_EXTENSIONS)
+    fi
+    iniset $TEMPEST_CONFIG object-storage-feature-enabled discoverable_apis $object_storage_api_extensions
+    # Cinder API Extensions
+    local volume_api_extensions=${VOLUME_API_EXTENSIONS:-"all"}
+    if [[ ! -z "$DISABLE_VOLUME_API_EXTENSIONS" ]]; then
+        # Enabled extensions are either the ones explicitly specified or those available on the API endpoint
+        volume_api_extensions=${VOLUME_API_EXTENSIONS:-$(iniget $tmp_cfg_file volume-feature-enabled api_extensions | tr -d " ")}
+        # Remove disabled extensions
+        volume_api_extensions=$(remove_disabled_extensions $volume_api_extensions $DISABLE_VOLUME_API_EXTENSIONS)
+    fi
+    iniset $TEMPEST_CONFIG volume-feature-enabled api_extensions $volume_api_extensions
+
+    # Restore IFS
+    IFS=$ifs
+}
+
+# install_tempest() - Collect source and prepare
+function install_tempest {
+    git_clone $TEMPEST_REPO $TEMPEST_DIR $TEMPEST_BRANCH
+    # NOTE(gmann): Pinning tox<4.0.0 for stable/zed and lower. Tox 4.0.0 was
+    # released after zed, has some incompatible changes, and it is ok not to
+    # fix the issues caused by tox 4.0.0 in stable branch jobs. We can
+    # continue testing the stable/zed and lower branches with tox<4.0.0
+    pip_install 'tox!=2.8.0,<4.0.0'
+    pushd $TEMPEST_DIR
+    # NOTE(gmann): check out TEMPEST_BRANCH in case TEMPEST_BRANCH is a tag
+    # name, not master. git_clone would not check out the tag because
+    # TEMPEST_DIR already exists, unless RECLONE is true.
+ git checkout $TEMPEST_BRANCH + + local tmp_u_c_m + tmp_u_c_m=$(mktemp -t tempest_u_c_m.XXXXXXXXXX) + set_tempest_venv_constraints $tmp_u_c_m + + tox -r --notest -efull + # NOTE(mtreinish) Respect constraints in the tempest full venv, things that + # are using a tox job other than full will not be respecting constraints but + # running pip install -U on tempest requirements + $TEMPEST_DIR/.tox/tempest/bin/pip install -c $tmp_u_c_m -r requirements.txt + PROJECT_VENV["tempest"]=${TEMPEST_DIR}/.tox/tempest + rm -f $tmp_u_c_m + popd +} + +# install_tempest_plugins() - Install any specified plugins into the tempest venv +function install_tempest_plugins { + pushd $TEMPEST_DIR + if [[ $TEMPEST_PLUGINS != 0 ]] ; then + local tmp_u_c_m + tmp_u_c_m=$(mktemp -t tempest_u_c_m.XXXXXXXXXX) + set_tempest_venv_constraints $tmp_u_c_m + tox -evenv-tempest -- pip install -c $tmp_u_c_m $TEMPEST_PLUGINS + rm -f $tmp_u_c_m + echo "Checking installed Tempest plugins:" + tox -evenv-tempest -- tempest list-plugins + fi + popd +} + +# Restore xtrace +$_XTRACE_TEMPEST + +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: diff --git a/lib/template b/lib/template new file mode 100644 index 0000000000..e6d003284f --- /dev/null +++ b/lib/template @@ -0,0 +1,106 @@ +#!/bin/bash +# +# lib/template +# Functions to control the configuration and operation of the XXXX service +# + +# Dependencies: +# +# - ``functions`` file +# - ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined +# - + +# ``stack.sh`` calls the entry points in this order: +# +# - is_XXXX_enabled +# - install_XXXX +# - configure_XXXX +# - init_XXXX +# - start_XXXX +# - stop_XXXX +# - cleanup_XXXX + +# Save trace setting +_XTRACE_TEMPLATE=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + +# + +# Set up default directories +XXXX_DIR=$DEST/XXXX +XXX_CONF_DIR=/etc/XXXX + + +# Functions +# --------- + +# Test if any XXXX services are enabled +# is_XXXX_enabled +function is_XXXX_enabled { + [[ ,${DISABLED_SERVICES} =~ ,"XXXX" ]] && return 1 + [[ ,${ENABLED_SERVICES} =~ ,"XX-" ]] && return 0 + return 1 +} + +# cleanup_XXXX() - Remove residual data files, anything left over from previous +# runs that a clean run would need to clean up +function cleanup_XXXX { + # kill instances (nova) + # delete image files (glance) + # This function intentionally left blank + : +} + +# configure_XXXX() - Set config files, create data dirs, etc +function configure_XXXX { + # sudo python setup.py deploy + # iniset $XXXX_CONF ... + # This function intentionally left blank + : +} + +# create_XXXX_accounts() - Create required service accounts +function create_XXXX_accounts { + : +} + +# init_XXXX() - Initialize databases, etc. +function init_XXXX { + # clean up from previous (possibly aborted) runs + # create required data files + : +} + +# install_XXXX() - Collect source and prepare +function install_XXXX { + # git clone xxx + : +} + +# start_XXXX() - Start running processes +function start_XXXX { + # The quoted command must be a single command and not include an + # shell metacharacters, redirections or shell builtins. 
+ # run_process XXXX "$XXXX_DIR/bin/XXXX-bin" + : +} + +# stop_XXXX() - Stop running processes +function stop_XXXX { + # for serv in serv-a serv-b; do + # stop_process $serv + # done + : +} + +# Restore xtrace +$_XTRACE_TEMPLATE + +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: diff --git a/lib/tls b/lib/tls new file mode 100644 index 0000000000..fa0a448d7d --- /dev/null +++ b/lib/tls @@ -0,0 +1,617 @@ +#!/bin/bash +# +# lib/tls +# Functions to control the configuration and operation of the TLS proxy service + +# !! source _before_ any services that use ``SERVICE_HOST`` +# +# Dependencies: +# +# - ``functions`` file +# - ``DEST``, ``DATA_DIR`` must be defined +# - ``HOST_IP``, ``SERVICE_HOST`` +# - ``KEYSTONE_TOKEN_FORMAT`` must be defined + +# Entry points: +# +# - configure_CA +# - init_CA + +# - configure_proxy +# - start_tls_proxy + +# - stop_tls_proxy +# - cleanup_CA + +# - make_root_CA +# - make_int_CA +# - make_cert ca-dir cert-name "common-name" ["alt-name" ...] +# - start_tls_proxy HOST_IP 5000 localhost 5000 +# - ensure_certificates +# - is_ssl_enabled_service +# - enable_mod_ssl + + +# Defaults +# -------- + +if is_service_enabled tls-proxy; then + # TODO(dtroyer): revisit this below after the search for HOST_IP has been done + TLS_IP=${TLS_IP:-$(ipv6_unquote $SERVICE_HOST)} +fi + +DEVSTACK_HOSTNAME=$(hostname -f) +DEVSTACK_CERT_NAME=devstack-cert +DEVSTACK_CERT=$DATA_DIR/$DEVSTACK_CERT_NAME.pem + +# CA configuration +ROOT_CA_DIR=${ROOT_CA_DIR:-$DATA_DIR/CA/root-ca} +INT_CA_DIR=${INT_CA_DIR:-$DATA_DIR/CA/int-ca} + +ORG_NAME="OpenStack" +ORG_UNIT_NAME="DevStack" + +# Stud configuration +STUD_PROTO="--tls" +STUD_CIPHERS='TLSv1+HIGH:!DES:!aNULL:!eNULL:@STRENGTH' + + +# CA Functions +# ============ + +# There may be more than one, get specific +OPENSSL=${OPENSSL:-/usr/bin/openssl} + +# Do primary CA configuration +function configure_CA { + # build common config file + + # Verify ``TLS_IP`` is good + if [[ -n "$SERVICE_HOST" && "$(ipv6_unquote $SERVICE_HOST)" != "$TLS_IP" ]]; then + # auto-discover has changed the IP + TLS_IP=$(ipv6_unquote $SERVICE_HOST) + fi +} + +# Creates a new CA directory structure +# create_CA_base ca-dir +function create_CA_base { + local ca_dir=$1 + + if [[ -d $ca_dir ]]; then + # Bail out it exists + return 0 + fi + + local i + for i in certs crl newcerts private; do + mkdir -p $ca_dir/$i + done + chmod 710 $ca_dir/private + echo "01" >$ca_dir/serial + cp /dev/null $ca_dir/index.txt +} + +# Create a new CA configuration file +# create_CA_config ca-dir common-name +function create_CA_config { + local ca_dir=$1 + local common_name=$2 + + echo " +[ ca ] +default_ca = CA_default + +[ CA_default ] +dir = $ca_dir +policy = policy_match +database = \$dir/index.txt +serial = \$dir/serial +certs = \$dir/certs +crl_dir = \$dir/crl +new_certs_dir = \$dir/newcerts +certificate = \$dir/cacert.pem +private_key = \$dir/private/cacert.key +RANDFILE = \$dir/private/.rand +default_md = sha256 + +[ req ] +default_bits = 2048 +default_md = sha256 + +prompt = no +distinguished_name = ca_distinguished_name + +x509_extensions = ca_extensions + +[ ca_distinguished_name ] +organizationName = $ORG_NAME +organizationalUnitName = $ORG_UNIT_NAME Certificate Authority +commonName = $common_name + +[ policy_match ] +countryName = optional +stateOrProvinceName = optional +organizationName = match +organizationalUnitName = optional +commonName = supplied + +[ ca_extensions ] +basicConstraints = critical,CA:true +subjectKeyIdentifier = hash 
+authorityKeyIdentifier = keyid:always, issuer +keyUsage = cRLSign, keyCertSign + +" >$ca_dir/ca.conf +} + +# Create a new signing configuration file +# create_signing_config ca-dir +function create_signing_config { + local ca_dir=$1 + + echo " +[ ca ] +default_ca = CA_default + +[ CA_default ] +dir = $ca_dir +policy = policy_match +database = \$dir/index.txt +serial = \$dir/serial +certs = \$dir/certs +crl_dir = \$dir/crl +new_certs_dir = \$dir/newcerts +certificate = \$dir/cacert.pem +private_key = \$dir/private/cacert.key +RANDFILE = \$dir/private/.rand +default_md = default + +[ req ] +default_bits = 1024 +default_md = sha256 + +prompt = no +distinguished_name = req_distinguished_name + +x509_extensions = req_extensions + +[ req_distinguished_name ] +organizationName = $ORG_NAME +organizationalUnitName = $ORG_UNIT_NAME Server Farm + +[ policy_match ] +countryName = optional +stateOrProvinceName = optional +organizationName = match +organizationalUnitName = optional +commonName = supplied + +[ req_extensions ] +basicConstraints = CA:false +subjectKeyIdentifier = hash +authorityKeyIdentifier = keyid:always, issuer +keyUsage = digitalSignature, keyEncipherment, keyAgreement +extendedKeyUsage = serverAuth, clientAuth +subjectAltName = \$ENV::SUBJECT_ALT_NAME + +" >$ca_dir/signing.conf +} + +# Create root and intermediate CAs +# init_CA +function init_CA { + # Ensure CAs are built + make_root_CA $ROOT_CA_DIR + make_int_CA $INT_CA_DIR $ROOT_CA_DIR + + # Create the CA bundle + cat $ROOT_CA_DIR/cacert.pem $INT_CA_DIR/cacert.pem >>$INT_CA_DIR/ca-chain.pem + cat $INT_CA_DIR/ca-chain.pem >> $SSL_BUNDLE_FILE + + if is_fedora; then + sudo cp $INT_CA_DIR/ca-chain.pem /usr/share/pki/ca-trust-source/anchors/devstack-chain.pem + sudo update-ca-trust + elif is_ubuntu; then + sudo cp $INT_CA_DIR/ca-chain.pem /usr/local/share/ca-certificates/devstack-int.crt + sudo cp $ROOT_CA_DIR/cacert.pem /usr/local/share/ca-certificates/devstack-root.crt + sudo update-ca-certificates + fi +} + +# Create an initial server cert +# init_cert +function init_cert { + if [[ ! -r $DEVSTACK_CERT ]]; then + if [[ -n "$TLS_IP" ]]; then + TLS_IP="IP:$TLS_IP" + if [[ -n "$HOST_IPV6" ]]; then + TLS_IP="$TLS_IP,IP:$HOST_IPV6" + fi + fi + make_cert $INT_CA_DIR $DEVSTACK_CERT_NAME $DEVSTACK_HOSTNAME "$TLS_IP" + + # Create a cert bundle + cat $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/cacert.pem >$DEVSTACK_CERT + fi +} + +# make_cert creates and signs a new certificate with the given commonName and CA +# make_cert ca-dir cert-name "common-name" ["alt-name" ...] +function make_cert { + local ca_dir=$1 + local cert_name=$2 + local common_name=$3 + local alt_names=$4 + + if [ "$common_name" != "$SERVICE_HOST" ]; then + if is_ipv4_address "$SERVICE_HOST" ; then + if [[ -z "$alt_names" ]]; then + alt_names="IP:$SERVICE_HOST" + else + alt_names="$alt_names,IP:$SERVICE_HOST" + fi + fi + fi + + # Only generate the certificate if it doesn't exist yet on the disk + if [ ! 
-r "$ca_dir/$cert_name.crt" ]; then + # Generate a signing request + $OPENSSL req \ + -sha256 \ + -newkey rsa \ + -nodes \ + -keyout $ca_dir/private/$cert_name.key \ + -out $ca_dir/$cert_name.csr \ + -subj "/O=${ORG_NAME}/OU=${ORG_UNIT_NAME} Servers/CN=${common_name}" + + if [[ -z "$alt_names" ]]; then + alt_names="DNS:${common_name}" + else + alt_names="DNS:${common_name},${alt_names}" + fi + + # Sign the request valid for 1 year + SUBJECT_ALT_NAME="$alt_names" \ + $OPENSSL ca -config $ca_dir/signing.conf \ + -extensions req_extensions \ + -days 365 \ + -notext \ + -in $ca_dir/$cert_name.csr \ + -out $ca_dir/$cert_name.crt \ + -subj "/O=${ORG_NAME}/OU=${ORG_UNIT_NAME} Servers/CN=${common_name}" \ + -batch + fi +} + +# Make an intermediate CA to sign everything else +# make_int_CA ca-dir signing-ca-dir +function make_int_CA { + local ca_dir=$1 + local signing_ca_dir=$2 + + # Create the root CA + create_CA_base $ca_dir + create_CA_config $ca_dir 'Intermediate CA' + create_signing_config $ca_dir + + if [ ! -r "$ca_dir/cacert.pem" ]; then + # Create a signing certificate request + $OPENSSL req -config $ca_dir/ca.conf \ + -sha256 \ + -newkey rsa \ + -nodes \ + -keyout $ca_dir/private/cacert.key \ + -out $ca_dir/cacert.csr \ + -outform PEM + + # Sign the intermediate request valid for 1 year + $OPENSSL ca -config $signing_ca_dir/ca.conf \ + -extensions ca_extensions \ + -days 365 \ + -notext \ + -in $ca_dir/cacert.csr \ + -out $ca_dir/cacert.pem \ + -batch + fi +} + +# Make a root CA to sign other CAs +# make_root_CA ca-dir +function make_root_CA { + local ca_dir=$1 + + # Create the root CA + create_CA_base $ca_dir + create_CA_config $ca_dir 'Root CA' + + if [ ! -r "$ca_dir/cacert.pem" ]; then + # Create a self-signed certificate valid for 5 years + $OPENSSL req -config $ca_dir/ca.conf \ + -x509 \ + -nodes \ + -newkey rsa \ + -days 21360 \ + -keyout $ca_dir/private/cacert.key \ + -out $ca_dir/cacert.pem \ + -outform PEM + fi +} + +# Deploy the service cert & key to a service specific +# location +function deploy_int_cert { + local cert_target_file=$1 + local key_target_file=$2 + + sudo cp "$INT_CA_DIR/$DEVSTACK_CERT_NAME.crt" "$cert_target_file" + sudo cp "$INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key" "$key_target_file" +} + +# Deploy the intermediate CA cert bundle file to a service +# specific location +function deploy_int_CA { + local ca_target_file=$1 + + sudo cp "$INT_CA_DIR/ca-chain.pem" "$ca_target_file" +} + +# If a non-system python-requests is installed then it will use the +# built-in CA certificate store rather than the distro-specific +# CA certificate store. Detect this and symlink to the correct +# one. If the value for the CA is not rooted in /etc then we know +# we need to change it. +function fix_system_ca_bundle_path { + if is_service_enabled tls-proxy; then + local capath + if [[ "$GLOBAL_VENV" == "True" ]] ; then + capath=$($DEVSTACK_VENV/bin/python3 -c $'try:\n from requests import certs\n print (certs.where())\nexcept ImportError: pass') + else + capath=$(python$PYTHON3_VERSION -c $'try:\n from requests import certs\n print (certs.where())\nexcept ImportError: pass') + fi + if [[ ! $capath == "" && ! $capath =~ ^/etc/.* && ! -L $capath ]]; then + if is_fedora; then + sudo rm -f $capath + sudo ln -s /etc/pki/tls/certs/ca-bundle.crt $capath + elif is_ubuntu; then + sudo rm -f $capath + sudo ln -s /etc/ssl/certs/ca-certificates.crt $capath + else + echo "Don't know how to set the CA bundle, expect the install to fail." 
+ fi + fi + fi +} + + +# Only for compatibility, return if the tls-proxy is enabled +function is_ssl_enabled_service { + return is_service_enabled tls-proxy +} + +# Certificate Input Configuration +# =============================== + +# Ensure that the certificates for a service are in place. This function does +# not check that a service is SSL enabled, this should already have been +# completed. +# +# The function expects to find a certificate, key and CA certificate in the +# variables ``{service}_SSL_CERT``, ``{service}_SSL_KEY`` and ``{service}_SSL_CA``. For +# example for keystone this would be ``KEYSTONE_SSL_CERT``, ``KEYSTONE_SSL_KEY`` and +# ``KEYSTONE_SSL_CA``. +# +# If it does not find these certificates then the DevStack-issued server +# certificate, key and CA certificate will be associated with the service. +# +# If only some of the variables are provided then the function will quit. +function ensure_certificates { + local service=$1 + + local cert_var="${service}_SSL_CERT" + local key_var="${service}_SSL_KEY" + local ca_var="${service}_SSL_CA" + + local cert=${!cert_var} + local key=${!key_var} + local ca=${!ca_var} + + if [[ -z "$cert" && -z "$key" && -z "$ca" ]]; then + local cert="$INT_CA_DIR/$DEVSTACK_CERT_NAME.crt" + local key="$INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key" + local ca="$INT_CA_DIR/ca-chain.pem" + eval ${service}_SSL_CERT=\$cert + eval ${service}_SSL_KEY=\$key + eval ${service}_SSL_CA=\$ca + return # the CA certificate is already in the bundle + elif [[ -z "$cert" || -z "$key" || -z "$ca" ]]; then + die $LINENO "Missing either the ${cert_var} ${key_var} or ${ca_var}" \ + "variable to enable SSL for ${service}" + fi + + cat $ca >> $SSL_BUNDLE_FILE +} + +# Enable the mod_ssl plugin in Apache +function enable_mod_ssl { + echo "Enabling mod_ssl" + + if is_ubuntu; then + sudo a2enmod ssl + elif is_fedora; then + # Fedora enables mod_ssl by default + : + fi + if ! sudo `which httpd || which apache2ctl` -M | grep -w -q ssl_module; then + die $LINENO "mod_ssl is not enabled in apache2/httpd, please check for it manually and run stack.sh again" + fi +} + + +# Proxy Functions +# =============== + +function tune_apache_connections { + local should_restart=$1 + local tuning_file=$APACHE_SETTINGS_DIR/connection-tuning.conf + if ! [ -f $tuning_file ] ; then + sudo bash -c "cat > $tuning_file" << EOF +# worker MPM +# StartServers: initial number of server processes to start +# MinSpareThreads: minimum number of worker threads which are kept spare +# MaxSpareThreads: maximum number of worker threads which are kept spare +# ThreadLimit: ThreadsPerChild can be changed to this maximum value during a +# graceful restart. ThreadLimit can only be changed by stopping +# and starting Apache. +# ThreadsPerChild: constant number of worker threads in each server process +# MaxClients: maximum number of simultaneous client connections +# MaxRequestsPerChild: maximum number of requests a server process serves +# +# We want to be memory thrifty so tune down apache to allow 256 total +# connections. This should still be plenty for a dev env yet lighter than +# apache defaults. + +# Note that the next three conf values must be changed together. +# MaxClients = ServerLimit * ThreadsPerChild +ServerLimit 8 +ThreadsPerChild 32 +MaxClients 256 +StartServers 2 +MinSpareThreads 32 +MaxSpareThreads 96 +ThreadLimit 64 +MaxRequestsPerChild 0 + + +# Note that the next three conf values must be changed together. 
+# MaxClients = ServerLimit * ThreadsPerChild
+ServerLimit 8
+ThreadsPerChild 32
+MaxClients 256
+StartServers 2
+MinSpareThreads 32
+MaxSpareThreads 96
+ThreadLimit 64
+MaxRequestsPerChild 0
+
+EOF
+        if [ "$should_restart" != "norestart" ] ; then
+            # Only restart the apache server if we know we really want to
+            # do so. Too many restarts in a short period of time is treated
+            # as an error by systemd.
+            restart_apache_server
+        fi
+    fi
+}
+
+# Starts the TLS proxy for the given IP/ports
+# start_tls_proxy service-name front-host front-port back-host back-port
+function start_tls_proxy {
+    local b_service="$1-tls-proxy"
+    local f_host=$2
+    local f_port=$3
+    local b_host=$4
+    local b_port=$5
+    # 8190 is the default apache size.
+    local f_header_size=${6:-8190}
+
+    # We don't restart apache here as we'll do it at the end of the function.
+    tune_apache_connections norestart
+
+    local config_file
+    config_file=$(apache_site_config_for $b_service)
+    local listen_string
+    # Default apache configs on ubuntu and centos listen on 80 and 443.
+    # Newer apache seems fine with a duplicate Listen directive but older
+    # apache does not, so special-case 80 and 443.
+    if [[ "$f_port" == "80" ]] || [[ "$f_port" == "443" ]]; then
+        listen_string=""
+    elif [[ "$f_host" == '*' ]] ; then
+        listen_string="Listen $f_port"
+    else
+        listen_string="Listen $f_host:$f_port"
+    fi
+    sudo bash -c "cat >$config_file" << EOF
+$listen_string
+
+<VirtualHost $f_host:$f_port>
+    SSLEngine On
+    SSLCertificateFile $DEVSTACK_CERT
+    SSLProtocol -all +TLSv1.3 +TLSv1.2
+
+    # Disable KeepAlive to fix bug #1630664 a.k.a. the
+    # ('Connection aborted.', BadStatusLine("''",)) error
+    KeepAlive Off
+
+    # This increase in allowed request header sizes is required
+    # for swift functional testing to work with tls enabled. It is 2 bytes
+    # larger than the apache default of 8190.
+    LimitRequestFieldSize $f_header_size
+    RequestHeader set X-Forwarded-Proto "https"
+
+    # Avoid races (at the cost of performance) to re-use a pooled connection
+    # where the connection is closed (bug 1807518).
+    # Set acquire=1 to disable waiting for connection pool members so that
+    # we can determine when apache is overloaded (returns 503).
+    SetEnv proxy-initial-not-pooled
+
+    ProxyPass http://$b_host:$b_port/ retry=0 nocanon acquire=1
+    ProxyPassReverse http://$b_host:$b_port/
+
+    ErrorLog $APACHE_LOG_DIR/tls-proxy_error.log
+    ErrorLogFormat "%{cu}t [%-m:%l] [pid %P:tid %T] %7F: %E: [client\ %a] [frontend\ %A] %M% ,\ referer\ %{Referer}i"
+    LogLevel info
+    CustomLog $APACHE_LOG_DIR/tls-proxy_access.log combined
+</VirtualHost>
+EOF
+    for mod in headers ssl proxy proxy_http; do
+        # We don't need to restart here as we will restart once at the end
+        # of the function.
+        enable_apache_mod $mod norestart
+    done
+    enable_apache_site $b_service
+    restart_apache_server
+}
+
+# Cleanup Functions
+# =================
+
+# Stops the apache service. This should be done only after all services
+# using tls configuration are down.
+function stop_tls_proxy {
+    stop_apache_server
+
+    # NOTE(jh): Removing all tls-proxy configs is a bit of a hack, but
+    # necessary so that we can restart after an unstack. A better
+    # solution would be to ensure that each service calling
+    # start_tls_proxy will call stop_tls_proxy with the same
+    # parameters on shutdown so we can use the disable_apache_site
+    # function and remove individual files there.
+ if is_ubuntu; then + sudo rm -f /etc/apache2/sites-enabled/*-tls-proxy.conf + else + for i in $APACHE_CONF_DIR/*-tls-proxy.conf; do + sudo mv $i $i.disabled + done + fi +} + +# Clean up the CA files +# cleanup_CA +function cleanup_CA { + if is_fedora; then + sudo rm -f /usr/share/pki/ca-trust-source/anchors/devstack-chain.pem + sudo update-ca-trust + elif is_ubuntu; then + sudo rm -f /usr/local/share/ca-certificates/devstack-int.crt + sudo rm -f /usr/local/share/ca-certificates/devstack-root.crt + sudo update-ca-certificates + fi + + rm -rf "$INT_CA_DIR" "$ROOT_CA_DIR" "$DEVSTACK_CERT" +} + +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: diff --git a/openrc b/openrc index 7c1e129278..e800abeb3d 100644 --- a/openrc +++ b/openrc @@ -1,63 +1,74 @@ #!/usr/bin/env bash - -# Load local configuration -source ./stackrc - -# Set api host endpoint -HOST_IP=${HOST_IP:-127.0.0.1} - -# Nova original used project_id as the *account* that owned resources (servers, -# ip address, ...) With the addition of Keystone we have standardized on the -# term **tenant** as the entity that owns the resources. **novaclient** still -# uses the old deprecated terms project_id. Note that this field should now be -# set to tenant_name, not tenant_id. -export NOVA_PROJECT_ID=${TENANT:-demo} - -# In addition to the owning entity (tenant), nova stores the entity performing -# the action as the **user**. -export NOVA_USERNAME=${USERNAME:-demo} - -# With Keystone you pass the keystone password instead of an api key. -# The most recent versions of novaclient use NOVA_PASSWORD instead of NOVA_API_KEY -export NOVA_PASSWORD=${ADMIN_PASSWORD:-secrete} - -# With the addition of Keystone, to use an openstack cloud you should -# authenticate against keystone, which returns a **Token** and **Service -# Catalog**. The catalog contains the endpoint for all services the user/tenant -# has access to - including nova, glance, keystone, swift, ... We currently -# recommend using the 2.0 *auth api*. # -# *NOTE*: Using the 2.0 *auth api* does not mean that compute api is 2.0. We -# will use the 1.1 *compute api* -export NOVA_URL=${NOVA_URL:-http://$HOST_IP:5000/v2.0/} - -# Currently novaclient needs you to specify the *compute api* version. This -# needs to match the config of your catalog returned by Keystone. -export NOVA_VERSION=${NOVA_VERSION:-1.1} - -# FIXME - why does this need to be specified? 
-export NOVA_REGION_NAME=${NOVA_REGION_NAME:-RegionOne} - -# Set the ec2 url so euca2ools works -export EC2_URL=${EC2_URL:-http://$HOST_IP:8773/services/Cloud} - -# Access key is set in the initial keystone data to be the same as username -export EC2_ACCESS_KEY=${USERNAME:-demo} - -# Secret key is set in the initial keystone data to the admin password -export EC2_SECRET_KEY=${ADMIN_PASSWORD:-secrete} - -# set log level to DEBUG (helps debug issues) -# export NOVACLIENT_DEBUG=1 +# source openrc [username] [projectname] +# +# Configure a set of credentials for $PROJECT/$USERNAME: +# Set OS_PROJECT_NAME to override the default project 'demo' +# Set OS_USERNAME to override the default user name 'demo' +# Set ADMIN_PASSWORD to set the password for 'admin' and 'demo' -# Max time till the vm is bootable -export BOOT_TIMEOUT=${BOOT_TIMEOUT:-15} +if [[ -n "$1" ]]; then + OS_USERNAME=$1 +fi +if [[ -n "$2" ]]; then + OS_PROJECT_NAME=$2 +fi -# Max time to wait while vm goes from build to active state -export ACTIVE_TIMEOUT=${ACTIVE_TIMEOUT:-10} +# Find the other rc files +RC_DIR=$(cd $(dirname "${BASH_SOURCE:-$0}") && pwd) -# Max time from run instance command until it is running -export RUNNING_TIMEOUT=${RUNNING_TIMEOUT:-$(($BOOT_TIMEOUT + $ACTIVE_TIMEOUT))} +# Import common functions +source $RC_DIR/functions -# Max time to wait for proper IP association and dis-association. -export ASSOCIATE_TIMEOUT=${ASSOCIATE_TIMEOUT:-10} +# Load local configuration +source $RC_DIR/stackrc + +# Load the last env variables if available +if [[ -r $RC_DIR/.stackenv ]]; then + source $RC_DIR/.stackenv + export OS_CACERT +fi + +# Get some necessary configuration +source $RC_DIR/lib/tls + +# Minimal configuration +export OS_AUTH_TYPE=password +export OS_PROJECT_NAME=${OS_PROJECT_NAME:-demo} +export OS_USERNAME=${OS_USERNAME:-demo} +export OS_PASSWORD=${ADMIN_PASSWORD:-secret} +export OS_REGION_NAME=${REGION_NAME:-RegionOne} + +# Set the host API endpoint. This will default to HOST_IP if SERVICE_IP_VERSION +# is 4, else HOST_IPV6 if it's 6. SERVICE_HOST may also be used to specify the +# endpoint, which is convenient for some localrc configurations. Additionally, +# some exercises call Glance directly. On a single-node installation, Glance +# should be listening on a local IP address, depending on the setting of +# SERVICE_IP_VERSION. If its running elsewhere, it can be set here. +if [[ $SERVICE_IP_VERSION == 6 ]]; then + HOST_IPV6=${HOST_IPV6:-::1} + SERVICE_HOST=${SERVICE_HOST:-[$HOST_IPV6]} + GLANCE_HOST=${GLANCE_HOST:-[$HOST_IPV6]} +else + HOST_IP=${HOST_IP:-127.0.0.1} + SERVICE_HOST=${SERVICE_HOST:-$HOST_IP} + GLANCE_HOST=${GLANCE_HOST:-$HOST_IP} +fi + +# If you don't have a working .stackenv, this is the backup position +KEYSTONE_BACKUP=$SERVICE_PROTOCOL://$SERVICE_HOST:5000 +KEYSTONE_SERVICE_URI=${KEYSTONE_SERVICE_URI:-$KEYSTONE_BACKUP} + +export OS_AUTH_URL=${OS_AUTH_URL:-$KEYSTONE_SERVICE_URI} + +export OS_USER_DOMAIN_ID=${OS_USER_DOMAIN_ID:-"default"} +export OS_PROJECT_DOMAIN_ID=${OS_PROJECT_DOMAIN_ID:-"default"} + +# Set OS_CACERT to a default CA certificate chain if it exists. +if [[ ! 
-v OS_CACERT ]] ; then + DEFAULT_OS_CACERT=$INT_CA_DIR/ca-chain.pem + # If the file does not exist, this may confuse preflight sanity checks + if [ -e $DEFAULT_OS_CACERT ] ; then + export OS_CACERT=$DEFAULT_OS_CACERT + fi +fi diff --git a/playbooks/devstack.yaml b/playbooks/devstack.yaml new file mode 100644 index 0000000000..d0906380ab --- /dev/null +++ b/playbooks/devstack.yaml @@ -0,0 +1,7 @@ +- hosts: all + # This is the default strategy, however since orchestrate-devstack requires + # "linear", it is safer to enforce it in case this is running in an + # environment configured with a different default strategy. + strategy: linear + roles: + - orchestrate-devstack diff --git a/playbooks/post.yaml b/playbooks/post.yaml new file mode 100644 index 0000000000..0047d78ea5 --- /dev/null +++ b/playbooks/post.yaml @@ -0,0 +1,41 @@ +- hosts: all + become: True + vars: + devstack_log_dir: "{{ devstack_base_dir|default('/opt/stack') }}/logs/" + devstack_conf_dir: "{{ devstack_base_dir|default('/opt/stack') }}/devstack/" + devstack_full_log: "{{ devstack_early_log|default('/opt/stack/logs/devstack-early.txt') }}" + tasks: + # NOTE(andreaf) If the tempest service is enabled, a tempest.log is + # generated as part of lib/tempest, as a result of verify_tempest_config + - name: Check if a tempest log exits + stat: + path: "{{ devstack_conf_dir }}/tempest.log" + register: tempest_log + - name: Link post-devstack tempest.log + file: + src: "{{ devstack_conf_dir }}/tempest.log" + dest: "{{ stage_dir }}/verify_tempest_conf.log" + state: hard + when: tempest_log.stat.exists + - name: Capture most recent qemu crash dump, if any + shell: + executable: /bin/bash + cmd: | + coredumpctl -o {{ devstack_log_dir }}/qemu.coredump dump /usr/bin/qemu-system-x86_64 + ignore_errors: yes + roles: + - export-devstack-journal + - apache-logs-conf + # This should run as early as possible to make sure we don't skew + # the post-tempest results with other activities. + - capture-performance-data + - devstack-project-conf + # capture-system-logs should be the last role before stage-output + - capture-system-logs + - role: stage-output + # NOTE(andreaf) We need fetch-devstack-log-dir only as long as the base job + # starts pulling logs for us from {{ ansible_user_dir }}/logs. + # Meanwhile we already store things in ansible_user_dir and use + # fetch-devstack-log-dir setting devstack_base_dir + - role: fetch-devstack-log-dir + devstack_base_dir: "{{ ansible_user_dir }}" diff --git a/playbooks/pre.yaml b/playbooks/pre.yaml new file mode 100644 index 0000000000..68cb1d8c7a --- /dev/null +++ b/playbooks/pre.yaml @@ -0,0 +1,37 @@ +- hosts: all + pre_tasks: + - name: Fix the permissions of the zuul home directory + # Make sure that the zuul home can be traversed, + # so that all users can access the sources placed there. + # Some distributions create it with 700 by default. + file: + path: "{{ ansible_user_dir }}" + mode: a+x + - name: Gather minimum local MTU + set_fact: + local_mtu: > + {% set mtus = [] -%} + {% for interface in ansible_interfaces -%} + {% set interface_variable = 'ansible_' + interface -%} + {% if interface_variable in hostvars[inventory_hostname] -%} + {% set _ = mtus.append(hostvars[inventory_hostname][interface_variable]['mtu']|int) -%} + {% endif -%} + {% endfor -%} + {{- mtus|min -}} + - name: Calculate external_bridge_mtu + # 30 bytes is overhead for vxlan (which is greater than GRE + # allowing us to use either overlay option with this MTU. 
+ # 40 bytes is overhead for IPv6, which will also support an IPv4 overlay. + # TODO(andreaf) This should work, but it may have to be reconciled with + # the MTU setting used by the multinode setup roles in multinode pre.yaml + set_fact: + external_bridge_mtu: "{{ local_mtu | int - 30 - 40 }}" + roles: + - configure-swap + - setup-stack-user + - setup-tempest-user + - setup-devstack-source-dirs + - setup-devstack-log-dir + - setup-devstack-cache + - start-fresh-logging + - write-devstack-local-conf diff --git a/playbooks/tox/post.yaml b/playbooks/tox/post.yaml new file mode 100644 index 0000000000..7f0cb19824 --- /dev/null +++ b/playbooks/tox/post.yaml @@ -0,0 +1,4 @@ +- hosts: all + roles: + - fetch-tox-output + - fetch-subunit-output diff --git a/playbooks/tox/pre.yaml b/playbooks/tox/pre.yaml new file mode 100644 index 0000000000..68d5254251 --- /dev/null +++ b/playbooks/tox/pre.yaml @@ -0,0 +1,14 @@ +- hosts: all + roles: + # Run bindep and test-setup after devstack so that they won't interfere + - role: bindep + bindep_profile: test + bindep_dir: "{{ zuul_work_dir }}" + - test-setup + # NOTE(gmann): Pinning tox<4.0.0 for stable/zed and lower. Tox 4.0.0 + # released after zed was released and has some incompatible changes + # and it is ok not to fix the issues caused by tox 4.0.0 in stable + # branch jobs. We can continue testing the stable/zed and lower + # branches with tox<4.0.0 + - role: ensure-tox + ensure_tox_version: "<4" diff --git a/playbooks/tox/run-both.yaml b/playbooks/tox/run-both.yaml new file mode 100644 index 0000000000..e4043d8231 --- /dev/null +++ b/playbooks/tox/run-both.yaml @@ -0,0 +1,11 @@ +- hosts: all + roles: + - run-devstack + # Run bindep and test-setup after devstack so that they won't interfere + - role: bindep + bindep_profile: test + bindep_dir: "{{ zuul_work_dir }}" + - test-setup + - ensure-tox + - get-devstack-os-environment + - tox diff --git a/playbooks/tox/run.yaml b/playbooks/tox/run.yaml new file mode 100644 index 0000000000..0d065c6ca2 --- /dev/null +++ b/playbooks/tox/run.yaml @@ -0,0 +1,4 @@ +- hosts: all + roles: + - get-devstack-os-environment + - tox diff --git a/playbooks/unit-tests/pre.yaml b/playbooks/unit-tests/pre.yaml new file mode 100644 index 0000000000..cfa1676378 --- /dev/null +++ b/playbooks/unit-tests/pre.yaml @@ -0,0 +1,13 @@ +- hosts: all + + tasks: + + - name: Install prerequisites + shell: + chdir: '{{ zuul.project.src_dir }}' + executable: /bin/bash + cmd: | + set -e + set -x + echo "IPV4_ADDRS_SAFE_TO_USE=10.1.0.0/20" >> localrc + ./tools/install_prereqs.sh diff --git a/playbooks/unit-tests/run.yaml b/playbooks/unit-tests/run.yaml new file mode 100644 index 0000000000..181521f072 --- /dev/null +++ b/playbooks/unit-tests/run.yaml @@ -0,0 +1,12 @@ +- hosts: all + + tasks: + + - name: Run run_tests.sh + shell: + chdir: '{{ zuul.project.src_dir }}' + executable: /bin/bash + cmd: | + set -e + set -x + ./run_tests.sh diff --git a/roles/apache-logs-conf/README.rst b/roles/apache-logs-conf/README.rst new file mode 100644 index 0000000000..eccee403a5 --- /dev/null +++ b/roles/apache-logs-conf/README.rst @@ -0,0 +1,12 @@ +Prepare apache configs and logs for staging + +Make sure apache config files and log files are available in a linux flavor +independent location. Note that this relies on hard links, so the staging +directory must be in the same partition where the logs and configs are. + +**Role Variables** + +.. zuul:rolevar:: stage_dir + :default: {{ ansible_user_dir }} + + The base stage directory.
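The ``external_bridge_mtu`` computed in ``playbooks/pre.yaml`` above is just the smallest MTU reported by any interface on the node, minus 30 bytes of VXLAN overhead and 40 bytes of IPv6 overhead. A minimal Python sketch of that arithmetic, with illustrative interface MTUs rather than real Ansible facts::

    # Sketch of the external_bridge_mtu calculation from playbooks/pre.yaml.
    # The interface MTUs below are illustrative stand-ins for Ansible facts.
    interface_mtus = {"eth0": 1500, "eth1": 9000, "lo": 65536}

    VXLAN_OVERHEAD = 30   # larger than GRE, so either overlay fits
    IPV6_OVERHEAD = 40    # also covers an IPv4 overlay

    local_mtu = min(interface_mtus.values())
    external_bridge_mtu = local_mtu - VXLAN_OVERHEAD - IPV6_OVERHEAD
    print(external_bridge_mtu)  # 1430 for a standard 1500-byte MTU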
diff --git a/roles/apache-logs-conf/defaults/main.yaml b/roles/apache-logs-conf/defaults/main.yaml new file mode 100644 index 0000000000..1fb04fedc8 --- /dev/null +++ b/roles/apache-logs-conf/defaults/main.yaml @@ -0,0 +1,2 @@ +devstack_base_dir: /opt/stack +stage_dir: "{{ ansible_user_dir }}" diff --git a/roles/apache-logs-conf/tasks/main.yaml b/roles/apache-logs-conf/tasks/main.yaml new file mode 100644 index 0000000000..6b7ea37857 --- /dev/null +++ b/roles/apache-logs-conf/tasks/main.yaml @@ -0,0 +1,90 @@ +- name: Ensure {{ stage_dir }}/apache exists + file: + path: "{{ stage_dir }}/apache" + state: directory + +- name: Link apache logs on Debian/SuSE + block: + - name: Find logs + find: + path: "/var/log/apache2" + file_type: any + register: debian_suse_apache_logs + + - name: Dereference files + stat: + path: "{{ item.path }}" + with_items: "{{ debian_suse_apache_logs.files }}" + register: debian_suse_apache_deref_logs + + - name: Create hard links + file: + src: "{{ item.stat.lnk_source | default(item.stat.path) }}" + dest: "{{ stage_dir }}/apache/{{ item.stat.path | basename }}" + state: hard + with_items: "{{ debian_suse_apache_deref_logs.results }}" + when: + - item.stat.isreg or item.stat.islnk + when: ansible_os_family in ('Debian', 'Suse') + no_log: true + +- name: Link apache logs on RedHat + block: + - name: Find logs + find: + path: "/var/log/httpd" + file_type: any + register: redhat_apache_logs + + - name: Dereference files + stat: + path: "{{ item.path }}" + with_items: "{{ redhat_apache_logs.files }}" + register: redhat_apache_deref_logs + + - name: Create hard links + file: + src: "{{ item.stat.lnk_source | default(item.stat.path) }}" + dest: "{{ stage_dir }}/apache/{{ item.stat.path | basename }}" + state: hard + with_items: "{{ redhat_apache_deref_logs.results }}" + when: + - item.stat.isreg or item.stat.islnk + when: ansible_os_family == 'RedHat' + no_log: true + +- name: Ensure {{ stage_dir }}/apache_config apache_config exists + file: + path: "{{ stage_dir }}/apache_config" + state: directory + +- name: Define config paths + set_fact: + apache_config_paths: + 'Debian': '/etc/apache2/sites-enabled/' + 'Suse': '/etc/apache2/conf.d/' + 'RedHat': '/etc/httpd/conf.d/' + 'openEuler': '/etc/httpd/conf.d/' + +- name: Discover configurations + find: + path: "{{ apache_config_paths[ansible_os_family] }}" + file_type: any + register: apache_configs + no_log: true + +- name: Dereference configurations + stat: + path: "{{ item.path }}" + with_items: "{{ apache_configs.files }}" + register: apache_configs_deref + no_log: true + +- name: Link configurations + file: + src: "{{ item.stat.lnk_source | default(item.stat.path) }}" + dest: "{{ stage_dir }}/apache_config/{{ item.stat.path | basename }}" + state: hard + with_items: "{{ apache_configs_deref.results }}" + when: item.stat.isreg or item.stat.islnk + no_log: true diff --git a/roles/capture-performance-data/README.rst b/roles/capture-performance-data/README.rst new file mode 100644 index 0000000000..b7a37c223f --- /dev/null +++ b/roles/capture-performance-data/README.rst @@ -0,0 +1,25 @@ +Generate performance logs for staging + +Captures usage information from mysql, systemd, apache logs, and other +parts of the system and generates a performance.json file in the +staging directory. + +**Role Variables** + +.. zuul:rolevar:: stage_dir + :default: {{ ansible_user_dir }} + + The base stage directory + +.. zuul:rolevar:: devstack_conf_dir + :default: /opt/stack + + The base devstack destination directory + +.. 
zuul:rolevar:: debian_suse_apache_deref_logs + + The apache logs found in the debian/suse locations + +.. zuul:rolevar:: redhat_apache_deref_logs + + The apache logs found in the redhat locations diff --git a/roles/capture-performance-data/defaults/main.yaml b/roles/capture-performance-data/defaults/main.yaml new file mode 100644 index 0000000000..7bd79f4c4f --- /dev/null +++ b/roles/capture-performance-data/defaults/main.yaml @@ -0,0 +1,3 @@ +devstack_base_dir: /opt/stack +devstack_conf_dir: "{{ devstack_base_dir }}" +stage_dir: "{{ ansible_user_dir }}" diff --git a/roles/capture-performance-data/tasks/main.yaml b/roles/capture-performance-data/tasks/main.yaml new file mode 100644 index 0000000000..51a11b60bc --- /dev/null +++ b/roles/capture-performance-data/tasks/main.yaml @@ -0,0 +1,18 @@ +- name: Generate statistics + shell: + executable: /bin/bash + cmd: | + source {{ devstack_conf_dir }}/stackrc + source {{ devstack_conf_dir }}/inc/python + setup_devstack_virtualenv + $PYTHON {{ devstack_conf_dir }}/tools/get-stats.py \ + --db-user="$DATABASE_USER" \ + --db-pass="$DATABASE_PASSWORD" \ + --db-host="$DATABASE_HOST" \ + {{ apache_logs }} > {{ stage_dir }}/performance.json + vars: + apache_logs: >- + {% for i in debian_suse_apache_deref_logs.results | default([]) + redhat_apache_deref_logs.results | default([]) %} + --apache-log="{{ i.stat.path }}" + {% endfor %} + ignore_errors: yes diff --git a/roles/capture-system-logs/README.rst b/roles/capture-system-logs/README.rst new file mode 100644 index 0000000000..1376f63bfc --- /dev/null +++ b/roles/capture-system-logs/README.rst @@ -0,0 +1,21 @@ +Stage a number of system type logs + +Stage a number of different logs / reports: +- snapshot of iptables +- disk space available +- pip[2|3] freeze +- installed packages (dpkg/rpm) +- ceph, openswitch, gluster +- coredumps +- dns resolver +- listen53 +- services +- unbound.log +- deprecation messages + +**Role Variables** + +.. zuul:rolevar:: stage_dir + :default: {{ ansible_user_dir }} + + The base stage directory. diff --git a/roles/capture-system-logs/defaults/main.yaml b/roles/capture-system-logs/defaults/main.yaml new file mode 100644 index 0000000000..fea05c8146 --- /dev/null +++ b/roles/capture-system-logs/defaults/main.yaml @@ -0,0 +1 @@ +devstack_base_dir: /opt/stack diff --git a/roles/capture-system-logs/tasks/main.yaml b/roles/capture-system-logs/tasks/main.yaml new file mode 100644 index 0000000000..4b5ec4836b --- /dev/null +++ b/roles/capture-system-logs/tasks/main.yaml @@ -0,0 +1,59 @@ +# TODO(andreaf) Make this into proper Ansible +- name: Stage various logs and reports + shell: + executable: /bin/bash + cmd: | + sudo iptables-save > {{ stage_dir }}/iptables.txt + + # NOTE(sfernand): Run 'df' with a 60s timeout to prevent hangs from + # stale NFS mounts. + timeout -s 9 60s df -h > {{ stage_dir }}/df.txt || true + # If 'df' times out, the mount output helps debug which NFS share + # is unresponsive. + mount > {{ stage_dir }}/mount.txt + + for py_ver in 2 3; do + if [[ `which python${py_ver}` ]]; then + python${py_ver} -m pip freeze > {{ stage_dir }}/pip${py_ver}-freeze.txt + fi + done + + if [ `command -v dpkg` ]; then + dpkg -l> {{ stage_dir }}/dpkg-l.txt + fi + if [ `command -v rpm` ]; then + rpm -qa | sort > {{ stage_dir }}/rpm-qa.txt + fi + + # Services status + sudo systemctl status --all > services.txt 2>/dev/null + + # NOTE(kchamart) The 'audit.log' can be useful in cases when QEMU + # failed to start due to denials from SELinux — useful for CentOS + # and Fedora machines. 
For Ubuntu (which runs AppArmor), DevStack + # already captures the contents of /var/log/kern.log (via + # `journalctl -t kernel` redirected into syslog.txt.gz), which + # contains AppArmor-related messages. + if [ -f /var/log/audit/audit.log ] ; then + sudo cp /var/log/audit/audit.log {{stage_dir }}/audit.log && + chmod +r {{ stage_dir }}/audit.log; + fi + + # gzip and save any coredumps in /var/core + if [ -d /var/core ]; then + sudo gzip -r /var/core + sudo cp -r /var/core {{ stage_dir }}/ + fi + + sudo ss -lntup | grep ':53' > {{ stage_dir }}/listen53.txt + + # NOTE(andreaf) Service logs are already in logs/ thanks for the + # export-devstack-journal log. Apache logs are under apache/ thans to the + # apache-logs-conf role. + grep -i deprecat {{ stage_dir }}/logs/*.txt {{ stage_dir }}/apache/*.log | \ + sed -r 's/[0-9]{1,2}\:[0-9]{1,2}\:[0-9]{1,2}\.[0-9]{1,3}/ /g' | \ + sed -r 's/[0-9]{1,2}\:[0-9]{1,2}\:[0-9]{1,2}/ /g' | \ + sed -r 's/[0-9]{1,4}-[0-9]{1,2}-[0-9]{1,4}/ /g' | + sed -r 's/\[.*\]/ /g' | \ + sed -r 's/\s[0-9]+\s/ /g' | \ + awk '{if ($0 in seen) {seen[$0]++} else {out[++n]=$0;seen[$0]=1}} END { for (i=1; i<=n; i++) print seen[out[i]]" :: " out[i] }' > {{ stage_dir }}/deprecations.log diff --git a/roles/devstack-ipv6-only-deployments-verification/README.rst b/roles/devstack-ipv6-only-deployments-verification/README.rst new file mode 100644 index 0000000000..3bddf5ea60 --- /dev/null +++ b/roles/devstack-ipv6-only-deployments-verification/README.rst @@ -0,0 +1,16 @@ +Verify all addresses in IPv6-only deployments + +This role needs to be invoked from a playbook that +runs tests. This role verifies the IPv6 settings on the +devstack side and that devstack deploys with all addresses +being IPv6. This role is invoked before tests are run so that +if there is any missing IPv6 setting, deployments can fail +the job early. + + +**Role Variables** + +.. zuul:rolevar:: devstack_base_dir + :default: /opt/stack + + The devstack base directory. diff --git a/roles/devstack-ipv6-only-deployments-verification/defaults/main.yaml b/roles/devstack-ipv6-only-deployments-verification/defaults/main.yaml new file mode 100644 index 0000000000..fea05c8146 --- /dev/null +++ b/roles/devstack-ipv6-only-deployments-verification/defaults/main.yaml @@ -0,0 +1 @@ +devstack_base_dir: /opt/stack diff --git a/roles/devstack-ipv6-only-deployments-verification/tasks/main.yaml b/roles/devstack-ipv6-only-deployments-verification/tasks/main.yaml new file mode 100644 index 0000000000..59d3b79bc1 --- /dev/null +++ b/roles/devstack-ipv6-only-deployments-verification/tasks/main.yaml @@ -0,0 +1,4 @@ +- name: Verify the ipv6-only deployments + become: true + become_user: stack + shell: "{{ devstack_base_dir }}/devstack/tools/verify-ipv6-only-deployments.sh" diff --git a/roles/devstack-project-conf/README.rst b/roles/devstack-project-conf/README.rst new file mode 100644 index 0000000000..3f2d4c9697 --- /dev/null +++ b/roles/devstack-project-conf/README.rst @@ -0,0 +1,11 @@ +Prepare OpenStack project configurations for staging + +Prepare all relevant config files for staging. +This is helpful to avoid staging the entire /etc. + +**Role Variables** + +.. zuul:rolevar:: stage_dir + :default: {{ ansible_user_dir }} + + The base stage directory. 
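The deprecation report built at the end of the capture-system-logs tasks above strips timestamps, dates, bracketed fields and bare numbers from each matching line, then counts how often every normalized line occurs. A rough Python equivalent of that sed/awk pipeline; the input path is illustrative only::

    import re

    def normalize(line):
        # Mirror the sed expressions: drop times, dates, bracketed fields
        # and bare numbers so repeated deprecations collapse to one key.
        line = re.sub(r'\d{1,2}:\d{1,2}:\d{1,2}(\.\d{1,3})?', ' ', line)
        line = re.sub(r'\d{1,4}-\d{1,2}-\d{1,4}', ' ', line)
        line = re.sub(r'\[.*\]', ' ', line)
        line = re.sub(r'\s\d+\s', ' ', line)
        return line

    counts = {}
    with open('logs/screen-n-api.txt') as f:   # illustrative input file
        for line in f:
            if 'deprecat' in line.lower():
                key = normalize(line)
                counts[key] = counts.get(key, 0) + 1

    for key, count in counts.items():
        print(f"{count} :: {key.strip()}")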
diff --git a/roles/devstack-project-conf/defaults/main.yaml b/roles/devstack-project-conf/defaults/main.yaml new file mode 100644 index 0000000000..f8fb8deac9 --- /dev/null +++ b/roles/devstack-project-conf/defaults/main.yaml @@ -0,0 +1 @@ +stage_dir: "{{ ansible_user_dir }}" diff --git a/roles/devstack-project-conf/tasks/main.yaml b/roles/devstack-project-conf/tasks/main.yaml new file mode 100644 index 0000000000..917cdbc370 --- /dev/null +++ b/roles/devstack-project-conf/tasks/main.yaml @@ -0,0 +1,25 @@ +- name: Ensure {{ stage_dir }}/etc exists + file: + path: "{{ stage_dir }}/etc" + state: directory + +- name: Check which projects have a config folder + stat: + path: "/etc/{{ item.value.short_name }}" + with_dict: "{{ zuul.projects }}" + register: project_configs + no_log: true + +- name: Copy configuration files + command: cp -pRL {{ item.stat.path }} {{ stage_dir }}/etc/{{ item.item.value.short_name }} + when: item.stat.exists + with_items: "{{ project_configs.results }}" + +- name: Check if openstack has a config folder + stat: + path: "/etc/openstack" + register: openstack_configs + +- name: Copy configuration files + command: cp -pRL /etc/openstack {{ stage_dir }}/etc/ + when: openstack_configs.stat.exists diff --git a/roles/export-devstack-journal/README.rst b/roles/export-devstack-journal/README.rst new file mode 100644 index 0000000000..9e3c919627 --- /dev/null +++ b/roles/export-devstack-journal/README.rst @@ -0,0 +1,25 @@ +Export journal files from devstack services + +This performs a number of log collection tasks: + +* Export the systemd journal in native format +* For every devstack service, export logs to text in a file named + ``screen-*`` to maintain legacy compatibility when devstack services + used to run in a screen session and were logged separately. +* Export a syslog-style file with kernel and sudo messages for legacy + compatibility. + +Writes the output to the ``logs/`` subdirectory of ``stage_dir``. + +**Role Variables** + +.. zuul:rolevar:: devstack_base_dir + :default: /opt/stack + + The devstack base directory. This is used to obtain the + ``log-start-timestamp.txt``, used to filter the systemd journal. + +.. zuul:rolevar:: stage_dir + :default: {{ ansible_user_dir }} + + The base stage directory.
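The ``screen-*`` naming described above is derived directly from the systemd unit names: the export task in the next hunk turns ``devstack@<service>.service`` into ``screen-<service>.txt``. A small Python sketch of that mapping::

    def screen_log_name(unit):
        """Map a devstack systemd unit to its legacy screen-style log name."""
        # e.g. "devstack@c-api.service" -> "screen-c-api.txt"
        name = unit.replace("devstack@", "screen-").replace(".service", "")
        return f"{name}.txt"

    assert screen_log_name("devstack@c-api.service") == "screen-c-api.txt"
    assert screen_log_name("devstack@n-cpu.service") == "screen-n-cpu.txt"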
diff --git a/roles/export-devstack-journal/defaults/main.yaml b/roles/export-devstack-journal/defaults/main.yaml new file mode 100644 index 0000000000..1fb04fedc8 --- /dev/null +++ b/roles/export-devstack-journal/defaults/main.yaml @@ -0,0 +1,2 @@ +devstack_base_dir: /opt/stack +stage_dir: "{{ ansible_user_dir }}" diff --git a/roles/export-devstack-journal/tasks/main.yaml b/roles/export-devstack-journal/tasks/main.yaml new file mode 100644 index 0000000000..db38b10a44 --- /dev/null +++ b/roles/export-devstack-journal/tasks/main.yaml @@ -0,0 +1,54 @@ +# NOTE(andreaf) This bypasses the stage-output role +- name: Ensure {{ stage_dir }}/logs exists + become: true + file: + path: "{{ stage_dir }}/logs" + state: directory + owner: "{{ ansible_user }}" + +- name: Export legacy stack screen log files + become: true + shell: + cmd: | + u="" + name="" + for u in $(systemctl list-unit-files | grep devstack | awk '{print $1}'); do + name=$(echo $u | sed 's/devstack@/screen-/' | sed 's/\.service//') + journalctl -o short-precise --unit $u > {{ stage_dir }}/logs/$name.txt + done + +- name: Export legacy syslog.txt + become: true + shell: + # The journal contains everything running under systemd, we'll + # build an old school version of the syslog with just the + # kernel and sudo messages. + cmd: | + journalctl \ + -t kernel \ + -t sudo \ + --no-pager \ + --since="$(cat {{ devstack_base_dir }}/log-start-timestamp.txt)" \ + > {{ stage_dir }}/logs/syslog.txt + +# TODO: convert this to ansible +# - make a list of the above units +# - iterate the list here +- name: Export journal + become: true + shell: + # Export the journal in export format to make it downloadable + # for later searching. It can then be rewritten to a journal native + # format locally using systemd-journal-remote. This makes a class of + # debugging much easier. We don't do the native conversion here as + # some distros do not package that tooling. + cmd: | + journalctl -o export \ + --since="$(cat {{ devstack_base_dir }}/log-start-timestamp.txt)" \ + | gzip > {{ stage_dir }}/logs/devstack.journal.gz + +- name: Save journal README + become: true + template: + src: devstack.journal.README.txt.j2 + dest: '{{ stage_dir }}/logs/devstack.journal.README.txt' diff --git a/roles/export-devstack-journal/templates/devstack.journal.README.txt.j2 b/roles/export-devstack-journal/templates/devstack.journal.README.txt.j2 new file mode 100644 index 0000000000..30519f63d7 --- /dev/null +++ b/roles/export-devstack-journal/templates/devstack.journal.README.txt.j2 @@ -0,0 +1,33 @@ +Devstack systemd journal +======================== + +The devstack.journal file is a copy of the systemd journal during the +devstack run. + +To use it, you will need to convert it so journalctl can read it +locally. After downloading the file: + + $ /lib/systemd/systemd-journal-remote <(zcat ./devstack.journal.gz) -o output.journal + +Note this binary is not in the regular path. On Debian/Ubuntu +platforms, you will need to have the "systemd-journal-remote" package +installed. + +It should result in something like: + + Finishing after writing entries + +You can then use journalctl to examine this file. 
For example, to see +all devstack services try: + + $ journalctl --file ./output.journal -u 'devstack@*' + +To see just cinder API server logs restrict the match with + + $ journalctl --file ./output.journal -u 'devstack@c-api' + +There may be many types of logs available in the journal, a command like + + $ journalctl --file ./output.journal --output=json-pretty | grep "_SYSTEMD_UNIT" | sort -u + +can help you find interesting things to filter on. \ No newline at end of file diff --git a/roles/fetch-devstack-log-dir/README.rst b/roles/fetch-devstack-log-dir/README.rst new file mode 100644 index 0000000000..360a2e3dd0 --- /dev/null +++ b/roles/fetch-devstack-log-dir/README.rst @@ -0,0 +1,10 @@ +Fetch content from the devstack log directory + +Copy logs from every host back to the zuul executor. + +**Role Variables** + +.. zuul:rolevar:: devstack_base_dir + :default: /opt/stack + + The devstack base directory. diff --git a/roles/fetch-devstack-log-dir/defaults/main.yaml b/roles/fetch-devstack-log-dir/defaults/main.yaml new file mode 100644 index 0000000000..fea05c8146 --- /dev/null +++ b/roles/fetch-devstack-log-dir/defaults/main.yaml @@ -0,0 +1 @@ +devstack_base_dir: /opt/stack diff --git a/roles/fetch-devstack-log-dir/tasks/main.yaml b/roles/fetch-devstack-log-dir/tasks/main.yaml new file mode 100644 index 0000000000..276c4e0eb5 --- /dev/null +++ b/roles/fetch-devstack-log-dir/tasks/main.yaml @@ -0,0 +1,10 @@ +# as the user in the guest may not exist on the executor +# we do not preserve the group or owner of the copied logs. + +- name: Collect devstack logs + synchronize: + dest: "{{ zuul.executor.log_root }}/{{ inventory_hostname }}" + mode: pull + src: "{{ devstack_base_dir }}/logs" + group: no + owner: no diff --git a/roles/get-devstack-os-environment/README.rst b/roles/get-devstack-os-environment/README.rst new file mode 100644 index 0000000000..68ddce8b5a --- /dev/null +++ b/roles/get-devstack-os-environment/README.rst @@ -0,0 +1,40 @@ +Reads the OS_* variables set by devstack through openrc +for the specified user and project and exports them as +the os_env_vars fact. + +**WARNING**: this role is meant to be used as porting aid +for the non-unified python-client jobs which +are already around, as those clients do not use clouds.yaml +as openstackclient does. +When those clients and their jobs are deprecated and removed, +or anyway when the new code is able to read from clouds.yaml +directly, this role should be removed as well. + + +**Role Variables** + +.. zuul:rolevar:: devstack_base_dir + :default: /opt/stack + + The devstack base directory. + +.. zuul:rolevar:: openrc_file + :default: {{ devstack_base_dir }}/devstack/openrc + + The location of the generated openrc file. + +.. zuul:rolevar:: openrc_user + :default: admin + + The user whose credentials should be retrieved. + +.. zuul:rolevar:: openrc_project + :default: admin + + The project (which openrc_user is part of) whose + access data should be retrieved. + +.. zuul:rolevar:: openrc_enable_export + :default: false + + Set it to true to export os_env_vars. 
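One way to picture what the get-devstack-os-environment role does: source the generated openrc in a throwaway shell and keep only the ``OS_*`` variables from the resulting environment. A hedged Python sketch of that idea; the openrc path and user/project are the role defaults, and this is not the role's actual implementation, which uses a shell and awk one-liner::

    import subprocess

    def read_os_vars(openrc="/opt/stack/devstack/openrc",
                     user="admin", project="admin"):
        """Source openrc in a child bash and collect the OS_* variables."""
        out = subprocess.check_output(
            ["bash", "-c",
             f"source {openrc} {user} {project} >/dev/null 2>&1; env"],
            text=True)
        return {k: v for k, v in
                (line.split("=", 1) for line in out.splitlines() if "=" in line)
                if k.startswith("OS_")}

    # os_env_vars = read_os_vars()  # e.g. {'OS_USERNAME': 'admin', ...}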
diff --git a/roles/get-devstack-os-environment/defaults/main.yaml b/roles/get-devstack-os-environment/defaults/main.yaml new file mode 100644 index 0000000000..f68ea560d0 --- /dev/null +++ b/roles/get-devstack-os-environment/defaults/main.yaml @@ -0,0 +1,6 @@ +devstack_base_dir: "/opt/stack" +openrc_file: "{{ devstack_base_dir }}/devstack/openrc" +openrc_user: admin +openrc_project: admin +openrc_enable_export: false +tox_environment: {} diff --git a/roles/get-devstack-os-environment/tasks/main.yaml b/roles/get-devstack-os-environment/tasks/main.yaml new file mode 100644 index 0000000000..b2c5e93ed4 --- /dev/null +++ b/roles/get-devstack-os-environment/tasks/main.yaml @@ -0,0 +1,14 @@ +- when: openrc_enable_export + block: + - name: Extract the OS_ environment variables + shell: + cmd: | + source {{ openrc_file }} {{ openrc_user }} {{ openrc_project }} &>/dev/null + env | awk -F= 'BEGIN {print "---" } /^OS_/ { print " "$1": \""$2"\""} ' + args: + executable: "/bin/bash" + register: env_os + + - name: Append the the OS_ environment variables to tox_environment + set_fact: + tox_environment: "{{ env_os.stdout|from_yaml|default({})|combine(tox_environment) }}" diff --git a/roles/orchestrate-devstack/README.rst b/roles/orchestrate-devstack/README.rst new file mode 100644 index 0000000000..097dcea55e --- /dev/null +++ b/roles/orchestrate-devstack/README.rst @@ -0,0 +1,25 @@ +Orchestrate a devstack + +Runs devstack in a multinode scenario, with one controller node +and a group of subnodes. + +The reason for this role is so that jobs in other repository may +run devstack in their plays with no need for re-implementing the +orchestration logic. + +The "run-devstack" role is available to run devstack with no +orchestration. + +This role sets up the controller and CA first, it then pushes CA +data to sub-nodes and run devstack there. The only requirement for +this role is for the controller inventory_hostname to be "controller" +and for all sub-nodes to be defined in a group called "subnode". + +This role needs to be invoked from a playbook that uses a "linear" strategy. + +**Role Variables** + +.. zuul:rolevar:: devstack_base_dir + :default: /opt/stack + + The devstack base directory. 
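Note the precedence in the final ``set_fact`` of the get-devstack-os-environment task above: the freshly extracted ``OS_*`` values are the base and the pre-existing ``tox_environment`` is layered on top, so anything already set in ``tox_environment`` wins. In Python terms, with illustrative values::

    # Same precedence as "env_os.stdout | from_yaml | combine(tox_environment)":
    # keys already present in tox_environment override the freshly read OS_ values.
    env_os = {"OS_USERNAME": "admin", "OS_AUTH_URL": "http://controller/identity"}
    tox_environment = {"OS_AUTH_URL": "http://override/identity"}

    merged = {**env_os, **tox_environment}
    assert merged["OS_AUTH_URL"] == "http://override/identity"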
diff --git a/roles/orchestrate-devstack/defaults/main.yaml b/roles/orchestrate-devstack/defaults/main.yaml new file mode 100644 index 0000000000..fea05c8146 --- /dev/null +++ b/roles/orchestrate-devstack/defaults/main.yaml @@ -0,0 +1 @@ +devstack_base_dir: /opt/stack diff --git a/roles/orchestrate-devstack/tasks/main.yaml b/roles/orchestrate-devstack/tasks/main.yaml new file mode 100644 index 0000000000..b8ee7e35a7 --- /dev/null +++ b/roles/orchestrate-devstack/tasks/main.yaml @@ -0,0 +1,50 @@ +- name: Run devstack on the controller + include_role: + name: run-devstack + when: inventory_hostname == 'controller' + +- name: Setup devstack on sub-nodes + any_errors_fatal: true + block: + + - name: Distribute the build sshkey for the user "stack" + include_role: + name: copy-build-sshkey + vars: + copy_sshkey_target_user: 'stack' + + - name: Sync CA data to subnodes (when any) + # Only do this if the tls-proxy service is defined and enabled + include_role: + name: sync-devstack-data + when: devstack_services['tls-proxy']|default(false) + + - name: Sync controller ceph.conf and key rings to subnode + include_role: + name: sync-controller-ceph-conf-and-keys + when: devstack_plugins is defined and 'devstack-plugin-ceph' in devstack_plugins + + - name: Run devstack on the sub-nodes + include_role: + name: run-devstack + when: inventory_hostname in groups['subnode'] + + - name: Discover hosts + # Discovers compute nodes (subnodes) and maps them to cells. Only run + # on the controller node. + # NOTE(mriedem): We want to remove this if/when nova supports + # auto-registration of computes with cells, but that's not happening in + # Ocata. + # NOTE(andreaf) This is taken (NOTE included) from the discover_hosts + # function in devstack gate. Since this is now in devstack, which is + # branched, we know that the discover_hosts tool exists. + become: true + become_user: stack + shell: ./tools/discover_hosts.sh + args: + chdir: "{{ devstack_base_dir }}/devstack" + when: inventory_hostname == 'controller' + + when: + - '"controller" in hostvars' + - '"subnode" in groups' diff --git a/roles/process-stackviz/README.rst b/roles/process-stackviz/README.rst new file mode 100644 index 0000000000..a8447d2355 --- /dev/null +++ b/roles/process-stackviz/README.rst @@ -0,0 +1,22 @@ +Generate stackviz report. + +Generate stackviz report using subunit and dstat data, using +the stackviz archive embedded in test images. + +**Role Variables** + +.. zuul:rolevar:: devstack_base_dir + :default: /opt/stack + + The devstack base directory. + +.. zuul:rolevar:: stage_dir + :default: "{{ ansible_user_dir }}" + + The stage directory where the input data can be found and + the output will be produced. + +.. zuul:rolevar:: zuul_work_dir + :default: {{ devstack_base_dir }}/tempest + + Directory to work in. It has to be a fully qualified path. 
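The report generation performed by the process-stackviz tasks that follow boils down to piping the subunit stream into ``stackviz-export``, adding ``--dstat`` only when a dstat log exists. A hedged sketch of that invocation; the paths match the role defaults used in the tasks below::

    import os
    import subprocess

    def run_stackviz(subunit, out_dir, dstat=None):
        """Feed a subunit stream to stackviz-export, with dstat data if present."""
        cmd = ["/tmp/stackviz/bin/stackviz-export", "--env", "--stdin", out_dir]
        if dstat and os.path.exists(dstat):
            cmd[1:1] = ["--dstat", dstat]
        with open(subunit, "rb") as stream:
            subprocess.run(cmd, stdin=stream, check=False)

    # run_stackviz("/opt/stack/tempest/testrepository.subunit",
    #              os.path.expanduser("~/stackviz/data"),
    #              dstat="/opt/stack/logs/dstat-csv.log")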
diff --git a/roles/process-stackviz/defaults/main.yaml b/roles/process-stackviz/defaults/main.yaml new file mode 100644 index 0000000000..f3bc32b149 --- /dev/null +++ b/roles/process-stackviz/defaults/main.yaml @@ -0,0 +1,3 @@ +devstack_base_dir: /opt/stack +stage_dir: "{{ ansible_user_dir }}" +zuul_work_dir: "{{ devstack_base_dir }}/tempest" diff --git a/roles/process-stackviz/tasks/main.yaml b/roles/process-stackviz/tasks/main.yaml new file mode 100644 index 0000000000..3ba3d9c2e6 --- /dev/null +++ b/roles/process-stackviz/tasks/main.yaml @@ -0,0 +1,73 @@ +- name: Process Stackviz + block: + + - name: Devstack checks if stackviz archive exists + stat: + path: "/opt/cache/files/stackviz-latest.tar.gz" + register: stackviz_archive + + - debug: + msg: "Stackviz archive could not be found in /opt/cache/files/stackviz-latest.tar.gz" + when: not stackviz_archive.stat.exists + + - name: Check if subunit data exists + stat: + path: "{{ zuul_work_dir }}/testrepository.subunit" + register: subunit_input + + - debug: + msg: "Subunit file could not be found at {{ zuul_work_dir }}/testrepository.subunit" + when: not subunit_input.stat.exists + + - name: Install stackviz + when: + - stackviz_archive.stat.exists + - subunit_input.stat.exists + block: + - include_role: + name: ensure-pip + + - pip: + name: "file://{{ stackviz_archive.stat.path }}" + virtualenv: /tmp/stackviz + virtualenv_command: '{{ ensure_pip_virtualenv_command }}' + extra_args: -U + + - name: Deploy stackviz static html+js + command: cp -pR /tmp/stackviz/share/stackviz-html {{ stage_dir }}/stackviz + when: + - stackviz_archive.stat.exists + - subunit_input.stat.exists + + - name: Check if dstat data exists + stat: + path: "{{ devstack_base_dir }}/logs/dstat-csv.log" + register: dstat_input + when: + - stackviz_archive.stat.exists + - subunit_input.stat.exists + + - name: Run stackviz with dstat + shell: | + cat {{ subunit_input.stat.path }} | \ + /tmp/stackviz/bin/stackviz-export \ + --dstat "{{ devstack_base_dir }}/logs/dstat-csv.log" \ + --env --stdin \ + {{ stage_dir }}/stackviz/data + when: + - stackviz_archive.stat.exists + - subunit_input.stat.exists + - dstat_input.stat.exists + + - name: Run stackviz without dstat + shell: | + cat {{ subunit_input.stat.path }} | \ + /tmp/stackviz/bin/stackviz-export \ + --env --stdin \ + {{ stage_dir }}/stackviz/data + when: + - stackviz_archive.stat.exists + - subunit_input.stat.exists + - not dstat_input.stat.exists + + ignore_errors: yes diff --git a/roles/run-devstack/README.rst b/roles/run-devstack/README.rst new file mode 100644 index 0000000000..d77eb15e99 --- /dev/null +++ b/roles/run-devstack/README.rst @@ -0,0 +1,8 @@ +Run devstack + +**Role Variables** + +.. zuul:rolevar:: devstack_base_dir + :default: /opt/stack + + The devstack base directory. diff --git a/roles/run-devstack/defaults/main.yaml b/roles/run-devstack/defaults/main.yaml new file mode 100644 index 0000000000..fea05c8146 --- /dev/null +++ b/roles/run-devstack/defaults/main.yaml @@ -0,0 +1 @@ +devstack_base_dir: /opt/stack diff --git a/roles/run-devstack/tasks/main.yaml b/roles/run-devstack/tasks/main.yaml new file mode 100644 index 0000000000..f58b31d477 --- /dev/null +++ b/roles/run-devstack/tasks/main.yaml @@ -0,0 +1,11 @@ +- name: Run devstack + shell: + cmd: | + ./stack.sh 2>&1 + rc=$? 
+ echo "*** FINISHED ***" + exit $rc + args: + chdir: "{{devstack_base_dir}}/devstack" + become: true + become_user: stack diff --git a/roles/setup-devstack-cache/README.rst b/roles/setup-devstack-cache/README.rst new file mode 100644 index 0000000000..b8938c3dea --- /dev/null +++ b/roles/setup-devstack-cache/README.rst @@ -0,0 +1,15 @@ +Set up the devstack cache directory + +If the node has a cache of devstack image files, copy it into place. + +**Role Variables** + +.. zuul:rolevar:: devstack_base_dir + :default: /opt/stack + + The devstack base directory. + +.. zuul:rolevar:: devstack_cache_dir + :default: /opt/cache + + The directory with the cached files. diff --git a/roles/setup-devstack-cache/defaults/main.yaml b/roles/setup-devstack-cache/defaults/main.yaml new file mode 100644 index 0000000000..c56720b4f5 --- /dev/null +++ b/roles/setup-devstack-cache/defaults/main.yaml @@ -0,0 +1,2 @@ +devstack_base_dir: /opt/stack +devstack_cache_dir: /opt/cache diff --git a/roles/setup-devstack-cache/tasks/main.yaml b/roles/setup-devstack-cache/tasks/main.yaml new file mode 100644 index 0000000000..3adff17d5d --- /dev/null +++ b/roles/setup-devstack-cache/tasks/main.yaml @@ -0,0 +1,15 @@ +- name: Copy cached devstack files + # This uses hard links to avoid using extra space. + command: "find {{ devstack_cache_dir }}/files -mindepth 1 -maxdepth 1 -exec cp -l {} {{ devstack_base_dir }}/devstack/files/ ;" + become: true + ignore_errors: yes + +- name: Set ownership of cached files + file: + path: '{{ devstack_base_dir }}/devstack/files' + state: directory + recurse: true + owner: stack + group: stack + mode: a+r + become: yes diff --git a/roles/setup-devstack-log-dir/README.rst b/roles/setup-devstack-log-dir/README.rst new file mode 100644 index 0000000000..9d8dba3442 --- /dev/null +++ b/roles/setup-devstack-log-dir/README.rst @@ -0,0 +1,11 @@ +Set up the devstack log directory + +Create a log directory on the ephemeral disk partition to save space +on the root device. + +**Role Variables** + +.. zuul:rolevar:: devstack_base_dir + :default: /opt/stack + + The devstack base directory. diff --git a/roles/setup-devstack-log-dir/defaults/main.yaml b/roles/setup-devstack-log-dir/defaults/main.yaml new file mode 100644 index 0000000000..fea05c8146 --- /dev/null +++ b/roles/setup-devstack-log-dir/defaults/main.yaml @@ -0,0 +1 @@ +devstack_base_dir: /opt/stack diff --git a/roles/setup-devstack-log-dir/tasks/main.yaml b/roles/setup-devstack-log-dir/tasks/main.yaml new file mode 100644 index 0000000000..d8e8cfe70a --- /dev/null +++ b/roles/setup-devstack-log-dir/tasks/main.yaml @@ -0,0 +1,8 @@ +- name: Create logs directory + file: + path: '{{ devstack_base_dir }}/logs' + state: directory + mode: 0755 + owner: stack + group: stack + become: yes diff --git a/roles/setup-devstack-source-dirs/README.rst b/roles/setup-devstack-source-dirs/README.rst new file mode 100644 index 0000000000..0aa048b7d2 --- /dev/null +++ b/roles/setup-devstack-source-dirs/README.rst @@ -0,0 +1,16 @@ +Set up the devstack source directories + +Ensure that the base directory exists, and then move the source repos +into it. + +**Role Variables** + +.. zuul:rolevar:: devstack_base_dir + :default: /opt/stack + + The devstack base directory. + +.. zuul:rolevar:: devstack_sources_branch + :default: None + + The target branch to be setup (where available). 
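The setup-devstack-cache copy above uses ``cp -l``, so cached images are hard-linked rather than duplicated: the "copy" costs no extra disk space, but it only works because the cache and the devstack files directory live on the same filesystem (the same constraint the apache-logs-conf staging directory has). A minimal Python sketch of the same idea, handling plain files only::

    import errno
    import os
    import shutil

    def link_cached_files(cache_dir="/opt/cache/files",
                          dest_dir="/opt/stack/devstack/files"):
        """Hard-link cached files into the devstack files directory."""
        for name in os.listdir(cache_dir):
            src = os.path.join(cache_dir, name)
            dst = os.path.join(dest_dir, name)
            if not os.path.isfile(src) or os.path.exists(dst):
                continue
            try:
                os.link(src, dst)          # free "copy" on the same filesystem
            except OSError as e:
                if e.errno != errno.EXDEV:
                    raise
                shutil.copy2(src, dst)     # real copy when crossing devices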
diff --git a/roles/setup-devstack-source-dirs/defaults/main.yaml b/roles/setup-devstack-source-dirs/defaults/main.yaml new file mode 100644 index 0000000000..77a74d7b89 --- /dev/null +++ b/roles/setup-devstack-source-dirs/defaults/main.yaml @@ -0,0 +1,9 @@ +devstack_base_dir: /opt/stack +devstack_source_dirs: + - src/opendev.org/opendev + - src/opendev.org/openstack + - src/opendev.org/openstack-dev + - src/opendev.org/openstack-infra + - src/opendev.org/starlingx + - src/opendev.org/x + - src/opendev.org/zuul diff --git a/roles/setup-devstack-source-dirs/tasks/main.yaml b/roles/setup-devstack-source-dirs/tasks/main.yaml new file mode 100644 index 0000000000..cb7c6e3af8 --- /dev/null +++ b/roles/setup-devstack-source-dirs/tasks/main.yaml @@ -0,0 +1,72 @@ +- name: Find all OpenStack source repos used by this job + find: + paths: "{{ devstack_source_dirs }}" + file_type: directory + register: found_repos + +- name: Copy Zuul repos into devstack working directory + command: rsync -a {{ item.path }} {{ devstack_base_dir }} + with_items: '{{ found_repos.files }}' + become: yes + +# Github projects are github.com/username/repo (username might be a +# top-level project too), so we have to do a two-step swizzle to just +# get the full repo path (ansible's find module doesn't help with this +# :/) +- name: Find top level github projects + find: + paths: + - src/github.com + file_type: directory + register: found_github_projects + +- name: Find actual github repos + find: + paths: '{{ found_github_projects.files | map(attribute="path") | list }}' + file_type: directory + register: found_github_repos + when: found_github_projects.files + +- name: Copy github repos into devstack working directory + command: rsync -a {{ item.path }} {{ devstack_base_dir }} + with_items: '{{ found_github_repos.files }}' + become: yes + when: found_github_projects.files + +- name: Setup refspec for repos into devstack working directory + shell: + # Copied almost "as-is" from devstack-gate setup-workspace function + # but removing the dependency on functions.sh + # TODO this should be rewritten as a python module. + cmd: | + cd {{ devstack_base_dir }}/{{ item.path | basename }} + base_branch={{ devstack_sources_branch }} + if git branch -a | grep "$base_branch" > /dev/null ; then + git checkout $base_branch + elif [[ "$base_branch" == stable/* ]] || [[ "$base_branch" == unmaintained/* ]]; then + # Look for an eol tag for the stable branch. + eol_tag="${base_branch#*/}-eol" + if git tag -l |grep $eol_tag >/dev/null; then + git checkout $eol_tag + git reset --hard $eol_tag + if ! git clean -x -f -d -q ; then + sleep 1 + git clean -x -f -d -q + fi + fi + else + git checkout master + fi + args: + executable: /bin/bash + with_items: '{{ found_repos.files }}' + when: devstack_sources_branch is defined + +- name: Set ownership of repos + file: + path: '{{ devstack_base_dir }}' + state: directory + recurse: true + owner: stack + group: stack + become: yes diff --git a/roles/setup-stack-user/README.rst b/roles/setup-stack-user/README.rst new file mode 100644 index 0000000000..80c4d39eff --- /dev/null +++ b/roles/setup-stack-user/README.rst @@ -0,0 +1,16 @@ +Set up the `stack` user + +Create the stack user, set up its home directory, and allow it to +sudo. + +**Role Variables** + +.. zuul:rolevar:: devstack_base_dir + :default: /opt/stack + + The devstack base directory. + +.. zuul:rolevar:: devstack_stack_home_dir + :default: {{ devstack_base_dir }} + + The home directory for the stack user. 
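The refspec setup task in setup-devstack-source-dirs above follows a simple decision: check out the requested branch when the repository has it; for ``stable/*`` or ``unmaintained/*`` branches that no longer exist, fall back to the corresponding ``*-eol`` tag if present; otherwise use ``master``. Expressed as a Python sketch of the same decision::

    def pick_checkout_target(base_branch, branches, tags):
        """Mirror the branch/eol-tag fallback used when syncing source dirs."""
        if base_branch in branches:
            return base_branch
        if base_branch.startswith(("stable/", "unmaintained/")):
            eol_tag = base_branch.split("/", 1)[1] + "-eol"   # e.g. "zed-eol"
            # None means: leave the checkout as it is, just like the shell code.
            return eol_tag if eol_tag in tags else None
        return "master"

    assert pick_checkout_target("stable/zed", ["master"], ["zed-eol"]) == "zed-eol"
    assert pick_checkout_target("feature/foo", ["master"], []) == "master"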
diff --git a/roles/setup-stack-user/defaults/main.yaml b/roles/setup-stack-user/defaults/main.yaml new file mode 100644 index 0000000000..6d0be666d4 --- /dev/null +++ b/roles/setup-stack-user/defaults/main.yaml @@ -0,0 +1,2 @@ +devstack_base_dir: /opt/stack +devstack_stack_home_dir: '{{ devstack_base_dir }}' diff --git a/roles/setup-stack-user/files/50_stack_sh b/roles/setup-stack-user/files/50_stack_sh new file mode 100644 index 0000000000..4c6b46bdb1 --- /dev/null +++ b/roles/setup-stack-user/files/50_stack_sh @@ -0,0 +1 @@ +stack ALL=(root) NOPASSWD:ALL diff --git a/roles/setup-stack-user/tasks/main.yaml b/roles/setup-stack-user/tasks/main.yaml new file mode 100644 index 0000000000..0fc7c2d78b --- /dev/null +++ b/roles/setup-stack-user/tasks/main.yaml @@ -0,0 +1,47 @@ +- name: Create stack group + group: + name: stack + become: yes + +# NOTE(andreaf) Create a user home_dir is not safe via +# the user module since it will fail if the containing +# folder does not exists. If the folder does exists and +# it's empty, the skeleton is setup and ownership set. +- name: Create the stack user home folder + file: + path: '{{ devstack_stack_home_dir }}' + state: directory + become: yes + +- name: Create stack user + user: + name: stack + shell: /bin/bash + home: '{{ devstack_stack_home_dir }}' + group: stack + become: yes + +- name: Set stack user home directory permissions and ownership + file: + path: '{{ devstack_stack_home_dir }}' + mode: 0755 + owner: stack + group: stack + become: yes + +- name: Copy 50_stack_sh file to /etc/sudoers.d + copy: + src: 50_stack_sh + dest: /etc/sudoers.d + mode: 0440 + owner: root + group: root + become: yes + +- name: Create .cache folder within BASE + file: + path: '{{ devstack_stack_home_dir }}/.cache' + state: directory + owner: stack + group: stack + become: yes diff --git a/roles/setup-tempest-user/README.rst b/roles/setup-tempest-user/README.rst new file mode 100644 index 0000000000..bb29c50a28 --- /dev/null +++ b/roles/setup-tempest-user/README.rst @@ -0,0 +1,10 @@ +Set up the `tempest` user + +Create the tempest user and allow it to sudo. + +**Role Variables** + +.. zuul:rolevar:: devstack_base_dir + :default: /opt/stack + + The devstack base directory. diff --git a/roles/setup-tempest-user/files/51_tempest_sh b/roles/setup-tempest-user/files/51_tempest_sh new file mode 100644 index 0000000000..f88ff9f4f2 --- /dev/null +++ b/roles/setup-tempest-user/files/51_tempest_sh @@ -0,0 +1,3 @@ +tempest ALL=(root) NOPASSWD:/sbin/ip +tempest ALL=(root) NOPASSWD:/sbin/iptables +tempest ALL=(root) NOPASSWD:/usr/bin/ovsdb-client diff --git a/roles/setup-tempest-user/tasks/main.yaml b/roles/setup-tempest-user/tasks/main.yaml new file mode 100644 index 0000000000..892eaf655a --- /dev/null +++ b/roles/setup-tempest-user/tasks/main.yaml @@ -0,0 +1,20 @@ +- name: Create tempest group + group: + name: tempest + become: yes + +- name: Create tempest user + user: + name: tempest + shell: /bin/bash + group: tempest + become: yes + +- name: Copy 51_tempest_sh to /etc/sudoers.d + copy: + src: 51_tempest_sh + dest: /etc/sudoers.d + owner: root + group: root + mode: 0440 + become: yes diff --git a/roles/start-fresh-logging/README.rst b/roles/start-fresh-logging/README.rst new file mode 100644 index 0000000000..11b029e182 --- /dev/null +++ b/roles/start-fresh-logging/README.rst @@ -0,0 +1,11 @@ +Restart logging on all hosts + +Restart syslog so that the system logs only include output from the +job. + +**Role Variables** + +.. 
zuul:rolevar:: devstack_base_dir + :default: /opt/stack + + The devstack base directory. diff --git a/roles/start-fresh-logging/defaults/main.yaml b/roles/start-fresh-logging/defaults/main.yaml new file mode 100644 index 0000000000..fea05c8146 --- /dev/null +++ b/roles/start-fresh-logging/defaults/main.yaml @@ -0,0 +1 @@ +devstack_base_dir: /opt/stack diff --git a/roles/start-fresh-logging/tasks/main.yaml b/roles/start-fresh-logging/tasks/main.yaml new file mode 100644 index 0000000000..6c7ba66de7 --- /dev/null +++ b/roles/start-fresh-logging/tasks/main.yaml @@ -0,0 +1,56 @@ +- name: Check for /bin/journalctl file + command: which journalctl + changed_when: False + failed_when: False + register: which_out + +- block: + - name: Get current date + command: date +"%Y-%m-%d %H:%M:%S" + register: date_out + + - name: Copy current date to log-start-timestamp.txt + copy: + dest: "{{ devstack_base_dir }}/log-start-timestamp.txt" + content: "{{ date_out.stdout }}" + when: which_out.rc == 0 + become: yes + +- block: + - name: Stop rsyslog + service: name=rsyslog state=stopped + + - name: Save syslog file prior to devstack run + command: mv /var/log/syslog /var/log/syslog-pre-devstack + + - name: Save kern.log file prior to devstack run + command: mv /var/log/kern.log /var/log/kern_log-pre-devstack + + - name: Recreate syslog file + file: name=/var/log/syslog state=touch + + - name: Recreate syslog file owner and group + command: chown /var/log/syslog --ref /var/log/syslog-pre-devstack + + - name: Recreate syslog file permissions + command: chmod /var/log/syslog --ref /var/log/syslog-pre-devstack + + - name: Add read permissions to all on syslog file + file: name=/var/log/syslog mode=a+r + + - name: Recreate kern.log file + file: name=/var/log/kern.log state=touch + + - name: Recreate kern.log file owner and group + command: chown /var/log/kern.log --ref /var/log/kern_log-pre-devstack + + - name: Recreate kern.log file permissions + command: chmod /var/log/kern.log --ref /var/log/kern_log-pre-devstack + + - name: Add read permissions to all on kern.log file + file: name=/var/log/kern.log mode=a+r + + - name: Start rsyslog + service: name=rsyslog state=started + when: which_out.rc == 1 + become: yes diff --git a/roles/sync-controller-ceph-conf-and-keys/README.rst b/roles/sync-controller-ceph-conf-and-keys/README.rst new file mode 100644 index 0000000000..e3d2bb42a4 --- /dev/null +++ b/roles/sync-controller-ceph-conf-and-keys/README.rst @@ -0,0 +1,3 @@ +Sync ceph config and keys between controller and subnodes + +Simply copy the contents of /etc/ceph on the controller to subnodes. diff --git a/roles/sync-controller-ceph-conf-and-keys/tasks/main.yaml b/roles/sync-controller-ceph-conf-and-keys/tasks/main.yaml new file mode 100644 index 0000000000..71ece579e6 --- /dev/null +++ b/roles/sync-controller-ceph-conf-and-keys/tasks/main.yaml @@ -0,0 +1,15 @@ +- name: Ensure /etc/ceph exists on subnode + become: true + file: + path: /etc/ceph + state: directory + +- name: Copy /etc/ceph from controller to subnode + become: true + synchronize: + owner: yes + group: yes + perms: yes + src: /etc/ceph/ + dest: /etc/ceph/ + delegate_to: controller diff --git a/roles/sync-devstack-data/README.rst b/roles/sync-devstack-data/README.rst new file mode 100644 index 0000000000..388625c893 --- /dev/null +++ b/roles/sync-devstack-data/README.rst @@ -0,0 +1,19 @@ +Sync devstack data for multinode configurations + +Sync any data files which include certificates to be used if TLS is enabled. 
+This role must be executed on the controller and it pushes data to all +subnodes. + +**Role Variables** + +.. zuul:rolevar:: devstack_base_dir + :default: /opt/stack + + The devstack base directory. + +.. zuul:rolevar:: devstack_data_base_dir + :default: {{ devstack_base_dir }} + + The devstack base directory for data/. + Useful for example when multiple executions of devstack (i.e. grenade) + share the same data directory. diff --git a/roles/sync-devstack-data/defaults/main.yaml b/roles/sync-devstack-data/defaults/main.yaml new file mode 100644 index 0000000000..6b5017b811 --- /dev/null +++ b/roles/sync-devstack-data/defaults/main.yaml @@ -0,0 +1,2 @@ +devstack_base_dir: /opt/stack +devstack_data_base_dir: "{{ devstack_base_dir }}" diff --git a/roles/sync-devstack-data/tasks/main.yaml b/roles/sync-devstack-data/tasks/main.yaml new file mode 100644 index 0000000000..a1d37c3951 --- /dev/null +++ b/roles/sync-devstack-data/tasks/main.yaml @@ -0,0 +1,59 @@ +- name: Ensure the data folder exists + become: true + file: + path: "{{ devstack_data_base_dir }}/data" + state: directory + owner: stack + group: stack + mode: 0755 + when: 'inventory_hostname in groups["subnode"]|default([])' + +- name: Ensure the CA folder exists + become: true + file: + path: "{{ devstack_data_base_dir }}/data/CA" + state: directory + owner: stack + group: stack + mode: 0755 + when: 'inventory_hostname in groups["subnode"]|default([])' + +- name: Pull the CA certificate and folder + become: true + synchronize: + src: "{{ item }}" + dest: "{{ zuul.executor.work_root }}/{{ item | basename }}" + mode: pull + with_items: + - "{{ devstack_data_base_dir }}/data/ca-bundle.pem" + - "{{ devstack_data_base_dir }}/data/CA" + when: inventory_hostname == 'controller' + +- name: Push the CA certificate + become: true + become_user: stack + synchronize: + src: "{{ zuul.executor.work_root }}/ca-bundle.pem" + dest: "{{ devstack_data_base_dir }}/data/ca-bundle.pem" + mode: push + when: 'inventory_hostname in groups["subnode"]|default([])' + +- name: Push the CA folder + become: true + become_user: stack + synchronize: + src: "{{ zuul.executor.work_root }}/CA/" + dest: "{{ devstack_data_base_dir }}/data/" + mode: push + when: 'inventory_hostname in groups["subnode"]|default([])' + +- name: Ensure the data folder and subfolders have the correct permissions + become: true + file: + path: "{{ devstack_data_base_dir }}/data" + state: directory + owner: stack + group: stack + mode: 0755 + recurse: yes + when: 'inventory_hostname in groups["subnode"]|default([])' diff --git a/roles/write-devstack-local-conf/README.rst b/roles/write-devstack-local-conf/README.rst new file mode 100644 index 0000000000..d0a51e77c2 --- /dev/null +++ b/roles/write-devstack-local-conf/README.rst @@ -0,0 +1,99 @@ +Write the local.conf file for use by devstack + +**Role Variables** + +.. zuul:rolevar:: devstack_base_dir + :default: /opt/stack + + The devstack base directory. + +.. zuul:rolevar:: devstack_local_conf_path + :default: {{ devstack_base_dir }}/devstack/local.conf + + The path of the local.conf file. + +.. zuul:rolevar:: devstack_localrc + :type: dict + + A dictionary of variables that should be written to the localrc + section of local.conf. The values (which are strings) may contain + bash shell variables, and will be ordered so that variables used by + later entries appear first. 
+ + As a special case, the variable ``LIBS_FROM_GIT`` will be + constructed automatically from the projects which appear in the + ``required-projects`` list defined by the job plus the project of + the change under test. To instruct devstack to install a library + from source rather than pypi, simply add that library to the job's + ``required-projects`` list. To override the + automatically-generated value, set ``LIBS_FROM_GIT`` in + ``devstack_localrc`` to the desired value. + +.. zuul:rolevar:: devstack_local_conf + :type: dict + + A complex argument consisting of nested dictionaries which combine + to form the meta-sections of the local.conf file. The top level is + a dictionary of phases, followed by dictionaries of filenames, then + sections, which finally contain key-value pairs for the INI file + entries in those sections. + + The keys in this dictionary are the devstack phases. + + .. zuul:rolevar:: [phase] + :type: dict + + The keys in this dictionary are the filenames for this phase. + + .. zuul:rolevar:: [filename] + :type: dict + + The keys in this dictionary are the INI sections in this file. + + .. zuul:rolevar:: [section] + :type: dict + + This is a dictionary of key-value pairs which comprise + this section of the INI file. + +.. zuul:rolevar:: devstack_base_services + :type: list + :default: {{ base_services | default(omit) }} + + A list of base services which are enabled. Services can be added or removed + from this list via the ``devstack_services`` variable. This is ignored if + ``base`` is set to ``False`` in ``devstack_services``. + +.. zuul:rolevar:: devstack_services + :type: dict + + A dictionary mapping service names to boolean values. If the + boolean value is ``false``, a ``disable_service`` line will be + emitted for the service name. If it is ``true``, then + ``enable_service`` will be emitted. All other values are ignored. + + The special key ``base`` can be used to enable or disable the base set of + services enabled by default. If ``base`` is found, it will be processed before + all other keys. If its value is ``False``, a ``disable_all_services`` will be + emitted; if its value is ``True`` services from ``devstack_base_services`` + will be emitted via ``ENABLED_SERVICES``. + +.. zuul:rolevar:: devstack_plugins + :type: dict + + A dictionary mapping a plugin name to a git repo location. If the + location is a non-empty string, then an ``enable_plugin`` line will + be emitted for the plugin name. + + If a plugin declares a dependency on another plugin (via + ``plugin_requires`` in the plugin's settings file), this role will + automatically emit ``enable_plugin`` lines in the correct order. + +.. zuul:rolevar:: tempest_plugins + :type: list + + A list of tempest plugins which are installed alongside tempest. + + The list of values will be combined with the base devstack directory + and used to populate the ``TEMPEST_PLUGINS`` variable. If the variable + already exists, its value is *not* changed.
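As an illustration of the nesting described above, a hedged example of a ``devstack_local_conf`` value and the local.conf meta-section it would roughly produce; the file variable, section and option shown here are placeholders, not a recommended configuration::

    # Hypothetical devstack_local_conf value, expressed as a Python dict with
    # the same phase -> filename -> section -> option nesting the role expects.
    devstack_local_conf = {
        "post-config": {
            "$NOVA_CONF": {
                "DEFAULT": {"debug": True},
            },
        },
    }

    # Rendered into local.conf this becomes, roughly:
    #
    #   [[post-config|$NOVA_CONF]]
    #   [DEFAULT]
    #   debug = True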
diff --git a/roles/write-devstack-local-conf/defaults/main.yaml b/roles/write-devstack-local-conf/defaults/main.yaml new file mode 100644 index 0000000000..7bc1dec9b8 --- /dev/null +++ b/roles/write-devstack-local-conf/defaults/main.yaml @@ -0,0 +1,3 @@ +devstack_base_dir: /opt/stack +devstack_local_conf_path: "{{ devstack_base_dir }}/devstack/local.conf" +devstack_base_services: "{{ enabled_services | default(omit) }}" diff --git a/roles/write-devstack-local-conf/library/devstack_local_conf.py b/roles/write-devstack-local-conf/library/devstack_local_conf.py new file mode 100644 index 0000000000..2f97d0e355 --- /dev/null +++ b/roles/write-devstack-local-conf/library/devstack_local_conf.py @@ -0,0 +1,351 @@ +# Copyright (C) 2017 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import re + + +class DependencyGraph(object): + # This is based on the JobGraph from Zuul. + + def __init__(self): + self._names = set() + self._dependencies = {} # dependent_name -> set(parent_names) + + def add(self, name, dependencies): + # Append the dependency information + self._dependencies.setdefault(name, set()) + try: + for dependency in dependencies: + # Make sure a circular dependency is never created + ancestors = self._getParentNamesRecursively( + dependency, soft=True) + ancestors.add(dependency) + if name in ancestors: + raise Exception("Dependency cycle detected in {}". 
+ format(name)) + self._dependencies[name].add(dependency) + except Exception: + del self._dependencies[name] + raise + + def getDependenciesRecursively(self, parent): + dependencies = [] + + current_dependencies = self._dependencies[parent] + for current in current_dependencies: + if current not in dependencies: + dependencies.append(current) + for dep in self.getDependenciesRecursively(current): + if dep not in dependencies: + dependencies.append(dep) + return dependencies + + def _getParentNamesRecursively(self, dependent, soft=False): + all_parent_items = set() + items_to_iterate = set([dependent]) + while len(items_to_iterate) > 0: + current_item = items_to_iterate.pop() + current_parent_items = self._dependencies.get(current_item) + if current_parent_items is None: + if soft: + current_parent_items = set() + else: + raise Exception("Dependent item {} not found: ".format( + dependent)) + new_parent_items = current_parent_items - all_parent_items + items_to_iterate |= new_parent_items + all_parent_items |= new_parent_items + return all_parent_items + + +class VarGraph(DependencyGraph): + def __init__(self, vars): + super(VarGraph, self).__init__() + self.vars = {} + self._varnames = set() + for k, v in vars.items(): + self._varnames.add(k) + for k, v in vars.items(): + self._addVar(k, str(v)) + + bash_var_re = re.compile(r'\$\{?(\w+)') + def getDependencies(self, value): + return self.bash_var_re.findall(value) + + def _addVar(self, key, value): + if key in self.vars: + raise Exception("Variable {} already added".format(key)) + self.vars[key] = value + # Append the dependency information + dependencies = set() + for dependency in self.getDependencies(value): + if dependency == key: + # A variable is allowed to reference itself; no + # dependency link needed in that case. + continue + if dependency not in self._varnames: + # It's not necessary to create a link for an + # external variable. + continue + dependencies.add(dependency) + try: + self.add(key, dependencies) + except Exception: + del self.vars[key] + raise + + def getVars(self): + ret = [] + keys = sorted(self.vars.keys()) + seen = set() + for key in keys: + dependencies = self.getDependenciesRecursively(key) + for var in dependencies + [key]: + if var not in seen: + ret.append((var, self.vars[var])) + seen.add(var) + return ret + + +class PluginGraph(DependencyGraph): + def __init__(self, base_dir, plugins): + super(PluginGraph, self).__init__() + # The dependency trees expressed by all the plugins we found + # (which may be more than those the job is using). 
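+        # Populated by loadPluginNames() below: plugin name -> set of
+        # required plugin names, parsed from each plugin's
+        # devstack/settings file (define_plugin / plugin_requires).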
+ self._plugin_dependencies = {} + self.loadPluginNames(base_dir) + + self.plugins = {} + self._pluginnames = set() + for k, v in plugins.items(): + self._pluginnames.add(k) + for k, v in plugins.items(): + self._addPlugin(k, str(v)) + + def loadPluginNames(self, base_dir): + if base_dir is None: + return + git_roots = [] + for root, dirs, files in os.walk(base_dir): + if '.git' not in dirs: + continue + # Don't go deeper than git roots + dirs[:] = [] + git_roots.append(root) + for root in git_roots: + devstack = os.path.join(root, 'devstack') + if not (os.path.exists(devstack) and os.path.isdir(devstack)): + continue + settings = os.path.join(devstack, 'settings') + if not (os.path.exists(settings) and os.path.isfile(settings)): + continue + self.loadDevstackPluginInfo(settings) + + define_re = re.compile(r'^define_plugin\s+(\S+).*') + require_re = re.compile(r'^plugin_requires\s+(\S+)\s+(\S+).*') + def loadDevstackPluginInfo(self, fn): + name = None + reqs = set() + with open(fn) as f: + for line in f: + m = self.define_re.match(line) + if m: + name = m.group(1) + m = self.require_re.match(line) + if m: + if name == m.group(1): + reqs.add(m.group(2)) + if name and reqs: + self._plugin_dependencies[name] = reqs + + def getDependencies(self, value): + return self._plugin_dependencies.get(value, []) + + def _addPlugin(self, key, value): + if key in self.plugins: + raise Exception("Plugin {} already added".format(key)) + self.plugins[key] = value + # Append the dependency information + dependencies = set() + for dependency in self.getDependencies(key): + if dependency == key: + continue + dependencies.add(dependency) + try: + self.add(key, dependencies) + except Exception: + del self.plugins[key] + raise + + def getPlugins(self): + ret = [] + keys = sorted(self.plugins.keys()) + seen = set() + for key in keys: + dependencies = self.getDependenciesRecursively(key) + for plugin in dependencies + [key]: + if plugin not in seen: + ret.append((plugin, self.plugins[plugin])) + seen.add(plugin) + return ret + + +class LocalConf(object): + + def __init__(self, localrc, localconf, base_services, services, plugins, + base_dir, projects, project, tempest_plugins): + self.localrc = [] + self.warnings = [] + self.meta_sections = {} + self.plugin_deps = {} + self.base_dir = base_dir + self.projects = projects + self.project = project + self.tempest_plugins = tempest_plugins + if services or base_services: + self.handle_services(base_services, services or {}) + self.handle_localrc(localrc) + # Plugins must be the last items in localrc, otherwise + # the configuration lines which follows them in the file are + # not applied to the plugins (for example, the value of DEST.) 
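+        # The resulting order is therefore: service enable/disable
+        # lines, plain localrc variables (including any generated
+        # LIBS_FROM_GIT / TEMPEST_PLUGINS values), then enable_plugin
+        # lines; the meta-sections are written out separately.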
+ if plugins: + self.handle_plugins(plugins) + if localconf: + self.handle_localconf(localconf) + + def handle_plugins(self, plugins): + pg = PluginGraph(self.base_dir, plugins) + for k, v in pg.getPlugins(): + if v: + self.localrc.append('enable_plugin {} {}'.format(k, v)) + + def handle_services(self, base_services, services): + enable_base_services = services.pop('base', True) + if enable_base_services and base_services: + self.localrc.append('ENABLED_SERVICES={}'.format( + ",".join(base_services))) + else: + self.localrc.append('disable_all_services') + for k, v in services.items(): + if v is False: + self.localrc.append('disable_service {}'.format(k)) + elif v is True: + self.localrc.append('enable_service {}'.format(k)) + + def handle_localrc(self, localrc): + lfg = False + tp = False + if localrc: + vg = VarGraph(localrc) + for k, v in vg.getVars(): + # Avoid double quoting + if len(v) and v[0]=='"': + self.localrc.append('{}={}'.format(k, v)) + else: + self.localrc.append('{}="{}"'.format(k, v)) + if k == 'LIBS_FROM_GIT': + lfg = True + elif k == 'TEMPEST_PLUGINS': + tp = True + + if not lfg and (self.projects or self.project): + required_projects = [] + if self.projects: + for project_name, project_info in self.projects.items(): + if project_info.get('required'): + required_projects.append(project_info['short_name']) + if self.project: + if self.project['short_name'] not in required_projects: + required_projects.append(self.project['short_name']) + if required_projects: + self.localrc.append('LIBS_FROM_GIT={}'.format( + ','.join(required_projects))) + + if self.tempest_plugins: + if not tp: + tp_dirs = [] + for tempest_plugin in self.tempest_plugins: + tp_dirs.append(os.path.join(self.base_dir, tempest_plugin)) + self.localrc.append('TEMPEST_PLUGINS="{}"'.format( + ' '.join(tp_dirs))) + else: + self.warnings.append('TEMPEST_PLUGINS already defined ({}),' + 'requested value {} ignored'.format( + tp, self.tempest_plugins)) + + + def handle_localconf(self, localconf): + for phase, phase_data in localconf.items(): + for fn, fn_data in phase_data.items(): + ms_name = '[[{}|{}]]'.format(phase, fn) + ms_data = [] + for section, section_data in fn_data.items(): + ms_data.append('[{}]'.format(section)) + for k, v in section_data.items(): + ms_data.append('{} = {}'.format(k, v)) + ms_data.append('') + self.meta_sections[ms_name] = ms_data + + def write(self, path): + with open(path, 'w') as f: + f.write('[[local|localrc]]\n') + f.write('\n'.join(self.localrc)) + f.write('\n\n') + for section, lines in self.meta_sections.items(): + f.write('{}\n'.format(section)) + f.write('\n'.join(lines)) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + plugins=dict(type='dict'), + base_services=dict(type='list'), + services=dict(type='dict'), + localrc=dict(type='dict'), + local_conf=dict(type='dict'), + base_dir=dict(type='path'), + path=dict(type='str'), + projects=dict(type='dict'), + project=dict(type='dict'), + tempest_plugins=dict(type='list'), + ) + ) + + p = module.params + lc = LocalConf(p.get('localrc'), + p.get('local_conf'), + p.get('base_services'), + p.get('services'), + p.get('plugins'), + p.get('base_dir'), + p.get('projects'), + p.get('project'), + p.get('tempest_plugins')) + lc.write(p['path']) + + module.exit_json(warnings=lc.warnings) + + +try: + from ansible.module_utils.basic import * # noqa + from ansible.module_utils.basic import AnsibleModule +except ImportError: + pass + +if __name__ == '__main__': + main() diff --git 
a/roles/write-devstack-local-conf/library/test.py b/roles/write-devstack-local-conf/library/test.py new file mode 100644 index 0000000000..7c526b34c8 --- /dev/null +++ b/roles/write-devstack-local-conf/library/test.py @@ -0,0 +1,291 @@ +# Copyright (C) 2017 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import shutil +import tempfile +import unittest + +from devstack_local_conf import LocalConf +from collections import OrderedDict + +class TestDevstackLocalConf(unittest.TestCase): + + @staticmethod + def _init_localconf(p): + lc = LocalConf(p.get('localrc'), + p.get('local_conf'), + p.get('base_services'), + p.get('services'), + p.get('plugins'), + p.get('base_dir'), + p.get('projects'), + p.get('project'), + p.get('tempest_plugins')) + return lc + + def setUp(self): + self.tmpdir = tempfile.mkdtemp() + + def tearDown(self): + shutil.rmtree(self.tmpdir) + + def test_plugins(self): + "Test that plugins without dependencies work" + localrc = {'test_localrc': '1'} + local_conf = {'install': + {'nova.conf': + {'main': + {'test_conf': '2'}}}} + services = {'cinder': True} + # We use ordereddict here to make sure the plugins are in the + # *wrong* order for testing. + plugins = OrderedDict([ + ('bar', 'https://git.openstack.org/openstack/bar-plugin'), + ('foo', 'https://git.openstack.org/openstack/foo-plugin'), + ('baz', 'https://git.openstack.org/openstack/baz-plugin'), + ]) + p = dict(localrc=localrc, + local_conf=local_conf, + base_services=[], + services=services, + plugins=plugins, + base_dir='./test', + path=os.path.join(self.tmpdir, 'test.local.conf')) + lc = self._init_localconf(p) + lc.write(p['path']) + + plugins = [] + with open(p['path']) as f: + for line in f: + if line.startswith('enable_plugin'): + plugins.append(line.split()[1]) + self.assertEqual(['bar', 'baz', 'foo'], plugins) + + + def test_plugin_deps(self): + "Test that plugins with dependencies work" + os.makedirs(os.path.join(self.tmpdir, 'foo-plugin', 'devstack')) + os.makedirs(os.path.join(self.tmpdir, 'foo-plugin', '.git')) + os.makedirs(os.path.join(self.tmpdir, 'bar-plugin', 'devstack')) + os.makedirs(os.path.join(self.tmpdir, 'bar-plugin', '.git')) + with open(os.path.join( + self.tmpdir, + 'foo-plugin', 'devstack', 'settings'), 'w') as f: + f.write('define_plugin foo-plugin\n') + with open(os.path.join( + self.tmpdir, + 'bar-plugin', 'devstack', 'settings'), 'w') as f: + f.write('define_plugin bar-plugin\n') + f.write('plugin_requires bar-plugin foo-plugin\n') + + localrc = {'test_localrc': '1'} + local_conf = {'install': + {'nova.conf': + {'main': + {'test_conf': '2'}}}} + services = {'cinder': True} + # We use ordereddict here to make sure the plugins are in the + # *wrong* order for testing. 
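+        # bar-plugin requires foo-plugin (see the settings file written
+        # above), so foo-plugin must be emitted first despite the order
+        # given here.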
+ plugins = OrderedDict([ + ('bar-plugin', 'https://git.openstack.org/openstack/bar-plugin'), + ('foo-plugin', 'https://git.openstack.org/openstack/foo-plugin'), + ]) + p = dict(localrc=localrc, + local_conf=local_conf, + base_services=[], + services=services, + plugins=plugins, + base_dir=self.tmpdir, + path=os.path.join(self.tmpdir, 'test.local.conf')) + lc = self._init_localconf(p) + lc.write(p['path']) + + plugins = [] + with open(p['path']) as f: + for line in f: + if line.startswith('enable_plugin'): + plugins.append(line.split()[1]) + self.assertEqual(['foo-plugin', 'bar-plugin'], plugins) + + def test_libs_from_git(self): + "Test that LIBS_FROM_GIT is auto-generated" + projects = { + 'git.openstack.org/openstack/nova': { + 'required': True, + 'short_name': 'nova', + }, + 'git.openstack.org/openstack/oslo.messaging': { + 'required': True, + 'short_name': 'oslo.messaging', + }, + 'git.openstack.org/openstack/devstack-plugin': { + 'required': False, + 'short_name': 'devstack-plugin', + }, + } + project = { + 'short_name': 'glance', + } + p = dict(base_services=[], + base_dir='./test', + path=os.path.join(self.tmpdir, 'test.local.conf'), + projects=projects, + project=project) + lc = self._init_localconf(p) + lc.write(p['path']) + + lfg = None + with open(p['path']) as f: + for line in f: + if line.startswith('LIBS_FROM_GIT'): + lfg = line.strip().split('=')[1] + self.assertEqual('nova,oslo.messaging,glance', lfg) + + def test_overridelibs_from_git(self): + "Test that LIBS_FROM_GIT can be overridden" + localrc = {'LIBS_FROM_GIT': 'oslo.db'} + projects = { + 'git.openstack.org/openstack/nova': { + 'required': True, + 'short_name': 'nova', + }, + 'git.openstack.org/openstack/oslo.messaging': { + 'required': True, + 'short_name': 'oslo.messaging', + }, + 'git.openstack.org/openstack/devstack-plugin': { + 'required': False, + 'short_name': 'devstack-plugin', + }, + } + p = dict(localrc=localrc, + base_services=[], + base_dir='./test', + path=os.path.join(self.tmpdir, 'test.local.conf'), + projects=projects) + lc = self._init_localconf(p) + lc.write(p['path']) + + lfg = None + with open(p['path']) as f: + for line in f: + if line.startswith('LIBS_FROM_GIT'): + lfg = line.strip().split('=')[1] + self.assertEqual('"oslo.db"', lfg) + + def test_avoid_double_quote(self): + "Test that there a no duplicated quotes" + localrc = {'TESTVAR': '"quoted value"'} + p = dict(localrc=localrc, + base_services=[], + base_dir='./test', + path=os.path.join(self.tmpdir, 'test.local.conf'), + projects={}) + lc = self._init_localconf(p) + lc.write(p['path']) + + testvar = None + with open(p['path']) as f: + for line in f: + if line.startswith('TESTVAR'): + testvar = line.strip().split('=')[1] + self.assertEqual('"quoted value"', testvar) + + def test_plugin_circular_deps(self): + "Test that plugins with circular dependencies fail" + os.makedirs(os.path.join(self.tmpdir, 'foo-plugin', 'devstack')) + os.makedirs(os.path.join(self.tmpdir, 'foo-plugin', '.git')) + os.makedirs(os.path.join(self.tmpdir, 'bar-plugin', 'devstack')) + os.makedirs(os.path.join(self.tmpdir, 'bar-plugin', '.git')) + with open(os.path.join( + self.tmpdir, + 'foo-plugin', 'devstack', 'settings'), 'w') as f: + f.write('define_plugin foo\n') + f.write('plugin_requires foo bar\n') + with open(os.path.join( + self.tmpdir, + 'bar-plugin', 'devstack', 'settings'), 'w') as f: + f.write('define_plugin bar\n') + f.write('plugin_requires bar foo\n') + + localrc = {'test_localrc': '1'} + local_conf = {'install': + {'nova.conf': + {'main': + 
{'test_conf': '2'}}}} + services = {'cinder': True} + # We use ordereddict here to make sure the plugins are in the + # *wrong* order for testing. + plugins = OrderedDict([ + ('bar', 'https://git.openstack.org/openstack/bar-plugin'), + ('foo', 'https://git.openstack.org/openstack/foo-plugin'), + ]) + p = dict(localrc=localrc, + local_conf=local_conf, + base_services=[], + services=services, + plugins=plugins, + base_dir=self.tmpdir, + path=os.path.join(self.tmpdir, 'test.local.conf')) + with self.assertRaises(Exception): + lc = self._init_localconf(p) + lc.write(p['path']) + + def _find_tempest_plugins_value(self, file_path): + tp = None + with open(file_path) as f: + for line in f: + if line.startswith('TEMPEST_PLUGINS'): + found = line.strip().split('=')[1] + self.assertIsNone(tp, + "TEMPEST_PLUGIN ({}) found again ({})".format( + tp, found)) + tp = found + return tp + + def test_tempest_plugins(self): + "Test that TEMPEST_PLUGINS is correctly populated." + p = dict(base_services=[], + base_dir='./test', + path=os.path.join(self.tmpdir, 'test.local.conf'), + tempest_plugins=['heat-tempest-plugin', 'sahara-tests']) + lc = self._init_localconf(p) + lc.write(p['path']) + + tp = self._find_tempest_plugins_value(p['path']) + self.assertEqual('"./test/heat-tempest-plugin ./test/sahara-tests"', tp) + self.assertEqual(len(lc.warnings), 0) + + def test_tempest_plugins_not_overridden(self): + """Test that the existing value of TEMPEST_PLUGINS is not overridden + by the user-provided value, but a warning is emitted.""" + localrc = {'TEMPEST_PLUGINS': 'someplugin'} + p = dict(localrc=localrc, + base_services=[], + base_dir='./test', + path=os.path.join(self.tmpdir, 'test.local.conf'), + tempest_plugins=['heat-tempest-plugin', 'sahara-tests']) + lc = self._init_localconf(p) + lc.write(p['path']) + + tp = self._find_tempest_plugins_value(p['path']) + self.assertEqual('"someplugin"', tp) + self.assertEqual(len(lc.warnings), 1) + + +if __name__ == '__main__': + unittest.main() diff --git a/roles/write-devstack-local-conf/tasks/main.yaml b/roles/write-devstack-local-conf/tasks/main.yaml new file mode 100644 index 0000000000..bfd086034b --- /dev/null +++ b/roles/write-devstack-local-conf/tasks/main.yaml @@ -0,0 +1,14 @@ +- name: Write a job-specific local_conf file + become: true + become_user: stack + devstack_local_conf: + path: "{{ devstack_local_conf_path }}" + plugins: "{{ devstack_plugins|default(omit) }}" + base_services: "{{ devstack_base_services|default(omit) }}" + services: "{{ devstack_services|default(omit) }}" + localrc: "{{ devstack_localrc|default(omit) }}" + local_conf: "{{ devstack_local_conf|default(omit) }}" + base_dir: "{{ devstack_base_dir|default(omit) }}" + projects: "{{ zuul.projects }}" + project: "{{ zuul.project }}" + tempest_plugins: "{{ tempest_plugins|default(omit) }}" diff --git a/run_tests.sh b/run_tests.sh new file mode 100755 index 0000000000..a9a3d0bb48 --- /dev/null +++ b/run_tests.sh @@ -0,0 +1,41 @@ +#!/bin/bash +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
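+# Note: each tests/test_*.sh script signals failure via a non-zero exit
+# code; this wrapper only aggregates those exit codes.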
+ +# This runs a series of unit tests for DevStack to ensure it's functioning + +PASSES="" +FAILURES="" + +for testfile in tests/test_*.sh; do + $testfile + if [[ $? -eq 0 ]]; then + PASSES="$PASSES $testfile" + else + FAILURES="$FAILURES $testfile" + fi +done + +# Summary display now that all is said and done +echo "=====================================================================" +for script in $PASSES; do + echo PASS $script +done +for script in $FAILURES; do + echo FAILED $script +done +echo "=====================================================================" + +if [[ -n "$FAILURES" ]]; then + exit 1 +fi diff --git a/samples/local.conf b/samples/local.conf new file mode 100644 index 0000000000..55b729809d --- /dev/null +++ b/samples/local.conf @@ -0,0 +1,100 @@ +# Sample ``local.conf`` for user-configurable variables in ``stack.sh`` + +# NOTE: Copy this file to the root DevStack directory for it to work properly. + +# ``local.conf`` is a user-maintained settings file that is sourced from ``stackrc``. +# This gives it the ability to override any variables set in ``stackrc``. +# Also, most of the settings in ``stack.sh`` are written to only be set if no +# value has already been set; this lets ``local.conf`` effectively override the +# default values. + +# This is a collection of some of the settings we have found to be useful +# in our DevStack development environments. Additional settings are described +# in https://docs.openstack.org/devstack/latest/configuration.html#local-conf +# These should be considered as samples and are unsupported DevStack code. + +# The ``localrc`` section replaces the old ``localrc`` configuration file. +# Note that if ``localrc`` is present it will be used in favor of this section. +[[local|localrc]] + +# Minimal Contents +# ---------------- + +# While ``stack.sh`` is happy to run without ``localrc``, devlife is better when +# there are a few minimal variables set: + +# If the ``*_PASSWORD`` variables are not set here you will be prompted to enter +# values for them by ``stack.sh``and they will be added to ``local.conf``. +ADMIN_PASSWORD=nomoresecret +DATABASE_PASSWORD=stackdb +RABBIT_PASSWORD=stackqueue +SERVICE_PASSWORD=$ADMIN_PASSWORD + +# ``HOST_IP`` and ``HOST_IPV6`` should be set manually for best results if +# the NIC configuration of the host is unusual, i.e. ``eth1`` has the default +# route but ``eth0`` is the public interface. They are auto-detected in +# ``stack.sh`` but often is indeterminate on later runs due to the IP moving +# from an Ethernet interface to a bridge on the host. Setting it here also +# makes it available for ``openrc`` to include when setting ``OS_AUTH_URL``. +# Neither is set by default. +#HOST_IP=w.x.y.z +#HOST_IPV6=2001:db8::7 + + +# Logging +# ------- + +# By default ``stack.sh`` output only goes to the terminal where it runs. It can +# be configured to additionally log to a file by setting ``LOGFILE`` to the full +# path of the destination log file. A timestamp will be appended to the given name. +LOGFILE=$DEST/logs/stack.sh.log + +# Old log files are automatically removed after 2 days to keep things neat. Change +# the number of days by setting ``LOGDAYS``. +LOGDAYS=2 + +# Nova logs will be colorized if ``SYSLOG`` is not set; turn this off by setting +# ``LOG_COLOR`` false. 
+#LOG_COLOR=False + + +# Using milestone-proposed branches +# --------------------------------- + +# Uncomment these to grab the milestone-proposed branches from the +# repos: +#CINDER_BRANCH=milestone-proposed +#GLANCE_BRANCH=milestone-proposed +#HORIZON_BRANCH=milestone-proposed +#KEYSTONE_BRANCH=milestone-proposed +#KEYSTONECLIENT_BRANCH=milestone-proposed +#NOVA_BRANCH=milestone-proposed +#NOVACLIENT_BRANCH=milestone-proposed +#NEUTRON_BRANCH=milestone-proposed +#SWIFT_BRANCH=milestone-proposed + +# Using git versions of clients +# ----------------------------- +# By default clients are installed from pip. See LIBS_FROM_GIT in +# stackrc for details on getting clients from specific branches or +# revisions. e.g. +# LIBS_FROM_GIT="python-ironicclient" +# IRONICCLIENT_BRANCH=refs/changes/44/2.../1 + +# Swift +# ----- + +# Swift is now used as the back-end for the S3-like object store. Setting the +# hash value is required and you will be prompted for it if Swift is enabled +# so just set it to something already: +SWIFT_HASH=66a3d6b56c1f479c8b4e70ab5c2000f5 + +# For development purposes the default of 3 replicas is usually not required. +# Set this to 1 to save some resources: +SWIFT_REPLICAS=1 + +# The data for Swift is stored by default in (``$DEST/data/swift``), +# or (``$DATA_DIR/swift``) if ``DATA_DIR`` has been set, and can be +# moved by setting ``SWIFT_DATA_DIR``. The directory will be created +# if it does not exist. +SWIFT_DATA_DIR=$DEST/data diff --git a/samples/local.sh b/samples/local.sh new file mode 100755 index 0000000000..7e6ae70ad4 --- /dev/null +++ b/samples/local.sh @@ -0,0 +1,67 @@ +#!/usr/bin/env bash + +# Sample ``local.sh`` for user-configurable tasks to run automatically +# at the successful conclusion of ``stack.sh``. + +# NOTE: Copy this file to the root DevStack directory for it to work properly. + +# This is a collection of some of the things we have found to be useful to run +# after ``stack.sh`` to tweak the OpenStack configuration that DevStack produces. +# These should be considered as samples and are unsupported DevStack code. 
+ + +# Keep track of the DevStack directory +TOP_DIR=$(cd $(dirname "$0") && pwd) + +# Import common functions +source $TOP_DIR/functions + +# Use openrc + stackrc + localrc for settings +source $TOP_DIR/stackrc + +# Destination path for installation ``DEST`` +DEST=${DEST:-/opt/stack} + +if is_service_enabled nova; then + + # Import ssh keys + # --------------- + + # Import keys from the current user into the default OpenStack user (usually + # ``demo``) + + # Get OpenStack user auth + export OS_CLOUD=devstack + + # Add first keypair found in localhost:$HOME/.ssh + for i in $HOME/.ssh/id_rsa.pub $HOME/.ssh/id_dsa.pub; do + if [[ -r $i ]]; then + openstack keypair create --public-key $i `hostname` + break + fi + done + + # Update security default group + # ----------------------------- + + # Add tcp/22 and icmp to default security group + default=$(openstack security group list -f value -c ID) + openstack security group rule create $default --protocol tcp --dst-port 22 + openstack security group rule create $default --protocol icmp + + # Create A Flavor + # --------------- + + # Get OpenStack admin auth + source $TOP_DIR/openrc admin admin + + # Name of new flavor + # set in ``local.conf`` with ``DEFAULT_INSTANCE_TYPE=m1.micro`` + MI_NAME=m1.micro + + # Create micro flavor if not present + if [[ -z $(openstack flavor list | grep $MI_NAME) ]]; then + openstack flavor create $MI_NAME --id 6 --ram 128 --disk 0 --vcpus 1 + fi + +fi diff --git a/stack.sh b/stack.sh index 15ab53839b..965f58007d 100755 --- a/stack.sh +++ b/stack.sh @@ -1,201 +1,652 @@ #!/usr/bin/env bash -# **stack.sh** is an opinionated openstack developer installation. +# ``stack.sh`` is an opinionated OpenStack developer installation. It +# installs and configures various combinations of **Cinder**, **Glance**, +# **Horizon**, **Keystone**, **Nova**, **Neutron**, and **Swift** + +# This script's options can be changed by setting appropriate environment +# variables. You can configure things like which git repositories to use, +# services to enable, OS images to use, etc. Default values are located in the +# ``stackrc`` file. If you are crafty you can run the script on multiple nodes +# using shared settings for common resources (eg., mysql or rabbitmq) and build +# a multi-node developer install. + +# To keep this script simple we assume you are running on a recent **Ubuntu** +# (Bionic or newer) or **CentOS/RHEL/RockyLinux** +# (7 or newer) machine. (It may work on other platforms but support for those +# platforms is left to those who added them to DevStack.) It should work in +# a VM or physical server. Additionally, we maintain a list of ``deb`` and +# ``rpm`` dependencies and other configuration files in this repo. -# This script installs and configures *nova*, *glance*, *horizon* and *keystone* +# Learn more and get the most recent version at http://devstack.org -# This script allows you to specify configuration options of what git -# repositories to use, enabled services, network configuration and various -# passwords. If you are crafty you can run the script on multiple nodes using -# shared settings for common resources (mysql, rabbitmq) and build a multi-node -# developer install. +# Print the commands being run so that we can see the command that triggers +# an error. It is also useful for following along as the install occurs. +set -o xtrace -# To keep this script simple we assume you are running on an **Ubuntu 11.10 -# Oneiric** machine. It should work in a VM or physical server. 
Additionally -# we put the list of *apt* and *pip* dependencies and other configuration files -# in this repo. So start by grabbing this script and the dependencies. +# Make sure custom grep options don't get in the way +unset GREP_OPTIONS -# Learn more and get the most recent version at http://devstack.org +# NOTE(sdague): why do we explicitly set locale when running stack.sh? +# +# Devstack is written in bash, and many functions used throughout +# devstack process text coming off a command (like the ip command) +# and do transforms using grep, sed, cut, awk on the strings that are +# returned. Many of these programs are internationalized, which is +# great for end users, but means that the strings that devstack +# functions depend upon might not be there in other locales. We thus +# need to pin the world to an english basis during the runs. +# +# Previously we used the C locale for this, every system has it, and +# it gives us a stable sort order. It does however mean that we +# effectively drop unicode support.... boo! :( +# +# With python3 being more unicode aware by default, that's not the +# right option. While there is a C.utf8 locale, some distros are +# shipping it as C.UTF8 for extra confusingness. And it's support +# isn't super clear across distros. This is made more challenging when +# trying to support both out of the box distros, and the gate which +# uses diskimage builder to build disk images in a different way than +# the distros do. +# +# So... en_US.utf8 it is. That's existed for a very long time. It is a +# compromise position, but it is the least worse idea at the time of +# this comment. +# +# We also have to unset other variables that might impact LC_ALL +# taking effect. +unset LANG +unset LANGUAGE +LC_ALL=en_US.utf8 +export LC_ALL -# Sanity Check -# ============ +# Clear all OpenStack related envvars +unset `env | grep -E '^OS_' | cut -d = -f 1` -# Warn users who aren't on oneiric, but allow them to override check and attempt -# installation with ``FORCE=yes ./stack`` -DISTRO=$(lsb_release -c -s) +# Make sure umask is sane +umask 022 -if [[ ! ${DISTRO} =~ (oneiric) ]]; then - echo "WARNING: this script has only been tested on oneiric" - if [[ "$FORCE" != "yes" ]]; then - echo "If you wish to run this script anyway run with FORCE=yes" - exit 1 - fi -fi +# Not all distros have sbin in PATH for regular users. +# osc will normally be installed at /usr/local/bin/openstack so ensure +# /usr/local/bin is also in the path +PATH=$PATH:/usr/local/bin:/usr/local/sbin:/usr/sbin:/sbin -# Keep track of the current devstack directory. +# Keep track of the DevStack directory TOP_DIR=$(cd $(dirname "$0") && pwd) -# stack.sh keeps the list of **apt** and **pip** dependencies in external -# files, along with config templates and other useful files. You can find these -# in the ``files`` directory (next to this script). We will reference this -# directory using the ``FILES`` variable in this script. +# Check for uninitialized variables, a big cause of bugs +NOUNSET=${NOUNSET:-} +if [[ -n "$NOUNSET" ]]; then + set -o nounset +fi + +# Set start of devstack timestamp +DEVSTACK_START_TIME=$(date +%s) + +# Configuration +# ============= + +# Sanity Checks +# ------------- + +# Clean up last environment var cache +if [[ -r $TOP_DIR/.stackenv ]]; then + rm $TOP_DIR/.stackenv +fi + +# ``stack.sh`` keeps the list of ``deb`` and ``rpm`` dependencies, config +# templates and other useful files in the ``files`` subdirectory FILES=$TOP_DIR/files if [ ! 
-d $FILES ]; then - echo "ERROR: missing devstack/files - did you grab more than just stack.sh?" + set +o xtrace + echo "missing devstack/files" + exit 1 +fi + +# ``stack.sh`` keeps function libraries here +# Make sure ``$TOP_DIR/inc`` directory is present +if [ ! -d $TOP_DIR/inc ]; then + set +o xtrace + echo "missing devstack/inc" exit 1 fi +# ``stack.sh`` keeps project libraries here +# Make sure ``$TOP_DIR/lib`` directory is present +if [ ! -d $TOP_DIR/lib ]; then + set +o xtrace + echo "missing devstack/lib" + exit 1 +fi + +# Check if run in POSIX shell +if [[ "${POSIXLY_CORRECT}" == "y" ]]; then + set +o xtrace + echo "You are running POSIX compatibility mode, DevStack requires bash 4.2 or newer." + exit 1 +fi + +# OpenStack is designed to be run as a non-root user; Horizon will fail to run +# as **root** since Apache will not serve content from **root** user). +# ``stack.sh`` must not be run as **root**. It aborts and suggests one course of +# action to create a suitable user account. + +if [[ $EUID -eq 0 ]]; then + set +o xtrace + echo "DevStack should be run as a user with sudo permissions, " + echo "not root." + echo "A \"stack\" user configured correctly can be created with:" + echo " $TOP_DIR/tools/create-stack-user.sh" + exit 1 +fi + +# OpenStack is designed to run at a system level, with system level +# installation of python packages. It does not support running under a +# virtual env, and will fail in really odd ways if you do this. Make +# this explicit as it has come up on the mailing list. +if [[ -n "$VIRTUAL_ENV" ]]; then + set +o xtrace + echo "You appear to be running under a python virtualenv." + echo "DevStack does not support this, as we may break the" + echo "virtualenv you are currently in by modifying " + echo "external system-level components the virtualenv relies on." + echo "We recommend you use a separate virtual-machine if " + echo "you are worried about DevStack taking over your system." + exit 1 +fi + +# Provide a safety switch for devstack. If you do a lot of devstack, +# on a lot of different environments, you sometimes run it on the +# wrong box. This makes there be a way to prevent that. +if [[ -e $HOME/.no-devstack ]]; then + set +o xtrace + echo "You've marked this host as a no-devstack host, to save yourself from" + echo "running devstack accidentally. If this is in error, please remove the" + echo "~/.no-devstack file" + exit 1 +fi + +# Prepare the environment +# ----------------------- + +# Initialize variables: +LAST_SPINNER_PID="" + +# Import common functions +source $TOP_DIR/functions + +# Import 'public' stack.sh functions +source $TOP_DIR/lib/stack + +# Determine what system we are running on. This provides ``os_VENDOR``, +# ``os_RELEASE``, ``os_PACKAGE``, ``os_CODENAME`` +# and ``DISTRO`` +GetDistro -# Settings -# ======== +# Global Settings +# --------------- -# This script is customizable through setting environment variables. If you -# want to override a setting you can either:: +# Check for a ``localrc`` section embedded in ``local.conf`` and extract if +# ``localrc`` does not already exist + +# Phase: local +rm -f $TOP_DIR/.localrc.auto +extract_localrc_section $TOP_DIR/local.conf $TOP_DIR/localrc $TOP_DIR/.localrc.auto + +# ``stack.sh`` is customizable by setting environment variables. 
Override a +# default setting via export: # -# export MYSQL_PASSWORD=anothersecret +# export DATABASE_PASSWORD=anothersecret # ./stack.sh # -# You can also pass options on a single line ``MYSQL_PASSWORD=simple ./stack.sh`` +# or by setting the variable on the command line: # -# Additionally, you can put any local variables into a ``localrc`` file, like:: +# DATABASE_PASSWORD=simple ./stack.sh # -# MYSQL_PASSWORD=anothersecret -# MYSQL_USER=hellaroot +# Persistent variables can be placed in a ``local.conf`` file: # -# We try to have sensible defaults, so you should be able to run ``./stack.sh`` -# in most cases. +# [[local|localrc]] +# DATABASE_PASSWORD=anothersecret +# DATABASE_USER=hellaroot # -# We source our settings from ``stackrc``. This file is distributed with devstack -# and contains locations for what repositories to use. If you want to use other -# repositories and branches, you can add your own settings with another file called -# ``localrc`` +# We try to have sensible defaults, so you should be able to run ``./stack.sh`` +# in most cases. ``local.conf`` is not distributed with DevStack and will never +# be overwritten by a DevStack update. # -# If ``localrc`` exists, then ``stackrc`` will load those settings. This is -# useful for changing a branch or repository to test other versions. Also you -# can store your other settings like **MYSQL_PASSWORD** or **ADMIN_PASSWORD** instead -# of letting devstack generate random ones for you. -source ./stackrc +# DevStack distributes ``stackrc`` which contains locations for the OpenStack +# repositories, branches to configure, and other configuration defaults. +# ``stackrc`` sources the ``localrc`` section of ``local.conf`` to allow you to +# safely override those settings. + +if [[ ! -r $TOP_DIR/stackrc ]]; then + die $LINENO "missing $TOP_DIR/stackrc - did you grab more than just stack.sh?" +fi +source $TOP_DIR/stackrc + +# write /etc/devstack-version +write_devstack_version + +# Warn users who aren't on an explicitly supported distro, but allow them to +# override check and attempt installation with ``FORCE=yes ./stack`` +SUPPORTED_DISTROS="trixie|bookworm|jammy|noble|rhel9|rhel10" + +if [[ ! ${DISTRO} =~ $SUPPORTED_DISTROS ]]; then + echo "WARNING: this script has not been tested on $DISTRO" + if [[ "$FORCE" != "yes" ]]; then + die $LINENO "If you wish to run this script anyway run with FORCE=yes" + fi +fi + +# Local Settings +# -------------- + +# Make sure the proxy config is visible to sub-processes +export_proxy_variables + +# Remove services which were negated in ``ENABLED_SERVICES`` +# using the "-" prefix (e.g., "-rabbit") instead of +# calling disable_service(). 
+disable_negated_services + + +# Configure sudo +# -------------- + +# We're not as **root** so make sure ``sudo`` is available +is_package_installed sudo || is_package_installed sudo-ldap || install_package sudo + +# UEC images ``/etc/sudoers`` does not have a ``#includedir``, add one +sudo grep -q "^#includedir.*/etc/sudoers.d" /etc/sudoers || + echo "#includedir /etc/sudoers.d" | sudo tee -a /etc/sudoers + +# Conditionally setup detailed logging for sudo +if [[ -n "$LOG_SUDO" ]]; then + TEMPFILE=`mktemp` + echo "Defaults log_output" > $TEMPFILE + chmod 0440 $TEMPFILE + sudo chown root:root $TEMPFILE + sudo mv $TEMPFILE /etc/sudoers.d/00_logging +fi + +# Set up DevStack sudoers +TEMPFILE=`mktemp` +echo "$STACK_USER ALL=(root) NOPASSWD:ALL" >$TEMPFILE +# Some binaries might be under ``/sbin`` or ``/usr/sbin``, so make sure sudo will +# see them by forcing ``PATH`` +echo "Defaults:$STACK_USER secure_path=/sbin:/usr/sbin:/usr/bin:/bin:/usr/local/sbin:/usr/local/bin" >> $TEMPFILE +echo "Defaults:$STACK_USER !requiretty" >> $TEMPFILE +chmod 0440 $TEMPFILE +sudo chown root:root $TEMPFILE +sudo mv $TEMPFILE /etc/sudoers.d/50_stack_sh + +# Configure Distro Repositories +# ----------------------------- + +# For Debian/Ubuntu make apt attempt to retry network ops on it's own +if is_ubuntu; then + echo 'APT::Acquire::Retries "20";' | sudo tee /etc/apt/apt.conf.d/80retry >/dev/null +fi + +# Some distros need to add repos beyond the defaults provided by the vendor +# to pick up required packages. + +function _install_epel { + # epel-release is in extras repo which is enabled by default + install_package epel-release + + # RDO repos are not tested with epel and may have incompatibilities so + # let's limit the packages fetched from epel to the ones not in RDO repos. + sudo dnf config-manager --save --setopt=includepkgs=debootstrap,dpkg epel +} + +function _install_rdo { + if [[ $DISTRO =~ "rhel" ]]; then + VERSION=${DISTRO:4:2} + rdo_release=${TARGET_BRANCH#*/} + if [[ "$TARGET_BRANCH" == "master" ]]; then + # adding delorean-deps repo to provide current master rpms + sudo wget https://trunk.rdoproject.org/centos${VERSION}-master/delorean-deps.repo -O /etc/yum.repos.d/delorean-deps.repo + else + if sudo dnf provides centos-release-openstack-${rdo_release} >/dev/null 2>&1; then + sudo dnf -y install centos-release-openstack-${rdo_release} + else + sudo wget https://trunk.rdoproject.org/centos${VERSION}-${rdo_release}/delorean-deps.repo -O /etc/yum.repos.d/delorean-deps.repo + fi + fi + fi + sudo dnf -y update +} + + +# Configure Target Directories +# ---------------------------- # Destination path for installation ``DEST`` DEST=${DEST:-/opt/stack} -# Configure services to syslog instead of writing to individual log files -SYSLOG=${SYSLOG:-False} +# Create the destination directory and ensure it is writable by the user +# and read/executable by everybody for daemons (e.g. apache run for horizon) +# If directory exists do not modify the permissions. +if [[ ! 
-d $DEST ]]; then + sudo mkdir -p $DEST + safe_chown -R $STACK_USER $DEST + safe_chmod 0755 $DEST +fi -# apt-get wrapper to just get arguments set correctly -function apt_get() { - local sudo="sudo" - [ "$(id -u)" = "0" ] && sudo="env" - $sudo DEBIAN_FRONTEND=noninteractive apt-get \ - --option "Dpkg::Options::=--force-confold" --assume-yes "$@" -} +# Destination path for devstack logs +if [[ -n ${LOGDIR:-} ]]; then + sudo mkdir -p $LOGDIR + safe_chown -R $STACK_USER $LOGDIR + safe_chmod 0755 $LOGDIR +fi +# Destination path for service data +DATA_DIR=${DATA_DIR:-${DEST}/data} +if [[ ! -d $DATA_DIR ]]; then + sudo mkdir -p $DATA_DIR + safe_chown -R $STACK_USER $DATA_DIR + safe_chmod 0755 $DATA_DIR +fi -# OpenStack is designed to be run as a regular user (Horizon will fail to run -# as root, since apache refused to startup serve content from root user). If -# stack.sh is run as root, it automatically creates a stack user with -# sudo privileges and runs as that user. +# Create and/or clean the async state directory +async_init -if [[ $EUID -eq 0 ]]; then - ROOTSLEEP=${ROOTSLEEP:-10} - echo "You are running this script as root." - echo "In $ROOTSLEEP seconds, we will create a user 'stack' and run as that user" - sleep $ROOTSLEEP - - # since this script runs as a normal user, we need to give that user - # ability to run sudo - dpkg -l sudo || apt_get update && apt_get install sudo - - if ! getent passwd stack >/dev/null; then - echo "Creating a user called stack" - useradd -U -G sudo -s /bin/bash -d $DEST -m stack +# Configure proper hostname +# Certain services such as rabbitmq require that the local hostname resolves +# correctly. Make sure it exists in /etc/hosts so that is always true. +LOCAL_HOSTNAME=`hostname -s` +if ! grep -Fqwe "$LOCAL_HOSTNAME" /etc/hosts; then + sudo sed -i "s/\(^127.0.0.1.*\)/\1 $LOCAL_HOSTNAME/" /etc/hosts +fi + +# If you have all the repos installed above already setup (e.g. a CI +# situation where they are on your image) you may choose to skip this +# to speed things up +SKIP_EPEL_INSTALL=$(trueorfalse False SKIP_EPEL_INSTALL) + +if [[ $DISTRO == "rhel9" ]]; then + # for CentOS Stream 9 repository + sudo dnf config-manager --set-enabled crb + # for RHEL 9 repository + sudo dnf config-manager --set-enabled codeready-builder-for-rhel-9-x86_64-rpms + # rabbitmq and other packages are provided by RDO repositories. + _install_rdo + + # Some distributions (Rocky Linux 9) provide curl-minimal instead of curl, + # it triggers a conflict when devstack wants to install "curl". + # Swap curl-minimal with curl. + if is_package_installed curl-minimal; then + sudo dnf swap -y curl-minimal curl + fi +elif [[ $DISTRO == "rhel10" ]]; then + # for CentOS Stream 10 repository + sudo dnf config-manager --set-enabled crb + # rabbitmq and other packages are provided by RDO repositories. + _install_rdo +elif [[ $DISTRO == "openEuler-22.03" ]]; then + # There are some problem in openEuler. We should fix it first. Some required + # package/action runs before fixup script. So we can't fix there. + # + # 1. the hostname package is not installed by default + # 2. Some necessary packages are in openstack repo, for example liberasurecode-devel + # 3. python3-pip can be uninstalled by `get_pip.py` automaticly. + # 4. 
Ensure wget installation before use + install_package hostname openstack-release-wallaby wget + uninstall_package python3-pip + + # Add yum repository for libvirt7.X + sudo wget https://eur.openeuler.openatom.cn/coprs/g/sig-openstack/Libvirt-7.X/repo/openeuler-22.03_LTS/group_sig-openstack-Libvirt-7.X-openeuler-22.03_LTS.repo -O /etc/yum.repos.d/libvirt7.2.0.repo +fi + +# Ensure python is installed +# -------------------------- +install_python + + +# Configure Logging +# ----------------- + +# Set up logging level +VERBOSE=$(trueorfalse True VERBOSE) +VERBOSE_NO_TIMESTAMP=$(trueorfalse False VERBOSE) + +# Draw a spinner so the user knows something is happening +function spinner { + local delay=0.75 + local spinstr='/-\|' + printf "..." >&3 + while [ true ]; do + local temp=${spinstr#?} + printf "[%c]" "$spinstr" >&3 + local spinstr=$temp${spinstr%"$temp"} + sleep $delay + printf "\b\b\b" >&3 + done +} + +function kill_spinner { + if [ ! -z "$LAST_SPINNER_PID" ]; then + kill >/dev/null 2>&1 $LAST_SPINNER_PID + printf "\b\b\bdone\n" >&3 fi +} - echo "Giving stack user passwordless sudo priviledges" - # some uec images sudoers does not have a '#includedir'. add one. - grep -q "^#includedir.*/etc/sudoers.d" /etc/sudoers || - echo "#includedir /etc/sudoers.d" >> /etc/sudoers - ( umask 226 && echo "stack ALL=(ALL) NOPASSWD:ALL" \ - > /etc/sudoers.d/50_stack_sh ) - - echo "Copying files to stack user" - STACK_DIR="$DEST/${PWD##*/}" - cp -r -f "$PWD" "$STACK_DIR" - chown -R stack "$STACK_DIR" - if [[ "$SHELL_AFTER_RUN" != "no" ]]; then - exec su -c "set -e; cd $STACK_DIR; bash stack.sh; bash" stack +# Echo text to the log file, summary log file and stdout +# echo_summary "something to say" +function echo_summary { + if [[ -t 3 && "$VERBOSE" != "True" ]]; then + kill_spinner + echo -n -e $@ >&6 + spinner & + LAST_SPINNER_PID=$! else - exec su -c "set -e; cd $STACK_DIR; bash stack.sh" stack + echo -e $@ >&6 fi - exit 1 +} + +# Echo text only to stdout, no log files +# echo_nolog "something not for the logs" +function echo_nolog { + echo $@ >&3 +} + +# Set up logging for ``stack.sh`` +# Set ``LOGFILE`` to turn on logging +# Append '.xxxxxxxx' to the given name to maintain history +# where 'xxxxxxxx' is a representation of the date the file was created +TIMESTAMP_FORMAT=${TIMESTAMP_FORMAT:-"%F-%H%M%S"} +LOGDAYS=${LOGDAYS:-7} +CURRENT_LOG_TIME=$(date "+$TIMESTAMP_FORMAT") + +if [[ -n "$LOGFILE" ]]; then + # Clean up old log files. Append '.*' to the user-specified + # ``LOGFILE`` to match the date in the search template. + LOGFILE_DIR="${LOGFILE%/*}" # dirname + LOGFILE_NAME="${LOGFILE##*/}" # basename + mkdir -p $LOGFILE_DIR + find $LOGFILE_DIR -maxdepth 1 -name $LOGFILE_NAME.\* -mtime +$LOGDAYS -exec rm {} \; + LOGFILE=$LOGFILE.${CURRENT_LOG_TIME} + SUMFILE=$LOGFILE.summary.${CURRENT_LOG_TIME} + + # Redirect output according to config + + # Set fd 3 to a copy of stdout. So we can set fd 1 without losing + # stdout later. 
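+    # In this branch: fd 1/2 feed outfilter.py into $LOGFILE, fd 6 feeds
+    # the summary file, and fd 3 keeps a handle on the original stdout.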
+ exec 3>&1 + if [[ "$VERBOSE" == "True" ]]; then + _of_args="-v" + if [[ "$VERBOSE_NO_TIMESTAMP" == "True" ]]; then + _of_args="$_of_args --no-timestamp" + fi + # Set fd 1 and 2 to write the log file + exec 1> >( $PYTHON $TOP_DIR/tools/outfilter.py $_of_args -o "${LOGFILE}" ) 2>&1 + # Set fd 6 to summary log file + exec 6> >( $PYTHON $TOP_DIR/tools/outfilter.py -o "${SUMFILE}" ) + else + # Set fd 1 and 2 to primary logfile + exec 1> >( $PYTHON $TOP_DIR/tools/outfilter.py -o "${LOGFILE}" ) 2>&1 + # Set fd 6 to summary logfile and stdout + exec 6> >( $PYTHON $TOP_DIR/tools/outfilter.py -v -o "${SUMFILE}" >&3 ) + fi + + echo_summary "stack.sh log $LOGFILE" + # Specified logfile name always links to the most recent log + ln -sf $LOGFILE $LOGFILE_DIR/$LOGFILE_NAME + ln -sf $SUMFILE $LOGFILE_DIR/$LOGFILE_NAME.summary else - # Our user needs passwordless priviledges for certain commands which nova - # uses internally. - # Natty uec images sudoers does not have a '#includedir'. add one. - sudo grep -q "^#includedir.*/etc/sudoers.d" /etc/sudoers || - echo "#includedir /etc/sudoers.d" | sudo tee -a /etc/sudoers - TEMPFILE=`mktemp` - cat $FILES/sudo/nova > $TEMPFILE - sed -e "s,%USER%,$USER,g" -i $TEMPFILE - chmod 0440 $TEMPFILE - sudo chown root:root $TEMPFILE - sudo mv $TEMPFILE /etc/sudoers.d/stack_sh_nova -fi - -# Set the destination directories for openstack projects -NOVA_DIR=$DEST/nova -HORIZON_DIR=$DEST/horizon -GLANCE_DIR=$DEST/glance -KEYSTONE_DIR=$DEST/keystone -NOVACLIENT_DIR=$DEST/python-novaclient -OPENSTACKX_DIR=$DEST/openstackx -NOVNC_DIR=$DEST/noVNC -SWIFT_DIR=$DEST/swift -SWIFT_KEYSTONE_DIR=$DEST/swift-keystone2 -QUANTUM_DIR=$DEST/quantum - -# Default Quantum Plugin -Q_PLUGIN=${Q_PLUGIN:-openvswitch} - -# Specify which services to launch. These generally correspond to screen tabs -ENABLED_SERVICES=${ENABLED_SERVICES:-g-api,g-reg,key,n-api,n-cpu,n-net,n-sch,n-vnc,horizon,mysql,rabbit,openstackx} - -# Name of the lvm volume group to use/create for iscsi volumes -VOLUME_GROUP=${VOLUME_GROUP:-nova-volumes} - -# Nova hypervisor configuration. We default to libvirt whth **kvm** but will -# drop back to **qemu** if we are unable to load the kvm module. Stack.sh can -# also install an **LXC** based system. -VIRT_DRIVER=${VIRT_DRIVER:-libvirt} -LIBVIRT_TYPE=${LIBVIRT_TYPE:-kvm} - -# nova supports pluggable schedulers. ``SimpleScheduler`` should work in most -# cases unless you are working on multi-zone mode. -SCHEDULER=${SCHEDULER:-nova.scheduler.simple.SimpleScheduler} - -# Use the eth0 IP unless an explicit is set by ``HOST_IP`` environment variable -if [ ! -n "$HOST_IP" ]; then - HOST_IP=`LC_ALL=C /sbin/ifconfig eth0 | grep -m 1 'inet addr:'| cut -d: -f2 | awk '{print $1}'` - if [ "$HOST_IP" = "" ]; then - echo "Could not determine host ip address." - echo "If this is not your first run of stack.sh, it is " - echo "possible that nova moved your eth0 ip address to the FLAT_NETWORK_BRIDGE." - echo "Please specify your HOST_IP in your localrc." - exit 1 + # Set up output redirection without log files + # Set fd 3 to a copy of stdout. So we can set fd 1 without losing + # stdout later. 
+ exec 3>&1 + if [[ "$VERBOSE" != "True" ]]; then + # Throw away stdout and stderr + exec 1>/dev/null 2>&1 fi + # Always send summary fd to original stdout + exec 6> >( $PYTHON $TOP_DIR/tools/outfilter.py -v >&3 ) fi -# Service startup timeout -SERVICE_TIMEOUT=${SERVICE_TIMEOUT:-60} +# Basic test for ``$DEST`` path permissions (fatal on error unless skipped) +check_path_perm_sanity ${DEST} + +# Configure Error Traps +# --------------------- + +# Kill background processes on exit +trap exit_trap EXIT +function exit_trap { + local r=$? + jobs=$(jobs -p) + # Only do the kill when we're logging through a process substitution, + # which currently is only to verbose logfile + if [[ -n $jobs && -n "$LOGFILE" && "$VERBOSE" == "True" ]]; then + echo "exit_trap: cleaning up child processes" + kill 2>&1 $jobs + fi + + #Remove timing data file + if [ -f "$OSCWRAP_TIMER_FILE" ] ; then + rm "$OSCWRAP_TIMER_FILE" + fi + + # Kill the last spinner process + kill_spinner + + if [[ $r -ne 0 ]]; then + echo "Error on exit" + # If we error before we've installed os-testr, this will fail. + if type -p generate-subunit > /dev/null; then + generate-subunit $DEVSTACK_START_TIME $SECONDS 'fail' >> ${SUBUNIT_OUTPUT} + fi + if [[ -z $LOGDIR ]]; then + ${PYTHON} $TOP_DIR/tools/worlddump.py + else + ${PYTHON} $TOP_DIR/tools/worlddump.py -d $LOGDIR + fi + else + # If we error before we've installed os-testr, this will fail. + if type -p generate-subunit > /dev/null; then + generate-subunit $DEVSTACK_START_TIME $SECONDS >> ${SUBUNIT_OUTPUT} + fi + fi + + exit $r +} + +# Exit on any errors so that errors don't compound +trap err_trap ERR +function err_trap { + local r=$? + set +o xtrace + if [[ -n "$LOGFILE" ]]; then + echo "${0##*/} failed: full log in $LOGFILE" + else + echo "${0##*/} failed" + fi + exit $r +} + +# Begin trapping error exit codes +set -o errexit + +# Print the kernel version +uname -a + +# Reset the bundle of CA certificates +SSL_BUNDLE_FILE="$DATA_DIR/ca-bundle.pem" +rm -f $SSL_BUNDLE_FILE + +# Import common services (database, message queue) configuration +source $TOP_DIR/lib/database +source $TOP_DIR/lib/rpc_backend + +# load host tuning functions and defaults +source $TOP_DIR/lib/host +# tune host memory early to ensure zswap/ksm are configured before +# doing memory intensive operation like cloning repos or unpacking packages. 
+tune_host + +# Configure Projects +# ================== + +# Clone all external plugins +fetch_plugins + +# Plugin Phase 0: override_defaults - allow plugins to override +# defaults before other services are run +run_phase override_defaults + +# Import Apache functions +source $TOP_DIR/lib/apache + +# Import TLS functions +source $TOP_DIR/lib/tls + +# Source project function libraries +source $TOP_DIR/lib/infra +source $TOP_DIR/lib/libraries +source $TOP_DIR/lib/lvm +source $TOP_DIR/lib/horizon +source $TOP_DIR/lib/keystone +source $TOP_DIR/lib/glance +source $TOP_DIR/lib/nova +source $TOP_DIR/lib/placement +source $TOP_DIR/lib/cinder +source $TOP_DIR/lib/swift +source $TOP_DIR/lib/neutron +source $TOP_DIR/lib/ldap +source $TOP_DIR/lib/dstat +source $TOP_DIR/lib/atop +source $TOP_DIR/lib/tcpdump +source $TOP_DIR/lib/etcd3 +source $TOP_DIR/lib/os-vif + +# Extras Source +# -------------- + +# Phase: source +run_phase source + + +# Interactive Configuration +# ------------------------- + +# Do all interactive config up front before the logging spew begins # Generic helper to configure passwords function read_password { + local xtrace + xtrace=$(set +o | grep xtrace) set +o xtrace var=$1; msg=$2 pw=${!var} - localrc=$TOP_DIR/localrc + if [[ -f $RC_DIR/localrc ]]; then + localrc=$TOP_DIR/localrc + else + localrc=$TOP_DIR/.localrc.password + fi # If the password is not defined yet, proceed to prompt user for a password. if [ ! $pw ]; then @@ -204,1055 +655,931 @@ function read_password { touch $localrc fi - # Presumably if we got this far it can only be that our localrc is missing - # the required password. Prompt user for a password and write to localrc. + # Presumably if we got this far it can only be that our + # localrc is missing the required password. Prompt user for a + # password and write to localrc. + echo '' echo '################################################################################' echo $msg echo '################################################################################' - echo "This value will be written to your localrc file so you don't have to enter it again." - echo "It is probably best to avoid spaces and weird characters." + echo "This value will be written to ${localrc} file so you don't have to enter it " + echo "again. Use only alphanumeric characters." echo "If you leave this blank, a random default value will be used." - echo "Enter a password now:" - read -e $var - pw=${!var} + pw=" " + while true; do + echo "Enter a password now:" + read -e $var + pw=${!var} + [[ "$pw" = "`echo $pw | tr -cd [:alnum:]`" ]] && break + echo "Invalid chars in password. Try again:" + done if [ ! $pw ]; then - pw=`openssl rand -hex 10` + pw=$(generate_hex_string 10) fi eval "$var=$pw" echo "$var=$pw" >> $localrc fi - set -o xtrace + + # restore previous xtrace value + $xtrace } -# Nova Network Configuration -# -------------------------- +# Database Configuration +# ---------------------- -# FIXME: more documentation about why these are important flags. Also -# we should make sure we use the same variable names as the flag names. - -PUBLIC_INTERFACE=${PUBLIC_INTERFACE:-eth0} -FIXED_RANGE=${FIXED_RANGE:-10.0.0.0/24} -FIXED_NETWORK_SIZE=${FIXED_NETWORK_SIZE:-256} -FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.224/28} -NET_MAN=${NET_MAN:-FlatDHCPManager} -EC2_DMZ_HOST=${EC2_DMZ_HOST:-$HOST_IP} -FLAT_NETWORK_BRIDGE=${FLAT_NETWORK_BRIDGE:-br100} -VLAN_INTERFACE=${VLAN_INTERFACE:-$PUBLIC_INTERFACE} - -# Multi-host is a mode where each compute node runs its own network node. 
This -# allows network operations and routing for a VM to occur on the server that is -# running the VM - removing a SPOF and bandwidth bottleneck. -MULTI_HOST=${MULTI_HOST:-False} - -# If you are using FlatDHCP on multiple hosts, set the ``FLAT_INTERFACE`` -# variable but make sure that the interface doesn't already have an -# ip or you risk breaking things. +# To select between database backends, add the following to ``local.conf``: # -# **DHCP Warning**: If your flat interface device uses DHCP, there will be a -# hiccup while the network is moved from the flat interface to the flat network -# bridge. This will happen when you launch your first instance. Upon launch -# you will lose all connectivity to the node, and the vm launch will probably -# fail. +# disable_service mysql +# enable_service postgresql # -# If you are running on a single node and don't need to access the VMs from -# devices other than that node, you can set the flat interface to the same -# value as ``FLAT_NETWORK_BRIDGE``. This will stop the network hiccup from -# occurring. -FLAT_INTERFACE=${FLAT_INTERFACE:-eth0} +# The available database backends are listed in ``DATABASE_BACKENDS`` after +# ``lib/database`` is sourced. ``mysql`` is the default. -## FIXME(ja): should/can we check that FLAT_INTERFACE is sane? +if initialize_database_backends; then + echo "Using $DATABASE_TYPE database backend" + # Last chance for the database password. This must be handled here + # because read_password is not a library function. + read_password DATABASE_PASSWORD "ENTER A PASSWORD TO USE FOR THE DATABASE." -# Using Quantum networking: -# -# Make sure that q-svc is enabled in ENABLED_SERVICES. If it is the network -# manager will be set to the QuantumManager. -# -# If you're planning to use the Quantum openvswitch plugin, set Q_PLUGIN to -# "openvswitch" and make sure the q-agt service is enabled in -# ENABLED_SERVICES. -# -# With Quantum networking the NET_MAN variable is ignored. + define_database_baseurl +else + echo "No database enabled" +fi -# MySQL & RabbitMQ -# ---------------- +# Queue Configuration +# ------------------- -# We configure Nova, Horizon, Glance and Keystone to use MySQL as their -# database server. While they share a single server, each has their own -# database and tables. +# Rabbit connection info +# In multi node DevStack, second node needs ``RABBIT_USERID``, but rabbit +# isn't enabled. +if is_service_enabled rabbit; then + read_password RABBIT_PASSWORD "ENTER A PASSWORD TO USE FOR RABBIT." +fi -# By default this script will install and configure MySQL. If you want to -# use an existing server, you can pass in the user/password/host parameters. -# You will need to send the same ``MYSQL_PASSWORD`` to every host if you are doing -# a multi-node devstack installation. -MYSQL_HOST=${MYSQL_HOST:-localhost} -MYSQL_USER=${MYSQL_USER:-root} -read_password MYSQL_PASSWORD "ENTER A PASSWORD TO USE FOR MYSQL." -# don't specify /db in this string, so we can use it for multiple services -BASE_SQL_CONN=${BASE_SQL_CONN:-mysql://$MYSQL_USER:$MYSQL_PASSWORD@$MYSQL_HOST} +# Keystone +# -------- -# Rabbit connection info -RABBIT_HOST=${RABBIT_HOST:-localhost} -read_password RABBIT_PASSWORD "ENTER A PASSWORD TO USE FOR RABBIT." +if is_service_enabled keystone; then + # Services authenticate to Identity with servicename/``SERVICE_PASSWORD`` + read_password SERVICE_PASSWORD "ENTER A SERVICE_PASSWORD TO USE FOR THE SERVICE AUTHENTICATION." 
+ # Horizon currently truncates usernames and passwords at 20 characters + read_password ADMIN_PASSWORD "ENTER A PASSWORD TO USE FOR HORIZON AND KEYSTONE (20 CHARS OR LESS)." + + # Keystone can now optionally install OpenLDAP by enabling the ``ldap`` + # service in ``local.conf`` (e.g. ``enable_service ldap``). + # To clean out the Keystone contents in OpenLDAP set ``KEYSTONE_CLEAR_LDAP`` + # to ``yes`` (e.g. ``KEYSTONE_CLEAR_LDAP=yes``) in ``local.conf``. To enable the + # Keystone Identity Driver (``keystone.identity.backends.ldap.Identity``) + # set ``KEYSTONE_IDENTITY_BACKEND`` to ``ldap`` (e.g. + # ``KEYSTONE_IDENTITY_BACKEND=ldap``) in ``local.conf``. + + # Only request LDAP password if the service is enabled + if is_service_enabled ldap; then + read_password LDAP_PASSWORD "ENTER A PASSWORD TO USE FOR LDAP" + fi +fi -# Glance connection info. Note the port must be specified. -GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$HOST_IP:9292} -# SWIFT +# Swift # ----- -# TODO: implement glance support -# TODO: add logging to different location. - -# By default the location of swift drives and objects is located inside -# the swift source directory. SWIFT_DATA_LOCATION variable allow you to redefine -# this. -SWIFT_DATA_LOCATION=${SWIFT_DATA_LOCATION:-${SWIFT_DIR}/data} - -# We are going to have the configuration files inside the source -# directory, change SWIFT_CONFIG_LOCATION if you want to adjust that. -SWIFT_CONFIG_LOCATION=${SWIFT_CONFIG_LOCATION:-${SWIFT_DIR}/config} - -# devstack will create a loop-back disk formatted as XFS to store the -# swift data. By default the disk size is 1 gigabyte. The variable -# SWIFT_LOOPBACK_DISK_SIZE specified in bytes allow you to change -# that. -SWIFT_LOOPBACK_DISK_SIZE=${SWIFT_LOOPBACK_DISK_SIZE:-1000000} - -# The ring uses a configurable number of bits from a path’s MD5 hash as -# a partition index that designates a device. The number of bits kept -# from the hash is known as the partition power, and 2 to the partition -# power indicates the partition count. Partitioning the full MD5 hash -# ring allows other parts of the cluster to work in batches of items at -# once which ends up either more efficient or at least less complex than -# working with each item separately or the entire cluster all at once. -# By default we define 9 for the partition count (which mean 512). -SWIFT_PARTITION_POWER_SIZE=${SWIFT_PARTITION_POWER_SIZE:-9} - -# We only ask for Swift Hash if we have enabled swift service. -if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then - # SWIFT_HASH is a random unique string for a swift cluster that + +if is_service_enabled s-proxy; then + # We only ask for Swift Hash if we have enabled swift service. + # ``SWIFT_HASH`` is a random unique string for a swift cluster that # can never change. read_password SWIFT_HASH "ENTER A RANDOM SWIFT HASH." + + if [[ -z "$SWIFT_TEMPURL_KEY" ]] && [[ "$SWIFT_ENABLE_TEMPURLS" == "True" ]]; then + read_password SWIFT_TEMPURL_KEY "ENTER A KEY FOR SWIFT TEMPURLS." + fi fi -# Keystone -# -------- +# Save configuration values +save_stackenv $LINENO -# Service Token - Openstack components need to have an admin token -# to validate user tokens. -read_password SERVICE_TOKEN "ENTER A SERVICE_TOKEN TO USE FOR THE SERVICE ADMIN TOKEN." -# Horizon currently truncates usernames and passwords at 20 characters -read_password ADMIN_PASSWORD "ENTER A PASSWORD TO USE FOR HORIZON AND KEYSTONE (20 CHARS OR LESS)." 
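
The interactive read_password prompts in this section can be skipped by pre-seeding the values in local.conf; an illustrative fragment with placeholder values only (these are examples, not recommended secrets):

    [[local|localrc]]
    ADMIN_PASSWORD=nomoresecret
    DATABASE_PASSWORD=$ADMIN_PASSWORD
    RABBIT_PASSWORD=$ADMIN_PASSWORD
    SERVICE_PASSWORD=$ADMIN_PASSWORD
    # Only prompted for when the corresponding service is enabled
    #LDAP_PASSWORD=$ADMIN_PASSWORD
    #SWIFT_HASH=66a3d6b56c1f479c8b4e70ab5c2000f5
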
- -LOGFILE=${LOGFILE:-"$PWD/stack.sh.$$.log"} -( -# So that errors don't compound we exit on any errors so you see only the -# first error that occurred. -trap failed ERR -failed() { - local r=$? - set +o xtrace - [ -n "$LOGFILE" ] && echo "${0##*/} failed: full log in $LOGFILE" - exit $r -} - -# Print the commands being run so that we can see the command that triggers -# an error. It is also useful for following along as the install occurs. -set -o xtrace - -# create the destination directory and ensure it is writable by the user -sudo mkdir -p $DEST -if [ ! -w $DEST ]; then - sudo chown `whoami` $DEST -fi # Install Packages # ================ + +# OpenStack uses a fair number of other projects. + +# Bring down global requirements before any use of pip_install. This is +# necessary to ensure that the constraints file is in place before we +# attempt to apply any constraints to pip installs. +# We always need the master branch in addition to any stable branch, so +# override GIT_DEPTH here. +GIT_DEPTH=0 git_clone $REQUIREMENTS_REPO $REQUIREMENTS_DIR $REQUIREMENTS_BRANCH + +# Install package requirements +# Source it so the entire environment is available +echo_summary "Installing package prerequisites" +source $TOP_DIR/tools/install_prereqs.sh + +# Configure an appropriate Python environment. # -# Openstack uses a fair number of other projects. - -# - We are going to install packages only for the services needed. -# - We are parsing the packages files and detecting metadatas. -# - If there is a NOPRIME as comment mean we are not doing the install -# just yet. -# - If we have the meta-keyword distro:DISTRO or -# distro:DISTRO1,DISTRO2 it will be installed only for those -# distros (case insensitive). -function get_packages() { - local file_to_parse="general" - local service - - for service in ${ENABLED_SERVICES//,/ }; do - # Allow individual services to specify dependencies - if [[ -e $FILES/apts/${service} ]]; then - file_to_parse="${file_to_parse} $service" - fi - if [[ $service == n-* ]]; then - if [[ ! $file_to_parse =~ nova ]]; then - file_to_parse="${file_to_parse} nova" - fi - elif [[ $service == g-* ]]; then - if [[ ! $file_to_parse =~ glance ]]; then - file_to_parse="${file_to_parse} glance" - fi - elif [[ $service == key* ]]; then - if [[ ! $file_to_parse =~ keystone ]]; then - file_to_parse="${file_to_parse} keystone" - fi - fi - done +# NOTE(ianw) 2021-08-11 : We install the latest pip here because pip +# is very active and changes are not generally reflected in the LTS +# distros. This often involves important things like dependency or +# conflict resolution, and has often been required because the +# complicated constraints etc. used by openstack have tickled bugs in +# distro versions of pip. We want to find these problems as they +# happen, rather than years later when we try to update our LTS +# distro. Whilst it is clear that global installations of upstream +# pip are less and less common, with virtualenv's being the general +# approach now; there are a lot of devstack plugins that assume a +# global install environment. 
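
The pip bootstrap that follows honours a couple of knobs from local.conf; an illustrative fragment (values are examples, not necessarily the defaults on every branch):

    # Skip the upstream get-pip bootstrap entirely, e.g. on disconnected hosts
    OFFLINE=True
    # Install projects into the shared /opt/stack/data/venv instead of
    # system site-packages (see the symlink handling below)
    GLOBAL_VENV=True
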
+if [[ "$OFFLINE" != "True" ]]; then + PYPI_ALTERNATIVE_URL=${PYPI_ALTERNATIVE_URL:-""} $TOP_DIR/tools/install_pip.sh +fi - for file in ${file_to_parse}; do - local fname=${FILES}/apts/${file} - local OIFS line package distros distro - [[ -e $fname ]] || { echo "missing: $fname"; exit 1 ;} +# Do the ugly hacks for broken packages and distros +source $TOP_DIR/tools/fixup_stuff.sh +fixup_all + +if [[ "$GLOBAL_VENV" == "True" ]] ; then + # TODO(frickler): find a better solution for this + sudo ln -sf /opt/stack/data/venv/bin/cinder-manage /usr/local/bin + sudo ln -sf /opt/stack/data/venv/bin/cinder-rtstool /usr/local/bin + sudo ln -sf /opt/stack/data/venv/bin/glance /usr/local/bin + sudo ln -sf /opt/stack/data/venv/bin/nova-manage /usr/local/bin + sudo ln -sf /opt/stack/data/venv/bin/openstack /usr/local/bin + sudo ln -sf /opt/stack/data/venv/bin/privsep-helper /usr/local/bin + sudo ln -sf /opt/stack/data/venv/bin/rally /usr/local/bin + sudo ln -sf /opt/stack/data/venv/bin/tox /usr/local/bin + + setup_devstack_virtualenv +fi - OIFS=$IFS - IFS=$'\n' - for line in $(<${fname}); do - if [[ $line =~ "NOPRIME" ]]; then - continue - fi +# Install subunit for the subunit output stream +pip_install -U os-testr - if [[ $line =~ (.*)#.*dist:([^ ]*) ]]; then # We are using BASH regexp matching feature. - package=${BASH_REMATCH[1]} - distros=${BASH_REMATCH[2]} - for distro in ${distros//,/ }; do #In bash ${VAR,,} will lowecase VAR - [[ ${distro,,} == ${DISTRO,,} ]] && echo $package - done - continue - fi +# the default rate limit of 1000 messages / 30 seconds is not +# sufficient given how verbose our logging is. +iniset -sudo /etc/systemd/journald.conf "Journal" "RateLimitBurst" "0" +sudo systemctl restart systemd-journald - echo ${line%#*} - done - IFS=$OIFS - done -} +# Virtual Environment +# ------------------- -# install apt requirements -apt_get update -apt_get install $(get_packages) - -# install python requirements -sudo PIP_DOWNLOAD_CACHE=/var/cache/pip pip install --use-mirrors `cat $FILES/pips/*` - -# git clone only if directory doesn't exist already. Since ``DEST`` might not -# be owned by the installation user, we create the directory and change the -# ownership to the proper user. -function git_clone { - - GIT_REMOTE=$1 - GIT_DEST=$2 - GIT_BRANCH=$3 - - # do a full clone only if the directory doesn't exist - if [ ! 
-d $GIT_DEST ]; then - git clone $GIT_REMOTE $GIT_DEST - cd $2 - # This checkout syntax works for both branches and tags - git checkout $GIT_BRANCH - elif [[ "$RECLONE" == "yes" ]]; then - # if it does exist then simulate what clone does if asked to RECLONE - cd $GIT_DEST - # set the url to pull from and fetch - git remote set-url origin $GIT_REMOTE - git fetch origin - # remove the existing ignored files (like pyc) as they cause breakage - # (due to the py files having older timestamps than our pyc, so python - # thinks the pyc files are correct using them) - find $GIT_DEST -name '*.pyc' -delete - git checkout -f origin/$GIT_BRANCH - # a local branch might not exist - git branch -D $GIT_BRANCH || true - git checkout -b $GIT_BRANCH - fi -} +# Install required infra support libraries +install_infra -# compute service -git_clone $NOVA_REPO $NOVA_DIR $NOVA_BRANCH -# python client library to nova that horizon (and others) use -git_clone $NOVACLIENT_REPO $NOVACLIENT_DIR $NOVACLIENT_BRANCH - -# glance, swift middleware and nova api needs keystone middleware -if [[ "$ENABLED_SERVICES" =~ "key" || - "$ENABLED_SERVICES" =~ "g-api" || - "$ENABLED_SERVICES" =~ "n-api" || - "$ENABLED_SERVICES" =~ "swift" ]]; then - # unified auth system (manages accounts/tokens) - git_clone $KEYSTONE_REPO $KEYSTONE_DIR $KEYSTONE_BRANCH -fi -if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then - # storage service - git_clone $SWIFT_REPO $SWIFT_DIR $SWIFT_BRANCH - # swift + keystone middleware - git_clone $SWIFT_KEYSTONE_REPO $SWIFT_KEYSTONE_DIR $SWIFT_KEYSTONE_BRANCH -fi -if [[ "$ENABLED_SERVICES" =~ "g-api" || - "$ENABLED_SERVICES" =~ "n-api" ]]; then - # image catalog service - git_clone $GLANCE_REPO $GLANCE_DIR $GLANCE_BRANCH -fi -if [[ "$ENABLED_SERVICES" =~ "n-vnc" ]]; then - # a websockets/html5 or flash powered VNC console for vm instances - git_clone $NOVNC_REPO $NOVNC_DIR $NOVNC_BRANCH -fi -if [[ "$ENABLED_SERVICES" =~ "horizon" ]]; then - # django powered web control panel for openstack - git_clone $HORIZON_REPO $HORIZON_DIR $HORIZON_BRANCH $HORIZON_TAG -fi -if [[ "$ENABLED_SERVICES" =~ "openstackx" ]]; then - # openstackx is a collection of extensions to openstack.compute & nova - # that is *deprecated*. The code is being moved into python-novaclient & nova. - git_clone $OPENSTACKX_REPO $OPENSTACKX_DIR $OPENSTACKX_BRANCH -fi -if [[ "$ENABLED_SERVICES" =~ "quantum" ]]; then - # quantum - git_clone $QUANTUM_REPO $QUANTUM_DIR $QUANTUM_BRANCH -fi - -# Initialization -# ============== +# Install bindep +$VIRTUALENV_CMD $DEST/bindep-venv +# TODO(ianw) : optionally install from zuul checkout? 
+$DEST/bindep-venv/bin/pip install bindep +export BINDEP_CMD=${DEST}/bindep-venv/bin/bindep +# Install packages as defined in plugin bindep.txt files +pkgs="$( _get_plugin_bindep_packages )" +if [[ -n "${pkgs}" ]]; then + install_package ${pkgs} +fi -# setup our checkouts so they are installed into python path -# allowing ``import nova`` or ``import glance.client`` -if [[ "$ENABLED_SERVICES" =~ "key" || - "$ENABLED_SERVICES" =~ "g-api" || - "$ENABLED_SERVICES" =~ "n-api" || - "$ENABLED_SERVICES" =~ "swift" ]]; then - cd $KEYSTONE_DIR; sudo python setup.py develop -fi -if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then - cd $SWIFT_DIR; sudo python setup.py develop - cd $SWIFT_KEYSTONE_DIR; sudo python setup.py develop -fi -if [[ "$ENABLED_SERVICES" =~ "g-api" || - "$ENABLED_SERVICES" =~ "n-api" ]]; then - cd $GLANCE_DIR; sudo python setup.py develop -fi -cd $NOVACLIENT_DIR; sudo python setup.py develop -cd $NOVA_DIR; sudo python setup.py develop -if [[ "$ENABLED_SERVICES" =~ "openstackx" ]]; then - cd $OPENSTACKX_DIR; sudo python setup.py develop -fi -if [[ "$ENABLED_SERVICES" =~ "horizon" ]]; then - cd $HORIZON_DIR/horizon; sudo python setup.py develop - cd $HORIZON_DIR/openstack-dashboard; sudo python setup.py develop -fi -if [[ "$ENABLED_SERVICES" =~ "quantum" ]]; then - cd $QUANTUM_DIR; sudo python setup.py develop -fi - -# Add a useful screenrc. This isn't required to run openstack but is we do -# it since we are going to run the services in screen for simple -cp $FILES/screenrc ~/.screenrc - -# Rabbit -# --------- - -if [[ "$ENABLED_SERVICES" =~ "rabbit" ]]; then - # Install and start rabbitmq-server - # the temp file is necessary due to LP: #878600 - tfile=$(mktemp) - apt_get install rabbitmq-server > "$tfile" 2>&1 - cat "$tfile" - rm -f "$tfile" - # change the rabbit password since the default is "guest" - sudo rabbitmqctl change_password guest $RABBIT_PASSWORD -fi - -# Mysql -# --------- - -if [[ "$ENABLED_SERVICES" =~ "mysql" ]]; then - - # Seed configuration with mysql password so that apt-get install doesn't - # prompt us for a password upon install. 
- cat <$HOME/.my.cnf -[client] -user=$MYSQL_USER -password=$MYSQL_PASSWORD -host=$MYSQL_HOST -EOF - chmod 0600 $HOME/.my.cnf - fi +# Extras Pre-install +# ------------------ +# Phase: pre-install +run_phase stack pre-install - # Install and start mysql-server - apt_get install mysql-server - # Update the DB to give user ‘$MYSQL_USER’@’%’ full control of the all databases: - sudo mysql -uroot -p$MYSQL_PASSWORD -h127.0.0.1 -e "GRANT ALL PRIVILEGES ON *.* TO '$MYSQL_USER'@'%' identified by '$MYSQL_PASSWORD';" +# NOTE(danms): Set global limits before installing anything +set_systemd_override DefaultLimitNOFILE ${ULIMIT_NOFILE} - # Edit /etc/mysql/my.cnf to change ‘bind-address’ from localhost (127.0.0.1) to any (0.0.0.0) and restart the mysql service: - sudo sed -i 's/127.0.0.1/0.0.0.0/g' /etc/mysql/my.cnf - sudo service mysql restart +install_rpc_backend +restart_rpc_backend + +if is_service_enabled $DATABASE_BACKENDS; then + install_database +fi +if [ -n "$DATABASE_TYPE" ]; then + install_database_python fi +if is_service_enabled neutron; then + install_neutron_agent_packages +fi -# Horizon -# --------- +if is_service_enabled etcd3; then + install_etcd3 +fi -# Setup the django horizon application to serve via apache/wsgi +# Setup TLS certs +# --------------- -if [[ "$ENABLED_SERVICES" =~ "horizon" ]]; then +# Do this early, before any webservers are set up to ensure +# we don't run into problems with missing certs when apache +# is restarted. +if is_service_enabled tls-proxy; then + configure_CA + init_CA + init_cert +fi - # Install apache2, which is NOPRIME'd - apt_get install apache2 libapache2-mod-wsgi +# Dstat +# ----- - # Horizon currently imports quantum even if you aren't using it. Instead - # of installing quantum we can create a simple module that will pass the - # initial imports - mkdir -p $HORIZON_DIR/openstack-dashboard/quantum || true - touch $HORIZON_DIR/openstack-dashboard/quantum/__init__.py - touch $HORIZON_DIR/openstack-dashboard/quantum/client.py +# Install dstat services prerequisites +install_dstat - # ``local_settings.py`` is used to override horizon default settings. - local_settings=$HORIZON_DIR/openstack-dashboard/local/local_settings.py - cp $FILES/horizon_settings.py $local_settings +# Check Out and Install Source +# ---------------------------- - # Enable quantum in dashboard, if requested - if [[ "$ENABLED_SERVICES" =~ "quantum" ]]; then - sudo sed -e "s,QUANTUM_ENABLED = False,QUANTUM_ENABLED = True,g" -i $local_settings - fi +echo_summary "Installing OpenStack project source" - # Initialize the horizon database (it stores sessions and notices shown to - # users). The user system is external (keystone). 
- cd $HORIZON_DIR/openstack-dashboard - dashboard/manage.py syncdb +# Install additional libraries +install_libs - # create an empty directory that apache uses as docroot - sudo mkdir -p $HORIZON_DIR/.blackhole +# Install uwsgi +install_apache_uwsgi - ## Configure apache's 000-default to run horizon - sudo cp $FILES/000-default.template /etc/apache2/sites-enabled/000-default - sudo sed -e "s,%USER%,$USER,g" -i /etc/apache2/sites-enabled/000-default - sudo sed -e "s,%HORIZON_DIR%,$HORIZON_DIR,g" -i /etc/apache2/sites-enabled/000-default - sudo service apache2 restart +# Install client libraries +install_keystoneauth +install_keystoneclient +install_glanceclient +install_cinderclient +install_novaclient +if is_service_enabled swift glance horizon; then + install_swiftclient +fi +if is_service_enabled neutron nova horizon; then + install_neutronclient fi +# Install middleware +install_keystonemiddleware -# Glance -# ------ +if is_service_enabled keystone; then + if [ "$KEYSTONE_SERVICE_HOST" == "$SERVICE_HOST" ]; then + stack_install_service keystone + configure_keystone + fi +fi -if [[ "$ENABLED_SERVICES" =~ "g-reg" ]]; then - GLANCE_IMAGE_DIR=$DEST/glance/images - # Delete existing images - rm -rf $GLANCE_IMAGE_DIR +if is_service_enabled swift; then + if is_service_enabled ceilometer; then + install_ceilometermiddleware + fi + stack_install_service swift + configure_swift - # Use local glance directories - mkdir -p $GLANCE_IMAGE_DIR + # s3api middleware to provide S3 emulation to Swift + if is_service_enabled s3api; then + # Replace the nova-objectstore port by the swift port + S3_SERVICE_PORT=8080 + fi +fi - # (re)create glance database - mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS glance;' - mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE glance;' +if is_service_enabled g-api n-api; then + # Image catalog service + stack_install_service glance + configure_glance +fi - # Copy over our glance configurations and update them - GLANCE_CONF=$GLANCE_DIR/etc/glance-registry.conf - cp $FILES/glance-registry.conf $GLANCE_CONF - sudo sed -e "s,%SQL_CONN%,$BASE_SQL_CONN/glance,g" -i $GLANCE_CONF - sudo sed -e "s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g" -i $GLANCE_CONF - sudo sed -e "s,%DEST%,$DEST,g" -i $GLANCE_CONF - sudo sed -e "s,%SYSLOG%,$SYSLOG,g" -i $GLANCE_CONF +if is_service_enabled cinder; then + # Block volume service + stack_install_service cinder + configure_cinder +fi - GLANCE_API_CONF=$GLANCE_DIR/etc/glance-api.conf - cp $FILES/glance-api.conf $GLANCE_API_CONF - sudo sed -e "s,%DEST%,$DEST,g" -i $GLANCE_API_CONF - sudo sed -e "s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g" -i $GLANCE_API_CONF - sudo sed -e "s,%SYSLOG%,$SYSLOG,g" -i $GLANCE_API_CONF +if is_service_enabled neutron; then + # Network service + stack_install_service neutron fi -# Nova -# ---- +if is_service_enabled nova; then + # Compute service + stack_install_service nova + configure_nova +fi -if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then - # We are going to use a sample http middleware configuration based on the - # one from the keystone project to launch nova. This paste config adds - # the configuration required for nova to validate keystone tokens. We add - # our own service token to the configuration. 
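
The use_library_from_git checks above switch a library from its PyPI release to a git checkout; a hedged local.conf fragment, assuming LIBS_FROM_GIT is the comma-separated list those checks consult:

    [[local|localrc]]
    # Develop-install these libraries from source instead of pip releases
    LIBS_FROM_GIT=python-openstackclient,oslo.messaging
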
- cp $FILES/nova-api-paste.ini $NOVA_DIR/bin - sed -e "s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g" -i $NOVA_DIR/bin/nova-api-paste.ini +if is_service_enabled placement; then + # placement api + stack_install_service placement + configure_placement fi -if [[ "$ENABLED_SERVICES" =~ "n-cpu" ]]; then +# create a placement-client fake service to know we need to configure +# placement connectivity. We configure the placement service for nova +# if placement-api or placement-client is active, and n-cpu on the +# same box. +if is_service_enabled placement placement-client; then + if is_service_enabled n-cpu || is_service_enabled n-sch; then + configure_placement_nova_compute + fi +fi - # Virtualization Configuration - # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +if is_service_enabled horizon; then + # dashboard + stack_install_service horizon +fi - # attempt to load modules: network block device - used to manage qcow images - sudo modprobe nbd || true +if is_service_enabled tls-proxy; then + fix_system_ca_bundle_path +fi - # Check for kvm (hardware based virtualization). If unable to initialize - # kvm, we drop back to the slower emulation mode (qemu). Note: many systems - # come with hardware virtualization disabled in BIOS. - if [[ "$LIBVIRT_TYPE" == "kvm" ]]; then - apt_get install libvirt-bin - sudo modprobe kvm || true - if [ ! -e /dev/kvm ]; then - echo "WARNING: Switching to QEMU" - LIBVIRT_TYPE=qemu - fi +if is_service_enabled cinder || [[ "$USE_CINDER_FOR_GLANCE" == "True" ]]; then + # os-brick setup required by glance, cinder, and nova + init_os_brick +fi + +# Extras Install +# -------------- + +# Phase: install +run_phase stack install + +# Install the OpenStack client, needed for most setup commands +if use_library_from_git "python-openstackclient"; then + git_clone_by_name "python-openstackclient" + setup_dev_lib "python-openstackclient" +else + pip_install_gr python-openstackclient + if is_service_enabled openstack-cli-server; then + install_openstack_cli_server fi +fi + +# Installs alias for osc so that we can collect timing for all +# osc commands. Alias dies with stack.sh. +install_oscwrap - # Install and configure **LXC** if specified. LXC is another approach to - # splitting a system into many smaller parts. LXC uses cgroups and chroot - # to simulate multiple systems. - if [[ "$LIBVIRT_TYPE" == "lxc" ]]; then - apt_get install lxc - # lxc uses cgroups (a kernel interface via virtual filesystem) configured - # and mounted to ``/cgroup`` - sudo mkdir -p /cgroup - if ! grep -q cgroup /etc/fstab; then - echo none /cgroup cgroup cpuacct,memory,devices,cpu,freezer,blkio 0 0 | sudo tee -a /etc/fstab +# Syslog +# ------ + +if [[ $SYSLOG != "False" ]]; then + if [[ "$SYSLOG_HOST" = "$HOST_IP" ]]; then + # Configure the master host to receive + cat </dev/null +\$ModLoad imrelp +\$InputRELPServerRun $SYSLOG_PORT +EOF + else + # Set rsyslog to send to remote host + cat </dev/null +*.* :omrelp:$SYSLOG_HOST:$SYSLOG_PORT +EOF + fi + + RSYSLOGCONF="/etc/rsyslog.conf" + if [ -f $RSYSLOGCONF ]; then + sudo cp -b $RSYSLOGCONF $RSYSLOGCONF.bak + if [[ $(grep '$SystemLogRateLimitBurst' $RSYSLOGCONF) ]]; then + sudo sed -i 's/$SystemLogRateLimitBurst\ .*/$SystemLogRateLimitBurst\ 0/' $RSYSLOGCONF + else + sudo sed -i '$ i $SystemLogRateLimitBurst\ 0' $RSYSLOGCONF fi - if ! 
mount -n | grep -q cgroup; then - sudo mount /cgroup + if [[ $(grep '$SystemLogRateLimitInterval' $RSYSLOGCONF) ]]; then + sudo sed -i 's/$SystemLogRateLimitInterval\ .*/$SystemLogRateLimitInterval\ 0/' $RSYSLOGCONF + else + sudo sed -i '$ i $SystemLogRateLimitInterval\ 0' $RSYSLOGCONF fi fi - # The user that nova runs as needs to be member of libvirtd group otherwise - # nova-compute will be unable to use libvirt. - sudo usermod -a -G libvirtd `whoami` - # libvirt detects various settings on startup, as we potentially changed - # the system configuration (modules, filesystems), we need to restart - # libvirt to detect those changes. - sudo /etc/init.d/libvirt-bin restart + echo_summary "Starting rsyslog" + restart_service rsyslog +fi - # Instance Storage - # ~~~~~~~~~~~~~~~~ +# Export Certificate Authority Bundle +# ----------------------------------- - # Nova stores each instance in its own directory. - mkdir -p $NOVA_DIR/instances +# If certificates were used and written to the SSL bundle file then these +# should be exported so clients can validate their connections. - # You can specify a different disk to be mounted and used for backing the - # virtual machines. If there is a partition labeled nova-instances we - # mount it (ext filesystems can be labeled via e2label). - if [ -L /dev/disk/by-label/nova-instances ]; then - if ! mount -n | grep -q $NOVA_DIR/instances; then - sudo mount -L nova-instances $NOVA_DIR/instances - sudo chown -R `whoami` $NOVA_DIR/instances - fi - fi +if [ -f $SSL_BUNDLE_FILE ]; then + export OS_CACERT=$SSL_BUNDLE_FILE +fi + + +# Configure database +# ------------------ - # Clean out the instances directory. - sudo rm -rf $NOVA_DIR/instances/* +if is_service_enabled $DATABASE_BACKENDS; then + configure_database fi -if [[ "$ENABLED_SERVICES" =~ "n-net" ]]; then - # delete traces of nova networks from prior runs - sudo killall dnsmasq || true - rm -rf $NOVA_DIR/networks - mkdir -p $NOVA_DIR/networks +# Save configuration values +save_stackenv $LINENO + + +# Start Services +# ============== + +# Dstat +# ----- + +# A better kind of sysstat, with the top process per time slice +start_dstat + +if is_service_enabled atop; then + configure_atop + install_atop + start_atop fi -# Storage Service -if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then - # We first do a bit of setup by creating the directories and - # changing the permissions so we can run it as our user. - - USER_GROUP=$(id -g) - sudo mkdir -p ${SWIFT_DATA_LOCATION}/drives - sudo chown -R $USER:${USER_GROUP} ${SWIFT_DATA_LOCATION}/drives - - # We then create a loopback disk and format it to XFS. - if [[ ! -e ${SWIFT_DATA_LOCATION}/drives/images/swift.img ]]; then - mkdir -p ${SWIFT_DATA_LOCATION}/drives/images - sudo touch ${SWIFT_DATA_LOCATION}/drives/images/swift.img - sudo chown $USER: ${SWIFT_DATA_LOCATION}/drives/images/swift.img - - dd if=/dev/zero of=${SWIFT_DATA_LOCATION}/drives/images/swift.img \ - bs=1024 count=0 seek=${SWIFT_LOOPBACK_DISK_SIZE} - mkfs.xfs -f -i size=1024 ${SWIFT_DATA_LOCATION}/drives/images/swift.img - fi +# Run a background tcpdump for debugging +# Note: must set TCPDUMP_ARGS with the enabled service +if is_service_enabled tcpdump; then + start_tcpdump +fi - # After the drive being created we mount the disk with a few mount - # options to make it most efficient as possible for swift. - mkdir -p ${SWIFT_DATA_LOCATION}/drives/sdb1 - if ! 
egrep -q ${SWIFT_DATA_LOCATION}/drives/sdb1 /proc/mounts; then - sudo mount -t xfs -o loop,noatime,nodiratime,nobarrier,logbufs=8 \ - ${SWIFT_DATA_LOCATION}/drives/images/swift.img ${SWIFT_DATA_LOCATION}/drives/sdb1 - fi +# Etcd +# ----- - # We then create link to that mounted location so swift would know - # where to go. - for x in {1..4}; do sudo ln -sf ${SWIFT_DATA_LOCATION}/drives/sdb1/$x ${SWIFT_DATA_LOCATION}/$x; done - - # We now have to emulate a few different servers into one we - # create all the directories needed for swift - tmpd="" - for d in ${SWIFT_DATA_LOCATION}/drives/sdb1/{1..4} \ - ${SWIFT_CONFIG_LOCATION}/{object,container,account}-server \ - ${SWIFT_DATA_LOCATION}/{1..4}/node/sdb1 /var/run/swift; do - [[ -d $d ]] && continue - sudo install -o ${USER} -g $USER_GROUP -d $d - done +# etcd is a distributed key value store that provides a reliable way to store data across a cluster of machines +if is_service_enabled etcd3; then + start_etcd3 +fi + +# Keystone +# -------- - # We do want to make sure this is all owned by our user. - sudo chown -R $USER: ${SWIFT_DATA_LOCATION}/{1..4}/node - sudo chown -R $USER: ${SWIFT_CONFIG_LOCATION} - - # swift-init has a bug using /etc/swift until bug #885595 is fixed - # we have to create a link - sudo ln -sf ${SWIFT_CONFIG_LOCATION} /etc/swift - - # Swift use rsync to syncronize between all the different - # partitions (which make more sense when you have a multi-node - # setup) we configure it with our version of rsync. - sed -e "s/%GROUP%/${USER_GROUP}/;s/%USER%/$USER/;s,%SWIFT_DATA_LOCATION%,$SWIFT_DATA_LOCATION," $FILES/swift/rsyncd.conf | sudo tee /etc/rsyncd.conf - sudo sed -i '/^RSYNC_ENABLE=false/ { s/false/true/ }' /etc/default/rsync - - # By default Swift will be installed with the tempauth middleware - # which has some default username and password if you have - # configured keystone it will checkout the directory. - if [[ "$ENABLED_SERVICES" =~ "key" ]]; then - swift_auth_server=keystone - - # We install the memcache server as this is will be used by the - # middleware to cache the tokens auths for a long this is needed. - apt_get install memcached - - # We need a special version of bin/swift which understand the - # OpenStack api 2.0, we download it until this is getting - # integrated in swift. - sudo curl -s -o/usr/local/bin/swift \ - 'https://review.openstack.org/gitweb?p=openstack/swift.git;a=blob_plain;f=bin/swift;hb=48bfda6e2fdf3886c98bd15649887d54b9a2574e' - else - swift_auth_server=tempauth - fi - - # We do the install of the proxy-server and swift configuration - # replacing a few directives to match our configuration. - sed "s,%SWIFT_CONFIG_LOCATION%,${SWIFT_CONFIG_LOCATION},;s/%USER%/$USER/;s/%SERVICE_TOKEN%/${SERVICE_TOKEN}/;s/%AUTH_SERVER%/${swift_auth_server}/" \ - $FILES/swift/proxy-server.conf|sudo tee ${SWIFT_CONFIG_LOCATION}/proxy-server.conf - - sed -e "s/%SWIFT_HASH%/$SWIFT_HASH/" $FILES/swift/swift.conf > ${SWIFT_CONFIG_LOCATION}/swift.conf - - # We need to generate a object/account/proxy configuration - # emulating 4 nodes on different ports we have a little function - # that help us doing that. 
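
The dstat/atop/tcpdump helpers started earlier in this phase are plain opt-in services; an illustrative local.conf fragment (the capture filter is an example only and assumes TCPDUMP_ARGS is passed through to tcpdump unchanged):

    [[local|localrc]]
    enable_service tcpdump
    # Example filter: capture AMQP and MySQL traffic on all interfaces
    TCPDUMP_ARGS="-i any port 5672 or port 3306"
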
- function generate_swift_configuration() { - local server_type=$1 - local bind_port=$2 - local log_facility=$3 - local node_number - - for node_number in {1..4}; do - node_path=${SWIFT_DATA_LOCATION}/${node_number} - sed -e "s,%SWIFT_CONFIG_LOCATION%,${SWIFT_CONFIG_LOCATION},;s,%USER%,$USER,;s,%NODE_PATH%,${node_path},;s,%BIND_PORT%,${bind_port},;s,%LOG_FACILITY%,${log_facility}," \ - $FILES/swift/${server_type}-server.conf > ${SWIFT_CONFIG_LOCATION}/${server_type}-server/${node_number}.conf - bind_port=$(( ${bind_port} + 10 )) - log_facility=$(( ${log_facility} + 1 )) - done - } - generate_swift_configuration object 6010 2 - generate_swift_configuration container 6011 2 - generate_swift_configuration account 6012 2 - - # We create two helper scripts : - # - # - swift-remakerings - # Allow to recreate rings from scratch. - # - swift-startmain - # Restart your full cluster. - # - sed -e "s,%SWIFT_CONFIG_LOCATION%,${SWIFT_CONFIG_LOCATION},;s/%SWIFT_PARTITION_POWER_SIZE%/$SWIFT_PARTITION_POWER_SIZE/" $FILES/swift/swift-remakerings | \ - sudo tee /usr/local/bin/swift-remakerings - sudo install -m755 $FILES/swift/swift-startmain /usr/local/bin/ - sudo chmod +x /usr/local/bin/swift-* - - # We then can start rsync. - sudo /etc/init.d/rsync restart || : - - # Create our ring for the object/container/account. - /usr/local/bin/swift-remakerings - - # And now we launch swift-startmain to get our cluster running - # ready to be tested. - /usr/local/bin/swift-startmain || : - - unset s swift_hash swift_auth_server tmpd +if is_service_enabled tls-proxy; then + start_tls_proxy http-services '*' 443 $SERVICE_HOST 80 fi -# Volume Service -# -------------- +# Write a clouds.yaml file and use the devstack-admin cloud +write_clouds_yaml +export OS_CLOUD=${OS_CLOUD:-devstack-admin} -if [[ "$ENABLED_SERVICES" =~ "n-vol" ]]; then - # - # Configure a default volume group called 'nova-volumes' for the nova-volume - # service if it does not yet exist. If you don't wish to use a file backed - # volume group, create your own volume group called 'nova-volumes' before - # invoking stack.sh. - # - # By default, the backing file is 2G in size, and is stored in /opt/stack. +if is_service_enabled keystone; then + echo_summary "Starting Keystone" - apt_get install iscsitarget-dkms iscsitarget + if [ "$KEYSTONE_SERVICE_HOST" == "$SERVICE_HOST" ]; then + init_keystone + start_keystone + bootstrap_keystone + fi - if ! 
sudo vgdisplay | grep -q $VOLUME_GROUP; then - VOLUME_BACKING_FILE=${VOLUME_BACKING_FILE:-$DEST/nova-volumes-backing-file} - VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-2052M} - truncate -s $VOLUME_BACKING_FILE_SIZE $VOLUME_BACKING_FILE - DEV=`sudo losetup -f --show $VOLUME_BACKING_FILE` - sudo vgcreate $VOLUME_GROUP $DEV + create_keystone_accounts + if is_service_enabled nova; then + async_runfunc create_nova_accounts + fi + if is_service_enabled glance; then + async_runfunc create_glance_accounts + fi + if is_service_enabled cinder; then + async_runfunc create_cinder_accounts + fi + if is_service_enabled neutron; then + async_runfunc create_neutron_accounts + fi + if is_service_enabled swift; then + async_runfunc create_swift_accounts fi - # Configure iscsitarget - sudo sed 's/ISCSITARGET_ENABLE=false/ISCSITARGET_ENABLE=true/' -i /etc/default/iscsitarget - sudo /etc/init.d/iscsitarget restart fi -function add_nova_flag { - echo "$1" >> $NOVA_DIR/bin/nova.conf -} +# Horizon +# ------- + +if is_service_enabled horizon; then + echo_summary "Configuring Horizon" + async_runfunc configure_horizon +fi + +async_wait create_nova_accounts create_glance_accounts create_cinder_accounts +async_wait create_neutron_accounts create_swift_accounts configure_horizon + +# Glance +# ------ + +# NOTE(yoctozepto): limited to node hosting the database which is the controller +if is_service_enabled $DATABASE_BACKENDS && is_service_enabled glance; then + echo_summary "Configuring Glance" + async_runfunc init_glance +fi + + +# Neutron +# ------- + +if is_service_enabled neutron; then + echo_summary "Configuring Neutron" -# (re)create nova.conf -rm -f $NOVA_DIR/bin/nova.conf -add_nova_flag "--verbose" -add_nova_flag "--allow_admin_api" -add_nova_flag "--scheduler_driver=$SCHEDULER" -add_nova_flag "--dhcpbridge_flagfile=$NOVA_DIR/bin/nova.conf" -add_nova_flag "--fixed_range=$FIXED_RANGE" -if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then - add_nova_flag "--network_manager=nova.network.quantum.manager.QuantumManager" - if [[ "$Q_PLUGIN" = "openvswitch" ]]; then - add_nova_flag "--libvirt_vif_type=ethernet" - add_nova_flag "--libvirt_vif_driver=nova.virt.libvirt.vif.LibvirtOpenVswitchDriver" - add_nova_flag "--linuxnet_interface_driver=nova.network.linux_net.LinuxOVSInterfaceDriver" - add_nova_flag "--quantum-use-dhcp" + configure_neutron + + # Run init_neutron only on the node hosting the Neutron API server + if is_service_enabled $DATABASE_BACKENDS && is_service_enabled neutron; then + async_runfunc init_neutron fi -else - add_nova_flag "--network_manager=nova.network.manager.$NET_MAN" -fi -if [[ "$ENABLED_SERVICES" =~ "n-vol" ]]; then - add_nova_flag "--volume_group=$VOLUME_GROUP" -fi -add_nova_flag "--my_ip=$HOST_IP" -add_nova_flag "--public_interface=$PUBLIC_INTERFACE" -add_nova_flag "--vlan_interface=$VLAN_INTERFACE" -add_nova_flag "--sql_connection=$BASE_SQL_CONN/nova" -add_nova_flag "--libvirt_type=$LIBVIRT_TYPE" -if [[ "$ENABLED_SERVICES" =~ "openstackx" ]]; then - add_nova_flag "--osapi_extension=nova.api.openstack.v2.contrib.standard_extensions" - add_nova_flag "--osapi_extension=extensions.admin.Admin" -fi -if [[ "$ENABLED_SERVICES" =~ "n-vnc" ]]; then - VNCPROXY_URL=${VNCPROXY_URL:-"http://$HOST_IP:6080"} - add_nova_flag "--vncproxy_url=$VNCPROXY_URL" - add_nova_flag "--vncproxy_wwwroot=$NOVNC_DIR/" -fi -add_nova_flag "--api_paste_config=$NOVA_DIR/bin/nova-api-paste.ini" -add_nova_flag "--image_service=nova.image.glance.GlanceImageService" -add_nova_flag "--ec2_dmz_host=$EC2_DMZ_HOST" 
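
The async_runfunc/async_wait pairs above run independent setup functions in the background and join them later, so the account-creation steps overlap instead of running serially; a condensed sketch of the idiom using calls from this section:

    # Start two independent steps without waiting for either
    async_runfunc create_nova_accounts
    async_runfunc create_glance_accounts
    # ... unrelated serial work can happen here ...
    # Block until both have finished, surfacing any failure
    async_wait create_nova_accounts create_glance_accounts
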
-add_nova_flag "--rabbit_host=$RABBIT_HOST" -add_nova_flag "--rabbit_password=$RABBIT_PASSWORD" -add_nova_flag "--glance_api_servers=$GLANCE_HOSTPORT" -add_nova_flag "--force_dhcp_release" -if [ -n "$INSTANCES_PATH" ]; then - add_nova_flag "--instances_path=$INSTANCES_PATH" -fi -if [ "$MULTI_HOST" != "False" ]; then - add_nova_flag "--multi_host" - add_nova_flag "--send_arp_for_ha" -fi -if [ "$SYSLOG" != "False" ]; then - add_nova_flag "--use_syslog" -fi - -# You can define extra nova conf flags by defining the array EXTRA_FLAGS, -# For Example: EXTRA_FLAGS=(--foo --bar=2) -for I in "${EXTRA_FLAGS[@]}"; do - add_nova_flag $I -done - -# XenServer -# --------- - -if [ "$VIRT_DRIVER" = 'xenserver' ]; then - read_password XENAPI_PASSWORD "ENTER A PASSWORD TO USE FOR XEN." - add_nova_flag "--connection_type=xenapi" - add_nova_flag "--xenapi_connection_url=http://169.254.0.1" - add_nova_flag "--xenapi_connection_username=root" - add_nova_flag "--xenapi_connection_password=$XENAPI_PASSWORD" - add_nova_flag "--flat_injected=False" - add_nova_flag "--flat_interface=eth1" - add_nova_flag "--flat_network_bridge=xenbr1" - add_nova_flag "--public_interface=eth3" -else - add_nova_flag "--flat_network_bridge=$FLAT_NETWORK_BRIDGE" - if [ -n "$FLAT_INTERFACE" ]; then - add_nova_flag "--flat_interface=$FLAT_INTERFACE" +fi + + +# Nova +# ---- + +if is_service_enabled q-dhcp; then + # TODO(frickler): These are remnants from n-net, check which parts are really + # still needed for Neutron. + # Do not kill any dnsmasq instance spawned by NetworkManager + netman_pid=$(pidof NetworkManager || true) + if [ -z "$netman_pid" ]; then + sudo killall dnsmasq || true + else + sudo ps h -o pid,ppid -C dnsmasq | grep -v $netman_pid | awk '{print $1}' | sudo xargs kill || true fi + + clean_iptables + + # Force IP forwarding on, just in case + sudo sysctl -w net.ipv4.ip_forward=1 fi -# Nova Database -# ~~~~~~~~~~~~~ +# os-vif +# ------ +if is_service_enabled nova neutron; then + configure_os_vif +fi -# All nova components talk to a central database. We will need to do this step -# only once for an entire cluster. 
+# Storage Service +# --------------- -if [[ "$ENABLED_SERVICES" =~ "mysql" ]]; then - # (re)create nova database - mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS nova;' - mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE nova;' +if is_service_enabled swift; then + echo_summary "Configuring Swift" + async_runfunc init_swift +fi - # (re)create nova database - $NOVA_DIR/bin/nova-manage db sync + +# Volume Service +# -------------- + +if is_service_enabled cinder; then + echo_summary "Configuring Cinder" + async_runfunc init_cinder fi +# Placement Service +# --------------- -# Keystone -# -------- +if is_service_enabled placement; then + echo_summary "Configuring placement" + async_runfunc init_placement +fi + +# Wait for neutron and placement before starting nova +async_wait init_neutron +async_wait init_placement +async_wait init_glance +async_wait init_swift +async_wait init_cinder -if [[ "$ENABLED_SERVICES" =~ "key" ]]; then - # (re)create keystone database - mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS keystone;' - mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE keystone;' +# Compute Service +# --------------- - # Configure keystone.conf - KEYSTONE_CONF=$KEYSTONE_DIR/etc/keystone.conf - cp $FILES/keystone.conf $KEYSTONE_CONF - sudo sed -e "s,%SQL_CONN%,$BASE_SQL_CONN/keystone,g" -i $KEYSTONE_CONF - sudo sed -e "s,%DEST%,$DEST,g" -i $KEYSTONE_CONF +if is_service_enabled nova; then + echo_summary "Configuring Nova" + init_nova - # keystone_data.sh creates our admin user and our ``SERVICE_TOKEN``. - KEYSTONE_DATA=$KEYSTONE_DIR/bin/keystone_data.sh - cp $FILES/keystone_data.sh $KEYSTONE_DATA - sudo sed -e "s,%HOST_IP%,$HOST_IP,g" -i $KEYSTONE_DATA - sudo sed -e "s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g" -i $KEYSTONE_DATA - sudo sed -e "s,%ADMIN_PASSWORD%,$ADMIN_PASSWORD,g" -i $KEYSTONE_DATA - # initialize keystone with default users/endpoints - ENABLED_SERVICES=$ENABLED_SERVICES BIN_DIR=$KEYSTONE_DIR/bin bash $KEYSTONE_DATA + async_runfunc configure_neutron_nova fi +# Extras Configuration +# ==================== + +# Phase: post-config +run_phase stack post-config + + +# Local Configuration +# =================== + +# Apply configuration from ``local.conf`` if it exists for layer 2 services +# Phase: post-config +merge_config_group $TOP_DIR/local.conf post-config + + # Launch Services # =============== -# nova api crashes if we start it with a regular screen command, -# so send the start command by forcing text into the window. 
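
The merge_config_group call above is what applies [[post-config|...]] meta-sections from local.conf to the service configuration files before anything is launched; an illustrative fragment (option and value are examples only):

    [[post-config|$NOVA_CONF]]
    [DEFAULT]
    # Written into nova.conf during the post-config phase
    vif_plugging_timeout = 300
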
# Only run the services specified in ``ENABLED_SERVICES`` -# our screen helper to launch a service in a hidden named screen -function screen_it { - NL=`echo -ne '\015'` - if [[ "$ENABLED_SERVICES" =~ "$1" ]]; then - if [[ "$USE_TMUX" =~ "yes" ]]; then - tmux new-window -t stack -a -n "$1" "bash" - tmux send-keys "$2" C-M - else - screen -S stack -X screen -t $1 - # sleep to allow bash to be ready to be send the command - we are - # creating a new window in screen and then sends characters, so if - # bash isn't running by the time we send the command, nothing happens - sleep 1 - screen -S stack -p $1 -X stuff "$2$NL" - fi - fi -} - -# create a new named screen to run processes in -screen -d -m -S stack -t stack -sleep 1 - -# launch the glance registry service -if [[ "$ENABLED_SERVICES" =~ "g-reg" ]]; then - screen_it g-reg "cd $GLANCE_DIR; bin/glance-registry --config-file=etc/glance-registry.conf" +# Launch Swift Services +if is_service_enabled swift; then + echo_summary "Starting Swift" + start_swift fi -# launch the glance api and wait for it to answer before continuing -if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then - screen_it g-api "cd $GLANCE_DIR; bin/glance-api --config-file=etc/glance-api.conf" - echo "Waiting for g-api ($GLANCE_HOSTPORT) to start..." - if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget -q -O- http://$GLANCE_HOSTPORT; do sleep 1; done"; then - echo "g-api did not start" - exit 1 - fi +# NOTE(lyarwood): By default use a single hardcoded fixed_key across devstack +# deployments. This ensures the keys match across nova and cinder across all +# hosts. +FIXED_KEY=${FIXED_KEY:-bae3516cc1c0eb18b05440eba8012a4a880a2ee04d584a9c1579445e675b12defdc716ec} +if is_service_enabled cinder; then + iniset $CINDER_CONF key_manager fixed_key "$FIXED_KEY" fi -# launch the keystone and wait for it to answer before continuing -if [[ "$ENABLED_SERVICES" =~ "key" ]]; then - screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone --config-file $KEYSTONE_CONF -d" - echo "Waiting for keystone to start..." - if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget -q -O- http://127.0.0.1:5000; do sleep 1; done"; then - echo "keystone did not start" - exit 1 - fi +async_wait configure_neutron_nova + +# NOTE(clarkb): This must come after async_wait configure_neutron_nova because +# configure_neutron_nova modifies $NOVA_CONF and $NOVA_CPU_CONF as well. If +# we don't wait then these two ini updates race either other and can result +# in unexpected configs. +if is_service_enabled nova; then + iniset $NOVA_CONF key_manager fixed_key "$FIXED_KEY" + iniset $NOVA_CPU_CONF key_manager fixed_key "$FIXED_KEY" fi -# launch the nova-api and wait for it to answer before continuing -if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then - screen_it n-api "cd $NOVA_DIR && $NOVA_DIR/bin/nova-api" - echo "Waiting for nova-api to start..." - if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget -q -O- http://127.0.0.1:8774; do sleep 1; done"; then - echo "nova-api did not start" - exit 1 - fi +# Launch the nova-api and wait for it to answer before continuing +if is_service_enabled n-api; then + echo_summary "Starting Nova API" + start_nova_api fi -# Quantum -if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then - # Install deps - # FIXME add to files/apts/quantum, but don't install if not needed! 
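
The fixed_key set above can be overridden, but it must be identical on every node so nova and cinder agree on volume encryption keys; a hedged local.conf example (value illustrative):

    [[local|localrc]]
    # Any hex string; must match across all hosts in the deployment
    FIXED_KEY=bae3516cc1c0eb18b05440eba8012a4a880a2ee0
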
- apt_get install openvswitch-switch openvswitch-datapath-dkms +if is_service_enabled ovn-controller ovn-controller-vtep; then + echo_summary "Starting OVN services" + start_ovn_services +fi - # Create database for the plugin/agent - if [[ "$Q_PLUGIN" = "openvswitch" ]]; then - if [[ "$ENABLED_SERVICES" =~ "mysql" ]]; then - mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE IF NOT EXISTS ovs_quantum;' - else - echo "mysql must be enabled in order to use the $Q_PLUGIN Quantum plugin." - exit 1 - fi - fi +if is_service_enabled q-svc neutron-api; then + echo_summary "Starting Neutron" + configure_neutron_after_post_config + start_neutron_service_and_check +fi - QUANTUM_PLUGIN_INI_FILE=$QUANTUM_DIR/etc/plugins.ini - # Make sure we're using the openvswitch plugin - sed -i -e "s/^provider =.*$/provider = quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPlugin/g" $QUANTUM_PLUGIN_INI_FILE - screen_it q-svc "cd $QUANTUM_DIR && PYTHONPATH=.:$PYTHONPATH python $QUANTUM_DIR/bin/quantum-server $QUANTUM_DIR/etc/quantum.conf" +# Start placement before any of the service that are likely to want +# to use it to manage resource providers. +if is_service_enabled placement; then + echo_summary "Starting Placement" + start_placement fi -# Quantum agent (for compute nodes) -if [[ "$ENABLED_SERVICES" =~ "q-agt" ]]; then - if [[ "$Q_PLUGIN" = "openvswitch" ]]; then - # Set up integration bridge - OVS_BRIDGE=${OVS_BRIDGE:-br-int} - sudo ovs-vsctl --no-wait -- --if-exists del-br $OVS_BRIDGE - sudo ovs-vsctl --no-wait add-br $OVS_BRIDGE - sudo ovs-vsctl --no-wait br-set-external-id $OVS_BRIDGE bridge-id br-int +if is_service_enabled neutron; then + start_neutron +fi +# Once neutron agents are started setup initial network elements +if is_service_enabled q-svc neutron-api && [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" == "True" ]]; then + echo_summary "Creating initial neutron network elements" + # Here's where plugins can wire up their own networks instead + # of the code in lib/neutron_plugins/services/l3 + if type -p neutron_plugin_create_initial_networks > /dev/null; then + neutron_plugin_create_initial_networks + else + create_neutron_initial_network fi - # Start up the quantum <-> openvswitch agent - screen_it q-agt "sleep 4; sudo python $QUANTUM_DIR/quantum/plugins/openvswitch/agent/ovs_quantum_agent.py $QUANTUM_DIR/etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini -v" fi -# If we're using Quantum (i.e. q-svc is enabled), network creation has to -# happen after we've started the Quantum service. -if [[ "$ENABLED_SERVICES" =~ "mysql" ]]; then - # create a small network - $NOVA_DIR/bin/nova-manage network create private $FIXED_RANGE 1 $FIXED_NETWORK_SIZE - - if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then - echo "Not creating floating IPs (not supported by QuantumManager)" - else - # create some floating ips - $NOVA_DIR/bin/nova-manage floating create $FLOATING_RANGE - fi +if is_service_enabled nova; then + echo_summary "Starting Nova" + start_nova + async_runfunc create_flavors +fi +if is_service_enabled cinder; then + echo_summary "Starting Cinder" + start_cinder + create_volume_types fi -# Launching nova-compute should be as simple as running ``nova-compute`` but -# have to do a little more than that in our script. Since we add the group -# ``libvirtd`` to our user in this script, when nova-compute is run it is -# within the context of our original shell (so our groups won't be updated). -# Use 'sg' to execute nova-compute as a member of the libvirtd group. 
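
The neutron_plugin_create_initial_networks hook above lets a DevStack plugin replace the default bootstrap from lib/neutron_plugins/services/l3, and setting NEUTRON_CREATE_INITIAL_NETWORKS=False in local.conf skips the step entirely; a minimal sketch of what such a plugin might define (network names and ranges are illustrative):

    # Defined in a plugin; overrides create_neutron_initial_network
    function neutron_plugin_create_initial_networks {
        openstack network create plugin-private
        openstack subnet create --network plugin-private \
            --subnet-range 10.0.0.0/24 plugin-private-subnet
    }
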
-screen_it n-cpu "cd $NOVA_DIR && sg libvirtd $NOVA_DIR/bin/nova-compute" -screen_it n-vol "cd $NOVA_DIR && $NOVA_DIR/bin/nova-volume" -screen_it n-net "cd $NOVA_DIR && $NOVA_DIR/bin/nova-network" -screen_it n-sch "cd $NOVA_DIR && $NOVA_DIR/bin/nova-scheduler" -if [[ "$ENABLED_SERVICES" =~ "n-vnc" ]]; then - screen_it n-vnc "cd $NOVNC_DIR && ./utils/nova-wsproxy.py --flagfile $NOVA_DIR/bin/nova.conf --web . 6080" +# This sleep is required for cinder volume service to become active and +# publish capabilities to cinder scheduler before creating the image-volume +if [[ "$USE_CINDER_FOR_GLANCE" == "True" ]]; then + sleep 30 fi -if [[ "$ENABLED_SERVICES" =~ "horizon" ]]; then - screen_it horizon "cd $HORIZON_DIR && sudo tail -f /var/log/apache2/error.log" + +# Launch the Glance services +# NOTE (abhishekk): We need to start glance api service only after cinder +# service has started as on glance startup glance-api queries cinder for +# validating volume_type configured for cinder store of glance. +if is_service_enabled glance; then + echo_summary "Starting Glance" + start_glance fi # Install Images # ============== -# Upload an image to glance. -# -# The default image is a small ***TTY*** testing image, which lets you login -# the username/password of root/password. -# -# TTY also uses cloud-init, supporting login via keypair and sending scripts as -# userdata. See https://help.ubuntu.com/community/CloudInit for more on cloud-init +# Upload an image to Glance. # -# Override ``IMAGE_URLS`` with a comma-separated list of uec images. +# The default image is CirrOS, a small testing image which lets you login as **root** +# CirrOS has a ``cloud-init`` analog supporting login via keypair and sending +# scripts as userdata. +# See https://help.ubuntu.com/community/CloudInit for more on ``cloud-init`` + +# NOTE(yoctozepto): limited to node hosting the database which is the controller +if is_service_enabled $DATABASE_BACKENDS && is_service_enabled glance; then + echo_summary "Uploading images" + + for image_url in ${IMAGE_URLS//,/ }; do + upload_image $image_url + done +fi + +async_wait create_flavors + +if is_service_enabled horizon; then + echo_summary "Starting Horizon" + init_horizon + start_horizon +fi + + +# Create account rc files +# ======================= + +# Creates source able script files for easier user switching. +# This step also creates certificates for tenants and users, +# which is helpful in image bundle steps. 
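
The account rc step that follows writes per-project, per-user credential files under accrc/ via tools/create_userrc.sh; an illustrative use of the result, assuming the default accrc/<project>/<user> layout:

    # Switch the current shell to the demo user's credentials
    source $TOP_DIR/accrc/demo/demo
    openstack token issue
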
+ +if is_service_enabled nova && is_service_enabled keystone; then + USERRC_PARAMS="-PA --target-dir $TOP_DIR/accrc --os-password $ADMIN_PASSWORD" + + if [ -f $SSL_BUNDLE_FILE ]; then + USERRC_PARAMS="$USERRC_PARAMS --os-cacert $SSL_BUNDLE_FILE" + fi + + $TOP_DIR/tools/create_userrc.sh $USERRC_PARAMS +fi + + +# Save some values we generated for later use +save_stackenv + + +# Wrapup configuration +# ==================== + +# local.conf extra +# ---------------- + +# Apply configuration from ``local.conf`` if it exists for layer 2 services +# Phase: extra +merge_config_group $TOP_DIR/local.conf extra + + +# Run extras +# ---------- + +# Phase: extra +run_phase stack extra + + +# local.conf post-extra +# --------------------- + +# Apply late configuration from ``local.conf`` if it exists for layer 2 services +# Phase: post-extra +merge_config_group $TOP_DIR/local.conf post-extra + + +# Sanity checks +# ============= + +# Check that computes are all ready # -# * **natty**: http://uec-images.ubuntu.com/natty/current/natty-server-cloudimg-amd64.tar.gz -# * **oneiric**: http://uec-images.ubuntu.com/oneiric/current/oneiric-server-cloudimg-amd64.tar.gz +# TODO(sdague): there should be some generic phase here. +if is_service_enabled n-cpu; then + is_nova_ready +fi -if [[ "$ENABLED_SERVICES" =~ "g-reg" ]]; then - # Create a directory for the downloaded image tarballs. - mkdir -p $FILES/images +# Check the status of running services +service_check - # Option to upload legacy ami-tty, which works with xenserver - if [ $UPLOAD_LEGACY_TTY ]; then - if [ ! -f $FILES/tty.tgz ]; then - wget -c http://images.ansolabs.com/tty.tgz -O $FILES/tty.tgz - fi +# Configure nova cellsv2 +# ---------------------- - tar -zxf $FILES/tty.tgz -C $FILES/images - RVAL=`glance add -A $SERVICE_TOKEN name="tty-kernel" is_public=true container_format=aki disk_format=aki < $FILES/images/aki-tty/image` - KERNEL_ID=`echo $RVAL | cut -d":" -f2 | tr -d " "` - RVAL=`glance add -A $SERVICE_TOKEN name="tty-ramdisk" is_public=true container_format=ari disk_format=ari < $FILES/images/ari-tty/image` - RAMDISK_ID=`echo $RVAL | cut -d":" -f2 | tr -d " "` - glance add -A $SERVICE_TOKEN name="tty" is_public=true container_format=ami disk_format=ami kernel_id=$KERNEL_ID ramdisk_id=$RAMDISK_ID < $FILES/images/ami-tty/image +# Do this late because it requires compute hosts to have started +if is_service_enabled n-api; then + if is_service_enabled n-cpu; then + $TOP_DIR/tools/discover_hosts.sh + else + # Some CI systems like Hyper-V build the control plane on + # Linux, and join in non Linux Computes after setup. This + # allows them to delay the processing until after their whole + # environment is up. + echo_summary "SKIPPING Cell setup because n-cpu is not enabled. You will have to do this manually before you have a working environment." fi + # Run the nova-status upgrade check command which can also be used + # to verify the base install. Note that this is good enough in a + # single node deployment, but in a multi-node setup it won't verify + # any subnodes - that would have to be driven from whatever tooling + # is deploying the subnodes, e.g. the zuul v3 devstack-multinode job. + $NOVA_BIN_DIR/nova-status --config-file $NOVA_CONF upgrade check +fi - for image_url in ${IMAGE_URLS//,/ }; do - # Downloads the image (uec ami+aki style), then extracts it. - IMAGE_FNAME=`basename "$image_url"` - IMAGE_NAME=`basename "$IMAGE_FNAME" .tar.gz` - if [ ! 
-f $FILES/$IMAGE_FNAME ]; then - wget -c $image_url -O $FILES/$IMAGE_FNAME - fi +# Run local script +# ---------------- - # Extract ami and aki files - tar -zxf $FILES/$IMAGE_FNAME -C $FILES/images +# Run ``local.sh`` if it exists to perform user-managed tasks +if [[ -x $TOP_DIR/local.sh ]]; then + echo "Running user script $TOP_DIR/local.sh" + $TOP_DIR/local.sh +fi - # Use glance client to add the kernel the root filesystem. - # We parse the results of the first upload to get the glance ID of the - # kernel for use when uploading the root filesystem. - RVAL=`glance add -A $SERVICE_TOKEN name="$IMAGE_NAME-kernel" is_public=true container_format=aki disk_format=aki < $FILES/images/$IMAGE_NAME-vmlinuz*` - KERNEL_ID=`echo $RVAL | cut -d":" -f2 | tr -d " "` - glance add -A $SERVICE_TOKEN name="$IMAGE_NAME" is_public=true container_format=ami disk_format=ami kernel_id=$KERNEL_ID < $FILES/images/$IMAGE_NAME.img - done +# Bash completion +# =============== + +# Prepare bash completion for OSC +# Note we use "command" to avoid the timing wrapper +# which isn't relevant here and floods logs +command openstack complete \ + | sudo tee /etc/bash_completion.d/osc.bash_completion > /dev/null + +# If cinder is configured, set global_filter for PV devices +if is_service_enabled cinder; then + if is_ubuntu; then + echo_summary "Configuring lvm.conf global device filter" + set_lvm_filter + else + echo_summary "Skip setting lvm filters for non Ubuntu systems" + fi fi +# Run test-config +# --------------- + +# Phase: test-config +run_phase stack test-config + +# Apply late configuration from ``local.conf`` if it exists for layer 2 services +# Phase: test-config +merge_config_group $TOP_DIR/local.conf test-config + # Fin # === +set +o xtrace + +if [[ -n "$LOGFILE" ]]; then + exec 1>&3 + # Force all output to stdout and logs now + exec 1> >( tee -a "${LOGFILE}" ) 2>&1 +else + # Force all output to stdout now + exec 1>&3 +fi -) 2>&1 | tee "${LOGFILE}" +# Make sure we didn't leak any background tasks +async_cleanup + +# Dump out the time totals +time_totals +async_print_timing + +if is_service_enabled mysql; then + if [[ "$MYSQL_GATHER_PERFORMANCE" == "True" && "$MYSQL_HOST" ]]; then + echo "" + echo "" + echo "Post-stack database query stats:" + mysql -u $DATABASE_USER -p$DATABASE_PASSWORD -h $MYSQL_HOST stats -e \ + 'SELECT * FROM queries' -t 2>/dev/null + mysql -u $DATABASE_USER -p$DATABASE_PASSWORD -h $MYSQL_HOST stats -e \ + 'DELETE FROM queries' 2>/dev/null + fi +fi -# Check that the left side of the above pipe succeeded -for ret in "${PIPESTATUS[@]}"; do [ $ret -eq 0 ] || exit $ret; done -( # Using the cloud # =============== echo "" echo "" echo "" +echo "This is your host IP address: $HOST_IP" +if [ "$HOST_IPV6" != "" ]; then + echo "This is your host IPv6 address: $HOST_IPV6" +fi -# If you installed the horizon on this server, then you should be able +# If you installed Horizon on this server you should be able # to access the site using your browser. 
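
The local.sh hook above runs any executable script the deployer places next to stack.sh once the services are up; a trivial illustrative example (the keypair step is an example task, not part of stack.sh itself):

    #!/bin/bash
    # local.sh - post-stack tasks owned by the deployer
    TOP_DIR=$(cd $(dirname "$0") && pwd)
    source $TOP_DIR/openrc admin admin
    openstack keypair create --public-key ~/.ssh/id_rsa.pub default
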
-if [[ "$ENABLED_SERVICES" =~ "horizon" ]]; then - echo "horizon is now available at http://$HOST_IP/" +if is_service_enabled horizon; then + echo "Horizon is now available at http://$SERVICE_HOST$HORIZON_APACHE_ROOT" +fi + +# If Keystone is present you can point ``nova`` cli to this server +if is_service_enabled keystone; then + echo "Keystone is serving at $KEYSTONE_SERVICE_URI/" + echo "The default users are: admin and demo" + echo "The password: $ADMIN_PASSWORD" fi -# If keystone is present, you can point nova cli to this server -if [[ "$ENABLED_SERVICES" =~ "key" ]]; then - echo "keystone is serving at http://$HOST_IP:5000/v2.0/" - echo "examples on using novaclient command line is in exercise.sh" - echo "the default users are: admin and demo" - echo "the password: $ADMIN_PASSWORD" +# Warn that a deprecated feature was used +if [[ -n "$DEPRECATED_TEXT" ]]; then + echo + echo -e "WARNING: $DEPRECATED_TEXT" + echo fi -# indicate how long this took to run (bash maintained variable 'SECONDS') -echo "stack.sh completed in $SECONDS seconds." +echo +echo "Services are running under systemd unit files." +echo "For more information see: " +echo "https://docs.openstack.org/devstack/latest/systemd.html" +echo + +# Useful info on current state +cat /etc/devstack-version +echo + +# Indicate how long this took to run (bash maintained variable ``SECONDS``) +echo_summary "stack.sh completed in $SECONDS seconds." + -) | tee -a "$LOGFILE" +# Restore/close logging file descriptors +exec 1>&3 +exec 2>&3 +exec 3>&- +exec 6>&- diff --git a/stackrc b/stackrc index 524cc99237..93f8b1cd6d 100644 --- a/stackrc +++ b/stackrc @@ -1,52 +1,939 @@ -# compute service -NOVA_REPO=https://github.com/openstack/nova.git -NOVA_BRANCH=master +#!/bin/bash +# +# stackrc +# + +# ensure we don't re-source this in the same environment +[[ -z "$_DEVSTACK_STACKRC" ]] || return 0 +declare -r -g _DEVSTACK_STACKRC=1 + +# Find the other rc files +RC_DIR=$(cd $(dirname "${BASH_SOURCE:-$0}") && pwd) + +# Source required DevStack functions and globals +source $RC_DIR/functions + +# Set the target branch. This is used so that stable branching +# does not need to update each repo below. +TARGET_BRANCH=master + +# Cycle trailing projects need to branch later than the others. +TRAILING_TARGET_BRANCH=master + +# And some repos do not create stable branches, so this is used +# to make it explicit and avoid accidentally setting to a stable +# branch. +BRANCHLESS_TARGET_BRANCH=master + +# Destination path for installation +DEST=/opt/stack + +# Destination for working data +DATA_DIR=${DEST}/data + +# Destination for status files +SERVICE_DIR=${DEST}/status + +# Path for subunit output file +SUBUNIT_OUTPUT=${DEST}/devstack.subunit + +# Determine stack user +if [[ $EUID -eq 0 ]]; then + STACK_USER=stack +else + STACK_USER=$(whoami) +fi + +# Specify region name Region +REGION_NAME=${REGION_NAME:-RegionOne} + +# Specify name of region where identity service endpoint is registered. +# When deploying multiple DevStack instances in different regions with shared +# Keystone, set KEYSTONE_REGION_NAME to the region where Keystone is running +# for DevStack instances which do not host Keystone. +KEYSTONE_REGION_NAME=${KEYSTONE_REGION_NAME:-$REGION_NAME} + +# Specify which services to launch. These generally correspond to +# screen tabs. To change the default list, use the ``enable_service`` and +# ``disable_service`` functions in ``local.conf``. 
+# For example, to enable Swift as part of DevStack add the following +# settings in ``local.conf``: +# [[local|localrc]] +# enable_service s-proxy s-object s-container s-account +# This allows us to pass ``ENABLED_SERVICES`` +if ! isset ENABLED_SERVICES ; then + # Keystone - nothing works without keystone + ENABLED_SERVICES=key + # Nova - services to support libvirt based openstack clouds + ENABLED_SERVICES+=,n-api,n-cpu,n-cond,n-sch,n-novnc,n-api-meta + # Placement service needed for Nova + ENABLED_SERVICES+=,placement-api,placement-client + # Glance services needed for Nova + ENABLED_SERVICES+=,g-api + # Cinder + ENABLED_SERVICES+=,c-sch,c-api,c-vol + # OVN + ENABLED_SERVICES+=,ovn-controller,ovn-northd,ovs-vswitchd,ovsdb-server + # Neutron + ENABLED_SERVICES+=,q-svc,q-ovn-agent + # Dashboard + ENABLED_SERVICES+=,horizon + # Additional services + ENABLED_SERVICES+=,rabbit,tempest,mysql,etcd3,dstat +fi + +# Global toggle for enabling services under mod_wsgi. If this is set to +# ``True`` all services that use HTTPD + mod_wsgi as the preferred method of +# deployment, will be deployed under Apache. If this is set to ``False`` all +# services will rely on the local toggle variable. +ENABLE_HTTPD_MOD_WSGI_SERVICES=True + +# Set the default Nova APIs to enable +NOVA_ENABLED_APIS=osapi_compute,metadata + +# allow local overrides of env variables, including repo config +if [[ -f $RC_DIR/localrc ]]; then + # Old-style user-supplied config + source $RC_DIR/localrc +elif [[ -f $RC_DIR/.localrc.auto ]]; then + # New-style user-supplied config extracted from local.conf + source $RC_DIR/.localrc.auto +fi + +# CELLSV2_SETUP - how we should configure services with cells v2 +# +# - superconductor - this is one conductor for the api services, and +# one per cell managing the compute services. This is preferred +# - singleconductor - this is one conductor for the whole deployment, +# this is not recommended, and will be removed in the future. +CELLSV2_SETUP=${CELLSV2_SETUP:-"superconductor"} + +# Set the root URL for Horizon +HORIZON_APACHE_ROOT="/dashboard" + +# Whether to use user specific units for running services or global ones. +USER_UNITS=$(trueorfalse False USER_UNITS) +if [[ "$USER_UNITS" == "True" ]]; then + SYSTEMD_DIR="$HOME/.local/share/systemd/user" + SYSTEMCTL="systemctl --user" +else + SYSTEMD_DIR="/etc/systemd/system" + SYSTEMCTL="sudo systemctl" +fi + +# Passwords generated by interactive devstack runs +if [[ -r $RC_DIR/.localrc.password ]]; then + source $RC_DIR/.localrc.password +fi + +# Adding the specific version of Python 3 to this variable will install +# the app using that version of the interpreter instead of just 3. +_DEFAULT_PYTHON3_VERSION="$(_get_python_version python3)" +export PYTHON3_VERSION=${PYTHON3_VERSION:-${_DEFAULT_PYTHON3_VERSION:-3}} + +# Create a virtualenv with this +# Use the built-in venv to avoid more dependencies +export VIRTUALENV_CMD="python3 -m venv" + +# Default for log coloring is based on interactive-or-not. +# Baseline assumption is that non-interactive invocations are for CI, +# where logs are to be presented as browsable text files; hence color +# codes should be omitted. +# Simply override LOG_COLOR if your environment is different. +if [ -t 1 ]; then + _LOG_COLOR_DEFAULT=True +else + _LOG_COLOR_DEFAULT=False +fi + +# Use color for logging output (only available if syslog is not used) +LOG_COLOR=$(trueorfalse $_LOG_COLOR_DEFAULT LOG_COLOR) + +# Make tracing more educational +if [[ "$LOG_COLOR" == "True" ]]; then + # tput requires TERM or -T. 
If neither is present, use vt100, a + # no-frills least common denominator supported everywhere. + TPUT_T= + if ! [ $TERM ]; then + TPUT_T='-T vt100' + fi + export PS4='+\[$(tput '$TPUT_T' setaf 242)\]$(short_source)\[$(tput '$TPUT_T' sgr0)\] ' +else + export PS4='+ $(short_source): ' +fi + +# Global option for enforcing scope. If enabled, ENFORCE_SCOPE overrides +# each services ${SERVICE}_ENFORCE_SCOPE variables +ENFORCE_SCOPE=$(trueorfalse False ENFORCE_SCOPE) + +# Devstack supports the use of a global virtualenv. These variables enable +# and disable this functionality as well as set the path to the virtualenv. +# Note that the DATA_DIR is selected because grenade testing uses a shared +# DATA_DIR but different DEST dirs and we don't want two sets of venvs, +# instead we want one global set. +DEVSTACK_VENV=${DEVSTACK_VENV:-$DATA_DIR/venv} + +# NOTE(kopecmartin): remove this once this is fixed +# https://bugs.launchpad.net/devstack/+bug/2031639 +# This couldn't go to fixup_stuff as that's called after projects +# (e.g. certain paths) are set taking GLOBAL_VENV into account +if [[ "$os_VENDOR" =~ (CentOSStream|Rocky) ]]; then + GLOBAL_VENV=$(trueorfalse False GLOBAL_VENV) +else + GLOBAL_VENV=$(trueorfalse True GLOBAL_VENV) +fi + +# Enable use of Python virtual environments. Individual project use of +# venvs are controlled by the PROJECT_VENV array; every project with +# an entry in the array will be installed into the named venv. +# By default this will put each project into its own venv. +USE_VENV=$(trueorfalse False USE_VENV) + +# Add packages that need to be installed into a venv but are not in any +# requirements files here, in a comma-separated list. +# Currently only used when USE_VENV is true (individual project venvs) +ADDITIONAL_VENV_PACKAGES=${ADDITIONAL_VENV_PACKAGES:-""} + +# This can be used to turn database query logging on and off +# (currently only implemented for MySQL backend) +DATABASE_QUERY_LOGGING=$(trueorfalse False DATABASE_QUERY_LOGGING) + +# This can be used to turn on various non-default items in the +# performance_schema that are of interest to us +MYSQL_GATHER_PERFORMANCE=$(trueorfalse True MYSQL_GATHER_PERFORMANCE) + +# This can be used to reduce the amount of memory mysqld uses while running. +# These are unscientifically determined, and could reduce performance or +# cause other issues. +MYSQL_REDUCE_MEMORY=$(trueorfalse True MYSQL_REDUCE_MEMORY) + +# Set a timeout for git operations. If git is still running when the +# timeout expires, the command will be retried up to 3 times. This is +# in the format for timeout(1); +# +# DURATION is a floating point number with an optional suffix: 's' +# for seconds (the default), 'm' for minutes, 'h' for hours or 'd' +# for days. +# +# Zero disables timeouts +GIT_TIMEOUT=${GIT_TIMEOUT:-0} + +# How should we be handling WSGI deployments. By default we're going +# to allow for 2 modes, which is "uwsgi" which runs with an apache +# proxy uwsgi in front of it, or "mod_wsgi", which runs in +# apache. mod_wsgi is deprecated, don't use it. +WSGI_MODE=${WSGI_MODE:-"uwsgi"} +if [[ "$WSGI_MODE" != "uwsgi" ]]; then + die $LINENO "$WSGI_MODE is no longer a supported WSGI mode. Only uwsgi is valid." 
+fi + +# Repositories +# ------------ + +# Base GIT Repo URL +GIT_BASE=${GIT_BASE:-https://opendev.org} -# storage service -SWIFT_REPO=https://github.com/openstack/swift.git -SWIFT_BRANCH=master +# The location of REQUIREMENTS once cloned +REQUIREMENTS_DIR=${REQUIREMENTS_DIR:-$DEST/requirements} -# swift and keystone integration -SWIFT_KEYSTONE_REPO=https://github.com/cloudbuilders/swift-keystone2.git -SWIFT_KEYSTONE_BRANCH=master +# Which libraries should we install from git instead of using released +# versions on pypi? +# +# By default DevStack is now installing libraries from pypi instead of +# from git repositories by default. This works great if you are +# developing server components, but if you want to develop libraries +# and see them live in DevStack you need to tell DevStack it should +# install them from git. +# +# ex: LIBS_FROM_GIT=python-keystoneclient,oslo.config +# +# Will install those 2 libraries from git, the rest from pypi. +# +# Setting the variable to 'ALL' will activate the download for all +# libraries. + +DEVSTACK_SERIES="2026.1" + +############## +# +# OpenStack Server Components +# +############## + +# block storage service +CINDER_REPO=${CINDER_REPO:-${GIT_BASE}/openstack/cinder.git} +CINDER_BRANCH=${CINDER_BRANCH:-$TARGET_BRANCH} # image catalog service -GLANCE_REPO=https://github.com/openstack/glance.git -GLANCE_BRANCH=master +GLANCE_REPO=${GLANCE_REPO:-${GIT_BASE}/openstack/glance.git} +GLANCE_BRANCH=${GLANCE_BRANCH:-$TARGET_BRANCH} + +# django powered web control panel for openstack +HORIZON_REPO=${HORIZON_REPO:-${GIT_BASE}/openstack/horizon.git} +HORIZON_BRANCH=${HORIZON_BRANCH:-$TARGET_BRANCH} # unified auth system (manages accounts/tokens) -KEYSTONE_REPO=https://github.com/openstack/keystone.git -KEYSTONE_BRANCH=stable/diablo +KEYSTONE_REPO=${KEYSTONE_REPO:-${GIT_BASE}/openstack/keystone.git} +KEYSTONE_BRANCH=${KEYSTONE_BRANCH:-$TARGET_BRANCH} -# a websockets/html5 or flash powered VNC console for vm instances -NOVNC_REPO=https://github.com/cloudbuilders/noVNC.git -NOVNC_BRANCH=master +# neutron service +NEUTRON_REPO=${NEUTRON_REPO:-${GIT_BASE}/openstack/neutron.git} +NEUTRON_BRANCH=${NEUTRON_BRANCH:-$TARGET_BRANCH} -# django powered web control panel for openstack -HORIZON_REPO=https://github.com/openstack/horizon.git -HORIZON_BRANCH=master +# compute service +NOVA_REPO=${NOVA_REPO:-${GIT_BASE}/openstack/nova.git} +NOVA_BRANCH=${NOVA_BRANCH:-$TARGET_BRANCH} + +# object storage service +SWIFT_REPO=${SWIFT_REPO:-${GIT_BASE}/openstack/swift.git} +SWIFT_BRANCH=${SWIFT_BRANCH:-$TARGET_BRANCH} + +# placement service +PLACEMENT_REPO=${PLACEMENT_REPO:-${GIT_BASE}/openstack/placement.git} +PLACEMENT_BRANCH=${PLACEMENT_BRANCH:-$TARGET_BRANCH} + +############## +# +# Testing Components +# +############## + +# consolidated openstack requirements +REQUIREMENTS_REPO=${REQUIREMENTS_REPO:-${GIT_BASE}/openstack/requirements.git} +REQUIREMENTS_BRANCH=${REQUIREMENTS_BRANCH:-$TARGET_BRANCH} + +# Tempest test suite +TEMPEST_REPO=${TEMPEST_REPO:-${GIT_BASE}/openstack/tempest.git} +TEMPEST_BRANCH=${TEMPEST_BRANCH:-$BRANCHLESS_TARGET_BRANCH} +TEMPEST_VENV_UPPER_CONSTRAINTS=${TEMPEST_VENV_UPPER_CONSTRAINTS:-master} + +OSTESTIMAGES_REPO=${OSTESTIMAGES_REPO:-${GIT_BASE}/openstack/os-test-images.git} +OSTESTIMAGES_BRANCH=${OSTESTIMAGES_BRANCH:-$BRANCHLESS_TARGET_BRANCH} +OSTESTIMAGES_DIR=${DEST}/os-test-images + +############## +# +# OpenStack Client Library Components +# Note default install is from pip, see LIBS_FROM_GIT +# +############## + +# volume client 
+GITREPO["python-cinderclient"]=${CINDERCLIENT_REPO:-${GIT_BASE}/openstack/python-cinderclient.git} +GITBRANCH["python-cinderclient"]=${CINDERCLIENT_BRANCH:-$TARGET_BRANCH} + +# os-brick client for local volume attachement +GITREPO["python-brick-cinderclient-ext"]=${BRICK_CINDERCLIENT_REPO:-${GIT_BASE}/openstack/python-brick-cinderclient-ext.git} +GITBRANCH["python-brick-cinderclient-ext"]=${BRICK_CINDERCLIENT_BRANCH:-$TARGET_BRANCH} + +# python barbican client library +GITREPO["python-barbicanclient"]=${BARBICANCLIENT_REPO:-${GIT_BASE}/openstack/python-barbicanclient.git} +GITBRANCH["python-barbicanclient"]=${BARBICANCLIENT_BRANCH:-$TARGET_BRANCH} +GITDIR["python-barbicanclient"]=$DEST/python-barbicanclient + +# python glance client library +GITREPO["python-glanceclient"]=${GLANCECLIENT_REPO:-${GIT_BASE}/openstack/python-glanceclient.git} +GITBRANCH["python-glanceclient"]=${GLANCECLIENT_BRANCH:-$TARGET_BRANCH} + +# ironic client +GITREPO["python-ironicclient"]=${IRONICCLIENT_REPO:-${GIT_BASE}/openstack/python-ironicclient.git} +GITBRANCH["python-ironicclient"]=${IRONICCLIENT_BRANCH:-$TARGET_BRANCH} +# ironic plugin is out of tree, but nova uses it. set GITDIR here. +GITDIR["python-ironicclient"]=$DEST/python-ironicclient + +# the base authentication plugins that clients use to authenticate +GITREPO["keystoneauth"]=${KEYSTONEAUTH_REPO:-${GIT_BASE}/openstack/keystoneauth.git} +GITBRANCH["keystoneauth"]=${KEYSTONEAUTH_BRANCH:-$TARGET_BRANCH} + +# python keystone client library to nova that horizon uses +GITREPO["python-keystoneclient"]=${KEYSTONECLIENT_REPO:-${GIT_BASE}/openstack/python-keystoneclient.git} +GITBRANCH["python-keystoneclient"]=${KEYSTONECLIENT_BRANCH:-$TARGET_BRANCH} + +# neutron client +GITREPO["python-neutronclient"]=${NEUTRONCLIENT_REPO:-${GIT_BASE}/openstack/python-neutronclient.git} +GITBRANCH["python-neutronclient"]=${NEUTRONCLIENT_BRANCH:-$TARGET_BRANCH} # python client library to nova that horizon (and others) use -NOVACLIENT_REPO=https://github.com/openstack/python-novaclient.git -NOVACLIENT_BRANCH=master +GITREPO["python-novaclient"]=${NOVACLIENT_REPO:-${GIT_BASE}/openstack/python-novaclient.git} +GITBRANCH["python-novaclient"]=${NOVACLIENT_BRANCH:-$TARGET_BRANCH} + +# python swift client library +GITREPO["python-swiftclient"]=${SWIFTCLIENT_REPO:-${GIT_BASE}/openstack/python-swiftclient.git} +GITBRANCH["python-swiftclient"]=${SWIFTCLIENT_BRANCH:-$TARGET_BRANCH} + +# consolidated openstack python client +GITREPO["python-openstackclient"]=${OPENSTACKCLIENT_REPO:-${GIT_BASE}/openstack/python-openstackclient.git} +GITBRANCH["python-openstackclient"]=${OPENSTACKCLIENT_BRANCH:-$TARGET_BRANCH} +# this doesn't exist in a lib file, so set it here +GITDIR["python-openstackclient"]=$DEST/python-openstackclient + +# placement-api CLI +GITREPO["osc-placement"]=${OSC_PLACEMENT_REPO:-${GIT_BASE}/openstack/osc-placement.git} +GITBRANCH["osc-placement"]=${OSC_PLACEMENT_BRANCH:-$TARGET_BRANCH} + + +################### +# +# Oslo Libraries +# Note default install is from pip, see LIBS_FROM_GIT +# +################### + +# castellan key manager interface +GITREPO["castellan"]=${CASTELLAN_REPO:-${GIT_BASE}/openstack/castellan.git} +GITBRANCH["castellan"]=${CASTELLAN_BRANCH:-$TARGET_BRANCH} + +# cliff command line framework +GITREPO["cliff"]=${CLIFF_REPO:-${GIT_BASE}/openstack/cliff.git} +GITBRANCH["cliff"]=${CLIFF_BRANCH:-$TARGET_BRANCH} + +# async framework/helpers +GITREPO["futurist"]=${FUTURIST_REPO:-${GIT_BASE}/openstack/futurist.git} 
+GITBRANCH["futurist"]=${FUTURIST_BRANCH:-$TARGET_BRANCH} + +# debtcollector deprecation framework/helpers +GITREPO["debtcollector"]=${DEBTCOLLECTOR_REPO:-${GIT_BASE}/openstack/debtcollector.git} +GITBRANCH["debtcollector"]=${DEBTCOLLECTOR_BRANCH:-$TARGET_BRANCH} + +# etcd3gw library +GITREPO["etcd3gw"]=${ETCD3GW_REPO:-${GIT_BASE}/openstack/etcd3gw.git} +GITBRANCH["etcd3gw"]=${ETCD3GW_BRANCH:-$BRANCHLESS_TARGET_BRANCH} + +# helpful state machines +GITREPO["automaton"]=${AUTOMATON_REPO:-${GIT_BASE}/openstack/automaton.git} +GITBRANCH["automaton"]=${AUTOMATON_BRANCH:-$TARGET_BRANCH} + +# oslo.cache +GITREPO["oslo.cache"]=${OSLOCACHE_REPO:-${GIT_BASE}/openstack/oslo.cache.git} +GITBRANCH["oslo.cache"]=${OSLOCACHE_BRANCH:-$TARGET_BRANCH} + +# oslo.concurrency +GITREPO["oslo.concurrency"]=${OSLOCON_REPO:-${GIT_BASE}/openstack/oslo.concurrency.git} +GITBRANCH["oslo.concurrency"]=${OSLOCON_BRANCH:-$TARGET_BRANCH} + +# oslo.config +GITREPO["oslo.config"]=${OSLOCFG_REPO:-${GIT_BASE}/openstack/oslo.config.git} +GITBRANCH["oslo.config"]=${OSLOCFG_BRANCH:-$TARGET_BRANCH} + +# oslo.context +GITREPO["oslo.context"]=${OSLOCTX_REPO:-${GIT_BASE}/openstack/oslo.context.git} +GITBRANCH["oslo.context"]=${OSLOCTX_BRANCH:-$TARGET_BRANCH} + +# oslo.db +GITREPO["oslo.db"]=${OSLODB_REPO:-${GIT_BASE}/openstack/oslo.db.git} +GITBRANCH["oslo.db"]=${OSLODB_BRANCH:-$TARGET_BRANCH} + +# oslo.i18n +GITREPO["oslo.i18n"]=${OSLOI18N_REPO:-${GIT_BASE}/openstack/oslo.i18n.git} +GITBRANCH["oslo.i18n"]=${OSLOI18N_BRANCH:-$TARGET_BRANCH} + +# oslo.limit +GITREPO["oslo.limit"]=${OSLOLIMIT_REPO:-${GIT_BASE}/openstack/oslo.limit.git} +GITBRANCH["oslo.limit"]=${OSLOLIMIT_BRANCH:-$TARGET_BRANCH} + +# oslo.log +GITREPO["oslo.log"]=${OSLOLOG_REPO:-${GIT_BASE}/openstack/oslo.log.git} +GITBRANCH["oslo.log"]=${OSLOLOG_BRANCH:-$TARGET_BRANCH} + +# oslo.messaging +GITREPO["oslo.messaging"]=${OSLOMSG_REPO:-${GIT_BASE}/openstack/oslo.messaging.git} +GITBRANCH["oslo.messaging"]=${OSLOMSG_BRANCH:-$TARGET_BRANCH} + +# oslo.middleware +GITREPO["oslo.middleware"]=${OSLOMID_REPO:-${GIT_BASE}/openstack/oslo.middleware.git} +GITBRANCH["oslo.middleware"]=${OSLOMID_BRANCH:-$TARGET_BRANCH} + +# oslo.policy +GITREPO["oslo.policy"]=${OSLOPOLICY_REPO:-${GIT_BASE}/openstack/oslo.policy.git} +GITBRANCH["oslo.policy"]=${OSLOPOLICY_BRANCH:-$TARGET_BRANCH} + +# oslo.privsep +GITREPO["oslo.privsep"]=${OSLOPRIVSEP_REPO:-${GIT_BASE}/openstack/oslo.privsep.git} +GITBRANCH["oslo.privsep"]=${OSLOPRIVSEP_BRANCH:-$TARGET_BRANCH} + +# oslo.reports +GITREPO["oslo.reports"]=${OSLOREPORTS_REPO:-${GIT_BASE}/openstack/oslo.reports.git} +GITBRANCH["oslo.reports"]=${OSLOREPORTS_BRANCH:-$TARGET_BRANCH} -# openstackx is a collection of extensions to openstack.compute & nova -# that is *deprecated*. The code is being moved into python-novaclient & nova. 
-OPENSTACKX_REPO=https://github.com/cloudbuilders/openstackx.git -OPENSTACKX_BRANCH=master +# oslo.rootwrap +GITREPO["oslo.rootwrap"]=${OSLORWRAP_REPO:-${GIT_BASE}/openstack/oslo.rootwrap.git} +GITBRANCH["oslo.rootwrap"]=${OSLORWRAP_BRANCH:-$TARGET_BRANCH} -# quantum service -QUANTUM_REPO=https://github.com/openstack/quantum -QUANTUM_BRANCH=master +# oslo.serialization +GITREPO["oslo.serialization"]=${OSLOSERIALIZATION_REPO:-${GIT_BASE}/openstack/oslo.serialization.git} +GITBRANCH["oslo.serialization"]=${OSLOSERIALIZATION_BRANCH:-$TARGET_BRANCH} -# CI test suite -CITEST_REPO=https://github.com/openstack/tempest.git -CITEST_BRANCH=master +# oslo.service +GITREPO["oslo.service"]=${OSLOSERVICE_REPO:-${GIT_BASE}/openstack/oslo.service.git} +GITBRANCH["oslo.service"]=${OSLOSERVICE_BRANCH:-$TARGET_BRANCH} -# Specify a comma-separated list of uec images to download and install into glance. -IMAGE_URLS=http://smoser.brickies.net/ubuntu/ttylinux-uec/ttylinux-uec-amd64-11.2_2.6.35-15_1.tar.gz +# oslo.utils +GITREPO["oslo.utils"]=${OSLOUTILS_REPO:-${GIT_BASE}/openstack/oslo.utils.git} +GITBRANCH["oslo.utils"]=${OSLOUTILS_BRANCH:-$TARGET_BRANCH} -# allow local overrides of env variables -if [ -f ./localrc ]; then - source ./localrc +# oslo.versionedobjects +GITREPO["oslo.versionedobjects"]=${OSLOVERSIONEDOBJECTS_REPO:-${GIT_BASE}/openstack/oslo.versionedobjects.git} +GITBRANCH["oslo.versionedobjects"]=${OSLOVERSIONEDOBJECTS_BRANCH:-$TARGET_BRANCH} + +# oslo.vmware +GITREPO["oslo.vmware"]=${OSLOVMWARE_REPO:-${GIT_BASE}/openstack/oslo.vmware.git} +GITBRANCH["oslo.vmware"]=${OSLOVMWARE_BRANCH:-$TARGET_BRANCH} + +# osprofiler +GITREPO["osprofiler"]=${OSPROFILER_REPO:-${GIT_BASE}/openstack/osprofiler.git} +GITBRANCH["osprofiler"]=${OSPROFILER_BRANCH:-$TARGET_BRANCH} + +# pycadf auditing library +GITREPO["pycadf"]=${PYCADF_REPO:-${GIT_BASE}/openstack/pycadf.git} +GITBRANCH["pycadf"]=${PYCADF_BRANCH:-$TARGET_BRANCH} + +# stevedore plugin manager +GITREPO["stevedore"]=${STEVEDORE_REPO:-${GIT_BASE}/openstack/stevedore.git} +GITBRANCH["stevedore"]=${STEVEDORE_BRANCH:-$TARGET_BRANCH} + +# taskflow plugin manager +GITREPO["taskflow"]=${TASKFLOW_REPO:-${GIT_BASE}/openstack/taskflow.git} +GITBRANCH["taskflow"]=${TASKFLOW_BRANCH:-$TARGET_BRANCH} + +# tooz plugin manager +GITREPO["tooz"]=${TOOZ_REPO:-${GIT_BASE}/openstack/tooz.git} +GITBRANCH["tooz"]=${TOOZ_BRANCH:-$TARGET_BRANCH} + +# pbr drives the setuptools configs +GITREPO["pbr"]=${PBR_REPO:-${GIT_BASE}/openstack/pbr.git} +GITBRANCH["pbr"]=${PBR_BRANCH:-$BRANCHLESS_TARGET_BRANCH} + + +################## +# +# Libraries managed by OpenStack programs (non oslo) +# +################## + +# cursive library +GITREPO["cursive"]=${CURSIVE_REPO:-${GIT_BASE}/openstack/cursive.git} +GITBRANCH["cursive"]=${CURSIVE_BRANCH:-$TARGET_BRANCH} + +# glance store library +GITREPO["glance_store"]=${GLANCE_STORE_REPO:-${GIT_BASE}/openstack/glance_store.git} +GITBRANCH["glance_store"]=${GLANCE_STORE_BRANCH:-$TARGET_BRANCH} + +# keystone middleware +GITREPO["keystonemiddleware"]=${KEYSTONEMIDDLEWARE_REPO:-${GIT_BASE}/openstack/keystonemiddleware.git} +GITBRANCH["keystonemiddleware"]=${KEYSTONEMIDDLEWARE_BRANCH:-$TARGET_BRANCH} + +# ceilometer middleware +GITREPO["ceilometermiddleware"]=${CEILOMETERMIDDLEWARE_REPO:-${GIT_BASE}/openstack/ceilometermiddleware.git} +GITBRANCH["ceilometermiddleware"]=${CEILOMETERMIDDLEWARE_BRANCH:-$TARGET_BRANCH} +GITDIR["ceilometermiddleware"]=$DEST/ceilometermiddleware + +# openstacksdk OpenStack Python SDK 
+GITREPO["openstacksdk"]=${OPENSTACKSDK_REPO:-${GIT_BASE}/openstack/openstacksdk.git} +GITBRANCH["openstacksdk"]=${OPENSTACKSDK_BRANCH:-$TARGET_BRANCH} + +# os-brick library to manage local volume attaches +GITREPO["os-brick"]=${OS_BRICK_REPO:-${GIT_BASE}/openstack/os-brick.git} +GITBRANCH["os-brick"]=${OS_BRICK_BRANCH:-$TARGET_BRANCH} + +# os-client-config to manage clouds.yaml and friends +GITREPO["os-client-config"]=${OS_CLIENT_CONFIG_REPO:-${GIT_BASE}/openstack/os-client-config.git} +GITBRANCH["os-client-config"]=${OS_CLIENT_CONFIG_BRANCH:-$TARGET_BRANCH} +GITDIR["os-client-config"]=$DEST/os-client-config + +# os-vif library to communicate between Neutron to Nova +GITREPO["os-vif"]=${OS_VIF_REPO:-${GIT_BASE}/openstack/os-vif.git} +GITBRANCH["os-vif"]=${OS_VIF_BRANCH:-$TARGET_BRANCH} + +# osc-lib OpenStackClient common lib +GITREPO["osc-lib"]=${OSC_LIB_REPO:-${GIT_BASE}/openstack/osc-lib.git} +GITBRANCH["osc-lib"]=${OSC_LIB_BRANCH:-$TARGET_BRANCH} + +# ironic common lib +GITREPO["ironic-lib"]=${IRONIC_LIB_REPO:-${GIT_BASE}/openstack/ironic-lib.git} +GITBRANCH["ironic-lib"]=${IRONIC_LIB_BRANCH:-$TARGET_BRANCH} +# this doesn't exist in a lib file, so set it here +GITDIR["ironic-lib"]=$DEST/ironic-lib + +# diskimage-builder tool +GITREPO["diskimage-builder"]=${DIB_REPO:-${GIT_BASE}/openstack/diskimage-builder.git} +GITBRANCH["diskimage-builder"]=${DIB_BRANCH:-$BRANCHLESS_TARGET_BRANCH} +GITDIR["diskimage-builder"]=$DEST/diskimage-builder + +# neutron-lib library containing neutron stable non-REST interfaces +GITREPO["neutron-lib"]=${NEUTRON_LIB_REPO:-${GIT_BASE}/openstack/neutron-lib.git} +GITBRANCH["neutron-lib"]=${NEUTRON_LIB_BRANCH:-$TARGET_BRANCH} +GITDIR["neutron-lib"]=$DEST/neutron-lib + +# os-resource-classes library containing a list of standardized resource classes for OpenStack +GITREPO["os-resource-classes"]=${OS_RESOURCE_CLASSES_REPO:-${GIT_BASE}/openstack/os-resource-classes.git} +GITBRANCH["os-resource-classes"]=${OS_RESOURCE_CLASSES_BRANCH:-$TARGET_BRANCH} + +# os-traits library for resource provider traits in the placement service +GITREPO["os-traits"]=${OS_TRAITS_REPO:-${GIT_BASE}/openstack/os-traits.git} +GITBRANCH["os-traits"]=${OS_TRAITS_BRANCH:-$TARGET_BRANCH} + +# ovsdbapp used by neutron +GITREPO["ovsdbapp"]=${OVSDBAPP_REPO:-${GIT_BASE}/openstack/ovsdbapp.git} +GITBRANCH["ovsdbapp"]=${OVSDBAPP_BRANCH:-$TARGET_BRANCH} +GITDIR["ovsdbapp"]=$DEST/ovsdbapp + +# os-ken used by neutron +GITREPO["os-ken"]=${OS_KEN_REPO:-${GIT_BASE}/openstack/os-ken.git} +GITBRANCH["os-ken"]=${OS_KEN_BRANCH:-$TARGET_BRANCH} +GITDIR["os-ken"]=$DEST/os-ken + + +################# +# +# 3rd Party Components (non pip installable) +# +# NOTE(sdague): these should be converted to release version installs or removed +# +################# + +# ironic python agent +IRONIC_PYTHON_AGENT_REPO=${IRONIC_PYTHON_AGENT_REPO:-${GIT_BASE}/openstack/ironic-python-agent.git} +IRONIC_PYTHON_AGENT_BRANCH=${IRONIC_PYTHON_AGENT_BRANCH:-$TARGET_BRANCH} + +# a websockets/html5 or flash powered VNC console for vm instances +NOVNC_REPO=${NOVNC_REPO:-https://github.com/novnc/novnc.git} +NOVNC_BRANCH=${NOVNC_BRANCH:-v1.3.0} + +# a websockets/html5 or flash powered SPICE console for vm instances +SPICE_REPO=${SPICE_REPO:-http://anongit.freedesktop.org/git/spice/spice-html5.git} +SPICE_BRANCH=${SPICE_BRANCH:-$BRANCHLESS_TARGET_BRANCH} + +# Global flag used to configure Tempest and potentially other services if +# volume multiattach is supported. 
In Queens, only the libvirt compute driver +# and lvm volume driver support multiattach, and qemu must be less than 2.10 +# or libvirt must be greater than or equal to 3.10. +ENABLE_VOLUME_MULTIATTACH=$(trueorfalse False ENABLE_VOLUME_MULTIATTACH) + +# Nova hypervisor configuration. We default to libvirt with **kvm** but will +# drop back to **qemu** if we are unable to load the kvm module. ``stack.sh`` can +# also install an **LXC** or **OpenVZ** based system. +DEFAULT_VIRT_DRIVER=libvirt +VIRT_DRIVER=${VIRT_DRIVER:-$DEFAULT_VIRT_DRIVER} +case "$VIRT_DRIVER" in + ironic|libvirt) + LIBVIRT_TYPE=${LIBVIRT_TYPE:-kvm} + LIBVIRT_CPU_MODE=${LIBVIRT_CPU_MODE:-custom} + LIBVIRT_CPU_MODEL=${LIBVIRT_CPU_MODEL:-Nehalem} + + if [[ -z "$os_VENDOR" ]]; then + GetOSVersion + fi + + if [[ "$os_VENDOR" =~ (Debian|Ubuntu) ]]; then + LIBVIRT_GROUP=libvirt + else + LIBVIRT_GROUP=libvirtd + fi + ;; + lxd) + LXD_GROUP=${LXD_GROUP:-"lxd"} + ;; + docker|zun) + DOCKER_GROUP=${DOCKER_GROUP:-"docker"} + ;; + fake) + NUMBER_FAKE_NOVA_COMPUTE=${NUMBER_FAKE_NOVA_COMPUTE:-1} + ;; + *) + ;; +esac + +# Images +# ------ + +# Specify a comma-separated list of images to download and install into glance. +# Supported urls here are: +# * "uec-style" images: +# If the file ends in .tar.gz, uncompress the tarball and and select the first +# .img file inside it as the image. If present, use "*-vmlinuz*" as the kernel +# and "*-initrd*" as the ramdisk +# example: https://cloud-images.ubuntu.com/releases/jammy/release/ubuntu-22.04-server-cloudimg-amd64.tar.gz +# * disk image (*.img,*.img.gz) +# if file ends in .img, then it will be uploaded and registered as a to +# glance as a disk image. If it ends in .gz, it is uncompressed first. +# example: +# https://cloud-images.ubuntu.com/releases/jammy/release/ubuntu-22.04-server-cloudimg-amd64.img +# https://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs.img.gz +# * OpenVZ image: +# OpenVZ uses its own format of image, and does not support UEC style images + +#IMAGE_URLS="https://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img" # cirros full disk image + +CIRROS_VERSION=${CIRROS_VERSION:-"0.6.3"} +CIRROS_ARCH=${CIRROS_ARCH:-$(uname -m)} + +# Set default image based on ``VIRT_DRIVER`` and ``LIBVIRT_TYPE``, either of +# which may be set in ``local.conf``. Also allow ``DEFAULT_IMAGE_NAME`` and +# ``IMAGE_URLS`` to be set in the `localrc` section of ``local.conf``. 
+DOWNLOAD_DEFAULT_IMAGES=$(trueorfalse True DOWNLOAD_DEFAULT_IMAGES) +if [[ "$DOWNLOAD_DEFAULT_IMAGES" == "True" ]]; then + if [[ -n "$IMAGE_URLS" ]]; then + IMAGE_URLS+="," + fi + case "$VIRT_DRIVER" in + libvirt) + case "$LIBVIRT_TYPE" in + lxc) # the cirros root disk in the uec tarball is empty, so it will not work for lxc + DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs} + DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_FILE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs.img.gz} + IMAGE_URLS+="https://github.com/cirros-dev/cirros/releases/download/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";; + *) # otherwise, use the qcow image + DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk} + DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_FILE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img} + IMAGE_URLS+="https://github.com/cirros-dev/cirros/releases/download/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";; + esac + ;; + vsphere) + DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.2-i386-disk.vmdk} + DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_FILE_NAME:-$DEFAULT_IMAGE_NAME} + IMAGE_URLS+="http://partnerweb.vmware.com/programs/vmdkimage/${DEFAULT_IMAGE_FILE_NAME}";; + fake) + # Use the same as the default for libvirt + DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk} + DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_FILE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img} + IMAGE_URLS+="https://github.com/cirros-dev/cirros/releases/download/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";; + esac + DOWNLOAD_DEFAULT_IMAGES=False +fi + +# This is a comma separated list of extra URLS to be listed for +# download by the tools/image_list.sh script. CI environments can +# pre-download these URLS and place them in $FILES. Later scripts can +# then use "get_extra_file " which will print out the path to the +# file; it will either be downloaded on demand or acquired from the +# cache if there. 
+EXTRA_CACHE_URLS="" + +# etcd3 defaults +ETCD_VERSION=${ETCD_VERSION:-v3.5.21} +ETCD_SHA256_AMD64=${ETCD_SHA256_AMD64:-"adddda4b06718e68671ffabff2f8cee48488ba61ad82900e639d108f2148501c"} +ETCD_SHA256_ARM64=${ETCD_SHA256_ARM64:-"95bf6918623a097c0385b96f139d90248614485e781ec9bee4768dbb6c79c53f"} +ETCD_SHA256_PPC64=${ETCD_SHA256_PPC64:-"6fb6ecb3d1b331eb177dc610a8efad3aceb1f836d6aeb439ba0bfac5d5c2a38c"} +ETCD_SHA256_S390X=${ETCD_SHA256_S390X:-"a211a83961ba8a7e94f7d6343ad769e699db21a715ba4f3b68cf31ea28f9c951"} +# Make sure etcd3 downloads the correct architecture +if is_arch "x86_64"; then + ETCD_ARCH="amd64" + ETCD_SHA256=${ETCD_SHA256:-$ETCD_SHA256_AMD64} +elif is_arch "aarch64"; then + ETCD_ARCH="arm64" + ETCD_SHA256=${ETCD_SHA256:-$ETCD_SHA256_ARM64} +elif is_arch "ppc64le"; then + ETCD_ARCH="ppc64le" + ETCD_SHA256=${ETCD_SHA256:-$ETCD_SHA256_PPC64} +elif is_arch "s390x"; then + ETCD_ARCH="s390x" + ETCD_SHA256=${ETCD_SHA256:-$ETCD_SHA256_S390X} +else + exit_distro_not_supported "invalid hardware type - $ETCD_ARCH" +fi +ETCD_PORT=${ETCD_PORT:-2379} +ETCD_PEER_PORT=${ETCD_PEER_PORT:-2380} +ETCD_DOWNLOAD_URL=${ETCD_DOWNLOAD_URL:-https://github.com/etcd-io/etcd/releases/download} +ETCD_NAME=etcd-$ETCD_VERSION-linux-$ETCD_ARCH +ETCD_DOWNLOAD_FILE=$ETCD_NAME.tar.gz +ETCD_DOWNLOAD_LOCATION=$ETCD_DOWNLOAD_URL/$ETCD_VERSION/$ETCD_DOWNLOAD_FILE +# etcd is always required, so place it into list of pre-cached downloads +EXTRA_CACHE_URLS+=",$ETCD_DOWNLOAD_LOCATION" + +# Cache settings +CACHE_BACKEND=${CACHE_BACKEND:-"dogpile.cache.memcached"} +MEMCACHE_SERVERS=${MEMCACHE_SERVERS:-"localhost:11211"} + +# Detect duplicate values in IMAGE_URLS +for image_url in ${IMAGE_URLS//,/ }; do + if [ $(echo "$IMAGE_URLS" | grep -o -F "$image_url" | wc -l) -gt 1 ]; then + die $LINENO "$image_url is duplicate, please remove it from IMAGE_URLS." + fi +done + +# 30Gb default volume backing file size +VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-30G} + +# Prefixes for volume and instance names +VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-} +INSTANCE_NAME_PREFIX=${INSTANCE_NAME_PREFIX:-instance-} + +# Set default port for nova-objectstore +S3_SERVICE_PORT=${S3_SERVICE_PORT:-3333} + +# Common network names +PRIVATE_NETWORK_NAME=${PRIVATE_NETWORK_NAME:-"private"} +PUBLIC_NETWORK_NAME=${PUBLIC_NETWORK_NAME:-"public"} + +PUBLIC_INTERFACE=${PUBLIC_INTERFACE:-""} + +# Allow the use of an alternate protocol (such as https) for service endpoints +SERVICE_PROTOCOL=${SERVICE_PROTOCOL:-http} + +# Sets the maximum number of workers for most services to reduce +# the memory used where there are a large number of CPUs present +# (the default number of workers for many services is the number of CPUs) +# Also sets the minimum number of workers to 2. +API_WORKERS=${API_WORKERS:=$(( ($(nproc)/4)<2 ? 2 : ($(nproc)/4) ))} + +# Service startup timeout +SERVICE_TIMEOUT=${SERVICE_TIMEOUT:-60} + +# Timeout for compute node registration in Nova +NOVA_READY_TIMEOUT=${NOVA_READY_TIMEOUT:-$SERVICE_TIMEOUT} + +# Service graceful shutdown timeout +SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT=${SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT:-5} + +# Service graceful shutdown timeout +WORKER_TIMEOUT=${WORKER_TIMEOUT:-80} + +# Common Configuration +# -------------------- + +# Set ``OFFLINE`` to ``True`` to configure ``stack.sh`` to run cleanly without +# Internet access. ``stack.sh`` must have been previously run with Internet +# access to install prerequisites and fetch repositories. 
+OFFLINE=$(trueorfalse False OFFLINE) + +# Set ``ERROR_ON_CLONE`` to ``True`` to configure ``stack.sh`` to exit if +# the destination git repository does not exist during the ``git_clone`` +# operation. +ERROR_ON_CLONE=$(trueorfalse False ERROR_ON_CLONE) + +# Whether to enable the debug log level in OpenStack services +ENABLE_DEBUG_LOG_LEVEL=$(trueorfalse True ENABLE_DEBUG_LOG_LEVEL) + +# Set fixed and floating range here so we can make sure not to use addresses +# from either range when attempting to guess the IP to use for the host. +# Note that setting ``FIXED_RANGE`` may be necessary when running DevStack +# in an OpenStack cloud that uses either of these address ranges internally. +FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.0/24} +IPV4_ADDRS_SAFE_TO_USE=${IPV4_ADDRS_SAFE_TO_USE:-10.0.0.0/22} +FIXED_RANGE=${FIXED_RANGE:-$IPV4_ADDRS_SAFE_TO_USE} +HOST_IP_IFACE=${HOST_IP_IFACE:-} +HOST_IP=${HOST_IP:-} +HOST_IPV6=${HOST_IPV6:-} + +HOST_IP=$(get_default_host_ip "$FIXED_RANGE" "$FLOATING_RANGE" "$HOST_IP_IFACE" "$HOST_IP" "inet") +if [ "$HOST_IP" == "" ]; then + die $LINENO "Could not determine host ip address. See local.conf for suggestions on setting HOST_IP." +fi + +HOST_IPV6=$(get_default_host_ip "" "" "$HOST_IP_IFACE" "$HOST_IPV6" "inet6") + +# Whether or not the port_security extension should be enabled for Neutron. +NEUTRON_PORT_SECURITY=$(trueorfalse True NEUTRON_PORT_SECURITY) + +# SERVICE IP version +# This is the IP version that services should be listening on, as well +# as using to register their endpoints with keystone. +SERVICE_IP_VERSION=${SERVICE_IP_VERSION:-4} + +# Validate SERVICE_IP_VERSION +# It would be nice to support "4+6" here as well, but that will require +# multiple calls into keystone to register endpoints, so for now let's +# just support one or the other. +if [[ $SERVICE_IP_VERSION != "4" ]] && [[ $SERVICE_IP_VERSION != "6" ]]; then + die $LINENO "SERVICE_IP_VERSION must be either 4 or 6" fi + +if [[ "$SERVICE_IP_VERSION" == 4 ]]; then + DEF_SERVICE_HOST=$HOST_IP + DEF_SERVICE_LOCAL_HOST=127.0.0.1 + DEF_SERVICE_LISTEN_ADDRESS=0.0.0.0 +fi + +if [[ "$SERVICE_IP_VERSION" == 6 ]]; then + if [ "$HOST_IPV6" == "" ]; then + die $LINENO "Could not determine host IPv6 address. See local.conf for suggestions on setting HOST_IPV6." + fi + + DEF_SERVICE_HOST=[$HOST_IPV6] + DEF_SERVICE_LOCAL_HOST=::1 + DEF_SERVICE_LISTEN_ADDRESS="[::]" +fi + +# This is either 0.0.0.0 for IPv4 or [::] for IPv6 +SERVICE_LISTEN_ADDRESS=${SERVICE_LISTEN_ADDRESS:-${DEF_SERVICE_LISTEN_ADDRESS}} + +# Allow the use of an alternate hostname (such as localhost/127.0.0.1) for +# service endpoints. Default is dependent on SERVICE_IP_VERSION above. +SERVICE_HOST=${SERVICE_HOST:-${DEF_SERVICE_HOST}} +# This is either 127.0.0.1 for IPv4 or ::1 for IPv6 +SERVICE_LOCAL_HOST=${SERVICE_LOCAL_HOST:-${DEF_SERVICE_LOCAL_HOST}} + +# TUNNEL IP version +# This is the IP version to use for tunnel endpoints +TUNNEL_IP_VERSION=${TUNNEL_IP_VERSION:-4} + +# Validate TUNNEL_IP_VERSION +if [[ $TUNNEL_IP_VERSION != "4" ]] && [[ $TUNNEL_IP_VERSION != "6" ]]; then + die $LINENO "TUNNEL_IP_VERSION must be either 4 or 6" +fi + +if [[ "$TUNNEL_IP_VERSION" == 4 ]]; then + DEF_TUNNEL_ENDPOINT_IP=$HOST_IP +fi + +if [[ "$TUNNEL_IP_VERSION" == 6 ]]; then + # Only die if the user has not over-ridden the endpoint IP + if [[ "$HOST_IPV6" == "" ]] && [[ "$TUNNEL_ENDPOINT_IP" == "" ]]; then + die $LINENO "Could not determine host IPv6 address. See local.conf for suggestions on setting HOST_IPV6." 
+ fi + + DEF_TUNNEL_ENDPOINT_IP=$HOST_IPV6 +fi + +# Allow the use of an alternate address for tunnel endpoints. +# Default is dependent on TUNNEL_IP_VERSION above. +TUNNEL_ENDPOINT_IP=${TUNNEL_ENDPOINT_IP:-${DEF_TUNNEL_ENDPOINT_IP}} + +# Configure services to use syslog instead of writing to individual log files +SYSLOG=$(trueorfalse False SYSLOG) +SYSLOG_HOST=${SYSLOG_HOST:-$HOST_IP} +SYSLOG_PORT=${SYSLOG_PORT:-516} + +# Set global ``GIT_DEPTH=`` to limit the history depth of the git clone +# Set to 0 to disable shallow cloning +GIT_DEPTH=${GIT_DEPTH:-0} + +# We may not need to recreate database in case 2 Keystone services +# sharing the same database. It would be useful for multinode Grenade tests. +RECREATE_KEYSTONE_DB=$(trueorfalse True RECREATE_KEYSTONE_DB) + +# Following entries need to be last items in file + +# New way is LOGDIR for all logs and LOGFILE for stack.sh trace log, but if not fully-qualified will be in LOGDIR +# LOGFILE LOGDIR output +# not set not set (new) set LOGDIR from default +# set not set stack.sh log to LOGFILE, (new) set LOGDIR from LOGFILE +# not set set screen logs to LOGDIR +# set set stack.sh log to LOGFILE, screen logs to LOGDIR + +# Set up new logging defaults +if [[ -z "${LOGDIR:-}" ]]; then + default_logdir=$DEST/logs + if [[ -z "${LOGFILE:-}" ]]; then + # Nothing is set, we need a default + LOGDIR="$default_logdir" + else + # Set default LOGDIR + LOGDIR="${LOGFILE%/*}" + logfile="${LOGFILE##*/}" + if [[ -z "$LOGDIR" || "$LOGDIR" == "$logfile" ]]; then + # LOGFILE had no path, set a default + LOGDIR="$default_logdir" + fi + fi + unset default_logdir logfile +fi + +# ``LOGDIR`` is always set at this point so it is not useful as a 'enable' for service logs + +# System-wide ulimit file descriptors override +ULIMIT_NOFILE=${ULIMIT_NOFILE:-2048} + +# Local variables: +# mode: shell-script +# End: diff --git a/tests/fake-service.sh b/tests/fake-service.sh new file mode 100755 index 0000000000..d4b9b56bb3 --- /dev/null +++ b/tests/fake-service.sh @@ -0,0 +1,19 @@ +#!/bin/bash +# fake-service.sh - a fake service for start/stop testing +# $1 - sleep time + +SLEEP_TIME=${1:-3} + +LOG=/tmp/fake-service.log +TIMESTAMP_FORMAT=${TIMESTAMP_FORMAT:-"%F-%H%M%S"} + +# duplicate output +exec 1> >(tee -a ${LOG}) + +echo "" +echo "Starting fake-service for ${SLEEP_TIME}" +while true; do + echo "$(date +${TIMESTAMP_FORMAT}) [$$]" + sleep ${SLEEP_TIME} +done + diff --git a/tests/test_functions.sh b/tests/test_functions.sh new file mode 100755 index 0000000000..08143d2a68 --- /dev/null +++ b/tests/test_functions.sh @@ -0,0 +1,293 @@ +#!/usr/bin/env bash + +# Tests for DevStack functions + +TOP=$(cd $(dirname "$0")/.. && pwd) + +# Import common functions +source $TOP/functions + +source $TOP/tests/unittest.sh + +echo "Testing generate_hex_string()" + +VAL=$(generate_hex_string 16) +if [[ ${#VAL} -eq 32 ]]; then + passed "OK" +else + failed "generate_hex_string 16 failed ${#VAL}" +fi + +VAL=$(generate_hex_string 32) +if [[ ${#VAL} -eq 64 ]]; then + passed "OK" +else + failed "generate_hex_string 32 failed ${#VAL}" +fi + +echo "Testing die_if_not_set()" + +bash -c "source $TOP/functions; X=`echo Y && true`; die_if_not_set $LINENO X 'not OK'" +if [[ $? != 0 ]]; then + failed "die_if_not_set [X='Y' true] Failed" +else + passed 'OK' +fi + +bash -c "source $TOP/functions; X=`true`; die_if_not_set $LINENO X 'OK'" > /dev/null 2>&1 +if [[ $? 
= 0 ]]; then + failed "die_if_not_set [X='' true] Failed" +fi + +bash -c "source $TOP/functions; X=`echo Y && false`; die_if_not_set $LINENO X 'not OK'" +if [[ $? != 0 ]]; then + failed "die_if_not_set [X='Y' false] Failed" +else + passed 'OK' +fi + +bash -c "source $TOP/functions; X=`false`; die_if_not_set $LINENO X 'OK'" > /dev/null 2>&1 +if [[ $? = 0 ]]; then + failed "die_if_not_set [X='' false] Failed" +fi + + +# Enabling/disabling services + +echo "Testing enable_service()" + +function test_enable_service { + local start="$1" + local add="$2" + local finish="$3" + + ENABLED_SERVICES="$start" + enable_service $add + if [ "$ENABLED_SERVICES" = "$finish" ]; then + passed "OK: $start + $add -> $ENABLED_SERVICES" + else + failed "changing $start to $finish with $add failed: $ENABLED_SERVICES" + fi +} + +test_enable_service '' a 'a' +test_enable_service 'a' b 'a,b' +test_enable_service 'a,b' c 'a,b,c' +test_enable_service 'a,b' c 'a,b,c' +test_enable_service 'a,b,' c 'a,b,c' +test_enable_service 'a,b' c,d 'a,b,c,d' +test_enable_service 'a,b' "c d" 'a,b,c,d' +test_enable_service 'a,b,c' c 'a,b,c' + +test_enable_service 'a,b,-c' c 'a,b' +test_enable_service 'a,b,c' -c 'a,b' + +function test_disable_service { + local start="$1" + local del="$2" + local finish="$3" + + ENABLED_SERVICES="$start" + disable_service "$del" + if [ "$ENABLED_SERVICES" = "$finish" ]; then + passed "OK: $start - $del -> $ENABLED_SERVICES" + else + failed "changing $start to $finish with $del failed: $ENABLED_SERVICES" + fi +} + +echo "Testing disable_service()" +test_disable_service 'a,b,c' a 'b,c' +test_disable_service 'a,b,c' b 'a,c' +test_disable_service 'a,b,c' c 'a,b' + +test_disable_service 'a,b,c' a 'b,c' +test_disable_service 'b,c' b 'c' +test_disable_service 'c' c '' +test_disable_service '' d '' + +test_disable_service 'a,b,c,' c 'a,b' +test_disable_service 'a,b' c 'a,b' + + +echo "Testing disable_all_services()" +ENABLED_SERVICES=a,b,c +disable_all_services + +if [[ -z "$ENABLED_SERVICES" ]]; then + passed "OK" +else + failed "disabling all services FAILED: $ENABLED_SERVICES" +fi + +echo "Testing disable_negated_services()" + + +function test_disable_negated_services { + local start="$1" + local finish="$2" + + ENABLED_SERVICES="$start" + disable_negated_services + if [ "$ENABLED_SERVICES" = "$finish" ]; then + passed "OK: $start + $add -> $ENABLED_SERVICES" + else + failed "changing $start to $finish failed: $ENABLED_SERVICES" + fi +} + +test_disable_negated_services '-a' '' +test_disable_negated_services '-a,a' '' +test_disable_negated_services '-a,-a' '' +test_disable_negated_services 'a,-a' '' +test_disable_negated_services 'b,a,-a' 'b' +test_disable_negated_services 'a,b,-a' 'b' +test_disable_negated_services 'a,-a,b' 'b' +test_disable_negated_services 'a,aa,-a' 'aa' +test_disable_negated_services 'aa,-a' 'aa' +test_disable_negated_services 'a_a, -a_a' '' +test_disable_negated_services 'a-b, -a-b' '' +test_disable_negated_services 'a-b, b, -a-b' 'b' +test_disable_negated_services 'a,-a,av2,b' 'av2,b' +test_disable_negated_services 'a,aa,-a' 'aa' +test_disable_negated_services 'a,av2,-a,a' 'av2' +test_disable_negated_services 'a,-a,av2' 'av2' + +echo "Testing remove_disabled_services()" + +function test_remove_disabled_services { + local service_list="$1" + local remove_list="$2" + local expected="$3" + + results=$(remove_disabled_services "$service_list" "$remove_list") + if [ "$results" = "$expected" ]; then + passed "OK: '$service_list' - '$remove_list' -> '$results'" + else + failed "getting 
'$expected' from '$service_list' - '$remove_list' failed: '$results'" + fi +} + +test_remove_disabled_services 'a,b,c' 'a,c' 'b' +test_remove_disabled_services 'a,b,c' 'b' 'a,c' +test_remove_disabled_services 'a,b,c,d' 'a,c d' 'b' +test_remove_disabled_services 'a,b c,d' 'a d' 'b,c' +test_remove_disabled_services 'a,b,c' 'a,b,c' '' +test_remove_disabled_services 'a,b,c' 'd' 'a,b,c' +test_remove_disabled_services 'a,b,c' '' 'a,b,c' +test_remove_disabled_services '' 'a,b,c' '' +test_remove_disabled_services '' '' '' + +echo "Testing is_package_installed()" + +if [[ -z "$os_PACKAGE" ]]; then + GetOSVersion +fi + +if [[ "$os_PACKAGE" = "deb" ]]; then + is_package_installed dpkg + VAL=$? +elif [[ "$os_PACKAGE" = "rpm" ]]; then + is_package_installed rpm + VAL=$? +else + VAL=1 +fi +if [[ "$VAL" -eq 0 ]]; then + passed "OK" +else + failed "is_package_installed() on existing package failed" +fi + +if [[ "$os_PACKAGE" = "deb" ]]; then + is_package_installed dpkg bash + VAL=$? +elif [[ "$os_PACKAGE" = "rpm" ]]; then + is_package_installed rpm bash + VAL=$? +else + VAL=1 +fi +if [[ "$VAL" -eq 0 ]]; then + passed "OK" +else + failed "is_package_installed() on more than one existing package failed" +fi + +is_package_installed zzzZZZzzz +VAL=$? +if [[ "$VAL" -ne 0 ]]; then + passed "OK" +else + failed "is_package_installed() on non-existing package failed" +fi + +# test against removed package...was a bug on Ubuntu +if is_ubuntu; then + PKG=cowsay-off + if ! (dpkg -s $PKG >/dev/null 2>&1); then + # it was never installed...set up the condition + sudo apt-get install -y cowsay >/dev/null 2>&1 + fi + if (dpkg -s $PKG >/dev/null 2>&1); then + # remove it to create the 'un' status + sudo dpkg -P $PKG >/dev/null 2>&1 + fi + + # now test the installed check on a deleted package + is_package_installed $PKG + VAL=$? + if [[ "$VAL" -ne 0 ]]; then + passed "OK" + else + failed "is_package_installed() on deleted package failed" + fi +fi + +# test isset function +echo "Testing isset()" +you_should_not_have_this_variable=42 + +if isset "you_should_not_have_this_variable"; then + passed "OK" +else + failed "\"you_should_not_have_this_variable\" not declared. failed" +fi + +unset you_should_not_have_this_variable +if isset "you_should_not_have_this_variable"; then + failed "\"you_should_not_have_this_variable\" looks like declared variable." +else + passed "OK" +fi + +function test_export_proxy_variables { + echo "Testing export_proxy_variables()" + + local expected results + + http_proxy=http_proxy_test + https_proxy=https_proxy_test + no_proxy=no_proxy_test + + export_proxy_variables + expected=$(echo -e "http_proxy=$http_proxy\nhttps_proxy=$https_proxy\nno_proxy=$no_proxy") + results=$(env | egrep '(http(s)?|no)_proxy=' | sort) + if [[ $expected = $results ]]; then + passed "OK: Proxy variables are exported when proxy variables are set" + else + failed "Expected: $expected, Failed: $results" + fi + + unset http_proxy https_proxy no_proxy + export_proxy_variables + results=$(env | egrep '(http(s)?|no)_proxy=') + if [[ "" = $results ]]; then + passed "OK: Proxy variables aren't exported when proxy variables aren't set" + else + failed "Expected: '', Failed: $results" + fi +} +test_export_proxy_variables + +report_results diff --git a/tests/test_ini_config.sh b/tests/test_ini_config.sh new file mode 100755 index 0000000000..fd3896d6ba --- /dev/null +++ b/tests/test_ini_config.sh @@ -0,0 +1,255 @@ +#!/usr/bin/env bash + +# Tests for DevStack INI functions + +TOP=$(cd $(dirname "$0")/.. 
&& pwd) + +# Import config functions +source $TOP/inc/ini-config + +source $TOP/tests/unittest.sh + +set -e + +echo "Testing INI functions" + +INI_TMP_DIR=$(mktemp -d) +INI_TMP_ETC_DIR=$INI_TMP_DIR/etc +TEST_INI=${INI_TMP_ETC_DIR}/test.ini +mkdir ${INI_TMP_ETC_DIR} + +echo "Creating $TEST_INI" +cat >${TEST_INI} < $thing" + fi + fi +} + +function test_all_libs_upto_date { + # this is all the magics + local found_libs=${!GITREPO[@]} + declare -A all_libs + for lib in $ALL_LIBS; do + all_libs[$lib]=1 + done + + for lib in $found_libs; do + if [[ -z ${all_libs[$lib]} ]]; then + echo "Library '$lib' not listed in unit tests, please add to ALL_LIBS" + exit 1 + fi + + done + echo "test_all_libs_upto_date PASSED" +} + +function test_libs_exist { + local lib="" + for lib in $ALL_LIBS; do + check_exists "${GITREPO[$lib]}" "GITREPO" "$lib" + check_exists "${GITBRANCH[$lib]}" "GITBRANCH" "$lib" + check_exists "${GITDIR[$lib]}" "GITDIR" "$lib" + done + + echo "test_libs_exist PASSED" +} + +function test_branch_master { + for lib in $ALL_LIBS; do + if [[ ${GITBRANCH[$lib]} != "master" ]]; then + echo "GITBRANCH for $lib not master (${GITBRANCH[$lib]})" + exit 1 + fi + done + + echo "test_branch_master PASSED" +} + +set -o errexit + +test_libs_exist +test_branch_master +test_all_libs_upto_date diff --git a/tests/test_localconf.sh b/tests/test_localconf.sh new file mode 100755 index 0000000000..d8075df442 --- /dev/null +++ b/tests/test_localconf.sh @@ -0,0 +1,475 @@ +#!/usr/bin/env bash +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + +# Tests for DevStack INI functions + +TOP=$(cd $(dirname "$0")/.. && pwd) + +# Import config functions +source $TOP/inc/ini-config + +source $TOP/tests/unittest.sh + +echo "Testing INI local.conf functions" + +# test that can determine if file has section in specified meta-section + +function test_localconf_has_section { + local file_localconf + local file_conf1 + local file_conf2 + file_localconf=`mktemp` + file_conf1=`mktemp` + file_conf2=`mktemp` + + cat <<- EOF > $file_localconf +[[local|localrc]] +LOCALRC_VAR1=localrc_val1 +LOCALRC_VAR2=localrc_val2 +LOCALRC_VAR3=localrc_val3 + +[[post-config|$file_conf1]] +[conf1_t1] +conf1_t1_opt1=conf1_t1_val1 +conf1_t1_opt2=conf1_t1_val2 +conf1_t1_opt3=conf1_t1_val3 +[conf1_t2] +conf1_t2_opt1=conf1_t2_val1 +conf1_t2_opt2=conf1_t2_val2 +conf1_t2_opt3=conf1_t2_val3 +[conf1_t3] +conf1_t3_opt1=conf1_t3_val1 +conf1_t3_opt2=conf1_t3_val2 +conf1_t3_opt3=conf1_t3_val3 + +[[post-extra|$file_conf2]] +[conf2_t1] +conf2_t1_opt1=conf2_t1_val1 +conf2_t1_opt2=conf2_t1_val2 +conf2_t1_opt3=conf2_t1_val3 +EOF + + localconf_has_section $file_localconf post-config $file_conf1 conf1_t1 + assert_equal $? 0 + localconf_has_section $file_localconf post-config $file_conf1 conf1_t2 + assert_equal $? 0 + localconf_has_section $file_localconf post-config $file_conf1 conf1_t3 + assert_equal $? 0 + localconf_has_section $file_localconf post-extra $file_conf2 conf2_t1 + assert_equal $? 
0 + localconf_has_section $file_localconf post-config $file_conf1 conf1_t4 + assert_equal $? 1 + localconf_has_section $file_localconf post-install $file_conf1 conf1_t1 + assert_equal $? 1 + localconf_has_section $file_localconf local localrc conf1_t2 + assert_equal $? 1 + rm -f $file_localconf $file_conf1 $file_conf2 +} + +# test that can determine if file has option in specified meta-section and section +function test_localconf_has_option { + local file_localconf + local file_conf1 + local file_conf2 + file_localconf=`mktemp` + file_conf1=`mktemp` + file_conf2=`mktemp` + cat <<- EOF > $file_localconf +[[post-config|$file_conf1]] +[conf1_t1] +conf1_t1_opt1 = conf1_t1_val1 +conf1_t1_opt2 = conf1_t1_val2 +conf1_t1_opt3 = conf1_t1_val3 +[conf1_t2] +conf1_t2_opt1=conf1_t2_val1 +conf1_t2_opt2=conf1_t2_val2 +conf1_t2_opt3=conf1_t2_val3 +[conf1_t3] +conf1_t3_opt1=conf1_t3_val1 +conf1_t3_opt2=conf1_t3_val2 +conf1_t3_opt3=conf1_t3_val3 + +[[local|localrc]] +LOCALRC_VAR1=localrc_val1 +LOCALRC_VAR2=localrc_val2 +LOCALRC_VAR3=localrc_val3 + +[[post-extra|$file_conf2]] +[conf2_t1] +conf2_t1_opt1=conf2_t1_val1 +conf2_t1_opt2=conf2_t1_val2 +conf2_t1_opt3=conf2_t1_val3 +EOF + + localconf_has_option $file_localconf local localrc "" LOCALRC_VAR1 + assert_equal $? 0 + localconf_has_option $file_localconf local localrc "" LOCALRC_VAR2 + assert_equal $? 0 + localconf_has_option $file_localconf local localrc "" LOCALRC_VAR3 + assert_equal $? 0 + localconf_has_option $file_localconf post-config $file_conf1 conf1_t1 conf1_t1_opt1 + assert_equal $? 0 + localconf_has_option $file_localconf post-config $file_conf1 conf1_t2 conf1_t2_opt2 + assert_equal $? 0 + localconf_has_option $file_localconf post-config $file_conf1 conf1_t3 conf1_t3_opt3 + assert_equal $? 0 + localconf_has_option $file_localconf post-extra $file_conf2 conf2_t1 conf2_t1_opt2 + assert_equal $? 0 + localconf_has_option $file_localconf post-config $file_conf1 conf1_t1_opt4 + assert_equal $? 1 + localconf_has_option $file_localconf post-install $file_conf1 conf1_t1_opt1 + assert_equal $? 1 + localconf_has_option $file_localconf local localrc conf1_t2 conf1_t2_opt1 + assert_equal $? 
1 + rm -f $file_localconf $file_conf1 $file_conf2 +} + +# test that update option in specified meta-section and section +function test_localconf_update_option { + local file_localconf + local file_localconf_expected + local file_conf1 + local file_conf2 + file_localconf=`mktemp` + file_localconf_expected=`mktemp` + file_conf1=`mktemp` + file_conf2=`mktemp` + cat <<- EOF > $file_localconf +[[local|localrc]] +LOCALRC_VAR1 = localrc_val1 +LOCALRC_VAR2 = localrc_val2 +LOCALRC_VAR3 = localrc_val3 + +[[post-config|$file_conf1]] +[conf1_t1] +conf1_t1_opt1=conf1_t1_val1 +conf1_t1_opt2=conf1_t1_val2 +conf1_t1_opt3=conf1_t1_val3 +[conf1_t2] +conf1_t2_opt1=conf1_t2_val1 +conf1_t2_opt2=conf1_t2_val2 +conf1_t2_opt3=conf1_t2_val3 +[conf1_t3] +conf1_t3_opt1=conf1_t3_val1 +conf1_t3_opt2=conf1_t3_val2 +conf1_t3_opt3=conf1_t3_val3 + +[[post-extra|$file_conf2]] +[conf2_t1] +conf2_t1_opt1=conf2_t1_val1 +conf2_t1_opt2=conf2_t1_val2 +conf2_t1_opt3=conf2_t1_val3 +EOF + cat <<- EOF > $file_localconf_expected +[[local|localrc]] +LOCALRC_VAR1 = localrc_val1 +LOCALRC_VAR2 = localrc_val2_update +LOCALRC_VAR3 = localrc_val3 + +[[post-config|$file_conf1]] +[conf1_t1] +conf1_t1_opt1=conf1_t1_val1_update +conf1_t1_opt2=conf1_t1_val2 +conf1_t1_opt3=conf1_t1_val3 +[conf1_t2] +conf1_t2_opt1=conf1_t2_val1 +conf1_t2_opt2=conf1_t2_val2_update +conf1_t2_opt3=conf1_t2_val3 +[conf1_t3] +conf1_t3_opt1=conf1_t3_val1 +conf1_t3_opt2=conf1_t3_val2 +conf1_t3_opt3=conf1_t3_val3_update + +[[post-extra|$file_conf2]] +[conf2_t1] +conf2_t1_opt1=conf2_t1_val1 +conf2_t1_opt2=conf2_t1_val2 +conf2_t1_opt3=conf2_t1_val3_update +EOF + + localconf_update_option "$SUDO" $file_localconf local localrc "" LOCALRC_VAR2 localrc_val2_update + localconf_update_option "$SUDO" $file_localconf post-config $file_conf1 conf1_t1 conf1_t1_opt1 conf1_t1_val1_update + localconf_update_option "$SUDO" $file_localconf post-config $file_conf1 conf1_t2 conf1_t2_opt2 conf1_t2_val2_update + localconf_update_option "$SUDO" $file_localconf post-config $file_conf1 conf1_t3 conf1_t3_opt3 conf1_t3_val3_update + localconf_update_option "$SUDO" $file_localconf post-extra $file_conf2 conf2_t1 conf2_t1_opt3 conf2_t1_val3_update + result=`cat $file_localconf` + result_expected=`cat $file_localconf_expected` + assert_equal "$result" "$result_expected" + localconf_update_option "$SUDO" $file_localconf post-config $file_conf1 conf1_t2 conf1_t3_opt1 conf1_t3_val1_update + localconf_update_option "$SUDO" $file_localconf post-extra $file_conf2 conf2_t1 conf2_t1_opt4 conf2_t1_val4_update + localconf_update_option "$SUDO" $file_localconf post-install $file_conf2 conf2_t1 conf2_t1_opt1 conf2_t1_val1_update + localconf_update_option "$SUDO" $file_localconf local localrc "" LOCALRC_VAR4 localrc_val4_update + result=`cat $file_localconf` + result_expected=`cat $file_localconf_expected` + assert_equal "$result" "$result_expected" + rm -f $file_localconf $file_localconf_expected $file_conf1 $file_conf2 +} + +# test that add option in specified meta-section and section +function test_localconf_add_option { + local file_localconf + local file_localconf_expected + local file_conf1 + local file_conf2 + file_localconf=`mktemp` + file_localconf_expected=`mktemp` + file_conf1=`mktemp` + file_conf2=`mktemp` + cat <<- EOF > $file_localconf +[[post-config|$file_conf1]] +[conf1_t1] +conf1_t1_opt1=conf1_t1_val1 +conf1_t1_opt2=conf1_t1_val2 +conf1_t1_opt3=conf1_t1_val3 +[conf1_t2] +conf1_t2_opt1=conf1_t2_val1 +conf1_t2_opt2=conf1_t2_val2 +conf1_t2_opt3=conf1_t2_val3 +[conf1_t3] +conf1_t3_opt1=conf1_t3_val1 
+conf1_t3_opt2=conf1_t3_val2 +conf1_t3_opt3=conf1_t3_val3 + +[[local|localrc]] +LOCALRC_VAR1=localrc_val1 +LOCALRC_VAR2=localrc_val2 +LOCALRC_VAR3=localrc_val3 + +[[post-extra|$file_conf2]] +[conf2_t1] +conf2_t1_opt1 = conf2_t1_val1 +conf2_t1_opt2 = conf2_t1_val2 +conf2_t1_opt3 = conf2_t1_val3 +EOF + cat <<- EOF > $file_localconf_expected +[[post-config|$file_conf1]] +[conf1_t1] +conf1_t1_opt4 = conf1_t1_val4 +conf1_t1_opt1=conf1_t1_val1 +conf1_t1_opt2=conf1_t1_val2 +conf1_t1_opt3=conf1_t1_val3 +[conf1_t2] +conf1_t2_opt4 = conf1_t2_val4 +conf1_t2_opt1=conf1_t2_val1 +conf1_t2_opt2=conf1_t2_val2 +conf1_t2_opt3=conf1_t2_val3 +[conf1_t3] +conf1_t3_opt4 = conf1_t3_val4 +conf1_t3_opt1=conf1_t3_val1 +conf1_t3_opt2=conf1_t3_val2 +conf1_t3_opt3=conf1_t3_val3 + +[[local|localrc]] +LOCALRC_VAR4 = localrc_val4 +LOCALRC_VAR1=localrc_val1 +LOCALRC_VAR2=localrc_val2 +LOCALRC_VAR3=localrc_val3 + +[[post-extra|$file_conf2]] +[conf2_t1] +conf2_t1_opt4 = conf2_t1_val4 +conf2_t1_opt1 = conf2_t1_val1 +conf2_t1_opt2 = conf2_t1_val2 +conf2_t1_opt3 = conf2_t1_val3 +EOF + + localconf_add_option "$SUDO" $file_localconf local localrc "" LOCALRC_VAR4 localrc_val4 + localconf_add_option "$SUDO" $file_localconf post-config $file_conf1 conf1_t1 conf1_t1_opt4 conf1_t1_val4 + localconf_add_option "$SUDO" $file_localconf post-config $file_conf1 conf1_t2 conf1_t2_opt4 conf1_t2_val4 + localconf_add_option "$SUDO" $file_localconf post-config $file_conf1 conf1_t3 conf1_t3_opt4 conf1_t3_val4 + localconf_add_option "$SUDO" $file_localconf post-extra $file_conf2 conf2_t1 conf2_t1_opt4 conf2_t1_val4 + result=`cat $file_localconf` + result_expected=`cat $file_localconf_expected` + assert_equal "$result" "$result_expected" + localconf_add_option "$SUDO" $file_localconf local localrc.conf "" LOCALRC_VAR4 localrc_val4_update + localconf_add_option "$SUDO" $file_localconf post-config $file_conf1 conf1_t4 conf1_t4_opt1 conf1_t4_val1 + localconf_add_option "$SUDO" $file_localconf post-extra $file_conf2 conf2_t2 conf2_t2_opt4 conf2_t2_val4 + localconf_add_option "$SUDO" $file_localconf post-install $file_conf2 conf2_t1 conf2_t1_opt4 conf2_t2_val4 + result=`cat $file_localconf` + result_expected=`cat $file_localconf_expected` + assert_equal "$result" "$result_expected" + rm -f $file_localconf $file_localconf_expected $file_conf1 $file_conf2 +} + +# test that add section and option in specified meta-section +function test_localconf_add_section_and_option { + local file_localconf + local file_localconf_expected + local file_conf1 + local file_conf2 + file_localconf=`mktemp` + file_localconf_expected=`mktemp` + file_conf1=`mktemp` + file_conf2=`mktemp` + cat <<- EOF > $file_localconf +[[post-config|$file_conf1]] +[conf1_t1] +conf1_t1_opt1=conf1_t1_val1 +conf1_t1_opt2=conf1_t1_val2 +conf1_t1_opt3=conf1_t1_val3 +[conf1_t2] +conf1_t2_opt1=conf1_t2_val1 +conf1_t2_opt2=conf1_t2_val2 +conf1_t2_opt3=conf1_t2_val3 +[conf1_t3] +conf1_t3_opt1=conf1_t3_val1 +conf1_t3_opt2=conf1_t3_val2 +conf1_t3_opt3=conf1_t3_val3 + +[[local|localrc]] +LOCALRC_VAR1=localrc_val1 +LOCALRC_VAR2=localrc_val2 +LOCALRC_VAR3=localrc_val3 + +[[post-extra|$file_conf2]] +[conf2_t1] +conf2_t1_opt1=conf2_t1_val1 +conf2_t1_opt2=conf2_t1_val2 +conf2_t1_opt3=conf2_t1_val3 +EOF + cat <<- EOF > $file_localconf_expected +[[post-config|$file_conf1]] +[conf1_t4] +conf1_t4_opt1 = conf1_t4_val1 +[conf1_t1] +conf1_t1_opt1=conf1_t1_val1 +conf1_t1_opt2=conf1_t1_val2 +conf1_t1_opt3=conf1_t1_val3 +[conf1_t2] +conf1_t2_opt1=conf1_t2_val1 +conf1_t2_opt2=conf1_t2_val2 +conf1_t2_opt3=conf1_t2_val3 
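# Illustrative sketch of localconf_add_option, matching the call pattern
# exercised above. It prepends a new option to a section that already exists in
# the named meta-section; when the meta-section or section is missing nothing is
# written, which is what the second assert block verifies. The conf file path
# and option here are hypothetical.
localconf_add_option "" /opt/stack/devstack/local.conf post-config /etc/glance/glance-api.conf DEFAULT workers 2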
+[conf1_t3] +conf1_t3_opt1=conf1_t3_val1 +conf1_t3_opt2=conf1_t3_val2 +conf1_t3_opt3=conf1_t3_val3 + +[[local|localrc]] +LOCALRC_VAR1=localrc_val1 +LOCALRC_VAR2=localrc_val2 +LOCALRC_VAR3=localrc_val3 + +[[post-extra|$file_conf2]] +[conf2_t2] +conf2_t2_opt1 = conf2_t2_val1 +[conf2_t1] +conf2_t1_opt1=conf2_t1_val1 +conf2_t1_opt2=conf2_t1_val2 +conf2_t1_opt3=conf2_t1_val3 +EOF + + localconf_add_section_and_option "$SUDO" $file_localconf post-config $file_conf1 conf1_t4 conf1_t4_opt1 conf1_t4_val1 + localconf_add_section_and_option "$SUDO" $file_localconf post-extra $file_conf2 conf2_t2 conf2_t2_opt1 conf2_t2_val1 + result=`cat $file_localconf` + result_expected=`cat $file_localconf_expected` + assert_equal "$result" "$result_expected" + localconf_add_section_and_option "$SUDO" $file_localconf post-install $file_conf2 conf2_t2 conf2_t2_opt1 conf2_t2_val1 + result=`cat $file_localconf` + result_expected=`cat $file_localconf_expected` + assert_equal "$result" "$result_expected" + rm -f $file_localconf $file_localconf_expected $file_conf1 $file_conf2 +} + +# test that add section and option in specified meta-section +function test_localconf_set { + local file_localconf + local file_localconf_expected + local file_conf1 + local file_conf2 + file_localconf=`mktemp` + file_localconf_expected=`mktemp` + file_conf1=`mktemp` + file_conf2=`mktemp` + cat <<- EOF > $file_localconf +[[local|localrc]] +LOCALRC_VAR1=localrc_val1 +LOCALRC_VAR2=localrc_val2 +LOCALRC_VAR3=localrc_val3 + +[[post-config|$file_conf1]] +[conf1_t1] +conf1_t1_opt1=conf1_t1_val1 +conf1_t1_opt2=conf1_t1_val2 +conf1_t1_opt3=conf1_t1_val3 +[conf1_t2] +conf1_t2_opt1=conf1_t2_val1 +conf1_t2_opt2=conf1_t2_val2 +conf1_t2_opt3=conf1_t2_val3 +[conf1_t3] +conf1_t3_opt1=conf1_t3_val1 +conf1_t3_opt2=conf1_t3_val2 +conf1_t3_opt3=conf1_t3_val3 + +[[post-extra|$file_conf2]] +[conf2_t1] +conf2_t1_opt1=conf2_t1_val1 +conf2_t1_opt2=conf2_t1_val2 +conf2_t1_opt3=conf2_t1_val3 +EOF + cat <<- EOF > $file_localconf_expected +[[local|localrc]] +LOCALRC_VAR1=localrc_val1 +LOCALRC_VAR2=localrc_val2_update +LOCALRC_VAR3=localrc_val3 + +[[post-config|$file_conf1]] +[conf1_t4] +conf1_t4_opt1 = conf1_t4_val1 +[conf1_t1] +conf1_t1_opt1=conf1_t1_val1 +conf1_t1_opt2=conf1_t1_val2 +conf1_t1_opt3=conf1_t1_val3 +[conf1_t2] +conf1_t2_opt1=conf1_t2_val1 +conf1_t2_opt2=conf1_t2_val2 +conf1_t2_opt3=conf1_t2_val3 +[conf1_t3] +conf1_t3_opt1=conf1_t3_val1 +conf1_t3_opt2=conf1_t3_val2 +conf1_t3_opt3=conf1_t3_val3 + +[[post-extra|$file_conf2]] +[conf2_t1] +conf2_t1_opt4 = conf2_t1_val4 +conf2_t1_opt1=conf2_t1_val1 +conf2_t1_opt2=conf2_t1_val2 +conf2_t1_opt3=conf2_t1_val3 + +[[post-install|/etc/neutron/plugin/ml2/ml2_conf.ini]] +[ml2] +ml2_opt1 = ml2_val1 +EOF + + if [[ -n "$SUDO" ]]; then + SUDO_ARG="-sudo" + else + SUDO_ARG="" + fi + localconf_set $SUDO_ARG $file_localconf post-install /etc/neutron/plugin/ml2/ml2_conf.ini ml2 ml2_opt1 ml2_val1 + localconf_set $SUDO_ARG $file_localconf local localrc "" LOCALRC_VAR2 localrc_val2_update + localconf_set $SUDO_ARG $file_localconf post-config $file_conf1 conf1_t4 conf1_t4_opt1 conf1_t4_val1 + localconf_set $SUDO_ARG $file_localconf post-extra $file_conf2 conf2_t1 conf2_t1_opt4 conf2_t1_val4 + result=`cat $file_localconf` + result_expected=`cat $file_localconf_expected` + assert_equal "$result" "$result_expected" + rm -f $file_localconf $file_localconf_expected $file_conf1 $file_conf2 +} + + +test_localconf_has_section +test_localconf_has_option +test_localconf_update_option +test_localconf_add_option 
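# Illustrative sketch of localconf_set, the wrapper exercised above: it updates
# an existing option, adds the option to an existing section, or creates the
# whole meta-section/section as needed. Unlike the lower-level helpers it takes
# an optional "-sudo" flag rather than a sudo command string. The target file
# and option are hypothetical.
localconf_set -sudo /opt/stack/devstack/local.conf post-config /etc/cinder/cinder.conf DEFAULT debug True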
+test_localconf_add_section_and_option +test_localconf_set diff --git a/tests/test_meta_config.sh b/tests/test_meta_config.sh new file mode 100755 index 0000000000..30479f245a --- /dev/null +++ b/tests/test_meta_config.sh @@ -0,0 +1,436 @@ +#!/usr/bin/env bash + +# Tests for DevStack meta-config functions + +TOP=$(cd $(dirname "$0")/.. && pwd) + +# Import config functions +source $TOP/inc/ini-config +source $TOP/inc/meta-config + +set -e + +# check_result() tests and reports the result values +# check_result "actual" "expected" +function check_result { + local actual=$1 + local expected=$2 + if [[ "$actual" == "$expected" ]]; then + echo "OK" + else + echo -e "failed: $actual != $expected\n" + exit 1 + fi +} + +# mock function-common:die so that it does not +# interrupt our test script +function die { + exit -1 +} + +function warn { + return 0 +} + +TEST_1C_ADD="[eee] +type=new +multi = foo2" + +function create_test1c { + cat >test1c.conf <test2a.conf <test-etc/test4.conf <test.conf < ${UNSORTED} + sort ${UNSORTED} > ${SORTED} + + if [ -n "$(diff -c ${UNSORTED} ${SORTED})" ]; then + failed "$p is unsorted" + # output this, it's helpful to see what exactly is unsorted + diff -c ${UNSORTED} ${SORTED} + else + passed "$p is sorted" + fi +done + +rm -rf ${TMPDIR} + +report_results diff --git a/tests/test_refs.sh b/tests/test_refs.sh new file mode 100755 index 0000000000..0f9aa4a5ca --- /dev/null +++ b/tests/test_refs.sh @@ -0,0 +1,24 @@ +#!/bin/bash +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +echo "Ensuring we don't have crazy refs" + +REFS=`grep BRANCH stackrc | grep -v 'TARGET_BRANCH' | grep -v 'NOVNC_BRANCH'` +rc=$? +if [[ $rc -eq 0 ]]; then + echo "Branch defaults must be one of the *TARGET_BRANCH values. Found:" + echo $REFS + exit 1 +fi diff --git a/tests/test_truefalse.sh b/tests/test_truefalse.sh new file mode 100755 index 0000000000..03996ceab4 --- /dev/null +++ b/tests/test_truefalse.sh @@ -0,0 +1,53 @@ +#!/usr/bin/env bash + +# Tests for DevStack meta-config functions + +TOP=$(cd $(dirname "$0")/.. && pwd) + +# Import common functions +source $TOP/functions +source $TOP/tests/unittest.sh + +# common mistake is to use $FOO instead of "FOO"; in that case we +# should die +bash -c "source $TOP/functions-common; VAR=\$(trueorfalse False \$FOO)" &> /dev/null +assert_equal 1 $? 
"missing test-value" + +VAL=$(trueorfalse False MISSING_VARIABLE) +assert_equal "False" $VAL "blank test-value" + +function test_trueorfalse { + local one=1 + local captrue=True + local lowtrue=true + local uppertrue=TRUE + local capyes=Yes + local lowyes=yes + local upperyes=YES + + for default in True False; do + for name in one captrue lowtrue uppertrue capyes lowyes upperyes; do + local msg="trueorfalse($default $name)" + assert_equal "True" $(trueorfalse $default $name) "$msg" + done + done + + local zero=0 + local capfalse=False + local lowfalse=false + local upperfalse=FALSE + local capno=No + local lowno=no + local upperno=NO + + for default in True False; do + for name in zero capfalse lowfalse upperfalse capno lowno upperno; do + local msg="trueorfalse($default $name)" + assert_equal "False" $(trueorfalse $default $name) "$msg" + done + done +} + +test_trueorfalse + +report_results diff --git a/tests/test_vercmp.sh b/tests/test_vercmp.sh new file mode 100755 index 0000000000..c88bf86d7e --- /dev/null +++ b/tests/test_vercmp.sh @@ -0,0 +1,47 @@ +#!/usr/bin/env bash + +# Tests for DevStack vercmp functionality + +TOP=$(cd $(dirname "$0")/.. && pwd) + +# Import common functions +source $TOP/functions +source $TOP/tests/unittest.sh + +assert_true "numeric gt" vercmp 2.0 ">" 1.0 +assert_true "numeric gte" vercmp 2.0 ">=" 1.0 +assert_true "numeric gt" vercmp 1.0.1 ">" 1.0 +assert_true "numeric gte" vercmp 1.0.1 ">=" 1.0 +assert_true "alpha gt" vercmp 1.0.1b ">" 1.0.1a +assert_true "alpha gte" vercmp 1.0.1b ">=" 1.0.1a +assert_true "alpha gt" vercmp b ">" a +assert_true "alpha gte" vercmp b ">=" a +assert_true "alpha gt" vercmp 2.0-rc3 ">" 2.0-rc1 +assert_true "alpha gte" vercmp 2.0-rc3 ">=" 2.0-rc1 + +assert_false "numeric gt fail" vercmp 1.0 ">" 1.0 +assert_true "numeric gte" vercmp 1.0 ">=" 1.0 +assert_false "numeric gt fail" vercmp 0.9 ">" 1.0 +assert_false "numeric gte fail" vercmp 0.9 ">=" 1.0 +assert_false "numeric gt fail" vercmp 0.9.9 ">" 1.0 +assert_false "numeric gte fail" vercmp 0.9.9 ">=" 1.0 +assert_false "numeric gt fail" vercmp 0.9a.9 ">" 1.0.1 +assert_false "numeric gte fail" vercmp 0.9a.9 ">=" 1.0.1 + +assert_false "numeric lt" vercmp 1.0 "<" 1.0 +assert_true "numeric lte" vercmp 1.0 "<=" 1.0 +assert_true "numeric lt" vercmp 1.0 "<" 1.0.1 +assert_true "numeric lte" vercmp 1.0 "<=" 1.0.1 +assert_true "alpha lt" vercmp 1.0.1a "<" 1.0.1b +assert_true "alpha lte" vercmp 1.0.1a "<=" 1.0.1b +assert_true "alpha lt" vercmp a "<" b +assert_true "alpha lte" vercmp a "<=" b +assert_true "alpha lt" vercmp 2.0-rc1 "<" 2.0-rc3 +assert_true "alpha lte" vercmp 2.0-rc1 "<=" 2.0-rc3 + +assert_true "eq" vercmp 1.0 "==" 1.0 +assert_true "eq" vercmp 1.0.1 "==" 1.0.1 +assert_false "eq fail" vercmp 1.0.1 "==" 1.0.2 +assert_false "eq fail" vercmp 2.0-rc1 "==" 2.0-rc2 + +report_results diff --git a/tests/test_worlddump.sh b/tests/test_worlddump.sh new file mode 100755 index 0000000000..919652536d --- /dev/null +++ b/tests/test_worlddump.sh @@ -0,0 +1,35 @@ +#!/bin/bash + +# Simple test of worlddump.py + +TOP=$(cd $(dirname "$0")/.. && pwd) + +source $TOP/tests/unittest.sh + +OUT_DIR=$(mktemp -d) + +${PYTHON} $TOP/tools/worlddump.py -d $OUT_DIR + +if [[ $? -ne 0 ]]; then + fail "worlddump failed" +else + + # worlddump creates just one output file + OUT_FILE=($OUT_DIR/*.txt) + + if [ ! 
-r $OUT_FILE ]; then + failed "worlddump output not seen" + else + passed "worlddump output $OUT_FILE" + + if [[ $(stat -c %s $OUT_DIR/*.txt) -gt 0 ]]; then + passed "worlddump output is not zero sized" + fi + + # put more extensive examination here, if required. + fi +fi + +rm -rf $OUT_DIR + +report_results diff --git a/tests/test_write_devstack_local_conf_role.sh b/tests/test_write_devstack_local_conf_role.sh new file mode 100755 index 0000000000..71d8d51614 --- /dev/null +++ b/tests/test_write_devstack_local_conf_role.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash + +TOP=$(cd $(dirname "$0")/.. && pwd) + +# Import common functions +source $TOP/functions +source $TOP/tests/unittest.sh + +${PYTHON} $TOP/roles/write-devstack-local-conf/library/test.py diff --git a/tests/unittest.sh b/tests/unittest.sh new file mode 100644 index 0000000000..fced2abe65 --- /dev/null +++ b/tests/unittest.sh @@ -0,0 +1,155 @@ +#!/usr/bin/env bash + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# we always start with no errors +ERROR=0 +PASS=0 +FAILED_FUNCS="" + +export PYTHON=$(which python3 2>/dev/null) + +# pass a test, printing out MSG +# usage: passed message +function passed { + local lineno + lineno=$(caller 0 | awk '{print $1}') + local function + function=$(caller 0 | awk '{print $2}') + local msg="$1" + if [ -z "$msg" ]; then + msg="OK" + fi + PASS=$((PASS+1)) + echo "PASS: $function:L$lineno - $msg" +} + +# fail a test, printing out MSG +# usage: failed message +function failed { + local lineno + lineno=$(caller 0 | awk '{print $1}') + local function + function=$(caller 0 | awk '{print $2}') + local msg="$1" + FAILED_FUNCS+="$function:L$lineno\n" + echo "ERROR: $function:L$lineno!" + echo " $msg" + ERROR=$((ERROR+1)) +} + +# assert string comparison of val1 equal val2, printing out msg +# usage: assert_equal val1 val2 msg +function assert_equal { + local lineno + lineno=`caller 0 | awk '{print $1}'` + local function + function=`caller 0 | awk '{print $2}'` + local msg=$3 + + if [ -z "$msg" ]; then + msg="OK" + fi + if [[ "$1" != "$2" ]]; then + FAILED_FUNCS+="$function:L$lineno\n" + echo "ERROR: $1 != $2 in $function:L$lineno!" + echo " $msg" + ERROR=$((ERROR+1)) + else + PASS=$((PASS+1)) + echo "PASS: $function:L$lineno - $msg" + fi +} + +# assert variable is empty/blank, printing out msg +# usage: assert_empty VAR msg +function assert_empty { + local lineno + lineno=`caller 0 | awk '{print $1}'` + local function + function=`caller 0 | awk '{print $2}'` + local msg=$2 + + if [ -z "$msg" ]; then + msg="OK" + fi + if [[ ! -z ${!1} ]]; then + FAILED_FUNCS+="$function:L$lineno\n" + echo "ERROR: $1 not empty in $function:L$lineno!" + echo " $msg" + ERROR=$((ERROR+1)) + else + PASS=$((PASS+1)) + echo "PASS: $function:L$lineno - $msg" + fi +} + +# assert the arguments evaluate to true +# assert_true "message" arg1 arg2 +function assert_true { + local lineno + lineno=`caller 0 | awk '{print $1}'` + local function + function=`caller 0 | awk '{print $2}'` + local msg=$1 + shift + + $@ + if [ $? 
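# A short sketch of the calling conventions exercised by test_truefalse.sh and
# test_vercmp.sh above: trueorfalse takes a default plus a variable *name*
# (passing $VAR instead of VAR is the misuse the first check guards against),
# and vercmp compares two version strings with <, <=, ==, >= or >. The variable
# and version values here are hypothetical.
VERBOSE=yes
VERBOSE=$(trueorfalse True VERBOSE)     # yields "True"; note: name, not $VERBOSE
if vercmp "2.0-rc3" ">=" "2.0-rc1"; then
    echo "new enough"
fi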
-eq 0 ]; then + PASS=$((PASS+1)) + echo "PASS: $function:L$lineno - $msg" + else + FAILED_FUNCS+="$function:L$lineno\n" + echo "ERROR: test failed in $function:L$lineno!" + echo " $msg" + ERROR=$((ERROR+1)) + fi +} + +# assert the arguments evaluate to false +# assert_false "message" arg1 arg2 +function assert_false { + local lineno + lineno=`caller 0 | awk '{print $1}'` + local function + function=`caller 0 | awk '{print $2}'` + local msg=$1 + shift + + $@ + if [ $? -eq 0 ]; then + FAILED_FUNCS+="$function:L$lineno\n" + echo "ERROR: test failed in $function:L$lineno!" + echo " $msg" + ERROR=$((ERROR+1)) + else + PASS=$((PASS+1)) + echo "PASS: $function:L$lineno - $msg" + fi +} + + +# Print a summary of passing and failing tests and exit +# (with an error if we have failed tests) +# usage: report_results +function report_results { + echo "$PASS Tests PASSED" + if [[ $ERROR -gt 0 ]]; then + echo + echo "The following $ERROR tests FAILED" + echo -e "$FAILED_FUNCS" + echo "---" + exit 1 + fi + exit 0 +} diff --git a/tools/build_bm.sh b/tools/build_bm.sh deleted file mode 100755 index 44cf3030ee..0000000000 --- a/tools/build_bm.sh +++ /dev/null @@ -1,28 +0,0 @@ -#!/usr/bin/env bash -# Build an OpenStack install on a bare metal machine. -set +x - -# Source params -source ./stackrc - -# Param string to pass to stack.sh. Like "EC2_DMZ_HOST=192.168.1.1 MYSQL_USER=nova" -STACKSH_PARAMS=${STACKSH_PARAMS:-} - -# Option to use the version of devstack on which we are currently working -USE_CURRENT_DEVSTACK=${USE_CURRENT_DEVSTACK:-1} - -# Configure the runner -RUN_SH=`mktemp` -cat > $RUN_SH <$CONFIG_CONF_TMP <$CONFIG_INI_TMP <$CONFIG_INI_TMP <>$CONFIG_INI_TMP <$CFG <$PXEDIR/stack-initrd.gz -fi -cp -pu $PXEDIR/stack-initrd.gz $DEST_DIR/ubuntu - -if [ ! -r $PXEDIR/vmlinuz-*-generic ]; then - MNTDIR=`mktemp -d --tmpdir mntXXXXXXXX` - mount -t ext4 -o loop $PXEDIR/stack-initrd.img $MNTDIR - - if [ ! -r $MNTDIR/boot/vmlinuz-*-generic ]; then - echo "No kernel found" - umount $MNTDIR - rmdir $MNTDIR - exit 1 - else - cp -pu $MNTDIR/boot/vmlinuz-*-generic $PXEDIR - fi - umount $MNTDIR - rmdir $MNTDIR -fi - -# Get generic kernel version -KNAME=`basename $PXEDIR/vmlinuz-*-generic` -KVER=${KNAME#vmlinuz-} -cp -pu $PXEDIR/vmlinuz-$KVER $DEST_DIR/ubuntu -cat >>$CFG <>$CFG <>$CFG <> $MNTDIR/etc/sudoers - - umount $MNTDIR - rmdir $MNTDIR - qemu-nbd -d $NBD - NBD="" - mv $DEV_FILE_TMP $DEV_FILE -fi -rm -f $DEV_FILE_TMP - -# clone git repositories onto the system -# ====================================== - -IMG_FILE_TMP=`mktemp $IMG_FILE.XXXXXX` - -if [ ! -r $IMG_FILE ]; then - NBD=`map_nbd $DEV_FILE` - - # Pre-create the image file - # FIXME(dt): This should really get the partition size to - # pre-create the image file - dd if=/dev/zero of=$IMG_FILE_TMP bs=1 count=1 seek=$((2*1024*1024*1024)) - # Create filesystem image for RAM disk - dd if=${NBD}p1 of=$IMG_FILE_TMP bs=1M - - qemu-nbd -d $NBD - NBD="" - mv $IMG_FILE_TMP $IMG_FILE -fi -rm -f $IMG_FILE_TMP - -MNTDIR=`mktemp -d --tmpdir mntXXXXXXXX` -mount -t ext4 -o loop $IMG_FILE $MNTDIR -cp -p /etc/resolv.conf $MNTDIR/etc/resolv.conf - -# We need to install a non-virtual kernel and modules to boot from -if [ ! -r "`ls $MNTDIR/boot/vmlinuz-*-generic | head -1`" ]; then - chroot $MNTDIR apt-get install -y linux-generic -fi - -# git clone only if directory doesn't exist already. Since ``DEST`` might not -# be owned by the installation user, we create the directory and change the -# ownership to the proper user. 
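# A minimal sketch of a new test script built on tests/unittest.sh, mirroring
# the test files added in this change; the assertions themselves are
# placeholders, not part of the patch.
TOP=$(cd $(dirname "$0")/.. && pwd)
source $TOP/tests/unittest.sh
assert_equal "a" "a" "strings compare equal"
assert_true "true returns success" true
assert_false "false returns failure" false
report_results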
-function git_clone { - - # clone new copy or fetch latest changes - CHECKOUT=${MNTDIR}$2 - if [ ! -d $CHECKOUT ]; then - mkdir -p $CHECKOUT - git clone $1 $CHECKOUT - else - pushd $CHECKOUT - git fetch - popd - fi - - # FIXME(ja): checkout specified version (should works for branches and tags) - - pushd $CHECKOUT - # checkout the proper branch/tag - git checkout $3 - # force our local version to be the same as the remote version - git reset --hard origin/$3 - popd - - # give ownership to the stack user - chroot $MNTDIR chown -R stack $2 -} - -git_clone $NOVA_REPO $DEST/nova $NOVA_BRANCH -git_clone $GLANCE_REPO $DEST/glance $GLANCE_BRANCH -git_clone $KEYSTONE_REPO $DEST/keystone $KEYSTONE_BRANCH -git_clone $NOVNC_REPO $DEST/novnc $NOVNC_BRANCH -git_clone $HORIZON_REPO $DEST/horizon $HORIZON_BRANCH -git_clone $NOVACLIENT_REPO $DEST/python-novaclient $NOVACLIENT_BRANCH -git_clone $OPENSTACKX_REPO $DEST/openstackx $OPENSTACKX_BRANCH - -# Use this version of devstack -rm -rf $MNTDIR/$DEST/devstack -cp -pr $CWD $MNTDIR/$DEST/devstack -chroot $MNTDIR chown -R stack $DEST/devstack - -# Configure host network for DHCP -mkdir -p $MNTDIR/etc/network -cat > $MNTDIR/etc/network/interfaces <$MNTDIR/etc/hostname -echo "127.0.0.1 localhost ramstack" >$MNTDIR/etc/hosts - -# Configure the runner -RUN_SH=$MNTDIR/$DEST/run.sh -cat > $RUN_SH < $DEST/run.sh.log -echo >> $DEST/run.sh.log -echo >> $DEST/run.sh.log -echo "All done! Time to start clicking." >> $DEST/run.sh.log -EOF - -# Make the run.sh executable -chmod 755 $RUN_SH -chroot $MNTDIR chown stack $DEST/run.sh - -umount $MNTDIR -rmdir $MNTDIR diff --git a/tools/build_uec.sh b/tools/build_uec.sh deleted file mode 100755 index 8167105025..0000000000 --- a/tools/build_uec.sh +++ /dev/null @@ -1,300 +0,0 @@ -#!/usr/bin/env bash - -# Make sure that we have the proper version of ubuntu (only works on oneiric) -if ! egrep -q "oneiric" /etc/lsb-release; then - echo "This script only works with ubuntu oneiric." - exit 1 -fi - -# Keep track of the current directory -TOOLS_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=`cd $TOOLS_DIR/..; pwd` - -cd $TOP_DIR - -# Source params -source ./stackrc - -# Ubuntu distro to install -DIST_NAME=${DIST_NAME:-oneiric} - -# Configure how large the VM should be -GUEST_SIZE=${GUEST_SIZE:-10G} - -# exit on error to stop unexpected errors -set -o errexit -set -o xtrace - -# Abort if localrc is not set -if [ ! -e $TOP_DIR/localrc ]; then - echo "You must have a localrc with ALL necessary passwords defined before proceeding." - echo "See stack.sh for required passwords." - exit 1 -fi - -# Install deps if needed -DEPS="kvm libvirt-bin kpartx cloud-utils curl" -apt-get install -y --force-yes $DEPS || true # allow this to fail gracefully for concurrent builds - -# Where to store files and instances -WORK_DIR=${WORK_DIR:-/opt/uecstack} - -# Where to store images -image_dir=$WORK_DIR/images/$DIST_NAME -mkdir -p $image_dir - -# Start over with a clean base image, if desired -if [ $CLEAN_BASE ]; then - rm -f $image_dir/disk -fi - -# Get the base image if it does not yet exist -if [ ! -e $image_dir/disk ]; then - $TOOLS_DIR/get_uec_image.sh -r $GUEST_SIZE $DIST_NAME $image_dir/disk $image_dir/kernel -fi - -# Copy over dev environment if COPY_ENV is set. -# This will also copy over your current devstack. -if [ $COPY_ENV ]; then - cd $TOOLS_DIR - ./copy_dev_environment_to_uec.sh $image_dir/disk -fi - -# Option to warm the base image with software requirements. 
-if [ $WARM_CACHE ]; then - cd $TOOLS_DIR - ./warm_apts_and_pips_for_uec.sh $image_dir/disk -fi - -# Name of our instance, used by libvirt -GUEST_NAME=${GUEST_NAME:-devstack} - -# Mop up after previous runs -virsh destroy $GUEST_NAME || true - -# Where this vm is stored -vm_dir=$WORK_DIR/instances/$GUEST_NAME - -# Create vm dir and remove old disk -mkdir -p $vm_dir -rm -f $vm_dir/disk - -# Create a copy of the base image -qemu-img create -f qcow2 -b $image_dir/disk $vm_dir/disk - -# Back to devstack -cd $TOP_DIR - -GUEST_NETWORK=${GUEST_NETWORK:-1} -GUEST_RECREATE_NET=${GUEST_RECREATE_NET:-yes} -GUEST_IP=${GUEST_IP:-192.168.$GUEST_NETWORK.50} -GUEST_CIDR=${GUEST_CIDR:-$GUEST_IP/24} -GUEST_NETMASK=${GUEST_NETMASK:-255.255.255.0} -GUEST_GATEWAY=${GUEST_GATEWAY:-192.168.$GUEST_NETWORK.1} -GUEST_MAC=${GUEST_MAC:-"02:16:3e:07:69:`printf '%02X' $GUEST_NETWORK`"} -GUEST_RAM=${GUEST_RAM:-1524288} -GUEST_CORES=${GUEST_CORES:-1} - -# libvirt.xml configuration -NET_XML=$vm_dir/net.xml -NET_NAME=${NET_NAME:-devstack-$GUEST_NETWORK} -cat > $NET_XML < - $NET_NAME - - - - - - - - -EOF - -if [[ "$GUEST_RECREATE_NET" == "yes" ]]; then - virsh net-destroy $NET_NAME || true - # destroying the network isn't enough to delete the leases - rm -f /var/lib/libvirt/dnsmasq/$NET_NAME.leases - virsh net-create $vm_dir/net.xml -fi - -# libvirt.xml configuration -LIBVIRT_XML=$vm_dir/libvirt.xml -cat > $LIBVIRT_XML < - $GUEST_NAME - $GUEST_RAM - - hvm - $image_dir/kernel - root=/dev/vda ro console=ttyS0 init=/usr/lib/cloud-init/uncloud-init ds=nocloud-net;s=http://192.168.$GUEST_NETWORK.1:4567/ ubuntu-pass=ubuntu - - - - - - $GUEST_CORES - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -EOF - - -rm -rf $vm_dir/uec -cp -r $TOOLS_DIR/uec $vm_dir/uec - -# set metadata -cat > $vm_dir/uec/meta-data< $vm_dir/uec/user-data< localrc <> $vm_dir/uec/user-data< /opt/stack/.ssh/authorized_keys -chown -R stack /opt/stack -chmod 700 /opt/stack/.ssh -chmod 600 /opt/stack/.ssh/authorized_keys - -grep -q "^#includedir.*/etc/sudoers.d" /etc/sudoers || - echo "#includedir /etc/sudoers.d" >> /etc/sudoers -( umask 226 && echo "stack ALL=(ALL) NOPASSWD:ALL" \ - > /etc/sudoers.d/50_stack_sh ) -EOF -fi - -# Run stack.sh -cat >> $vm_dir/uec/user-data< $MNT_DIR/etc/network/interfaces <$MNT_DIR/etc/hostname -echo "127.0.0.1 localhost ramstack" >$MNT_DIR/etc/hosts - -# Configure the runner -RUN_SH=$MNT_DIR/$DEST/run.sh -cat > $RUN_SH < $DEST/run.sh.log -echo >> $DEST/run.sh.log -echo >> $DEST/run.sh.log -echo "All done! Time to start clicking." 
>> $DEST/run.sh.log -EOF - -# Make the run.sh executable -chmod 755 $RUN_SH -chroot $MNT_DIR chown stack $DEST/run.sh - -umount $MNT_DIR/dev -umount $MNT_DIR -rmdir $MNT_DIR -mv $DEST_FILE_TMP $DEST_FILE -rm -f $DEST_FILE_TMP - -trap - SIGHUP SIGINT SIGTERM SIGQUIT EXIT diff --git a/tools/build_usb_boot.sh b/tools/build_usb_boot.sh deleted file mode 100755 index cca2a681d6..0000000000 --- a/tools/build_usb_boot.sh +++ /dev/null @@ -1,146 +0,0 @@ -#!/bin/bash -e -# build_usb_boot.sh - Create a syslinux boot environment -# -# build_usb_boot.sh destdev -# -# Assumes syslinux is installed -# Needs to run as root - -DEST_DIR=${1:-/tmp/syslinux-boot} -PXEDIR=${PXEDIR:-/opt/ramstack/pxe} -PROGDIR=`dirname $0` - -# Clean up any resources that may be in use -cleanup() { - set +o errexit - - # Mop up temporary files - if [ -n "$DEST_DEV" ]; then - umount $DEST_DIR - rmdir $DEST_DIR - fi - if [ -n "$MNTDIR" -a -d "$MNTDIR" ]; then - umount $MNTDIR - rmdir $MNTDIR - fi - - # Kill ourselves to signal any calling process - trap 2; kill -2 $$ -} - -trap cleanup SIGHUP SIGINT SIGTERM SIGQUIT EXIT - -# Keep track of the current directory -TOOLS_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=`cd $TOOLS_DIR/..; pwd` - -if [ -b $DEST_DIR ]; then - # We have a block device, install syslinux and mount it - DEST_DEV=$DEST_DIR - DEST_DIR=`mktemp -d --tmpdir mntXXXXXX` - mount $DEST_DEV $DEST_DIR - - if [ ! -d $DEST_DIR/syslinux ]; then - mkdir -p $DEST_DIR/syslinux - fi - - # Install syslinux on the device - syslinux --install --directory syslinux $DEST_DEV -else - # We have a directory (for sanity checking output) - DEST_DEV="" - if [ ! -d $DEST_DIR/syslinux ]; then - mkdir -p $DEST_DIR/syslinux - fi -fi - -# Get some more stuff from syslinux -for i in memdisk menu.c32; do - cp -pu /usr/lib/syslinux/$i $DEST_DIR/syslinux -done - -CFG=$DEST_DIR/syslinux/syslinux.cfg -cat >$CFG <$PXEDIR/stack-initrd.gz -fi -cp -pu $PXEDIR/stack-initrd.gz $DEST_DIR/ubuntu - -if [ ! -r $PXEDIR/vmlinuz-*-generic ]; then - MNTDIR=`mktemp -d --tmpdir mntXXXXXXXX` - mount -t ext4 -o loop $PXEDIR/stack-initrd.img $MNTDIR - - if [ ! -r $MNTDIR/boot/vmlinuz-*-generic ]; then - echo "No kernel found" - umount $MNTDIR - rmdir $MNTDIR - if [ -n "$DEST_DEV" ]; then - umount $DEST_DIR - rmdir $DEST_DIR - fi - exit 1 - else - cp -pu $MNTDIR/boot/vmlinuz-*-generic $PXEDIR - fi - umount $MNTDIR - rmdir $MNTDIR -fi - -# Get generic kernel version -KNAME=`basename $PXEDIR/vmlinuz-*-generic` -KVER=${KNAME#vmlinuz-} -cp -pu $PXEDIR/vmlinuz-$KVER $DEST_DIR/ubuntu -cat >>$CFG <>$CFG <>$CFG < $STAGING_DIR/etc/sudoers.d/50_stack_sh ) - -# Gracefully cp only if source file/dir exists -function cp_it { - if [ -e $1 ] || [ -d $1 ]; then - cp -pRL $1 $2 - fi -} - -# Copy over your ssh keys and env if desired -cp_it ~/.ssh $STAGING_DIR/$DEST/.ssh -cp_it ~/.ssh/id_rsa.pub $STAGING_DIR/$DEST/.ssh/authorized_keys -cp_it ~/.gitconfig $STAGING_DIR/$DEST/.gitconfig -cp_it ~/.vimrc $STAGING_DIR/$DEST/.vimrc -cp_it ~/.bashrc $STAGING_DIR/$DEST/.bashrc - -# Copy devstack -rm -rf $STAGING_DIR/$DEST/devstack -cp_it . 
$STAGING_DIR/$DEST/devstack - -# Give stack ownership over $DEST so it may do the work needed -chroot $STAGING_DIR chown -R stack $DEST - -# Unmount -umount $STAGING_DIR diff --git a/tools/create-stack-user.sh b/tools/create-stack-user.sh new file mode 100755 index 0000000000..cb8d7aa328 --- /dev/null +++ b/tools/create-stack-user.sh @@ -0,0 +1,63 @@ +#!/usr/bin/env bash + +# **create-stack-user.sh** + +# Create a user account suitable for running DevStack +# - create a group named $STACK_USER if it does not exist +# - create a user named $STACK_USER if it does not exist +# +# - home is $DEST +# +# - configure sudo for $STACK_USER + +# ``stack.sh`` was never intended to run as root. It had a hack to do what is +# now in this script and re-launch itself, but that hack was less than perfect +# and it was time for this nonsense to stop. Run this script as root to create +# the user and configure sudo. + +set -o errexit + +# Keep track of the DevStack directory +TOP_DIR=$(cd $(dirname "$0")/.. && pwd) + +# Import common functions +source $TOP_DIR/functions + +# Determine what system we are running on. This provides ``os_VENDOR``, +# ``os_RELEASE``, ``os_PACKAGE``, ``os_CODENAME`` +# and ``DISTRO`` +GetDistro + +# Needed to get ``ENABLED_SERVICES`` and ``STACK_USER`` +source $TOP_DIR/stackrc + +# Give the non-root user the ability to run as **root** via ``sudo`` +is_package_installed sudo || is_package_installed sudo-ldap || install_package sudo + +[[ -z "$STACK_USER" ]] && die "STACK_USER is not set. Exiting." + +if ! getent group $STACK_USER >/dev/null; then + echo "Creating a group called $STACK_USER" + groupadd $STACK_USER +fi + +if ! getent passwd $STACK_USER >/dev/null; then + echo "Creating a user called $STACK_USER" + useradd -g $STACK_USER -s /bin/bash -d $DEST -m $STACK_USER + # RHEL based distros create home dir with 700 permissions, + # And Ubuntu 21.04+ with 750, i.e missing executable + # permission for either group or others + # Devstack deploy will have issues with this, fix it by + # adding executable permission + if [[ $(stat -c '%A' $DEST|grep -o x|wc -l) -lt 3 ]]; then + echo "Executable permission missing for $DEST, adding it" + chmod +x $DEST + fi +fi + +echo "Giving stack user passwordless sudo privileges" +# UEC images ``/etc/sudoers`` does not have a ``#includedir``, add one +grep -q "^#includedir.*/etc/sudoers.d" /etc/sudoers || + echo "#includedir /etc/sudoers.d" >> /etc/sudoers +( umask 226 && echo "$STACK_USER ALL=(ALL) NOPASSWD:ALL" \ + > /etc/sudoers.d/50_stack_sh ) diff --git a/tools/create_userrc.sh b/tools/create_userrc.sh new file mode 100755 index 0000000000..f4a4edcbe2 --- /dev/null +++ b/tools/create_userrc.sh @@ -0,0 +1,271 @@ +#!/usr/bin/env bash + +# **create_userrc.sh** + +# Pre-create rc files and credentials for the default users. + +# Warning: This script just for development purposes + +set -o errexit + +# short_source prints out the current location of the caller in a way +# that strips redundant directories. This is useful for PS4 +# usage. Needed before we start tracing due to how we set +# PS4. Normally we'd pick this up from stackrc, but that's not sourced +# here. +function short_source { + saveIFS=$IFS + IFS=" " + called=($(caller 0)) + IFS=$saveIFS + file=${called[2]} + file=${file#$RC_DIR/} + printf "%-40s " "$file:${called[1]}:${called[0]}" +} +# PS4 is exported to child shells and uses the 'short_source' function, so +# export it so child shells have access to the 'short_source' function also. 
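# Typical hand-run use of tools/create-stack-user.sh above on a fresh host,
# before running stack.sh as the new account. STACK_USER commonly defaults to
# "stack"; the exact name comes from stackrc, so treat "stack" here as an
# example.
sudo ./tools/create-stack-user.sh
sudo -u stack -i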
+export -f short_source + +set -o xtrace + +ACCOUNT_DIR=./accrc + +function display_help { +cat < + +This script creates certificates and sourcable rc files per project/user. + +Target account directory hierarchy: +target_dir-| + |-cacert.pem + |-project1-name| + | |- user1 + | |- user1-cert.pem + | |- user1-pk.pem + | |- user2 + | .. + |-project2-name.. + .. + +Optional Arguments +-P include password to the rc files; with -A it assume all users password is the same +-A try with all user +-u create files just for the specified user +-C create user and project, the specifid project will be the user's project +-r when combined with -C and the (-u) user exists it will be the user's project role in the (-C)project (default: Member) +-p password for the user +--heat-url +--os-username +--os-password +--os-project-name +--os-project-id +--os-user-domain-id +--os-user-domain-name +--os-project-domain-id +--os-project-domain-name +--os-auth-url +--os-cacert +--target-dir +--skip-project +--debug + +Example: +$0 -AP +$0 -P -C myproject -u myuser -p mypass +EOF +} + +if ! options=$(getopt -o hPAp:u:r:C: -l os-username:,os-password:,os-tenant-id:,os-tenant-name:,os-project-name:,os-project-id:,os-project-domain-id:,os-project-domain-name:,os-user-domain-id:,os-user-domain-name:,os-auth-url:,target-dir:,heat-url:,skip-project:,os-cacert:,help,debug -- "$@"); then + display_help + exit 1 +fi +eval set -- $options +ADDPASS="" +HEAT_URL="" + +# The services users usually in the service project. +# rc files for service users, is out of scope. +# Supporting different project for services is out of scope. +SKIP_PROJECT="service" +MODE="" +ROLE=Member +USER_NAME="" +USER_PASS="" +while [ $# -gt 0 ]; do + case "$1" in + -h|--help) display_help; exit 0 ;; + --os-username) export OS_USERNAME=$2; shift ;; + --os-password) export OS_PASSWORD=$2; shift ;; + --os-tenant-name) export OS_PROJECT_NAME=$2; shift ;; + --os-tenant-id) export OS_PROJECT_ID=$2; shift ;; + --os-project-name) export OS_PROJECT_NAME=$2; shift ;; + --os-project-id) export OS_PROJECT_ID=$2; shift ;; + --os-user-domain-id) export OS_USER_DOMAIN_ID=$2; shift ;; + --os-user-domain-name) export OS_USER_DOMAIN_NAME=$2; shift ;; + --os-project-domain-id) export OS_PROJECT_DOMAIN_ID=$2; shift ;; + --os-project-domain-name) export OS_PROJECT_DOMAIN_NAME=$2; shift ;; + --skip-tenant) SKIP_PROJECT="$SKIP_PROJECT$2,"; shift ;; + --skip-project) SKIP_PROJECT="$SKIP_PROJECT$2,"; shift ;; + --os-auth-url) export OS_AUTH_URL=$2; shift ;; + --os-cacert) export OS_CACERT=$2; shift ;; + --target-dir) ACCOUNT_DIR=$2; shift ;; + --heat-url) HEAT_URL=$2; shift ;; + --debug) set -o xtrace ;; + -u) MODE=${MODE:-one}; USER_NAME=$2; shift ;; + -p) USER_PASS=$2; shift ;; + -A) MODE=all; ;; + -P) ADDPASS="yes" ;; + -C) MODE=create; PROJECT=$2; shift ;; + -r) ROLE=$2; shift ;; + (--) shift; break ;; + (-*) echo "$0: error - unrecognized option $1" >&2; display_help; exit 1 ;; + (*) echo "$0: error - unexpected argument $1" >&2; display_help; exit 1 ;; + esac + shift +done + +if [ -z "$OS_PASSWORD" ]; then + if [ -z "$ADMIN_PASSWORD" ];then + echo "The admin password is required option!" 
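# The two invocations from the help text above, plus sourcing one of the
# generated rc files. "myproject"/"myuser" are the example names from the help
# output and accrc is the default target directory.
./tools/create_userrc.sh -AP
./tools/create_userrc.sh -P -C myproject -u myuser -p mypass
source accrc/myproject/myuser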
>&2 + exit 2 + else + OS_PASSWORD=$ADMIN_PASSWORD + fi +fi + +if [ -z "$OS_PROJECT_ID" -a "$OS_TENANT_ID" ]; then + export OS_PROJECT_ID=$OS_TENANT_ID +fi + +if [ -z "$OS_PROJECT_NAME" -a "$OS_TENANT_NAME" ]; then + export OS_PROJECT_NAME=$OS_TENANT_NAME +fi + +if [ -z "$OS_PROJECT_NAME" -a -z "$OS_PROJECT_ID" ]; then + export OS_PROJECT_NAME=admin +fi + +if [ -z "$OS_USERNAME" ]; then + export OS_USERNAME=admin +fi + +if [ -z "$OS_AUTH_URL" ]; then + export OS_AUTH_URL=http://localhost:5000/v3/ +fi + +if [ -z "$OS_USER_DOMAIN_ID" -a -z "$OS_USER_DOMAIN_NAME" ]; then + # purposefully not exported as it would force v3 auth within this file. + OS_USER_DOMAIN_ID=default +fi + +if [ -z "$OS_PROJECT_DOMAIN_ID" -a -z "$OS_PROJECT_DOMAIN_NAME" ]; then + # purposefully not exported as it would force v3 auth within this file. + OS_PROJECT_DOMAIN_ID=default +fi + +USER_PASS=${USER_PASS:-$OS_PASSWORD} +USER_NAME=${USER_NAME:-$OS_USERNAME} + +if [ -z "$MODE" ]; then + echo "You must specify at least -A or -u parameter!" >&2 + echo + display_help + exit 3 +fi + +function add_entry { + local user_id=$1 + local user_name=$2 + local project_id=$3 + local project_name=$4 + local user_passwd=$5 + + mkdir -p "$ACCOUNT_DIR/$project_name" + local rcfile="$ACCOUNT_DIR/$project_name/$user_name" + + cat >"$rcfile" <>"$rcfile" + fi + if [ -n "$HEAT_URL" ]; then + echo "export HEAT_URL=\"$HEAT_URL/$project_id\"" >>"$rcfile" + echo "export OS_NO_CLIENT_AUTH=True" >>"$rcfile" + fi + for v in OS_USER_DOMAIN_ID OS_USER_DOMAIN_NAME OS_PROJECT_DOMAIN_ID OS_PROJECT_DOMAIN_NAME; do + if [ ${!v} ]; then + echo "export $v=${!v}" >>"$rcfile" + else + echo "unset $v" >>"$rcfile" + fi + done +} + +#admin users expected +function create_or_get_project { + local name=$1 + local id + eval $(openstack project show -f shell -c id $name) + if [[ -z $id ]]; then + eval $(openstack project create -f shell -c id $name) + fi + echo $id +} + +function create_or_get_role { + local name=$1 + local id + eval $(openstack role show -f shell -c id $name) + if [[ -z $id ]]; then + eval $(openstack role create -f shell -c id $name) + fi + echo $id +} + +# Provides empty string when the user does not exists +function get_user_id { + openstack user list | grep " $1 " | cut -d " " -f2 +} + +if [ $MODE != "create" ]; then + # looks like I can't ask for all project related to a specified user + openstack project list --long --quote none -f csv | grep ',True' | grep -v "${SKIP_PROJECT}" | while IFS=, read project_id project_name desc enabled; do + openstack user list --project $project_id --long --quote none -f csv | grep ',True' | while IFS=, read user_id user_name project email enabled; do + if [ $MODE = one -a "$user_name" != "$USER_NAME" ]; then + continue; + fi + + # Checks for a specific password defined for an user. 
+ # Example for an username johndoe: JOHNDOE_PASSWORD=1234 + # This mechanism is used by lib/swift + eval SPECIFIC_UPASSWORD="\$${user_name}_password" + if [ -n "$SPECIFIC_UPASSWORD" ]; then + USER_PASS=$SPECIFIC_UPASSWORD + fi + add_entry "$user_id" "$user_name" "$project_id" "$project_name" "$USER_PASS" + done + done +else + project_name=$PROJECT + project_id=$(create_or_get_project "$PROJECT") + user_name=$USER_NAME + user_id=`get_user_id $user_name` + if [ -z "$user_id" ]; then + eval $(openstack user create "$user_name" --project "$project_id" --password "$USER_PASS" --email "$user_name@example.com" -f shell -c id) + user_id=$id + add_entry "$user_id" "$user_name" "$project_id" "$project_name" "$USER_PASS" + else + role_id=$(create_or_get_role "$ROLE") + openstack role add "$role_id" --user "$user_id" --project "$project_id" + add_entry "$user_id" "$user_name" "$project_id" "$project_name" "$USER_PASS" + fi +fi diff --git a/tools/dbcounter/dbcounter.py b/tools/dbcounter/dbcounter.py new file mode 100644 index 0000000000..86e5529c97 --- /dev/null +++ b/tools/dbcounter/dbcounter.py @@ -0,0 +1,121 @@ +import json +import logging +import os +import threading +import time +import queue + +import sqlalchemy +from sqlalchemy.engine import CreateEnginePlugin +from sqlalchemy import event + +# https://docs.sqlalchemy.org/en/14/core/connections.html? +# highlight=createengineplugin#sqlalchemy.engine.CreateEnginePlugin + +LOG = logging.getLogger(__name__) + +# The theory of operation here is that we register this plugin with +# sqlalchemy via an entry_point. It gets loaded by virtue of plugin= +# being in the database connection URL, which gives us an opportunity +# to hook the engines that get created. +# +# We opportunistically spawn a thread, which we feed "hits" to over a +# queue, and which occasionally writes those hits to a special +# database called 'stats'. We access that database with the same user, +# pass, and host as the main connection URL for simplicity. + + +class LogCursorEventsPlugin(CreateEnginePlugin): + def __init__(self, url, kwargs): + self.db_name = url.database + LOG.info('Registered counter for database %s' % self.db_name) + new_url = sqlalchemy.engine.URL.create(url.drivername, + url.username, + url.password, + url.host, + url.port, + 'stats') + + self.engine = sqlalchemy.create_engine(new_url) + self.queue = queue.Queue() + self.thread = None + + def update_url(self, url): + return url.difference_update_query(["dbcounter"]) + + def engine_created(self, engine): + """Hook the engine creation process. + + This is the plug point for the sqlalchemy plugin. Using + plugin=$this in the URL causes this method to be called when + the engine is created, giving us a chance to hook it below. + """ + event.listen(engine, "before_cursor_execute", self._log_event) + + def ensure_writer_thread(self): + self.thread = threading.Thread(target=self.stat_writer, daemon=True) + self.thread.start() + + def _log_event(self, conn, cursor, statement, parameters, context, + executemany): + """Queue a "hit" for this operation to be recorded. + + Attepts to determine the operation by the first word of the + statement, or 'OTHER' if it cannot be determined. + """ + + # Start our thread if not running. If we were forked after the + # engine was created and this plugin was associated, our + # writer thread is gone, so respawn. 
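# The dbcounter plugin above is activated by appending plugin=dbcounter to a
# SQLAlchemy connection URL, as its module comment describes. A hedged sketch
# with hypothetical credentials, host and database name:
DATABASE_URL="mysql+pymysql://root:secretpassword@127.0.0.1/nova?charset=utf8&plugin=dbcounter"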
+ if not self.thread or not self.thread.is_alive(): + self.ensure_writer_thread() + + try: + op = statement.strip().split(' ', 1)[0] or 'OTHER' + except Exception: + op = 'OTHER' + + self.queue.put((self.db_name, op)) + + def do_incr(self, db, op, count): + """Increment the counter for (db,op) by count.""" + + query = sqlalchemy.text('INSERT INTO queries (db, op, count) ' + ' VALUES (:db, :op, :count) ' + ' ON DUPLICATE KEY UPDATE count=count+:count') + try: + with self.engine.begin() as conn: + r = conn.execute(query, {'db': db, 'op': op, 'count': count}) + except Exception as e: + LOG.error('Failed to account for access to database %r: %s', + db, e) + + def stat_writer(self): + """Consume messages from the queue and write them in batches. + + This reads "hists" from from a queue fed by _log_event() and + writes (db,op)+=count stats to the database after ten seconds + of no activity to avoid triggering a write for every SELECT + call. Write no less often than every sixty seconds to avoid being + starved by constant activity. + """ + LOG.debug('[%i] Writer thread running' % os.getpid()) + while True: + to_write = {} + last = time.time() + while time.time() - last < 60: + try: + item = self.queue.get(timeout=10) + to_write.setdefault(item, 0) + to_write[item] += 1 + except queue.Empty: + break + + if to_write: + LOG.debug('[%i] Writing DB stats %s' % ( + os.getpid(), + ','.join(['%s:%s=%i' % (db, op, count) + for (db, op), count in to_write.items()]))) + + for (db, op), count in to_write.items(): + self.do_incr(db, op, count) diff --git a/tools/dbcounter/pyproject.toml b/tools/dbcounter/pyproject.toml new file mode 100644 index 0000000000..d74d688997 --- /dev/null +++ b/tools/dbcounter/pyproject.toml @@ -0,0 +1,3 @@ +[build-system] +requires = ["sqlalchemy", "setuptools>=42"] +build-backend = "setuptools.build_meta" \ No newline at end of file diff --git a/tools/dbcounter/setup.cfg b/tools/dbcounter/setup.cfg new file mode 100644 index 0000000000..12300bf619 --- /dev/null +++ b/tools/dbcounter/setup.cfg @@ -0,0 +1,14 @@ +[metadata] +name = dbcounter +author = Dan Smith +author_email = dms@danplanet.com +version = 0.1 +description = A teeny tiny dbcounter plugin for use with devstack +url = http://github.com/openstack/devstack +license = Apache + +[options] +py_modules = dbcounter +entry_points = + [sqlalchemy.plugins] + dbcounter = dbcounter:LogCursorEventsPlugin diff --git a/tools/debug_function.sh b/tools/debug_function.sh new file mode 100755 index 0000000000..68bd85dc61 --- /dev/null +++ b/tools/debug_function.sh @@ -0,0 +1,24 @@ +#!/bin/bash + +# This is a small helper to speed development and debug with devstack. +# It is intended to help you run a single function in a project module +# without having to re-stack. +# +# For example, to run the just start_glance function, do this: +# +# ./tools/debug_function.sh glance start_glance + +if [ ! -f "lib/$1" ]; then + echo "Usage: $0 [project] [function] [function...]" +fi + +source stackrc +source lib/$1 +shift +set -x +while [ "$1" ]; do + echo ==== Running $1 ==== + $1 + echo ==== Done with $1 ==== + shift +done diff --git a/tools/discover_hosts.sh b/tools/discover_hosts.sh new file mode 100755 index 0000000000..4ec6a40511 --- /dev/null +++ b/tools/discover_hosts.sh @@ -0,0 +1,20 @@ +#!/usr/bin/env bash + +# **discover_hosts.sh** + +# This is just a very simple script to run the +# "nova-manage cell_v2 discover_hosts" command +# which is needed to discover compute nodes and +# register them with a parent cell in Nova. 
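# The per-(db,op) counters written by do_incr above land in the "queries" table
# of the "stats" database and can be inspected directly. This assumes a MySQL
# client and the same credentials the engine URL uses; the password variable is
# hypothetical.
mysql -u root -p"$DATABASE_PASSWORD" -e 'SELECT * FROM stats.queries ORDER BY count DESC;'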
+# This assumes that /etc/nova/nova.conf exists +# and has the following entries filled in: +# +# [api_database] +# connection = This is the URL to the nova_api database +# +# In other words this should be run on the primary +# (API) node in a multi-node setup. + +if [[ -x $(which nova-manage) ]]; then + nova-manage cell_v2 discover_hosts --verbose +fi diff --git a/tools/dstat.sh b/tools/dstat.sh new file mode 100755 index 0000000000..e6cbb0f21c --- /dev/null +++ b/tools/dstat.sh @@ -0,0 +1,41 @@ +#!/bin/bash + +# **tools/dstat.sh** - Execute instances of DStat to log system load info +# +# Multiple instances of DStat are executed in order to take advantage of +# incompatible features, particularly CSV output and the "top-cpu-adv" and +# "top-io-adv" flags. +# +# Assumes: +# - dstat command is installed + +# Retrieve log directory as argument from calling script. +LOGDIR=$1 + +DSTAT_TOP_OPTS="--top-cpu-adv --top-io-adv --top-mem" +if dstat --version | grep -q 'pcp-dstat' ; then + # dstat is unmaintained, and moving to a plugin of performance + # co-pilot. Fedora 29 for example has rolled this out. It's + # mostly compatible, except for a few options which are not + # implemented (yet?) + DSTAT_TOP_OPTS="" +fi + +# Command line arguments for primary DStat process. +DSTAT_OPTS="-tcmndrylpg ${DSTAT_TOP_OPTS} --swap --tcp" + +# Command-line arguments for secondary background DStat process. +DSTAT_CSV_OPTS="-tcmndrylpg --tcp --output $LOGDIR/dstat-csv.log" + +# Execute and background the secondary dstat process and discard its output. +dstat $DSTAT_CSV_OPTS >& /dev/null & + +# Execute and background the primary dstat process, but keep its output in this +# TTY. +dstat $DSTAT_OPTS & + +# Catch any exit signals, making sure to also terminate any child processes. +trap "kill -- -$$" EXIT + +# Keep this script running as long as child dstat processes are alive. +wait diff --git a/tools/file_tracker.sh b/tools/file_tracker.sh new file mode 100755 index 0000000000..9c31b30a56 --- /dev/null +++ b/tools/file_tracker.sh @@ -0,0 +1,47 @@ +#!/bin/bash +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
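# tools/dstat.sh above expects the log directory as its only argument and
# needs the dstat command installed; a hand-run sketch using the usual DevStack
# log path (any writable directory works):
mkdir -p /opt/stack/logs
./tools/dstat.sh /opt/stack/logs &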
+ +set -o errexit + +# time to sleep between checks +SLEEP_TIME=20 + +function tracker { + echo "Number of open files | Number of open files not in use | Maximum number of files allowed to be opened" + while true; do + cat /proc/sys/fs/file-nr + sleep $SLEEP_TIME + done +} + +function usage { + echo "Usage: $0 [-x] [-s N]" 1>&2 + exit 1 +} + +while getopts ":s:x" opt; do + case $opt in + s) + SLEEP_TIME=$OPTARG + ;; + x) + set -o xtrace + ;; + *) + usage + ;; + esac +done + +tracker diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh new file mode 100755 index 0000000000..9e2818f2cc --- /dev/null +++ b/tools/fixup_stuff.sh @@ -0,0 +1,113 @@ +#!/usr/bin/env bash + +# **fixup_stuff.sh** + +# fixup_stuff.sh +# +# All distro and package specific hacks go in here + + +# If ``TOP_DIR`` is set we're being sourced rather than running stand-alone +# or in a sub-shell +if [[ -z "$TOP_DIR" ]]; then + set -o errexit + set -o xtrace + + # Keep track of the current directory + TOOLS_DIR=$(cd $(dirname "$0") && pwd) + TOP_DIR=$(cd $TOOLS_DIR/..; pwd) + + # Change dir to top of DevStack + cd $TOP_DIR + + # Import common functions + source $TOP_DIR/functions + + FILES=$TOP_DIR/files +fi + +# Python Packages +# --------------- + +function fixup_fedora { + if ! is_fedora; then + return + fi + # Disable selinux to avoid configuring to allow Apache access + # to Horizon files (LP#1175444) + if selinuxenabled; then + #persit selinux config across reboots + cat << EOF | sudo tee /etc/selinux/config +SELINUX=permissive +SELINUXTYPE=targeted +EOF + # then disable at runtime + sudo setenforce 0 + fi + + FORCE_FIREWALLD=$(trueorfalse False FORCE_FIREWALLD) + if [[ $FORCE_FIREWALLD == "False" ]]; then + # On Fedora 20 firewalld interacts badly with libvirt and + # slows things down significantly (this issue was fixed in + # later fedoras). There was also an additional issue with + # firewalld hanging after install of libvirt with polkit [1]. + # firewalld also causes problems with neturon+ipv6 [2] + # + # Note we do the same as the RDO packages and stop & disable, + # rather than remove. This is because other packages might + # have the dependency [3][4]. + # + # [1] https://bugzilla.redhat.com/show_bug.cgi?id=1099031 + # [2] https://bugs.launchpad.net/neutron/+bug/1455303 + # [3] https://github.com/redhat-openstack/openstack-puppet-modules/blob/master/firewall/manifests/linux/redhat.pp + # [4] https://docs.openstack.org/devstack/latest/guides/neutron.html + if is_package_installed firewalld; then + sudo systemctl disable firewalld + # The iptables service files are no longer included by default, + # at least on a baremetal Fedora 21 Server install. + install_package iptables-services + sudo systemctl enable iptables + sudo systemctl stop firewalld + sudo systemctl start iptables + fi + fi + + # Since pip10, pip will refuse to uninstall files from packages + # that were created with distutils (rather than more modern + # setuptools). This is because it technically doesn't have a + # manifest of what to remove. However, in most cases, simply + # overwriting works. So this hacks around those packages that + # have been dragged in by some other system dependency + sudo rm -rf /usr/lib64/python3*/site-packages/PyYAML-*.egg-info + + # After updating setuptools based on the requirements, the files from the + # python3-setuptools RPM are deleted, it breaks some tools such as semanage + # (used in diskimage-builder) that use the -s flag of the python + # interpreter, enforcing the use of the packages from /usr/lib. 
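# tools/file_tracker.sh above accepts -s for the poll interval in seconds and
# -x to enable xtrace, per its getopts loop; a quick hand-run sketch polling
# every five seconds:
./tools/file_tracker.sh -s 5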
+ # Importing setuptools in a such environment fails. + # Enforce the package re-installation to fix those applications. + if is_package_installed python3-setuptools; then + sudo dnf reinstall -y python3-setuptools + fi +} + +function fixup_ubuntu { + if ! is_ubuntu; then + return + fi + + # Since pip10, pip will refuse to uninstall files from packages + # that were created with distutils (rather than more modern + # setuptools). This is because it technically doesn't have a + # manifest of what to remove. However, in most cases, simply + # overwriting works. So this hacks around those packages that + # have been dragged in by some other system dependency + sudo rm -rf /usr/lib/python3/dist-packages/PyYAML-*.egg-info + sudo rm -rf /usr/lib/python3/dist-packages/pyasn1_modules-*.egg-info + sudo rm -rf /usr/lib/python3/dist-packages/simplejson-*.egg-info +} + +function fixup_all { + fixup_ubuntu + fixup_fedora +} diff --git a/tools/generate-devstack-plugins-list.py b/tools/generate-devstack-plugins-list.py new file mode 100644 index 0000000000..bc28515a26 --- /dev/null +++ b/tools/generate-devstack-plugins-list.py @@ -0,0 +1,86 @@ +#! /usr/bin/env python3 + +# Copyright 2016 Hewlett Packard Enterprise Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# This script is intended to be run as part of a periodic proposal bot +# job in OpenStack infrastructure. +# +# In order to function correctly, the environment in which the +# script runs must have +# * network access to the review.opendev.org Gerrit API +# working directory +# * network access to https://opendev.org/ + +import functools +import logging +import json +import requests + +from requests.adapters import HTTPAdapter +from requests.packages.urllib3.util.retry import Retry + +logging.basicConfig(level=logging.DEBUG) + +url = 'https://review.opendev.org/projects/' + +# This is what a project looks like +''' + "openstack-attic/akanda": { + "id": "openstack-attic%2Fakanda", + "state": "READ_ONLY" + }, +''' + +def is_in_wanted_namespace(proj): + # only interested in openstack or x namespace (e.g. not retired + # stackforge, etc). + # + # openstack/openstack "super-repo" of openstack projects as + # submodules, that can cause gitea to 500 timeout and thus stop + # this script. Skip it. 
+ if proj.startswith('stackforge/') or \ + proj.startswith('stackforge-attic/') or \ + proj == "openstack/openstack": + return False + else: + return True + +# Check if this project has a plugin file +def has_devstack_plugin(session, proj): + # Don't link in the deb packaging repos + if "openstack/deb-" in proj: + return False + r = session.get("https://opendev.org/%s/raw/branch/master/devstack/plugin.sh" % proj) + return r.status_code == 200 + +logging.debug("Getting project list from %s" % url) +r = requests.get(url) +projects = sorted(filter(is_in_wanted_namespace, json.loads(r.text[4:]))) +logging.debug("Found %d projects" % len(projects)) + +s = requests.Session() +# sometimes gitea gives us a 500 error; retry sanely +# https://stackoverflow.com/a/35636367 +# We need to disable raise_on_status because if any repo endup with 500 then +# propose-updates job which run this script will fail. +retries = Retry(total=3, backoff_factor=1, + status_forcelist=[ 500 ], + raise_on_status=False) +s.mount('https://', HTTPAdapter(max_retries=retries)) + +found_plugins = filter(functools.partial(has_devstack_plugin, s), projects) + +for project in found_plugins: + print(project) diff --git a/tools/generate-devstack-plugins-list.sh b/tools/generate-devstack-plugins-list.sh new file mode 100755 index 0000000000..3307943df9 --- /dev/null +++ b/tools/generate-devstack-plugins-list.sh @@ -0,0 +1,91 @@ +#!/bin/bash -ex + +# Copyright 2016 Hewlett Packard Enterprise Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# This script is intended to be run as a periodic proposal bot job +# in OpenStack infrastructure, though you can run it as a one-off. +# +# In order to function correctly, the environment in which the +# script runs must have +# * a writable doc/source directory relative to the current +# working directory +# AND ( ( +# * git +# * all git repos meant to be searched for plugins cloned and +# at the desired level of up-to-datedness +# * the environment variable git_dir pointing to the location +# * of said git repositories +# ) OR ( +# * network access to the review.opendev.org Gerrit API +# working directory +# * network access to https://opendev.org +# )) +# +# If a file named data/devstack-plugins-registry.header or +# data/devstack-plugins-registry.footer is found relative to the +# current working directory, it will be prepended or appended to +# the generated reStructuredText plugins table respectively. + +# Print the title underline for a RST table. 
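# has_devstack_plugin above reduces to a single HTTP probe against the opendev
# raw-file URL; the same check can be reproduced by hand. The repository name
# is only an example.
curl -sf -o /dev/null "https://opendev.org/openstack/devstack-plugin-ceph/raw/branch/master/devstack/plugin.sh" \
    && echo "has a devstack plugin"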
Argument is the length +# of the first column, second column is assumed to be "URL" +function title_underline { + local len=$1 + while [[ $len -gt 0 ]]; do + printf "=" + len=$(( len - 1)) + done + printf " ===\n" +} + +( +if [[ -r data/devstack-plugins-registry.header ]]; then + cat data/devstack-plugins-registry.header +fi + +sorted_plugins=$(python3 tools/generate-devstack-plugins-list.py) + +# find the length of the name column & pad +name_col_len=$(echo "${sorted_plugins}" | wc -L) +name_col_len=$(( name_col_len + 2 )) + +# ====================== === +# Plugin Name URL +# ====================== === +# foobar `https://... `__ +# ... + +printf "\n\n" +title_underline ${name_col_len} +printf "%-${name_col_len}s %s\n" "Plugin Name" "URL" +title_underline ${name_col_len} + +for plugin in ${sorted_plugins}; do + giturl="https://opendev.org/${plugin}" + gitlink="https://opendev.org/${plugin}" + printf "%-${name_col_len}s %s\n" "${plugin}" "\`${giturl} <${gitlink}>\`__" +done + +title_underline ${name_col_len} + +printf "\n\n" + +if [[ -r data/devstack-plugins-registry.footer ]]; then + cat data/devstack-plugins-registry.footer +fi +) > doc/source/plugin-registry.rst + +if [[ -n ${1} ]]; then + cp doc/source/plugin-registry.rst ${1}/doc/source/plugin-registry.rst +fi diff --git a/tools/get-stats.py b/tools/get-stats.py new file mode 100755 index 0000000000..b958af61b2 --- /dev/null +++ b/tools/get-stats.py @@ -0,0 +1,220 @@ +#!/usr/bin/python3 + +import argparse +import csv +import datetime +import glob +import itertools +import json +import logging +import os +import re +import socket +import subprocess +import sys + +try: + import psutil +except ImportError: + psutil = None + print('No psutil, process information will not be included', + file=sys.stderr) + +try: + import pymysql +except ImportError: + pymysql = None + print('No pymysql, database information will not be included', + file=sys.stderr) + +LOG = logging.getLogger('perf') + +# https://www.elastic.co/blog/found-crash-elasticsearch#mapping-explosion + + +def tryint(value): + try: + return int(value) + except (ValueError, TypeError): + return value + + +def get_service_stats(service): + stats = {'MemoryCurrent': 0} + output = subprocess.check_output(['/usr/bin/systemctl', 'show', service] + + ['-p%s' % stat for stat in stats]) + for line in output.decode().split('\n'): + if not line: + continue + stat, val = line.split('=') + stats[stat] = tryint(val) + + return stats + + +def get_services_stats(): + services = [os.path.basename(s) for s in + glob.glob('/etc/systemd/system/devstack@*.service')] + \ + ['apache2.service'] + return [dict(service=service, **get_service_stats(service)) + for service in services] + + +def get_process_stats(proc): + cmdline = proc.cmdline() + if 'python' in cmdline[0]: + cmdline = cmdline[1:] + return {'cmd': cmdline[0], + 'pid': proc.pid, + 'args': ' '.join(cmdline[1:]), + 'rss': proc.memory_info().rss} + + +def get_processes_stats(matches): + me = os.getpid() + procs = psutil.process_iter() + + def proc_matches(proc): + return me != proc.pid and any( + re.search(match, ' '.join(proc.cmdline())) + for match in matches) + + return [ + get_process_stats(proc) + for proc in procs + if proc_matches(proc)] + + +def get_db_stats(host, user, passwd): + dbs = [] + try: + db = pymysql.connect(host=host, user=user, password=passwd, + database='stats', + cursorclass=pymysql.cursors.DictCursor) + except pymysql.err.OperationalError as e: + if 'Unknown database' in str(e): + print('No stats database; assuming devstack 
failed', + file=sys.stderr) + return [] + raise + + with db: + with db.cursor() as cur: + cur.execute('SELECT db,op,count FROM queries') + for row in cur: + dbs.append({k: tryint(v) for k, v in row.items()}) + return dbs + + +def get_http_stats_for_log(logfile): + stats = {} + apache_fields = ('host', 'a', 'b', 'date', 'tz', 'request', 'status', + 'length', 'c', 'agent') + ignore_agents = ('curl', 'uwsgi', 'nova-status') + ignored_services = set() + for line in csv.reader(open(logfile), delimiter=' '): + fields = dict(zip(apache_fields, line)) + if len(fields) != len(apache_fields): + # Not a combined access log, so we can bail completely + return [] + try: + method, url, http = fields['request'].split(' ') + except ValueError: + method = url = http = '' + if 'HTTP' not in http: + # Not a combined access log, so we can bail completely + return [] + + # Tempest's User-Agent is unchanged, but client libraries and + # inter-service API calls use proper strings. So assume + # 'python-urllib' is tempest so we can tell it apart. + if 'python-urllib' in fields['agent'].lower(): + agent = 'tempest' + else: + agent = fields['agent'].split(' ')[0] + if agent.startswith('python-'): + agent = agent.replace('python-', '') + if '/' in agent: + agent = agent.split('/')[0] + + if agent in ignore_agents: + continue + + try: + service, rest = url.strip('/').split('/', 1) + except ValueError: + # Root calls like "GET /identity" + service = url.strip('/') + rest = '' + + if not service.isalpha(): + ignored_services.add(service) + continue + + method_key = '%s-%s' % (agent, method) + try: + length = int(fields['length']) + except ValueError: + LOG.warning('[%s] Failed to parse length %r from line %r' % ( + logfile, fields['length'], line)) + length = 0 + stats.setdefault(service, {'largest': 0}) + stats[service].setdefault(method_key, 0) + stats[service][method_key] += 1 + stats[service]['largest'] = max(stats[service]['largest'], + length) + + if ignored_services: + LOG.warning('Ignored services: %s' % ','.join( + sorted(ignored_services))) + + # Flatten this for ES + return [{'service': service, 'log': os.path.basename(logfile), + **vals} + for service, vals in stats.items()] + + +def get_http_stats(logfiles): + return list(itertools.chain.from_iterable(get_http_stats_for_log(log) + for log in logfiles)) + + +def get_report_info(): + return { + 'timestamp': datetime.datetime.now().isoformat(), + 'hostname': socket.gethostname(), + 'version': 2, + } + + +if __name__ == '__main__': + process_defaults = ['privsep', 'mysqld', 'erlang', 'etcd'] + parser = argparse.ArgumentParser() + parser.add_argument('--db-user', default='root', + help=('MySQL user for collecting stats ' + '(default: "root")')) + parser.add_argument('--db-pass', default=None, + help='MySQL password for db-user') + parser.add_argument('--db-host', default='localhost', + help='MySQL hostname') + parser.add_argument('--apache-log', action='append', default=[], + help='Collect API call stats from this apache log') + parser.add_argument('--process', action='append', + default=process_defaults, + help=('Include process stats for this cmdline regex ' + '(default is %s)' % ','.join(process_defaults))) + args = parser.parse_args() + + logging.basicConfig(level=logging.WARNING) + + data = { + 'services': get_services_stats(), + 'db': pymysql and args.db_pass and get_db_stats(args.db_host, + args.db_user, + args.db_pass) or [], + 'processes': psutil and get_processes_stats(args.process) or [], + 'api': get_http_stats(args.apache_log), + 'report': 
get_report_info(), + } + + print(json.dumps(data, indent=2)) diff --git a/tools/get_uec_image.sh b/tools/get_uec_image.sh deleted file mode 100755 index f66f2bc2fb..0000000000 --- a/tools/get_uec_image.sh +++ /dev/null @@ -1,103 +0,0 @@ -#!/bin/bash -# get_uec_image.sh - Prepare Ubuntu UEC images - -CACHEDIR=${CACHEDIR:-/opt/stack/cache} -ROOTSIZE=${ROOTSIZE:-2000} - -# Keep track of the current directory -TOOLS_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=`cd $TOOLS_DIR/..; pwd` - -# exit on error to stop unexpected errors -set -o errexit -set -o xtrace - -usage() { - echo "Usage: $0 - Fetch and prepare Ubuntu images" - echo "" - echo "$0 [-r rootsize] release imagefile [kernel]" - echo "" - echo "-r size - root fs size (min 2000MB)" - echo "release - Ubuntu release: jaunty - oneric" - echo "imagefile - output image file" - echo "kernel - output kernel" - exit 1 -} - -# Clean up any resources that may be in use -cleanup() { - set +o errexit - - # Mop up temporary files - if [ -n "$IMG_FILE_TMP" -a -e "$IMG_FILE_TMP" ]; then - rm -f $IMG_FILE_TMP - fi - - # Kill ourselves to signal any calling process - trap 2; kill -2 $$ -} - -while getopts hr: c; do - case $c in - h) usage - ;; - r) ROOTSIZE=$OPTARG - ;; - esac -done -shift `expr $OPTIND - 1` - -if [[ ! "$#" -eq "2" && ! "$#" -eq "3" ]]; then - usage -fi - -# Default args -DIST_NAME=$1 -IMG_FILE=$2 -IMG_FILE_TMP=`mktemp $IMG_FILE.XXXXXX` -KERNEL=$3 - -case $DIST_NAME in - oneiric) ;; - natty) ;; - maverick) ;; - lucid) ;; - *) echo "Unknown release: $DIST_NAME" - usage - ;; -esac - -trap cleanup SIGHUP SIGINT SIGTERM SIGQUIT EXIT - -# Check dependencies -if [ ! -x "`which qemu-img`" -o -z "`dpkg -l | grep cloud-utils`" ]; then - # Missing KVM? - apt_get install qemu-kvm cloud-utils -fi - -# Find resize script -RESIZE=`which resize-part-image || which uec-resize-image` -if [ -z "$RESIZE" ]; then - echo "resize tool from cloud-utils not found" - exit 1 -fi - -# Get the UEC image -UEC_NAME=$DIST_NAME-server-cloudimg-amd64 -if [ ! -d $CACHEDIR ]; then - mkdir -p $CACHEDIR/$DIST_NAME -fi -if [ ! -e $CACHEDIR/$DIST_NAME/$UEC_NAME.tar.gz ]; then - (cd $CACHEDIR/$DIST_NAME && wget -N http://uec-images.ubuntu.com/$DIST_NAME/current/$UEC_NAME.tar.gz) - (cd $CACHEDIR/$DIST_NAME && tar Sxvzf $UEC_NAME.tar.gz) -fi - -$RESIZE $CACHEDIR/$DIST_NAME/$UEC_NAME.img ${ROOTSIZE} $IMG_FILE_TMP -mv $IMG_FILE_TMP $IMG_FILE - -# Copy kernel to destination -if [ -n "$KERNEL" ]; then - cp -p $CACHEDIR/$DIST_NAME/*-vmlinuz-virtual $KERNEL -fi - -trap - SIGHUP SIGINT SIGTERM SIGQUIT EXIT diff --git a/tools/image_list.sh b/tools/image_list.sh new file mode 100755 index 0000000000..81231be9f3 --- /dev/null +++ b/tools/image_list.sh @@ -0,0 +1,59 @@ +#!/bin/bash + +# Print out a list of image and other files to download for caching. +# This is mostly used by the OpenStack infrasturucture during daily +# image builds to save the large images to /opt/cache/files (see [1]) +# +# The two lists of URL's downloaded are the IMAGE_URLS and +# EXTRA_CACHE_URLS, which are setup in stackrc +# +# [1] project-config:nodepool/elements/cache-devstack/extra-data.d/55-cache-devstack-repos + +# Keep track of the DevStack directory +TOP_DIR=$(cd $(dirname "$0")/.. && pwd) + +# The following "source" implicitly calls get_default_host_ip() in +# stackrc and will die if the selected default IP happens to lie +# in the default ranges for FIXED_RANGE or FLOATING_RANGE. 
Since we +# do not really need HOST_IP to be properly set in the remainder of +# this script, just set it to some dummy value and make stackrc happy. +HOST_IP=SKIP +source $TOP_DIR/functions + +# Possible virt drivers, if we have more, add them here. Always keep +# dummy in the end position to trigger the fall through case. +DRIVERS="openvz ironic libvirt vsphere dummy" + +# Extra variables to trigger getting additional images. +export ENABLED_SERVICES="h-api,tr-api" +HEAT_FETCHED_TEST_IMAGE="Fedora-i386-20-20131211.1-sda" +PRECACHE_IMAGES=True + +# Loop over all the virt drivers and collect all the possible images +ALL_IMAGES="" +for driver in $DRIVERS; do + VIRT_DRIVER=$driver + URLS=$(source $TOP_DIR/stackrc && echo $IMAGE_URLS) + if [[ ! -z "$ALL_IMAGES" ]]; then + ALL_IMAGES+=, + fi + ALL_IMAGES+=$URLS +done + +# Sanity check - ensure we have a minimum number of images +num=$(echo $ALL_IMAGES | tr ',' '\n' | sort | uniq | wc -l) +if [[ "$num" -lt 4 ]]; then + echo "ERROR: We only found $num images in $ALL_IMAGES, which can't be right." + exit 1 +fi + +# This is extra non-image files that we want pre-cached. This is kept +# in a separate list because devstack loops over the IMAGE_LIST to +# upload files glance and these aren't images. (This was a bit of an +# after-thought which is why the naming around this is very +# image-centric) +URLS=$(source $TOP_DIR/stackrc && echo $EXTRA_CACHE_URLS) +ALL_IMAGES+=$URLS + +# Make a nice combined list +echo $ALL_IMAGES | tr ',' '\n' | sort | uniq diff --git a/tools/info.sh b/tools/info.sh new file mode 100755 index 0000000000..282667f9d0 --- /dev/null +++ b/tools/info.sh @@ -0,0 +1,149 @@ +#!/usr/bin/env bash + +# **info.sh** + +# Produce a report on the state of DevStack installs +# +# Output fields are separated with '|' chars +# Output types are git,localrc,os,pip,pkg: +# +# git||[] +# localrc|= +# os|= +# pip|| +# pkg|| + +function usage { + echo "$0 - Report on the DevStack configuration" + echo "" + echo "Usage: $0" + exit 1 +} + +if [ "$1" = "-h" ]; then + usage +fi + +# Keep track of the current directory +TOOLS_DIR=$(cd $(dirname "$0") && pwd) +TOP_DIR=$(cd $TOOLS_DIR/..; pwd) +cd $TOP_DIR + +# Import common functions +source $TOP_DIR/functions + +# Source params +source $TOP_DIR/stackrc + +DEST=${DEST:-/opt/stack} +FILES=$TOP_DIR/files +if [[ ! -d $FILES ]]; then + echo "ERROR: missing devstack/files - did you grab more than just stack.sh?" 
+ exit 1 +fi + + +# OS +# -- + +# Determine what OS we're using +GetDistro + +echo "os|distro=$DISTRO" +echo "os|vendor=$os_VENDOR" +echo "os|release=$os_RELEASE" + +# Repos +# ----- + +# git_report +function git_report { + local dir=$1 + local proj ref branch head + if [[ -d $dir/.git ]]; then + pushd $dir >/dev/null + proj=$(basename $dir) + ref=$(git symbolic-ref HEAD) + branch=${ref##refs/heads/} + head=$(git show-branch --sha1-name $branch | cut -d' ' -f1) + echo "git|${proj}|${branch}${head}" + popd >/dev/null + fi +} + +for i in $DEST/*; do + if [[ -d $i ]]; then + git_report $i + fi +done + + +# Packages +# -------- + +# - Only check packages for the services enabled +# - Parse version info from the package metadata, not the package/file names + +for p in $(get_packages $ENABLED_SERVICES); do + if [[ "$os_PACKAGE" = "deb" ]]; then + ver=$(dpkg -s $p 2>/dev/null | grep '^Version: ' | cut -d' ' -f2) + elif [[ "$os_PACKAGE" = "rpm" ]]; then + ver=$(rpm -q --queryformat "%{VERSION}-%{RELEASE}\n" $p) + else + exit_distro_not_supported "finding version of a package" + fi + echo "pkg|${p}|${ver}" +done + + +# Pips +# ---- + +CMD_PIP=$(get_pip_command) + +# Pip tells us what is currently installed +FREEZE_FILE=$(mktemp --tmpdir freeze.XXXXXX) +$CMD_PIP freeze >$FREEZE_FILE 2>/dev/null + +# Loop through our requirements and look for matches +while read line; do + if [[ -n "$line" ]]; then + if [[ "$line" =~ \+(.*)@(.*)#egg=(.*) ]]; then + # Handle URLs + p=${BASH_REMATCH[1]} + ver=${BASH_REMATCH[2]} + elif [[ "$line" =~ (.*)[=\<\>]=(.*) ]]; then + # Normal pip packages + p=${BASH_REMATCH[1]} + ver=${BASH_REMATCH[2]} + else + # Unhandled format in freeze file + continue + fi + echo "pip|${p}|${ver}" + else + # No match in freeze file + continue + fi +done <$FREEZE_FILE + +rm $FREEZE_FILE + + +# localrc +# ------- + +# Dump localrc with 'localrc|' prepended and comments and passwords left out +if [[ -r $TOP_DIR/localrc ]]; then + RC=$TOP_DIR/localrc +elif [[ -f $RC_DIR/.localrc.auto ]]; then + RC=$TOP_DIR/.localrc.auto +fi +if [[ -n $RC ]]; then + sed -e ' + /^[ \t]*$/d; + /PASSWORD/s/=.*$/=\/; + /^#/d; + s/^/localrc\|/; + ' $RC +fi diff --git a/tools/install_openvpn.sh b/tools/install_openvpn.sh deleted file mode 100755 index 44eee728e2..0000000000 --- a/tools/install_openvpn.sh +++ /dev/null @@ -1,218 +0,0 @@ -#!/bin/bash -# install_openvpn.sh - Install OpenVPN and generate required certificates -# -# install_openvpn.sh --client name -# install_openvpn.sh --server [name] -# -# name is used on the CN of the generated cert, and the filename of -# the configuration, certificate and key files. -# -# --server mode configures the host with a running OpenVPN server instance -# --client mode creates a tarball of a client configuration for this server - -# Get config file -if [ -e localrc ]; then - . localrc -fi -if [ -e vpnrc ]; then - . 
vpnrc -fi - -# Do some IP manipulation -function cidr2netmask() { - set -- $(( 5 - ($1 / 8) )) 255 255 255 255 $(( (255 << (8 - ($1 % 8))) & 255 )) 0 0 0 - if [[ $1 -gt 1 ]]; then - shift $1 - else - shift - fi - echo ${1-0}.${2-0}.${3-0}.${4-0} -} - -FIXED_NET=`echo $FIXED_RANGE | cut -d'/' -f1` -FIXED_CIDR=`echo $FIXED_RANGE | cut -d'/' -f2` -FIXED_MASK=`cidr2netmask $FIXED_CIDR` - -# VPN Config -VPN_SERVER=${VPN_SERVER:-`ifconfig eth0 | awk "/inet addr:/ { print \$2 }" | cut -d: -f2`} # 50.56.12.212 -VPN_PROTO=${VPN_PROTO:-tcp} -VPN_PORT=${VPN_PORT:-6081} -VPN_DEV=${VPN_DEV:-tap0} -VPN_BRIDGE=${VPN_BRIDGE:-br100} -VPN_BRIDGE_IF=${VPN_BRIDGE_IF:-$FLAT_INTERFACE} -VPN_CLIENT_NET=${VPN_CLIENT_NET:-$FIXED_NET} -VPN_CLIENT_MASK=${VPN_CLIENT_MASK:-$FIXED_MASK} -VPN_CLIENT_DHCP="${VPN_CLIENT_DHCP:-net.1 net.254}" - -VPN_DIR=/etc/openvpn -CA_DIR=$VPN_DIR/easy-rsa - -usage() { - echo "$0 - OpenVPN install and certificate generation" - echo "" - echo "$0 --client name" - echo "$0 --server [name]" - echo "" - echo " --server mode configures the host with a running OpenVPN server instance" - echo " --client mode creates a tarball of a client configuration for this server" - exit 1 -} - -if [ -z $1 ]; then - usage -fi - -# Install OpenVPN -VPN_EXEC=`which openvpn` -if [ -z "$VPN_EXEC" -o ! -x "$VPN_EXEC" ]; then - apt-get install -y openvpn bridge-utils -fi -if [ ! -d $CA_DIR ]; then - cp -pR /usr/share/doc/openvpn/examples/easy-rsa/2.0/ $CA_DIR -fi - -# Keep track of the current directory -TOOLS_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=$(cd $TOOLS_DIR/.. && pwd) - -WEB_DIR=$TOP_DIR/../vpn -if [[ ! -d $WEB_DIR ]]; then - mkdir -p $WEB_DIR -fi -WEB_DIR=$(cd $TOP_DIR/../vpn && pwd) - -cd $CA_DIR -source ./vars - -# Override the defaults -export KEY_COUNTRY="US" -export KEY_PROVINCE="TX" -export KEY_CITY="SanAntonio" -export KEY_ORG="Cloudbuilders" -export KEY_EMAIL="rcb@lists.rackspace.com" - -if [ ! -r $CA_DIR/keys/dh1024.pem ]; then - # Initialize a new CA - $CA_DIR/clean-all - $CA_DIR/build-dh - $CA_DIR/pkitool --initca - openvpn --genkey --secret $CA_DIR/keys/ta.key ## Build a TLS key -fi - -do_server() { - NAME=$1 - # Generate server certificate - $CA_DIR/pkitool --server $NAME - - (cd $CA_DIR/keys; - cp $NAME.crt $NAME.key ca.crt dh1024.pem ta.key $VPN_DIR - ) - cat >$VPN_DIR/br-up <$VPN_DIR/br-down <$VPN_DIR/$NAME.conf <$TMP_DIR/$HOST.conf <$VPN_DIR/hostname - fi - do_server $NAME - ;; - --clean) $CA_DIR/clean-all - ;; - *) usage -esac diff --git a/tools/install_pip.sh b/tools/install_pip.sh new file mode 100755 index 0000000000..027693fc0a --- /dev/null +++ b/tools/install_pip.sh @@ -0,0 +1,149 @@ +#!/usr/bin/env bash + +# **install_pip.sh** + +# Update pip and friends to a known common version + +# Assumptions: +# - PYTHON3_VERSION refers to a version already installed + +set -o errexit + +# Keep track of the current directory +TOOLS_DIR=$(cd $(dirname "$0") && pwd) +TOP_DIR=`cd $TOOLS_DIR/..; pwd` + +# Change dir to top of DevStack +cd $TOP_DIR + +# Import common functions +source $TOP_DIR/stackrc + +# don't start tracing until after we've sourced the world +set -o xtrace + +FILES=$TOP_DIR/files + +# The URL from where the get-pip.py file gets downloaded. If a local +# get-pip.py mirror is available, PIP_GET_PIP_URL can be set to that +# mirror in local.conf to avoid download timeouts. +# Example: +# PIP_GET_PIP_URL="http://local-server/get-pip.py" +# +# Note that if get-pip.py already exists in $FILES this script will +# not re-download or check for a new version. 
For example, this is +# done by openstack-infra diskimage-builder elements as part of image +# preparation [1]. This prevents any network access, which can be +# unreliable in CI situations. +# [1] https://opendev.org/openstack/project-config/src/branch/master/nodepool/elements/cache-devstack/source-repository-pip + +PIP_GET_PIP_URL=${PIP_GET_PIP_URL:-"https://bootstrap.pypa.io/get-pip.py"} + +GetDistro +echo "Distro: $DISTRO" + +function get_versions { + # FIXME(dhellmann): Deal with multiple python versions here? This + # is just used for reporting, so maybe not? + PIP=$(which pip 2>/dev/null || which pip-python 2>/dev/null || which pip3 2>/dev/null || true) + if [[ -n $PIP ]]; then + PIP_VERSION=$($PIP --version | awk '{ print $2}') + echo "pip: $PIP_VERSION" + else + echo "pip: Not Installed" + fi +} + + +function install_get_pip { + _pip_url=$PIP_GET_PIP_URL + _local_pip="$FILES/$(basename $_pip_url)" + + # If get-pip.py isn't python, delete it. This was probably an + # outage on the server. + if [[ -r $_local_pip ]]; then + if ! head -1 $_local_pip | grep -q '#!/usr/bin/env python'; then + echo "WARNING: Corrupt $_local_pip found removing" + rm $_local_pip + fi + fi + + # The OpenStack gate and others put a cached version of get-pip.py + # for this to find, explicitly to avoid download issues. + # + # However, if DevStack *did* download the file, we want to check + # for updates; people can leave their stacks around for a long + # time and in the mean-time pip might get upgraded. + # + # Thus we use curl's "-z" feature to always check the modified + # since and only download if a new version is out -- but only if + # it seems we downloaded the file originally. + if [[ ! -r $_local_pip || -r $_local_pip.downloaded ]]; then + # only test freshness if LOCAL_PIP is actually there, + # otherwise we generate a scary warning. + local timecond="" + if [[ -r $_local_pip ]]; then + timecond="-z $_local_pip" + fi + + curl -f --retry 6 --retry-delay 5 \ + $timecond -o $_local_pip $_pip_url || \ + die $LINENO "Download of get-pip.py failed" + touch $_local_pip.downloaded + fi + sudo -H -E python${PYTHON3_VERSION} $_local_pip +} + + +function configure_pypi_alternative_url { + PIP_ROOT_FOLDER="$HOME/.pip" + PIP_CONFIG_FILE="$PIP_ROOT_FOLDER/pip.conf" + if [[ ! -d $PIP_ROOT_FOLDER ]]; then + echo "Creating $PIP_ROOT_FOLDER" + mkdir $PIP_ROOT_FOLDER + fi + if [[ ! -f $PIP_CONFIG_FILE ]]; then + echo "Creating $PIP_CONFIG_FILE" + touch $PIP_CONFIG_FILE + fi + if ! ini_has_option "$PIP_CONFIG_FILE" "global" "index-url"; then + # It means that the index-url does not exist + iniset "$PIP_CONFIG_FILE" "global" "index-url" "$PYPI_OVERRIDE" + fi + +} + +# Show starting versions +get_versions + +if [[ -n $PYPI_ALTERNATIVE_URL ]]; then + configure_pypi_alternative_url +fi + +if is_fedora && [[ ${DISTRO} == f* || ${DISTRO} == rhel* ]]; then + # get-pip.py will not install over the python3-pip package in + # Fedora 34 any more. + # https://bugzilla.redhat.com/show_bug.cgi?id=1988935 + # https://github.com/pypa/pip/issues/9904 + # You can still install using get-pip.py if python3-pip is *not* + # installed; this *should* remain separate under /usr/local and not break + # if python3-pip is later installed. + # For general sanity, we just use the packaged pip. It should be + # recent enough anyway. 
This is included via rpms/general + : # Simply fall through +elif is_ubuntu; then + # pip on Ubuntu 20.04 and higher is new enough, too + # drop setuptools from u-c + sed -i -e '/setuptools/d' $REQUIREMENTS_DIR/upper-constraints.txt +else + install_get_pip + + # Note setuptools is part of requirements.txt and we want to make sure + # we obey any versioning as described there. + pip_install_gr setuptools +fi + +set -x + + +get_versions diff --git a/tools/install_prereqs.sh b/tools/install_prereqs.sh new file mode 100755 index 0000000000..bb470b2927 --- /dev/null +++ b/tools/install_prereqs.sh @@ -0,0 +1,89 @@ +#!/usr/bin/env bash + +# **install_prereqs.sh** + +# Install system package prerequisites +# +# install_prereqs.sh [-f] +# +# -f Force an install run now + +FORCE_PREREQ=0 + +while getopts ":f" opt; do + case $opt in + f) + FORCE_PREREQ=1 + ;; + esac +done + +# If ``TOP_DIR`` is set we're being sourced rather than running stand-alone +# or in a sub-shell +if [[ -z "$TOP_DIR" ]]; then + # Keep track of the DevStack directory + TOP_DIR=$(cd $(dirname "$0")/.. && pwd) + + # Import common functions + source $TOP_DIR/functions + + # Determine what system we are running on. This provides ``os_VENDOR``, + # ``os_RELEASE``, ``os_PACKAGE``, ``os_CODENAME`` + # and ``DISTRO`` + GetDistro + + # Needed to get ``ENABLED_SERVICES`` + source $TOP_DIR/stackrc + + # Prereq dirs are here + FILES=$TOP_DIR/files +fi + +# Minimum wait time +PREREQ_RERUN_MARKER=${PREREQ_RERUN_MARKER:-$TOP_DIR/.prereqs} +PREREQ_RERUN_HOURS=${PREREQ_RERUN_HOURS:-2} +PREREQ_RERUN_SECONDS=$((60*60*$PREREQ_RERUN_HOURS)) + +NOW=$(date "+%s") +LAST_RUN=$(head -1 $PREREQ_RERUN_MARKER 2>/dev/null || echo "0") +DELTA=$(($NOW - $LAST_RUN)) +if [[ $DELTA -lt $PREREQ_RERUN_SECONDS && -z "$FORCE_PREREQ" ]]; then + echo "Re-run time has not expired ($(($PREREQ_RERUN_SECONDS - $DELTA)) seconds remaining) " + echo "and FORCE_PREREQ not set; exiting..." + return 0 +fi + +# Make sure the proxy config is visible to sub-processes +export_proxy_variables + + +# Install Packages +# ================ + +# Install package requirements +PACKAGES=$(get_packages general,$ENABLED_SERVICES) +PACKAGES="$PACKAGES $(get_plugin_packages)" + +if is_ubuntu && echo $PACKAGES | grep -q dkms ; then + # Ensure headers for the running kernel are installed for any DKMS builds + PACKAGES="$PACKAGES linux-headers-$(uname -r)" +fi + +install_package $PACKAGES + +if [[ -n "$SYSLOG" && "$SYSLOG" != "False" ]]; then + if is_ubuntu || is_fedora; then + install_package rsyslog-relp + else + exit_distro_not_supported "rsyslog-relp installation" + fi +fi + +# TODO(clarkb) remove these once we are switched to global venv by default +export PYTHON=$(which python${PYTHON3_VERSION} 2>/dev/null || which python3 2>/dev/null) + +# Mark end of run +# --------------- + +date "+%s" >$PREREQ_RERUN_MARKER +date >>$PREREQ_RERUN_MARKER diff --git a/tools/jenkins/README.md b/tools/jenkins/README.md deleted file mode 100644 index 371017db1a..0000000000 --- a/tools/jenkins/README.md +++ /dev/null @@ -1,38 +0,0 @@ -Getting Started With Jenkins and Devstack -========================================= -This little corner of devstack is to show how to get an Openstack jenkins -environment up and running quickly, using the rcb configuration methodology. - - -To create a jenkins server --------------------------- - - cd tools/jenkins/jenkins_home - ./build_jenkins.sh - -This will create a jenkins environment configured with sample test scripts that run against xen and kvm. 
- -Configuring XS --------------- -In order to make the tests for XS work, you must install xs 5.6 on a separate machine, -and install the the jenkins public key on that server. You then need to create the -/var/lib/jenkins/xenrc on your jenkins server like so: - - MYSQL_PASSWORD=secrete - SERVICE_TOKEN=secrete - ADMIN_PASSWORD=secrete - RABBIT_PASSWORD=secrete - # This is the password for your guest (for both stack and root users) - GUEST_PASSWORD=secrete - # Do not download the usual images yet! - IMAGE_URLS="" - FLOATING_RANGE=192.168.1.224/28 - VIRT_DRIVER=xenserver - # Explicitly set multi-host - MULTI_HOST=1 - # Give extra time for boot - ACTIVE_TIMEOUT=45 - # IMPORTANT: This is the ip of your xenserver - XEN_IP=10.5.5.1 - # IMPORTANT: The following must be set to your dom0 root password! - XENAPI_PASSWORD='MY_XEN_ROOT_PW' diff --git a/tools/jenkins/adapters/euca.sh b/tools/jenkins/adapters/euca.sh deleted file mode 100755 index b49ce9f21f..0000000000 --- a/tools/jenkins/adapters/euca.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash -# Echo commands, exit on error -set -o xtrace -set -o errexit - -TOP_DIR=$(cd ../../.. && pwd) -HEAD_IP=`cat $TOP_DIR/addresses | grep HEAD | cut -d "=" -f2` -ssh stack@$HEAD_IP 'cd devstack && source openrc && cd exercises && ./euca.sh' diff --git a/tools/jenkins/adapters/floating_ips.sh b/tools/jenkins/adapters/floating_ips.sh deleted file mode 100755 index a97f93578a..0000000000 --- a/tools/jenkins/adapters/floating_ips.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash -# Echo commands, exit on error -set -o xtrace -set -o errexit - -TOP_DIR=$(cd ../../.. && pwd) -HEAD_IP=`cat $TOP_DIR/addresses | grep HEAD | cut -d "=" -f2` -ssh stack@$HEAD_IP 'cd devstack && source openrc && cd exercises && ./floating_ips.sh' diff --git a/tools/jenkins/build_configuration.sh b/tools/jenkins/build_configuration.sh deleted file mode 100755 index e295ef2017..0000000000 --- a/tools/jenkins/build_configuration.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash - -EXECUTOR_NUMBER=$1 -CONFIGURATION=$2 -ADAPTER=$3 -RC=$4 - -function usage() { - echo "Usage: $0 - Build a configuration" - echo "" - echo "$0 [EXECUTOR_NUMBER] [CONFIGURATION] [ADAPTER] [RC (optional)]" - exit 1 -} - -# Validate inputs -if [[ "$EXECUTOR_NUMBER" = "" || "$CONFIGURATION" = "" || "$ADAPTER" = "" ]]; then - usage -fi - -# Execute configuration script -cd configurations && ./$CONFIGURATION.sh $EXECUTOR_NUMBER $CONFIGURATION $ADAPTER "$RC" diff --git a/tools/jenkins/configurations/kvm.sh b/tools/jenkins/configurations/kvm.sh deleted file mode 100755 index 727b42a48e..0000000000 --- a/tools/jenkins/configurations/kvm.sh +++ /dev/null @@ -1,63 +0,0 @@ -#!/bin/bash - -# exit on error to stop unexpected errors -set -o errexit -set -o xtrace - -EXECUTOR_NUMBER=$1 -CONFIGURATION=$2 -ADAPTER=$3 -RC=$4 - -function usage() { - echo "Usage: $0 - Build a test configuration" - echo "" - echo "$0 [EXECUTOR_NUMBER] [CONFIGURATION] [ADAPTER] [RC (optional)]" - exit 1 -} - -# Validate inputs -if [[ "$EXECUTOR_NUMBER" = "" || "$CONFIGURATION" = "" || "$ADAPTER" = "" ]]; then - usage -fi - -# This directory -CUR_DIR=$(cd $(dirname "$0") && pwd) - -# devstack directory -cd ../../.. 
-TOP_DIR=$(pwd) - -# Deps -apt-get install -y --force-yes libvirt-bin || true - -# Name test instance based on executor -BASE_NAME=executor-`printf "%02d" $EXECUTOR_NUMBER` -GUEST_NAME=$BASE_NAME.$ADAPTER -virsh list | grep $BASE_NAME | cut -d " " -f1 | xargs -n 1 virsh destroy || true -virsh net-list | grep $BASE_NAME | cut -d " " -f1 | xargs -n 1 virsh net-destroy || true - -# Configure localrc -cat <localrc -RECLONE=yes -GUEST_NETWORK=$EXECUTOR_NUMBER -GUEST_NAME=$GUEST_NAME -FLOATING_RANGE=192.168.$EXECUTOR_NUMBER.128/27 -GUEST_CORES=1 -GUEST_RAM=12574720 -MYSQL_PASSWORD=chicken -RABBIT_PASSWORD=chicken -SERVICE_TOKEN=chicken -ADMIN_PASSWORD=chicken -USERNAME=admin -TENANT=admin -NET_NAME=$BASE_NAME -ACTIVE_TIMEOUT=45 -BOOT_TIMEOUT=45 -$RC -EOF -cd tools -sudo ./build_uec.sh - -# Make the address of the instances available to test runners -echo HEAD=`cat /var/lib/libvirt/dnsmasq/$BASE_NAME.leases | cut -d " " -f3` > $TOP_DIR/addresses diff --git a/tools/jenkins/configurations/xs.sh b/tools/jenkins/configurations/xs.sh deleted file mode 100755 index 864f949114..0000000000 --- a/tools/jenkins/configurations/xs.sh +++ /dev/null @@ -1,53 +0,0 @@ -#!/bin/bash -set -o errexit -set -o xtrace - - -EXECUTOR_NUMBER=$1 -CONFIGURATION=$2 -ADAPTER=$3 -RC=$4 - -function usage() { - echo "Usage: $0 - Build a test configuration" - echo "" - echo "$0 [EXECUTOR_NUMBER] [CONFIGURATION] [ADAPTER] [RC (optional)]" - exit 1 -} - -# Validate inputs -if [[ "$EXECUTOR_NUMBER" = "" || "$CONFIGURATION" = "" || "$ADAPTER" = "" ]]; then - usage -fi - -# Configuration of xenrc -XENRC=/var/lib/jenkins/xenrc -if [ ! -e $XENRC ]; then - echo "/var/lib/jenkins/xenrc is not present! See README.md" - exit 1 -fi - -# Move to top of devstack -cd ../../.. - -# Use xenrc as the start of our localrc -cp $XENRC localrc - -# Set the PUB_IP -PUB_IP=192.168.1.1$EXECUTOR_NUMBER -echo "PUB_IP=$PUB_IP" >> localrc - -# Overrides -echo "$RC" >> localrc - -# Source localrc -. localrc - -# Make host ip available to tester -echo "HEAD=$PUB_IP" > addresses - -# Build configuration -REMOTE_DEVSTACK=/root/devstack -ssh root@$XEN_IP "rm -rf $REMOTE_DEVSTACK" -scp -pr . root@$XEN_IP:$REMOTE_DEVSTACK -ssh root@$XEN_IP "cd $REMOTE_DEVSTACK/tools/xen && ./build_domU.sh" diff --git a/tools/jenkins/jenkins_home/.gitignore b/tools/jenkins/jenkins_home/.gitignore deleted file mode 100644 index d831d01ccf..0000000000 --- a/tools/jenkins/jenkins_home/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -builds -workspace -*.sw* diff --git a/tools/jenkins/jenkins_home/build_jenkins.sh b/tools/jenkins/jenkins_home/build_jenkins.sh deleted file mode 100755 index e0e774ee9e..0000000000 --- a/tools/jenkins/jenkins_home/build_jenkins.sh +++ /dev/null @@ -1,108 +0,0 @@ -#!/bin/bash - -# Echo commands, exit on error -set -o xtrace -set -o errexit - -# Make sure only root can run our script -if [[ $EUID -ne 0 ]]; then - echo "This script must be run as root" - exit 1 -fi - -# This directory -CUR_DIR=$(cd $(dirname "$0") && pwd) - -# Configure trunk jenkins! -echo "deb http://pkg.jenkins-ci.org/debian binary/" > /etc/apt/sources.list.d/jenkins.list -wget -q -O - http://pkg.jenkins-ci.org/debian/jenkins-ci.org.key | sudo apt-key add - -apt-get update - - -# Clean out old jenkins - useful if you are having issues upgrading -CLEAN_JENKINS=${CLEAN_JENKINS:-no} -if [ "$CLEAN_JENKINS" = "yes" ]; then - apt-get remove jenkins jenkins-common -fi - -# Install software -DEPS="jenkins cloud-utils" -apt-get install -y --force-yes $DEPS - -# Install jenkins -if [ ! 
-e /var/lib/jenkins ]; then - echo "Jenkins installation failed" - exit 1 -fi - -# Make sure user has configured a jenkins ssh pubkey -if [ ! -e /var/lib/jenkins/.ssh/id_rsa.pub ]; then - echo "Public key for jenkins is missing. This is used to ssh into your instances." - echo "Please run "su -c ssh-keygen jenkins" before proceeding" - exit 1 -fi - -# Setup sudo -JENKINS_SUDO=/etc/sudoers.d/jenkins -cat > $JENKINS_SUDO < $JENKINS_GITCONF < - - 4 - Jenkins - jenkins@rcb.me - -EOF - -# Add build numbers -JOBS=`ls jobs` -for job in ${JOBS// / }; do - if [ ! -e jobs/$job/nextBuildNumber ]; then - echo 1 > jobs/$job/nextBuildNumber - fi -done - -# Set ownership to jenkins -chown -R jenkins $CUR_DIR - -# Make sure this directory is accessible to jenkins -if ! su -c "ls $CUR_DIR" jenkins; then - echo "Your devstack directory is not accessible by jenkins." - echo "There is a decent chance you are trying to run this from a directory in /root." - echo "If so, try moving devstack elsewhere (eg. /opt/devstack)." - exit 1 -fi - -# Move aside old jobs, if present -if [ ! -h /var/lib/jenkins/jobs ]; then - echo "Installing jobs symlink" - if [ -d /var/lib/jenkins/jobs ]; then - mv /var/lib/jenkins/jobs /var/lib/jenkins/jobs.old - fi -fi - -# Set up jobs symlink -rm -f /var/lib/jenkins/jobs -ln -s $CUR_DIR/jobs /var/lib/jenkins/jobs - -# List of plugins -PLUGINS=http://hudson-ci.org/downloads/plugins/build-timeout/1.6/build-timeout.hpi,http://mirrors.jenkins-ci.org/plugins/git/1.1.12/git.hpi,http://hudson-ci.org/downloads/plugins/global-build-stats/1.2/global-build-stats.hpi,http://hudson-ci.org/downloads/plugins/greenballs/1.10/greenballs.hpi,http://download.hudson-labs.org/plugins/console-column-plugin/1.0/console-column-plugin.hpi - -# Configure plugins -for plugin in ${PLUGINS//,/ }; do - name=`basename $plugin` - dest=/var/lib/jenkins/plugins/$name - if [ ! -e $dest ]; then - curl -L $plugin -o $dest - fi -done - -# Restart jenkins -/etc/init.d/jenkins stop || true -/etc/init.d/jenkins start diff --git a/tools/jenkins/jenkins_home/clean.sh b/tools/jenkins/jenkins_home/clean.sh deleted file mode 100755 index eb03022a93..0000000000 --- a/tools/jenkins/jenkins_home/clean.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash -# This script is not yet for general consumption. - -set -o errexit - -if [ ! "$FORCE" = "yes" ]; then - echo "FORCE not set to 'yes'. Make sure this is something you really want to do. Exiting." 
- exit 1 -fi - -virsh list | cut -d " " -f1 | grep -v "-" | egrep -e "[0-9]" | xargs -n 1 virsh destroy || true -virsh net-list | grep active | cut -d " " -f1 | xargs -n 1 virsh net-destroy || true -killall dnsmasq || true -if [ "$CLEAN" = "yes" ]; then - rm -rf jobs -fi -rm /var/lib/jenkins/jobs -git checkout -f -git fetch -git merge origin/jenkins -./build_jenkins.sh diff --git a/tools/jenkins/jenkins_home/jobs/diablo-kvm_ha/config.xml b/tools/jenkins/jenkins_home/jobs/diablo-kvm_ha/config.xml deleted file mode 100644 index 94c51f514b..0000000000 --- a/tools/jenkins/jenkins_home/jobs/diablo-kvm_ha/config.xml +++ /dev/null @@ -1,82 +0,0 @@ - - - - - false - - - - - RC - - - - - - - - 2 - - - origin - +refs/heads/*:refs/remotes/origin/* - git://github.com/cloudbuilders/devstack.git - - - - - master - - - false - false - false - false - false - false - false - - Default - - - - - - - false - - - true - false - false - false - - false - - - ADAPTER - - euca - floating_ips - - - - - - sed -i 's/) 2>&1 | tee "${LOGFILE}"/)/' stack.sh - - - set -o errexit -cd tools/jenkins -sudo ./build_configuration.sh $EXECUTOR_NUMBER kvm $ADAPTER "$RC" - - - set -o errexit -cd tools/jenkins -./run_test.sh $EXECUTOR_NUMBER $ADAPTER $RC "$RC" - - - - - false - diff --git a/tools/jenkins/jenkins_home/jobs/diablo-kvm_ha/configurations/axis-ADAPTER/euca/config.xml b/tools/jenkins/jenkins_home/jobs/diablo-kvm_ha/configurations/axis-ADAPTER/euca/config.xml deleted file mode 100644 index 0be70a5c71..0000000000 --- a/tools/jenkins/jenkins_home/jobs/diablo-kvm_ha/configurations/axis-ADAPTER/euca/config.xml +++ /dev/null @@ -1,15 +0,0 @@ - - - false - - - false - false - false - false - - false - - - - \ No newline at end of file diff --git a/tools/jenkins/jenkins_home/jobs/diablo-kvm_ha/configurations/axis-ADAPTER/floatingips/config.xml b/tools/jenkins/jenkins_home/jobs/diablo-kvm_ha/configurations/axis-ADAPTER/floatingips/config.xml deleted file mode 100644 index 0be70a5c71..0000000000 --- a/tools/jenkins/jenkins_home/jobs/diablo-kvm_ha/configurations/axis-ADAPTER/floatingips/config.xml +++ /dev/null @@ -1,15 +0,0 @@ - - - false - - - false - false - false - false - - false - - - - \ No newline at end of file diff --git a/tools/jenkins/jenkins_home/jobs/diablo-xs_ha/config.xml b/tools/jenkins/jenkins_home/jobs/diablo-xs_ha/config.xml deleted file mode 100644 index d0fa6af3f9..0000000000 --- a/tools/jenkins/jenkins_home/jobs/diablo-xs_ha/config.xml +++ /dev/null @@ -1,88 +0,0 @@ - - - - In order for this to work, you must create a /var/lib/jenkins/xenrc file as described in README.md - false - - - - - RC - - - - - - - - 2 - - - origin - +refs/heads/*:refs/remotes/origin/* - git://github.com/cloudbuilders/devstack.git - - - - - master - - - false - false - false - false - false - false - false - - Default - - - - - - - false - - - true - false - false - false - - false - - - ADAPTER - - euca - floating_ips - - - - - - sed -i 's/) 2>&1 | tee "${LOGFILE}"/)/' stack.sh - - - set -o errexit -cd tools/jenkins -sudo ./build_configuration.sh $EXECUTOR_NUMBER xs $ADAPTER "$RC" - - - #!/bin/bash -set -o errexit -set -o xtrace - -. localrc - -# Unlike kvm, ssh to the xen host to run tests, in case the test instance is launch with a host only network -ssh root@$XEN_IP "cd devstack && . 
localrc && cd tools/jenkins && ./run_test.sh $EXECUTOR_NUMBER $ADAPTER '$RC'" - - - - - - true - diff --git a/tools/jenkins/jenkins_home/print_summary.py b/tools/jenkins/jenkins_home/print_summary.py deleted file mode 100755 index 1d71a4a8eb..0000000000 --- a/tools/jenkins/jenkins_home/print_summary.py +++ /dev/null @@ -1,45 +0,0 @@ -#!/usr/bin/python -import urllib -import json -import sys - - -def print_usage(): - print "Usage: %s [jenkins_url (eg. http://50.56.12.202:8080/)]"\ - % sys.argv[0] - sys.exit() - - -def fetch_blob(url): - return json.loads(urllib.urlopen(url + '/api/json').read()) - - -if len(sys.argv) < 2: - print_usage() - -BASE_URL = sys.argv[1] - -root = fetch_blob(BASE_URL) -results = {} -for job_url in root['jobs']: - job = fetch_blob(job_url['url']) - if job.get('activeConfigurations'): - (tag, name) = job['name'].split('-') - if not results.get(tag): - results[tag] = {} - if not results[tag].get(name): - results[tag][name] = [] - - for config_url in job['activeConfigurations']: - config = fetch_blob(config_url['url']) - - log_url = '' - if config.get('lastBuild'): - log_url = config['lastBuild']['url'] + 'console' - - results[tag][name].append({'test': config['displayName'], - 'status': config['color'], - 'logUrl': log_url, - 'healthReport': config['healthReport']}) - -print json.dumps(results) diff --git a/tools/jenkins/run_test.sh b/tools/jenkins/run_test.sh deleted file mode 100755 index 464956375e..0000000000 --- a/tools/jenkins/run_test.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash - -EXECUTOR_NUMBER=$1 -ADAPTER=$2 -RC=$3 - -function usage() { - echo "Usage: $0 - Run a test" - echo "" - echo "$0 [EXECUTOR_NUMBER] [ADAPTER] [RC (optional)]" - exit 1 -} - -# Validate inputs -if [[ "$EXECUTOR_NUMBER" = "" || "$ADAPTER" = "" ]]; then - usage -fi - -# Execute configuration script -cd adapters && ./$ADAPTER.sh $EXECUTOR_NUMBER $ADAPTER "$RC" diff --git a/tools/make_cert.sh b/tools/make_cert.sh new file mode 100755 index 0000000000..0212d0033a --- /dev/null +++ b/tools/make_cert.sh @@ -0,0 +1,56 @@ +#!/bin/bash + +# **make_cert.sh** + +# Create a CA hierarchy (if necessary) and server certificate +# +# This mimics the CA structure that DevStack sets up when ``tls_proxy`` is enabled +# but in the current directory unless ``DATA_DIR`` is set + +ENABLE_TLS=True +DATA_DIR=${DATA_DIR:-`pwd`/ca-data} + +ROOT_CA_DIR=$DATA_DIR/root +INT_CA_DIR=$DATA_DIR/int + +# Import common functions +source $TOP_DIR/functions + +# Import TLS functions +source lib/tls + +function usage { + echo "$0 - Create CA and/or certs" + echo "" + echo "Usage: $0 commonName [orgUnit]" + exit 1 +} + +CN=$1 +if [ -z "$CN" ]; then + usage +fi +ORG_UNIT_NAME=${2:-$ORG_UNIT_NAME} + +# Useful on OS/X +if [[ `uname -s` == 'Darwin' && -d /usr/local/Cellar/openssl ]]; then + # set up for brew-installed modern OpenSSL + OPENSSL_CONF=/usr/local/etc/openssl/openssl.cnf + OPENSSL=/usr/local/Cellar/openssl/*/bin/openssl +fi + +DEVSTACK_CERT_NAME=$CN +DEVSTACK_HOSTNAME=$CN +DEVSTACK_CERT=$DATA_DIR/$DEVSTACK_CERT_NAME.pem + +# Make sure the CA is set up +configure_CA +fix_system_ca_bundle_path +init_CA + +# Create the server cert +make_cert $INT_CA_DIR $DEVSTACK_CERT_NAME $DEVSTACK_HOSTNAME + +# Create a cert bundle +cat $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key \ + $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/cacert.pem >$DEVSTACK_CERT diff --git a/tools/memory_tracker.sh b/tools/memory_tracker.sh new file mode 100755 index 0000000000..2f404c26fb --- /dev/null +++ b/tools/memory_tracker.sh @@ -0,0 +1,125 @@ 
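A hypothetical invocation of the new tools/make_cert.sh above; the paths, hostname and org unit are example values, and it assumes the command is run from a devstack checkout with TOP_DIR set so that functions and lib/tls can be sourced:

# Example values only
cd /opt/stack/devstack
TOP_DIR=$PWD DATA_DIR=/tmp/ca-data tools/make_cert.sh controller.example.com Testing
# Resulting bundle (key + server cert + CA chain):
ls /tmp/ca-data/controller.example.com.pem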
+#!/bin/bash +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +set -o errexit + +# TODO(frickler): make this use stackrc variables +if [ -x /opt/stack/data/venv/bin/python ]; then + PYTHON=/opt/stack/data/venv/bin/python +else + PYTHON=${PYTHON:-python3} +fi + +# time to sleep between checks +SLEEP_TIME=20 + +# MemAvailable is the best estimation and has built-in heuristics +# around reclaimable memory. However, it is not available until 3.14 +# kernel (i.e. Ubuntu LTS Trusty misses it). In that case, we fall +# back to free+buffers+cache as the available memory. +USE_MEM_AVAILABLE=0 +if grep -q '^MemAvailable:' /proc/meminfo; then + USE_MEM_AVAILABLE=1 +fi + +function get_mem_unevictable { + awk '/^Unevictable:/ {print $2}' /proc/meminfo +} + +function get_mem_available { + if [[ $USE_MEM_AVAILABLE -eq 1 ]]; then + awk '/^MemAvailable:/ {print $2}' /proc/meminfo + else + awk '/^MemFree:/ {free=$2} + /^Buffers:/ {buffers=$2} + /^Cached:/ {cached=$2} + END { print free+buffers+cached }' /proc/meminfo + fi +} + +function tracker { + local low_point + local unevictable_point + low_point=$(get_mem_available) + # log mlocked memory at least on first iteration + unevictable_point=0 + while [ 1 ]; do + + local mem_available + mem_available=$(get_mem_available) + + local unevictable + unevictable=$(get_mem_unevictable) + + if [ $mem_available -lt $low_point -o $unevictable -ne $unevictable_point ]; then + echo "[[[" + date + + # whenever we see less memory available than last time, dump the + # snapshot of current usage; i.e. checking the latest entry in the file + # will give the peak-memory usage + if [[ $mem_available -lt $low_point ]]; then + low_point=$mem_available + echo "---" + # always available greppable output; given difference in + # meminfo output as described above... + echo "memory_tracker low_point: $mem_available" + echo "---" + cat /proc/meminfo + echo "---" + # would hierarchial view be more useful (-H)? output is + # not sorted by usage then, however, and the first + # question is "what's using up the memory" + # + # there are a lot of kernel threads, especially on a 8-cpu + # system. do a best-effort removal to improve + # signal/noise ratio of output. + ps --sort=-pmem -eo pid:10,pmem:6,rss:15,ppid:10,cputime:10,nlwp:8,wchan:25,args:100 | + grep -v ']$' + fi + echo "---" + + # list processes that lock memory from swap + if [[ $unevictable -ne $unevictable_point ]]; then + unevictable_point=$unevictable + ${PYTHON} $(dirname $0)/mlock_report.py + fi + + echo "]]]" + fi + sleep $SLEEP_TIME + done +} + +function usage { + echo "Usage: $0 [-x] [-s N]" 1>&2 + exit 1 +} + +while getopts ":s:x" opt; do + case $opt in + s) + SLEEP_TIME=$OPTARG + ;; + x) + set -o xtrace + ;; + *) + usage + ;; + esac +done +shift $((OPTIND-1)) + +tracker diff --git a/tools/mlock_report.py b/tools/mlock_report.py new file mode 100644 index 0000000000..8cbda15895 --- /dev/null +++ b/tools/mlock_report.py @@ -0,0 +1,53 @@ +# This tool lists processes that lock memory pages from swapping to disk. 
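The locked-memory numbers come from the VmLck field of /proc/<pid>/status; a standalone illustration of that line format and of the kind of regex used below (the sample numbers are made up):

import re

# An excerpt of /proc/<pid>/status with made-up numbers
sample = "VmHWM:\t    2048 kB\nVmLck:\t    1024 kB\nVmPin:\t       0 kB\n"
vmlck = re.compile(r"^VmLck:\s+(?P<locked>\d+)\s+kB", re.MULTILINE)
match = vmlck.search(sample)
print(int(match.group('locked')))   # -> 1024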
+ +import re + +import psutil + + +LCK_SUMMARY_REGEX = re.compile( + r"^VmLck:\s+(?P[\d]+)\s+kB", re.MULTILINE) + + +def main(): + try: + print(_get_report()) + except Exception as e: + print("Failure listing processes locking memory: %s" % str(e)) + raise + + +def _get_report(): + mlock_users = [] + for proc in psutil.process_iter(): + # sadly psutil does not expose locked pages info, that's why we + # iterate over the /proc/%pid/status files manually + try: + s = open("%s/%d/status" % (psutil.PROCFS_PATH, proc.pid), 'r') + with s: + for line in s: + result = LCK_SUMMARY_REGEX.search(line) + if result: + locked = int(result.group('locked')) + if locked: + mlock_users.append({'name': proc.name(), + 'pid': proc.pid, + 'locked': locked}) + except OSError: + # pids can disappear, we're ok with that + continue + + + # produce a single line log message with per process mlock stats + if mlock_users: + return "; ".join( + "[%(name)s (pid:%(pid)s)]=%(locked)dKB" % args + # log heavy users first + for args in sorted(mlock_users, key=lambda d: d['locked']) + ) + else: + return "no locked memory" + + +if __name__ == "__main__": + main() diff --git a/tools/outfilter.py b/tools/outfilter.py new file mode 100644 index 0000000000..3955d39794 --- /dev/null +++ b/tools/outfilter.py @@ -0,0 +1,104 @@ +#!/usr/bin/env python3 + +# Copyright 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# This is an output filter to filter and timestamp the logs from Grenade and +# DevStack. Largely our awk filters got beyond the complexity level which were +# sustainable, so this provides us much more control in a single place. +# +# The overhead of running python should be less than execing `date` a million +# times during a run. + +import argparse +import datetime +import re +import sys + +IGNORE_LINES = re.compile(r'(set \+o|xtrace)') +HAS_DATE = re.compile(r'^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{3} \|') + + +def get_options(): + parser = argparse.ArgumentParser( + description='Filter output by DevStack and friends') + parser.add_argument('-o', '--outfile', + help='Output file for content', + default=None) + # NOTE(ianw): This is intended for the case where your stdout is + # being captured by something like ansible which independently + # logs timestamps on the lines it receives. Note that if using a + # output file, those log lines are still timestamped. + parser.add_argument('-b', '--no-timestamp', action='store_true', + help='Do not prefix stdout with timestamp (bare)', + default=False) + parser.add_argument('-v', '--verbose', action='store_true', + default=False) + return parser.parse_args() + + +def skip_line(line): + """Should we skip this line.""" + return IGNORE_LINES.search(line) is not None + + +def main(): + opts = get_options() + outfile = None + if opts.outfile: + # note, binary mode so we can do unbuffered output. 
+ outfile = open(opts.outfile, 'ab', 0) + + # Otherwise fileinput reprocess args as files + sys.argv = [] + + for line in iter(sys.stdin.readline, ''): + # put skip lines here + if skip_line(line): + continue + + # This prevents us from nesting date lines, because we'd like + # to pull this in directly in Grenade and not double up on + # DevStack lines. + # NOTE(ianw): we could actually strip the extra ts in "bare" + # mode (which came after this)? ... as we get more experience + # with zuulv3 native jobs and ansible capture it may become + # clearer what to do + if HAS_DATE.search(line) is None: + now = datetime.datetime.now(datetime.timezone.utc).replace( + tzinfo=None) + ts_line = ("%s | %s" % ( + now.strftime("%Y-%m-%d %H:%M:%S.%f")[:-3], + line)) + else: + ts_line = line + + if opts.verbose: + sys.stdout.write(line if opts.no_timestamp else ts_line) + sys.stdout.flush() + + if outfile: + # We've opened outfile as a binary file to get the + # non-buffered behaviour. on python3, sys.stdin was + # opened with the system encoding and made the line into + # utf-8, so write the logfile out in utf-8 bytes. + outfile.write(ts_line.encode('utf-8')) + outfile.flush() + + +if __name__ == '__main__': + try: + sys.exit(main()) + except KeyboardInterrupt: + sys.exit(1) diff --git a/tools/ping_neutron.sh b/tools/ping_neutron.sh new file mode 100755 index 0000000000..ab8e8dfca8 --- /dev/null +++ b/tools/ping_neutron.sh @@ -0,0 +1,71 @@ +#!/bin/bash +# +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# Ping a neutron guest using a network namespace probe + +set -o errexit +set -o pipefail + +TOP_DIR=$(cd $(dirname "$0")/.. && pwd) + +# This *must* be run as the admin tenant +source $TOP_DIR/openrc admin admin + +function usage { + cat - < [ping args] + +This provides a wrapper to ping neutron guests that are on isolated +tenant networks that the caller can't normally reach. It does so by +using either the DHCP or Metadata network namespace to support both +ML2/OVS and OVN. + +It takes arguments like ping, except the first arg must be the network +name. + +Note: in environments with duplicate network names, the results are +non deterministic. + +This should *really* be in the neutron cli. + +EOF + exit 1 +} + +# BUG: with duplicate network names, this fails pretty hard since it +# will just pick the first match. 
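A hypothetical run of this script from a devstack checkout; the network name and guest address are example values, and everything after the network name is passed through to ping unchanged:

# Example values only: ping guest 10.0.0.12 on the "private" network three times
cd /opt/stack/devstack
tools/ping_neutron.sh private -c 3 10.0.0.12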
+function _get_net_id { + openstack --os-cloud devstack-admin --os-region-name="$REGION_NAME" --os-project-name admin --os-username admin --os-password $ADMIN_PASSWORD network list | grep $1 | head -n 1 | awk '{print $2}' +} + +NET_NAME=$1 + +if [[ -z "$NET_NAME" ]]; then + echo "Error: net_name is required" + usage +fi + +REMAINING_ARGS="${@:2}" + +NET_ID=`_get_net_id $NET_NAME` +NET_NS=$(ip netns list | grep "$NET_ID" | head -n 1) + +# This runs a command inside the specific netns +NET_NS_CMD="ip netns exec $NET_NS" + +PING_CMD="sudo $NET_NS_CMD ping $REMAINING_ARGS" +echo "Running $PING_CMD" +$PING_CMD diff --git a/tools/rfc.sh b/tools/rfc.sh deleted file mode 100755 index 0bc1531951..0000000000 --- a/tools/rfc.sh +++ /dev/null @@ -1,145 +0,0 @@ -#!/bin/sh -e -# Copyright (c) 2010-2011 Gluster, Inc. -# This initial version of this file was taken from the source tree -# of GlusterFS. It was not directly attributed, but is assumed to be -# Copyright (c) 2010-2011 Gluster, Inc and release GPLv3 -# Subsequent modifications are Copyright (c) 2011 OpenStack, LLC. -# -# GlusterFS is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published -# by the Free Software Foundation; either version 3 of the License, -# or (at your option) any later version. -# -# GlusterFS is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see -# . - - -branch="master"; - -set_hooks_commit_msg() -{ - top_dir=`git rev-parse --show-toplevel` - f="${top_dir}/.git/hooks/commit-msg"; - u="https://review.openstack.org/tools/hooks/commit-msg"; - - if [ -x "$f" ]; then - return; - fi - - curl -o $f $u || wget -O $f $u; - - chmod +x $f; - - GIT_EDITOR=true git commit --amend -} - -add_remote() -{ - username=$1 - project=$2 - - echo "No remote set, testing ssh://$username@review.openstack.org:29418" - if project_list=`ssh -p29418 -o StrictHostKeyChecking=no $username@review.openstack.org gerrit ls-projects 2>/dev/null` - then - echo "$username@review.openstack.org:29418 worked." - if echo $project_list | grep $project >/dev/null - then - echo "Creating a git remote called gerrit that maps to:" - echo " ssh://$username@review.openstack.org:29418/$project" - git remote add gerrit ssh://$username@review.openstack.org:29418/$project - else - echo "The current project name, $project, is not a known project." - echo "Please either reclone from github/gerrit or create a" - echo "remote named gerrit that points to the intended project." - return 1 - fi - - return 0 - fi - return 1 -} - -check_remote() -{ - if ! git remote | grep gerrit >/dev/null 2>&1 - then - origin_project=`git remote show origin | grep 'Fetch URL' | perl -nle '@fields = split(m|[:/]|); $len = $#fields; print $fields[$len-1], "/", $fields[$len];'` - if add_remote $USERNAME $origin_project - then - return 0 - else - echo "Your local name doesn't work on Gerrit." - echo -n "Enter Gerrit username (same as launchpad): " - read gerrit_user - if add_remote $gerrit_user $origin_project - then - return 0 - else - echo "Can't infer where gerrit is - please set a remote named" - echo "gerrit manually and then try again." 
- echo - echo "For more information, please see:" - echo "\thttp://wiki.openstack.org/GerritWorkflow" - exit 1 - fi - fi - fi -} - -rebase_changes() -{ - git fetch; - - GIT_EDITOR=true git rebase -i origin/$branch || exit $?; -} - - -assert_diverge() -{ - if ! git diff origin/$branch..HEAD | grep -q . - then - echo "No changes between the current branch and origin/$branch." - exit 1 - fi -} - - -main() -{ - set_hooks_commit_msg; - - check_remote; - - rebase_changes; - - assert_diverge; - - bug=$(git show --format='%s %b' | perl -nle 'if (/\b([Bb]ug|[Ll][Pp])\s*[#:]?\s*(\d+)/) {print "$2"; exit}') - - bp=$(git show --format='%s %b' | perl -nle 'if (/\b([Bb]lue[Pp]rint|[Bb][Pp])\s*[#:]?\s*([0-9a-zA-Z-_]+)/) {print "$2"; exit}') - - if [ "$DRY_RUN" = 1 ]; then - drier='echo -e Please use the following command to send your commits to review:\n\n' - else - drier= - fi - - local_branch=`git branch | grep -Ei "\* (.*)" | cut -f2 -d' '` - if [ -z "$bug" ]; then - if [ -z "$bp" ]; then - $drier git push gerrit HEAD:refs/for/$branch/$local_branch; - else - $drier git push gerrit HEAD:refs/for/$branch/bp/$bp; - fi - else - $drier git push gerrit HEAD:refs/for/$branch/bug/$bug; - fi -} - -main "$@" diff --git a/tools/uec/meta.py b/tools/uec/meta.py deleted file mode 100644 index 5b845d81a6..0000000000 --- a/tools/uec/meta.py +++ /dev/null @@ -1,29 +0,0 @@ -import sys -from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler -from SimpleHTTPServer import SimpleHTTPRequestHandler - -def main(host, port, HandlerClass = SimpleHTTPRequestHandler, - ServerClass = HTTPServer, protocol="HTTP/1.0"): - """simple http server that listens on a give address:port""" - - server_address = (host, port) - - HandlerClass.protocol_version = protocol - httpd = ServerClass(server_address, HandlerClass) - - sa = httpd.socket.getsockname() - print "Serving HTTP on", sa[0], "port", sa[1], "..." - httpd.serve_forever() - -if __name__ == '__main__': - if sys.argv[1:]: - address = sys.argv[1] - else: - address = '0.0.0.0' - if ':' in address: - host, port = address.split(':') - else: - host = address - port = 8080 - - main(host, int(port)) diff --git a/tools/update_clouds_yaml.py b/tools/update_clouds_yaml.py new file mode 100755 index 0000000000..87312d9469 --- /dev/null +++ b/tools/update_clouds_yaml.py @@ -0,0 +1,107 @@ +#!/usr/bin/env python3 + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# Update the clouds.yaml file. 
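For orientation, this is roughly the project-scoped entry the tool merges into clouds.yaml; the cloud name and credentials below are made-up illustration values, not defaults taken from this script:

import yaml

# Made-up values; the real ones come from the --os-* command line arguments.
clouds = {
    'clouds': {
        'devstack': {
            'region_name': 'RegionOne',
            'auth': {
                'auth_url': 'https://203.0.113.10/identity',
                'username': 'demo',
                'user_domain_id': 'default',
                'password': 'secret',
                'project_name': 'demo',
                'project_domain_id': 'default',
            },
        },
    },
}
print(yaml.dump(clouds, default_flow_style=False))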
+ +import argparse +import os.path +import sys + +import yaml + + +class UpdateCloudsYaml: + def __init__(self, args): + if args.file: + self._clouds_path = args.file + self._create_directory = False + else: + self._clouds_path = os.path.expanduser( + '~/.config/openstack/clouds.yaml') + self._create_directory = True + self._clouds = {} + + self._cloud = args.os_cloud + self._cloud_data = { + 'region_name': args.os_region_name, + 'auth': { + 'auth_url': args.os_auth_url, + 'username': args.os_username, + 'user_domain_id': 'default', + 'password': args.os_password, + }, + } + + if args.os_project_name and args.os_system_scope: + print( + "WARNING: os_project_name and os_system_scope were both " + "given. os_system_scope will take priority." + ) + + if args.os_system_scope: # system-scoped + self._cloud_data['auth']['system_scope'] = args.os_system_scope + elif args.os_project_name: # project-scoped + self._cloud_data['auth']['project_name'] = args.os_project_name + self._cloud_data['auth']['project_domain_id'] = 'default' + + if args.os_cacert: + self._cloud_data['cacert'] = args.os_cacert + + def run(self): + self._read_clouds() + self._update_clouds() + self._write_clouds() + + def _read_clouds(self): + try: + with open(self._clouds_path) as clouds_file: + self._clouds = yaml.safe_load(clouds_file) + except IOError: + # The user doesn't have a clouds.yaml file. + print("The user clouds.yaml file didn't exist.") + self._clouds = {} + + def _update_clouds(self): + self._clouds.setdefault('clouds', {})[self._cloud] = self._cloud_data + + def _write_clouds(self): + + if self._create_directory: + clouds_dir = os.path.dirname(self._clouds_path) + os.makedirs(clouds_dir) + + with open(self._clouds_path, 'w') as clouds_file: + yaml.dump(self._clouds, clouds_file, default_flow_style=False) + + +def main(): + parser = argparse.ArgumentParser('Update clouds.yaml file.') + parser.add_argument('--file') + parser.add_argument('--os-cloud', required=True) + parser.add_argument('--os-region-name', default='RegionOne') + parser.add_argument('--os-cacert') + parser.add_argument('--os-auth-url', required=True) + parser.add_argument('--os-username', required=True) + parser.add_argument('--os-password', required=True) + parser.add_argument('--os-project-name') + parser.add_argument('--os-system-scope') + + args = parser.parse_args() + + update_clouds_yaml = UpdateCloudsYaml(args) + update_clouds_yaml.run() + + +if __name__ == "__main__": + main() diff --git a/tools/upload_image.sh b/tools/upload_image.sh new file mode 100755 index 0000000000..19c6b71976 --- /dev/null +++ b/tools/upload_image.sh @@ -0,0 +1,44 @@ +#!/usr/bin/env bash +# upload_image.sh - Retrieve and upload an image into Glance +# +# upload_image.sh +# +# Assumes credentials are set via OS_* environment variables + +function usage { + echo "$0 - Retrieve and upload an image into Glance" + echo "" + echo "Usage: $0 [...]" + echo "" + echo "Assumes credentials are set via OS_* environment variables" + exit 1 +} + +# Keep track of the current directory +TOOLS_DIR=$(cd $(dirname "$0") && pwd) +TOP_DIR=$(cd $TOOLS_DIR/..; pwd) + +# Import common functions +source $TOP_DIR/functions + +# Import configuration +source $TOP_DIR/openrc "" "" "" "" + +# Find the cache dir +FILES=$TOP_DIR/files + +if [[ -z "$1" ]]; then + usage +fi + +# Get a token to authenticate to glance +TOKEN=$(openstack token issue -c id -f value) +die_if_not_set $LINENO TOKEN "Keystone fail to get token" + +# Glance connection info. Note the port must be specified. 
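+# For illustration only: an override would look like
+#   GLANCE_HOSTPORT=192.0.2.10:9292
+# (placeholder address; the explicit :9292 port is the important part).
+# Otherwise the default below falls back to $GLANCE_HOST:9292.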
+GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$GLANCE_HOST:9292} +GLANCE_SERVICE_PROTOCOL=${GLANCE_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} + +for IMAGE in "$*"; do + upload_image $IMAGE $TOKEN +done diff --git a/tools/verify-ipv6-address.py b/tools/verify-ipv6-address.py new file mode 100644 index 0000000000..dc18fa6d8a --- /dev/null +++ b/tools/verify-ipv6-address.py @@ -0,0 +1,41 @@ +#!/usr/bin/env python3 + +import argparse +import ipaddress +import sys + +def main(): + parser = argparse.ArgumentParser( + description="Check if a given string is a valid IPv6 address.", + formatter_class=argparse.RawTextHelpFormatter, + ) + parser.add_argument( + "address", + help=( + "The IPv6 address string to validate.\n" + "Examples:\n" + " 2001:0db8:85a3:0000:0000:8a2e:0370:7334\n" + " 2001:db8::1\n" + " ::1\n" + " fe80::1%eth0 (scope IDs are handled)" + ), + ) + args = parser.parse_args() + + try: + # try to create a IPv6Address: if we fail to parse or get an + # IPv4Address then die + ip_obj = ipaddress.ip_address(args.address.strip('[]')) + if isinstance(ip_obj, ipaddress.IPv6Address): + sys.exit(0) + else: + sys.exit(1) + except ValueError: + sys.exit(1) + except Exception as e: + print(f"An unexpected error occurred during validation: {e}", file=sys.stderr) + sys.exit(1) + + +if __name__ == "__main__": + main() diff --git a/tools/verify-ipv6-only-deployments.sh b/tools/verify-ipv6-only-deployments.sh new file mode 100755 index 0000000000..a1acecbb3f --- /dev/null +++ b/tools/verify-ipv6-only-deployments.sh @@ -0,0 +1,95 @@ +#!/bin/bash +# +# +# NOTE(gmann): This script is used in 'devstack-tempest-ipv6' zuul job to verify that +# services are deployed on IPv6 properly or not. This will capture if any devstck or devstack +# plugins are missing the required setting to listen on IPv6 address. This is run as part of +# run phase of zuul job and before test run. Child job of 'devstack-tempest-ipv6' +# can expand the IPv6 verification specific to project by defining the new post-run script which +# will run along with this base script. +# If there are more common verification for IPv6 then we can always extent this script. + +# Keep track of the DevStack directory +TOP_DIR=$(cd $(dirname "$0")/../../devstack && pwd) +source $TOP_DIR/stackrc +source $TOP_DIR/openrc admin admin + +function verify_devstack_ipv6_setting { + local _service_host='' + _service_host=$(echo $SERVICE_HOST | tr -d []) + local _host_ipv6='' + _host_ipv6=$(echo $HOST_IPV6 | tr -d []) + local _service_listen_address='' + _service_listen_address=$(echo $SERVICE_LISTEN_ADDRESS | tr -d []) + local _service_local_host='' + _service_local_host=$(echo $SERVICE_LOCAL_HOST | tr -d []) + local _tunnel_endpoint_ip='' + _tunnel_endpoint_ip=$(echo $TUNNEL_ENDPOINT_IP | tr -d []) + if [[ "$SERVICE_IP_VERSION" != 6 ]]; then + echo $SERVICE_IP_VERSION "SERVICE_IP_VERSION is not set to 6 which is must for devstack to deploy services with IPv6 address." + exit 1 + fi + if [[ "$TUNNEL_IP_VERSION" != 6 ]]; then + echo $TUNNEL_IP_VERSION "TUNNEL_IP_VERSION is not set to 6 so TUNNEL_ENDPOINT_IP cannot be an IPv6 address." + exit 1 + fi + if ! python3 ${TOP_DIR}/tools/verify-ipv6-address.py "$_service_host"; then + echo $SERVICE_HOST "SERVICE_HOST is not IPv6 which means devstack cannot deploy services on IPv6 addresses." + exit 1 + fi + if ! python3 ${TOP_DIR}/tools/verify-ipv6-address.py "$_host_ipv6"; then + echo $HOST_IPV6 "HOST_IPV6 is not IPv6 which means devstack cannot deploy services on IPv6 addresses." + exit 1 + fi + if ! 
python3 ${TOP_DIR}/tools/verify-ipv6-address.py "$_service_listen_address"; then + echo $SERVICE_LISTEN_ADDRESS "SERVICE_LISTEN_ADDRESS is not IPv6 which means devstack cannot deploy services on IPv6 addresses." + exit 1 + fi + if ! python3 ${TOP_DIR}/tools/verify-ipv6-address.py "$_service_local_host"; then + echo $SERVICE_LOCAL_HOST "SERVICE_LOCAL_HOST is not IPv6 which means devstack cannot deploy services on IPv6 addresses." + exit 1 + fi + if ! python3 ${TOP_DIR}/tools/verify-ipv6-address.py "$_tunnel_endpoint_ip"; then + echo $TUNNEL_ENDPOINT_IP "TUNNEL_ENDPOINT_IP is not IPv6 which means devstack will not deploy with an IPv6 endpoint address." + exit 1 + fi + echo "Devstack is properly configured with IPv6" + echo "SERVICE_IP_VERSION:" $SERVICE_IP_VERSION "HOST_IPV6:" $HOST_IPV6 "SERVICE_HOST:" $SERVICE_HOST "SERVICE_LISTEN_ADDRESS:" $SERVICE_LISTEN_ADDRESS "SERVICE_LOCAL_HOST:" $SERVICE_LOCAL_HOST "TUNNEL_IP_VERSION:" $TUNNEL_IP_VERSION "TUNNEL_ENDPOINT_IP:" $TUNNEL_ENDPOINT_IP +} + +function sanity_check_system_ipv6_enabled { + if [ ! -f "/proc/sys/net/ipv6/conf/default/disable_ipv6" ] || [ "$(cat /proc/sys/net/ipv6/conf/default/disable_ipv6)" -ne "0" ]; then + echo "IPv6 is disabled in system" + exit 1 + fi + echo "IPv6 is enabled in system" +} + +function verify_service_listen_address_is_ipv6 { + local endpoints_verified=False + local all_ipv6=True + endpoints=$(openstack endpoint list -f value -c URL) + for endpoint in ${endpoints}; do + local endpoint_address='' + endpoint_address=$(echo "$endpoint" | awk -F/ '{print $3}' | awk -F] '{print $1}') + endpoint_address=$(echo $endpoint_address | tr -d '[]') + if ! python3 ${TOP_DIR}/tools/verify-ipv6-address.py "$endpoint_address"; then + all_ipv6=False + echo $endpoint ": This is not an IPv6 endpoint which means corresponding service is not listening on an IPv6 address." + continue + fi + endpoints_verified=True + done + if [[ "$all_ipv6" == "False" ]] || [[ "$endpoints_verified" == "False" ]]; then + exit 1 + fi + echo "All services deployed by devstack are on IPv6 endpoints" + echo $endpoints +} + +#First thing to verify if system has IPv6 enabled or not +sanity_check_system_ipv6_enabled +#Verify whether devstack is configured properly with IPv6 setting +verify_devstack_ipv6_setting +#Get all registrfed endpoints by devstack in keystone and verify that each endpoints address is IPv6. +verify_service_listen_address_is_ipv6 diff --git a/tools/warm_apts_and_pips_for_uec.sh b/tools/warm_apts_and_pips_for_uec.sh deleted file mode 100755 index ec7e916c24..0000000000 --- a/tools/warm_apts_and_pips_for_uec.sh +++ /dev/null @@ -1,53 +0,0 @@ -#!/usr/bin/env bash - -# Echo commands -set -o xtrace - -# Exit on error to stop unexpected errors -set -o errexit - -# Keep track of the current directory -TOOLS_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=`cd $TOOLS_DIR/..; pwd` - -# Change dir to top of devstack -cd $TOP_DIR - -# Echo usage -usage() { - echo "Cache OpenStack dependencies on a uec image to speed up performance." - echo "" - echo "Usage: $0 [full path to raw uec base image]" -} - -# Make sure this is a raw image -if ! qemu-img info $1 | grep -q "file format: raw"; then - usage - exit 1 -fi - -# Make sure we are in the correct dir -if [ ! 
-d files/apts ]; then - echo "Please run this script from devstack/tools/" - exit 1 -fi - -# Mount the image -STAGING_DIR=/tmp/`echo $1 | sed "s/\//_/g"`.stage -mkdir -p $STAGING_DIR -umount $STAGING_DIR || true -sleep 1 -mount -t ext4 -o loop $1 $STAGING_DIR - -# Make sure that base requirements are installed -cp /etc/resolv.conf $STAGING_DIR/etc/resolv.conf - -# Perform caching on the base image to speed up subsequent runs -chroot $STAGING_DIR apt-get update -chroot $STAGING_DIR apt-get install -y --download-only `cat files/apts/* | grep NOPRIME | cut -d\# -f1` -chroot $STAGING_DIR apt-get install -y --force-yes `cat files/apts/* | grep -v NOPRIME | cut -d\# -f1` || true -mkdir -p $STAGING_DIR/var/cache/pip -PIP_DOWNLOAD_CACHE=/var/cache/pip chroot $STAGING_DIR pip install `cat files/pips/*` || true - -# Unmount -umount $STAGING_DIR diff --git a/tools/worlddump.py b/tools/worlddump.py new file mode 100755 index 0000000000..26ced3f653 --- /dev/null +++ b/tools/worlddump.py @@ -0,0 +1,272 @@ +#!/usr/bin/env python3 +# +# Copyright 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +"""Dump the state of the world for post mortem.""" + +import argparse +import datetime +import fnmatch +import io +import os +import shutil +import subprocess +import sys + + +GMR_PROCESSES = ( + 'nova-compute', + 'neutron-dhcp-agent', + 'neutron-l3-agent', + 'neutron-metadata-agent', + 'neutron-openvswitch-agent', + 'cinder-volume', +) + + +def get_options(): + parser = argparse.ArgumentParser( + description='Dump world state for debugging') + parser.add_argument('-d', '--dir', + default='.', + help='Output directory for worlddump') + parser.add_argument('-n', '--name', + default='', + help='Additional name to tag into file') + return parser.parse_args() + + +def filename(dirname, name=""): + now = datetime.datetime.now(datetime.timezone.utc) + fmt = "worlddump-%Y-%m-%d-%H%M%S" + if name: + fmt += "-" + name + fmt += ".txt" + return os.path.join(dirname, now.strftime(fmt)) + + +def warn(msg): + print("WARN: %s" % msg) + + +def _dump_cmd(cmd): + print(cmd) + print("-" * len(cmd)) + print() + try: + subprocess.check_call(cmd, shell=True) + print() + except subprocess.CalledProcessError as e: + print("*** Failed to run '%(cmd)s': %(err)s" % {'cmd': cmd, 'err': e}) + + +def _find_cmd(cmd): + if not shutil.which(cmd): + print("*** %s not found: skipping" % cmd) + return False + return True + + +def _header(name): + print() + print(name) + print("=" * len(name)) + print() + + +def _bridge_list(): + process = subprocess.Popen(['sudo', 'ovs-vsctl', 'list-br'], + stdout=subprocess.PIPE) + stdout, _ = process.communicate() + return stdout.split() + + +# This method gets a max openflow version supported by openvswitch. +# For example 'ovs-ofctl --version' displays the following: +# +# ovs-ofctl (Open vSwitch) 2.0.2 +# Compiled Dec 9 2015 14:08:08 +# OpenFlow versions 0x1:0x4 +# +# The above shows that openvswitch supports from OpenFlow10 to OpenFlow13. 
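+# (Wire version 0x1 is OpenFlow 1.0 and 0x4 is OpenFlow 1.3; for that sample
+# output this method returns int("4") - 1 == 3, which ovs_dump() below expands
+# to the protocol list OpenFlow10,OpenFlow11,OpenFlow12,OpenFlow13.)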
+# This method gets max version searching 'OpenFlow versions 0x1:0x'. +# And return a version value converted to an integer type. +def _get_ofp_version(): + process = subprocess.Popen(['ovs-ofctl', '--version'], + stdout=subprocess.PIPE) + stdout, _ = process.communicate() + find_str = b'OpenFlow versions 0x1:0x' + offset = stdout.find(find_str) + return int(stdout[offset + len(find_str):-1]) - 1 + + +def disk_space(): + # the df output + _header("File System Summary") + + dfraw = os.popen("df -Ph").read() + df = [s.split() for s in dfraw.splitlines()] + for fs in df: + try: + if int(fs[4][:-1]) > 95: + warn("Device %s (%s) is %s full, might be an issue" % ( + fs[0], fs[5], fs[4])) + except ValueError: + # if it doesn't look like an int, that's fine + pass + + print(dfraw) + + +def ebtables_dump(): + tables = ['filter', 'nat'] + _header("EB Tables Dump") + if not _find_cmd('ebtables'): + return + for table in tables: + _dump_cmd("sudo ebtables -t %s -L" % table) + + +def iptables_dump(): + tables = ['filter', 'nat', 'mangle'] + _header("IP Tables Dump") + + for table in tables: + _dump_cmd("sudo iptables --line-numbers -L -nv -t %s" % table) + + +def _netns_list(): + process = subprocess.Popen(['ip', 'netns'], stdout=subprocess.PIPE) + stdout, _ = process.communicate() + # NOTE(jlvillal): Sometimes 'ip netns list' can return output like: + # qrouter-0805fd7d-c493-4fa6-82ca-1c6c9b23cd9e (id: 1) + # qdhcp-bb2cc6ae-2ae8-474f-adda-a94059b872b5 (id: 0) + output = [x.split()[0] for x in stdout.splitlines()] + return output + + +def network_dump(): + _header("Network Dump") + + _dump_cmd("bridge link") + _dump_cmd("ip link show type bridge") + ip_cmds = ["neigh", "addr", "route", "-6 route"] + for cmd in ip_cmds + ['netns']: + _dump_cmd("ip %s" % cmd) + for netns_ in _netns_list(): + for cmd in ip_cmds: + args = {'netns': bytes.decode(netns_), 'cmd': cmd} + _dump_cmd('sudo ip netns exec %(netns)s ip %(cmd)s' % args) + + +def ovs_dump(): + _header("Open vSwitch Dump") + + # NOTE(cdent): If we're not using neutron + ovs these commands + # will not be present so + if not _find_cmd('ovs-vsctl'): + return + + bridges = _bridge_list() + ofctl_cmds = ('show', 'dump-ports-desc', 'dump-ports', 'dump-flows') + ofp_max = _get_ofp_version() + vers = 'OpenFlow10' + for i in range(1, ofp_max + 1): + vers += ',OpenFlow1' + str(i) + _dump_cmd("sudo ovs-vsctl show") + for ofctl_cmd in ofctl_cmds: + for bridge in bridges: + args = {'vers': vers, 'cmd': ofctl_cmd, 'bridge': bytes.decode(bridge)} + _dump_cmd("sudo ovs-ofctl --protocols=%(vers)s %(cmd)s %(bridge)s" % args) + + +def process_list(): + _header("Process Listing") + _dump_cmd("ps axo " + "user,ppid,pid,pcpu,pmem,vsz,rss,tty,stat,start,time,args") + + +def compute_consoles(): + _header("Compute consoles") + for root, _, filenames in os.walk('/opt/stack'): + for filename in fnmatch.filter(filenames, 'console.log'): + fullpath = os.path.join(root, filename) + _dump_cmd("sudo cat %s" % fullpath) + + +def guru_meditation_reports(): + for service in GMR_PROCESSES: + _header("%s Guru Meditation Report" % service) + + try: + subprocess.check_call(['pgrep', '-f', service]) + except subprocess.CalledProcessError: + print("Skipping as %s does not appear to be running" % service) + continue + + _dump_cmd("killall -e -USR2 %s" % service) + print("guru meditation report in %s log" % service) + + +def var_core(): + if os.path.exists('/var/core'): + _header("/var/core dumps") + # NOTE(ianw) : see DEBUG_LIBVIRT_COREDUMPS. 
We could think + # about getting backtraces out of these. There are other + # tools out there that can do that sort of thing though. + _dump_cmd("ls -ltrah /var/core") + + +def disable_stdio_buffering(): + # re-open STDOUT as binary, then wrap it in a + # TextIOWrapper, and write through everything. + binary_stdout = io.open(sys.stdout.fileno(), 'wb', 0) + sys.stdout = io.TextIOWrapper(binary_stdout, write_through=True) + + +def main(): + opts = get_options() + fname = filename(opts.dir, opts.name) + print("World dumping... see %s for details" % fname) + + disable_stdio_buffering() + + with io.open(fname, 'w') as f: + os.dup2(f.fileno(), sys.stdout.fileno()) + disk_space() + process_list() + network_dump() + ovs_dump() + iptables_dump() + ebtables_dump() + compute_consoles() + guru_meditation_reports() + var_core() + # Singular name for ease of log retrieval + copyname = os.path.join(opts.dir, 'worlddump') + if opts.name: + copyname += '-' + opts.name + copyname += '-latest.txt' + # We make a full copy to deal with jobs that may or may not + # gzip logs breaking symlinks. + shutil.copyfile(fname, copyname) + + +if __name__ == '__main__': + try: + sys.exit(main()) + except KeyboardInterrupt: + sys.exit(1) diff --git a/tools/xen/README.md b/tools/xen/README.md deleted file mode 100644 index 63350ea7f2..0000000000 --- a/tools/xen/README.md +++ /dev/null @@ -1,70 +0,0 @@ -Getting Started With XenServer 5.6 and Devstack -=============================================== -The purpose of the code in this directory it to help developers bootstrap -a XenServer 5.6 + Openstack development environment. This file gives -some pointers on how to get started. - -Step 1: Install Xenserver ------------------------- -Install XenServer 5.6 on a clean box. You can get XenServer by signing -up for an account on citrix.com, and then visiting: -https://www.citrix.com/English/ss/downloads/details.asp?downloadId=2311504&productId=683148 - -Here are some sample Xenserver network settings for when you are just -getting started (I use settings like this with a lappy + cheap wifi router): - -* XenServer Host IP: 192.168.1.10 -* XenServer Netmask: 255.255.255.0 -* XenServer Gateway: 192.168.1.1 -* XenServer DNS: 192.168.1.1 - -Step 2: Prepare DOM0 -------------------- -At this point, your server is missing some critical software that you will -need to run devstack (like git). Do this to install required software: - - wget --no-check-certificate https://github.com/cloudbuilders/devstack/raw/xen/tools/xen/prepare_dom0.sh - chmod 755 prepare_dom0.sh - ./prepare_dom0.sh - -This script will also clone devstack in /root/devstack - -Step 3: Configure your localrc ------------------------------ -Devstack uses a localrc for user-specific configuration. Note that -the XENAPI_PASSWORD must be your dom0 root password. -Of course, use real passwords if this machine is exposed. - - cat > /root/devstack/localrc <> /etc/sysconfig/network -fi - -# Also, enable ip forwarding in rc.local, since the above trick isn't working -if ! grep -q "echo 1 >/proc/sys/net/ipv4/ip_forward" /etc/rc.local; then - echo "echo 1 >/proc/sys/net/ipv4/ip_forward" >> /etc/rc.local -fi - -# Enable ip forwarding at runtime as well -echo 1 > /proc/sys/net/ipv4/ip_forward - -# Directory where we stage the build -STAGING_DIR=$TOP_DIR/stage - -# Option to clean out old stuff -CLEAN=${CLEAN:-0} -if [ "$CLEAN" = "1" ]; then - rm -rf $STAGING_DIR -fi - -# Download our base image. 
This image is made using prepare_guest.sh -BASE_IMAGE_URL=${BASE_IMAGE_URL:-http://images.ansolabs.com/xen/stage.tgz} -if [ ! -e $STAGING_DIR ]; then - if [ ! -e /tmp/stage.tgz ]; then - wget $BASE_IMAGE_URL -O /tmp/stage.tgz - fi - tar xfz /tmp/stage.tgz - cd $TOP_DIR -fi - -# Free up precious disk space -rm -f /tmp/stage.tgz - -# Make sure we have a stage -if [ ! -d $STAGING_DIR/etc ]; then - echo "Stage is not properly set up!" - exit 1 -fi - -# Directory where our conf files are stored -FILES_DIR=$TOP_DIR/files -TEMPLATES_DIR=$TOP_DIR/templates - -# Directory for supporting script files -SCRIPT_DIR=$TOP_DIR/scripts - -# Version of ubuntu with which we are working -UBUNTU_VERSION=`cat $STAGING_DIR/etc/lsb-release | grep "DISTRIB_CODENAME=" | sed "s/DISTRIB_CODENAME=//"` -KERNEL_VERSION=`ls $STAGING_DIR/boot/vmlinuz* | head -1 | sed "s/.*vmlinuz-//"` - -# Setup fake grub -rm -rf $STAGING_DIR/boot/grub/ -mkdir -p $STAGING_DIR/boot/grub/ -cp $TEMPLATES_DIR/menu.lst.in $STAGING_DIR/boot/grub/menu.lst -sed -e "s,@KERNEL_VERSION@,$KERNEL_VERSION,g" -i $STAGING_DIR/boot/grub/menu.lst - -# Setup fstab, tty, and other system stuff -cp $FILES_DIR/fstab $STAGING_DIR/etc/fstab -cp $FILES_DIR/hvc0.conf $STAGING_DIR/etc/init/ - -# Put the VPX into UTC. -rm -f $STAGING_DIR/etc/localtime - -# Configure dns (use same dns as dom0) -cp /etc/resolv.conf $STAGING_DIR/etc/resolv.conf - -# Copy over devstack -rm -f /tmp/devstack.tar -tar --exclude='stage' --exclude='xen/xvas' --exclude='xen/nova' -cvf /tmp/devstack.tar $TOP_DIR/../../../devstack -cd $STAGING_DIR/opt/stack/ -tar xf /tmp/devstack.tar -cd $TOP_DIR - -# Configure OVA -VDI_SIZE=$(($VDI_MB*1024*1024)) -PRODUCT_BRAND=${PRODUCT_BRAND:-openstack} -PRODUCT_VERSION=${PRODUCT_VERSION:-001} -BUILD_NUMBER=${BUILD_NUMBER:-001} -LABEL="$PRODUCT_BRAND $PRODUCT_VERSION-$BUILD_NUMBER" -OVA=$STAGING_DIR/tmp/ova.xml -cp $TEMPLATES_DIR/ova.xml.in $OVA -sed -e "s,@VDI_SIZE@,$VDI_SIZE,g" -i $OVA -sed -e "s,@PRODUCT_BRAND@,$PRODUCT_BRAND,g" -i $OVA -sed -e "s,@PRODUCT_VERSION@,$PRODUCT_VERSION,g" -i $OVA -sed -e "s,@BUILD_NUMBER@,$BUILD_NUMBER,g" -i $OVA - -# Directory for xvas -XVA_DIR=$TOP_DIR/xvas - -# Create xva dir -mkdir -p $XVA_DIR - -# Clean nova if desired -if [ "$CLEAN" = "1" ]; then - rm -rf $TOP_DIR/nova -fi - -# Checkout nova -if [ ! 
-d $TOP_DIR/nova ]; then - git clone git://github.com/cloudbuilders/nova.git - git checkout diablo -fi - -# Run devstack on launch -cat <$STAGING_DIR/etc/rc.local -GUEST_PASSWORD=$GUEST_PASSWORD STAGING_DIR=/ DO_TGZ=0 bash /opt/stack/devstack/tools/xen/prepare_guest.sh -su -c "/opt/stack/run.sh > /opt/stack/run.sh.log" stack -exit 0 -EOF - -# Install plugins -cp -pr $TOP_DIR/nova/plugins/xenserver/xenapi/etc/xapi.d /etc/ -chmod a+x /etc/xapi.d/plugins/* -yum --enablerepo=base install -y parted -mkdir -p /boot/guest - -# Set local storage il8n -SR_UUID=`xe sr-list --minimal name-label="Local storage"` -xe sr-param-set uuid=$SR_UUID other-config:i18n-key=local-storage - - -# Shutdown previous runs -DO_SHUTDOWN=${DO_SHUTDOWN:-1} -if [ "$DO_SHUTDOWN" = "1" ]; then - # Shutdown all domU's that created previously - xe vm-list --minimal name-label="$LABEL" | xargs ./scripts/uninstall-os-vpx.sh - - # Destroy any instances that were launched - for uuid in `xe vm-list | grep -1 instance | grep uuid | sed "s/.*\: //g"`; do - echo "Shutting down nova instance $uuid" - xe vm-unpause uuid=$uuid || true - xe vm-shutdown uuid=$uuid - xe vm-destroy uuid=$uuid - done - - # Destroy orphaned vdis - for uuid in `xe vdi-list | grep -1 Glance | grep uuid | sed "s/.*\: //g"`; do - xe vdi-destroy uuid=$uuid - done -fi - -# Path to head xva. By default keep overwriting the same one to save space -USE_SEPARATE_XVAS=${USE_SEPARATE_XVAS:-0} -if [ "$USE_SEPARATE_XVAS" = "0" ]; then - XVA=$XVA_DIR/$UBUNTU_VERSION.xva -else - XVA=$XVA_DIR/$UBUNTU_VERSION.$GUEST_NAME.xva -fi - -# Clean old xva. In the future may not do this every time. -rm -f $XVA - -# Configure the hostname -echo $GUEST_NAME > $STAGING_DIR/etc/hostname - -# Hostname must resolve for rabbit -cat <$STAGING_DIR/etc/hosts -$MGT_IP $GUEST_NAME -127.0.0.1 localhost localhost.localdomain -EOF - -# Configure the network -INTERFACES=$STAGING_DIR/etc/network/interfaces -cp $TEMPLATES_DIR/interfaces.in $INTERFACES -sed -e "s,@ETH1_IP@,$VM_IP,g" -i $INTERFACES -sed -e "s,@ETH1_NETMASK@,$VM_NETMASK,g" -i $INTERFACES -sed -e "s,@ETH2_IP@,$MGT_IP,g" -i $INTERFACES -sed -e "s,@ETH2_NETMASK@,$MGT_NETMASK,g" -i $INTERFACES -sed -e "s,@ETH3_IP@,$PUB_IP,g" -i $INTERFACES -sed -e "s,@ETH3_NETMASK@,$PUB_NETMASK,g" -i $INTERFACES - -# Gracefully cp only if source file/dir exists -function cp_it { - if [ -e $1 ] || [ -d $1 ]; then - cp -pRL $1 $2 - fi -} - -# Copy over your ssh keys and env if desired -COPYENV=${COPYENV:-1} -if [ "$COPYENV" = "1" ]; then - cp_it ~/.ssh $STAGING_DIR/opt/stack/.ssh - cp_it ~/.ssh/id_rsa.pub $STAGING_DIR/opt/stack/.ssh/authorized_keys - cp_it ~/.gitconfig $STAGING_DIR/opt/stack/.gitconfig - cp_it ~/.vimrc $STAGING_DIR/opt/stack/.vimrc - cp_it ~/.bashrc $STAGING_DIR/opt/stack/.bashrc -fi - -# Configure run.sh -cat <$STAGING_DIR/opt/stack/run.sh -#!/bin/bash -cd /opt/stack/devstack -killall screen -UPLOAD_LEGACY_TTY=yes HOST_IP=$PUB_IP VIRT_DRIVER=xenserver FORCE=yes MULTI_HOST=1 $STACKSH_PARAMS ./stack.sh -EOF -chmod 755 $STAGING_DIR/opt/stack/run.sh - -# Create xva -if [ ! 
-e $XVA ]; then - rm -rf /tmp/mkxva* - UID=0 $SCRIPT_DIR/mkxva -o $XVA -t xva -x $OVA $STAGING_DIR $VDI_MB /tmp/ -fi - -# Start guest -$TOP_DIR/scripts/install-os-vpx.sh -f $XVA -v $VM_BR -m $MGT_BR -p $PUB_BR - -# If we have copied our ssh credentials, use ssh to monitor while the installation runs -WAIT_TILL_LAUNCH=${WAIT_TILL_LAUNCH:-1} -if [ "$WAIT_TILL_LAUNCH" = "1" ] && [ -e ~/.ssh/id_rsa.pub ] && [ "$COPYENV" = "1" ]; then - # Done creating the container, let's tail the log - echo - echo "=============================================================" - echo " -- YAY! --" - echo "=============================================================" - echo - echo "We're done launching the vm, about to start tailing the" - echo "stack.sh log. It will take a second or two to start." - echo - echo "Just CTRL-C at any time to stop tailing." - - set +o xtrace - - while ! ssh -q stack@$PUB_IP "[ -e run.sh.log ]"; do - sleep 1 - done - - ssh stack@$PUB_IP 'tail -f run.sh.log' & - - TAIL_PID=$! - - function kill_tail() { - kill $TAIL_PID - exit 1 - } - - # Let Ctrl-c kill tail and exit - trap kill_tail SIGINT - - echo "Waiting stack.sh to finish..." - while ! ssh -q stack@$PUB_IP "grep -q 'stack.sh completed in' run.sh.log"; do - sleep 1 - done - - kill $TAIL_PID - - if ssh -q stack@$PUB_IP "grep -q 'stack.sh failed' run.sh.log"; then - exit 1 - fi - echo "" - echo "Finished - Zip-a-dee Doo-dah!" - echo "You can then visit the OpenStack Dashboard" - echo "at http://$PUB_IP, and contact other services at the usual ports." -else - echo "################################################################################" - echo "" - echo "All Finished!" - echo "Now, you can monitor the progress of the stack.sh installation by " - echo "tailing /opt/stack/run.sh.log from within your domU." - echo "" - echo "ssh into your domU now: 'ssh stack@$PUB_IP' using your password" - echo "and then do: 'tail -f /opt/stack/run.sh.log'" - echo "" - echo "When the script completes, you can then visit the OpenStack Dashboard" - echo "at http://$PUB_IP, and contact other services at the usual ports." - -fi diff --git a/tools/xen/build_domU_multi.sh b/tools/xen/build_domU_multi.sh deleted file mode 100755 index 130bec5b50..0000000000 --- a/tools/xen/build_domU_multi.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/usr/bin/env bash - -# Echo commands -set -o xtrace - -# Head node host, which runs glance, api, keystone -HEAD_PUB_IP=${HEAD_PUB_IP:-192.168.1.57} -HEAD_MGT_IP=${HEAD_MGT_IP:-172.16.100.57} - -COMPUTE_PUB_IP=${COMPUTE_PUB_IP:-192.168.1.58} -COMPUTE_MGT_IP=${COMPUTE_MGT_IP:-172.16.100.58} - -# Networking params -FLOATING_RANGE=${FLOATING_RANGE:-192.168.1.196/30} - -# Variables common amongst all hosts in the cluster -COMMON_VARS="$STACKSH_PARAMS MYSQL_HOST=$HEAD_MGT_IP RABBIT_HOST=$HEAD_MGT_IP GLANCE_HOSTPORT=$HEAD_MGT_IP:9292 FLOATING_RANGE=$FLOATING_RANGE" - -# Helper to launch containers -function build_domU { - GUEST_NAME=$1 PUB_IP=$2 MGT_IP=$3 DO_SHUTDOWN=$4 TERMINATE=$TERMINATE STACKSH_PARAMS="$COMMON_VARS $5" ./build_domU.sh -} - -# Launch the head node - headnode uses a non-ip domain name, -# because rabbit won't launch with an ip addr hostname :( -build_domU HEADNODE $HEAD_PUB_IP $HEAD_MGT_IP 1 "ENABLED_SERVICES=g-api,g-reg,key,n-api,n-sch,n-vnc,horizon,mysql,rabbit" - -# Wait till the head node is up -while ! curl -L http://$HEAD_PUB_IP | grep -q username; do - echo "Waiting for head node ($HEAD_PUB_IP) to start..." 
- sleep 5 -done - -# Build the HA compute host -build_domU $COMPUTE_PUB_IP $COMPUTE_PUB_IP $COMPUTE_MGT_IP 0 "ENABLED_SERVICES=n-cpu,n-net,n-api" diff --git a/tools/xen/files/fstab b/tools/xen/files/fstab deleted file mode 100644 index 6c9b9818c3..0000000000 --- a/tools/xen/files/fstab +++ /dev/null @@ -1,5 +0,0 @@ -LABEL=vpxroot / ext3 defaults 1 1 -tmpfs /dev/shm tmpfs defaults 0 0 -devpts /dev/pts devpts gid=5,mode=620 0 0 -sysfs /sys sysfs defaults 0 0 -proc /proc proc defaults 0 0 diff --git a/tools/xen/files/hvc0.conf b/tools/xen/files/hvc0.conf deleted file mode 100644 index 4eedaf6ee1..0000000000 --- a/tools/xen/files/hvc0.conf +++ /dev/null @@ -1,10 +0,0 @@ -# hvc0 - getty -# -# This service maintains a getty on hvc0 from the point the system is -# started until it is shut down again. - -start on stopped rc RUNLEVEL=[2345] -stop on runlevel [!2345] - -respawn -exec /sbin/getty -8 9600 hvc0 diff --git a/tools/xen/prepare_dom0.sh b/tools/xen/prepare_dom0.sh deleted file mode 100755 index ce16ada43e..0000000000 --- a/tools/xen/prepare_dom0.sh +++ /dev/null @@ -1,41 +0,0 @@ -#!/bin/sh -set -o xtrace -set -o errexit - -# Install basics for vi and git -yum -y --enablerepo=base install gcc make vim-enhanced zlib-devel openssl-devel - -# Simple but usable vimrc -if [ ! -e /root/.vimrc ]; then - cat > /root/.vimrc <$STAGING_DIR/etc/apt/sources.list -deb http://us.archive.ubuntu.com/ubuntu/ oneiric main restricted -deb-src http://us.archive.ubuntu.com/ubuntu/ oneiric main restricted -deb http://us.archive.ubuntu.com/ubuntu/ oneiric-updates main restricted -deb-src http://us.archive.ubuntu.com/ubuntu/ oneiric-updates main restricted -deb http://us.archive.ubuntu.com/ubuntu/ oneiric universe -deb http://us.archive.ubuntu.com/ubuntu/ oneiric-updates universe -deb http://us.archive.ubuntu.com/ubuntu/ oneiric multiverse -deb http://us.archive.ubuntu.com/ubuntu/ oneiric-updates multiverse -EOF - -# Install basics -chroot $STAGING_DIR apt-get update -chroot $STAGING_DIR apt-get install -y linux-image-$KERNEL_VERSION -chroot $STAGING_DIR apt-get install -y cracklib-runtime curl wget ssh openssh-server tcpdump ethtool -chroot $STAGING_DIR apt-get install -y curl wget ssh openssh-server python-pip git vim-nox sudo -chroot $STAGING_DIR pip install xenapi - -# Install guest utilities -XEGUEST=xe-guest-utilities_5.6.100-651_amd64.deb -wget http://images.ansolabs.com/xen/$XEGUEST -O $XEGUEST -cp $XEGUEST $STAGING_DIR/root -chroot $STAGING_DIR dpkg -i /root/$XEGUEST -chroot $STAGING_DIR update-rc.d -f xe-linux-distribution remove -chroot $STAGING_DIR update-rc.d xe-linux-distribution defaults - -# Make a small cracklib dictionary, so that passwd still works, but we don't -# have the big dictionary. -mkdir -p $STAGING_DIR/usr/share/cracklib -echo a | chroot $STAGING_DIR cracklib-packer - -# Make /etc/shadow, and set the root password -chroot $STAGING_DIR "pwconv" -echo "root:$GUEST_PASSWORD" | chroot $STAGING_DIR chpasswd - -# Put the VPX into UTC. 
-rm -f $STAGING_DIR/etc/localtime - -# Add stack user -chroot $STAGING_DIR groupadd libvirtd -chroot $STAGING_DIR useradd stack -s /bin/bash -d /opt/stack -G libvirtd -echo stack:$GUEST_PASSWORD | chroot $STAGING_DIR chpasswd -echo "stack ALL=(ALL) NOPASSWD: ALL" >> $STAGING_DIR/etc/sudoers - -# Give ownership of /opt/stack to stack user -chroot $STAGING_DIR chown -R stack /opt/stack - -# Make our ip address hostnames look nice at the command prompt -echo "export PS1='${debian_chroot:+($debian_chroot)}\\u@\\H:\\w\\$ '" >> $STAGING_DIR/opt/stack/.bashrc -echo "export PS1='${debian_chroot:+($debian_chroot)}\\u@\\H:\\w\\$ '" >> $STAGING_DIR/root/.bashrc -echo "export PS1='${debian_chroot:+($debian_chroot)}\\u@\\H:\\w\\$ '" >> $STAGING_DIR/etc/profile - -function setup_vimrc { - if [ ! -e $1 ]; then - # Simple but usable vimrc - cat > $1 <&2 - exit 1 - else - echo "$dest_sr" - fi -} - - -find_network() -{ - result=$(xe_min network-list bridge="$1") - if [ "$result" = "" ] - then - result=$(xe_min network-list name-label="$1") - fi - echo "$result" -} - - -find_template() -{ - xe_min template-list other-config:os-vpx=true -} - - -renumber_system_disk() -{ - local v="$1" - local vdi_uuid=$(xe_min vbd-list vm-uuid="$v" type=Disk userdevice=xvda \ - params=vdi-uuid) - if [ "$vdi_uuid" ] - then - local vbd_uuid=$(xe_min vbd-list vm-uuid="$v" vdi-uuid="$vdi_uuid") - xe vbd-destroy uuid="$vbd_uuid" - local new_vbd_uuid=$(xe vbd-create vm-uuid="$v" vdi-uuid="$vdi_uuid" \ - device=0 bootable=true type=Disk) - xe vbd-param-set other-config:owner uuid="$new_vbd_uuid" - fi -} - - -create_vif() -{ - xe vif-create vm-uuid="$1" network-uuid="$2" device="$3" -} - -create_gi_vif() -{ - local v="$1" - # Note that we've made the outbound device eth1, so that it comes up after - # the guest installer VIF, which means that the outbound one wins in terms - # of gateway. - local gi_network_uuid=$(xe_min network-list \ - other-config:is_guest_installer_network=true) - create_vif "$v" "$gi_network_uuid" "0" >/dev/null -} - -create_vm_vif() -{ - local v="$1" - echo "Installing management interface on $BRIDGE_V." - local out_network_uuid=$(find_network "$BRIDGE_V") - create_vif "$v" "$out_network_uuid" "1" >/dev/null -} - -create_management_vif() -{ - local v="$1" - echo "Installing management interface on $BRIDGE_M." - local out_network_uuid=$(find_network "$BRIDGE_M") - create_vif "$v" "$out_network_uuid" "2" >/dev/null -} - - -# This installs the interface for public traffic, only if a bridge is specified -# The interface is not configured at this stage, but it will be, once the admin -# tasks are complete for the services of this VPX -create_public_vif() -{ - local v="$1" - if [[ -z $BRIDGE_P ]] - then - echo "Skipping installation of interface for public traffic." - else - echo "Installing public interface on $BRIDGE_P." - pub_network_uuid=$(find_network "$BRIDGE_P") - create_vif "$v" "$pub_network_uuid" "3" >/dev/null - fi -} - - -label_system_disk() -{ - local v="$1" - local vdi_uuid=$(xe_min vbd-list vm-uuid="$v" type=Disk userdevice=0 \ - params=vdi-uuid) - xe vdi-param-set \ - name-label="$NAME system disk" \ - other-config:os-vpx=true \ - uuid=$vdi_uuid -} - - -create_data_disk() -{ - local v="$1" - - local sys_vdi_uuid=$(xe_min vbd-list vm-uuid="$v" type=Disk params=vdi-uuid) - local data_vdi_uuid=$(xe_min vdi-list other-config:os-vpx-data=true) - - if echo "$data_vdi_uuid" | grep -q , - then - echo "Multiple data disks found -- assuming that you want a new one." 
- data_vdi_uuid="" - else - data_in_use=$(xe_min vbd-list vdi-uuid="$data_vdi_uuid") - if [ "$data_in_use" != "" ] - then - echo "Data disk already in use -- will create another one." - data_vdi_uuid="" - fi - fi - - if [ "$data_vdi_uuid" = "" ] - then - echo -n "Creating new data disk ($DATA_VDI_SIZE)... " - sr_uuid=$(xe_min vdi-list params=sr-uuid uuid="$sys_vdi_uuid") - data_vdi_uuid=$(xe vdi-create name-label="$NAME data disk" \ - sr-uuid="$sr_uuid" \ - type=user \ - virtual-size="$DATA_VDI_SIZE") - xe vdi-param-set \ - other-config:os-vpx-data=true \ - uuid="$data_vdi_uuid" - dom0_uuid=$(xe_min vm-list is-control-domain=true) - vbd_uuid=$(xe vbd-create device=autodetect type=Disk \ - vdi-uuid="$data_vdi_uuid" vm-uuid="$dom0_uuid") - xe vbd-plug uuid=$vbd_uuid - dev=$(xe_min vbd-list params=device uuid=$vbd_uuid) - mke2fs -q -j -m0 /dev/$dev - e2label /dev/$dev vpxstate - xe vbd-unplug uuid=$vbd_uuid - xe vbd-destroy uuid=$vbd_uuid - else - echo -n "Attaching old data disk... " - fi - vbd_uuid=$(xe vbd-create device=2 type=Disk \ - vdi-uuid="$data_vdi_uuid" vm-uuid="$v") - xe vbd-param-set other-config:os-vpx-data=true uuid=$vbd_uuid - echo "done." -} - - -set_kernel_params() -{ - local v="$1" - local args=$KERNEL_PARAMS - local cmdline=$(cat /proc/cmdline) - for word in $cmdline - do - if echo "$word" | grep -q "geppetto" - then - args="$word $args" - fi - done - if [ "$args" != "" ] - then - echo "Passing Geppetto args to VPX: $args." - xe vm-param-set PV-args="$args" uuid="$v" - fi -} - - -set_memory() -{ - local v="$1" - if [ "$RAM" != "" ] - then - echo "Setting RAM to $RAM MiB." - [ "$BALLOONING" == 1 ] && RAM_MIN=$(($RAM / 2)) || RAM_MIN=$RAM - xe vm-memory-limits-set static-min=16MiB static-max=${RAM}MiB \ - dynamic-min=${RAM_MIN}MiB dynamic-max=${RAM}MiB \ - uuid="$v" - fi -} - - -# Make the VM auto-start on server boot. -set_auto_start() -{ - local v="$1" - xe vm-param-set uuid="$v" other-config:auto_poweron=true -} - - -set_all() -{ - local v="$1" - set_kernel_params "$v" - set_memory "$v" - set_auto_start "$v" - label_system_disk "$v" - create_gi_vif "$v" - create_vm_vif "$v" - create_management_vif "$v" - create_public_vif "$v" -} - - -log_vifs() -{ - local v="$1" - - (IFS=, - for vif in $(xe_min vif-list vm-uuid="$v") - do - dev=$(xe_min vif-list uuid="$vif" params=device) - mac=$(xe_min vif-list uuid="$vif" params=MAC | sed -e 's/:/-/g') - echo "eth$dev has MAC $mac." - done - unset IFS) | sort -} - - -destroy_vifs() -{ - local v="$1" - IFS=, - for vif in $(xe_min vif-list vm-uuid="$v") - do - xe vif-destroy uuid="$vif" - done - unset IFS -} - - -get_params "$@" - -thisdir=$(dirname "$0") - -if [ "$FROM_TEMPLATE" ] -then - template_uuid=$(find_template) - name=$(xe_min template-list params=name-label uuid="$template_uuid") - echo -n "Cloning $name... " - vm_uuid=$(xe vm-clone vm="$template_uuid" new-name-label="$name") - xe vm-param-set is-a-template=false uuid="$vm_uuid" - echo $vm_uuid. - - destroy_vifs "$vm_uuid" - set_all "$vm_uuid" -else - if [ ! -f "$VPX_FILE" ] - then - # Search $thisdir/$VPX_FILE too. In particular, this is used when - # installing the VPX from the supp-pack, because we want to be able to - # invoke this script from the RPM and the firstboot script. - if [ -f "$thisdir/$VPX_FILE" ] - then - VPX_FILE="$thisdir/$VPX_FILE" - else - echo "$VPX_FILE does not exist." >&2 - exit 1 - fi - fi - - echo "Found OS-VPX File: $VPX_FILE. " - - dest_sr=$(get_dest_sr) - - echo -n "Installing $NAME... 
" - vm_uuid=$(xe vm-import filename=$VPX_FILE sr-uuid="$dest_sr") - echo $vm_uuid. - - renumber_system_disk "$vm_uuid" - - nl=$(xe_min vm-list params=name-label uuid=$vm_uuid) - xe vm-param-set \ - "name-label=${nl/ import/}" \ - other-config:os-vpx=true \ - uuid=$vm_uuid - - set_all "$vm_uuid" - create_data_disk "$vm_uuid" - - if [ "$AS_TEMPLATE" ] - then - xe vm-param-set uuid="$vm_uuid" is-a-template=true \ - other-config:instant=true - echo -n "Installing VPX from template... " - vm_uuid=$(xe vm-clone vm="$vm_uuid" new-name-label="${nl/ import/}") - xe vm-param-set is-a-template=false uuid="$vm_uuid" - echo "$vm_uuid." - fi -fi - - -log_vifs "$vm_uuid" - -echo -n "Starting VM... " -xe vm-start uuid=$vm_uuid -echo "done." - - -show_ip() -{ - ip_addr=$(echo "$1" | sed -n "s,^.*"$2"/ip: \([^;]*\).*$,\1,p") - echo -n "IP address for $3: " - if [ "$ip_addr" = "" ] - then - echo "did not appear." - else - echo "$ip_addr." - fi -} - - -if [ "$WAIT_FOR_NETWORK" ] -then - echo "Waiting for network configuration... " - i=0 - while [ $i -lt 600 ] - do - ip=$(xe_min vm-list params=networks uuid=$vm_uuid) - if [ "$ip" != "" ] - then - show_ip "$ip" "1" "$BRIDGE_M" - if [[ $BRIDGE_P ]] - then - show_ip "$ip" "2" "$BRIDGE_P" - fi - echo "Installation complete." - exit 0 - fi - sleep 10 - let i=i+1 - done -fi diff --git a/tools/xen/scripts/mkxva b/tools/xen/scripts/mkxva deleted file mode 100755 index dcdee61ac2..0000000000 --- a/tools/xen/scripts/mkxva +++ /dev/null @@ -1,365 +0,0 @@ -#!/bin/bash -# -# Copyright (c) 2011 Citrix Systems, Inc. -# Copyright 2011 OpenStack LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -set -eu - -set -o xtrace - -VBOX_IMG=/output/packages/vbox-img - -usage() { - cat >&2 < -t -x - -o: Colon-separated list of output filenames (one for each type). - -p: Create a disk label and partition within the output image - -t: Colon-separated list of types of output file. xva and ovf supported. - -x: XML filenames (one for each type) - -EOF - exit 1 -} - -# parse cmdline - -OPT_USE_PARTITION= -OPT_TYPES= -OPT_OUTPUT_FILES= -OPT_XML_FILES= - -while getopts o:pt:x: o -do case "$o" in - o) OPT_OUTPUT_FILES=$(echo "$OPTARG" | sed -e 's/\s*:\s*/ /g') - ;; - p) OPT_USE_PARTITION=1 - ;; - t) OPT_TYPES=$(echo "$OPTARG" | sed -e 's/\s*:\s*/ /g') - ;; - x) OPT_XML_FILES=$(echo "$OPTARG" | sed -e 's/\s*:\s*/ /g') - ;; - [?]) usage - ;; - esac -done -shift $((OPTIND-1)) - -[ $# -ne 3 ] && usage -FS_STAGING="$1" -FS_SIZE_MIB="$2" -TMPDIR="$3" - -if [ "$UID" = "0" ] -then - SUDO= -else - SUDO=sudo -fi - -if [ "$FS_SIZE_MIB" = "0" ] -then - # Just create a dummy file. This allows developers to bypass bits of - # the build by setting the size to 0. 
- touch $OPT_OUTPUT_FILES - exit 0 -fi - -# create temporary files and dirs -FS_TMPFILE=$(mktemp "$TMPDIR/mkxva-fsimg-XXXXX") -XVA_TARBALL_STAGING=$(mktemp -d "$TMPDIR/mkxva-tarball-staging-XXXXX") -OVF_STAGING=$(mktemp -d "$TMPDIR/mkxva-ovf-staging-XXXXX") - -# Find udevsettle and udevtrigger on this installation -if [ -x "/sbin/udevsettle" ] ; then - UDEVSETTLE="/sbin/udevsettle --timeout=30" -elif [ -x "/sbin/udevadm" ] ; then - UDEVSETTLE='/sbin/udevadm settle' -else - UDEVSETTLE='/bin/true' -fi - -if [ -x "/sbin/udevtrigger" ] ; then - UDEVTRIGGER=/sbin/udevtrigger -elif [ -x "/sbin/udevadm" ] ; then - UDEVTRIGGER='/sbin/udevadm trigger' -else - UDEVTRIGGER= -fi - -# CLEAN_ variables track devices and mounts that must be taken down -# no matter how the script exits. Loop devices are vulnerable to -# exhaustion so we make every effort to remove them - -CLEAN_KPARTX= -CLEAN_LOSETUP= -CLEAN_MOUNTPOINT= - -cleanup_devices () { - if [ -n "$CLEAN_MOUNTPOINT" ] ; then - echo "Mountpoint $CLEAN_MOUNTPOINT removed on abnormal exit" - $SUDO umount "$CLEAN_MOUNTPOINT" || echo "umount failed" - rmdir "$CLEAN_MOUNTPOINT" || echo "rmdir failed" - fi - if [ -n "$CLEAN_KPARTX" ] ; then - echo "kpartx devices for $CLEAN_KPARTX removed on abnormal exit" - $SUDO kpartx -d "$CLEAN_KPARTX" || echo "kpartx -d failed" - fi - if [ -n "$CLEAN_LOSETUP" ] ; then - echo "Loop device $CLEAN_LOSETUP removed on abnormal exit" - $SUDO losetup -d "$CLEAN_LOSETUP" # Allow losetup errors to propagate - fi -} - -trap "cleanup_devices" EXIT - -make_fs_inner () { - local staging="$1" - local output="$2" - local options="$3" - CLEAN_MOUNTPOINT=$(mktemp -d "$TMPDIR/mkfs-XXXXXX") - - # copy staging dir contents to fs image - $SUDO mount $options "$output" "$CLEAN_MOUNTPOINT" - $SUDO tar -C "$staging" -c . | tar -C "$CLEAN_MOUNTPOINT" -x - $SUDO umount "$CLEAN_MOUNTPOINT" - rmdir "$CLEAN_MOUNTPOINT" - CLEAN_MOUNTPOINT= -} - -# Turn a staging dir into an ext3 filesystem within a partition -make_fs_in_partition () { - local staging="$1" - local output="$2" - - # create new empty disk - dd if=/dev/zero of="$output" bs=1M count=$FS_SIZE_MIB - # Set up a loop device on the empty disk image - local loopdevice=$($SUDO losetup -f) - $SUDO losetup "$loopdevice" "$output" - CLEAN_LOSETUP="$loopdevice" - # Create a partition table and single partition. 
- # Start partition at sector 63 to allow space for grub - cat < "$CLEAN_MOUNTPOINT/boot/grub/grub.conf" </dev/null - gzip "$file" - else - local file="$outputdir"/$(printf "%08d" $i) - dd if="$diskimg" of="$file" skip=$i bs=1M count=1 2>/dev/null - local chksum=$(sha1sum -b "$file") - echo -n "${chksum/ */}" > "$file.checksum" - fi - i=$(($i + 1)) - done -} - -if [ -n "$OPT_USE_PARTITION" ] ; then - make_fs_in_partition "$FS_STAGING" "$FS_TMPFILE" -else - make_fs "$FS_STAGING" "$FS_TMPFILE" -fi - -VDI_SIZE=$(stat --format=%s "$FS_TMPFILE") - -make_xva () { - local output_file="$1" - local xml_file="$2" - local subdir - local rio - - if [[ `cat $xml_file` =~ "\s*class\s*VDI\s*\s*\s*id\s*(Ref:[0-9]+)" ]] - then - # it's a rio style xva - subdir="${BASH_REMATCH[1]}"; - rio=1 - else - # it's a geneva style xva - subdir="xvda" - rio=0 - fi - - cp "$xml_file" "$XVA_TARBALL_STAGING"/ova.xml - sed -i -e "s/@VDI_SIZE@/$VDI_SIZE/" "$XVA_TARBALL_STAGING"/ova.xml - mkdir "$XVA_TARBALL_STAGING/$subdir" - splitvdi "$FS_TMPFILE" "$XVA_TARBALL_STAGING/$subdir" "$rio" - TARFILE_MEMBERS=$(cd "$XVA_TARBALL_STAGING" && echo ova.xml $subdir/*) - tar -C "$XVA_TARBALL_STAGING" --format=v7 -c $TARFILE_MEMBERS -f "$output_file.tmp" - mv "$output_file.tmp" "$output_file" -} - -make_ovf () { - local output_dir="$1" - local xml_file="$2" - local output_base=$(basename "$output_dir") - local disk="$output_dir/${output_base}.vmdk" - local manifest="$output_dir/${output_base}.mf" - local ovf="$output_dir/${output_base}.ovf" - - mkdir -p "$output_dir" - rm -f "$disk" - $VBOX_IMG convert --srcfilename="$FS_TMPFILE" --dstfilename="$disk" \ - --srcformat RAW --dstformat VMDK --variant Stream - chmod 0644 "$disk" - - local n_bytes=$(stat --printf=%s "$disk") - cp "$xml_file" "$ovf" - sed -i -e "s/@MKXVA_DISK_FULLSIZE@/$VDI_SIZE/" "$ovf" - sed -i -e "s/@MKXVA_DISK_SIZE@/$n_bytes/" "$ovf" - sed -i -e "s/@MKXVA_DISK_MIB_SIZE@/$FS_SIZE_MIB/" "$ovf" - sed -i -e "s/@MKXVA_DISK_FILENAME@/${output_base}.vmdk/" "$ovf" - - for to_sign in "$ovf" "$disk" - do - local sha1_sum=$(sha1sum "$to_sign" | cut -d' ' -f1) - echo "SHA1($(basename "$to_sign"))= $sha1_sum" >> $manifest - done -} - -output_files="$OPT_OUTPUT_FILES" -xml_files="$OPT_XML_FILES" -# Iterate through the type list creating the relevant VMs -for create_type in $OPT_TYPES -do - # Shift one parameter from the front of the lists - create_output_file="${output_files%% *}" - output_files="${output_files#* }" - create_xml_file="${xml_files%% *}" - xml_files="${xml_files#* }" - echo "Creating $create_type appliance $create_output_file using metadata file $create_xml_file" - - case "$create_type" in - xva) - make_xva "$create_output_file" "$create_xml_file" - ;; - ovf) - make_ovf "$create_output_file" "$create_xml_file" - ;; - *) - echo "Unknown VM type '$create_type'" - exit 1 - ;; - esac - -done - - -# cleanup -if [ -z "${DO_NOT_CLEANUP:-}" ] ; then - rm -rf "$XVA_TARBALL_STAGING" - rm -f "$FS_TMPFILE" -fi diff --git a/tools/xen/scripts/uninstall-os-vpx.sh b/tools/xen/scripts/uninstall-os-vpx.sh deleted file mode 100755 index a82f3a05fb..0000000000 --- a/tools/xen/scripts/uninstall-os-vpx.sh +++ /dev/null @@ -1,101 +0,0 @@ -#!/bin/bash -# -# Copyright (c) 2011 Citrix Systems, Inc. -# Copyright 2011 OpenStack LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -remove_data= -if [ "$1" = "--remove-data" ] -then - remove_data=1 -fi - -set -eu - -xe_min() -{ - local cmd="$1" - shift - /opt/xensource/bin/xe "$cmd" --minimal "$@" -} - -destroy_vdi() -{ - local vbd_uuid="$1" - local type=$(xe_min vbd-list uuid=$vbd_uuid params=type) - local dev=$(xe_min vbd-list uuid=$vbd_uuid params=userdevice) - local vdi_uuid=$(xe_min vbd-list uuid=$vbd_uuid params=vdi-uuid) - - if [ "$type" = 'Disk' ] && [ "$dev" != 'xvda' ] && [ "$dev" != '0' ] - then - echo -n "Destroying data disk... " - xe vdi-destroy uuid=$vdi_uuid - echo "done." - fi -} - -uninstall() -{ - local vm_uuid="$1" - local power_state=$(xe_min vm-list uuid=$vm_uuid params=power-state) - - if [ "$power_state" != "halted" ] - then - echo -n "Shutting down VM... " - xe vm-shutdown vm=$vm_uuid force=true - echo "done." - fi - - if [ "$remove_data" = "1" ] - then - for v in $(xe_min vbd-list vm-uuid=$vm_uuid | sed -e 's/,/ /g') - do - destroy_vdi "$v" - done - fi - - echo -n "Deleting VM... " - xe vm-uninstall vm=$vm_uuid force=true >/dev/null - echo "done." -} - -uninstall_template() -{ - local vm_uuid="$1" - - if [ "$remove_data" = "1" ] - then - for v in $(xe_min vbd-list vm-uuid=$vm_uuid | sed -e 's/,/ /g') - do - destroy_vdi "$v" - done - fi - - echo -n "Deleting template... " - xe template-uninstall template-uuid=$vm_uuid force=true >/dev/null - echo "done." 
-} - - -for u in $(xe_min vm-list other-config:os-vpx=true | sed -e 's/,/ /g') -do - uninstall "$u" -done - -for u in $(xe_min template-list other-config:os-vpx=true | sed -e 's/,/ /g') -do - uninstall_template "$u" -done diff --git a/tools/xen/templates/hosts.in b/tools/xen/templates/hosts.in deleted file mode 100644 index 8ab4c3e919..0000000000 --- a/tools/xen/templates/hosts.in +++ /dev/null @@ -1,8 +0,0 @@ -127.0.0.1 localhost -127.0.0.1 %HOSTNAME% -::1 localhost ip6-localhost ip6-loopback -fe00::0 ip6-localnet -ff00::0 ip6-mcastprefix -ff02::1 ip6-allnodes -ff02::2 ip6-allrouters - diff --git a/tools/xen/templates/interfaces.in b/tools/xen/templates/interfaces.in deleted file mode 100644 index 49c3d681d9..0000000000 --- a/tools/xen/templates/interfaces.in +++ /dev/null @@ -1,21 +0,0 @@ -auto lo -iface lo inet loopback - -auto eth0 -iface eth0 inet dhcp - -auto eth1 -iface eth1 inet static - address @ETH1_IP@ - netmask @ETH1_NETMASK@ -post-up ethtool -K eth1 tx off - -auto eth2 -iface eth2 inet static - address @ETH2_IP@ - netmask @ETH2_NETMASK@ - -auto eth3 -iface eth3 inet static - address @ETH3_IP@ - netmask @ETH3_NETMASK@ diff --git a/tools/xen/templates/menu.lst.in b/tools/xen/templates/menu.lst.in deleted file mode 100644 index 8bc6426251..0000000000 --- a/tools/xen/templates/menu.lst.in +++ /dev/null @@ -1,6 +0,0 @@ -default 0 - -title default - root (hd0,0) - kernel /boot/vmlinuz-@KERNEL_VERSION@ ro root=LABEL=vpxroot console=xvc0 - initrd /boot/initrd.img-@KERNEL_VERSION@ diff --git a/tools/xen/templates/ova.xml.in b/tools/xen/templates/ova.xml.in deleted file mode 100644 index 8443dcb856..0000000000 --- a/tools/xen/templates/ova.xml.in +++ /dev/null @@ -1,14 +0,0 @@ - - - - - - - - - - - - diff --git a/tox.ini b/tox.ini new file mode 100644 index 0000000000..26cd68c031 --- /dev/null +++ b/tox.ini @@ -0,0 +1,56 @@ +[tox] +minversion = 3.18.0 +skipsdist = True +envlist = bashate + +[testenv] +usedevelop = False +basepython = python3 + +[testenv:bashate] +# if you want to test out some changes you have made to bashate +# against devstack, just set BASHATE_INSTALL_PATH=/path/... 
to your +# modified bashate tree +deps = + {env:BASHATE_INSTALL_PATH:bashate} +allowlist_externals = bash +commands = bash -c "find {toxinidir} \ + -not \( -type d -name .?\* -prune \) \ + -not \( -type d -name doc -prune \) \ + -not \( -type f -name localrc -prune \) \ + -type f \ + -not -name \*~ \ + -not -name \*.md \ + -not -name stack-screenrc \ + -not -name \*.orig \ + -not -name \*.rej \ + \( \ + -name \*.sh -or \ + -name \*rc -or \ + -name functions\* -or \ + -wholename \*/inc/\* -or \ + -wholename \*/lib/\* \ + \) \ + -print0 | xargs -0 bashate -v -iE006 -eE005,E042" + +[testenv:docs] +deps = + -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} + -r{toxinidir}/doc/requirements.txt +allowlist_externals = bash +setenv = + TOP_DIR={toxinidir} +commands = + sphinx-build -W -b html -d doc/build/doctrees doc/source doc/build/html + +[testenv:pdf-docs] +deps = {[testenv:docs]deps} +allowlist_externals = + make +commands = + sphinx-build -W -b latex doc/source doc/build/pdf + make -C doc/build/pdf + +[testenv:venv] +deps = -r{toxinidir}/doc/requirements.txt +commands = {posargs} diff --git a/unstack.sh b/unstack.sh new file mode 100755 index 0000000000..8e8996c63b --- /dev/null +++ b/unstack.sh @@ -0,0 +1,194 @@ +#!/bin/bash + +# **unstack.sh** + +# Stops that which is started by ``stack.sh`` (mostly) +# mysql and rabbit are left running as OpenStack code refreshes +# do not require them to be restarted. +# +# Stop all processes by setting ``UNSTACK_ALL`` or specifying ``-a`` +# on the command line + +UNSTACK_ALL=${UNSTACK_ALL:-""} + +while getopts ":a" opt; do + case $opt in + a) + UNSTACK_ALL="-1" + ;; + esac +done + +# Keep track of the current DevStack directory. +TOP_DIR=$(cd $(dirname "$0") && pwd) +FILES=$TOP_DIR/files + +# Import common functions +source $TOP_DIR/functions + +# Import database library +source $TOP_DIR/lib/database + +# Load local configuration +source $TOP_DIR/openrc + +# Destination path for service data +DATA_DIR=${DATA_DIR:-${DEST}/data} + +if [[ $EUID -eq 0 ]]; then + echo "You are running this script as root." + echo "It might work but you will have a better day running it as $STACK_USER" + exit 1 +fi + + +# Configure Projects +# ================== + +# Determine what system we are running on. 
This provides ``os_VENDOR``, +# ``os_RELEASE``, ``os_PACKAGE``, ``os_CODENAME`` and ``DISTRO`` +GetDistro + +# Plugin Phase 0: override_defaults - allow plugins to override +# defaults before other services are run +run_phase override_defaults + +# Import apache functions +source $TOP_DIR/lib/apache + +# Import TLS functions +source $TOP_DIR/lib/tls + +# Source project function libraries +source $TOP_DIR/lib/infra +source $TOP_DIR/lib/lvm +source $TOP_DIR/lib/horizon +source $TOP_DIR/lib/keystone +source $TOP_DIR/lib/glance +source $TOP_DIR/lib/nova +source $TOP_DIR/lib/placement +source $TOP_DIR/lib/cinder +source $TOP_DIR/lib/swift +source $TOP_DIR/lib/neutron +source $TOP_DIR/lib/ldap +source $TOP_DIR/lib/dstat +source $TOP_DIR/lib/atop +source $TOP_DIR/lib/etcd3 + +# Extras Source +# -------------- + +# Phase: source +if [[ -d $TOP_DIR/extras.d ]]; then + for i in $TOP_DIR/extras.d/*.sh; do + [[ -r $i ]] && source $i source + done +fi + +load_plugin_settings + +set -o xtrace + +# Run extras +# ========== + +# Phase: unstack +run_phase unstack + +# Call service stop + +if is_service_enabled nova; then + stop_nova + cleanup_nova +fi + +if is_service_enabled placement; then + stop_placement +fi + +if is_service_enabled glance; then + stop_glance +fi + +if is_service_enabled keystone; then + stop_keystone +fi + +# Swift runs daemons +if is_service_enabled s-proxy; then + stop_swift + cleanup_swift +fi + +# Apache has the WSGI processes +if is_service_enabled horizon; then + stop_horizon +fi + +# Kill TLS proxies and cleanup certificates +if is_service_enabled tls-proxy; then + stop_tls_proxy + cleanup_CA +fi + +SCSI_PERSIST_DIR=$CINDER_STATE_PATH/volumes/* + +# BUG: tgt likes to exit 1 on service stop if everything isn't +# perfect, we should clean up cinder stop paths. + +# Get the iSCSI volumes +if is_service_enabled cinder; then + stop_cinder || /bin/true + cleanup_cinder || /bin/true +fi + +if [[ -n "$UNSTACK_ALL" ]]; then + # Stop MySQL server + if is_service_enabled mysql; then + stop_service mysql + fi + + if is_service_enabled postgresql; then + stop_service postgresql + fi + + # Stop rabbitmq-server + if is_service_enabled rabbit; then + stop_service rabbitmq-server + fi +fi + +if is_service_enabled neutron; then + stop_neutron + cleanup_neutron +fi + +if is_service_enabled etcd3; then + stop_etcd3 + cleanup_etcd3 +fi + +if is_service_enabled openstack-cli-server; then + stop_service devstack@openstack-cli-server +fi + +stop_dstat + +if is_service_enabled atop; then + stop_atop +fi + +# NOTE: Cinder automatically installs the lvm2 package, independently of the +# enabled backends. So if Cinder is enabled, and installed successfully we are +# sure lvm2 (lvremove, /etc/lvm/lvm.conf, etc.) is here. +if is_service_enabled cinder && is_package_installed lvm2; then + clean_lvm_filter +fi + +clean_pyc_files +rm -Rf $DEST/async + +# Clean any safe.directory items we wrote into the global +# gitconfig. We can identify the relevant ones by checking that they +# point to somewhere in our $DEST directory. +sudo sed -i "\+directory = ${DEST}+ d" /etc/gitconfig
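+# (Illustration, assuming the default DEST=/opt/stack: the sed above removes
+# lines such as "directory = /opt/stack/devstack" from the [safe] section of
+# /etc/gitconfig, leaving safe.directory entries outside $DEST untouched.)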