From 4bf4de25ea487ceb7005dc63d01f73fe56a13a16 Mon Sep 17 00:00:00 2001 From: Chris Patterson Date: Mon, 29 Nov 2021 10:59:39 -0500 Subject: [PATCH 0001/2310] sources/azure: remove unused remnants related to agent command (#1119) Some references were missed in the removal of the agent command in PR #799. This simply removes the remaining references. Signed-off-by: Chris Patterson --- cloudinit/sources/DataSourceAzure.py | 6 +-- doc/examples/cloud-config-datasources.txt | 1 - tests/unittests/test_datasource/test_azure.py | 46 +++++-------------- 3 files changed, 13 insertions(+), 40 deletions(-) diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 93493fa01a4..6c1bc085cd7 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -52,8 +52,6 @@ DS_NAME = 'Azure' DEFAULT_METADATA = {"instance-id": "iid-AZURE-NODE"} -AGENT_START = ['service', 'walinuxagent', 'start'] -AGENT_START_BUILTIN = "__builtin__" BOUNCE_COMMAND_IFUP = [ 'sh', '-xc', "i=$interface; x=0; ifdown $i || x=$?; ifup $i || x=$?; exit $x" @@ -262,7 +260,6 @@ def get_resource_disk_on_freebsd(port_id): PLATFORM_ENTROPY_SOURCE = None BUILTIN_DS_CONFIG = { - 'agent_command': AGENT_START_BUILTIN, 'data_dir': AGENT_SEED_DIR, 'set_hostname': True, 'hostname_bounce': { @@ -1525,8 +1522,7 @@ def _negotiate(self): dhclient_lease_file, pubkey_info=pubkey_info) - LOG.debug("negotiating with fabric via agent command %s", - self.ds_cfg['agent_command']) + LOG.debug("negotiating with fabric") try: fabric_data = metadata_func() except Exception as e: diff --git a/doc/examples/cloud-config-datasources.txt b/doc/examples/cloud-config-datasources.txt index 13bb687c36b..d1a4d79eb7f 100644 --- a/doc/examples/cloud-config-datasources.txt +++ b/doc/examples/cloud-config-datasources.txt @@ -46,7 +46,6 @@ datasource: local-hostname: myhost.internal Azure: - agent_command: [service, walinuxagent, start] set_hostname: True hostname_bounce: interface: eth0 diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index d02740496f3..995d2b10583 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -652,7 +652,7 @@ def _get_mockds(self): ]) return dsaz - def _get_ds(self, data, agent_command=None, distro='ubuntu', + def _get_ds(self, data, distro='ubuntu', apply_network=None, instance_id=None): def _wait_for_files(flist, _maxwait=None, _naplen=None): @@ -722,8 +722,6 @@ def _dmi_mocks(key): distro = distro_cls(distro, data.get('sys_cfg', {}), self.paths) dsrc = dsaz.DataSourceAzure( data.get('sys_cfg', {}), distro=distro, paths=self.paths) - if agent_command is not None: - dsrc.ds_cfg['agent_command'] = agent_command if apply_network is not None: dsrc.ds_cfg['apply_network_config'] = apply_network @@ -921,7 +919,7 @@ def test_get_data_on_ubuntu_will_not_remove_network_scripts_disabled(self): def test_crawl_metadata_returns_structured_data_and_caches_nothing(self): """Return all structured metadata and cache no class attributes.""" - yaml_cfg = "{agent_command: my_command}\n" + yaml_cfg = "" odata = {'HostName': "myhost", 'UserName': "myuser", 'UserData': {'text': 'FOOBAR', 'encoding': 'plain'}, 'dscfg': {'text': yaml_cfg, 'encoding': 'plain'}} @@ -931,7 +929,7 @@ def test_crawl_metadata_returns_structured_data_and_caches_nothing(self): expected_cfg = { 'PreprovisionedVMType': None, 'PreprovisionedVm': False, - 'datasource': {'Azure': {'agent_command': 'my_command'}}, + 
'datasource': {'Azure': {}}, 'system_info': {'default_user': {'name': 'myuser'}}} expected_metadata = { 'azure_data': { @@ -1449,19 +1447,16 @@ def test_ovf_can_include_unicode(self): def test_dsaz_report_ready_returns_true_when_report_succeeds( self): dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) - dsrc.ds_cfg['agent_command'] = '__builtin__' self.assertTrue(dsrc._report_ready(lease=mock.MagicMock())) def test_dsaz_report_ready_returns_false_and_does_not_propagate_exc( self): dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) - dsrc.ds_cfg['agent_command'] = '__builtin__' self.m_get_metadata_from_fabric.side_effect = Exception self.assertFalse(dsrc._report_ready(lease=mock.MagicMock())) def test_dsaz_report_failure_returns_true_when_report_succeeds(self): dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) - dsrc.ds_cfg['agent_command'] = '__builtin__' with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata: # mock crawl metadata failure to cause report failure @@ -1475,7 +1470,6 @@ def test_dsaz_report_failure_returns_true_when_report_succeeds(self): def test_dsaz_report_failure_returns_false_and_does_not_propagate_exc( self): dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) - dsrc.ds_cfg['agent_command'] = '__builtin__' with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata, \ mock.patch.object(dsrc, '_ephemeral_dhcp_ctx') \ @@ -1505,7 +1499,6 @@ def test_dsaz_report_failure_returns_false_and_does_not_propagate_exc( def test_dsaz_report_failure_description_msg(self): dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) - dsrc.ds_cfg['agent_command'] = '__builtin__' with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata: # mock crawl metadata failure to cause report failure @@ -1518,7 +1511,6 @@ def test_dsaz_report_failure_description_msg(self): def test_dsaz_report_failure_no_description_msg(self): dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) - dsrc.ds_cfg['agent_command'] = '__builtin__' with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata: m_crawl_metadata.side_effect = Exception @@ -1529,7 +1521,6 @@ def test_dsaz_report_failure_no_description_msg(self): def test_dsaz_report_failure_uses_cached_ephemeral_dhcp_ctx_lease(self): dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) - dsrc.ds_cfg['agent_command'] = '__builtin__' with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata, \ mock.patch.object(dsrc, '_ephemeral_dhcp_ctx') \ @@ -1558,7 +1549,6 @@ def test_dsaz_report_failure_uses_cached_ephemeral_dhcp_ctx_lease(self): def test_dsaz_report_failure_no_net_uses_new_ephemeral_dhcp_lease(self): dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) - dsrc.ds_cfg['agent_command'] = '__builtin__' with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata, \ mock.patch.object(dsrc.distro.networking, 'is_up') \ @@ -1584,7 +1574,6 @@ def test_dsaz_report_failure_no_net_uses_new_ephemeral_dhcp_lease(self): def test_dsaz_report_failure_no_net_and_no_dhcp_uses_fallback_lease( self): dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) - dsrc.ds_cfg['agent_command'] = '__builtin__' with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata, \ mock.patch.object(dsrc.distro.networking, 'is_up') \ @@ -1609,14 +1598,12 @@ def test_dsaz_report_failure_no_net_and_no_dhcp_uses_fallback_lease( def test_exception_fetching_fabric_data_doesnt_propagate(self): """Errors communicating with fabric should warn, but return 
True.""" dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) - dsrc.ds_cfg['agent_command'] = '__builtin__' self.m_get_metadata_from_fabric.side_effect = Exception ret = self._get_and_setup(dsrc) self.assertTrue(ret) def test_fabric_data_included_in_metadata(self): dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) - dsrc.ds_cfg['agent_command'] = '__builtin__' self.m_get_metadata_from_fabric.return_value = {'test': 'value'} ret = self._get_and_setup(dsrc) self.assertTrue(ret) @@ -1672,7 +1659,6 @@ def test_instance_id_from_dmidecode_used(self): def test_instance_id_from_dmidecode_used_for_builtin(self): ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) - ds.ds_cfg['agent_command'] = '__builtin__' ds.get_data() self.assertEqual(self.instance_id, ds.metadata['instance-id']) @@ -2099,13 +2085,11 @@ def tearDown(self): self.patches.close() super(TestAzureBounce, self).tearDown() - def _get_ds(self, ovfcontent=None, agent_command=None): + def _get_ds(self, ovfcontent=None): if ovfcontent is not None: populate_dir(os.path.join(self.paths.seed_dir, "azure"), {'ovf-env.xml': ovfcontent}) dsrc = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths) - if agent_command is not None: - dsrc.ds_cfg['agent_command'] = agent_command return dsrc def _get_and_setup(self, dsrc): @@ -2161,8 +2145,7 @@ def test_force_performs_bounce_regardless(self, perform_hostname_bounce): host_name = 'unchanged-host-name' self.get_hostname.return_value = host_name cfg = {'hostname_bounce': {'policy': 'force'}} - dsrc = self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg), - agent_command=['not', '__builtin__']) + dsrc = self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg)) ret = self._get_and_setup(dsrc) self.assertTrue(ret) self.assertEqual(1, perform_hostname_bounce.call_count) @@ -2171,8 +2154,7 @@ def test_bounce_skipped_on_ifupdown_absent(self): host_name = 'unchanged-host-name' self.get_hostname.return_value = host_name cfg = {'hostname_bounce': {'policy': 'force'}} - dsrc = self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg), - agent_command=['not', '__builtin__']) + dsrc = self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg)) patch_path = MOCKPATH + 'subp.which' with mock.patch(patch_path) as m_which: m_which.return_value = None @@ -2187,8 +2169,7 @@ def test_different_hostnames_sets_hostname(self): expected_hostname = 'azure-expected-host-name' self.get_hostname.return_value = 'default-host-name' dsrc = self._get_ds( - self.get_ovf_env_with_dscfg(expected_hostname, {}), - agent_command=['not', '__builtin__']) + self.get_ovf_env_with_dscfg(expected_hostname, {})) ret = self._get_and_setup(dsrc) self.assertTrue(ret) self.assertEqual(expected_hostname, @@ -2200,8 +2181,7 @@ def test_different_hostnames_performs_bounce( expected_hostname = 'azure-expected-host-name' self.get_hostname.return_value = 'default-host-name' dsrc = self._get_ds( - self.get_ovf_env_with_dscfg(expected_hostname, {}), - agent_command=['not', '__builtin__']) + self.get_ovf_env_with_dscfg(expected_hostname, {})) ret = self._get_and_setup(dsrc) self.assertTrue(ret) self.assertEqual(1, perform_hostname_bounce.call_count) @@ -2210,8 +2190,7 @@ def test_different_hostnames_sets_hostname_back(self): initial_host_name = 'default-host-name' self.get_hostname.return_value = initial_host_name dsrc = self._get_ds( - self.get_ovf_env_with_dscfg('some-host-name', {}), - agent_command=['not', '__builtin__']) + self.get_ovf_env_with_dscfg('some-host-name', {})) ret = self._get_and_setup(dsrc) 
self.assertTrue(ret)
         self.assertEqual(initial_host_name,
@@ -2224,8 +2203,7 @@ def test_failure_in_bounce_still_resets_host_name(
         initial_host_name = 'default-host-name'
         self.get_hostname.return_value = initial_host_name
         dsrc = self._get_ds(
-            self.get_ovf_env_with_dscfg('some-host-name', {}),
-            agent_command=['not', '__builtin__'])
+            self.get_ovf_env_with_dscfg('some-host-name', {}))
         ret = self._get_and_setup(dsrc)
         self.assertTrue(ret)
         self.assertEqual(initial_host_name,
@@ -2240,7 +2218,7 @@ def test_environment_correct_for_bounce_command(
         self.get_hostname.return_value = old_hostname
         cfg = {'hostname_bounce': {'interface': interface, 'policy': 'force'}}
         data = self.get_ovf_env_with_dscfg(hostname, cfg)
-        dsrc = self._get_ds(data, agent_command=['not', '__builtin__'])
+        dsrc = self._get_ds(data)
         ret = self._get_and_setup(dsrc)
         self.assertTrue(ret)
         self.assertEqual(1, self.subp.call_count)
@@ -2254,7 +2232,7 @@ def test_default_bounce_command_ifup_used_by_default(
        self, mock_get_boot_telemetry):
         cfg = {'hostname_bounce': {'policy': 'force'}}
         data = self.get_ovf_env_with_dscfg('some-hostname', cfg)
-        dsrc = self._get_ds(data, agent_command=['not', '__builtin__'])
+        dsrc = self._get_ds(data)
         ret = self._get_and_setup(dsrc)
         self.assertTrue(ret)
         self.assertEqual(1, self.subp.call_count)

From c39d4f455d6663948c06c1f8186ab69b24ea0013 Mon Sep 17 00:00:00 2001
From: dermotbradley
Date: Tue, 30 Nov 2021 20:08:42 +0000
Subject: [PATCH 0002/2310] cc_ssh_authkey_fingerprints.py: prevent duplicate
 messages on console (#1081)

When cloud-init is configured to show SSH user key fingerprints during
boot, two copies of the same message appear on the console for each
user. This appears to be because the util.multi_log call defaults to
sending output both to the console directly and to stderr (which also
ends up on the console). This change sends the messages only to the
console directly.
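
A minimal sketch of the difference (relying on multi_log's defaults of
console=True and stderr=True; the message text is illustrative):

    util.multi_log(msg)                    # console + stderr: the text
                                           # shows up twice on console
    util.multi_log(msg, console=True,
                   stderr=False)           # console only: shown once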
---
 .../config/cc_ssh_authkey_fingerprints.py |  2 +-
 .../modules/test_keys_to_console.py       | 38 +++++++++++++++++++
 2 files changed, 39 insertions(+), 1 deletion(-)

diff --git a/cloudinit/config/cc_ssh_authkey_fingerprints.py b/cloudinit/config/cc_ssh_authkey_fingerprints.py
index 05d30ad1a9c..5323522cf64 100755
--- a/cloudinit/config/cc_ssh_authkey_fingerprints.py
+++ b/cloudinit/config/cc_ssh_authkey_fingerprints.py
@@ -70,7 +70,7 @@ def _pprint_key_entries(user, key_fn, key_entries, hash_meth='sha256',
     if not key_entries:
         message = ("%sno authorized SSH keys fingerprints found for user %s.\n"
                    % (prefix, user))
-        util.multi_log(message)
+        util.multi_log(message, console=True, stderr=False)
         return
     tbl_fields = ['Keytype', 'Fingerprint (%s)' % (hash_meth), 'Options',
                   'Comment']
diff --git a/tests/integration_tests/modules/test_keys_to_console.py b/tests/integration_tests/modules/test_keys_to_console.py
index 39e06b55cb2..e79db3c7591 100644
--- a/tests/integration_tests/modules/test_keys_to_console.py
+++ b/tests/integration_tests/modules/test_keys_to_console.py
@@ -23,6 +23,15 @@ emit_keys_to_console: false
 """

+ENABLE_KEYS_TO_CONSOLE_USER_DATA = """\
+#cloud-config
+ssh:
+  emit_keys_to_console: true
+users:
+  - default
+  - name: barfoo
+"""
+

 @pytest.mark.user_data(BLACKLIST_USER_DATA)
 class TestKeysToConsoleBlacklist:
@@ -70,3 +79,32 @@ def test_header_excluded(self, class_client):
     def test_footer_excluded(self, class_client):
         syslog = class_client.read_from_file("/var/log/syslog")
         assert "END SSH HOST KEY FINGERPRINTS" not in syslog
+
+
+@pytest.mark.user_data(ENABLE_KEYS_TO_CONSOLE_USER_DATA)
+@pytest.mark.ec2
+@pytest.mark.lxd_container
+@pytest.mark.oci
+@pytest.mark.openstack
+class TestKeysToConsoleEnabled:
+    """Test that output can be enabled."""
+
+    def test_duplicate_messaging_console_log(self, class_client):
+        class_client.execute('cloud-init status --wait --long').ok
+        try:
+            console_log = class_client.instance.console_log()
+        except NotImplementedError:
+            # Assume that an exception here means that we can't use the
+            # console log
+            pytest.skip("NotImplementedError when requesting console log")
+            return
+        if console_log.lower() == 'no console output':
+            # This test retries because we might not have the full console log
+            # on the first fetch. However, if we have no console output
+            # at all, we don't want to keep retrying as that would trigger
+            # another 5 minute wait on the pycloudlib side, which could
+            # leave us waiting for a couple hours
+            pytest.fail('no console output')
+            return
+        msg = "no authorized SSH keys fingerprints found for user barfoo."
+        assert 1 == console_log.count(msg)

From a1cf55e5e6331b9b3a4f9ceb412dd14c78abb5ea Mon Sep 17 00:00:00 2001
From: eb3095 <45504889+eb3095@users.noreply.github.com>
Date: Wed, 1 Dec 2021 13:35:28 -0500
Subject: [PATCH 0003/2310] Fix missing metadata routes for vultr (#1125)

Vultr uses 169.254.169.254 for the metadata server. Some distros are
having trouble with this on IPv6-only servers because the route is not
being assigned to the link-local interface by default as it is in
other distros. This change sets that route before attempting to fetch
the metadata, avoiding the current issue.
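
The added route is roughly equivalent to running the following (the
interface name is an example; the code uses whatever
net.find_fallback_nic() returns):

    ip route add 169.254.169.254/32 dev eth0
    # or, on systems without iproute2:
    route add -net 169.254.169.254/32 100.64.0.1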
--- cloudinit/sources/helpers/vultr.py | 52 ++++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) diff --git a/cloudinit/sources/helpers/vultr.py b/cloudinit/sources/helpers/vultr.py index 55487ac31c6..ad347bea510 100644 --- a/cloudinit/sources/helpers/vultr.py +++ b/cloudinit/sources/helpers/vultr.py @@ -9,6 +9,8 @@ from cloudinit import dmi from cloudinit import util from cloudinit import net +from cloudinit import netinfo +from cloudinit import subp from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError from functools import lru_cache @@ -21,6 +23,9 @@ def get_metadata(url, timeout, retries, sec_between, agent): # Bring up interface try: with EphemeralDHCPv4(connectivity_url_data={"url": url}): + # Set metadata route + set_route() + # Fetch the metadata v1 = read_metadata(url, timeout, retries, sec_between, agent) except (NoDHCPLeaseError) as exc: @@ -30,6 +35,53 @@ def get_metadata(url, timeout, retries, sec_between, agent): return json.loads(v1) +# Set route for metadata +def set_route(): + # Get routes, confirm entry does not exist + routes = netinfo.route_info() + + # If no tools exist and empty dict is returned + if 'ipv4' not in routes: + return + + # We only care about IPv4 + routes = routes['ipv4'] + + # Searchable list + dests = [] + + # Parse each route into a more searchable format + for route in routes: + dests.append(route['destination']) + + gw_present = '100.64.0.0' in dests or '100.64.0.0/10' in dests + dest_present = '169.254.169.254' in dests + + # If not IPv6 only (No link local) + # or the route is already present + if not gw_present or dest_present: + return + + # Set metadata route + if subp.which('ip'): + subp.subp([ + 'ip', + 'route', + 'add', + '169.254.169.254/32', + 'dev', + net.find_fallback_nic() + ]) + elif subp.which('route'): + subp.subp([ + 'route', + 'add', + '-net', + '169.254.169.254/32', + '100.64.0.1' + ]) + + # Read the system information from SMBIOS def get_sysinfo(): return { From cf38c2cbc5875813fbb9858f45e5b95789b7ffea Mon Sep 17 00:00:00 2001 From: James Falcon Date: Thu, 2 Dec 2021 08:51:26 -0600 Subject: [PATCH 0004/2310] Move GCE metadata fetch to init-local (SC-502) (#1122) GCE currently fetches metadata after network has come up. There's no reason we can't fetch at init-local time, so update GCE to fetch at init-local time to be more performant and consistent with other datasources. 
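
In sketch form, the change subclasses the existing datasource and
registers it for the local stage as well (names taken from this patch):

    class DataSourceGCELocal(DataSourceGCE):
        perform_dhcp_setup = True  # crawl metadata under EphemeralDHCPv4

    datasources = [
        (DataSourceGCELocal, (sources.DEP_FILESYSTEM,)),  # init-local
        (DataSourceGCE, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
    ]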
--- cloudinit/sources/DataSourceGCE.py | 25 +++++++++-- .../modules/test_combined.py | 41 +++++++++++++++++++ .../unittests/test_datasource/test_common.py | 1 + tests/unittests/test_datasource/test_gce.py | 24 +++++++++++ 4 files changed, 87 insertions(+), 4 deletions(-) diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py index 9f838bd45d0..b82fa410a0c 100644 --- a/cloudinit/sources/DataSourceGCE.py +++ b/cloudinit/sources/DataSourceGCE.py @@ -4,6 +4,7 @@ import datetime import json +from contextlib import suppress as noop from base64 import b64decode @@ -13,6 +14,7 @@ from cloudinit import sources from cloudinit import url_helper from cloudinit import util +from cloudinit.net.dhcp import EphemeralDHCPv4 LOG = logging.getLogger(__name__) @@ -58,6 +60,7 @@ def get_value(self, path, is_text, is_recursive=False): class DataSourceGCE(sources.DataSource): dsname = 'GCE' + perform_dhcp_setup = False def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) @@ -73,10 +76,19 @@ def __init__(self, sys_cfg, distro, paths): def _get_data(self): url_params = self.get_url_params() - ret = util.log_time( - LOG.debug, 'Crawl of GCE metadata service', - read_md, kwargs={'address': self.metadata_address, - 'url_params': url_params}) + network_context = noop() + if self.perform_dhcp_setup: + network_context = EphemeralDHCPv4(self.fallback_interface) + with network_context: + ret = util.log_time( + LOG.debug, + "Crawl of GCE metadata service", + read_md, + kwargs={ + "address": self.metadata_address, + "url_params": url_params, + }, + ) if not ret['success']: if ret['platform_reports_gce']: @@ -117,6 +129,10 @@ def region(self): return self.availability_zone.rsplit('-', 1)[0] +class DataSourceGCELocal(DataSourceGCE): + perform_dhcp_setup = True + + def _write_host_key_to_guest_attributes(key_type, key_value): url = '%s/%s/%s' % (GUEST_ATTRIBUTES_URL, HOSTKEY_NAMESPACE, key_type) key_value = key_value.encode('utf-8') @@ -272,6 +288,7 @@ def platform_reports_gce(): # Used to match classes to dependencies. 
datasources = [ + (DataSourceGCELocal, (sources.DEP_FILESYSTEM,)), (DataSourceGCE, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), ] diff --git a/tests/integration_tests/modules/test_combined.py b/tests/integration_tests/modules/test_combined.py index bc19c2a285f..758c96fa675 100644 --- a/tests/integration_tests/modules/test_combined.py +++ b/tests/integration_tests/modules/test_combined.py @@ -209,6 +209,31 @@ def test_no_problems(self, class_client: IntegrationInstance): log = client.read_from_file('/var/log/cloud-init.log') verify_clean_log(log) + def test_correct_datasource_detected( + self, class_client: IntegrationInstance + ): + """Test datasource is detected at the proper boot stage.""" + client = class_client + status_file = client.read_from_file("/run/cloud-init/status.json") + + platform_datasources = { + "azure": "DataSourceAzure [seed=/dev/sr0]", + "ec2": "DataSourceEc2Local", + "gce": "DataSourceGCELocal", + "oci": "DataSourceOracle", + "openstack": "DataSourceOpenStackLocal [net,ver=2]", + "lxd_container": ( + "DataSourceNoCloud " + "[seed=/var/lib/cloud/seed/nocloud-net][dsmode=net]" + ), + "lxd_vm": "DataSourceNoCloud [seed=/dev/sr0][dsmode=net]", + } + + assert ( + platform_datasources[client.settings.PLATFORM] + == json.loads(status_file)["v1"]["datasource"] + ) + def _check_common_metadata(self, data): assert data['base64_encoded_keys'] == [] assert data['merged_cfg'] == 'redacted for non-root user' @@ -277,3 +302,19 @@ def test_instance_json_ec2(self, class_client: IntegrationInstance): assert v1_data['instance_id'] == client.instance.name assert v1_data['local_hostname'].startswith('ip-') assert v1_data['region'] == client.cloud.cloud_instance.region + + @pytest.mark.gce + def test_instance_json_gce(self, class_client: IntegrationInstance): + client = class_client + instance_json_file = client.read_from_file( + "/run/cloud-init/instance-data.json" + ) + data = json.loads(instance_json_file) + self._check_common_metadata(data) + v1_data = data["v1"] + assert v1_data["cloud_name"] == "gce" + assert v1_data["platform"] == "gce" + assert v1_data["subplatform"].startswith("metadata") + assert v1_data["availability_zone"] == client.instance.zone + assert v1_data["instance_id"] == client.instance.instance_id + assert v1_data["local_hostname"] == client.instance.name diff --git a/tests/unittests/test_datasource/test_common.py b/tests/unittests/test_datasource/test_common.py index 17d531601aa..9089e5def69 100644 --- a/tests/unittests/test_datasource/test_common.py +++ b/tests/unittests/test_datasource/test_common.py @@ -41,6 +41,7 @@ CloudSigma.DataSourceCloudSigma, ConfigDrive.DataSourceConfigDrive, DigitalOcean.DataSourceDigitalOcean, + GCE.DataSourceGCELocal, Hetzner.DataSourceHetzner, IBMCloud.DataSourceIBMCloud, LXD.DataSourceLXD, diff --git a/tests/unittests/test_datasource/test_gce.py b/tests/unittests/test_datasource/test_gce.py index 80b38f9e301..1d91b301ff6 100644 --- a/tests/unittests/test_datasource/test_gce.py +++ b/tests/unittests/test_datasource/test_gce.py @@ -360,5 +360,29 @@ def test_publish_host_keys(self, m_readurl): self.ds.publish_host_keys(hostkeys) m_readurl.assert_has_calls(readurl_expected_calls, any_order=True) + @mock.patch( + "cloudinit.sources.DataSourceGCE.EphemeralDHCPv4", + autospec=True, + ) + @mock.patch( + "cloudinit.sources.DataSourceGCE.DataSourceGCELocal.fallback_interface" + ) + def test_local_datasource_uses_ephemeral_dhcp(self, _m_fallback, m_dhcp): + _set_mock_metadata() + ds = DataSourceGCE.DataSourceGCELocal( + sys_cfg={}, 
distro=None, paths=None
+        )
+        ds._get_data()
+        assert m_dhcp.call_count == 1
+
+    @mock.patch(
+        "cloudinit.sources.DataSourceGCE.EphemeralDHCPv4",
+        autospec=True,
+    )
+    def test_datasource_doesnt_use_ephemeral_dhcp(self, m_dhcp):
+        _set_mock_metadata()
+        ds = DataSourceGCE.DataSourceGCE(sys_cfg={}, distro=None, paths=None)
+        ds._get_data()
+        assert m_dhcp.call_count == 0

 # vi: ts=4 expandtab

From ff10fc0914a8b29acc23348d7848439a5eb4960a Mon Sep 17 00:00:00 2001
From: James Falcon
Date: Thu, 2 Dec 2021 22:08:34 -0600
Subject: [PATCH 0005/2310] testing: Remove date from final_message test
 (SC-638) (#1127)

---
 .../modules/test_combined.py | 19 ++++++-------------
 1 file changed, 6 insertions(+), 13 deletions(-)

diff --git a/tests/integration_tests/modules/test_combined.py b/tests/integration_tests/modules/test_combined.py
index 758c96fa675..26a8397df6a 100644
--- a/tests/integration_tests/modules/test_combined.py
+++ b/tests/integration_tests/modules/test_combined.py
@@ -74,23 +74,16 @@ def test_final_message(self, class_client: IntegrationInstance):
         """Test that final_message module works as expected.

         Also tests LP 1511485: final_message is silent.
-
-        It's possible that if this test is run within a minute or so of
-        midnight that we'll see a failure because the day in the logs
-        is different from the day specified in the test definition.
         """
         client = class_client
         log = client.read_from_file('/var/log/cloud-init.log')
-        # Get date on host rather than locally as our host could be in a
-        # wildly different timezone (or more likely recording UTC)
-        today = client.execute('date "+%a, %d %b %Y"')
         expected = (
-            'This is my final message!\n'
-            r'\d+\.\d+.*\n'
-            '{}.*\n'
-            'DataSource.*\n'
-            r'\d+\.\d+'
-        ).format(today)
+            "This is my final message!\n"
+            r"\d+\.\d+.*\n"
+            r"\w{3}, \d{2} \w{3} \d{4} \d{2}:\d{2}:\d{2} \+\d{4}\n"  # Datetime
+            "DataSource.*\n"
+            r"\d+\.\d+"
+        )
         assert re.search(expected, log)

From 0fe96a44cde48cc688afe75beb8fd126c8892b8c Mon Sep 17 00:00:00 2001
From: Chad Smith
Date: Thu, 2 Dec 2021 21:25:43 -0700
Subject: [PATCH 0006/2310] jinja: provide and document jinja-safe key aliases
 in instance-data (SC-622) (#1123)

Allow #cloud-config and cloud-init query to use underscore-delimited
"jinja-safe" key aliases for any instance-data.json keys containing
jinja operator characters. This provides a means to use Jinja's
dot-notation instead of square brackets and quoting to reference
"unsafe" attribute names.

Support for these aliased keys is available to both #cloud-config
user-data and `cloud-init query`.
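
On the command line, `cloud-init query` accepts the same dot-notation
aliases, e.g. (using the illustrative key from the example below):

    $ cloud-init query ds.config.user_network_config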
For example #cloud-config alias access can look like: {{ ds.config.user_network_config }} - instead of - {{ ds.config["user.network-config"] }} --- cloudinit/cmd/query.py | 134 ++++++++++++++---- cloudinit/cmd/tests/test_query.py | 71 ++++++++-- cloudinit/handlers/jinja_template.py | 48 +++++-- cloudinit/sources/DataSourceLXD.py | 26 ++-- cloudinit/sources/tests/test_lxd.py | 71 +++++----- doc/rtd/topics/instancedata.rst | 16 ++- .../datasources/test_lxd_discovery.py | 17 ++- .../modules/test_jinja_templating.py | 12 +- tests/unittests/test_builtin_handlers.py | 68 ++++++--- 9 files changed, 326 insertions(+), 137 deletions(-) diff --git a/cloudinit/cmd/query.py b/cloudinit/cmd/query.py index 07db9552559..e53cd855d23 100644 --- a/cloudinit/cmd/query.py +++ b/cloudinit/cmd/query.py @@ -19,7 +19,10 @@ import sys from cloudinit.handlers.jinja_template import ( - convert_jinja_instance_data, render_jinja_payload) + convert_jinja_instance_data, + get_jinja_variable_alias, + render_jinja_payload +) from cloudinit.cmd.devel import addLogHandlerCLI, read_cfg_paths from cloudinit import log from cloudinit.sources import ( @@ -93,22 +96,24 @@ def load_userdata(ud_file_path): return util.decomp_gzip(bdata, quiet=False, decode=True) -def handle_args(name, args): - """Handle calls to 'cloud-init query' as a subcommand.""" - paths = None - addLogHandlerCLI(LOG, log.DEBUG if args.debug else log.WARNING) - if not any([args.list_keys, args.varname, args.format, args.dump_all]): - LOG.error( - 'Expected one of the options: --all, --format,' - ' --list-keys or varname') - get_parser().print_help() - return 1 +def _read_instance_data(instance_data, user_data, vendor_data) -> dict: + """Return a dict of merged instance-data, vendordata and userdata. + The dict will contain supplemental userdata and vendordata keys sourced + from default user-data and vendor-data files. + + Non-root users will have redacted INSTANCE_JSON_FILE content and redacted + vendordata and userdata values. + + :raise: IOError/OSError on absence of instance-data.json file or invalid + access perms. + """ + paths = None uid = os.getuid() - if not all([args.instance_data, args.user_data, args.vendor_data]): + if not all([instance_data, user_data, vendor_data]): paths = read_cfg_paths() - if args.instance_data: - instance_data_fn = args.instance_data + if instance_data: + instance_data_fn = instance_data else: redacted_data_fn = os.path.join(paths.run_dir, INSTANCE_JSON_FILE) if uid == 0: @@ -124,12 +129,12 @@ def handle_args(name, args): instance_data_fn = redacted_data_fn else: instance_data_fn = redacted_data_fn - if args.user_data: - user_data_fn = args.user_data + if user_data: + user_data_fn = user_data else: user_data_fn = os.path.join(paths.instance_link, 'user-data.txt') - if args.vendor_data: - vendor_data_fn = args.vendor_data + if vendor_data: + vendor_data_fn = vendor_data else: vendor_data_fn = os.path.join(paths.instance_link, 'vendor-data.txt') @@ -140,7 +145,7 @@ def handle_args(name, args): LOG.error("No read permission on '%s'. 
Try sudo", instance_data_fn) else: LOG.error('Missing instance-data file: %s', instance_data_fn) - return 1 + raise instance_data = util.load_json(instance_json) if uid != 0: @@ -151,6 +156,65 @@ def handle_args(name, args): else: instance_data['userdata'] = load_userdata(user_data_fn) instance_data['vendordata'] = load_userdata(vendor_data_fn) + return instance_data + + +def _find_instance_data_leaf_by_varname_path( + jinja_vars_without_aliases: dict, jinja_vars_with_aliases: dict, + varname: str, list_keys: bool +): + """Return the value of the dot-delimited varname path in instance-data + + Split a dot-delimited jinja variable name path into components, walk the + path components into the instance_data and look up a matching jinja + variable name or cloud-init's underscore-delimited key aliases. + + :raises: ValueError when varname represents an invalid key name or path or + if list-keys is provided by varname isn't a dict object. + """ + walked_key_path = "" + response = jinja_vars_without_aliases + for key_path_part in varname.split('.'): + try: + # Walk key path using complete aliases dict, yet response + # should only contain jinja_without_aliases + jinja_vars_with_aliases = jinja_vars_with_aliases[key_path_part] + except KeyError as e: + if walked_key_path: + msg = "instance-data '{key_path}' has no '{leaf}'".format( + leaf=key_path_part, key_path=walked_key_path + ) + else: + msg = "Undefined instance-data key '{}'".format(varname) + raise ValueError(msg) from e + if key_path_part in response: + response = response[key_path_part] + else: # We are an underscore_delimited key alias + for key in response: + if get_jinja_variable_alias(key) == key_path_part: + response = response[key] + break + if walked_key_path: + walked_key_path += "." + walked_key_path += key_path_part + return response + + +def handle_args(name, args): + """Handle calls to 'cloud-init query' as a subcommand.""" + addLogHandlerCLI(LOG, log.DEBUG if args.debug else log.WARNING) + if not any([args.list_keys, args.varname, args.format, args.dump_all]): + LOG.error( + 'Expected one of the options: --all, --format,' + ' --list-keys or varname') + get_parser().print_help() + return 1 + try: + instance_data = _read_instance_data( + args.instance_data, args.user_data, args.vendor_data + ) + except (IOError, OSError): + return 1 if args.format: payload = '## template: jinja\n{fmt}'.format(fmt=args.format) rendered_payload = render_jinja_payload( @@ -162,20 +226,32 @@ def handle_args(name, args): return 0 return 1 + # If not rendering a structured format above, query output will be either: + # - JSON dump of all instance-data/jinja variables + # - JSON dump of a value at an dict path into the instance-data dict. + # - a list of keys for a specific dict path into the instance-data dict. 
response = convert_jinja_instance_data(instance_data) if args.varname: + jinja_vars_with_aliases = convert_jinja_instance_data( + instance_data, include_key_aliases=True + ) try: - for var in args.varname.split('.'): - response = response[var] - except KeyError: - LOG.error('Undefined instance-data key %s', args.varname) + response = _find_instance_data_leaf_by_varname_path( + jinja_vars_without_aliases=response, + jinja_vars_with_aliases=jinja_vars_with_aliases, + varname=args.varname, + list_keys=args.list_keys + ) + except (KeyError, ValueError) as e: + LOG.error(e) + return 1 + if args.list_keys: + if not isinstance(response, dict): + LOG.error( + "--list-keys provided but '%s' is not a dict", + args.varname + ) return 1 - if args.list_keys: - if not isinstance(response, dict): - LOG.error("--list-keys provided but '%s' is not a dict", var) - return 1 - response = '\n'.join(sorted(response.keys())) - elif args.list_keys: response = '\n'.join(sorted(response.keys())) if not isinstance(response, str): response = util.json_dumps(response) diff --git a/cloudinit/cmd/tests/test_query.py b/cloudinit/cmd/tests/test_query.py index c258d321687..d96c394518d 100644 --- a/cloudinit/cmd/tests/test_query.py +++ b/cloudinit/cmd/tests/test_query.py @@ -75,6 +75,40 @@ def test_handle_args_error_on_missing_param(self, caplog, capsys): assert 'usage: query' in out assert 1 == m_cli_log.call_count + @pytest.mark.parametrize( + "inst_data,varname,expected_error", ( + ( + '{"v1": {"key-2": "value-2"}}', + 'v1.absent_leaf', + "instance-data 'v1' has no 'absent_leaf'\n" + ), + ( + '{"v1": {"key-2": "value-2"}}', + 'absent_key', + "Undefined instance-data key 'absent_key'\n" + ), + ) + ) + def test_handle_args_error_on_invalid_vaname_paths( + self, inst_data, varname, expected_error, caplog, tmpdir + ): + """Error when varname is not a valid instance-data variable path.""" + instance_data = tmpdir.join('instance-data') + instance_data.write(inst_data) + args = self.args( + debug=False, dump_all=False, format=None, + instance_data=instance_data.strpath, + list_keys=False, user_data=None, vendor_data=None, varname=varname + ) + paths, _, _, _ = self._setup_paths(tmpdir) + with mock.patch('cloudinit.cmd.query.read_cfg_paths') as m_paths: + m_paths.return_value = paths + with mock.patch( + "cloudinit.cmd.query.addLogHandlerCLI", return_value="" + ): + assert 1 == query.handle_args('anyname', args) + assert expected_error in caplog.text + def test_handle_args_error_on_missing_instance_data(self, caplog, tmpdir): """When instance_data file path does not exist, log an error.""" absent_fn = tmpdir.join('absent') @@ -166,7 +200,7 @@ def test_handle_args_root_processes_user_data( assert 0 == query.handle_args('anyname', args) out, _err = capsys.readouterr() cmd_output = json.loads(out) - assert "it worked" == cmd_output['my_var'] + assert "it worked" == cmd_output['my-var'] if ud_expected == "ci-b64:": ud_expected = "ci-b64:{}".format(b64e(ud_src)) if vd_expected == "ci-b64:": @@ -193,8 +227,8 @@ def test_handle_args_root_uses_instance_sensitive_data( m_getuid.return_value = 0 assert 0 == query.handle_args('anyname', args) expected = ( - '{\n "my_var": "it worked",\n "userdata": "ud",\n ' - '"vendordata": "vd"\n}\n' + '{\n "my-var": "it worked",\n ' + '"userdata": "ud",\n "vendordata": "vd"\n}\n' ) out, _err = capsys.readouterr() assert expected == out @@ -211,7 +245,7 @@ def test_handle_args_dumps_all_instance_data(self, capsys, tmpdir): m_getuid.return_value = 100 assert 0 == query.handle_args('anyname', args) 
expected = ( - '{\n "my_var": "it worked",\n "userdata": "<%s> file:ud",\n' + '{\n "my-var": "it worked",\n "userdata": "<%s> file:ud",\n' ' "vendordata": "<%s> file:vd"\n}\n' % ( REDACT_SENSITIVE_VALUE, REDACT_SENSITIVE_VALUE ) @@ -233,21 +267,38 @@ def test_handle_args_returns_top_level_varname(self, capsys, tmpdir): out, _err = capsys.readouterr() assert 'it worked\n' == out - def test_handle_args_returns_nested_varname(self, capsys, tmpdir): + @pytest.mark.parametrize( + 'inst_data,varname,expected', + ( + ( + '{"v1": {"key-2": "value-2"}, "my-var": "it worked"}', + 'v1.key_2', + 'value-2\n' + ), + # Assert no jinja underscore-delimited aliases are reported on CLI + ( + '{"v1": {"something-hyphenated": {"no.underscores":"x",' + ' "no-alias": "y"}}, "my-var": "it worked"}', + 'v1.something_hyphenated', + '{\n "no-alias": "y",\n "no.underscores": "x"\n}\n' + ), + ) + ) + def test_handle_args_returns_nested_varname( + self, inst_data, varname, expected, capsys, tmpdir + ): """If user_data file is a jinja template render instance-data vars.""" instance_data = tmpdir.join('instance-data') - instance_data.write( - '{"v1": {"key-2": "value-2"}, "my-var": "it worked"}' - ) + instance_data.write(inst_data) args = self.args( debug=False, dump_all=False, format=None, instance_data=instance_data.strpath, user_data='ud', - vendor_data='vd', list_keys=False, varname='v1.key_2') + vendor_data='vd', list_keys=False, varname=varname) with mock.patch('os.getuid') as m_getuid: m_getuid.return_value = 100 assert 0 == query.handle_args('anyname', args) out, _err = capsys.readouterr() - assert 'value-2\n' == out + assert expected == out def test_handle_args_returns_standardized_vars_to_top_level_aliases( self, capsys, tmpdir diff --git a/cloudinit/handlers/jinja_template.py b/cloudinit/handlers/jinja_template.py index 5033abbb4bd..de88a5ea89c 100644 --- a/cloudinit/handlers/jinja_template.py +++ b/cloudinit/handlers/jinja_template.py @@ -1,14 +1,18 @@ # This file is part of cloud-init. See LICENSE file for license information. +import copy from errno import EACCES import os import re +from typing import Optional try: from jinja2.exceptions import UndefinedError as JUndefinedError + from jinja2.lexer import operator_re except ImportError: # No jinja2 dependency JUndefinedError = Exception + operator_re = re.compile(r'[-.]') from cloudinit import handlers from cloudinit import log as logging @@ -97,7 +101,9 @@ def render_jinja_payload_from_file( def render_jinja_payload(payload, payload_fn, instance_data, debug=False): instance_jinja_vars = convert_jinja_instance_data( instance_data, - decode_paths=instance_data.get('base64-encoded-keys', [])) + decode_paths=instance_data.get('base64-encoded-keys', []), + include_key_aliases=True + ) if debug: LOG.debug('Converted jinja variables\n%s', json_dumps(instance_jinja_vars)) @@ -118,7 +124,30 @@ def render_jinja_payload(payload, payload_fn, instance_data, debug=False): return rendered_payload -def convert_jinja_instance_data(data, prefix='', sep='/', decode_paths=()): +def get_jinja_variable_alias(orig_name: str) -> Optional[str]: + """Return a jinja variable alias, replacing any operators with underscores. + + Provide underscore-delimited key aliases to simplify dot-notation + attribute references for keys which contain operators "." or "-". + This provides for simpler short-hand jinja attribute notation + allowing one to avoid quoting keys which contain operators. 
+ {{ ds.v1_0.config.user_network_config }} instead of + {{ ds['v1.0'].config["user.network-config"] }}. + + :param orig_name: String representing a jinja variable name to scrub/alias. + + :return: A string with any jinja operators replaced if needed. Otherwise, + none if no alias required. + """ + alias_name = re.sub(operator_re, '_', orig_name) + if alias_name != orig_name: + return alias_name + return None + + +def convert_jinja_instance_data( + data, prefix='', sep='/', decode_paths=(), include_key_aliases=False +): """Process instance-data.json dict for use in jinja templates. Replace hyphens with underscores for jinja templates and decode any @@ -127,21 +156,24 @@ def convert_jinja_instance_data(data, prefix='', sep='/', decode_paths=()): result = {} decode_paths = [path.replace('-', '_') for path in decode_paths] for key, value in sorted(data.items()): - if '-' in key: - # Standardize keys for use in #cloud-config/shell templates - key = key.replace('-', '_') key_path = '{0}{1}{2}'.format(prefix, sep, key) if prefix else key if key_path in decode_paths: value = b64d(value) if isinstance(value, dict): result[key] = convert_jinja_instance_data( - value, key_path, sep=sep, decode_paths=decode_paths) - if re.match(r'v\d+', key): + value, key_path, sep=sep, decode_paths=decode_paths, + include_key_aliases=include_key_aliases + ) + if re.match(r'v\d+$', key): # Copy values to top-level aliases for subkey, subvalue in result[key].items(): - result[subkey] = subvalue + result[subkey] = copy.deepcopy(subvalue) else: result[key] = value + if include_key_aliases: + alias_name = get_jinja_variable_alias(key) + if alias_name: + result[alias_name] = copy.deepcopy(result[key]) return result # vi: ts=4 expandtab diff --git a/cloudinit/sources/DataSourceLXD.py b/cloudinit/sources/DataSourceLXD.py index 55ae52a2df5..469707d2838 100644 --- a/cloudinit/sources/DataSourceLXD.py +++ b/cloudinit/sources/DataSourceLXD.py @@ -190,19 +190,16 @@ def _get_data(self) -> bool: self.metadata = _raw_instance_data_to_dict( "meta-data", self._crawled_metadata.get("meta-data") ) - if LXD_SOCKET_API_VERSION in self._crawled_metadata: - config = self._crawled_metadata[LXD_SOCKET_API_VERSION].get( - "config", {} + config = self._crawled_metadata.get("config", {}) + user_metadata = config.get("user.meta-data", {}) + if user_metadata: + user_metadata = _raw_instance_data_to_dict( + "user.meta-data", user_metadata + ) + if not isinstance(self.metadata, dict): + self.metadata = util.mergemanydict( + [util.load_yaml(self.metadata), user_metadata] ) - user_metadata = config.get("user.meta-data", {}) - if user_metadata: - user_metadata = _raw_instance_data_to_dict( - "user.meta-data", user_metadata - ) - if not isinstance(self.metadata, dict): - self.metadata = util.mergemanydict( - [util.load_yaml(self.metadata), user_metadata] - ) if "user-data" in self._crawled_metadata: self.userdata_raw = self._crawled_metadata["user-data"] if "network-config" in self._crawled_metadata: @@ -304,7 +301,8 @@ def read_metadata( if metadata_only: return md # Skip network-data, vendor-data, user-data - md[LXD_SOCKET_API_VERSION] = { + md = { + "_metadata_api_version": api_version, # Document API version read "config": {}, "meta-data": md["meta-data"] } @@ -345,7 +343,7 @@ def read_metadata( # Leave raw data values/format unchanged to represent it in # instance-data.json for cloud-init query or jinja template # use. 
- md[LXD_SOCKET_API_VERSION]["config"][cfg_key] = response.text + md["config"][cfg_key] = response.text # Promote common CONFIG_KEY_ALIASES to top-level keys. if cfg_key in CONFIG_KEY_ALIASES: # Due to sort of config_routes, promote cloud-init.* diff --git a/cloudinit/sources/tests/test_lxd.py b/cloudinit/sources/tests/test_lxd.py index fc2a41dfbeb..a6e51f3b963 100644 --- a/cloudinit/sources/tests/test_lxd.py +++ b/cloudinit/sources/tests/test_lxd.py @@ -42,15 +42,12 @@ def _add_network_v1_device(devname) -> dict: "network-config": NETWORK_V1, "user-data": "#cloud-config\npackages: [sl]\n", "vendor-data": "#cloud-config\nruncmd: ['echo vendor-data']\n", - "1.0": { - "meta-data": "instance-id: my-lxc\nlocal-hostname: my-lxc\n\n", - "config": { - "user.user-data": - "instance-id: my-lxc\nlocal-hostname: my-lxc\n\n", - "user.vendor-data": - "#cloud-config\nruncmd: ['echo vendor-data']\n", - "user.network-config": yaml.safe_dump(NETWORK_V1), - } + "config": { + "user.user-data": + "instance-id: my-lxc\nlocal-hostname: my-lxc\n\n", + "user.vendor-data": + "#cloud-config\nruncmd: ['echo vendor-data']\n", + "user.network-config": yaml.safe_dump(NETWORK_V1), } } @@ -190,8 +187,10 @@ class TestReadMetadata: "http://lxd/1.0/meta-data": "local-hostname: md\n", "http://lxd/1.0/config": "[]", }, - {"1.0": {"config": {}, "meta-data": "local-hostname: md\n"}, - "meta-data": "local-hostname: md\n"}, + { + "_metadata_api_version": lxd.LXD_SOCKET_API_VERSION, + "config": {}, "meta-data": "local-hostname: md\n" + }, ["[GET] [HTTP:200] http://lxd/1.0/meta-data", "[GET] [HTTP:200] http://lxd/1.0/config"], ), @@ -211,12 +210,10 @@ class TestReadMetadata: "http://lxd/1.0/config/user.vendor-data": "", # 404 }, { - "1.0": { - "config": { - "user.custom1": "custom1", # Not promoted - "user.network-config": "net-config", - }, - "meta-data": "local-hostname: md\n", + "_metadata_api_version": lxd.LXD_SOCKET_API_VERSION, + "config": { + "user.custom1": "custom1", # Not promoted + "user.network-config": "net-config", }, "meta-data": "local-hostname: md\n", "network-config": "net-config", @@ -250,15 +247,13 @@ class TestReadMetadata: "http://lxd/1.0/config/user.vendor-data": "vendor-data", }, { - "1.0": { - "config": { - "user.custom1": "custom1", # Not promoted - "user.meta-data": "meta-data", - "user.network-config": "net-config", - "user.user-data": "user-data", - "user.vendor-data": "vendor-data", - }, - "meta-data": "local-hostname: md\n", + "_metadata_api_version": lxd.LXD_SOCKET_API_VERSION, + "config": { + "user.custom1": "custom1", # Not promoted + "user.meta-data": "meta-data", + "user.network-config": "net-config", + "user.user-data": "user-data", + "user.vendor-data": "vendor-data", }, "meta-data": "local-hostname: md\n", "network-config": "net-config", @@ -303,19 +298,17 @@ class TestReadMetadata: "cloud-init.vendor-data", }, { - "1.0": { - "config": { - "user.meta-data": "user.meta-data", - "user.network-config": "user.network-config", - "user.user-data": "user.user-data", - "user.vendor-data": "user.vendor-data", - "cloud-init.network-config": - "cloud-init.network-config", - "cloud-init.user-data": "cloud-init.user-data", - "cloud-init.vendor-data": - "cloud-init.vendor-data", - }, - "meta-data": "local-hostname: md\n", + "_metadata_api_version": lxd.LXD_SOCKET_API_VERSION, + "config": { + "user.meta-data": "user.meta-data", + "user.network-config": "user.network-config", + "user.user-data": "user.user-data", + "user.vendor-data": "user.vendor-data", + "cloud-init.network-config": + 
"cloud-init.network-config", + "cloud-init.user-data": "cloud-init.user-data", + "cloud-init.vendor-data": + "cloud-init.vendor-data", }, "meta-data": "local-hostname: md\n", "network-config": "cloud-init.network-config", diff --git a/doc/rtd/topics/instancedata.rst b/doc/rtd/topics/instancedata.rst index 6c17139f406..c33b907abcb 100644 --- a/doc/rtd/topics/instancedata.rst +++ b/doc/rtd/topics/instancedata.rst @@ -530,12 +530,18 @@ Both user-data scripts and **#cloud-config** data support jinja template rendering. When the first line of the provided user-data begins with, **## template: jinja** cloud-init will use jinja to render that file. -Any instance-data-sensitive.json variables are surfaced as dot-delimited -jinja template variables because cloud-config modules are run as 'root' -user. +Any instance-data-sensitive.json variables are surfaced as jinja template +variables because cloud-config modules are run as 'root' user. - -Below are some examples of providing these types of user-data: +.. note:: + cloud-init also provides jinja-safe key aliases for any instance-data.json + keys which contain jinja operator characters such as +, -, ., /, etc. Any + jinja operator will be replaced with underscores in the jinja-safe key + alias. This allows for cloud-init templates to use aliased variable + references which allow for jinja's dot-notation reference such as + ``{{ ds.v1_0.my_safe_key }}`` instead of ``{{ ds["v1.0"]["my/safe-key"] }}``. + +Below are some other examples of using jinja templates in user-data: * Cloud config calling home with the ec2 public hostname and availability-zone diff --git a/tests/integration_tests/datasources/test_lxd_discovery.py b/tests/integration_tests/datasources/test_lxd_discovery.py index 93200962922..3f05e906919 100644 --- a/tests/integration_tests/datasources/test_lxd_discovery.py +++ b/tests/integration_tests/datasources/test_lxd_discovery.py @@ -53,7 +53,9 @@ def test_lxd_datasource_discovery(client: IntegrationInstance): assert "lxd" == v1["platform"] assert "LXD socket API v. 1.0 (/dev/lxd/sock)" == v1["subplatform"] ds_cfg = json.loads(client.execute('cloud-init query ds').stdout) - assert ["config", "meta_data"] == sorted(list(ds_cfg["1.0"].keys())) + assert ["_doc", "_metadata_api_version", "config", "meta-data"] == sorted( + list(ds_cfg.keys()) + ) if ( client.settings.PLATFORM == "lxd_vm" and ImageSpecification.from_os_image().release in ("xenial", "bionic") @@ -62,15 +64,18 @@ def test_lxd_datasource_discovery(client: IntegrationInstance): # to start the lxd-agent. # https://github.com/canonical/pycloudlib/blob/main/pycloudlib/\ # lxd/defaults.py#L13-L27 - lxd_config_keys = ["user.meta_data", "user.vendor_data"] + # Underscore-delimited aliases exist for any keys containing hyphens or + # dots. 
+ lxd_config_keys = ["user.meta-data", "user.vendor-data"] else: - lxd_config_keys = ["user.meta_data"] - assert lxd_config_keys == list(ds_cfg["1.0"]["config"].keys()) + lxd_config_keys = ["user.meta-data"] + assert "1.0" == ds_cfg["_metadata_api_version"] + assert lxd_config_keys == list(ds_cfg["config"].keys()) assert {"public-keys": v1["public_ssh_keys"][0]} == ( - yaml.safe_load(ds_cfg["1.0"]["config"]["user.meta_data"]) + yaml.safe_load(ds_cfg["config"]["user.meta-data"]) ) assert ( - "#cloud-config\ninstance-id" in ds_cfg["1.0"]["meta_data"] + "#cloud-config\ninstance-id" in ds_cfg["meta-data"] ) # Assert NoCloud seed data is still present in cloud image metadata # This will start failing if we redact metadata templates from diff --git a/tests/integration_tests/modules/test_jinja_templating.py b/tests/integration_tests/modules/test_jinja_templating.py index 35b8ee2ddd0..fe8eff1ac01 100644 --- a/tests/integration_tests/modules/test_jinja_templating.py +++ b/tests/integration_tests/modules/test_jinja_templating.py @@ -11,6 +11,7 @@ runcmd: - echo {{v1.local_hostname}} > /var/tmp/runcmd_output - echo {{merged_cfg._doc}} >> /var/tmp/runcmd_output + - echo {{v1['local-hostname']}} >> /var/tmp/runcmd_output """ @@ -18,13 +19,16 @@ def test_runcmd_with_variable_substitution(client: IntegrationInstance): """Test jinja substitution. - Ensure we can also substitute variables from instance-data-sensitive - LP: #1931392 + Ensure underscore-delimited aliases exist for hyphenated key and + we can also substitute variables from instance-data-sensitive + LP: #1931392. """ + hostname = client.execute('hostname').stdout.strip() expected = [ - client.execute('hostname').stdout.strip(), + hostname, ('Merged cloud-init system config from /etc/cloud/cloud.cfg and ' - '/etc/cloud/cloud.cfg.d/') + '/etc/cloud/cloud.cfg.d/'), + hostname ] output = client.read_from_file('/var/tmp/runcmd_output') verify_ordered_items_in_text(expected, output) diff --git a/tests/unittests/test_builtin_handlers.py b/tests/unittests/test_builtin_handlers.py index 30293e9ea8e..230866b9d8a 100644 --- a/tests/unittests/test_builtin_handlers.py +++ b/tests/unittests/test_builtin_handlers.py @@ -5,6 +5,7 @@ import copy import errno import os +import pytest import shutil import tempfile from textwrap import dedent @@ -281,17 +282,44 @@ def test_jinja_template_handle_renders_jinja_content_missing_keys(self): self.logs.getvalue()) -class TestConvertJinjaInstanceData(CiTestCase): - - def test_convert_instance_data_hyphens_to_underscores(self): - """Replace hyphenated keys with underscores in instance-data.""" - data = {'hyphenated-key': 'hyphenated-val', - 'underscore_delim_key': 'underscore_delimited_val'} - expected_data = {'hyphenated_key': 'hyphenated-val', - 'underscore_delim_key': 'underscore_delimited_val'} - self.assertEqual( - expected_data, - convert_jinja_instance_data(data=data)) +class TestConvertJinjaInstanceData: + + @pytest.mark.parametrize( + "include_key_aliases,data,expected", ( + ( + False, + {'my-key': 'my-val'}, + {'my-key': 'my-val'} + ), + ( + True, + {'my-key': 'my-val'}, + {'my-key': 'my-val', 'my_key': 'my-val'} + ), + ( + False, + {'my.key': 'my.val'}, + {'my.key': 'my.val'} + ), + ( + True, + {'my.key': 'my.val'}, + {'my.key': 'my.val', 'my_key': 'my.val'} + ), + ( + True, + {'my/key': 'my/val'}, + {'my/key': 'my/val', 'my_key': 'my/val'} + ), + ) + ) + def test_convert_instance_data_operators_to_underscores( + self, include_key_aliases, data, expected + ): + """Replace Jinja operators keys with underscores 
in instance-data.""" + assert expected == convert_jinja_instance_data( + data=data, include_key_aliases=include_key_aliases + ) def test_convert_instance_data_promotes_versioned_keys_to_top_level(self): """Any versioned keys are promoted as top-level keys @@ -307,11 +335,10 @@ def test_convert_instance_data_promotes_versioned_keys_to_top_level(self): expected_data.update({'v1key1': 'v1.1', 'v2key1': 'v2.1'}) converted_data = convert_jinja_instance_data(data=data) - self.assertCountEqual( - ['ds', 'v1', 'v2', 'v1key1', 'v2key1'], converted_data.keys()) - self.assertEqual( - expected_data, - converted_data) + assert sorted(['ds', 'v1', 'v2', 'v1key1', 'v2key1']) == sorted( + converted_data.keys() + ) + assert expected_data == converted_data def test_convert_instance_data_most_recent_version_of_promoted_keys(self): """The most-recent versioned key value is promoted to top-level.""" @@ -324,9 +351,7 @@ def test_convert_instance_data_most_recent_version_of_promoted_keys(self): 'key3': 'newer v2 key3'}) converted_data = convert_jinja_instance_data(data=data) - self.assertEqual( - expected_data, - converted_data) + assert expected_data == converted_data def test_convert_instance_data_decodes_decode_paths(self): """Any decode_paths provided are decoded by convert_instance_data.""" @@ -336,9 +361,7 @@ def test_convert_instance_data_decodes_decode_paths(self): converted_data = convert_jinja_instance_data( data=data, decode_paths=('key1/subkey1',)) - self.assertEqual( - expected_data, - converted_data) + assert expected_data == converted_data class TestRenderJinjaPayload(CiTestCase): @@ -355,6 +378,7 @@ def test_render_jinja_payload_logs_jinja_vars_on_debug(self): DEBUG: Converted jinja variables { "hostname": "foo", + "instance-id": "iid", "instance_id": "iid", "v1": { "hostname": "foo" From ffa6fc88249aa080aa31811a45569a45e567418a Mon Sep 17 00:00:00 2001 From: James Falcon Date: Thu, 2 Dec 2021 22:36:37 -0600 Subject: [PATCH 0007/2310] Fix exception when no activator found (#1129) Given that there are additional network management tools that we haven't yet supported with activators, we should log a warning and continue without network activation here, especially since this was a no-op for years. 
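
The resulting handling in apply_network_config is, in sketch form:

    try:
        network_activator = activators.select_activator()
    except activators.NoActivatorException:
        LOG.warning("No network activator found, not bringing up "
                    "network interfaces")
        return True
    network_activator.bring_up_all_interfaces(network_state)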
LP: #1948681
---
 cloudinit/distros/__init__.py          | 7 ++++++-
 cloudinit/net/activators.py            | 6 +++++-
 tests/unittests/test_net_activators.py | 5 +++--
 3 files changed, 14 insertions(+), 4 deletions(-)

diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index cf6aad14b25..fe44f20eee7 100755
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -228,7 +228,12 @@ def apply_network_config(self, netconfig, bring_up=False) -> bool:
         # Now try to bring them up
         if bring_up:
             LOG.debug('Bringing up newly configured network interfaces')
-            network_activator = activators.select_activator()
+            try:
+                network_activator = activators.select_activator()
+            except activators.NoActivatorException:
+                LOG.warning("No network activator found, not bringing up "
+                            "network interfaces")
+                return True
             network_activator.bring_up_all_interfaces(network_state)
         else:
             LOG.debug("Not bringing up newly configured network interfaces")
diff --git a/cloudinit/net/activators.py b/cloudinit/net/activators.py
index 11149548b6f..137338d89ab 100644
--- a/cloudinit/net/activators.py
+++ b/cloudinit/net/activators.py
@@ -16,6 +16,10 @@ LOG = logging.getLogger(__name__)

+class NoActivatorException(Exception):
+    pass
+
+
 def _alter_interface(cmd, device_name) -> bool:
     LOG.debug("Attempting command %s for device %s", cmd, device_name)
     try:
@@ -271,7 +275,7 @@ def select_activator(priority=None, target=None) -> Type[NetworkActivator]:
         tmsg = ""
         if target and target != "/":
             tmsg = " in target=%s" % target
-        raise RuntimeError(
+        raise NoActivatorException(
             "No available network activators found%s. Searched "
             "through list: %s" % (tmsg, priority))
     selected = found[0]
diff --git a/tests/unittests/test_net_activators.py b/tests/unittests/test_net_activators.py
index f63a8b74ce8..9da211957ab 100644
--- a/tests/unittests/test_net_activators.py
+++ b/tests/unittests/test_net_activators.py
@@ -12,7 +12,8 @@ IfUpDownActivator,
     NetplanActivator,
     NetworkManagerActivator,
-    NetworkdActivator
+    NetworkdActivator,
+    NoActivatorException,
 )
 from cloudinit.net.network_state import parse_net_config_data
 from cloudinit.safeyaml import load
@@ -99,7 +100,7 @@ def test_none_available(self, unavailable_mocks):
         resp = search_activator()
         assert resp == []

-    with pytest.raises(RuntimeError):
+    with pytest.raises(NoActivatorException):
         select_activator()

From 039c40f9b3d88ee8158604bb18ca4bf2fb5d5e51 Mon Sep 17 00:00:00 2001
From: Brett Holman
Date: Fri, 3 Dec 2021 13:11:46 -0700
Subject: [PATCH 0008/2310] Reorganize unit test locations under
 tests/unittests (#1126)

This attempts to standardize unit test file location under
tests/unittests/ such that any source file located at
cloudinit/path/to/file.py may have a corresponding unit test file at
tests/unittests/path/to/test_file.py.

Noteworthy Comments:
====================

Four different duplicate test files existed:
test_{gpg,util,cc_mounts,cc_resolv_conf}.py

Each of these duplicate file pairs has been merged together. This is a
break in git history for these files.

The test suite appears to have a dependency on test order. Changing
test order causes some tests to fail. This should be rectified, but
for now some tests have been modified in
tests/unittests/config/test_set_passwords.py.

A helper class name starts with "Test", which causes pytest to try
executing it as a test case and then emit warnings about the class
having an __init__(). The warnings are silenced by renaming the class.
# helpers.py is imported in many test files, import paths change cloudinit/tests/helpers.py -> tests/unittests/helpers.py # Move directories: cloudinit/distros/tests -> tests/unittests/distros cloudinit/cmd/devel/tests -> tests/unittests/cmd/devel cloudinit/cmd/tests -> tests/unittests/cmd/ cloudinit/sources/helpers/tests -> tests/unittests/sources/helpers cloudinit/sources/tests -> tests/unittests/sources cloudinit/net/tests -> tests/unittests/net cloudinit/config/tests -> tests/unittests/config cloudinit/analyze/tests/ -> tests/unittests/analyze/ # Standardize tests already in tests/unittests/ test_datasource -> sources test_distros -> distros test_vmware -> sources/vmware test_handler -> config # this contains cloudconfig module tests test_runs -> runs --- cloudinit/config/tests/test_mounts.py | 61 - cloudinit/config/tests/test_resolv_conf.py | 92 - cloudinit/tests/test_gpg.py | 55 - cloudinit/tests/test_util.py | 1187 ------------ doc/rtd/topics/testing.rst | 13 +- setup.py | 2 +- .../unittests/analyze}/test_boot.py | 2 +- .../unittests/analyze}/test_dump.py | 2 +- .../unittests/cloudinit/__init__py | 0 .../tests => tests/unittests/cmd}/__init__.py | 0 .../unittests/cmd/devel}/__init__.py | 0 .../unittests/cmd/devel}/test_logs.py | 2 +- .../unittests/cmd/devel}/test_render.py | 2 +- .../unittests/cmd}/test_clean.py | 2 +- .../unittests/cmd}/test_cloud_id.py | 2 +- .../unittests/cmd}/test_main.py | 2 +- .../unittests/cmd}/test_query.py | 2 +- .../unittests/cmd}/test_status.py | 2 +- .../unittests/config}/__init__.py | 0 .../test_apt_conf_v1.py} | 2 +- .../test_apt_configure_sources_list_v1.py} | 2 +- .../test_apt_configure_sources_list_v3.py} | 2 +- .../test_apt_key.py} | 0 .../test_apt_source_v1.py} | 2 +- .../test_apt_source_v3.py} | 2 +- .../test_cc_apk_configure.py} | 2 +- .../config/test_cc_apt_pipelining.py | 2 +- .../test_cc_bootcmd.py} | 2 +- .../test_cc_ca_certs.py} | 2 +- .../test_cc_chef.py} | 2 +- .../test_cc_debug.py} | 2 +- .../config/test_cc_disable_ec2_metadata.py | 2 +- .../test_cc_disk_setup.py} | 2 +- .../unittests/config/test_cc_final_message.py | 0 .../test_cc_growpart.py} | 2 +- .../unittests/config/test_cc_grub_dpkg.py | 0 .../test_cc_install_hotplug.py} | 0 .../config/test_cc_keys_to_console.py | 0 .../test_cc_landscape.py} | 2 +- .../test_cc_locale.py} | 2 +- .../test_cc_lxd.py} | 2 +- .../test_cc_mcollective.py} | 2 +- .../test_cc_mounts.py} | 57 +- .../test_cc_ntp.py} | 2 +- .../test_cc_power_state_change.py} | 4 +- .../test_cc_puppet.py} | 2 +- .../test_cc_refresh_rmc_and_interface.py} | 4 +- .../test_cc_resizefs.py} | 2 +- .../test_cc_resolv_conf.py} | 106 +- .../test_cc_rh_subscription.py} | 2 +- .../test_cc_rsyslog.py} | 2 +- .../test_cc_runcmd.py} | 2 +- .../test_cc_seed_random.py} | 2 +- .../test_cc_set_hostname.py} | 2 +- .../unittests/config/test_cc_set_passwords.py | 26 +- .../unittests/config/test_cc_snap.py | 2 +- .../test_cc_spacewalk.py} | 2 +- .../unittests/config/test_cc_ssh.py | 2 +- .../test_cc_timezone.py} | 2 +- .../config/test_cc_ubuntu_advantage.py | 2 +- .../config/test_cc_ubuntu_drivers.py | 2 +- .../test_cc_update_etc_hosts.py} | 2 +- .../unittests/config/test_cc_users_groups.py | 2 +- .../test_cc_write_files.py} | 2 +- .../test_cc_write_files_deferred.py} | 4 +- .../test_cc_yum_add_repo.py} | 2 +- .../test_cc_zypper_add_repo.py} | 4 +- .../{test_handler => config}/test_schema.py | 2 +- .../{test_distros => distros}/__init__.py | 0 .../{test_distros => distros}/test_arch.py | 2 +- .../test_bsd_utils.py | 2 +- .../test_create_users.py | 
2 +- .../{test_distros => distros}/test_debian.py | 2 +- .../test_dragonflybsd.py | 2 +- .../{test_distros => distros}/test_freebsd.py | 2 +- .../{test_distros => distros}/test_generic.py | 2 +- .../{test_distros => distros}/test_gentoo.py | 2 +- .../test_hostname.py | 0 .../{test_distros => distros}/test_hosts.py | 0 .../unittests/distros}/test_init.py | 0 .../test_manage_service.py | 12 +- .../{test_distros => distros}/test_netbsd.py | 0 .../test_netconfig.py | 2 +- .../unittests/distros}/test_networking.py | 0 .../test_opensuse.py | 2 +- .../{test_distros => distros}/test_photon.py | 4 +- .../{test_distros => distros}/test_resolv.py | 2 +- .../{test_distros => distros}/test_sles.py | 2 +- .../test_sysconfig.py | 2 +- .../test_user_data_normalize.py | 2 +- .../unittests/filters}/__init__.py | 0 .../test_launch_index.py | 2 +- .../tests => tests/unittests}/helpers.py | 0 .../tests => tests/unittests/net}/__init__.py | 0 .../unittests/net}/test_dhcp.py | 2 +- .../unittests/net}/test_init.py | 2 +- .../unittests/net}/test_network_state.py | 2 +- .../unittests/net}/test_networkd.py | 0 .../{test_datasource => runs}/__init__.py | 0 .../{test_runs => runs}/test_merge_run.py | 2 +- .../{test_runs => runs}/test_simple_run.py | 2 +- .../{test_filters => sources}/__init__.py | 0 .../sources/helpers}/test_netlink.py | 2 +- .../sources/helpers}/test_openstack.py | 2 +- .../test_aliyun.py | 2 +- .../test_altcloud.py | 2 +- .../test_azure.py | 2 +- .../test_azure_helper.py | 2 +- .../test_cloudsigma.py | 2 +- .../test_cloudstack.py | 2 +- .../test_common.py | 2 +- .../test_configdrive.py | 2 +- .../test_digitalocean.py | 2 +- .../{test_datasource => sources}/test_ec2.py | 2 +- .../test_exoscale.py | 2 +- .../{test_datasource => sources}/test_gce.py | 2 +- .../test_hetzner.py | 2 +- .../test_ibmcloud.py | 2 +- .../unittests/sources}/test_init.py | 2 +- .../unittests/sources}/test_lxd.py | 0 .../{test_datasource => sources}/test_maas.py | 2 +- .../test_nocloud.py | 2 +- .../test_opennebula.py | 2 +- .../test_openstack.py | 2 +- .../unittests/sources}/test_oracle.py | 2 +- .../{test_datasource => sources}/test_ovf.py | 2 +- .../{test_datasource => sources}/test_rbx.py | 2 +- .../test_scaleway.py | 2 +- .../test_smartos.py | 2 +- .../test_upcloud.py | 2 +- .../test_vmware.py | 2 +- .../test_vultr.py | 2 +- .../vmware}/__init__.py | 0 .../vmware}/test_custom_script.py | 2 +- .../vmware}/test_guestcust_util.py | 2 +- .../vmware}/test_vmware_config_file.py | 2 +- tests/unittests/test__init__.py | 2 +- tests/unittests/test_atomic_helper.py | 2 +- tests/unittests/test_builtin_handlers.py | 2 +- tests/unittests/test_cli.py | 2 +- .../unittests}/test_conftest.py | 2 +- tests/unittests/test_cs_util.py | 2 +- tests/unittests/test_data.py | 2 +- .../unittests}/test_dhclient_hook.py | 2 +- .../tests => tests/unittests}/test_dmi.py | 2 +- tests/unittests/test_ds_identify.py | 2 +- tests/unittests/test_ec2_util.py | 2 +- .../tests => tests/unittests}/test_event.py | 0 .../unittests}/test_features.py | 0 tests/unittests/test_gpg.py | 49 + tests/unittests/test_helpers.py | 2 +- tests/unittests/test_log.py | 2 +- tests/unittests/test_merging.py | 2 +- tests/unittests/test_net.py | 2 +- tests/unittests/test_net_freebsd.py | 2 +- .../tests => tests/unittests}/test_netinfo.py | 2 +- tests/unittests/test_pathprefix2dict.py | 2 +- .../unittests}/test_persistence.py | 0 tests/unittests/test_registry.py | 2 +- tests/unittests/test_reporting.py | 2 +- tests/unittests/test_reporting_hyperv.py | 2 +- 
tests/unittests/test_runs/__init__.py | 0 .../unittests}/test_simpletable.py | 2 +- tests/unittests/test_sshutil.py | 2 +- .../tests => tests/unittests}/test_stages.py | 2 +- .../tests => tests/unittests}/test_subp.py | 2 +- .../unittests}/test_temp_utils.py | 2 +- tests/unittests/test_templating.py | 2 +- .../tests => tests/unittests}/test_upgrade.py | 2 +- .../unittests}/test_url_helper.py | 2 +- tests/unittests/test_util.py | 1660 +++++++++++++++-- .../tests => tests/unittests}/test_version.py | 2 +- tests/unittests/test_vmware/__init__.py | 0 tests/unittests/util.py | 8 +- tox.ini | 10 +- 175 files changed, 1899 insertions(+), 1715 deletions(-) delete mode 100644 cloudinit/config/tests/test_mounts.py delete mode 100644 cloudinit/config/tests/test_resolv_conf.py delete mode 100644 cloudinit/tests/test_gpg.py delete mode 100644 cloudinit/tests/test_util.py rename {cloudinit/analyze/tests => tests/unittests/analyze}/test_boot.py (99%) rename {cloudinit/analyze/tests => tests/unittests/analyze}/test_dump.py (99%) rename cloudinit/cmd/devel/tests/__init__.py => tests/unittests/cloudinit/__init__py (100%) rename {cloudinit/cmd/tests => tests/unittests/cmd}/__init__.py (100%) rename {cloudinit/distros/tests => tests/unittests/cmd/devel}/__init__.py (100%) rename {cloudinit/cmd/devel/tests => tests/unittests/cmd/devel}/test_logs.py (99%) rename {cloudinit/cmd/devel/tests => tests/unittests/cmd/devel}/test_render.py (99%) rename {cloudinit/cmd/tests => tests/unittests/cmd}/test_clean.py (99%) rename {cloudinit/cmd/tests => tests/unittests/cmd}/test_cloud_id.py (99%) rename {cloudinit/cmd/tests => tests/unittests/cmd}/test_main.py (99%) rename {cloudinit/cmd/tests => tests/unittests/cmd}/test_query.py (99%) rename {cloudinit/cmd/tests => tests/unittests/cmd}/test_status.py (99%) rename {cloudinit/net/tests => tests/unittests/config}/__init__.py (100%) rename tests/unittests/{test_handler/test_handler_apt_conf_v1.py => config/test_apt_conf_v1.py} (99%) rename tests/unittests/{test_handler/test_handler_apt_configure_sources_list_v1.py => config/test_apt_configure_sources_list_v1.py} (99%) rename tests/unittests/{test_handler/test_handler_apt_configure_sources_list_v3.py => config/test_apt_configure_sources_list_v3.py} (99%) rename tests/unittests/{test_handler/test_handler_apt_key.py => config/test_apt_key.py} (100%) rename tests/unittests/{test_handler/test_handler_apt_source_v1.py => config/test_apt_source_v1.py} (99%) rename tests/unittests/{test_handler/test_handler_apt_source_v3.py => config/test_apt_source_v3.py} (99%) rename tests/unittests/{test_handler/test_handler_apk_configure.py => config/test_cc_apk_configure.py} (99%) rename cloudinit/config/tests/test_apt_pipelining.py => tests/unittests/config/test_cc_apt_pipelining.py (95%) rename tests/unittests/{test_handler/test_handler_bootcmd.py => config/test_cc_bootcmd.py} (99%) rename tests/unittests/{test_handler/test_handler_ca_certs.py => config/test_cc_ca_certs.py} (99%) rename tests/unittests/{test_handler/test_handler_chef.py => config/test_cc_chef.py} (99%) rename tests/unittests/{test_handler/test_handler_debug.py => config/test_cc_debug.py} (96%) rename cloudinit/config/tests/test_disable_ec2_metadata.py => tests/unittests/config/test_cc_disable_ec2_metadata.py (97%) rename tests/unittests/{test_handler/test_handler_disk_setup.py => config/test_cc_disk_setup.py} (99%) rename cloudinit/config/tests/test_final_message.py => tests/unittests/config/test_cc_final_message.py (100%) rename 
tests/unittests/{test_handler/test_handler_growpart.py => config/test_cc_growpart.py} (99%) rename cloudinit/config/tests/test_grub_dpkg.py => tests/unittests/config/test_cc_grub_dpkg.py (100%) rename tests/unittests/{test_handler/test_handler_install_hotplug.py => config/test_cc_install_hotplug.py} (100%) rename cloudinit/config/tests/test_keys_to_console.py => tests/unittests/config/test_cc_keys_to_console.py (100%) rename tests/unittests/{test_handler/test_handler_landscape.py => config/test_cc_landscape.py} (98%) rename tests/unittests/{test_handler/test_handler_locale.py => config/test_cc_locale.py} (98%) rename tests/unittests/{test_handler/test_handler_lxd.py => config/test_cc_lxd.py} (99%) rename tests/unittests/{test_handler/test_handler_mcollective.py => config/test_cc_mcollective.py} (99%) rename tests/unittests/{test_handler/test_handler_mounts.py => config/test_cc_mounts.py} (88%) rename tests/unittests/{test_handler/test_handler_ntp.py => config/test_cc_ntp.py} (99%) rename tests/unittests/{test_handler/test_handler_power_state.py => config/test_cc_power_state_change.py} (98%) rename tests/unittests/{test_handler/test_handler_puppet.py => config/test_cc_puppet.py} (99%) rename tests/unittests/{test_handler/test_handler_refresh_rmc_and_interface.py => config/test_cc_refresh_rmc_and_interface.py} (97%) rename tests/unittests/{test_handler/test_handler_resizefs.py => config/test_cc_resizefs.py} (99%) rename tests/unittests/{test_handler/test_handler_resolv_conf.py => config/test_cc_resolv_conf.py} (52%) rename tests/unittests/{test_rh_subscription.py => config/test_cc_rh_subscription.py} (99%) rename tests/unittests/{test_handler/test_handler_rsyslog.py => config/test_cc_rsyslog.py} (99%) rename tests/unittests/{test_handler/test_handler_runcmd.py => config/test_cc_runcmd.py} (99%) rename tests/unittests/{test_handler/test_handler_seed_random.py => config/test_cc_seed_random.py} (99%) rename tests/unittests/{test_handler/test_handler_set_hostname.py => config/test_cc_set_hostname.py} (99%) rename cloudinit/config/tests/test_set_passwords.py => tests/unittests/config/test_cc_set_passwords.py (90%) rename cloudinit/config/tests/test_snap.py => tests/unittests/config/test_cc_snap.py (99%) rename tests/unittests/{test_handler/test_handler_spacewalk.py => config/test_cc_spacewalk.py} (97%) rename cloudinit/config/tests/test_ssh.py => tests/unittests/config/test_cc_ssh.py (99%) rename tests/unittests/{test_handler/test_handler_timezone.py => config/test_cc_timezone.py} (96%) rename cloudinit/config/tests/test_ubuntu_advantage.py => tests/unittests/config/test_cc_ubuntu_advantage.py (99%) rename cloudinit/config/tests/test_ubuntu_drivers.py => tests/unittests/config/test_cc_ubuntu_drivers.py (99%) rename tests/unittests/{test_handler/test_handler_etc_hosts.py => config/test_cc_update_etc_hosts.py} (98%) rename cloudinit/config/tests/test_users_groups.py => tests/unittests/config/test_cc_users_groups.py (99%) rename tests/unittests/{test_handler/test_handler_write_files.py => config/test_cc_write_files.py} (99%) rename tests/unittests/{test_handler/test_handler_write_files_deferred.py => config/test_cc_write_files_deferred.py} (96%) rename tests/unittests/{test_handler/test_handler_yum_add_repo.py => config/test_cc_yum_add_repo.py} (99%) rename tests/unittests/{test_handler/test_handler_zypper_add_repo.py => config/test_cc_zypper_add_repo.py} (99%) rename tests/unittests/{test_handler => config}/test_schema.py (99%) rename tests/unittests/{test_distros => distros}/__init__.py (100%) 
rename tests/unittests/{test_distros => distros}/test_arch.py (96%) rename tests/unittests/{test_distros => distros}/test_bsd_utils.py (97%) rename tests/unittests/{test_distros => distros}/test_create_users.py (99%) rename tests/unittests/{test_distros => distros}/test_debian.py (99%) rename tests/unittests/{test_distros => distros}/test_dragonflybsd.py (94%) rename tests/unittests/{test_distros => distros}/test_freebsd.py (96%) rename tests/unittests/{test_distros => distros}/test_generic.py (99%) rename tests/unittests/{test_distros => distros}/test_gentoo.py (95%) rename tests/unittests/{test_distros => distros}/test_hostname.py (100%) rename tests/unittests/{test_distros => distros}/test_hosts.py (100%) rename {cloudinit/distros/tests => tests/unittests/distros}/test_init.py (100%) rename tests/unittests/{test_distros => distros}/test_manage_service.py (76%) rename tests/unittests/{test_distros => distros}/test_netbsd.py (100%) rename tests/unittests/{test_distros => distros}/test_netconfig.py (99%) rename {cloudinit/distros/tests => tests/unittests/distros}/test_networking.py (100%) rename tests/unittests/{test_distros => distros}/test_opensuse.py (84%) rename tests/unittests/{test_distros => distros}/test_photon.py (96%) rename tests/unittests/{test_distros => distros}/test_resolv.py (98%) rename tests/unittests/{test_distros => distros}/test_sles.py (84%) rename tests/unittests/{test_distros => distros}/test_sysconfig.py (98%) rename tests/unittests/{test_distros => distros}/test_user_data_normalize.py (99%) rename {cloudinit/sources/tests => tests/unittests/filters}/__init__.py (100%) rename tests/unittests/{test_filters => filters}/test_launch_index.py (99%) rename {cloudinit/tests => tests/unittests}/helpers.py (100%) rename {cloudinit/tests => tests/unittests/net}/__init__.py (100%) rename {cloudinit/net/tests => tests/unittests/net}/test_dhcp.py (99%) rename {cloudinit/net/tests => tests/unittests/net}/test_init.py (99%) rename {cloudinit/net/tests => tests/unittests/net}/test_network_state.py (99%) rename {cloudinit/net/tests => tests/unittests/net}/test_networkd.py (100%) rename tests/unittests/{test_datasource => runs}/__init__.py (100%) rename tests/unittests/{test_runs => runs}/test_merge_run.py (98%) rename tests/unittests/{test_runs => runs}/test_simple_run.py (99%) rename tests/unittests/{test_filters => sources}/__init__.py (100%) rename {cloudinit/sources/helpers/tests => tests/unittests/sources/helpers}/test_netlink.py (99%) rename {cloudinit/sources/helpers/tests => tests/unittests/sources/helpers}/test_openstack.py (97%) rename tests/unittests/{test_datasource => sources}/test_aliyun.py (99%) rename tests/unittests/{test_datasource => sources}/test_altcloud.py (99%) rename tests/unittests/{test_datasource => sources}/test_azure.py (99%) rename tests/unittests/{test_datasource => sources}/test_azure_helper.py (99%) rename tests/unittests/{test_datasource => sources}/test_cloudsigma.py (98%) rename tests/unittests/{test_datasource => sources}/test_cloudstack.py (99%) rename tests/unittests/{test_datasource => sources}/test_common.py (98%) rename tests/unittests/{test_datasource => sources}/test_configdrive.py (99%) rename tests/unittests/{test_datasource => sources}/test_digitalocean.py (99%) rename tests/unittests/{test_datasource => sources}/test_ec2.py (99%) rename tests/unittests/{test_datasource => sources}/test_exoscale.py (99%) rename tests/unittests/{test_datasource => sources}/test_gce.py (99%) rename tests/unittests/{test_datasource => 
sources}/test_hetzner.py (98%) rename tests/unittests/{test_datasource => sources}/test_ibmcloud.py (99%) rename {cloudinit/sources/tests => tests/unittests/sources}/test_init.py (99%) rename {cloudinit/sources/tests => tests/unittests/sources}/test_lxd.py (100%) rename tests/unittests/{test_datasource => sources}/test_maas.py (99%) rename tests/unittests/{test_datasource => sources}/test_nocloud.py (99%) rename tests/unittests/{test_datasource => sources}/test_opennebula.py (99%) rename tests/unittests/{test_datasource => sources}/test_openstack.py (99%) rename {cloudinit/sources/tests => tests/unittests/sources}/test_oracle.py (99%) rename tests/unittests/{test_datasource => sources}/test_ovf.py (99%) rename tests/unittests/{test_datasource => sources}/test_rbx.py (99%) rename tests/unittests/{test_datasource => sources}/test_scaleway.py (99%) rename tests/unittests/{test_datasource => sources}/test_smartos.py (99%) rename tests/unittests/{test_datasource => sources}/test_upcloud.py (99%) rename tests/unittests/{test_datasource => sources}/test_vmware.py (99%) rename tests/unittests/{test_datasource => sources}/test_vultr.py (99%) rename tests/unittests/{test_handler => sources/vmware}/__init__.py (100%) rename tests/unittests/{test_vmware => sources/vmware}/test_custom_script.py (98%) rename tests/unittests/{test_vmware => sources/vmware}/test_guestcust_util.py (98%) rename tests/unittests/{ => sources/vmware}/test_vmware_config_file.py (99%) rename {cloudinit/tests => tests/unittests}/test_conftest.py (97%) rename {cloudinit/tests => tests/unittests}/test_dhclient_hook.py (98%) rename {cloudinit/tests => tests/unittests}/test_dmi.py (99%) rename {cloudinit/tests => tests/unittests}/test_event.py (100%) rename {cloudinit/tests => tests/unittests}/test_features.py (100%) rename {cloudinit/tests => tests/unittests}/test_netinfo.py (99%) rename {cloudinit/tests => tests/unittests}/test_persistence.py (100%) delete mode 100644 tests/unittests/test_runs/__init__.py rename {cloudinit/tests => tests/unittests}/test_simpletable.py (98%) rename {cloudinit/tests => tests/unittests}/test_stages.py (99%) rename {cloudinit/tests => tests/unittests}/test_subp.py (99%) rename {cloudinit/tests => tests/unittests}/test_temp_utils.py (98%) rename {cloudinit/tests => tests/unittests}/test_upgrade.py (97%) rename {cloudinit/tests => tests/unittests}/test_url_helper.py (99%) rename {cloudinit/tests => tests/unittests}/test_version.py (95%) delete mode 100644 tests/unittests/test_vmware/__init__.py diff --git a/cloudinit/config/tests/test_mounts.py b/cloudinit/config/tests/test_mounts.py deleted file mode 100644 index 56510fd626c..00000000000 --- a/cloudinit/config/tests/test_mounts.py +++ /dev/null @@ -1,61 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. -from unittest import mock - -import pytest - -from cloudinit.config.cc_mounts import create_swapfile -from cloudinit.subp import ProcessExecutionError - - -M_PATH = 'cloudinit.config.cc_mounts.' 
- - -class TestCreateSwapfile: - - @pytest.mark.parametrize('fstype', ('xfs', 'btrfs', 'ext4', 'other')) - @mock.patch(M_PATH + 'util.get_mount_info') - @mock.patch(M_PATH + 'subp.subp') - def test_happy_path(self, m_subp, m_get_mount_info, fstype, tmpdir): - swap_file = tmpdir.join("swap-file") - fname = str(swap_file) - - # Some of the calls to subp.subp should create the swap file; this - # roughly approximates that - m_subp.side_effect = lambda *args, **kwargs: swap_file.write('') - - m_get_mount_info.return_value = (mock.ANY, fstype) - - create_swapfile(fname, '') - assert mock.call(['mkswap', fname]) in m_subp.call_args_list - - @mock.patch(M_PATH + "util.get_mount_info") - @mock.patch(M_PATH + "subp.subp") - def test_fallback_from_fallocate_to_dd( - self, m_subp, m_get_mount_info, caplog, tmpdir - ): - swap_file = tmpdir.join("swap-file") - fname = str(swap_file) - - def subp_side_effect(cmd, *args, **kwargs): - # Mock fallocate failing, to initiate fallback - if cmd[0] == "fallocate": - raise ProcessExecutionError() - - m_subp.side_effect = subp_side_effect - # Use ext4 so both fallocate and dd are valid swap creation methods - m_get_mount_info.return_value = (mock.ANY, "ext4") - - create_swapfile(fname, "") - - cmds = [args[0][0] for args, _kwargs in m_subp.call_args_list] - assert "fallocate" in cmds, "fallocate was not called" - assert "dd" in cmds, "fallocate failure did not fallback to dd" - - assert cmds.index("dd") > cmds.index( - "fallocate" - ), "dd ran before fallocate" - - assert mock.call(["mkswap", fname]) in m_subp.call_args_list - - msg = "fallocate swap creation failed, will attempt with dd" - assert msg in caplog.text diff --git a/cloudinit/config/tests/test_resolv_conf.py b/cloudinit/config/tests/test_resolv_conf.py deleted file mode 100644 index aff110e5c53..00000000000 --- a/cloudinit/config/tests/test_resolv_conf.py +++ /dev/null @@ -1,92 +0,0 @@ -import pytest - -from unittest import mock -from cloudinit.config.cc_resolv_conf import generate_resolv_conf -from tests.unittests.util import TestingDistro - -EXPECTED_HEADER = """\ -# Your system has been configured with 'manage-resolv-conf' set to true. -# As a result, cloud-init has written this file with configuration data -# that it has been provided. Cloud-init, by default, will write this file -# a single time (PER_ONCE). -#\n\n""" - - -class TestGenerateResolvConf: - - dist = TestingDistro() - tmpl_fn = "templates/resolv.conf.tmpl" - - @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file") - def test_dist_resolv_conf_fn(self, m_render_to_file): - self.dist.resolve_conf_fn = "/tmp/resolv-test.conf" - generate_resolv_conf(self.tmpl_fn, - mock.MagicMock(), - self.dist.resolve_conf_fn) - - assert [ - mock.call(mock.ANY, self.dist.resolve_conf_fn, mock.ANY) - ] == m_render_to_file.call_args_list - - @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file") - def test_target_fname_is_used_if_passed(self, m_render_to_file): - path = "/use/this/path" - generate_resolv_conf(self.tmpl_fn, mock.MagicMock(), path) - - assert [ - mock.call(mock.ANY, path, mock.ANY) - ] == m_render_to_file.call_args_list - - # Patch in templater so we can assert on the actual generated content - @mock.patch("cloudinit.templater.util.write_file") - # Parameterise with the value to be passed to generate_resolv_conf as the - # params parameter, and the expected line after the header as - # expected_extra_line. 
- @pytest.mark.parametrize( - "params,expected_extra_line", - [ - # No options - ({}, None), - # Just a true flag - ({"options": {"foo": True}}, "options foo"), - # Just a false flag - ({"options": {"foo": False}}, None), - # Just an option - ({"options": {"foo": "some_value"}}, "options foo:some_value"), - # A true flag and an option - ( - {"options": {"foo": "some_value", "bar": True}}, - "options bar foo:some_value", - ), - # Two options - ( - {"options": {"foo": "some_value", "bar": "other_value"}}, - "options bar:other_value foo:some_value", - ), - # Everything - ( - { - "options": { - "foo": "some_value", - "bar": "other_value", - "baz": False, - "spam": True, - } - }, - "options spam bar:other_value foo:some_value", - ), - ], - ) - def test_flags_and_options( - self, m_write_file, params, expected_extra_line - ): - target_fn = "/etc/resolv.conf" - generate_resolv_conf(self.tmpl_fn, params, target_fn) - - expected_content = EXPECTED_HEADER - if expected_extra_line is not None: - # If we have any extra lines, expect a trailing newline - expected_content += "\n".join([expected_extra_line, ""]) - assert [ - mock.call(mock.ANY, expected_content, mode=mock.ANY) - ] == m_write_file.call_args_list diff --git a/cloudinit/tests/test_gpg.py b/cloudinit/tests/test_gpg.py deleted file mode 100644 index 311dfad6742..00000000000 --- a/cloudinit/tests/test_gpg.py +++ /dev/null @@ -1,55 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. -"""Test gpg module.""" - -from unittest import mock - -from cloudinit import gpg -from cloudinit import subp -from cloudinit.tests.helpers import CiTestCase - - -@mock.patch("cloudinit.gpg.time.sleep") -@mock.patch("cloudinit.gpg.subp.subp") -class TestReceiveKeys(CiTestCase): - """Test the recv_key method.""" - - def test_retries_on_subp_exc(self, m_subp, m_sleep): - """retry should be done on gpg receive keys failure.""" - retries = (1, 2, 4) - my_exc = subp.ProcessExecutionError( - stdout='', stderr='', exit_code=2, cmd=['mycmd']) - m_subp.side_effect = (my_exc, my_exc, ('', '')) - gpg.recv_key("ABCD", "keyserver.example.com", retries=retries) - self.assertEqual([mock.call(1), mock.call(2)], m_sleep.call_args_list) - - def test_raises_error_after_retries(self, m_subp, m_sleep): - """If the final run fails, error should be raised.""" - naplen = 1 - keyid, keyserver = ("ABCD", "keyserver.example.com") - m_subp.side_effect = subp.ProcessExecutionError( - stdout='', stderr='', exit_code=2, cmd=['mycmd']) - with self.assertRaises(ValueError) as rcm: - gpg.recv_key(keyid, keyserver, retries=(naplen,)) - self.assertIn(keyid, str(rcm.exception)) - self.assertIn(keyserver, str(rcm.exception)) - m_sleep.assert_called_with(naplen) - - def test_no_retries_on_none(self, m_subp, m_sleep): - """retry should not be done if retries is None.""" - m_subp.side_effect = subp.ProcessExecutionError( - stdout='', stderr='', exit_code=2, cmd=['mycmd']) - with self.assertRaises(ValueError): - gpg.recv_key("ABCD", "keyserver.example.com", retries=None) - m_sleep.assert_not_called() - - def test_expected_gpg_command(self, m_subp, m_sleep): - """Verify gpg is called with expected args.""" - key, keyserver = ("DEADBEEF", "keyserver.example.com") - retries = (1, 2, 4) - m_subp.return_value = ('', '') - gpg.recv_key(key, keyserver, retries=retries) - m_subp.assert_called_once_with( - ['gpg', '--no-tty', - '--keyserver=%s' % keyserver, '--recv-keys', key], - capture=True) - m_sleep.assert_not_called() diff --git a/cloudinit/tests/test_util.py 
b/cloudinit/tests/test_util.py deleted file mode 100644 index 7a3175f36f4..00000000000 --- a/cloudinit/tests/test_util.py +++ /dev/null @@ -1,1187 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -"""Tests for cloudinit.util""" - -import base64 -import logging -import json -import platform -import pytest - -import cloudinit.util as util -from cloudinit import subp - -from cloudinit.tests.helpers import CiTestCase, mock -from textwrap import dedent - -LOG = logging.getLogger(__name__) - -MOUNT_INFO = [ - '68 0 8:3 / / ro,relatime shared:1 - btrfs /dev/sda1 ro,attr2,inode64', - '153 68 254:0 / /home rw,relatime shared:101 - xfs /dev/sda2 rw,attr2' -] - -OS_RELEASE_SLES = dedent("""\ - NAME="SLES" - VERSION="12-SP3" - VERSION_ID="12.3" - PRETTY_NAME="SUSE Linux Enterprise Server 12 SP3" - ID="sles" - ANSI_COLOR="0;32" - CPE_NAME="cpe:/o:suse:sles:12:sp3" -""") - -OS_RELEASE_OPENSUSE = dedent("""\ - NAME="openSUSE Leap" - VERSION="42.3" - ID=opensuse - ID_LIKE="suse" - VERSION_ID="42.3" - PRETTY_NAME="openSUSE Leap 42.3" - ANSI_COLOR="0;32" - CPE_NAME="cpe:/o:opensuse:leap:42.3" - BUG_REPORT_URL="https://bugs.opensuse.org" - HOME_URL="https://www.opensuse.org/" -""") - -OS_RELEASE_OPENSUSE_L15 = dedent("""\ - NAME="openSUSE Leap" - VERSION="15.0" - ID="opensuse-leap" - ID_LIKE="suse opensuse" - VERSION_ID="15.0" - PRETTY_NAME="openSUSE Leap 15.0" - ANSI_COLOR="0;32" - CPE_NAME="cpe:/o:opensuse:leap:15.0" - BUG_REPORT_URL="https://bugs.opensuse.org" - HOME_URL="https://www.opensuse.org/" -""") - -OS_RELEASE_OPENSUSE_TW = dedent("""\ - NAME="openSUSE Tumbleweed" - ID="opensuse-tumbleweed" - ID_LIKE="opensuse suse" - VERSION_ID="20180920" - PRETTY_NAME="openSUSE Tumbleweed" - ANSI_COLOR="0;32" - CPE_NAME="cpe:/o:opensuse:tumbleweed:20180920" - BUG_REPORT_URL="https://bugs.opensuse.org" - HOME_URL="https://www.opensuse.org/" -""") - -OS_RELEASE_CENTOS = dedent("""\ - NAME="CentOS Linux" - VERSION="7 (Core)" - ID="centos" - ID_LIKE="rhel fedora" - VERSION_ID="7" - PRETTY_NAME="CentOS Linux 7 (Core)" - ANSI_COLOR="0;31" - CPE_NAME="cpe:/o:centos:centos:7" - HOME_URL="https://www.centos.org/" - BUG_REPORT_URL="https://bugs.centos.org/" - - CENTOS_MANTISBT_PROJECT="CentOS-7" - CENTOS_MANTISBT_PROJECT_VERSION="7" - REDHAT_SUPPORT_PRODUCT="centos" - REDHAT_SUPPORT_PRODUCT_VERSION="7" -""") - -OS_RELEASE_REDHAT_7 = dedent("""\ - NAME="Red Hat Enterprise Linux Server" - VERSION="7.5 (Maipo)" - ID="rhel" - ID_LIKE="fedora" - VARIANT="Server" - VARIANT_ID="server" - VERSION_ID="7.5" - PRETTY_NAME="Red Hat" - ANSI_COLOR="0;31" - CPE_NAME="cpe:/o:redhat:enterprise_linux:7.5:GA:server" - HOME_URL="https://www.redhat.com/" - BUG_REPORT_URL="https://bugzilla.redhat.com/" - - REDHAT_BUGZILLA_PRODUCT="Red Hat Enterprise Linux 7" - REDHAT_BUGZILLA_PRODUCT_VERSION=7.5 - REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux" - REDHAT_SUPPORT_PRODUCT_VERSION="7.5" -""") - -OS_RELEASE_ALMALINUX_8 = dedent("""\ - NAME="AlmaLinux" - VERSION="8.3 (Purple Manul)" - ID="almalinux" - ID_LIKE="rhel centos fedora" - VERSION_ID="8.3" - PLATFORM_ID="platform:el8" - PRETTY_NAME="AlmaLinux 8.3 (Purple Manul)" - ANSI_COLOR="0;34" - CPE_NAME="cpe:/o:almalinux:almalinux:8.3:GA" - HOME_URL="https://almalinux.org/" - BUG_REPORT_URL="https://bugs.almalinux.org/" - - ALMALINUX_MANTISBT_PROJECT="AlmaLinux-8" - ALMALINUX_MANTISBT_PROJECT_VERSION="8.3" -""") - -OS_RELEASE_EUROLINUX_7 = dedent("""\ - VERSION="7.9 (Minsk)" - ID="eurolinux" - ID_LIKE="rhel scientific centos fedora" - VERSION_ID="7.9" - 
PRETTY_NAME="EuroLinux 7.9 (Minsk)" - ANSI_COLOR="0;31" - CPE_NAME="cpe:/o:eurolinux:eurolinux:7.9:GA" - HOME_URL="http://www.euro-linux.com/" - BUG_REPORT_URL="mailto:support@euro-linux.com" - REDHAT_BUGZILLA_PRODUCT="EuroLinux 7" - REDHAT_BUGZILLA_PRODUCT_VERSION=7.9 - REDHAT_SUPPORT_PRODUCT="EuroLinux" - REDHAT_SUPPORT_PRODUCT_VERSION="7.9" -""") - -OS_RELEASE_EUROLINUX_8 = dedent("""\ - NAME="EuroLinux" - VERSION="8.4 (Vaduz)" - ID="eurolinux" - ID_LIKE="rhel fedora centos" - VERSION_ID="8.4" - PLATFORM_ID="platform:el8" - PRETTY_NAME="EuroLinux 8.4 (Vaduz)" - ANSI_COLOR="0;34" - CPE_NAME="cpe:/o:eurolinux:eurolinux:8" - HOME_URL="https://www.euro-linux.com/" - BUG_REPORT_URL="https://github.com/EuroLinux/eurolinux-distro-bugs-and-rfc/" - REDHAT_SUPPORT_PRODUCT="EuroLinux" - REDHAT_SUPPORT_PRODUCT_VERSION="8" -""") - -OS_RELEASE_ROCKY_8 = dedent("""\ - NAME="Rocky Linux" - VERSION="8.3 (Green Obsidian)" - ID="rocky" - ID_LIKE="rhel fedora" - VERSION_ID="8.3" - PLATFORM_ID="platform:el8" - PRETTY_NAME="Rocky Linux 8.3 (Green Obsidian)" - ANSI_COLOR="0;31" - CPE_NAME="cpe:/o:rocky:rocky:8" - HOME_URL="https://rockylinux.org/" - BUG_REPORT_URL="https://bugs.rockylinux.org/" - ROCKY_SUPPORT_PRODUCT="Rocky Linux" - ROCKY_SUPPORT_PRODUCT_VERSION="8" -""") - -OS_RELEASE_VIRTUOZZO_8 = dedent("""\ - NAME="Virtuozzo Linux" - VERSION="8" - ID="virtuozzo" - ID_LIKE="rhel fedora" - VERSION_ID="8" - PLATFORM_ID="platform:el8" - PRETTY_NAME="Virtuozzo Linux" - ANSI_COLOR="0;31" - CPE_NAME="cpe:/o:virtuozzoproject:vzlinux:8" - HOME_URL="https://www.vzlinux.org" - BUG_REPORT_URL="https://bugs.openvz.org" -""") - -OS_RELEASE_CLOUDLINUX_8 = dedent("""\ - NAME="CloudLinux" - VERSION="8.4 (Valery Rozhdestvensky)" - ID="cloudlinux" - ID_LIKE="rhel fedora centos" - VERSION_ID="8.4" - PLATFORM_ID="platform:el8" - PRETTY_NAME="CloudLinux 8.4 (Valery Rozhdestvensky)" - ANSI_COLOR="0;31" - CPE_NAME="cpe:/o:cloudlinux:cloudlinux:8.4:GA:server" - HOME_URL="https://www.cloudlinux.com/" - BUG_REPORT_URL="https://www.cloudlinux.com/support" -""") - -OS_RELEASE_OPENEULER_20 = dedent("""\ - NAME="openEuler" - VERSION="20.03 (LTS-SP2)" - ID="openEuler" - VERSION_ID="20.03" - PRETTY_NAME="openEuler 20.03 (LTS-SP2)" - ANSI_COLOR="0;31" -""") - -REDHAT_RELEASE_CENTOS_6 = "CentOS release 6.10 (Final)" -REDHAT_RELEASE_CENTOS_7 = "CentOS Linux release 7.5.1804 (Core)" -REDHAT_RELEASE_REDHAT_6 = ( - "Red Hat Enterprise Linux Server release 6.10 (Santiago)") -REDHAT_RELEASE_REDHAT_7 = ( - "Red Hat Enterprise Linux Server release 7.5 (Maipo)") -REDHAT_RELEASE_ALMALINUX_8 = ( - "AlmaLinux release 8.3 (Purple Manul)") -REDHAT_RELEASE_EUROLINUX_7 = "EuroLinux release 7.9 (Minsk)" -REDHAT_RELEASE_EUROLINUX_8 = "EuroLinux release 8.4 (Vaduz)" -REDHAT_RELEASE_ROCKY_8 = ( - "Rocky Linux release 8.3 (Green Obsidian)") -REDHAT_RELEASE_VIRTUOZZO_8 = ( - "Virtuozzo Linux release 8") -REDHAT_RELEASE_CLOUDLINUX_8 = ( - "CloudLinux release 8.4 (Valery Rozhdestvensky)") -OS_RELEASE_DEBIAN = dedent("""\ - PRETTY_NAME="Debian GNU/Linux 9 (stretch)" - NAME="Debian GNU/Linux" - VERSION_ID="9" - VERSION="9 (stretch)" - ID=debian - HOME_URL="https://www.debian.org/" - SUPPORT_URL="https://www.debian.org/support" - BUG_REPORT_URL="https://bugs.debian.org/" -""") - -OS_RELEASE_UBUNTU = dedent("""\ - NAME="Ubuntu"\n - # comment test - VERSION="16.04.3 LTS (Xenial Xerus)"\n - ID=ubuntu\n - ID_LIKE=debian\n - PRETTY_NAME="Ubuntu 16.04.3 LTS"\n - VERSION_ID="16.04"\n - HOME_URL="http://www.ubuntu.com/"\n - SUPPORT_URL="http://help.ubuntu.com/"\n - 
BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"\n - VERSION_CODENAME=xenial\n - UBUNTU_CODENAME=xenial\n -""") - -OS_RELEASE_PHOTON = ("""\ - NAME="VMware Photon OS" - VERSION="4.0" - ID=photon - VERSION_ID=4.0 - PRETTY_NAME="VMware Photon OS/Linux" - ANSI_COLOR="1;34" - HOME_URL="https://vmware.github.io/photon/" - BUG_REPORT_URL="https://github.com/vmware/photon/issues" -""") - - -class FakeCloud(object): - - def __init__(self, hostname, fqdn): - self.hostname = hostname - self.fqdn = fqdn - self.calls = [] - - def get_hostname(self, fqdn=None, metadata_only=None): - myargs = {} - if fqdn is not None: - myargs['fqdn'] = fqdn - if metadata_only is not None: - myargs['metadata_only'] = metadata_only - self.calls.append(myargs) - if fqdn: - return self.fqdn - return self.hostname - - -class TestUtil(CiTestCase): - - def test_parse_mount_info_no_opts_no_arg(self): - result = util.parse_mount_info('/home', MOUNT_INFO, LOG) - self.assertEqual(('/dev/sda2', 'xfs', '/home'), result) - - def test_parse_mount_info_no_opts_arg(self): - result = util.parse_mount_info('/home', MOUNT_INFO, LOG, False) - self.assertEqual(('/dev/sda2', 'xfs', '/home'), result) - - def test_parse_mount_info_with_opts(self): - result = util.parse_mount_info('/', MOUNT_INFO, LOG, True) - self.assertEqual( - ('/dev/sda1', 'btrfs', '/', 'ro,relatime'), - result - ) - - @mock.patch('cloudinit.util.get_mount_info') - def test_mount_is_rw(self, m_mount_info): - m_mount_info.return_value = ('/dev/sda1', 'btrfs', '/', 'rw,relatime') - is_rw = util.mount_is_read_write('/') - self.assertEqual(is_rw, True) - - @mock.patch('cloudinit.util.get_mount_info') - def test_mount_is_ro(self, m_mount_info): - m_mount_info.return_value = ('/dev/sda1', 'btrfs', '/', 'ro,relatime') - is_rw = util.mount_is_read_write('/') - self.assertEqual(is_rw, False) - - -class TestUptime(CiTestCase): - - @mock.patch('cloudinit.util.boottime') - @mock.patch('cloudinit.util.os.path.exists') - @mock.patch('cloudinit.util.time.time') - def test_uptime_non_linux_path(self, m_time, m_exists, m_boottime): - boottime = 1000.0 - uptime = 10.0 - m_boottime.return_value = boottime - m_time.return_value = boottime + uptime - m_exists.return_value = False - result = util.uptime() - self.assertEqual(str(uptime), result) - - -class TestShellify(CiTestCase): - - def test_input_dict_raises_type_error(self): - self.assertRaisesRegex( - TypeError, 'Input.*was.*dict.*xpected', - util.shellify, {'mykey': 'myval'}) - - def test_input_str_raises_type_error(self): - self.assertRaisesRegex( - TypeError, 'Input.*was.*str.*xpected', util.shellify, "foobar") - - def test_value_with_int_raises_type_error(self): - self.assertRaisesRegex( - TypeError, 'shellify.*int', util.shellify, ["foo", 1]) - - def test_supports_strings_and_lists(self): - self.assertEqual( - '\n'.join(["#!/bin/sh", "echo hi mom", "'echo' 'hi dad'", - "'echo' 'hi' 'sis'", ""]), - util.shellify(["echo hi mom", ["echo", "hi dad"], - ('echo', 'hi', 'sis')])) - - def test_supports_comments(self): - self.assertEqual( - '\n'.join(["#!/bin/sh", "echo start", "echo end", ""]), - util.shellify(["echo start", None, "echo end"])) - - -class TestGetHostnameFqdn(CiTestCase): - - def test_get_hostname_fqdn_from_only_cfg_fqdn(self): - """When cfg only has the fqdn key, derive hostname and fqdn from it.""" - hostname, fqdn = util.get_hostname_fqdn( - cfg={'fqdn': 'myhost.domain.com'}, cloud=None) - self.assertEqual('myhost', hostname) - self.assertEqual('myhost.domain.com', fqdn) - - def 
test_get_hostname_fqdn_from_cfg_fqdn_and_hostname(self): - """When cfg has both fqdn and hostname keys, return them.""" - hostname, fqdn = util.get_hostname_fqdn( - cfg={'fqdn': 'myhost.domain.com', 'hostname': 'other'}, cloud=None) - self.assertEqual('other', hostname) - self.assertEqual('myhost.domain.com', fqdn) - - def test_get_hostname_fqdn_from_cfg_hostname_with_domain(self): - """When cfg has only hostname key which represents a fqdn, use that.""" - hostname, fqdn = util.get_hostname_fqdn( - cfg={'hostname': 'myhost.domain.com'}, cloud=None) - self.assertEqual('myhost', hostname) - self.assertEqual('myhost.domain.com', fqdn) - - def test_get_hostname_fqdn_from_cfg_hostname_without_domain(self): - """When cfg has a hostname without a '.' query cloud.get_hostname.""" - mycloud = FakeCloud('cloudhost', 'cloudhost.mycloud.com') - hostname, fqdn = util.get_hostname_fqdn( - cfg={'hostname': 'myhost'}, cloud=mycloud) - self.assertEqual('myhost', hostname) - self.assertEqual('cloudhost.mycloud.com', fqdn) - self.assertEqual( - [{'fqdn': True, 'metadata_only': False}], mycloud.calls) - - def test_get_hostname_fqdn_from_without_fqdn_or_hostname(self): - """When cfg has neither hostname nor fqdn cloud.get_hostname.""" - mycloud = FakeCloud('cloudhost', 'cloudhost.mycloud.com') - hostname, fqdn = util.get_hostname_fqdn(cfg={}, cloud=mycloud) - self.assertEqual('cloudhost', hostname) - self.assertEqual('cloudhost.mycloud.com', fqdn) - self.assertEqual( - [{'fqdn': True, 'metadata_only': False}, - {'metadata_only': False}], mycloud.calls) - - def test_get_hostname_fqdn_from_passes_metadata_only_to_cloud(self): - """Calls to cloud.get_hostname pass the metadata_only parameter.""" - mycloud = FakeCloud('cloudhost', 'cloudhost.mycloud.com') - _hn, _fqdn = util.get_hostname_fqdn( - cfg={}, cloud=mycloud, metadata_only=True) - self.assertEqual( - [{'fqdn': True, 'metadata_only': True}, - {'metadata_only': True}], mycloud.calls) - - -class TestBlkid(CiTestCase): - ids = { - "id01": "1111-1111", - "id02": "22222222-2222", - "id03": "33333333-3333", - "id04": "44444444-4444", - "id05": "55555555-5555-5555-5555-555555555555", - "id06": "66666666-6666-6666-6666-666666666666", - "id07": "52894610484658920398", - "id08": "86753098675309867530", - "id09": "99999999-9999-9999-9999-999999999999", - } - - blkid_out = dedent("""\ - /dev/loop0: TYPE="squashfs" - /dev/loop1: TYPE="squashfs" - /dev/loop2: TYPE="squashfs" - /dev/loop3: TYPE="squashfs" - /dev/sda1: UUID="{id01}" TYPE="vfat" PARTUUID="{id02}" - /dev/sda2: UUID="{id03}" TYPE="ext4" PARTUUID="{id04}" - /dev/sda3: UUID="{id05}" TYPE="ext4" PARTUUID="{id06}" - /dev/sda4: LABEL="default" UUID="{id07}" UUID_SUB="{id08}" """ - """TYPE="zfs_member" PARTUUID="{id09}" - /dev/loop4: TYPE="squashfs" - """) - - maxDiff = None - - def _get_expected(self): - return ({ - "/dev/loop0": {"DEVNAME": "/dev/loop0", "TYPE": "squashfs"}, - "/dev/loop1": {"DEVNAME": "/dev/loop1", "TYPE": "squashfs"}, - "/dev/loop2": {"DEVNAME": "/dev/loop2", "TYPE": "squashfs"}, - "/dev/loop3": {"DEVNAME": "/dev/loop3", "TYPE": "squashfs"}, - "/dev/loop4": {"DEVNAME": "/dev/loop4", "TYPE": "squashfs"}, - "/dev/sda1": {"DEVNAME": "/dev/sda1", "TYPE": "vfat", - "UUID": self.ids["id01"], - "PARTUUID": self.ids["id02"]}, - "/dev/sda2": {"DEVNAME": "/dev/sda2", "TYPE": "ext4", - "UUID": self.ids["id03"], - "PARTUUID": self.ids["id04"]}, - "/dev/sda3": {"DEVNAME": "/dev/sda3", "TYPE": "ext4", - "UUID": self.ids["id05"], - "PARTUUID": self.ids["id06"]}, - "/dev/sda4": {"DEVNAME": "/dev/sda4", 
"TYPE": "zfs_member", - "LABEL": "default", - "UUID": self.ids["id07"], - "UUID_SUB": self.ids["id08"], - "PARTUUID": self.ids["id09"]}, - }) - - @mock.patch("cloudinit.subp.subp") - def test_functional_blkid(self, m_subp): - m_subp.return_value = ( - self.blkid_out.format(**self.ids), "") - self.assertEqual(self._get_expected(), util.blkid()) - m_subp.assert_called_with(["blkid", "-o", "full"], capture=True, - decode="replace") - - @mock.patch("cloudinit.subp.subp") - def test_blkid_no_cache_uses_no_cache(self, m_subp): - """blkid should turn off cache if disable_cache is true.""" - m_subp.return_value = ( - self.blkid_out.format(**self.ids), "") - self.assertEqual(self._get_expected(), - util.blkid(disable_cache=True)) - m_subp.assert_called_with(["blkid", "-o", "full", "-c", "/dev/null"], - capture=True, decode="replace") - - -@mock.patch('cloudinit.subp.subp') -class TestUdevadmSettle(CiTestCase): - def test_with_no_params(self, m_subp): - """called with no parameters.""" - util.udevadm_settle() - m_subp.called_once_with(mock.call(['udevadm', 'settle'])) - - def test_with_exists_and_not_exists(self, m_subp): - """with exists=file where file does not exist should invoke subp.""" - mydev = self.tmp_path("mydev") - util.udevadm_settle(exists=mydev) - m_subp.called_once_with( - ['udevadm', 'settle', '--exit-if-exists=%s' % mydev]) - - def test_with_exists_and_file_exists(self, m_subp): - """with exists=file where file does exist should not invoke subp.""" - mydev = self.tmp_path("mydev") - util.write_file(mydev, "foo\n") - util.udevadm_settle(exists=mydev) - self.assertIsNone(m_subp.call_args) - - def test_with_timeout_int(self, m_subp): - """timeout can be an integer.""" - timeout = 9 - util.udevadm_settle(timeout=timeout) - m_subp.called_once_with( - ['udevadm', 'settle', '--timeout=%s' % timeout]) - - def test_with_timeout_string(self, m_subp): - """timeout can be a string.""" - timeout = "555" - util.udevadm_settle(timeout=timeout) - m_subp.assert_called_once_with( - ['udevadm', 'settle', '--timeout=%s' % timeout]) - - def test_with_exists_and_timeout(self, m_subp): - """test call with both exists and timeout.""" - mydev = self.tmp_path("mydev") - timeout = "3" - util.udevadm_settle(exists=mydev) - m_subp.called_once_with( - ['udevadm', 'settle', '--exit-if-exists=%s' % mydev, - '--timeout=%s' % timeout]) - - def test_subp_exception_raises_to_caller(self, m_subp): - m_subp.side_effect = subp.ProcessExecutionError("BOOM") - self.assertRaises(subp.ProcessExecutionError, util.udevadm_settle) - - -@mock.patch('os.path.exists') -class TestGetLinuxDistro(CiTestCase): - - def setUp(self): - # python2 has no lru_cache, and therefore, no cache_clear() - if hasattr(util.get_linux_distro, "cache_clear"): - util.get_linux_distro.cache_clear() - - @classmethod - def os_release_exists(self, path): - """Side effect function""" - if path == '/etc/os-release': - return 1 - - @classmethod - def redhat_release_exists(self, path): - """Side effect function """ - if path == '/etc/redhat-release': - return 1 - - @mock.patch('cloudinit.util.load_file') - def test_get_linux_distro_quoted_name(self, m_os_release, m_path_exists): - """Verify we get the correct name if the os-release file has - the distro name in quotes""" - m_os_release.return_value = OS_RELEASE_SLES - m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists - dist = util.get_linux_distro() - self.assertEqual(('sles', '12.3', platform.machine()), dist) - - @mock.patch('cloudinit.util.load_file') - def 
test_get_linux_distro_bare_name(self, m_os_release, m_path_exists): - """Verify we get the correct name if the os-release file does not - have the distro name in quotes""" - m_os_release.return_value = OS_RELEASE_UBUNTU - m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists - dist = util.get_linux_distro() - self.assertEqual(('ubuntu', '16.04', 'xenial'), dist) - - @mock.patch('platform.system') - @mock.patch('platform.release') - @mock.patch('cloudinit.util._parse_redhat_release') - def test_get_linux_freebsd(self, m_parse_redhat_release, - m_platform_release, - m_platform_system, m_path_exists): - """Verify we get the correct name and release name on FreeBSD.""" - m_path_exists.return_value = False - m_platform_release.return_value = '12.0-RELEASE-p10' - m_platform_system.return_value = 'FreeBSD' - m_parse_redhat_release.return_value = {} - util.is_BSD.cache_clear() - dist = util.get_linux_distro() - self.assertEqual(('freebsd', '12.0-RELEASE-p10', ''), dist) - - @mock.patch('cloudinit.util.load_file') - def test_get_linux_centos6(self, m_os_release, m_path_exists): - """Verify we get the correct name and release name on CentOS 6.""" - m_os_release.return_value = REDHAT_RELEASE_CENTOS_6 - m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists - dist = util.get_linux_distro() - self.assertEqual(('centos', '6.10', 'Final'), dist) - - @mock.patch('cloudinit.util.load_file') - def test_get_linux_centos7_redhat_release(self, m_os_release, m_exists): - """Verify the correct release info on CentOS 7 without os-release.""" - m_os_release.return_value = REDHAT_RELEASE_CENTOS_7 - m_exists.side_effect = TestGetLinuxDistro.redhat_release_exists - dist = util.get_linux_distro() - self.assertEqual(('centos', '7.5.1804', 'Core'), dist) - - @mock.patch('cloudinit.util.load_file') - def test_get_linux_redhat7_osrelease(self, m_os_release, m_path_exists): - """Verify redhat 7 read from os-release.""" - m_os_release.return_value = OS_RELEASE_REDHAT_7 - m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists - dist = util.get_linux_distro() - self.assertEqual(('redhat', '7.5', 'Maipo'), dist) - - @mock.patch('cloudinit.util.load_file') - def test_get_linux_redhat7_rhrelease(self, m_os_release, m_path_exists): - """Verify redhat 7 read from redhat-release.""" - m_os_release.return_value = REDHAT_RELEASE_REDHAT_7 - m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists - dist = util.get_linux_distro() - self.assertEqual(('redhat', '7.5', 'Maipo'), dist) - - @mock.patch('cloudinit.util.load_file') - def test_get_linux_redhat6_rhrelease(self, m_os_release, m_path_exists): - """Verify redhat 6 read from redhat-release.""" - m_os_release.return_value = REDHAT_RELEASE_REDHAT_6 - m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists - dist = util.get_linux_distro() - self.assertEqual(('redhat', '6.10', 'Santiago'), dist) - - @mock.patch('cloudinit.util.load_file') - def test_get_linux_copr_centos(self, m_os_release, m_path_exists): - """Verify we get the correct name and release name on COPR CentOS.""" - m_os_release.return_value = OS_RELEASE_CENTOS - m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists - dist = util.get_linux_distro() - self.assertEqual(('centos', '7', 'Core'), dist) - - @mock.patch('cloudinit.util.load_file') - def test_get_linux_almalinux8_rhrelease(self, m_os_release, m_path_exists): - """Verify almalinux 8 read from redhat-release.""" - m_os_release.return_value = REDHAT_RELEASE_ALMALINUX_8 - m_path_exists.side_effect 
= TestGetLinuxDistro.redhat_release_exists - dist = util.get_linux_distro() - self.assertEqual(('almalinux', '8.3', 'Purple Manul'), dist) - - @mock.patch('cloudinit.util.load_file') - def test_get_linux_almalinux8_osrelease(self, m_os_release, m_path_exists): - """Verify almalinux 8 read from os-release.""" - m_os_release.return_value = OS_RELEASE_ALMALINUX_8 - m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists - dist = util.get_linux_distro() - self.assertEqual(('almalinux', '8.3', 'Purple Manul'), dist) - - @mock.patch('cloudinit.util.load_file') - def test_get_linux_eurolinux7_rhrelease(self, m_os_release, m_path_exists): - """Verify eurolinux 7 read from redhat-release.""" - m_os_release.return_value = REDHAT_RELEASE_EUROLINUX_7 - m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists - dist = util.get_linux_distro() - self.assertEqual(('eurolinux', '7.9', 'Minsk'), dist) - - @mock.patch('cloudinit.util.load_file') - def test_get_linux_eurolinux7_osrelease(self, m_os_release, m_path_exists): - """Verify eurolinux 7 read from os-release.""" - m_os_release.return_value = OS_RELEASE_EUROLINUX_7 - m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists - dist = util.get_linux_distro() - self.assertEqual(('eurolinux', '7.9', 'Minsk'), dist) - - @mock.patch('cloudinit.util.load_file') - def test_get_linux_eurolinux8_rhrelease(self, m_os_release, m_path_exists): - """Verify eurolinux 8 read from redhat-release.""" - m_os_release.return_value = REDHAT_RELEASE_EUROLINUX_8 - m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists - dist = util.get_linux_distro() - self.assertEqual(('eurolinux', '8.4', 'Vaduz'), dist) - - @mock.patch('cloudinit.util.load_file') - def test_get_linux_eurolinux8_osrelease(self, m_os_release, m_path_exists): - """Verify eurolinux 8 read from os-release.""" - m_os_release.return_value = OS_RELEASE_EUROLINUX_8 - m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists - dist = util.get_linux_distro() - self.assertEqual(('eurolinux', '8.4', 'Vaduz'), dist) - - @mock.patch('cloudinit.util.load_file') - def test_get_linux_rocky8_rhrelease(self, m_os_release, m_path_exists): - """Verify rocky linux 8 read from redhat-release.""" - m_os_release.return_value = REDHAT_RELEASE_ROCKY_8 - m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists - dist = util.get_linux_distro() - self.assertEqual(('rocky', '8.3', 'Green Obsidian'), dist) - - @mock.patch('cloudinit.util.load_file') - def test_get_linux_rocky8_osrelease(self, m_os_release, m_path_exists): - """Verify rocky linux 8 read from os-release.""" - m_os_release.return_value = OS_RELEASE_ROCKY_8 - m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists - dist = util.get_linux_distro() - self.assertEqual(('rocky', '8.3', 'Green Obsidian'), dist) - - @mock.patch('cloudinit.util.load_file') - def test_get_linux_virtuozzo8_rhrelease(self, m_os_release, m_path_exists): - """Verify virtuozzo linux 8 read from redhat-release.""" - m_os_release.return_value = REDHAT_RELEASE_VIRTUOZZO_8 - m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists - dist = util.get_linux_distro() - self.assertEqual(('virtuozzo', '8', 'Virtuozzo Linux'), dist) - - @mock.patch('cloudinit.util.load_file') - def test_get_linux_virtuozzo8_osrelease(self, m_os_release, m_path_exists): - """Verify virtuozzo linux 8 read from os-release.""" - m_os_release.return_value = OS_RELEASE_VIRTUOZZO_8 - m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists - dist = 
util.get_linux_distro() - self.assertEqual(('virtuozzo', '8', 'Virtuozzo Linux'), dist) - - @mock.patch('cloudinit.util.load_file') - def test_get_linux_cloud8_rhrelease(self, m_os_release, m_path_exists): - """Verify cloudlinux 8 read from redhat-release.""" - m_os_release.return_value = REDHAT_RELEASE_CLOUDLINUX_8 - m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists - dist = util.get_linux_distro() - self.assertEqual(('cloudlinux', '8.4', 'Valery Rozhdestvensky'), dist) - - @mock.patch('cloudinit.util.load_file') - def test_get_linux_cloud8_osrelease(self, m_os_release, m_path_exists): - """Verify cloudlinux 8 read from os-release.""" - m_os_release.return_value = OS_RELEASE_CLOUDLINUX_8 - m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists - dist = util.get_linux_distro() - self.assertEqual(('cloudlinux', '8.4', 'Valery Rozhdestvensky'), dist) - - @mock.patch('cloudinit.util.load_file') - def test_get_linux_debian(self, m_os_release, m_path_exists): - """Verify we get the correct name and release name on Debian.""" - m_os_release.return_value = OS_RELEASE_DEBIAN - m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists - dist = util.get_linux_distro() - self.assertEqual(('debian', '9', 'stretch'), dist) - - @mock.patch('cloudinit.util.load_file') - def test_get_linux_openeuler(self, m_os_release, m_path_exists): - """Verify get the correct name and release name on Openeuler.""" - m_os_release.return_value = OS_RELEASE_OPENEULER_20 - m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists - dist = util.get_linux_distro() - self.assertEqual(('openEuler', '20.03', 'LTS-SP2'), dist) - - @mock.patch('cloudinit.util.load_file') - def test_get_linux_opensuse(self, m_os_release, m_path_exists): - """Verify we get the correct name and machine arch on openSUSE - prior to openSUSE Leap 15. - """ - m_os_release.return_value = OS_RELEASE_OPENSUSE - m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists - dist = util.get_linux_distro() - self.assertEqual(('opensuse', '42.3', platform.machine()), dist) - - @mock.patch('cloudinit.util.load_file') - def test_get_linux_opensuse_l15(self, m_os_release, m_path_exists): - """Verify we get the correct name and machine arch on openSUSE - for openSUSE Leap 15.0 and later. 
- """ - m_os_release.return_value = OS_RELEASE_OPENSUSE_L15 - m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists - dist = util.get_linux_distro() - self.assertEqual(('opensuse-leap', '15.0', platform.machine()), dist) - - @mock.patch('cloudinit.util.load_file') - def test_get_linux_opensuse_tw(self, m_os_release, m_path_exists): - """Verify we get the correct name and machine arch on openSUSE - for openSUSE Tumbleweed - """ - m_os_release.return_value = OS_RELEASE_OPENSUSE_TW - m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists - dist = util.get_linux_distro() - self.assertEqual( - ('opensuse-tumbleweed', '20180920', platform.machine()), dist) - - @mock.patch('cloudinit.util.load_file') - def test_get_linux_photon_os_release(self, m_os_release, m_path_exists): - """Verify we get the correct name and machine arch on PhotonOS""" - m_os_release.return_value = OS_RELEASE_PHOTON - m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists - dist = util.get_linux_distro() - self.assertEqual( - ('photon', '4.0', 'VMware Photon OS/Linux'), dist) - - @mock.patch('platform.system') - @mock.patch('platform.dist', create=True) - def test_get_linux_distro_no_data(self, m_platform_dist, - m_platform_system, m_path_exists): - """Verify we get no information if os-release does not exist""" - m_platform_dist.return_value = ('', '', '') - m_platform_system.return_value = "Linux" - m_path_exists.return_value = 0 - dist = util.get_linux_distro() - self.assertEqual(('', '', ''), dist) - - @mock.patch('platform.system') - @mock.patch('platform.dist', create=True) - def test_get_linux_distro_no_impl(self, m_platform_dist, - m_platform_system, m_path_exists): - """Verify we get an empty tuple when no information exists and - Exceptions are not propagated""" - m_platform_dist.side_effect = Exception() - m_platform_system.return_value = "Linux" - m_path_exists.return_value = 0 - dist = util.get_linux_distro() - self.assertEqual(('', '', ''), dist) - - @mock.patch('platform.system') - @mock.patch('platform.dist', create=True) - def test_get_linux_distro_plat_data(self, m_platform_dist, - m_platform_system, m_path_exists): - """Verify we get the correct platform information""" - m_platform_dist.return_value = ('foo', '1.1', 'aarch64') - m_platform_system.return_value = "Linux" - m_path_exists.return_value = 0 - dist = util.get_linux_distro() - self.assertEqual(('foo', '1.1', 'aarch64'), dist) - - -class TestGetVariant: - @pytest.mark.parametrize('info, expected_variant', [ - ({'system': 'Linux', 'dist': ('almalinux',)}, 'almalinux'), - ({'system': 'linux', 'dist': ('alpine',)}, 'alpine'), - ({'system': 'linux', 'dist': ('arch',)}, 'arch'), - ({'system': 'linux', 'dist': ('centos',)}, 'centos'), - ({'system': 'linux', 'dist': ('cloudlinux',)}, 'cloudlinux'), - ({'system': 'linux', 'dist': ('debian',)}, 'debian'), - ({'system': 'linux', 'dist': ('eurolinux',)}, 'eurolinux'), - ({'system': 'linux', 'dist': ('fedora',)}, 'fedora'), - ({'system': 'linux', 'dist': ('openEuler',)}, 'openeuler'), - ({'system': 'linux', 'dist': ('photon',)}, 'photon'), - ({'system': 'linux', 'dist': ('rhel',)}, 'rhel'), - ({'system': 'linux', 'dist': ('rocky',)}, 'rocky'), - ({'system': 'linux', 'dist': ('suse',)}, 'suse'), - ({'system': 'linux', 'dist': ('virtuozzo',)}, 'virtuozzo'), - ({'system': 'linux', 'dist': ('ubuntu',)}, 'ubuntu'), - ({'system': 'linux', 'dist': ('linuxmint',)}, 'ubuntu'), - ({'system': 'linux', 'dist': ('mint',)}, 'ubuntu'), - ({'system': 'linux', 'dist': ('redhat',)}, 'rhel'), - 
({'system': 'linux', 'dist': ('opensuse',)}, 'suse'), - ({'system': 'linux', 'dist': ('opensuse-tumbleweed',)}, 'suse'), - ({'system': 'linux', 'dist': ('opensuse-leap',)}, 'suse'), - ({'system': 'linux', 'dist': ('sles',)}, 'suse'), - ({'system': 'linux', 'dist': ('sle_hpc',)}, 'suse'), - ({'system': 'linux', 'dist': ('my_distro',)}, 'linux'), - ({'system': 'Windows', 'dist': ('dontcare',)}, 'windows'), - ({'system': 'Darwin', 'dist': ('dontcare',)}, 'darwin'), - ({'system': 'Freebsd', 'dist': ('dontcare',)}, 'freebsd'), - ({'system': 'Netbsd', 'dist': ('dontcare',)}, 'netbsd'), - ({'system': 'Openbsd', 'dist': ('dontcare',)}, 'openbsd'), - ({'system': 'Dragonfly', 'dist': ('dontcare',)}, 'dragonfly'), - ]) - def test_get_variant(self, info, expected_variant): - """Verify we get the correct variant name""" - assert util._get_variant(info) == expected_variant - - -class TestJsonDumps(CiTestCase): - def test_is_str(self): - """json_dumps should return a string.""" - self.assertTrue(isinstance(util.json_dumps({'abc': '123'}), str)) - - def test_utf8(self): - smiley = '\\ud83d\\ude03' - self.assertEqual( - {'smiley': smiley}, - json.loads(util.json_dumps({'smiley': smiley}))) - - def test_non_utf8(self): - blob = b'\xba\x03Qx-#y\xea' - self.assertEqual( - {'blob': 'ci-b64:' + base64.b64encode(blob).decode('utf-8')}, - json.loads(util.json_dumps({'blob': blob}))) - - -@mock.patch('os.path.exists') -class TestIsLXD(CiTestCase): - - def test_is_lxd_true_on_sock_device(self, m_exists): - """When lxd's /dev/lxd/sock exists, is_lxd returns true.""" - m_exists.return_value = True - self.assertTrue(util.is_lxd()) - m_exists.assert_called_once_with('/dev/lxd/sock') - - def test_is_lxd_false_when_sock_device_absent(self, m_exists): - """When lxd's /dev/lxd/sock is absent, is_lxd returns false.""" - m_exists.return_value = False - self.assertFalse(util.is_lxd()) - m_exists.assert_called_once_with('/dev/lxd/sock') - - -class TestReadCcFromCmdline: - - @pytest.mark.parametrize( - "cmdline,expected_cfg", - [ - # Return None if cmdline has no cc:end_cc content. - (CiTestCase.random_string(), None), - # Return None if YAML content is empty string. - ('foo cc: end_cc bar', None), - # Return expected dictionary without trailing end_cc marker. - ('foo cc: ssh_pwauth: true', {'ssh_pwauth': True}), - # Return expected dictionary w escaped newline and no end_cc. - ('foo cc: ssh_pwauth: true\\n', {'ssh_pwauth': True}), - # Return expected dictionary of yaml between cc: and end_cc. - ('foo cc: ssh_pwauth: true end_cc bar', {'ssh_pwauth': True}), - # Return dict with list value w escaped newline, no end_cc. - ( - 'cc: ssh_import_id: [smoser, kirkland]\\n', - {'ssh_import_id': ['smoser', 'kirkland']} - ), - # Parse urlencoded brackets in yaml content. - ( - 'cc: ssh_import_id: %5Bsmoser, kirkland%5D end_cc', - {'ssh_import_id': ['smoser', 'kirkland']} - ), - # Parse complete urlencoded yaml content. - ( - 'cc: ssh_import_id%3A%20%5Buser1%2C%20user2%5D end_cc', - {'ssh_import_id': ['user1', 'user2']} - ), - # Parse nested dictionary in yaml content. - ( - 'cc: ntp: {enabled: true, ntp_client: myclient} end_cc', - {'ntp': {'enabled': True, 'ntp_client': 'myclient'}} - ), - # Parse single mapping value in yaml content. - ('cc: ssh_import_id: smoser end_cc', {'ssh_import_id': 'smoser'}), - # Parse multiline content with multiple mapping and nested lists. 
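The ``_get_variant`` cases above reduce many distro identifiers to a handful of variant families. As a rough illustration (a hypothetical re-implementation mirroring those cases, not cloud-init's ``util._get_variant``), the logic amounts to a lookup with a few aliases and a generic ``linux`` fallback:

    SUSE_LIKE = {"opensuse", "opensuse-leap", "opensuse-tumbleweed",
                 "sles", "sle_hpc", "suse"}
    UBUNTU_LIKE = {"ubuntu", "linuxmint", "mint"}
    KNOWN = {"almalinux", "alpine", "arch", "centos", "cloudlinux",
             "debian", "eurolinux", "fedora", "openeuler", "photon",
             "rhel", "rocky", "virtuozzo"}

    def get_variant(info):
        system = info["system"].lower()
        if system != "linux":
            return system  # windows, darwin, freebsd, ...
        dist = info["dist"][0].lower()
        if dist in SUSE_LIKE:
            return "suse"
        if dist in UBUNTU_LIKE:
            return "ubuntu"
        if dist == "redhat":
            return "rhel"
        return dist if dist in KNOWN else "linux"

    assert get_variant({"system": "linux", "dist": ("sle_hpc",)}) == "suse"
    assert get_variant({"system": "linux", "dist": ("my_distro",)}) == "linux"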
- ( - ('cc: ssh_import_id: [smoser, bob]\\n' - 'runcmd: [ [ ls, -l ], echo hi ] end_cc'), - {'ssh_import_id': ['smoser', 'bob'], - 'runcmd': [['ls', '-l'], 'echo hi']} - ), - # Parse multiline encoded content w/ mappings and nested lists. - ( - ('cc: ssh_import_id: %5Bsmoser, bob%5D\\n' - 'runcmd: [ [ ls, -l ], echo hi ] end_cc'), - {'ssh_import_id': ['smoser', 'bob'], - 'runcmd': [['ls', '-l'], 'echo hi']} - ), - # test encoded escaped newlines work. - # - # unquote(encoded_content) - # 'ssh_import_id: [smoser, bob]\\nruncmd: [ [ ls, -l ], echo hi ]' - ( - ('cc: ' + - ('ssh_import_id%3A%20%5Bsmoser%2C%20bob%5D%5Cn' - 'runcmd%3A%20%5B%20%5B%20ls%2C%20-l%20%5D%2C' - '%20echo%20hi%20%5D') + ' end_cc'), - {'ssh_import_id': ['smoser', 'bob'], - 'runcmd': [['ls', '-l'], 'echo hi']} - ), - # test encoded newlines work. - # - # unquote(encoded_content) - # 'ssh_import_id: [smoser, bob]\nruncmd: [ [ ls, -l ], echo hi ]' - ( - ("cc: " + - ('ssh_import_id%3A%20%5Bsmoser%2C%20bob%5D%0A' - 'runcmd%3A%20%5B%20%5B%20ls%2C%20-l%20%5D%2C' - '%20echo%20hi%20%5D') + ' end_cc'), - {'ssh_import_id': ['smoser', 'bob'], - 'runcmd': [['ls', '-l'], 'echo hi']} - ), - # Parse and merge multiple yaml content sections. - ( - ('cc:ssh_import_id: [smoser, bob] end_cc ' - 'cc: runcmd: [ [ ls, -l ] ] end_cc'), - {'ssh_import_id': ['smoser', 'bob'], - 'runcmd': [['ls', '-l']]} - ), - # Parse and merge multiple encoded yaml content sections. - ( - ('cc:ssh_import_id%3A%20%5Bsmoser%5D end_cc ' - 'cc:runcmd%3A%20%5B%20%5B%20ls%2C%20-l%20%5D%20%5D end_cc'), - {'ssh_import_id': ['smoser'], 'runcmd': [['ls', '-l']]} - ), - ] - ) - def test_read_conf_from_cmdline_config(self, expected_cfg, cmdline): - assert expected_cfg == util.read_conf_from_cmdline(cmdline=cmdline) - - -class TestMountCb: - """Tests for ``util.mount_cb``. - - These tests consider the "unit" under test to be ``util.mount_cb`` and - ``util.unmounter``, which is only used by ``mount_cb``. 
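Before ``mount_cb`` gets its own coverage below, the ``read_conf_from_cmdline`` cases above are worth restating: YAML found between ``cc:`` and ``end_cc`` markers on the kernel command line is decoded (urlencoded and escaped-newline forms included), and multiple marked sections are merged into one dict. For instance (values chosen for illustration):

    from cloudinit import util

    cmdline = (
        "root=/dev/sda1 "
        "cc: ssh_import_id: [smoser] end_cc "
        "cc: runcmd: [[ls, -l]] end_cc"
    )
    cfg = util.read_conf_from_cmdline(cmdline=cmdline)
    # -> {'ssh_import_id': ['smoser'], 'runcmd': [['ls', '-l']]}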
- - TODO: Test default mtype determination - TODO: Test the if/else branch that actually performs the mounting operation - """ - - @pytest.yield_fixture - def already_mounted_device_and_mountdict(self): - """Mock an already-mounted device, and yield (device, mount dict)""" - device = "/dev/fake0" - mountpoint = "/mnt/fake" - with mock.patch("cloudinit.util.subp.subp"): - with mock.patch("cloudinit.util.mounts") as m_mounts: - mounts = {device: {"mountpoint": mountpoint}} - m_mounts.return_value = mounts - yield device, mounts[device] - - @pytest.fixture - def already_mounted_device(self, already_mounted_device_and_mountdict): - """already_mounted_device_and_mountdict, but return only the device""" - return already_mounted_device_and_mountdict[0] - - @pytest.mark.parametrize( - "mtype,expected", - [ - # While the filesystem is called iso9660, the mount type is cd9660 - ("iso9660", "cd9660"), - # vfat is generally called "msdos" on BSD - ("vfat", "msdos"), - # judging from man pages, only FreeBSD has this alias - ("msdosfs", "msdos"), - # Test happy path - ("ufs", "ufs") - ], - ) - @mock.patch("cloudinit.util.is_Linux", autospec=True) - @mock.patch("cloudinit.util.is_BSD", autospec=True) - @mock.patch("cloudinit.util.subp.subp") - @mock.patch("cloudinit.temp_utils.tempdir", autospec=True) - def test_normalize_mtype_on_bsd( - self, m_tmpdir, m_subp, m_is_BSD, m_is_Linux, mtype, expected - ): - m_is_BSD.return_value = True - m_is_Linux.return_value = False - m_tmpdir.return_value.__enter__ = mock.Mock( - autospec=True, return_value="/tmp/fake" - ) - m_tmpdir.return_value.__exit__ = mock.Mock( - autospec=True, return_value=True - ) - callback = mock.Mock(autospec=True) - - util.mount_cb('/dev/fake0', callback, mtype=mtype) - assert mock.call( - ["mount", "-o", "ro", "-t", expected, "/dev/fake0", "/tmp/fake"], - update_env=None) in m_subp.call_args_list - - @pytest.mark.parametrize("invalid_mtype", [int(0), float(0.0), dict()]) - def test_typeerror_raised_for_invalid_mtype(self, invalid_mtype): - with pytest.raises(TypeError): - util.mount_cb(mock.Mock(), mock.Mock(), mtype=invalid_mtype) - - @mock.patch("cloudinit.util.subp.subp") - def test_already_mounted_does_not_mount_or_umount_anything( - self, m_subp, already_mounted_device - ): - util.mount_cb(already_mounted_device, mock.Mock()) - - assert 0 == m_subp.call_count - - @pytest.mark.parametrize("trailing_slash_in_mounts", ["/", ""]) - def test_already_mounted_calls_callback( - self, trailing_slash_in_mounts, already_mounted_device_and_mountdict - ): - device, mount_dict = already_mounted_device_and_mountdict - mountpoint = mount_dict["mountpoint"] - mount_dict["mountpoint"] += trailing_slash_in_mounts - - callback = mock.Mock() - util.mount_cb(device, callback) - - # The mountpoint passed to callback should always have a trailing - # slash, regardless of the input - assert [mock.call(mountpoint + "/")] == callback.call_args_list - - def test_already_mounted_calls_callback_with_data( - self, already_mounted_device - ): - callback = mock.Mock() - util.mount_cb( - already_mounted_device, callback, data=mock.sentinel.data - ) - - assert [ - mock.call(mock.ANY, mock.sentinel.data) - ] == callback.call_args_list - - -@mock.patch("cloudinit.util.write_file") -class TestEnsureFile: - """Tests for ``cloudinit.util.ensure_file``.""" - - def test_parameters_passed_through(self, m_write_file): - """Test the parameters in the signature are passed to write_file.""" - util.ensure_file( - mock.sentinel.path, - mode=mock.sentinel.mode, - 
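The BSD cases in ``TestMountCb`` above encode a small translation table: familiar Linux filesystem names are mapped to the matching BSD mount type before ``mount -o ro -t <type>`` runs. Sketched as data (a hypothetical mapping that mirrors the parametrized cases):

    # Unlisted types pass through unchanged (the "ufs" happy path).
    BSD_MOUNT_ALIASES = {
        "iso9660": "cd9660",  # same filesystem, different mount-type name
        "vfat": "msdos",      # vfat is generally called "msdos" on BSD
        "msdosfs": "msdos",   # FreeBSD-specific alias
    }

    def normalize_mtype_for_bsd(mtype: str) -> str:
        return BSD_MOUNT_ALIASES.get(mtype, mtype)

    assert normalize_mtype_for_bsd("iso9660") == "cd9660"
    assert normalize_mtype_for_bsd("ufs") == "ufs"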
preserve_mode=mock.sentinel.preserve_mode, - ) - - assert 1 == m_write_file.call_count - args, kwargs = m_write_file.call_args - assert (mock.sentinel.path,) == args - assert mock.sentinel.mode == kwargs["mode"] - assert mock.sentinel.preserve_mode == kwargs["preserve_mode"] - - @pytest.mark.parametrize( - "kwarg,expected", - [ - # Files should be world-readable by default - ("mode", 0o644), - # The previous behaviour of not preserving mode should be retained - ("preserve_mode", False), - ], - ) - def test_defaults(self, m_write_file, kwarg, expected): - """Test that ensure_file defaults appropriately.""" - util.ensure_file(mock.sentinel.path) - - assert 1 == m_write_file.call_count - _args, kwargs = m_write_file.call_args - assert expected == kwargs[kwarg] - - def test_static_parameters_are_passed(self, m_write_file): - """Test that the static write_files parameters are passed correctly.""" - util.ensure_file(mock.sentinel.path) - - assert 1 == m_write_file.call_count - _args, kwargs = m_write_file.call_args - assert "" == kwargs["content"] - assert "ab" == kwargs["omode"] - - -@mock.patch("cloudinit.util.grp.getgrnam") -@mock.patch("cloudinit.util.os.setgid") -@mock.patch("cloudinit.util.os.umask") -class TestRedirectOutputPreexecFn: - """This tests specifically the preexec_fn used in redirect_output.""" - - @pytest.fixture(params=["outfmt", "errfmt"]) - def preexec_fn(self, request): - """A fixture to gather the preexec_fn used by redirect_output. - - This enables simpler direct testing of it, and parameterises any tests - using it to cover both the stdout and stderr code paths. - """ - test_string = "| piped output to invoke subprocess" - if request.param == "outfmt": - args = (test_string, None) - elif request.param == "errfmt": - args = (None, test_string) - with mock.patch("cloudinit.util.subprocess.Popen") as m_popen: - util.redirect_output(*args) - - assert 1 == m_popen.call_count - _args, kwargs = m_popen.call_args - assert "preexec_fn" in kwargs, "preexec_fn not passed to Popen" - return kwargs["preexec_fn"] - - def test_preexec_fn_sets_umask( - self, m_os_umask, _m_setgid, _m_getgrnam, preexec_fn - ): - """preexec_fn should set a mask that avoids world-readable files.""" - preexec_fn() - - assert [mock.call(0o037)] == m_os_umask.call_args_list - - def test_preexec_fn_sets_group_id_if_adm_group_present( - self, _m_os_umask, m_setgid, m_getgrnam, preexec_fn - ): - """We should setgrp to adm if present, so files are owned by them.""" - fake_group = mock.Mock(gr_gid=mock.sentinel.gr_gid) - m_getgrnam.return_value = fake_group - - preexec_fn() - - assert [mock.call("adm")] == m_getgrnam.call_args_list - assert [mock.call(mock.sentinel.gr_gid)] == m_setgid.call_args_list - - def test_preexec_fn_handles_absent_adm_group_gracefully( - self, _m_os_umask, m_setgid, m_getgrnam, preexec_fn - ): - """We should handle an absent adm group gracefully.""" - m_getgrnam.side_effect = KeyError("getgrnam(): name not found: 'adm'") - - preexec_fn() - - assert 0 == m_setgid.call_count - -# vi: ts=4 expandtab diff --git a/doc/rtd/topics/testing.rst b/doc/rtd/topics/testing.rst index d882e036bbd..7a1e3eece5d 100644 --- a/doc/rtd/topics/testing.rst +++ b/doc/rtd/topics/testing.rst @@ -3,8 +3,7 @@ Testing ******* cloud-init has both unit tests and integration tests. Unit tests can -be found in-tree alongside the source code, as well as -at ``tests/unittests``. Integration tests can be found at +be found at ``tests/unittests``. Integration tests can be found at ``tests/integration_tests``. 
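Looking back at ``TestRedirectOutputPreexecFn`` above: the hook it exercises tightens the umask so piped console logs are not world-readable, and switches to the ``adm`` group when one exists so administrators can still read them. A standalone sketch of such a hook (hypothetical, not the cloud-init source):

    import grp
    import os

    def make_log_preexec_fn():
        def preexec_fn():
            # Mask group-write and all world bits on files the child creates.
            os.umask(0o037)
            try:
                os.setgid(grp.getgrnam("adm").gr_gid)
            except KeyError:
                pass  # no adm group on this system; keep the default gid
        return preexec_fn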
Documentation specifically for integration tests can be found on the :ref:`integration_tests` page, but the guidelines specified below apply to both types of tests. @@ -36,6 +35,16 @@ Test Layout subclass (indirectly) from ``TestCase`` (e.g. `TestPrependBaseCommands`_) +* Unit tests and integration tests are located under ``cloud-init/tests`` + + * For consistency, unit test files should have a matching name and + directory location under ``tests/unittests`` + + * For example: the expected test file for code in + ``cloudinit/path/to/file.py`` is + ``tests/unittests/path/to/test_file.py`` + + ``pytest`` Tests ---------------- diff --git a/setup.py b/setup.py index 58fddf0f2a5..100b07feff8 100755 --- a/setup.py +++ b/setup.py @@ -291,7 +291,7 @@ def finalize_options(self): author='Scott Moser', author_email='scott.moser@canonical.com', url='http://launchpad.net/cloud-init/', - packages=setuptools.find_packages(exclude=['tests.*', '*.tests', 'tests']), + packages=setuptools.find_packages(exclude=['tests.*', 'tests']), scripts=['tools/cloud-init-per'], license='Dual-licensed under GPLv3 or Apache 2.0', data_files=data_files, diff --git a/cloudinit/analyze/tests/test_boot.py b/tests/unittests/analyze/test_boot.py similarity index 99% rename from cloudinit/analyze/tests/test_boot.py rename to tests/unittests/analyze/test_boot.py index 6b3afb5ed27..fd878b44f3b 100644 --- a/cloudinit/analyze/tests/test_boot.py +++ b/tests/unittests/analyze/test_boot.py @@ -1,6 +1,6 @@ import os from cloudinit.analyze.__main__ import (analyze_boot, get_parser) -from cloudinit.tests.helpers import CiTestCase, mock +from tests.unittests.helpers import CiTestCase, mock from cloudinit.analyze.show import dist_check_timestamp, SystemctlReader, \ FAIL_CODE, CONTAINER_CODE diff --git a/cloudinit/analyze/tests/test_dump.py b/tests/unittests/analyze/test_dump.py similarity index 99% rename from cloudinit/analyze/tests/test_dump.py rename to tests/unittests/analyze/test_dump.py index dac1efb6216..e3683bbf050 100644 --- a/cloudinit/analyze/tests/test_dump.py +++ b/tests/unittests/analyze/test_dump.py @@ -7,7 +7,7 @@ dump_events, parse_ci_logline, parse_timestamp) from cloudinit.util import write_file from cloudinit.subp import which -from cloudinit.tests.helpers import CiTestCase, mock, skipIf +from tests.unittests.helpers import CiTestCase, mock, skipIf class TestParseTimestamp(CiTestCase): diff --git a/cloudinit/cmd/devel/tests/__init__.py b/tests/unittests/cloudinit/__init__.py similarity index 100% rename from cloudinit/cmd/devel/tests/__init__.py rename to tests/unittests/cloudinit/__init__.py diff --git a/cloudinit/cmd/tests/__init__.py b/tests/unittests/cmd/__init__.py similarity index 100% rename from cloudinit/cmd/tests/__init__.py rename to tests/unittests/cmd/__init__.py diff --git a/cloudinit/distros/tests/__init__.py b/tests/unittests/cmd/devel/__init__.py similarity index 100% rename from cloudinit/distros/tests/__init__.py rename to tests/unittests/cmd/devel/__init__.py diff --git a/cloudinit/cmd/devel/tests/test_logs.py b/tests/unittests/cmd/devel/test_logs.py similarity index 99% rename from cloudinit/cmd/devel/tests/test_logs.py rename to tests/unittests/cmd/devel/test_logs.py index ddfd58e1860..18bdcddab7e 100644 --- a/cloudinit/cmd/devel/tests/test_logs.py +++ b/tests/unittests/cmd/devel/test_logs.py @@ -6,7 +6,7 @@ from cloudinit.cmd.devel import logs from cloudinit.sources import INSTANCE_JSON_SENSITIVE_FILE -from cloudinit.tests.helpers import ( +from tests.unittests.helpers import ( FilesystemMockingTestCase,
mock, wrap_and_call) from cloudinit.subp import subp from cloudinit.util import ensure_dir, load_file, write_file diff --git a/cloudinit/cmd/devel/tests/test_render.py b/tests/unittests/cmd/devel/test_render.py similarity index 99% rename from cloudinit/cmd/devel/tests/test_render.py rename to tests/unittests/cmd/devel/test_render.py index a7fcf2ced01..c7ddca3d20f 100644 --- a/cloudinit/cmd/devel/tests/test_render.py +++ b/tests/unittests/cmd/devel/test_render.py @@ -7,7 +7,7 @@ from cloudinit.cmd.devel import render from cloudinit.helpers import Paths from cloudinit.sources import INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE -from cloudinit.tests.helpers import CiTestCase, mock, skipUnlessJinja +from tests.unittests.helpers import CiTestCase, mock, skipUnlessJinja from cloudinit.util import ensure_dir, write_file diff --git a/cloudinit/cmd/tests/test_clean.py b/tests/unittests/cmd/test_clean.py similarity index 99% rename from cloudinit/cmd/tests/test_clean.py rename to tests/unittests/cmd/test_clean.py index a848a810fec..81fc930ee60 100644 --- a/cloudinit/cmd/tests/test_clean.py +++ b/tests/unittests/cmd/test_clean.py @@ -2,7 +2,7 @@ from cloudinit.cmd import clean from cloudinit.util import ensure_dir, sym_link, write_file -from cloudinit.tests.helpers import CiTestCase, wrap_and_call, mock +from tests.unittests.helpers import CiTestCase, wrap_and_call, mock from collections import namedtuple import os from io import StringIO diff --git a/cloudinit/cmd/tests/test_cloud_id.py b/tests/unittests/cmd/test_cloud_id.py similarity index 99% rename from cloudinit/cmd/tests/test_cloud_id.py rename to tests/unittests/cmd/test_cloud_id.py index 3f3727fddeb..12fc80e87c1 100644 --- a/cloudinit/cmd/tests/test_cloud_id.py +++ b/tests/unittests/cmd/test_cloud_id.py @@ -8,7 +8,7 @@ from cloudinit.cmd import cloud_id -from cloudinit.tests.helpers import CiTestCase, mock +from tests.unittests.helpers import CiTestCase, mock class TestCloudId(CiTestCase): diff --git a/cloudinit/cmd/tests/test_main.py b/tests/unittests/cmd/test_main.py similarity index 99% rename from cloudinit/cmd/tests/test_main.py rename to tests/unittests/cmd/test_main.py index 2e3808487bf..e1ce682b407 100644 --- a/cloudinit/cmd/tests/test_main.py +++ b/tests/unittests/cmd/test_main.py @@ -12,7 +12,7 @@ from cloudinit import safeyaml from cloudinit.util import ( ensure_dir, load_file, write_file) -from cloudinit.tests.helpers import ( +from tests.unittests.helpers import ( FilesystemMockingTestCase, wrap_and_call) mypaths = namedtuple('MyPaths', 'run_dir') diff --git a/cloudinit/cmd/tests/test_query.py b/tests/unittests/cmd/test_query.py similarity index 99% rename from cloudinit/cmd/tests/test_query.py rename to tests/unittests/cmd/test_query.py index d96c394518d..b3f1d98dcfb 100644 --- a/cloudinit/cmd/tests/test_query.py +++ b/tests/unittests/cmd/test_query.py @@ -13,7 +13,7 @@ from cloudinit.helpers import Paths from cloudinit.sources import ( REDACT_SENSITIVE_VALUE, INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE) -from cloudinit.tests.helpers import mock +from tests.unittests.helpers import mock from cloudinit.util import b64e, write_file diff --git a/cloudinit/cmd/tests/test_status.py b/tests/unittests/cmd/test_status.py similarity index 99% rename from cloudinit/cmd/tests/test_status.py rename to tests/unittests/cmd/test_status.py index 1c9eec376cc..49eae043bc3 100644 --- a/cloudinit/cmd/tests/test_status.py +++ b/tests/unittests/cmd/test_status.py @@ -8,7 +8,7 @@ from cloudinit.atomic_helper import write_json from 
cloudinit.cmd import status from cloudinit.util import ensure_file -from cloudinit.tests.helpers import CiTestCase, wrap_and_call, mock +from tests.unittests.helpers import CiTestCase, wrap_and_call, mock mypaths = namedtuple('MyPaths', 'run_dir') myargs = namedtuple('MyArgs', 'long wait') diff --git a/cloudinit/net/tests/__init__.py b/tests/unittests/config/__init__.py similarity index 100% rename from cloudinit/net/tests/__init__.py rename to tests/unittests/config/__init__.py diff --git a/tests/unittests/test_handler/test_handler_apt_conf_v1.py b/tests/unittests/config/test_apt_conf_v1.py similarity index 99% rename from tests/unittests/test_handler/test_handler_apt_conf_v1.py rename to tests/unittests/config/test_apt_conf_v1.py index 6a4b03ee136..98d9994593c 100644 --- a/tests/unittests/test_handler/test_handler_apt_conf_v1.py +++ b/tests/unittests/config/test_apt_conf_v1.py @@ -3,7 +3,7 @@ from cloudinit.config import cc_apt_configure from cloudinit import util -from cloudinit.tests.helpers import TestCase +from tests.unittests.helpers import TestCase import copy import os diff --git a/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v1.py b/tests/unittests/config/test_apt_configure_sources_list_v1.py similarity index 99% rename from tests/unittests/test_handler/test_handler_apt_configure_sources_list_v1.py rename to tests/unittests/config/test_apt_configure_sources_list_v1.py index d69916f9d85..4aeaea248ab 100644 --- a/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v1.py +++ b/tests/unittests/config/test_apt_configure_sources_list_v1.py @@ -17,7 +17,7 @@ from cloudinit.distros.debian import Distro -from cloudinit.tests import helpers as t_help +from tests.unittests import helpers as t_help from tests.unittests.util import get_cloud LOG = logging.getLogger(__name__) diff --git a/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v3.py b/tests/unittests/config/test_apt_configure_sources_list_v3.py similarity index 99% rename from tests/unittests/test_handler/test_handler_apt_configure_sources_list_v3.py rename to tests/unittests/config/test_apt_configure_sources_list_v3.py index cd6f92396f9..a8087bd1502 100644 --- a/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v3.py +++ b/tests/unittests/config/test_apt_configure_sources_list_v3.py @@ -15,7 +15,7 @@ from cloudinit import util from cloudinit.config import cc_apt_configure from cloudinit.distros.debian import Distro -from cloudinit.tests import helpers as t_help +from tests.unittests import helpers as t_help from tests.unittests.util import get_cloud diff --git a/tests/unittests/test_handler/test_handler_apt_key.py b/tests/unittests/config/test_apt_key.py similarity index 100% rename from tests/unittests/test_handler/test_handler_apt_key.py rename to tests/unittests/config/test_apt_key.py diff --git a/tests/unittests/test_handler/test_handler_apt_source_v1.py b/tests/unittests/config/test_apt_source_v1.py similarity index 99% rename from tests/unittests/test_handler/test_handler_apt_source_v1.py rename to tests/unittests/config/test_apt_source_v1.py index 2357d69958a..684c24959b7 100644 --- a/tests/unittests/test_handler/test_handler_apt_source_v1.py +++ b/tests/unittests/config/test_apt_source_v1.py @@ -18,7 +18,7 @@ from cloudinit import subp from cloudinit import util -from cloudinit.tests.helpers import TestCase +from tests.unittests.helpers import TestCase EXPECTEDKEY = """-----BEGIN PGP PUBLIC KEY BLOCK----- Version: GnuPG v1 diff --git 
a/tests/unittests/test_handler/test_handler_apt_source_v3.py b/tests/unittests/config/test_apt_source_v3.py similarity index 99% rename from tests/unittests/test_handler/test_handler_apt_source_v3.py rename to tests/unittests/config/test_apt_source_v3.py index 202891211bb..0b78037e362 100644 --- a/tests/unittests/test_handler/test_handler_apt_source_v3.py +++ b/tests/unittests/config/test_apt_source_v3.py @@ -19,7 +19,7 @@ from cloudinit import subp from cloudinit import util from cloudinit.config import cc_apt_configure -from cloudinit.tests import helpers as t_help +from tests.unittests import helpers as t_help from tests.unittests.util import get_cloud diff --git a/tests/unittests/test_handler/test_handler_apk_configure.py b/tests/unittests/config/test_cc_apk_configure.py similarity index 99% rename from tests/unittests/test_handler/test_handler_apk_configure.py rename to tests/unittests/config/test_cc_apk_configure.py index 8acc0b33c78..70139451fb9 100644 --- a/tests/unittests/test_handler/test_handler_apk_configure.py +++ b/tests/unittests/config/test_cc_apk_configure.py @@ -11,7 +11,7 @@ from cloudinit import (cloud, helpers, util) from cloudinit.config import cc_apk_configure -from cloudinit.tests.helpers import (FilesystemMockingTestCase, mock) +from tests.unittests.helpers import (FilesystemMockingTestCase, mock) REPO_FILE = "/etc/apk/repositories" DEFAULT_MIRROR_URL = "https://alpine.global.ssl.fastly.net/alpine" diff --git a/cloudinit/config/tests/test_apt_pipelining.py b/tests/unittests/config/test_cc_apt_pipelining.py similarity index 95% rename from cloudinit/config/tests/test_apt_pipelining.py rename to tests/unittests/config/test_cc_apt_pipelining.py index 2a6bb10bbef..d7589d35f4b 100644 --- a/cloudinit/config/tests/test_apt_pipelining.py +++ b/tests/unittests/config/test_cc_apt_pipelining.py @@ -4,7 +4,7 @@ import cloudinit.config.cc_apt_pipelining as cc_apt_pipelining -from cloudinit.tests.helpers import CiTestCase, mock +from tests.unittests.helpers import CiTestCase, mock class TestAptPipelining(CiTestCase): diff --git a/tests/unittests/test_handler/test_handler_bootcmd.py b/tests/unittests/config/test_cc_bootcmd.py similarity index 99% rename from tests/unittests/test_handler/test_handler_bootcmd.py rename to tests/unittests/config/test_cc_bootcmd.py index 8cd3a5e1c76..6f38f12a7dd 100644 --- a/tests/unittests/test_handler/test_handler_bootcmd.py +++ b/tests/unittests/config/test_cc_bootcmd.py @@ -4,7 +4,7 @@ from cloudinit.config.cc_bootcmd import handle, schema from cloudinit import (subp, util) -from cloudinit.tests.helpers import ( +from tests.unittests.helpers import ( CiTestCase, mock, SchemaTestCaseMixin, skipUnlessJsonSchema) from tests.unittests.util import get_cloud diff --git a/tests/unittests/test_handler/test_handler_ca_certs.py b/tests/unittests/config/test_cc_ca_certs.py similarity index 99% rename from tests/unittests/test_handler/test_handler_ca_certs.py rename to tests/unittests/config/test_cc_ca_certs.py index 2a4ab49eea2..91b005d0fac 100644 --- a/tests/unittests/test_handler/test_handler_ca_certs.py +++ b/tests/unittests/config/test_cc_ca_certs.py @@ -11,7 +11,7 @@ from cloudinit import helpers from cloudinit import subp from cloudinit import util -from cloudinit.tests.helpers import TestCase +from tests.unittests.helpers import TestCase from tests.unittests.util import get_cloud diff --git a/tests/unittests/test_handler/test_handler_chef.py b/tests/unittests/config/test_cc_chef.py similarity index 99% rename from 
tests/unittests/test_handler/test_handler_chef.py rename to tests/unittests/config/test_cc_chef.py index 0672cebc6aa..060293c8e4c 100644 --- a/tests/unittests/test_handler/test_handler_chef.py +++ b/tests/unittests/config/test_cc_chef.py @@ -8,7 +8,7 @@ from cloudinit.config import cc_chef from cloudinit import util -from cloudinit.tests.helpers import ( +from tests.unittests.helpers import ( HttprettyTestCase, FilesystemMockingTestCase, mock, skipIf) from tests.unittests.util import get_cloud diff --git a/tests/unittests/test_handler/test_handler_debug.py b/tests/unittests/config/test_cc_debug.py similarity index 96% rename from tests/unittests/test_handler/test_handler_debug.py rename to tests/unittests/config/test_cc_debug.py index 41e9d9bdaef..174f772f088 100644 --- a/tests/unittests/test_handler/test_handler_debug.py +++ b/tests/unittests/config/test_cc_debug.py @@ -7,7 +7,7 @@ from cloudinit import util from cloudinit.config import cc_debug -from cloudinit.tests.helpers import (FilesystemMockingTestCase, mock) +from tests.unittests.helpers import (FilesystemMockingTestCase, mock) from tests.unittests.util import get_cloud diff --git a/cloudinit/config/tests/test_disable_ec2_metadata.py b/tests/unittests/config/test_cc_disable_ec2_metadata.py similarity index 97% rename from cloudinit/config/tests/test_disable_ec2_metadata.py rename to tests/unittests/config/test_cc_disable_ec2_metadata.py index b00f2083483..7a794845cca 100644 --- a/cloudinit/config/tests/test_disable_ec2_metadata.py +++ b/tests/unittests/config/test_cc_disable_ec2_metadata.py @@ -4,7 +4,7 @@ import cloudinit.config.cc_disable_ec2_metadata as ec2_meta -from cloudinit.tests.helpers import CiTestCase, mock +from tests.unittests.helpers import CiTestCase, mock import logging diff --git a/tests/unittests/test_handler/test_handler_disk_setup.py b/tests/unittests/config/test_cc_disk_setup.py similarity index 99% rename from tests/unittests/test_handler/test_handler_disk_setup.py rename to tests/unittests/config/test_cc_disk_setup.py index 4f4a57fa9f2..fa5655599d9 100644 --- a/tests/unittests/test_handler/test_handler_disk_setup.py +++ b/tests/unittests/config/test_cc_disk_setup.py @@ -3,7 +3,7 @@ import random from cloudinit.config import cc_disk_setup -from cloudinit.tests.helpers import CiTestCase, ExitStack, mock, TestCase +from tests.unittests.helpers import CiTestCase, ExitStack, mock, TestCase class TestIsDiskUsed(TestCase): diff --git a/cloudinit/config/tests/test_final_message.py b/tests/unittests/config/test_cc_final_message.py similarity index 100% rename from cloudinit/config/tests/test_final_message.py rename to tests/unittests/config/test_cc_final_message.py diff --git a/tests/unittests/test_handler/test_handler_growpart.py b/tests/unittests/config/test_cc_growpart.py similarity index 99% rename from tests/unittests/test_handler/test_handler_growpart.py rename to tests/unittests/config/test_cc_growpart.py index b7d5d7ba5c4..b007f24f850 100644 --- a/tests/unittests/test_handler/test_handler_growpart.py +++ b/tests/unittests/config/test_cc_growpart.py @@ -5,7 +5,7 @@ from cloudinit import subp from cloudinit import temp_utils -from cloudinit.tests.helpers import TestCase +from tests.unittests.helpers import TestCase import errno import logging diff --git a/cloudinit/config/tests/test_grub_dpkg.py b/tests/unittests/config/test_cc_grub_dpkg.py similarity index 100% rename from cloudinit/config/tests/test_grub_dpkg.py rename to tests/unittests/config/test_cc_grub_dpkg.py diff --git 
a/tests/unittests/test_handler/test_handler_install_hotplug.py b/tests/unittests/config/test_cc_install_hotplug.py similarity index 100% rename from tests/unittests/test_handler/test_handler_install_hotplug.py rename to tests/unittests/config/test_cc_install_hotplug.py diff --git a/cloudinit/config/tests/test_keys_to_console.py b/tests/unittests/config/test_cc_keys_to_console.py similarity index 100% rename from cloudinit/config/tests/test_keys_to_console.py rename to tests/unittests/config/test_cc_keys_to_console.py diff --git a/tests/unittests/test_handler/test_handler_landscape.py b/tests/unittests/config/test_cc_landscape.py similarity index 98% rename from tests/unittests/test_handler/test_handler_landscape.py rename to tests/unittests/config/test_cc_landscape.py index 1cc73ea26b4..07b3f899670 100644 --- a/tests/unittests/test_handler/test_handler_landscape.py +++ b/tests/unittests/config/test_cc_landscape.py @@ -4,7 +4,7 @@ from cloudinit.config import cc_landscape from cloudinit import util -from cloudinit.tests.helpers import (FilesystemMockingTestCase, mock, +from tests.unittests.helpers import (FilesystemMockingTestCase, mock, wrap_and_call) from tests.unittests.util import get_cloud diff --git a/tests/unittests/test_handler/test_handler_locale.py b/tests/unittests/config/test_cc_locale.py similarity index 98% rename from tests/unittests/test_handler/test_handler_locale.py rename to tests/unittests/config/test_cc_locale.py index 3c17927e71b..6cd95a290dd 100644 --- a/tests/unittests/test_handler/test_handler_locale.py +++ b/tests/unittests/config/test_cc_locale.py @@ -13,7 +13,7 @@ from cloudinit import util from cloudinit.config import cc_locale -from cloudinit.tests import helpers as t_help +from tests.unittests import helpers as t_help from tests.unittests.util import get_cloud diff --git a/tests/unittests/test_handler/test_handler_lxd.py b/tests/unittests/config/test_cc_lxd.py similarity index 99% rename from tests/unittests/test_handler/test_handler_lxd.py rename to tests/unittests/config/test_cc_lxd.py index ea8b6e9050c..887987c0809 100644 --- a/tests/unittests/test_handler/test_handler_lxd.py +++ b/tests/unittests/config/test_cc_lxd.py @@ -2,7 +2,7 @@ from unittest import mock from cloudinit.config import cc_lxd -from cloudinit.tests import helpers as t_help +from tests.unittests import helpers as t_help from tests.unittests.util import get_cloud diff --git a/tests/unittests/test_handler/test_handler_mcollective.py b/tests/unittests/config/test_cc_mcollective.py similarity index 99% rename from tests/unittests/test_handler/test_handler_mcollective.py rename to tests/unittests/config/test_cc_mcollective.py index 9cda6fbe815..fff777b60a9 100644 --- a/tests/unittests/test_handler/test_handler_mcollective.py +++ b/tests/unittests/config/test_cc_mcollective.py @@ -8,7 +8,7 @@ from cloudinit import (util) from cloudinit.config import cc_mcollective -from cloudinit.tests import helpers as t_help +from tests.unittests import helpers as t_help from tests.unittests.util import get_cloud diff --git a/tests/unittests/test_handler/test_handler_mounts.py b/tests/unittests/config/test_cc_mounts.py similarity index 88% rename from tests/unittests/test_handler/test_handler_mounts.py rename to tests/unittests/config/test_cc_mounts.py index 69e8b30da65..fc65f108883 100644 --- a/tests/unittests/test_handler/test_handler_mounts.py +++ b/tests/unittests/config/test_cc_mounts.py @@ -1,11 +1,15 @@ # This file is part of cloud-init. See LICENSE file for license information. 
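The ``cc_mounts`` hunk below adds direct coverage for swap-file creation, whose essential behaviour is: try ``fallocate`` first, fall back to ``dd`` if that fails, then run ``mkswap`` on the result. A rough standalone sketch of the pattern (a hypothetical helper, simplified relative to ``create_swapfile``, which also special-cases filesystems like btrfs and xfs):

    import logging
    import subprocess

    LOG = logging.getLogger(__name__)

    def create_swap(fname: str, size_mb: int) -> None:
        try:
            # Fast preallocation when the filesystem supports it.
            subprocess.run(
                ["fallocate", "-l", "%dM" % size_mb, fname], check=True)
        except (OSError, subprocess.CalledProcessError):
            LOG.warning("fallocate swap creation failed, will attempt with dd")
            subprocess.run(
                ["dd", "if=/dev/zero", "of=%s" % fname, "bs=1M",
                 "count=%d" % size_mb], check=True)
        subprocess.run(["mkswap", fname], check=True)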
+import pytest import os.path from unittest import mock +from tests.unittests import helpers as test_helpers from cloudinit.config import cc_mounts +from cloudinit.config.cc_mounts import create_swapfile +from cloudinit.subp import ProcessExecutionError -from cloudinit.tests import helpers as test_helpers +M_PATH = 'cloudinit.config.cc_mounts.' class TestSanitizeDevname(test_helpers.FilesystemMockingTestCase): @@ -403,4 +407,55 @@ def test_no_change_fstab_sets_needs_mount_all(self): mock.call(['mount', '-a']), mock.call(['systemctl', 'daemon-reload'])]) + +class TestCreateSwapfile: + + @pytest.mark.parametrize('fstype', ('xfs', 'btrfs', 'ext4', 'other')) + @mock.patch(M_PATH + 'util.get_mount_info') + @mock.patch(M_PATH + 'subp.subp') + def test_happy_path(self, m_subp, m_get_mount_info, fstype, tmpdir): + swap_file = tmpdir.join("swap-file") + fname = str(swap_file) + + # Some of the calls to subp.subp should create the swap file; this + # roughly approximates that + m_subp.side_effect = lambda *args, **kwargs: swap_file.write('') + + m_get_mount_info.return_value = (mock.ANY, fstype) + + create_swapfile(fname, '') + assert mock.call(['mkswap', fname]) in m_subp.call_args_list + + @mock.patch(M_PATH + "util.get_mount_info") + @mock.patch(M_PATH + "subp.subp") + def test_fallback_from_fallocate_to_dd( + self, m_subp, m_get_mount_info, caplog, tmpdir + ): + swap_file = tmpdir.join("swap-file") + fname = str(swap_file) + + def subp_side_effect(cmd, *args, **kwargs): + # Mock fallocate failing, to initiate fallback + if cmd[0] == "fallocate": + raise ProcessExecutionError() + + m_subp.side_effect = subp_side_effect + # Use ext4 so both fallocate and dd are valid swap creation methods + m_get_mount_info.return_value = (mock.ANY, "ext4") + + create_swapfile(fname, "") + + cmds = [args[0][0] for args, _kwargs in m_subp.call_args_list] + assert "fallocate" in cmds, "fallocate was not called" + assert "dd" in cmds, "fallocate failure did not fallback to dd" + + assert cmds.index("dd") > cmds.index( + "fallocate" + ), "dd ran before fallocate" + + assert mock.call(["mkswap", fname]) in m_subp.call_args_list + + msg = "fallocate swap creation failed, will attempt with dd" + assert msg in caplog.text + # vi: ts=4 expandtab diff --git a/tests/unittests/test_handler/test_handler_ntp.py b/tests/unittests/config/test_cc_ntp.py similarity index 99% rename from tests/unittests/test_handler/test_handler_ntp.py rename to tests/unittests/config/test_cc_ntp.py index b34a18cb12a..3426533a57d 100644 --- a/tests/unittests/test_handler/test_handler_ntp.py +++ b/tests/unittests/config/test_cc_ntp.py @@ -7,7 +7,7 @@ from cloudinit import (helpers, util) from cloudinit.config import cc_ntp -from cloudinit.tests.helpers import ( +from tests.unittests.helpers import ( CiTestCase, FilesystemMockingTestCase, mock, skipUnlessJsonSchema) from tests.unittests.util import get_cloud diff --git a/tests/unittests/test_handler/test_handler_power_state.py b/tests/unittests/config/test_cc_power_state_change.py similarity index 98% rename from tests/unittests/test_handler/test_handler_power_state.py rename to tests/unittests/config/test_cc_power_state_change.py index 4ac494249ac..e699f4246b6 100644 --- a/tests/unittests/test_handler/test_handler_power_state.py +++ b/tests/unittests/config/test_cc_power_state_change.py @@ -7,8 +7,8 @@ from cloudinit import distros from cloudinit import helpers -from cloudinit.tests import helpers as t_help -from cloudinit.tests.helpers import mock +from tests.unittests import helpers as t_help +from 
tests.unittests.helpers import mock class TestLoadPowerState(t_help.TestCase): diff --git a/tests/unittests/test_handler/test_handler_puppet.py b/tests/unittests/config/test_cc_puppet.py similarity index 99% rename from tests/unittests/test_handler/test_handler_puppet.py rename to tests/unittests/config/test_cc_puppet.py index 8d99f535705..1f67dc4cb6b 100644 --- a/tests/unittests/test_handler/test_handler_puppet.py +++ b/tests/unittests/config/test_cc_puppet.py @@ -4,7 +4,7 @@ from cloudinit.config import cc_puppet from cloudinit import util -from cloudinit.tests.helpers import CiTestCase, HttprettyTestCase, mock +from tests.unittests.helpers import CiTestCase, HttprettyTestCase, mock from tests.unittests.util import get_cloud diff --git a/tests/unittests/test_handler/test_handler_refresh_rmc_and_interface.py b/tests/unittests/config/test_cc_refresh_rmc_and_interface.py similarity index 97% rename from tests/unittests/test_handler/test_handler_refresh_rmc_and_interface.py rename to tests/unittests/config/test_cc_refresh_rmc_and_interface.py index e13b77935b5..522de23d8ae 100644 --- a/tests/unittests/test_handler/test_handler_refresh_rmc_and_interface.py +++ b/tests/unittests/config/test_cc_refresh_rmc_and_interface.py @@ -2,8 +2,8 @@ from cloudinit import util -from cloudinit.tests import helpers as t_help -from cloudinit.tests.helpers import mock +from tests.unittests import helpers as t_help +from tests.unittests.helpers import mock from textwrap import dedent import logging diff --git a/tests/unittests/test_handler/test_handler_resizefs.py b/tests/unittests/config/test_cc_resizefs.py similarity index 99% rename from tests/unittests/test_handler/test_handler_resizefs.py rename to tests/unittests/config/test_cc_resizefs.py index 28d5507248c..1f9e24daa6c 100644 --- a/tests/unittests/test_handler/test_handler_resizefs.py +++ b/tests/unittests/config/test_cc_resizefs.py @@ -8,7 +8,7 @@ import logging from cloudinit.subp import ProcessExecutionError -from cloudinit.tests.helpers import ( +from tests.unittests.helpers import ( CiTestCase, mock, skipUnlessJsonSchema, util, wrap_and_call) diff --git a/tests/unittests/test_handler/test_handler_resolv_conf.py b/tests/unittests/config/test_cc_resolv_conf.py similarity index 52% rename from tests/unittests/test_handler/test_handler_resolv_conf.py rename to tests/unittests/config/test_cc_resolv_conf.py index 961390013e4..0aa90a236ca 100644 --- a/tests/unittests/test_handler/test_handler_resolv_conf.py +++ b/tests/unittests/config/test_cc_resolv_conf.py @@ -1,22 +1,30 @@ # This file is part of cloud-init. See LICENSE file for license information. -from cloudinit.config import cc_resolv_conf +import logging +import os +import shutil +import tempfile +import pytest +from copy import deepcopy +from unittest import mock from cloudinit import cloud from cloudinit import distros from cloudinit import helpers from cloudinit import util -from copy import deepcopy -from cloudinit.tests import helpers as t_help - -import logging -import os -import shutil -import tempfile -from unittest import mock +from tests.unittests import helpers as t_help +from tests.unittests.util import MockDistro +from cloudinit.config import cc_resolv_conf +from cloudinit.config.cc_resolv_conf import generate_resolv_conf LOG = logging.getLogger(__name__) +EXPECTED_HEADER = """\ +# Your system has been configured with 'manage-resolv-conf' set to true. +# As a result, cloud-init has written this file with configuration data +# that it has been provided. 
Cloud-init, by default, will write this file +# a single time (PER_ONCE). +#\n\n""" class TestResolvConf(t_help.FilesystemMockingTestCase): @@ -102,4 +110,84 @@ def test_resolv_conf_invalid_resolve_conf_fn(self, m_render_to_file): mock.call(mock.ANY, '/etc/resolv.conf', mock.ANY) ] not in m_render_to_file.call_args_list + +class TestGenerateResolvConf: + + dist = MockDistro() + tmpl_fn = "templates/resolv.conf.tmpl" + + @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file") + def test_dist_resolv_conf_fn(self, m_render_to_file): + self.dist.resolve_conf_fn = "/tmp/resolv-test.conf" + generate_resolv_conf(self.tmpl_fn, + mock.MagicMock(), + self.dist.resolve_conf_fn) + + assert [ + mock.call(mock.ANY, self.dist.resolve_conf_fn, mock.ANY) + ] == m_render_to_file.call_args_list + + @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file") + def test_target_fname_is_used_if_passed(self, m_render_to_file): + path = "/use/this/path" + generate_resolv_conf(self.tmpl_fn, mock.MagicMock(), path) + + assert [ + mock.call(mock.ANY, path, mock.ANY) + ] == m_render_to_file.call_args_list + + # Patch in templater so we can assert on the actual generated content + @mock.patch("cloudinit.templater.util.write_file") + # Parameterise with the value to be passed to generate_resolv_conf as the + # params parameter, and the expected line after the header as + # expected_extra_line. + @pytest.mark.parametrize( + "params,expected_extra_line", + [ + # No options + ({}, None), + # Just a true flag + ({"options": {"foo": True}}, "options foo"), + # Just a false flag + ({"options": {"foo": False}}, None), + # Just an option + ({"options": {"foo": "some_value"}}, "options foo:some_value"), + # A true flag and an option + ( + {"options": {"foo": "some_value", "bar": True}}, + "options bar foo:some_value", + ), + # Two options + ( + {"options": {"foo": "some_value", "bar": "other_value"}}, + "options bar:other_value foo:some_value", + ), + # Everything + ( + { + "options": { + "foo": "some_value", + "bar": "other_value", + "baz": False, + "spam": True, + } + }, + "options spam bar:other_value foo:some_value", + ), + ], + ) + def test_flags_and_options( + self, m_write_file, params, expected_extra_line + ): + target_fn = "/etc/resolv.conf" + generate_resolv_conf(self.tmpl_fn, params, target_fn) + + expected_content = EXPECTED_HEADER + if expected_extra_line is not None: + # If we have any extra lines, expect a trailing newline + expected_content += "\n".join([expected_extra_line, ""]) + assert [ + mock.call(mock.ANY, expected_content, mode=mock.ANY) + ] == m_write_file.call_args_list + # vi: ts=4 expandtab diff --git a/tests/unittests/test_rh_subscription.py b/tests/unittests/config/test_cc_rh_subscription.py similarity index 99% rename from tests/unittests/test_rh_subscription.py rename to tests/unittests/config/test_cc_rh_subscription.py index 53d3cd5a5ef..bd7ebc98951 100644 --- a/tests/unittests/test_rh_subscription.py +++ b/tests/unittests/config/test_cc_rh_subscription.py @@ -8,7 +8,7 @@ from cloudinit.config import cc_rh_subscription from cloudinit import subp -from cloudinit.tests.helpers import CiTestCase, mock +from tests.unittests.helpers import CiTestCase, mock SUBMGR = cc_rh_subscription.SubscriptionManager SUB_MAN_CLI = 'cloudinit.config.cc_rh_subscription._sub_man_cli' diff --git a/tests/unittests/test_handler/test_handler_rsyslog.py b/tests/unittests/config/test_cc_rsyslog.py similarity index 99% rename from tests/unittests/test_handler/test_handler_rsyslog.py rename to 
tests/unittests/config/test_cc_rsyslog.py index 8c8e2838e72..bc147dac6f9 100644 --- a/tests/unittests/test_handler/test_handler_rsyslog.py +++ b/tests/unittests/config/test_cc_rsyslog.py @@ -9,7 +9,7 @@ parse_remotes_line, remotes_to_rsyslog_cfg) from cloudinit import util -from cloudinit.tests import helpers as t_help +from tests.unittests import helpers as t_help class TestLoadConfig(t_help.TestCase): diff --git a/tests/unittests/test_handler/test_handler_runcmd.py b/tests/unittests/config/test_cc_runcmd.py similarity index 99% rename from tests/unittests/test_handler/test_handler_runcmd.py rename to tests/unittests/config/test_cc_runcmd.py index 672e8093156..01de6af0956 100644 --- a/tests/unittests/test_handler/test_handler_runcmd.py +++ b/tests/unittests/config/test_cc_runcmd.py @@ -6,7 +6,7 @@ from cloudinit.config.cc_runcmd import handle, schema from cloudinit import (helpers, subp, util) -from cloudinit.tests.helpers import ( +from tests.unittests.helpers import ( CiTestCase, FilesystemMockingTestCase, SchemaTestCaseMixin, skipUnlessJsonSchema) diff --git a/tests/unittests/test_handler/test_handler_seed_random.py b/tests/unittests/config/test_cc_seed_random.py similarity index 99% rename from tests/unittests/test_handler/test_handler_seed_random.py rename to tests/unittests/config/test_cc_seed_random.py index 2ab153d2682..cfd67dce889 100644 --- a/tests/unittests/test_handler/test_handler_seed_random.py +++ b/tests/unittests/config/test_cc_seed_random.py @@ -15,7 +15,7 @@ from cloudinit import subp from cloudinit import util from cloudinit.config import cc_seed_random -from cloudinit.tests import helpers as t_help +from tests.unittests import helpers as t_help from tests.unittests.util import get_cloud diff --git a/tests/unittests/test_handler/test_handler_set_hostname.py b/tests/unittests/config/test_cc_set_hostname.py similarity index 99% rename from tests/unittests/test_handler/test_handler_set_hostname.py rename to tests/unittests/config/test_cc_set_hostname.py index 1a524c7d409..b9a783a77c9 100644 --- a/tests/unittests/test_handler/test_handler_set_hostname.py +++ b/tests/unittests/config/test_cc_set_hostname.py @@ -7,7 +7,7 @@ from cloudinit import helpers from cloudinit import util -from cloudinit.tests import helpers as t_help +from tests.unittests import helpers as t_help from configobj import ConfigObj import logging diff --git a/cloudinit/config/tests/test_set_passwords.py b/tests/unittests/config/test_cc_set_passwords.py similarity index 90% rename from cloudinit/config/tests/test_set_passwords.py rename to tests/unittests/config/test_cc_set_passwords.py index 2a27f72f267..9bcd04398f6 100644 --- a/cloudinit/config/tests/test_set_passwords.py +++ b/tests/unittests/config/test_cc_set_passwords.py @@ -3,7 +3,7 @@ from unittest import mock from cloudinit.config import cc_set_passwords as setpass -from cloudinit.tests.helpers import CiTestCase +from tests.unittests.helpers import CiTestCase from cloudinit import util MODPATH = "cloudinit.config.cc_set_passwords." 
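The ``cc_set_passwords`` hunk that follows asserts against a ``chpasswd`` helper rather than raw ``subp`` calls; the contract under test is that newline-joined ``user:password`` entries are piped to ``chpasswd`` on stdin, with ``-e`` when the values are already hashed. A minimal sketch of that contract (a hypothetical standalone helper, not the module's own function):

    import subprocess

    def chpasswd(payload: str, hashed: bool = False) -> None:
        # payload: newline-joined 'user:password' (or 'user:hash') lines;
        # -e tells chpasswd(8) the passwords are pre-encrypted.
        cmd = ["chpasswd", "-e"] if hashed else ["chpasswd"]
        subprocess.run(cmd, input=payload.encode(), check=True)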
@@ -79,8 +79,7 @@ def test_handle_on_empty_config(self, *args): 'ssh_pwauth=None\n', self.logs.getvalue()) - @mock.patch(MODPATH + "subp.subp") - def test_handle_on_chpasswd_list_parses_common_hashes(self, m_subp): + def test_handle_on_chpasswd_list_parses_common_hashes(self): """handle parses command password hashes.""" cloud = self.tmp_cloud(distro='ubuntu') valid_hashed_pwds = [ @@ -89,7 +88,7 @@ def test_handle_on_chpasswd_list_parses_common_hashes(self, m_subp): 'ubuntu:$6$5hOurLPO$naywm3Ce0UlmZg9gG2Fl9acWCVEoakMMC7dR52q' 'SDexZbrN9z8yHxhUM2b.sxpguSwOlbOQSW/HpXazGGx3oo1'] cfg = {'chpasswd': {'list': valid_hashed_pwds}} - with mock.patch(MODPATH + 'subp.subp') as m_subp: + with mock.patch.object(setpass, 'chpasswd') as chpasswd: setpass.handle( 'IGNORED', cfg=cfg, cloud=cloud, log=self.logger, args=[]) self.assertIn( @@ -98,10 +97,9 @@ def test_handle_on_chpasswd_list_parses_common_hashes(self, m_subp): self.assertIn( "DEBUG: Setting hashed password for ['root', 'ubuntu']", self.logs.getvalue()) - self.assertEqual( - [mock.call(['chpasswd', '-e'], - '\n'.join(valid_hashed_pwds) + '\n')], - m_subp.call_args_list) + valid = '\n'.join(valid_hashed_pwds) + '\n' + called = chpasswd.call_args[0][1] + self.assertEqual(valid, called) @mock.patch(MODPATH + "util.is_BSD") @mock.patch(MODPATH + "subp.subp") @@ -131,22 +129,18 @@ def test_handle_on_chpasswd_list_creates_random_passwords( 'root:R', 'ubuntu:RANDOM'] cfg = {'chpasswd': {'expire': 'false', 'list': valid_random_pwds}} - with mock.patch(MODPATH + 'subp.subp') as m_subp: + with mock.patch.object(setpass, 'chpasswd') as chpasswd: setpass.handle( 'IGNORED', cfg=cfg, cloud=cloud, log=self.logger, args=[]) self.assertIn( 'DEBUG: Handling input for chpasswd as list.', self.logs.getvalue()) - - self.assertEqual(1, m_subp.call_count) - args, _kwargs = m_subp.call_args - self.assertEqual(["chpasswd"], args[0]) - - stdin = args[1] + self.assertEqual(1, chpasswd.call_count) + passwords, _ = chpasswd.call_args user_pass = { user: password for user, password - in (line.split(":") for line in stdin.splitlines()) + in (line.split(":") for line in passwords[1].splitlines()) } self.assertEqual(1, m_multi_log.call_count) diff --git a/cloudinit/config/tests/test_snap.py b/tests/unittests/config/test_cc_snap.py similarity index 99% rename from cloudinit/config/tests/test_snap.py rename to tests/unittests/config/test_cc_snap.py index 6d4c014a5ae..e8113ecafe0 100644 --- a/cloudinit/config/tests/test_snap.py +++ b/tests/unittests/config/test_cc_snap.py @@ -8,7 +8,7 @@ run_commands, schema) from cloudinit.config.schema import validate_cloudconfig_schema from cloudinit import util -from cloudinit.tests.helpers import ( +from tests.unittests.helpers import ( CiTestCase, SchemaTestCaseMixin, mock, wrap_and_call, skipUnlessJsonSchema) diff --git a/tests/unittests/test_handler/test_handler_spacewalk.py b/tests/unittests/config/test_cc_spacewalk.py similarity index 97% rename from tests/unittests/test_handler/test_handler_spacewalk.py rename to tests/unittests/config/test_cc_spacewalk.py index 26f7648f854..96efccf09ea 100644 --- a/tests/unittests/test_handler/test_handler_spacewalk.py +++ b/tests/unittests/config/test_cc_spacewalk.py @@ -3,7 +3,7 @@ from cloudinit.config import cc_spacewalk from cloudinit import subp -from cloudinit.tests import helpers +from tests.unittests import helpers import logging from unittest import mock diff --git a/cloudinit/config/tests/test_ssh.py b/tests/unittests/config/test_cc_ssh.py similarity index 99% rename from 
cloudinit/config/tests/test_ssh.py rename to tests/unittests/config/test_cc_ssh.py index 87ccdb60958..ba179bbfd5d 100644 --- a/cloudinit/config/tests/test_ssh.py +++ b/tests/unittests/config/test_cc_ssh.py @@ -4,7 +4,7 @@ from cloudinit.config import cc_ssh from cloudinit import ssh_util -from cloudinit.tests.helpers import CiTestCase, mock +from tests.unittests.helpers import CiTestCase, mock import logging LOG = logging.getLogger(__name__) diff --git a/tests/unittests/test_handler/test_handler_timezone.py b/tests/unittests/config/test_cc_timezone.py similarity index 96% rename from tests/unittests/test_handler/test_handler_timezone.py rename to tests/unittests/config/test_cc_timezone.py index 77cdb0c20cd..fb6aab5f4dd 100644 --- a/tests/unittests/test_handler/test_handler_timezone.py +++ b/tests/unittests/config/test_cc_timezone.py @@ -15,7 +15,7 @@ from configobj import ConfigObj from io import BytesIO -from cloudinit.tests import helpers as t_help +from tests.unittests import helpers as t_help from tests.unittests.util import get_cloud diff --git a/cloudinit/config/tests/test_ubuntu_advantage.py b/tests/unittests/config/test_cc_ubuntu_advantage.py similarity index 99% rename from cloudinit/config/tests/test_ubuntu_advantage.py rename to tests/unittests/config/test_cc_ubuntu_advantage.py index db7fb726ba4..8d0c96652f8 100644 --- a/cloudinit/config/tests/test_ubuntu_advantage.py +++ b/tests/unittests/config/test_cc_ubuntu_advantage.py @@ -4,7 +4,7 @@ configure_ua, handle, maybe_install_ua_tools, schema) from cloudinit.config.schema import validate_cloudconfig_schema from cloudinit import subp -from cloudinit.tests.helpers import ( +from tests.unittests.helpers import ( CiTestCase, mock, SchemaTestCaseMixin, skipUnlessJsonSchema) diff --git a/cloudinit/config/tests/test_ubuntu_drivers.py b/tests/unittests/config/test_cc_ubuntu_drivers.py similarity index 99% rename from cloudinit/config/tests/test_ubuntu_drivers.py rename to tests/unittests/config/test_cc_ubuntu_drivers.py index 504ba356a8e..d341fbfd827 100644 --- a/cloudinit/config/tests/test_ubuntu_drivers.py +++ b/tests/unittests/config/test_cc_ubuntu_drivers.py @@ -3,7 +3,7 @@ import copy import os -from cloudinit.tests.helpers import CiTestCase, skipUnlessJsonSchema, mock +from tests.unittests.helpers import CiTestCase, skipUnlessJsonSchema, mock from cloudinit.config.schema import ( SchemaValidationError, validate_cloudconfig_schema) from cloudinit.config import cc_ubuntu_drivers as drivers diff --git a/tests/unittests/test_handler/test_handler_etc_hosts.py b/tests/unittests/config/test_cc_update_etc_hosts.py similarity index 98% rename from tests/unittests/test_handler/test_handler_etc_hosts.py rename to tests/unittests/config/test_cc_update_etc_hosts.py index e3778b11375..77a7f78fedd 100644 --- a/tests/unittests/test_handler/test_handler_etc_hosts.py +++ b/tests/unittests/config/test_cc_update_etc_hosts.py @@ -7,7 +7,7 @@ from cloudinit import helpers from cloudinit import util -from cloudinit.tests import helpers as t_help +from tests.unittests import helpers as t_help import logging import os diff --git a/cloudinit/config/tests/test_users_groups.py b/tests/unittests/config/test_cc_users_groups.py similarity index 99% rename from cloudinit/config/tests/test_users_groups.py rename to tests/unittests/config/test_cc_users_groups.py index df89ddb3ef2..4ef844cb014 100644 --- a/cloudinit/config/tests/test_users_groups.py +++ b/tests/unittests/config/test_cc_users_groups.py @@ -2,7 +2,7 @@ from cloudinit.config import cc_users_groups 
-from cloudinit.tests.helpers import CiTestCase, mock +from tests.unittests.helpers import CiTestCase, mock MODPATH = "cloudinit.config.cc_users_groups" diff --git a/tests/unittests/test_handler/test_handler_write_files.py b/tests/unittests/config/test_cc_write_files.py similarity index 99% rename from tests/unittests/test_handler/test_handler_write_files.py rename to tests/unittests/config/test_cc_write_files.py index 0af92805798..99248f742d5 100644 --- a/tests/unittests/test_handler/test_handler_write_files.py +++ b/tests/unittests/config/test_cc_write_files.py @@ -12,7 +12,7 @@ from cloudinit import log as logging from cloudinit import util -from cloudinit.tests.helpers import ( +from tests.unittests.helpers import ( CiTestCase, FilesystemMockingTestCase, mock, skipUnlessJsonSchema) LOG = logging.getLogger(__name__) diff --git a/tests/unittests/test_handler/test_handler_write_files_deferred.py b/tests/unittests/config/test_cc_write_files_deferred.py similarity index 96% rename from tests/unittests/test_handler/test_handler_write_files_deferred.py rename to tests/unittests/config/test_cc_write_files_deferred.py index 57b6934a083..d33d250a3e8 100644 --- a/tests/unittests/test_handler/test_handler_write_files_deferred.py +++ b/tests/unittests/config/test_cc_write_files_deferred.py @@ -4,11 +4,11 @@ import shutil from cloudinit.config.cc_write_files_deferred import (handle) -from .test_handler_write_files import (VALID_SCHEMA) +from .test_cc_write_files import (VALID_SCHEMA) from cloudinit import log as logging from cloudinit import util -from cloudinit.tests.helpers import ( +from tests.unittests.helpers import ( CiTestCase, FilesystemMockingTestCase, mock, skipUnlessJsonSchema) LOG = logging.getLogger(__name__) diff --git a/tests/unittests/test_handler/test_handler_yum_add_repo.py b/tests/unittests/config/test_cc_yum_add_repo.py similarity index 99% rename from tests/unittests/test_handler/test_handler_yum_add_repo.py rename to tests/unittests/config/test_cc_yum_add_repo.py index 7c61bbf98d5..2f11b96abb5 100644 --- a/tests/unittests/test_handler/test_handler_yum_add_repo.py +++ b/tests/unittests/config/test_cc_yum_add_repo.py @@ -7,7 +7,7 @@ from cloudinit import util from cloudinit.config import cc_yum_add_repo -from cloudinit.tests import helpers +from tests.unittests import helpers LOG = logging.getLogger(__name__) diff --git a/tests/unittests/test_handler/test_handler_zypper_add_repo.py b/tests/unittests/config/test_cc_zypper_add_repo.py similarity index 99% rename from tests/unittests/test_handler/test_handler_zypper_add_repo.py rename to tests/unittests/config/test_cc_zypper_add_repo.py index 0fb1de1ae03..4af04bee84e 100644 --- a/tests/unittests/test_handler/test_handler_zypper_add_repo.py +++ b/tests/unittests/config/test_cc_zypper_add_repo.py @@ -7,8 +7,8 @@ from cloudinit import util from cloudinit.config import cc_zypper_add_repo -from cloudinit.tests import helpers -from cloudinit.tests.helpers import mock +from tests.unittests import helpers +from tests.unittests.helpers import mock LOG = logging.getLogger(__name__) diff --git a/tests/unittests/test_handler/test_schema.py b/tests/unittests/config/test_schema.py similarity index 99% rename from tests/unittests/test_handler/test_schema.py rename to tests/unittests/config/test_schema.py index 1dae223d3f8..b01f5eea52f 100644 --- a/tests/unittests/test_handler/test_schema.py +++ b/tests/unittests/config/test_schema.py @@ -6,7 +6,7 @@ validate_cloudconfig_schema, main) from cloudinit.util import write_file -from 
cloudinit.tests.helpers import CiTestCase, mock, skipUnlessJsonSchema +from tests.unittests.helpers import CiTestCase, mock, skipUnlessJsonSchema from copy import copy import itertools diff --git a/tests/unittests/test_distros/__init__.py b/tests/unittests/distros/__init__.py similarity index 100% rename from tests/unittests/test_distros/__init__.py rename to tests/unittests/distros/__init__.py diff --git a/tests/unittests/test_distros/test_arch.py b/tests/unittests/distros/test_arch.py similarity index 96% rename from tests/unittests/test_distros/test_arch.py rename to tests/unittests/distros/test_arch.py index a95ba3b5114..590ba00e5ce 100644 --- a/tests/unittests/test_distros/test_arch.py +++ b/tests/unittests/distros/test_arch.py @@ -3,7 +3,7 @@ from cloudinit.distros.arch import _render_network from cloudinit import util -from cloudinit.tests.helpers import (CiTestCase, dir2dict) +from tests.unittests.helpers import (CiTestCase, dir2dict) from . import _get_distro diff --git a/tests/unittests/test_distros/test_bsd_utils.py b/tests/unittests/distros/test_bsd_utils.py similarity index 97% rename from tests/unittests/test_distros/test_bsd_utils.py rename to tests/unittests/distros/test_bsd_utils.py index 3a68f2a9a8c..55686dc908d 100644 --- a/tests/unittests/test_distros/test_bsd_utils.py +++ b/tests/unittests/distros/test_bsd_utils.py @@ -2,7 +2,7 @@ import cloudinit.distros.bsd_utils as bsd_utils -from cloudinit.tests.helpers import (CiTestCase, ExitStack, mock) +from tests.unittests.helpers import (CiTestCase, ExitStack, mock) RC_FILE = """ if something; then diff --git a/tests/unittests/test_distros/test_create_users.py b/tests/unittests/distros/test_create_users.py similarity index 99% rename from tests/unittests/test_distros/test_create_users.py rename to tests/unittests/distros/test_create_users.py index 685f08bac4c..5baa8a4b4f6 100644 --- a/tests/unittests/test_distros/test_create_users.py +++ b/tests/unittests/distros/test_create_users.py @@ -4,7 +4,7 @@ from cloudinit import distros from cloudinit import ssh_util -from cloudinit.tests.helpers import (CiTestCase, mock) +from tests.unittests.helpers import (CiTestCase, mock) from tests.unittests.util import abstract_to_concrete diff --git a/tests/unittests/test_distros/test_debian.py b/tests/unittests/distros/test_debian.py similarity index 99% rename from tests/unittests/test_distros/test_debian.py rename to tests/unittests/distros/test_debian.py index a88c26864fa..3d0db145b88 100644 --- a/tests/unittests/test_distros/test_debian.py +++ b/tests/unittests/distros/test_debian.py @@ -9,7 +9,7 @@ APT_GET_COMMAND, APT_GET_WRAPPER, ) -from cloudinit.tests.helpers import FilesystemMockingTestCase +from tests.unittests.helpers import FilesystemMockingTestCase from cloudinit import subp diff --git a/tests/unittests/test_distros/test_dragonflybsd.py b/tests/unittests/distros/test_dragonflybsd.py similarity index 94% rename from tests/unittests/test_distros/test_dragonflybsd.py rename to tests/unittests/distros/test_dragonflybsd.py index df2c00f41a1..f0cd1b248fa 100644 --- a/tests/unittests/test_distros/test_dragonflybsd.py +++ b/tests/unittests/distros/test_dragonflybsd.py @@ -2,7 +2,7 @@ import cloudinit.util -from cloudinit.tests.helpers import mock +from tests.unittests.helpers import mock def test_find_dragonflybsd_part(): diff --git a/tests/unittests/test_distros/test_freebsd.py b/tests/unittests/distros/test_freebsd.py similarity index 96% rename from tests/unittests/test_distros/test_freebsd.py rename to 
tests/unittests/distros/test_freebsd.py index be565b048bc..0279e86f3bb 100644 --- a/tests/unittests/test_distros/test_freebsd.py +++ b/tests/unittests/distros/test_freebsd.py @@ -1,7 +1,7 @@ # This file is part of cloud-init. See LICENSE file for license information. from cloudinit.util import (find_freebsd_part, get_path_dev_freebsd) -from cloudinit.tests.helpers import (CiTestCase, mock) +from tests.unittests.helpers import (CiTestCase, mock) import os diff --git a/tests/unittests/test_distros/test_generic.py b/tests/unittests/distros/test_generic.py similarity index 99% rename from tests/unittests/test_distros/test_generic.py rename to tests/unittests/distros/test_generic.py index 336150bccf4..e542c26fd5a 100644 --- a/tests/unittests/test_distros/test_generic.py +++ b/tests/unittests/distros/test_generic.py @@ -3,7 +3,7 @@ from cloudinit import distros from cloudinit import util -from cloudinit.tests import helpers +from tests.unittests import helpers import os import pytest diff --git a/tests/unittests/test_distros/test_gentoo.py b/tests/unittests/distros/test_gentoo.py similarity index 95% rename from tests/unittests/test_distros/test_gentoo.py rename to tests/unittests/distros/test_gentoo.py index 37a4f51f1f8..4e4680b8da1 100644 --- a/tests/unittests/test_distros/test_gentoo.py +++ b/tests/unittests/distros/test_gentoo.py @@ -2,7 +2,7 @@ from cloudinit import util from cloudinit import atomic_helper -from cloudinit.tests.helpers import CiTestCase +from tests.unittests.helpers import CiTestCase from . import _get_distro diff --git a/tests/unittests/test_distros/test_hostname.py b/tests/unittests/distros/test_hostname.py similarity index 100% rename from tests/unittests/test_distros/test_hostname.py rename to tests/unittests/distros/test_hostname.py diff --git a/tests/unittests/test_distros/test_hosts.py b/tests/unittests/distros/test_hosts.py similarity index 100% rename from tests/unittests/test_distros/test_hosts.py rename to tests/unittests/distros/test_hosts.py diff --git a/cloudinit/distros/tests/test_init.py b/tests/unittests/distros/test_init.py similarity index 100% rename from cloudinit/distros/tests/test_init.py rename to tests/unittests/distros/test_init.py diff --git a/tests/unittests/test_distros/test_manage_service.py b/tests/unittests/distros/test_manage_service.py similarity index 76% rename from tests/unittests/test_distros/test_manage_service.py rename to tests/unittests/distros/test_manage_service.py index 47e7cfb03f9..6f1bd0b1f70 100644 --- a/tests/unittests/test_distros/test_manage_service.py +++ b/tests/unittests/distros/test_manage_service.py @@ -1,7 +1,7 @@ # This file is part of cloud-init. See LICENSE file for license information. 
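The test_manage_service.py hunk resuming below renames the TestingDistro
helper to MockDistro; its assertions pin down how manage_service chooses a
command line. A rough sketch consistent with those assertions (assumption:
the real Distro.manage_service supports more actions and error handling):

    from cloudinit import subp

    def manage_service(init_cmd, uses_systemd, action, service):
        if uses_systemd or 'systemctl' in init_cmd:
            # systemd ordering: systemctl <action> <service>
            cmd = ['systemctl', action, service]
        else:
            # sysvinit ordering: service <service> <action>
            cmd = list(init_cmd) + [service, action]
        return subp.subp(cmd, capture=True)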
-from cloudinit.tests.helpers import (CiTestCase, mock) -from tests.unittests.util import TestingDistro +from tests.unittests.helpers import (CiTestCase, mock) +from tests.unittests.util import MockDistro class TestManageService(CiTestCase): @@ -10,9 +10,9 @@ class TestManageService(CiTestCase): def setUp(self): super(TestManageService, self).setUp() - self.dist = TestingDistro() + self.dist = MockDistro() - @mock.patch.object(TestingDistro, 'uses_systemd', return_value=False) + @mock.patch.object(MockDistro, 'uses_systemd', return_value=False) @mock.patch("cloudinit.distros.subp.subp") def test_manage_service_systemctl_initcmd(self, m_subp, m_sysd): self.dist.init_cmd = ['systemctl'] @@ -20,14 +20,14 @@ def test_manage_service_systemctl_initcmd(self, m_subp, m_sysd): m_subp.assert_called_with(['systemctl', 'start', 'myssh'], capture=True) - @mock.patch.object(TestingDistro, 'uses_systemd', return_value=False) + @mock.patch.object(MockDistro, 'uses_systemd', return_value=False) @mock.patch("cloudinit.distros.subp.subp") def test_manage_service_service_initcmd(self, m_subp, m_sysd): self.dist.init_cmd = ['service'] self.dist.manage_service('start', 'myssh') m_subp.assert_called_with(['service', 'myssh', 'start'], capture=True) - @mock.patch.object(TestingDistro, 'uses_systemd', return_value=True) + @mock.patch.object(MockDistro, 'uses_systemd', return_value=True) @mock.patch("cloudinit.distros.subp.subp") def test_manage_service_systemctl(self, m_subp, m_sysd): self.dist.init_cmd = ['ignore'] diff --git a/tests/unittests/test_distros/test_netbsd.py b/tests/unittests/distros/test_netbsd.py similarity index 100% rename from tests/unittests/test_distros/test_netbsd.py rename to tests/unittests/distros/test_netbsd.py diff --git a/tests/unittests/test_distros/test_netconfig.py b/tests/unittests/distros/test_netconfig.py similarity index 99% rename from tests/unittests/test_distros/test_netconfig.py rename to tests/unittests/distros/test_netconfig.py index e4eba1797ac..90ac557878a 100644 --- a/tests/unittests/test_distros/test_netconfig.py +++ b/tests/unittests/distros/test_netconfig.py @@ -11,7 +11,7 @@ from cloudinit.distros.parsers.sys_conf import SysConf from cloudinit import helpers from cloudinit import settings -from cloudinit.tests.helpers import ( +from tests.unittests.helpers import ( FilesystemMockingTestCase, dir2dict) from cloudinit import subp from cloudinit import util diff --git a/cloudinit/distros/tests/test_networking.py b/tests/unittests/distros/test_networking.py similarity index 100% rename from cloudinit/distros/tests/test_networking.py rename to tests/unittests/distros/test_networking.py diff --git a/tests/unittests/test_distros/test_opensuse.py b/tests/unittests/distros/test_opensuse.py similarity index 84% rename from tests/unittests/test_distros/test_opensuse.py rename to tests/unittests/distros/test_opensuse.py index b9bb9b3e746..4ff26102ed8 100644 --- a/tests/unittests/test_distros/test_opensuse.py +++ b/tests/unittests/distros/test_opensuse.py @@ -1,6 +1,6 @@ # This file is part of cloud-init. See LICENSE file for license information. -from cloudinit.tests.helpers import CiTestCase +from tests.unittests.helpers import CiTestCase from . 
import _get_distro diff --git a/tests/unittests/test_distros/test_photon.py b/tests/unittests/distros/test_photon.py similarity index 96% rename from tests/unittests/test_distros/test_photon.py rename to tests/unittests/distros/test_photon.py index 1c3145caf17..3858f723942 100644 --- a/tests/unittests/test_distros/test_photon.py +++ b/tests/unittests/distros/test_photon.py @@ -2,8 +2,8 @@ from . import _get_distro from cloudinit import util -from cloudinit.tests.helpers import mock -from cloudinit.tests.helpers import CiTestCase +from tests.unittests.helpers import mock +from tests.unittests.helpers import CiTestCase SYSTEM_INFO = { 'paths': { diff --git a/tests/unittests/test_distros/test_resolv.py b/tests/unittests/distros/test_resolv.py similarity index 98% rename from tests/unittests/test_distros/test_resolv.py rename to tests/unittests/distros/test_resolv.py index 7d9407508ca..e7971627075 100644 --- a/tests/unittests/test_distros/test_resolv.py +++ b/tests/unittests/distros/test_resolv.py @@ -2,7 +2,7 @@ from cloudinit.distros.parsers import resolv_conf -from cloudinit.tests.helpers import TestCase +from tests.unittests.helpers import TestCase import re diff --git a/tests/unittests/test_distros/test_sles.py b/tests/unittests/distros/test_sles.py similarity index 84% rename from tests/unittests/test_distros/test_sles.py rename to tests/unittests/distros/test_sles.py index 33e3c4577b5..04514a19310 100644 --- a/tests/unittests/test_distros/test_sles.py +++ b/tests/unittests/distros/test_sles.py @@ -1,6 +1,6 @@ # This file is part of cloud-init. See LICENSE file for license information. -from cloudinit.tests.helpers import CiTestCase +from tests.unittests.helpers import CiTestCase from . import _get_distro diff --git a/tests/unittests/test_distros/test_sysconfig.py b/tests/unittests/distros/test_sysconfig.py similarity index 98% rename from tests/unittests/test_distros/test_sysconfig.py rename to tests/unittests/distros/test_sysconfig.py index c1d5b6931ed..4368496d0b1 100644 --- a/tests/unittests/test_distros/test_sysconfig.py +++ b/tests/unittests/distros/test_sysconfig.py @@ -4,7 +4,7 @@ from cloudinit.distros.parsers.sys_conf import SysConf -from cloudinit.tests.helpers import TestCase +from tests.unittests.helpers import TestCase # Lots of good examples @ diff --git a/tests/unittests/test_distros/test_user_data_normalize.py b/tests/unittests/distros/test_user_data_normalize.py similarity index 99% rename from tests/unittests/test_distros/test_user_data_normalize.py rename to tests/unittests/distros/test_user_data_normalize.py index 50c86942d6e..bd8f2adbc13 100644 --- a/tests/unittests/test_distros/test_user_data_normalize.py +++ b/tests/unittests/distros/test_user_data_normalize.py @@ -7,7 +7,7 @@ from cloudinit import helpers from cloudinit import settings -from cloudinit.tests.helpers import TestCase +from tests.unittests.helpers import TestCase bcfg = { diff --git a/cloudinit/sources/tests/__init__.py b/tests/unittests/filters/__init__.py similarity index 100% rename from cloudinit/sources/tests/__init__.py rename to tests/unittests/filters/__init__.py diff --git a/tests/unittests/test_filters/test_launch_index.py b/tests/unittests/filters/test_launch_index.py similarity index 99% rename from tests/unittests/test_filters/test_launch_index.py rename to tests/unittests/filters/test_launch_index.py index 1492361eebe..0b1a7067bca 100644 --- a/tests/unittests/test_filters/test_launch_index.py +++ b/tests/unittests/filters/test_launch_index.py @@ -3,7 +3,7 @@ import copy from itertools 
import filterfalse -from cloudinit.tests import helpers +from tests.unittests import helpers from cloudinit.filters import launch_index from cloudinit import user_data as ud diff --git a/cloudinit/tests/helpers.py b/tests/unittests/helpers.py similarity index 100% rename from cloudinit/tests/helpers.py rename to tests/unittests/helpers.py diff --git a/cloudinit/tests/__init__.py b/tests/unittests/net/__init__.py similarity index 100% rename from cloudinit/tests/__init__.py rename to tests/unittests/net/__init__.py diff --git a/cloudinit/net/tests/test_dhcp.py b/tests/unittests/net/test_dhcp.py similarity index 99% rename from cloudinit/net/tests/test_dhcp.py rename to tests/unittests/net/test_dhcp.py index 28b4ecf7d95..d3da39816f9 100644 --- a/cloudinit/net/tests/test_dhcp.py +++ b/tests/unittests/net/test_dhcp.py @@ -11,7 +11,7 @@ parse_dhcp_lease_file, dhcp_discovery, networkd_load_leases, parse_static_routes) from cloudinit.util import ensure_file, write_file -from cloudinit.tests.helpers import ( +from tests.unittests.helpers import ( CiTestCase, HttprettyTestCase, mock, populate_dir, wrap_and_call) diff --git a/cloudinit/net/tests/test_init.py b/tests/unittests/net/test_init.py similarity index 99% rename from cloudinit/net/tests/test_init.py rename to tests/unittests/net/test_init.py index f9102f7bd44..666e8425961 100644 --- a/cloudinit/net/tests/test_init.py +++ b/tests/unittests/net/test_init.py @@ -13,7 +13,7 @@ import cloudinit.net as net from cloudinit import safeyaml as yaml -from cloudinit.tests.helpers import CiTestCase, HttprettyTestCase +from tests.unittests.helpers import CiTestCase, HttprettyTestCase from cloudinit.subp import ProcessExecutionError from cloudinit.util import ensure_file, write_file diff --git a/cloudinit/net/tests/test_network_state.py b/tests/unittests/net/test_network_state.py similarity index 99% rename from cloudinit/net/tests/test_network_state.py rename to tests/unittests/net/test_network_state.py index 45e991716e8..fdcd5296f9e 100644 --- a/cloudinit/net/tests/test_network_state.py +++ b/tests/unittests/net/test_network_state.py @@ -6,7 +6,7 @@ from cloudinit import safeyaml from cloudinit.net import network_state -from cloudinit.tests.helpers import CiTestCase +from tests.unittests.helpers import CiTestCase netstate_path = 'cloudinit.net.network_state' diff --git a/cloudinit/net/tests/test_networkd.py b/tests/unittests/net/test_networkd.py similarity index 100% rename from cloudinit/net/tests/test_networkd.py rename to tests/unittests/net/test_networkd.py diff --git a/tests/unittests/test_datasource/__init__.py b/tests/unittests/runs/__init__.py similarity index 100% rename from tests/unittests/test_datasource/__init__.py rename to tests/unittests/runs/__init__.py diff --git a/tests/unittests/test_runs/test_merge_run.py b/tests/unittests/runs/test_merge_run.py similarity index 98% rename from tests/unittests/test_runs/test_merge_run.py rename to tests/unittests/runs/test_merge_run.py index ff27a2801f7..29439c8a2c9 100644 --- a/tests/unittests/test_runs/test_merge_run.py +++ b/tests/unittests/runs/test_merge_run.py @@ -4,7 +4,7 @@ import shutil import tempfile -from cloudinit.tests import helpers +from tests.unittests import helpers from cloudinit.settings import PER_INSTANCE from cloudinit import safeyaml diff --git a/tests/unittests/test_runs/test_simple_run.py b/tests/unittests/runs/test_simple_run.py similarity index 99% rename from tests/unittests/test_runs/test_simple_run.py rename to tests/unittests/runs/test_simple_run.py index 
cb3aae60ceb..aa78dda32a5 100644 --- a/tests/unittests/test_runs/test_simple_run.py +++ b/tests/unittests/runs/test_simple_run.py @@ -7,7 +7,7 @@ from cloudinit.settings import PER_INSTANCE from cloudinit import safeyaml from cloudinit import stages -from cloudinit.tests import helpers +from tests.unittests import helpers from cloudinit import util diff --git a/tests/unittests/test_filters/__init__.py b/tests/unittests/sources/__init__.py similarity index 100% rename from tests/unittests/test_filters/__init__.py rename to tests/unittests/sources/__init__.py diff --git a/cloudinit/sources/helpers/tests/test_netlink.py b/tests/unittests/sources/helpers/test_netlink.py similarity index 99% rename from cloudinit/sources/helpers/tests/test_netlink.py rename to tests/unittests/sources/helpers/test_netlink.py index cafe3961a7a..478ce375e0c 100644 --- a/cloudinit/sources/helpers/tests/test_netlink.py +++ b/tests/unittests/sources/helpers/test_netlink.py @@ -2,7 +2,7 @@ # # This file is part of cloud-init. See LICENSE file for license information. -from cloudinit.tests.helpers import CiTestCase, mock +from tests.unittests.helpers import CiTestCase, mock import socket import struct import codecs diff --git a/cloudinit/sources/helpers/tests/test_openstack.py b/tests/unittests/sources/helpers/test_openstack.py similarity index 97% rename from cloudinit/sources/helpers/tests/test_openstack.py rename to tests/unittests/sources/helpers/test_openstack.py index 95fb97432e9..74743e7c432 100644 --- a/cloudinit/sources/helpers/tests/test_openstack.py +++ b/tests/unittests/sources/helpers/test_openstack.py @@ -3,7 +3,7 @@ from unittest import mock from cloudinit.sources.helpers import openstack -from cloudinit.tests import helpers as test_helpers +from tests.unittests import helpers as test_helpers @mock.patch( diff --git a/tests/unittests/test_datasource/test_aliyun.py b/tests/unittests/sources/test_aliyun.py similarity index 99% rename from tests/unittests/test_datasource/test_aliyun.py rename to tests/unittests/sources/test_aliyun.py index cab1ac2beb7..00209913ef4 100644 --- a/tests/unittests/test_datasource/test_aliyun.py +++ b/tests/unittests/sources/test_aliyun.py @@ -8,7 +8,7 @@ from cloudinit import helpers from cloudinit.sources import DataSourceAliYun as ay from cloudinit.sources.DataSourceEc2 import convert_ec2_metadata_network_config -from cloudinit.tests import helpers as test_helpers +from tests.unittests import helpers as test_helpers DEFAULT_METADATA = { 'instance-id': 'aliyun-test-vm-00', diff --git a/tests/unittests/test_datasource/test_altcloud.py b/tests/unittests/sources/test_altcloud.py similarity index 99% rename from tests/unittests/test_datasource/test_altcloud.py rename to tests/unittests/sources/test_altcloud.py index 7a5393ac321..7384c1047ce 100644 --- a/tests/unittests/test_datasource/test_altcloud.py +++ b/tests/unittests/sources/test_altcloud.py @@ -19,7 +19,7 @@ from cloudinit import subp from cloudinit import util -from cloudinit.tests.helpers import CiTestCase, mock +from tests.unittests.helpers import CiTestCase, mock import cloudinit.sources.DataSourceAltCloud as dsac diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/sources/test_azure.py similarity index 99% rename from tests/unittests/test_datasource/test_azure.py rename to tests/unittests/sources/test_azure.py index 995d2b10583..b221a0d7311 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/sources/test_azure.py @@ -8,7 +8,7 @@ from cloudinit.util import (b64e, 
decode_binary, load_file, write_file, MountFailedError, json_dumps, load_json) from cloudinit.version import version_string as vs -from cloudinit.tests.helpers import ( +from tests.unittests.helpers import ( HttprettyTestCase, CiTestCase, populate_dir, mock, wrap_and_call, ExitStack, resourceLocation) from cloudinit.sources.helpers import netlink diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/sources/test_azure_helper.py similarity index 99% rename from tests/unittests/test_datasource/test_azure_helper.py rename to tests/unittests/sources/test_azure_helper.py index ab4f0b50fbf..24c582c215f 100644 --- a/tests/unittests/test_datasource/test_azure_helper.py +++ b/tests/unittests/sources/test_azure_helper.py @@ -9,7 +9,7 @@ from xml.sax.saxutils import escape, unescape from cloudinit.sources.helpers import azure as azure_helper -from cloudinit.tests.helpers import CiTestCase, ExitStack, mock, populate_dir +from tests.unittests.helpers import CiTestCase, ExitStack, mock, populate_dir from cloudinit.util import load_file from cloudinit.sources.helpers.azure import WALinuxAgentShim as wa_shim diff --git a/tests/unittests/test_datasource/test_cloudsigma.py b/tests/unittests/sources/test_cloudsigma.py similarity index 98% rename from tests/unittests/test_datasource/test_cloudsigma.py rename to tests/unittests/sources/test_cloudsigma.py index 7aa3b1d152b..2eae16ee7b2 100644 --- a/tests/unittests/test_datasource/test_cloudsigma.py +++ b/tests/unittests/sources/test_cloudsigma.py @@ -8,7 +8,7 @@ from cloudinit import sources from cloudinit.sources import DataSourceCloudSigma -from cloudinit.tests import helpers as test_helpers +from tests.unittests import helpers as test_helpers SERVER_CONTEXT = { "cpu": 1000, diff --git a/tests/unittests/test_datasource/test_cloudstack.py b/tests/unittests/sources/test_cloudstack.py similarity index 99% rename from tests/unittests/test_datasource/test_cloudstack.py rename to tests/unittests/sources/test_cloudstack.py index e68168f2497..2b1a1b706fc 100644 --- a/tests/unittests/test_datasource/test_cloudstack.py +++ b/tests/unittests/sources/test_cloudstack.py @@ -5,7 +5,7 @@ from cloudinit.sources.DataSourceCloudStack import ( DataSourceCloudStack, get_latest_lease) -from cloudinit.tests.helpers import CiTestCase, ExitStack, mock +from tests.unittests.helpers import CiTestCase, ExitStack, mock import os import time diff --git a/tests/unittests/test_datasource/test_common.py b/tests/unittests/sources/test_common.py similarity index 98% rename from tests/unittests/test_datasource/test_common.py rename to tests/unittests/sources/test_common.py index 9089e5def69..bb8fa530486 100644 --- a/tests/unittests/test_datasource/test_common.py +++ b/tests/unittests/sources/test_common.py @@ -34,7 +34,7 @@ ) from cloudinit.sources import DataSourceNone as DSNone -from cloudinit.tests import helpers as test_helpers +from tests.unittests import helpers as test_helpers DEFAULT_LOCAL = [ Azure.DataSourceAzure, diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/sources/test_configdrive.py similarity index 99% rename from tests/unittests/test_datasource/test_configdrive.py rename to tests/unittests/sources/test_configdrive.py index be13165c356..775d0622e10 100644 --- a/tests/unittests/test_datasource/test_configdrive.py +++ b/tests/unittests/sources/test_configdrive.py @@ -12,7 +12,7 @@ from cloudinit.sources.helpers import openstack from cloudinit import util -from cloudinit.tests.helpers import CiTestCase, ExitStack, 
mock, populate_dir +from tests.unittests.helpers import CiTestCase, ExitStack, mock, populate_dir PUBKEY = 'ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460\n' diff --git a/tests/unittests/test_datasource/test_digitalocean.py b/tests/unittests/sources/test_digitalocean.py similarity index 99% rename from tests/unittests/test_datasource/test_digitalocean.py rename to tests/unittests/sources/test_digitalocean.py index 3127014b651..351bf7bac11 100644 --- a/tests/unittests/test_datasource/test_digitalocean.py +++ b/tests/unittests/sources/test_digitalocean.py @@ -13,7 +13,7 @@ from cloudinit.sources import DataSourceDigitalOcean from cloudinit.sources.helpers import digitalocean -from cloudinit.tests.helpers import mock, CiTestCase +from tests.unittests.helpers import mock, CiTestCase DO_MULTIPLE_KEYS = ["ssh-rsa AAAAB3NzaC1yc2EAAAA... test1@do.co", "ssh-rsa AAAAB3NzaC1yc2EAAAA... test2@do.co"] diff --git a/tests/unittests/test_datasource/test_ec2.py b/tests/unittests/sources/test_ec2.py similarity index 99% rename from tests/unittests/test_datasource/test_ec2.py rename to tests/unittests/sources/test_ec2.py index a93f2195cbf..19c2bbcd030 100644 --- a/tests/unittests/test_datasource/test_ec2.py +++ b/tests/unittests/sources/test_ec2.py @@ -8,7 +8,7 @@ from cloudinit import helpers from cloudinit.sources import DataSourceEc2 as ec2 -from cloudinit.tests import helpers as test_helpers +from tests.unittests import helpers as test_helpers DYNAMIC_METADATA = { diff --git a/tests/unittests/test_datasource/test_exoscale.py b/tests/unittests/sources/test_exoscale.py similarity index 99% rename from tests/unittests/test_datasource/test_exoscale.py rename to tests/unittests/sources/test_exoscale.py index f00611994f0..b0ffb7a5689 100644 --- a/tests/unittests/test_datasource/test_exoscale.py +++ b/tests/unittests/sources/test_exoscale.py @@ -10,7 +10,7 @@ get_password, PASSWORD_SERVER_PORT, read_metadata) -from cloudinit.tests.helpers import HttprettyTestCase, mock +from tests.unittests.helpers import HttprettyTestCase, mock from cloudinit import util import httpretty diff --git a/tests/unittests/test_datasource/test_gce.py b/tests/unittests/sources/test_gce.py similarity index 99% rename from tests/unittests/test_datasource/test_gce.py rename to tests/unittests/sources/test_gce.py index 1d91b301ff6..dc768e99c7c 100644 --- a/tests/unittests/test_datasource/test_gce.py +++ b/tests/unittests/sources/test_gce.py @@ -18,7 +18,7 @@ from cloudinit import settings from cloudinit.sources import DataSourceGCE -from cloudinit.tests import helpers as test_helpers +from tests.unittests import helpers as test_helpers GCE_META = { diff --git a/tests/unittests/test_datasource/test_hetzner.py b/tests/unittests/sources/test_hetzner.py similarity index 98% rename from tests/unittests/test_datasource/test_hetzner.py rename to tests/unittests/sources/test_hetzner.py index eadb92f1713..5af0f3db6b2 100644 --- a/tests/unittests/test_datasource/test_hetzner.py +++ b/tests/unittests/sources/test_hetzner.py @@ -8,7 +8,7 @@ import cloudinit.sources.helpers.hetzner as hc_helper from cloudinit import util, settings, helpers -from cloudinit.tests.helpers import mock, CiTestCase +from tests.unittests.helpers import mock, CiTestCase import base64 import pytest diff --git a/tests/unittests/test_datasource/test_ibmcloud.py b/tests/unittests/sources/test_ibmcloud.py similarity index 99% rename from tests/unittests/test_datasource/test_ibmcloud.py rename to tests/unittests/sources/test_ibmcloud.py index 
9013ae9f878..38e8e89278c 100644 --- a/tests/unittests/test_datasource/test_ibmcloud.py +++ b/tests/unittests/sources/test_ibmcloud.py @@ -2,7 +2,7 @@ from cloudinit.helpers import Paths from cloudinit.sources import DataSourceIBMCloud as ibm -from cloudinit.tests import helpers as test_helpers +from tests.unittests import helpers as test_helpers from cloudinit import util import base64 diff --git a/cloudinit/sources/tests/test_init.py b/tests/unittests/sources/test_init.py similarity index 99% rename from cloudinit/sources/tests/test_init.py rename to tests/unittests/sources/test_init.py index ae09cb17f7c..a1d19518b91 100644 --- a/cloudinit/sources/tests/test_init.py +++ b/tests/unittests/sources/test_init.py @@ -12,7 +12,7 @@ EXPERIMENTAL_TEXT, INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE, METADATA_UNKNOWN, REDACT_SENSITIVE_VALUE, UNSET, DataSource, canonical_cloud_id, redact_sensitive_keys) -from cloudinit.tests.helpers import CiTestCase, mock +from tests.unittests.helpers import CiTestCase, mock from cloudinit.user_data import UserDataProcessor from cloudinit import util diff --git a/cloudinit/sources/tests/test_lxd.py b/tests/unittests/sources/test_lxd.py similarity index 100% rename from cloudinit/sources/tests/test_lxd.py rename to tests/unittests/sources/test_lxd.py diff --git a/tests/unittests/test_datasource/test_maas.py b/tests/unittests/sources/test_maas.py similarity index 99% rename from tests/unittests/test_datasource/test_maas.py rename to tests/unittests/sources/test_maas.py index 41b6c27b722..34b79587aca 100644 --- a/tests/unittests/test_datasource/test_maas.py +++ b/tests/unittests/sources/test_maas.py @@ -9,7 +9,7 @@ from cloudinit.sources import DataSourceMAAS from cloudinit import url_helper -from cloudinit.tests.helpers import CiTestCase, populate_dir +from tests.unittests.helpers import CiTestCase, populate_dir class TestMAASDataSource(CiTestCase): diff --git a/tests/unittests/test_datasource/test_nocloud.py b/tests/unittests/sources/test_nocloud.py similarity index 99% rename from tests/unittests/test_datasource/test_nocloud.py rename to tests/unittests/sources/test_nocloud.py index 02cc9b38b5b..26f9105407a 100644 --- a/tests/unittests/test_datasource/test_nocloud.py +++ b/tests/unittests/sources/test_nocloud.py @@ -7,7 +7,7 @@ _maybe_remove_top_network, parse_cmdline_data) from cloudinit import util -from cloudinit.tests.helpers import CiTestCase, populate_dir, mock, ExitStack +from tests.unittests.helpers import CiTestCase, populate_dir, mock, ExitStack import os import textwrap diff --git a/tests/unittests/test_datasource/test_opennebula.py b/tests/unittests/sources/test_opennebula.py similarity index 99% rename from tests/unittests/test_datasource/test_opennebula.py rename to tests/unittests/sources/test_opennebula.py index 283b65c2cf8..e5963f5ae6a 100644 --- a/tests/unittests/test_datasource/test_opennebula.py +++ b/tests/unittests/sources/test_opennebula.py @@ -3,7 +3,7 @@ from cloudinit import helpers from cloudinit.sources import DataSourceOpenNebula as ds from cloudinit import util -from cloudinit.tests.helpers import mock, populate_dir, CiTestCase +from tests.unittests.helpers import mock, populate_dir, CiTestCase import os import pwd diff --git a/tests/unittests/test_datasource/test_openstack.py b/tests/unittests/sources/test_openstack.py similarity index 99% rename from tests/unittests/test_datasource/test_openstack.py rename to tests/unittests/sources/test_openstack.py index a9829c7525c..0d6fb04a646 100644 --- 
a/tests/unittests/test_datasource/test_openstack.py +++ b/tests/unittests/sources/test_openstack.py @@ -11,7 +11,7 @@ from io import StringIO from urllib.parse import urlparse -from cloudinit.tests import helpers as test_helpers +from tests.unittests import helpers as test_helpers from cloudinit import helpers from cloudinit import settings diff --git a/cloudinit/sources/tests/test_oracle.py b/tests/unittests/sources/test_oracle.py similarity index 99% rename from cloudinit/sources/tests/test_oracle.py rename to tests/unittests/sources/test_oracle.py index 5f608cbbe72..2aab097cdec 100644 --- a/cloudinit/sources/tests/test_oracle.py +++ b/tests/unittests/sources/test_oracle.py @@ -11,7 +11,7 @@ from cloudinit.sources import DataSourceOracle as oracle from cloudinit.sources import NetworkConfigSource from cloudinit.sources.DataSourceOracle import OpcMetadata -from cloudinit.tests import helpers as test_helpers +from tests.unittests import helpers as test_helpers from cloudinit.url_helper import UrlError DS_PATH = "cloudinit.sources.DataSourceOracle" diff --git a/tests/unittests/test_datasource/test_ovf.py b/tests/unittests/sources/test_ovf.py similarity index 99% rename from tests/unittests/test_datasource/test_ovf.py rename to tests/unittests/sources/test_ovf.py index ad7446f853d..da516731eb2 100644 --- a/tests/unittests/test_datasource/test_ovf.py +++ b/tests/unittests/sources/test_ovf.py @@ -12,7 +12,7 @@ from cloudinit import subp from cloudinit import util -from cloudinit.tests.helpers import CiTestCase, mock, wrap_and_call +from tests.unittests.helpers import CiTestCase, mock, wrap_and_call from cloudinit.helpers import Paths from cloudinit.sources import DataSourceOVF as dsovf from cloudinit.sources.helpers.vmware.imc.config_custom_script import ( diff --git a/tests/unittests/test_datasource/test_rbx.py b/tests/unittests/sources/test_rbx.py similarity index 99% rename from tests/unittests/test_datasource/test_rbx.py rename to tests/unittests/sources/test_rbx.py index d017510e00a..c1294c927a4 100644 --- a/tests/unittests/test_datasource/test_rbx.py +++ b/tests/unittests/sources/test_rbx.py @@ -3,7 +3,7 @@ from cloudinit import helpers from cloudinit import distros from cloudinit.sources import DataSourceRbxCloud as ds -from cloudinit.tests.helpers import mock, CiTestCase, populate_dir +from tests.unittests.helpers import mock, CiTestCase, populate_dir from cloudinit import subp DS_PATH = "cloudinit.sources.DataSourceRbxCloud" diff --git a/tests/unittests/test_datasource/test_scaleway.py b/tests/unittests/sources/test_scaleway.py similarity index 99% rename from tests/unittests/test_datasource/test_scaleway.py rename to tests/unittests/sources/test_scaleway.py index f9e968c5af5..33ae26b8369 100644 --- a/tests/unittests/test_datasource/test_scaleway.py +++ b/tests/unittests/sources/test_scaleway.py @@ -10,7 +10,7 @@ from cloudinit import sources from cloudinit.sources import DataSourceScaleway -from cloudinit.tests.helpers import mock, HttprettyTestCase, CiTestCase +from tests.unittests.helpers import mock, HttprettyTestCase, CiTestCase class DataResponses(object): diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/sources/test_smartos.py similarity index 99% rename from tests/unittests/test_datasource/test_smartos.py rename to tests/unittests/sources/test_smartos.py index 9c499672582..e306eded7f8 100644 --- a/tests/unittests/test_datasource/test_smartos.py +++ b/tests/unittests/sources/test_smartos.py @@ -35,7 +35,7 @@ from cloudinit.util import (b64e, 
write_file) from cloudinit.subp import (subp, ProcessExecutionError, which) -from cloudinit.tests.helpers import ( +from tests.unittests.helpers import ( CiTestCase, mock, FilesystemMockingTestCase, skipIf) diff --git a/tests/unittests/test_datasource/test_upcloud.py b/tests/unittests/sources/test_upcloud.py similarity index 99% rename from tests/unittests/test_datasource/test_upcloud.py rename to tests/unittests/sources/test_upcloud.py index cec48b4b8c0..1d79206605b 100644 --- a/tests/unittests/test_datasource/test_upcloud.py +++ b/tests/unittests/sources/test_upcloud.py @@ -10,7 +10,7 @@ from cloudinit.sources.DataSourceUpCloud import DataSourceUpCloud, \ DataSourceUpCloudLocal -from cloudinit.tests.helpers import mock, CiTestCase +from tests.unittests.helpers import mock, CiTestCase UC_METADATA = json.loads(""" { diff --git a/tests/unittests/test_datasource/test_vmware.py b/tests/unittests/sources/test_vmware.py similarity index 99% rename from tests/unittests/test_datasource/test_vmware.py rename to tests/unittests/sources/test_vmware.py index 52f910b5ef5..d34d7782ca7 100644 --- a/tests/unittests/test_datasource/test_vmware.py +++ b/tests/unittests/sources/test_vmware.py @@ -13,7 +13,7 @@ from cloudinit import dmi, helpers, safeyaml from cloudinit import settings from cloudinit.sources import DataSourceVMware -from cloudinit.tests.helpers import ( +from tests.unittests.helpers import ( mock, CiTestCase, FilesystemMockingTestCase, diff --git a/tests/unittests/test_datasource/test_vultr.py b/tests/unittests/sources/test_vultr.py similarity index 99% rename from tests/unittests/test_datasource/test_vultr.py rename to tests/unittests/sources/test_vultr.py index 6323500936d..40594b9528b 100644 --- a/tests/unittests/test_datasource/test_vultr.py +++ b/tests/unittests/sources/test_vultr.py @@ -12,7 +12,7 @@ from cloudinit.sources import DataSourceVultr from cloudinit.sources.helpers import vultr -from cloudinit.tests.helpers import mock, CiTestCase +from tests.unittests.helpers import mock, CiTestCase # Vultr metadata test data VULTR_V1_1 = { diff --git a/tests/unittests/test_handler/__init__.py b/tests/unittests/sources/vmware/__init__.py similarity index 100% rename from tests/unittests/test_handler/__init__.py rename to tests/unittests/sources/vmware/__init__.py diff --git a/tests/unittests/test_vmware/test_custom_script.py b/tests/unittests/sources/vmware/test_custom_script.py similarity index 98% rename from tests/unittests/test_vmware/test_custom_script.py rename to tests/unittests/sources/vmware/test_custom_script.py index f89f8157495..fcbb9cd5d7d 100644 --- a/tests/unittests/test_vmware/test_custom_script.py +++ b/tests/unittests/sources/vmware/test_custom_script.py @@ -14,7 +14,7 @@ PreCustomScript, PostCustomScript, ) -from cloudinit.tests.helpers import CiTestCase, mock +from tests.unittests.helpers import CiTestCase, mock class TestVmwareCustomScript(CiTestCase): diff --git a/tests/unittests/test_vmware/test_guestcust_util.py b/tests/unittests/sources/vmware/test_guestcust_util.py similarity index 98% rename from tests/unittests/test_vmware/test_guestcust_util.py rename to tests/unittests/sources/vmware/test_guestcust_util.py index c8b59d836f5..9114f0b9804 100644 --- a/tests/unittests/test_vmware/test_guestcust_util.py +++ b/tests/unittests/sources/vmware/test_guestcust_util.py @@ -12,7 +12,7 @@ get_tools_config, set_gc_status, ) -from cloudinit.tests.helpers import CiTestCase, mock +from tests.unittests.helpers import CiTestCase, mock class TestGuestCustUtil(CiTestCase): diff 
--git a/tests/unittests/test_vmware_config_file.py b/tests/unittests/sources/vmware/test_vmware_config_file.py similarity index 99% rename from tests/unittests/test_vmware_config_file.py rename to tests/unittests/sources/vmware/test_vmware_config_file.py index 430cc69f157..54de113eaec 100644 --- a/tests/unittests/test_vmware_config_file.py +++ b/tests/unittests/sources/vmware/test_vmware_config_file.py @@ -19,7 +19,7 @@ from cloudinit.sources.helpers.vmware.imc.config_file import ConfigFile from cloudinit.sources.helpers.vmware.imc.config_nic import gen_subnet from cloudinit.sources.helpers.vmware.imc.config_nic import NicConfigurator -from cloudinit.tests.helpers import CiTestCase +from tests.unittests.helpers import CiTestCase logging.basicConfig(level=logging.DEBUG, stream=sys.stdout) logger = logging.getLogger(__name__) diff --git a/tests/unittests/test__init__.py b/tests/unittests/test__init__.py index 739bbebf296..4382a078e13 100644 --- a/tests/unittests/test__init__.py +++ b/tests/unittests/test__init__.py @@ -12,7 +12,7 @@ from cloudinit import url_helper from cloudinit import util -from cloudinit.tests.helpers import TestCase, CiTestCase, ExitStack, mock +from tests.unittests.helpers import TestCase, CiTestCase, ExitStack, mock class FakeModule(handlers.Handler): diff --git a/tests/unittests/test_atomic_helper.py b/tests/unittests/test_atomic_helper.py index 0101b0e3a00..0c8b8e53a06 100644 --- a/tests/unittests/test_atomic_helper.py +++ b/tests/unittests/test_atomic_helper.py @@ -6,7 +6,7 @@ from cloudinit import atomic_helper -from cloudinit.tests.helpers import CiTestCase +from tests.unittests.helpers import CiTestCase class TestAtomicHelper(CiTestCase): diff --git a/tests/unittests/test_builtin_handlers.py b/tests/unittests/test_builtin_handlers.py index 230866b9d8a..cf2c0a4d8c4 100644 --- a/tests/unittests/test_builtin_handlers.py +++ b/tests/unittests/test_builtin_handlers.py @@ -11,7 +11,7 @@ from textwrap import dedent -from cloudinit.tests.helpers import ( +from tests.unittests.helpers import ( FilesystemMockingTestCase, CiTestCase, mock, skipUnlessJinja) from cloudinit import handlers diff --git a/tests/unittests/test_cli.py b/tests/unittests/test_cli.py index 1459fd9cc77..fd717f34a04 100644 --- a/tests/unittests/test_cli.py +++ b/tests/unittests/test_cli.py @@ -5,7 +5,7 @@ from collections import namedtuple from cloudinit.cmd import main as cli -from cloudinit.tests import helpers as test_helpers +from tests.unittests import helpers as test_helpers from cloudinit.util import load_file, load_json diff --git a/cloudinit/tests/test_conftest.py b/tests/unittests/test_conftest.py similarity index 97% rename from cloudinit/tests/test_conftest.py rename to tests/unittests/test_conftest.py index 6f1263a52aa..2e02b7a77c8 100644 --- a/cloudinit/tests/test_conftest.py +++ b/tests/unittests/test_conftest.py @@ -1,7 +1,7 @@ import pytest from cloudinit import subp -from cloudinit.tests.helpers import CiTestCase +from tests.unittests.helpers import CiTestCase class TestDisableSubpUsage: diff --git a/tests/unittests/test_cs_util.py b/tests/unittests/test_cs_util.py index bfd07ecf9b4..be9da40cc38 100644 --- a/tests/unittests/test_cs_util.py +++ b/tests/unittests/test_cs_util.py @@ -1,6 +1,6 @@ # This file is part of cloud-init. See LICENSE file for license information. 
-from cloudinit.tests import helpers as test_helpers
+from tests.unittests import helpers as test_helpers

 from cloudinit.cs_utils import Cepko

diff --git a/tests/unittests/test_data.py b/tests/unittests/test_data.py
index 8c968ae9b31..2ee09bbb9ee 100644
--- a/tests/unittests/test_data.py
+++ b/tests/unittests/test_data.py
@@ -25,7 +25,7 @@
 from cloudinit import safeyaml
 from cloudinit import util

-from cloudinit.tests import helpers
+from tests.unittests import helpers

 INSTANCE_ID = "i-testing"

diff --git a/cloudinit/tests/test_dhclient_hook.py b/tests/unittests/test_dhclient_hook.py
similarity index 98%
rename from cloudinit/tests/test_dhclient_hook.py
rename to tests/unittests/test_dhclient_hook.py
index eadae81cadc..14549111124 100644
--- a/cloudinit/tests/test_dhclient_hook.py
+++ b/tests/unittests/test_dhclient_hook.py
@@ -3,7 +3,7 @@
 """Tests for cloudinit.dhclient_hook."""

 from cloudinit import dhclient_hook as dhc
-from cloudinit.tests.helpers import CiTestCase, dir2dict, populate_dir
+from tests.unittests.helpers import CiTestCase, dir2dict, populate_dir

 import argparse
 import json
diff --git a/cloudinit/tests/test_dmi.py b/tests/unittests/test_dmi.py
similarity index 99%
rename from cloudinit/tests/test_dmi.py
rename to tests/unittests/test_dmi.py
index 78a721222d1..674e7b987c3 100644
--- a/cloudinit/tests/test_dmi.py
+++ b/tests/unittests/test_dmi.py
@@ -1,4 +1,4 @@
-from cloudinit.tests import helpers
+from tests.unittests import helpers
 from cloudinit import dmi
 from cloudinit import util
 from cloudinit import subp
diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py
index 43603ea5e12..62c3e4031f9 100644
--- a/tests/unittests/test_ds_identify.py
+++ b/tests/unittests/test_ds_identify.py
@@ -8,7 +8,7 @@
 from cloudinit import safeyaml
 from cloudinit import subp
 from cloudinit import util
-from cloudinit.tests.helpers import (
+from tests.unittests.helpers import (
     CiTestCase, dir2dict, populate_dir, populate_dir_with_ts)

 from cloudinit.sources import DataSourceIBMCloud as ds_ibm
diff --git a/tests/unittests/test_ec2_util.py b/tests/unittests/test_ec2_util.py
index 3f50f57d976..e8e0b5b1478 100644
--- a/tests/unittests/test_ec2_util.py
+++ b/tests/unittests/test_ec2_util.py
@@ -2,7 +2,7 @@

 import httpretty as hp

-from cloudinit.tests import helpers
+from tests.unittests import helpers

 from cloudinit import ec2_utils as eu
 from cloudinit import url_helper as uh
diff --git a/cloudinit/tests/test_event.py b/tests/unittests/test_event.py
similarity index 100%
rename from cloudinit/tests/test_event.py
rename to tests/unittests/test_event.py
diff --git a/cloudinit/tests/test_features.py b/tests/unittests/test_features.py
similarity index 100%
rename from cloudinit/tests/test_features.py
rename to tests/unittests/test_features.py
diff --git a/tests/unittests/test_gpg.py b/tests/unittests/test_gpg.py
index 451ffa91761..ceada49aee4 100644
--- a/tests/unittests/test_gpg.py
+++ b/tests/unittests/test_gpg.py
@@ -4,6 +4,8 @@
 from cloudinit import gpg
 from cloudinit import subp
+from tests.unittests.helpers import CiTestCase
+

 TEST_KEY_HUMAN = '''
 /etc/apt/cloud-init.gpg.d/my_key.gpg
 --------------------------------------------
@@ -79,3 +81,50 @@ def test_gpg_dearmor_args(self):
         test_call = mock.call(
             ["gpg", "--dearmor"], data='key', decode=False)
         assert test_call == m_subp.call_args
+
+@mock.patch("cloudinit.gpg.time.sleep")
+@mock.patch("cloudinit.gpg.subp.subp")
+class TestReceiveKeys(CiTestCase):
+    """Test the recv_key method."""
+
+    def test_retries_on_subp_exc(self, m_subp, m_sleep):
+        """retry should be done on gpg receive keys failure."""
+        retries = (1, 2, 4)
+        my_exc = subp.ProcessExecutionError(
+            stdout='', stderr='', exit_code=2, cmd=['mycmd'])
+        m_subp.side_effect = (my_exc, my_exc, ('', ''))
+        gpg.recv_key("ABCD", "keyserver.example.com", retries=retries)
+        self.assertEqual(
+            [mock.call(1), mock.call(2)], m_sleep.call_args_list)
+
+    def test_raises_error_after_retries(self, m_subp, m_sleep):
+        """If the final run fails, error should be raised."""
+        naplen = 1
+        keyid, keyserver = ("ABCD", "keyserver.example.com")
+        m_subp.side_effect = subp.ProcessExecutionError(
+            stdout='', stderr='', exit_code=2, cmd=['mycmd'])
+        with self.assertRaises(ValueError) as rcm:
+            gpg.recv_key(keyid, keyserver, retries=(naplen,))
+        self.assertIn(keyid, str(rcm.exception))
+        self.assertIn(keyserver, str(rcm.exception))
+        m_sleep.assert_called_with(naplen)
+
+    def test_no_retries_on_none(self, m_subp, m_sleep):
+        """retry should not be done if retries is None."""
+        m_subp.side_effect = subp.ProcessExecutionError(
+            stdout='', stderr='', exit_code=2, cmd=['mycmd'])
+        with self.assertRaises(ValueError):
+            gpg.recv_key("ABCD", "keyserver.example.com", retries=None)
+        m_sleep.assert_not_called()
+
+    def test_expected_gpg_command(self, m_subp, m_sleep):
+        """Verify gpg is called with expected args."""
+        key, keyserver = ("DEADBEEF", "keyserver.example.com")
+        retries = (1, 2, 4)
+        m_subp.return_value = ('', '')
+        gpg.recv_key(key, keyserver, retries=retries)
+        m_subp.assert_called_once_with(
+            ['gpg', '--no-tty',
+             '--keyserver=%s' % keyserver, '--recv-keys', key],
+            capture=True)
+        m_sleep.assert_not_called()
diff --git a/tests/unittests/test_helpers.py b/tests/unittests/test_helpers.py
index 2e4582a0400..c6f9b94a136 100644
--- a/tests/unittests/test_helpers.py
+++ b/tests/unittests/test_helpers.py
@@ -4,7 +4,7 @@

 import os

-from cloudinit.tests import helpers as test_helpers
+from tests.unittests import helpers as test_helpers

 from cloudinit import sources

diff --git a/tests/unittests/test_log.py b/tests/unittests/test_log.py
index e069a487bf4..3d1b958256e 100644
--- a/tests/unittests/test_log.py
+++ b/tests/unittests/test_log.py
@@ -9,7 +9,7 @@

 from cloudinit import log as ci_logging
 from cloudinit.analyze.dump import CLOUD_INIT_ASCTIME_FMT
-from cloudinit.tests.helpers import CiTestCase
+from tests.unittests.helpers import CiTestCase


 class TestCloudInitLogger(CiTestCase):
diff --git a/tests/unittests/test_merging.py b/tests/unittests/test_merging.py
index 10871bcf7f0..48ab66021f1 100644
--- a/tests/unittests/test_merging.py
+++ b/tests/unittests/test_merging.py
@@ -1,6 +1,6 @@
 # This file is part of cloud-init. See LICENSE file for license information.
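The TestReceiveKeys cases folded into test_gpg.py above spell out the retry
contract of gpg.recv_key: sleep for each entry of the retries tuple between
failed attempts, raise ValueError naming the key and keyserver once retries
are exhausted, and skip retrying entirely when retries is None. A condensed
sketch of that contract (an illustration implied by the tests, not
cloud-init's exact implementation, which also logs each attempt):

    import time
    from cloudinit import subp

    def recv_key(key, keyserver, retries=(1, 1)):
        cmd = ['gpg', '--no-tty', '--keyserver=%s' % keyserver,
               '--recv-keys', key]
        sleeps = iter(retries or [])
        while True:
            try:
                subp.subp(cmd, capture=True)
                return
            except subp.ProcessExecutionError as error:
                try:
                    naplen = next(sleeps)
                except StopIteration:
                    # retries exhausted (or retries was None)
                    raise ValueError(
                        'Failed to import key "%s" from keyserver "%s": %s'
                        % (key, keyserver, error)) from error
                time.sleep(naplen)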
-from cloudinit.tests import helpers +from tests.unittests import helpers from cloudinit.handlers import cloud_config from cloudinit.handlers import (CONTENT_START, CONTENT_END) diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 57edc89aa6d..b5c38c5515a 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -12,7 +12,7 @@ from cloudinit import util from cloudinit import safeyaml as yaml -from cloudinit.tests.helpers import ( +from tests.unittests.helpers import ( CiTestCase, FilesystemMockingTestCase, dir2dict, mock, populate_dir) import base64 diff --git a/tests/unittests/test_net_freebsd.py b/tests/unittests/test_net_freebsd.py index e339e1324b3..f0dde0978b9 100644 --- a/tests/unittests/test_net_freebsd.py +++ b/tests/unittests/test_net_freebsd.py @@ -3,7 +3,7 @@ import cloudinit.net import cloudinit.net.network_state from cloudinit import safeyaml -from cloudinit.tests.helpers import (CiTestCase, mock, readResource, dir2dict) +from tests.unittests.helpers import (CiTestCase, mock, readResource, dir2dict) SAMPLE_FREEBSD_IFCONFIG_OUT = readResource("netinfo/freebsd-ifconfig-output") diff --git a/cloudinit/tests/test_netinfo.py b/tests/unittests/test_netinfo.py similarity index 99% rename from cloudinit/tests/test_netinfo.py rename to tests/unittests/test_netinfo.py index e44b16d8389..238f7b0a529 100644 --- a/cloudinit/tests/test_netinfo.py +++ b/tests/unittests/test_netinfo.py @@ -5,7 +5,7 @@ from copy import copy from cloudinit.netinfo import netdev_info, netdev_pformat, route_pformat -from cloudinit.tests.helpers import CiTestCase, mock, readResource +from tests.unittests.helpers import CiTestCase, mock, readResource # Example ifconfig and route output diff --git a/tests/unittests/test_pathprefix2dict.py b/tests/unittests/test_pathprefix2dict.py index abbb29b8955..4e737ad70c8 100644 --- a/tests/unittests/test_pathprefix2dict.py +++ b/tests/unittests/test_pathprefix2dict.py @@ -2,7 +2,7 @@ from cloudinit import util -from cloudinit.tests.helpers import TestCase, populate_dir +from tests.unittests.helpers import TestCase, populate_dir import shutil import tempfile diff --git a/cloudinit/tests/test_persistence.py b/tests/unittests/test_persistence.py similarity index 100% rename from cloudinit/tests/test_persistence.py rename to tests/unittests/test_persistence.py diff --git a/tests/unittests/test_registry.py b/tests/unittests/test_registry.py index 2b625026320..4c7df186a7b 100644 --- a/tests/unittests/test_registry.py +++ b/tests/unittests/test_registry.py @@ -2,7 +2,7 @@ from cloudinit.registry import DictRegistry -from cloudinit.tests.helpers import (mock, TestCase) +from tests.unittests.helpers import (mock, TestCase) class TestDictRegistry(TestCase): diff --git a/tests/unittests/test_reporting.py b/tests/unittests/test_reporting.py index b78a69399f1..3aaeea4361a 100644 --- a/tests/unittests/test_reporting.py +++ b/tests/unittests/test_reporting.py @@ -8,7 +8,7 @@ from cloudinit.reporting import events from cloudinit.reporting import handlers -from cloudinit.tests.helpers import TestCase +from tests.unittests.helpers import TestCase def _fake_registry(): diff --git a/tests/unittests/test_reporting_hyperv.py b/tests/unittests/test_reporting_hyperv.py index 9324b78d0d8..24a1dcc7945 100644 --- a/tests/unittests/test_reporting_hyperv.py +++ b/tests/unittests/test_reporting_hyperv.py @@ -13,7 +13,7 @@ from unittest import mock from cloudinit import util -from cloudinit.tests.helpers import CiTestCase +from tests.unittests.helpers import 
CiTestCase from cloudinit.sources.helpers import azure diff --git a/tests/unittests/test_runs/__init__.py b/tests/unittests/test_runs/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/cloudinit/tests/test_simpletable.py b/tests/unittests/test_simpletable.py similarity index 98% rename from cloudinit/tests/test_simpletable.py rename to tests/unittests/test_simpletable.py index a12a62a0098..69b30f0e8fd 100644 --- a/cloudinit/tests/test_simpletable.py +++ b/tests/unittests/test_simpletable.py @@ -10,7 +10,7 @@ """ from cloudinit.simpletable import SimpleTable -from cloudinit.tests.helpers import CiTestCase +from tests.unittests.helpers import CiTestCase # Examples rendered by cloud-init using PrettyTable NET_DEVICE_FIELDS = ( diff --git a/tests/unittests/test_sshutil.py b/tests/unittests/test_sshutil.py index 08e2005026e..b210bd3bc61 100644 --- a/tests/unittests/test_sshutil.py +++ b/tests/unittests/test_sshutil.py @@ -7,7 +7,7 @@ from unittest.mock import patch from cloudinit import ssh_util -from cloudinit.tests import helpers as test_helpers +from tests.unittests import helpers as test_helpers from cloudinit import util # https://stackoverflow.com/questions/11351032/ diff --git a/cloudinit/tests/test_stages.py b/tests/unittests/test_stages.py similarity index 99% rename from cloudinit/tests/test_stages.py rename to tests/unittests/test_stages.py index a50836a407a..a722f03f906 100644 --- a/cloudinit/tests/test_stages.py +++ b/tests/unittests/test_stages.py @@ -13,7 +13,7 @@ from cloudinit.event import EventScope, EventType from cloudinit.util import write_file -from cloudinit.tests.helpers import CiTestCase, mock +from tests.unittests.helpers import CiTestCase, mock TEST_INSTANCE_ID = 'i-testing' diff --git a/cloudinit/tests/test_subp.py b/tests/unittests/test_subp.py similarity index 99% rename from cloudinit/tests/test_subp.py rename to tests/unittests/test_subp.py index 515d5d64573..ec513d01242 100644 --- a/cloudinit/tests/test_subp.py +++ b/tests/unittests/test_subp.py @@ -10,7 +10,7 @@ from unittest import mock from cloudinit import subp, util -from cloudinit.tests.helpers import CiTestCase +from tests.unittests.helpers import CiTestCase BASH = subp.which('bash') diff --git a/cloudinit/tests/test_temp_utils.py b/tests/unittests/test_temp_utils.py similarity index 98% rename from cloudinit/tests/test_temp_utils.py rename to tests/unittests/test_temp_utils.py index 4a52ef8905e..9d56d0d0604 100644 --- a/cloudinit/tests/test_temp_utils.py +++ b/tests/unittests/test_temp_utils.py @@ -3,7 +3,7 @@ """Tests for cloudinit.temp_utils""" from cloudinit.temp_utils import mkdtemp, mkstemp, tempdir -from cloudinit.tests.helpers import CiTestCase, wrap_and_call +from tests.unittests.helpers import CiTestCase, wrap_and_call import os diff --git a/tests/unittests/test_templating.py b/tests/unittests/test_templating.py index cba098302f5..459e017b222 100644 --- a/tests/unittests/test_templating.py +++ b/tests/unittests/test_templating.py @@ -4,7 +4,7 @@ # # This file is part of cloud-init. See LICENSE file for license information. 
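Further down, the large test_util.py hunk seeds a series of OS_RELEASE_*
fixtures, each a verbatim /etc/os-release payload for a distribution that
cloud-init's util module must recognize. For orientation, a minimal parse
of that key=value format (a simplified stand-in; cloud-init's own parsing,
via util.load_shell_content, handles quoting and escapes more carefully):

    def parse_os_release(content):
        # Map KEY="value" lines to a dict, skipping blanks and comments.
        data = {}
        for line in content.splitlines():
            line = line.strip()
            if not line or line.startswith('#'):
                continue
            key, _, value = line.partition('=')
            data[key] = value.strip('"')
        return data

    # e.g. parse_os_release(OS_RELEASE_SLES)['VERSION_ID'] == '12.3'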
-from cloudinit.tests import helpers as test_helpers
+from tests.unittests import helpers as test_helpers

 import textwrap

 from cloudinit import templater
diff --git a/cloudinit/tests/test_upgrade.py b/tests/unittests/test_upgrade.py
similarity index 97%
rename from cloudinit/tests/test_upgrade.py
rename to tests/unittests/test_upgrade.py
index da3ab23b37f..d7a721a28bd 100644
--- a/cloudinit/tests/test_upgrade.py
+++ b/tests/unittests/test_upgrade.py
@@ -19,7 +19,7 @@
 import pytest

 from cloudinit.stages import _pkl_load
-from cloudinit.tests.helpers import resourceLocation
+from tests.unittests.helpers import resourceLocation


 class TestUpgrade:
diff --git a/cloudinit/tests/test_url_helper.py b/tests/unittests/test_url_helper.py
similarity index 99%
rename from cloudinit/tests/test_url_helper.py
rename to tests/unittests/test_url_helper.py
index c3918f80864..501d9533ea7 100644
--- a/cloudinit/tests/test_url_helper.py
+++ b/tests/unittests/test_url_helper.py
@@ -3,7 +3,7 @@
 from cloudinit.url_helper import (
     NOT_FOUND, UrlError, REDACTED, oauth_headers, read_file_or_url,
     retry_on_url_exc)
-from cloudinit.tests.helpers import CiTestCase, mock, skipIf
+from tests.unittests.helpers import CiTestCase, mock, skipIf

 from cloudinit import util
 from cloudinit import version
diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py
index bc30c90b2ef..1290cbc69f1 100644
--- a/tests/unittests/test_util.py
+++ b/tests/unittests/test_util.py
@@ -1,23 +1,1311 @@
 # This file is part of cloud-init. See LICENSE file for license information.
-import io
+"""Tests for cloudinit.util"""
+
+import base64
 import logging
+import json
+import platform
+import pytest
+
+import io
 import os
 import re
 import shutil
 import stat
 import tempfile
-import pytest
 import yaml
 from unittest import mock

 from cloudinit import subp
 from cloudinit import importer, util
-from cloudinit.tests import helpers
+from tests.unittests import helpers
+
+
+from tests.unittests.helpers import CiTestCase
+from textwrap import dedent
+
+LOG = logging.getLogger(__name__)
+
+MOUNT_INFO = [
+    '68 0 8:3 / / ro,relatime shared:1 - btrfs /dev/sda1 ro,attr2,inode64',
+    '153 68 254:0 / /home rw,relatime shared:101 - xfs /dev/sda2 rw,attr2',
+]
+
+OS_RELEASE_SLES = dedent(
+    """\
+    NAME="SLES"
+    VERSION="12-SP3"
+    VERSION_ID="12.3"
+    PRETTY_NAME="SUSE Linux Enterprise Server 12 SP3"
+    ID="sles"
+    ANSI_COLOR="0;32"
+    CPE_NAME="cpe:/o:suse:sles:12:sp3"
+"""
+)
+
+OS_RELEASE_OPENSUSE = dedent(
+    """\
+    NAME="openSUSE Leap"
+    VERSION="42.3"
+    ID=opensuse
+    ID_LIKE="suse"
+    VERSION_ID="42.3"
+    PRETTY_NAME="openSUSE Leap 42.3"
+    ANSI_COLOR="0;32"
+    CPE_NAME="cpe:/o:opensuse:leap:42.3"
+    BUG_REPORT_URL="https://bugs.opensuse.org"
+    HOME_URL="https://www.opensuse.org/"
+"""
+)
+
+OS_RELEASE_OPENSUSE_L15 = dedent(
+    """\
+    NAME="openSUSE Leap"
+    VERSION="15.0"
+    ID="opensuse-leap"
+    ID_LIKE="suse opensuse"
+    VERSION_ID="15.0"
+    PRETTY_NAME="openSUSE Leap 15.0"
+    ANSI_COLOR="0;32"
+    CPE_NAME="cpe:/o:opensuse:leap:15.0"
+    BUG_REPORT_URL="https://bugs.opensuse.org"
+    HOME_URL="https://www.opensuse.org/"
+"""
+)
+
+OS_RELEASE_OPENSUSE_TW = dedent(
+    """\
+    NAME="openSUSE Tumbleweed"
+    ID="opensuse-tumbleweed"
+    ID_LIKE="opensuse suse"
+    VERSION_ID="20180920"
+    PRETTY_NAME="openSUSE Tumbleweed"
+    ANSI_COLOR="0;32"
+    CPE_NAME="cpe:/o:opensuse:tumbleweed:20180920"
+    BUG_REPORT_URL="https://bugs.opensuse.org"
+    HOME_URL="https://www.opensuse.org/"
+"""
+)
+
+OS_RELEASE_CENTOS = dedent(
+    """\
+    NAME="CentOS Linux"
+    VERSION="7 (Core)"
+    ID="centos"
+    ID_LIKE="rhel fedora"
+    VERSION_ID="7"
+    PRETTY_NAME="CentOS Linux 7 (Core)"
+    ANSI_COLOR="0;31"
+    CPE_NAME="cpe:/o:centos:centos:7"
+    HOME_URL="https://www.centos.org/"
+    BUG_REPORT_URL="https://bugs.centos.org/"
+
+    CENTOS_MANTISBT_PROJECT="CentOS-7"
+    CENTOS_MANTISBT_PROJECT_VERSION="7"
+    REDHAT_SUPPORT_PRODUCT="centos"
+    REDHAT_SUPPORT_PRODUCT_VERSION="7"
+"""
+)
+
+OS_RELEASE_REDHAT_7 = dedent(
+    """\
+    NAME="Red Hat Enterprise Linux Server"
+    VERSION="7.5 (Maipo)"
+    ID="rhel"
+    ID_LIKE="fedora"
+    VARIANT="Server"
+    VARIANT_ID="server"
+    VERSION_ID="7.5"
+    PRETTY_NAME="Red Hat"
+    ANSI_COLOR="0;31"
+    CPE_NAME="cpe:/o:redhat:enterprise_linux:7.5:GA:server"
+    HOME_URL="https://www.redhat.com/"
+    BUG_REPORT_URL="https://bugzilla.redhat.com/"
+
+    REDHAT_BUGZILLA_PRODUCT="Red Hat Enterprise Linux 7"
+    REDHAT_BUGZILLA_PRODUCT_VERSION=7.5
+    REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux"
+    REDHAT_SUPPORT_PRODUCT_VERSION="7.5"
+"""
+)
+
+OS_RELEASE_ALMALINUX_8 = dedent(
+    """\
+    NAME="AlmaLinux"
+    VERSION="8.3 (Purple Manul)"
+    ID="almalinux"
+    ID_LIKE="rhel centos fedora"
+    VERSION_ID="8.3"
+    PLATFORM_ID="platform:el8"
+    PRETTY_NAME="AlmaLinux 8.3 (Purple Manul)"
+    ANSI_COLOR="0;34"
+    CPE_NAME="cpe:/o:almalinux:almalinux:8.3:GA"
+    HOME_URL="https://almalinux.org/"
+    BUG_REPORT_URL="https://bugs.almalinux.org/"
+
+    ALMALINUX_MANTISBT_PROJECT="AlmaLinux-8"
+    ALMALINUX_MANTISBT_PROJECT_VERSION="8.3"
+"""
+)
+
+OS_RELEASE_EUROLINUX_7 = dedent(
+    """\
+    VERSION="7.9 (Minsk)"
+    ID="eurolinux"
+    ID_LIKE="rhel scientific centos fedora"
+    VERSION_ID="7.9"
+    PRETTY_NAME="EuroLinux 7.9 (Minsk)"
+    ANSI_COLOR="0;31"
+    CPE_NAME="cpe:/o:eurolinux:eurolinux:7.9:GA"
+    HOME_URL="http://www.euro-linux.com/"
+    BUG_REPORT_URL="mailto:support@euro-linux.com"
+    REDHAT_BUGZILLA_PRODUCT="EuroLinux 7"
+    REDHAT_BUGZILLA_PRODUCT_VERSION=7.9
+    REDHAT_SUPPORT_PRODUCT="EuroLinux"
+    REDHAT_SUPPORT_PRODUCT_VERSION="7.9"
+"""
+)
+
+OS_RELEASE_EUROLINUX_8 = dedent(
+    """\
+    NAME="EuroLinux"
+    VERSION="8.4 (Vaduz)"
+    ID="eurolinux"
+    ID_LIKE="rhel fedora centos"
+    VERSION_ID="8.4"
+    PLATFORM_ID="platform:el8"
+    PRETTY_NAME="EuroLinux 8.4 (Vaduz)"
+    ANSI_COLOR="0;34"
+    CPE_NAME="cpe:/o:eurolinux:eurolinux:8"
+    HOME_URL="https://www.euro-linux.com/"
+    BUG_REPORT_URL="https://github.com/EuroLinux/eurolinux-distro-bugs-and-rfc/"
+    REDHAT_SUPPORT_PRODUCT="EuroLinux"
+    REDHAT_SUPPORT_PRODUCT_VERSION="8"
+"""
+)
+
+OS_RELEASE_ROCKY_8 = dedent(
+    """\
+    NAME="Rocky Linux"
+    VERSION="8.3 (Green Obsidian)"
+    ID="rocky"
+    ID_LIKE="rhel fedora"
+    VERSION_ID="8.3"
+    PLATFORM_ID="platform:el8"
+    PRETTY_NAME="Rocky Linux 8.3 (Green Obsidian)"
+    ANSI_COLOR="0;31"
+    CPE_NAME="cpe:/o:rocky:rocky:8"
+    HOME_URL="https://rockylinux.org/"
+    BUG_REPORT_URL="https://bugs.rockylinux.org/"
+    ROCKY_SUPPORT_PRODUCT="Rocky Linux"
+    ROCKY_SUPPORT_PRODUCT_VERSION="8"
+"""
+)
+
+OS_RELEASE_VIRTUOZZO_8 = dedent(
+    """\
+    NAME="Virtuozzo Linux"
+    VERSION="8"
+    ID="virtuozzo"
+    ID_LIKE="rhel fedora"
+    VERSION_ID="8"
+    PLATFORM_ID="platform:el8"
+    PRETTY_NAME="Virtuozzo Linux"
+    ANSI_COLOR="0;31"
+    CPE_NAME="cpe:/o:virtuozzoproject:vzlinux:8"
+    HOME_URL="https://www.vzlinux.org"
+    BUG_REPORT_URL="https://bugs.openvz.org"
+"""
+)
+
+OS_RELEASE_CLOUDLINUX_8 = dedent(
+    """\
+    NAME="CloudLinux"
+    VERSION="8.4 (Valery Rozhdestvensky)"
+    ID="cloudlinux"
+    ID_LIKE="rhel fedora centos"
+    VERSION_ID="8.4"
+    PLATFORM_ID="platform:el8"
+    PRETTY_NAME="CloudLinux 8.4 (Valery Rozhdestvensky)"
+    ANSI_COLOR="0;31"
+    CPE_NAME="cpe:/o:cloudlinux:cloudlinux:8.4:GA:server"
+    HOME_URL="https://www.cloudlinux.com/"
+    BUG_REPORT_URL="https://www.cloudlinux.com/support"
+"""
+)
+
+OS_RELEASE_OPENEULER_20 = dedent(
+    """\
+    NAME="openEuler"
+    VERSION="20.03 (LTS-SP2)"
+    ID="openEuler"
+    VERSION_ID="20.03"
+    PRETTY_NAME="openEuler 20.03 (LTS-SP2)"
+    ANSI_COLOR="0;31"
+"""
+)
+
+REDHAT_RELEASE_CENTOS_6 = "CentOS release 6.10 (Final)"
+REDHAT_RELEASE_CENTOS_7 = "CentOS Linux release 7.5.1804 (Core)"
+REDHAT_RELEASE_REDHAT_6 = (
+    "Red Hat Enterprise Linux Server release 6.10 (Santiago)"
+)
+REDHAT_RELEASE_REDHAT_7 = "Red Hat Enterprise Linux Server release 7.5 (Maipo)"
+REDHAT_RELEASE_ALMALINUX_8 = "AlmaLinux release 8.3 (Purple Manul)"
+REDHAT_RELEASE_EUROLINUX_7 = "EuroLinux release 7.9 (Minsk)"
+REDHAT_RELEASE_EUROLINUX_8 = "EuroLinux release 8.4 (Vaduz)"
+REDHAT_RELEASE_ROCKY_8 = "Rocky Linux release 8.3 (Green Obsidian)"
+REDHAT_RELEASE_VIRTUOZZO_8 = "Virtuozzo Linux release 8"
+REDHAT_RELEASE_CLOUDLINUX_8 = "CloudLinux release 8.4 (Valery Rozhdestvensky)"
+OS_RELEASE_DEBIAN = dedent(
+    """\
+    PRETTY_NAME="Debian GNU/Linux 9 (stretch)"
+    NAME="Debian GNU/Linux"
+    VERSION_ID="9"
+    VERSION="9 (stretch)"
+    ID=debian
+    HOME_URL="https://www.debian.org/"
+    SUPPORT_URL="https://www.debian.org/support"
+    BUG_REPORT_URL="https://bugs.debian.org/"
+"""
+)
+
+OS_RELEASE_UBUNTU = dedent(
+    """\
+    NAME="Ubuntu"\n
+    # comment test
+    VERSION="16.04.3 LTS (Xenial Xerus)"\n
+    ID=ubuntu\n
+    ID_LIKE=debian\n
+    PRETTY_NAME="Ubuntu 16.04.3 LTS"\n
+    VERSION_ID="16.04"\n
+    HOME_URL="http://www.ubuntu.com/"\n
+    SUPPORT_URL="http://help.ubuntu.com/"\n
+    BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"\n
+    VERSION_CODENAME=xenial\n
+    UBUNTU_CODENAME=xenial\n
+"""
+)
+
+OS_RELEASE_PHOTON = """\
+    NAME="VMware Photon OS"
+    VERSION="4.0"
+    ID=photon
+    VERSION_ID=4.0
+    PRETTY_NAME="VMware Photon OS/Linux"
+    ANSI_COLOR="1;34"
+    HOME_URL="https://vmware.github.io/photon/"
+    BUG_REPORT_URL="https://github.com/vmware/photon/issues"
+"""
+
+
+class FakeCloud(object):
+    def __init__(self, hostname, fqdn):
+        self.hostname = hostname
+        self.fqdn = fqdn
+        self.calls = []
+
+    def get_hostname(self, fqdn=None, metadata_only=None):
+        myargs = {}
+        if fqdn is not None:
+            myargs['fqdn'] = fqdn
+        if metadata_only is not None:
+            myargs['metadata_only'] = metadata_only
+        self.calls.append(myargs)
+        if fqdn:
+            return self.fqdn
+        return self.hostname
+
+
+class TestUtil(CiTestCase):
+    def test_parse_mount_info_no_opts_no_arg(self):
+        result = util.parse_mount_info('/home', MOUNT_INFO, LOG)
+        self.assertEqual(('/dev/sda2', 'xfs', '/home'), result)
+
+    def test_parse_mount_info_no_opts_arg(self):
+        result = util.parse_mount_info('/home', MOUNT_INFO, LOG, False)
+        self.assertEqual(('/dev/sda2', 'xfs', '/home'), result)
+
+    def test_parse_mount_info_with_opts(self):
+        result = util.parse_mount_info('/', MOUNT_INFO, LOG, True)
+        self.assertEqual(('/dev/sda1', 'btrfs', '/', 'ro,relatime'), result)
+
+    @mock.patch('cloudinit.util.get_mount_info')
+    def test_mount_is_rw(self, m_mount_info):
+        m_mount_info.return_value = ('/dev/sda1', 'btrfs', '/', 'rw,relatime')
+        is_rw = util.mount_is_read_write('/')
+        self.assertEqual(is_rw, True)
+
+    @mock.patch('cloudinit.util.get_mount_info')
+    def test_mount_is_ro(self, m_mount_info):
+        m_mount_info.return_value = ('/dev/sda1', 'btrfs', '/', 'ro,relatime')
+        is_rw = util.mount_is_read_write('/')
+        self.assertEqual(is_rw, False)
+
+
+class TestUptime(CiTestCase):
+    @mock.patch('cloudinit.util.boottime')
+    @mock.patch('cloudinit.util.os.path.exists')
+
@mock.patch('cloudinit.util.time.time') + def test_uptime_non_linux_path(self, m_time, m_exists, m_boottime): + boottime = 1000.0 + uptime = 10.0 + m_boottime.return_value = boottime + m_time.return_value = boottime + uptime + m_exists.return_value = False + result = util.uptime() + self.assertEqual(str(uptime), result) + + +class TestShellify(CiTestCase): + def test_input_dict_raises_type_error(self): + self.assertRaisesRegex( + TypeError, + 'Input.*was.*dict.*xpected', + util.shellify, + {'mykey': 'myval'}, + ) + def test_input_str_raises_type_error(self): + self.assertRaisesRegex( + TypeError, 'Input.*was.*str.*xpected', util.shellify, "foobar" + ) -class FakeSelinux(object): + def test_value_with_int_raises_type_error(self): + self.assertRaisesRegex( + TypeError, 'shellify.*int', util.shellify, ["foo", 1] + ) + + def test_supports_strings_and_lists(self): + self.assertEqual( + '\n'.join( + [ + "#!/bin/sh", + "echo hi mom", + "'echo' 'hi dad'", + "'echo' 'hi' 'sis'", + "", + ] + ), + util.shellify( + ["echo hi mom", ["echo", "hi dad"], ('echo', 'hi', 'sis')] + ), + ) + + def test_supports_comments(self): + self.assertEqual( + '\n'.join(["#!/bin/sh", "echo start", "echo end", ""]), + util.shellify(["echo start", None, "echo end"]), + ) + + +class TestGetHostnameFqdn(CiTestCase): + def test_get_hostname_fqdn_from_only_cfg_fqdn(self): + """When cfg only has the fqdn key, derive hostname and fqdn from it.""" + hostname, fqdn = util.get_hostname_fqdn( + cfg={'fqdn': 'myhost.domain.com'}, cloud=None + ) + self.assertEqual('myhost', hostname) + self.assertEqual('myhost.domain.com', fqdn) + + def test_get_hostname_fqdn_from_cfg_fqdn_and_hostname(self): + """When cfg has both fqdn and hostname keys, return them.""" + hostname, fqdn = util.get_hostname_fqdn( + cfg={'fqdn': 'myhost.domain.com', 'hostname': 'other'}, cloud=None + ) + self.assertEqual('other', hostname) + self.assertEqual('myhost.domain.com', fqdn) + + def test_get_hostname_fqdn_from_cfg_hostname_with_domain(self): + """When cfg has only hostname key which represents a fqdn, use that.""" + hostname, fqdn = util.get_hostname_fqdn( + cfg={'hostname': 'myhost.domain.com'}, cloud=None + ) + self.assertEqual('myhost', hostname) + self.assertEqual('myhost.domain.com', fqdn) + + def test_get_hostname_fqdn_from_cfg_hostname_without_domain(self): + """When cfg has a hostname without a '.' 
query cloud.get_hostname.""" + mycloud = FakeCloud('cloudhost', 'cloudhost.mycloud.com') + hostname, fqdn = util.get_hostname_fqdn( + cfg={'hostname': 'myhost'}, cloud=mycloud + ) + self.assertEqual('myhost', hostname) + self.assertEqual('cloudhost.mycloud.com', fqdn) + self.assertEqual( + [{'fqdn': True, 'metadata_only': False}], mycloud.calls + ) + + def test_get_hostname_fqdn_from_without_fqdn_or_hostname(self): + """When cfg has neither hostname nor fqdn cloud.get_hostname.""" + mycloud = FakeCloud('cloudhost', 'cloudhost.mycloud.com') + hostname, fqdn = util.get_hostname_fqdn(cfg={}, cloud=mycloud) + self.assertEqual('cloudhost', hostname) + self.assertEqual('cloudhost.mycloud.com', fqdn) + self.assertEqual( + [{'fqdn': True, 'metadata_only': False}, {'metadata_only': False}], + mycloud.calls, + ) + + def test_get_hostname_fqdn_from_passes_metadata_only_to_cloud(self): + """Calls to cloud.get_hostname pass the metadata_only parameter.""" + mycloud = FakeCloud('cloudhost', 'cloudhost.mycloud.com') + _hn, _fqdn = util.get_hostname_fqdn( + cfg={}, cloud=mycloud, metadata_only=True + ) + self.assertEqual( + [{'fqdn': True, 'metadata_only': True}, {'metadata_only': True}], + mycloud.calls, + ) + + +class TestBlkid(CiTestCase): + ids = { + "id01": "1111-1111", + "id02": "22222222-2222", + "id03": "33333333-3333", + "id04": "44444444-4444", + "id05": "55555555-5555-5555-5555-555555555555", + "id06": "66666666-6666-6666-6666-666666666666", + "id07": "52894610484658920398", + "id08": "86753098675309867530", + "id09": "99999999-9999-9999-9999-999999999999", + } + + blkid_out = dedent( + """\ + /dev/loop0: TYPE="squashfs" + /dev/loop1: TYPE="squashfs" + /dev/loop2: TYPE="squashfs" + /dev/loop3: TYPE="squashfs" + /dev/sda1: UUID="{id01}" TYPE="vfat" PARTUUID="{id02}" + /dev/sda2: UUID="{id03}" TYPE="ext4" PARTUUID="{id04}" + /dev/sda3: UUID="{id05}" TYPE="ext4" PARTUUID="{id06}" + /dev/sda4: LABEL="default" UUID="{id07}" UUID_SUB="{id08}" """ + """TYPE="zfs_member" PARTUUID="{id09}" + /dev/loop4: TYPE="squashfs" + """ + ) + + maxDiff = None + + def _get_expected(self): + return { + "/dev/loop0": {"DEVNAME": "/dev/loop0", "TYPE": "squashfs"}, + "/dev/loop1": {"DEVNAME": "/dev/loop1", "TYPE": "squashfs"}, + "/dev/loop2": {"DEVNAME": "/dev/loop2", "TYPE": "squashfs"}, + "/dev/loop3": {"DEVNAME": "/dev/loop3", "TYPE": "squashfs"}, + "/dev/loop4": {"DEVNAME": "/dev/loop4", "TYPE": "squashfs"}, + "/dev/sda1": { + "DEVNAME": "/dev/sda1", + "TYPE": "vfat", + "UUID": self.ids["id01"], + "PARTUUID": self.ids["id02"], + }, + "/dev/sda2": { + "DEVNAME": "/dev/sda2", + "TYPE": "ext4", + "UUID": self.ids["id03"], + "PARTUUID": self.ids["id04"], + }, + "/dev/sda3": { + "DEVNAME": "/dev/sda3", + "TYPE": "ext4", + "UUID": self.ids["id05"], + "PARTUUID": self.ids["id06"], + }, + "/dev/sda4": { + "DEVNAME": "/dev/sda4", + "TYPE": "zfs_member", + "LABEL": "default", + "UUID": self.ids["id07"], + "UUID_SUB": self.ids["id08"], + "PARTUUID": self.ids["id09"], + }, + } + + @mock.patch("cloudinit.subp.subp") + def test_functional_blkid(self, m_subp): + m_subp.return_value = (self.blkid_out.format(**self.ids), "") + self.assertEqual(self._get_expected(), util.blkid()) + m_subp.assert_called_with( + ["blkid", "-o", "full"], capture=True, decode="replace" + ) + + @mock.patch("cloudinit.subp.subp") + def test_blkid_no_cache_uses_no_cache(self, m_subp): + """blkid should turn off cache if disable_cache is true.""" + m_subp.return_value = (self.blkid_out.format(**self.ids), "") + self.assertEqual(self._get_expected(), 
util.blkid(disable_cache=True))
+        m_subp.assert_called_with(
+            ["blkid", "-o", "full", "-c", "/dev/null"],
+            capture=True,
+            decode="replace",
+        )
+
+
+@mock.patch('cloudinit.subp.subp')
+class TestUdevadmSettle(CiTestCase):
+    def test_with_no_params(self, m_subp):
+        """called with no parameters."""
+        util.udevadm_settle()
+        m_subp.assert_called_once_with(['udevadm', 'settle'])
+
+    def test_with_exists_and_not_exists(self, m_subp):
+        """with exists=file where file does not exist should invoke subp."""
+        mydev = self.tmp_path("mydev")
+        util.udevadm_settle(exists=mydev)
+        m_subp.assert_called_once_with(
+            ['udevadm', 'settle', '--exit-if-exists=%s' % mydev]
+        )
+
+    def test_with_exists_and_file_exists(self, m_subp):
+        """with exists=file where file does exist should not invoke subp."""
+        mydev = self.tmp_path("mydev")
+        util.write_file(mydev, "foo\n")
+        util.udevadm_settle(exists=mydev)
+        self.assertIsNone(m_subp.call_args)
+
+    def test_with_timeout_int(self, m_subp):
+        """timeout can be an integer."""
+        timeout = 9
+        util.udevadm_settle(timeout=timeout)
+        m_subp.assert_called_once_with(
+            ['udevadm', 'settle', '--timeout=%s' % timeout]
+        )
+
+    def test_with_timeout_string(self, m_subp):
+        """timeout can be a string."""
+        timeout = "555"
+        util.udevadm_settle(timeout=timeout)
+        m_subp.assert_called_once_with(
+            ['udevadm', 'settle', '--timeout=%s' % timeout]
+        )
+
+    def test_with_exists_and_timeout(self, m_subp):
+        """test call with both exists and timeout."""
+        mydev = self.tmp_path("mydev")
+        timeout = "3"
+        util.udevadm_settle(exists=mydev, timeout=timeout)
+        m_subp.assert_called_once_with(
+            [
+                'udevadm',
+                'settle',
+                '--exit-if-exists=%s' % mydev,
+                '--timeout=%s' % timeout,
+            ]
+        )
+
+    def test_subp_exception_raises_to_caller(self, m_subp):
+        m_subp.side_effect = subp.ProcessExecutionError("BOOM")
+        self.assertRaises(subp.ProcessExecutionError, util.udevadm_settle)
+
+
+@mock.patch('os.path.exists')
+class TestGetLinuxDistro(CiTestCase):
+    def setUp(self):
+        # python2 has no lru_cache, and therefore, no cache_clear()
+        if hasattr(util.get_linux_distro, "cache_clear"):
+            util.get_linux_distro.cache_clear()
+
+    @classmethod
+    def os_release_exists(cls, path):
+        """Side effect function"""
+        if path == '/etc/os-release':
+            return 1
+
+    @classmethod
+    def redhat_release_exists(cls, path):
+        """Side effect function"""
+        if path == '/etc/redhat-release':
+            return 1
+
+    @mock.patch('cloudinit.util.load_file')
+    def test_get_linux_distro_quoted_name(self, m_os_release, m_path_exists):
+        """Verify we get the correct name if the os-release file has
+        the distro name in quotes"""
+        m_os_release.return_value = OS_RELEASE_SLES
+        m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+        dist = util.get_linux_distro()
+        self.assertEqual(('sles', '12.3', platform.machine()), dist)
+
+    @mock.patch('cloudinit.util.load_file')
+    def test_get_linux_distro_bare_name(self, m_os_release, m_path_exists):
+        """Verify we get the correct name if the os-release file does not
+        have the distro name in quotes"""
+        m_os_release.return_value = OS_RELEASE_UBUNTU
+        m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+        dist = util.get_linux_distro()
+        self.assertEqual(('ubuntu', '16.04', 'xenial'), dist)
+
+    @mock.patch('platform.system')
+    @mock.patch('platform.release')
+    @mock.patch('cloudinit.util._parse_redhat_release')
+    def test_get_linux_freebsd(
+        self,
+        m_parse_redhat_release,
+        m_platform_release,
+        m_platform_system,
+        m_path_exists,
+    ):
+        """Verify we get the correct name and release name on FreeBSD."""
+
m_path_exists.return_value = False + m_platform_release.return_value = '12.0-RELEASE-p10' + m_platform_system.return_value = 'FreeBSD' + m_parse_redhat_release.return_value = {} + util.is_BSD.cache_clear() + dist = util.get_linux_distro() + self.assertEqual(('freebsd', '12.0-RELEASE-p10', ''), dist) + + @mock.patch('cloudinit.util.load_file') + def test_get_linux_centos6(self, m_os_release, m_path_exists): + """Verify we get the correct name and release name on CentOS 6.""" + m_os_release.return_value = REDHAT_RELEASE_CENTOS_6 + m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists + dist = util.get_linux_distro() + self.assertEqual(('centos', '6.10', 'Final'), dist) + + @mock.patch('cloudinit.util.load_file') + def test_get_linux_centos7_redhat_release(self, m_os_release, m_exists): + """Verify the correct release info on CentOS 7 without os-release.""" + m_os_release.return_value = REDHAT_RELEASE_CENTOS_7 + m_exists.side_effect = TestGetLinuxDistro.redhat_release_exists + dist = util.get_linux_distro() + self.assertEqual(('centos', '7.5.1804', 'Core'), dist) + + @mock.patch('cloudinit.util.load_file') + def test_get_linux_redhat7_osrelease(self, m_os_release, m_path_exists): + """Verify redhat 7 read from os-release.""" + m_os_release.return_value = OS_RELEASE_REDHAT_7 + m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists + dist = util.get_linux_distro() + self.assertEqual(('redhat', '7.5', 'Maipo'), dist) + + @mock.patch('cloudinit.util.load_file') + def test_get_linux_redhat7_rhrelease(self, m_os_release, m_path_exists): + """Verify redhat 7 read from redhat-release.""" + m_os_release.return_value = REDHAT_RELEASE_REDHAT_7 + m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists + dist = util.get_linux_distro() + self.assertEqual(('redhat', '7.5', 'Maipo'), dist) + + @mock.patch('cloudinit.util.load_file') + def test_get_linux_redhat6_rhrelease(self, m_os_release, m_path_exists): + """Verify redhat 6 read from redhat-release.""" + m_os_release.return_value = REDHAT_RELEASE_REDHAT_6 + m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists + dist = util.get_linux_distro() + self.assertEqual(('redhat', '6.10', 'Santiago'), dist) + + @mock.patch('cloudinit.util.load_file') + def test_get_linux_copr_centos(self, m_os_release, m_path_exists): + """Verify we get the correct name and release name on COPR CentOS.""" + m_os_release.return_value = OS_RELEASE_CENTOS + m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists + dist = util.get_linux_distro() + self.assertEqual(('centos', '7', 'Core'), dist) + + @mock.patch('cloudinit.util.load_file') + def test_get_linux_almalinux8_rhrelease(self, m_os_release, m_path_exists): + """Verify almalinux 8 read from redhat-release.""" + m_os_release.return_value = REDHAT_RELEASE_ALMALINUX_8 + m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists + dist = util.get_linux_distro() + self.assertEqual(('almalinux', '8.3', 'Purple Manul'), dist) + + @mock.patch('cloudinit.util.load_file') + def test_get_linux_almalinux8_osrelease(self, m_os_release, m_path_exists): + """Verify almalinux 8 read from os-release.""" + m_os_release.return_value = OS_RELEASE_ALMALINUX_8 + m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists + dist = util.get_linux_distro() + self.assertEqual(('almalinux', '8.3', 'Purple Manul'), dist) + + @mock.patch('cloudinit.util.load_file') + def test_get_linux_eurolinux7_rhrelease(self, m_os_release, m_path_exists): + """Verify eurolinux 7 read from 
redhat-release.""" + m_os_release.return_value = REDHAT_RELEASE_EUROLINUX_7 + m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists + dist = util.get_linux_distro() + self.assertEqual(('eurolinux', '7.9', 'Minsk'), dist) + + @mock.patch('cloudinit.util.load_file') + def test_get_linux_eurolinux7_osrelease(self, m_os_release, m_path_exists): + """Verify eurolinux 7 read from os-release.""" + m_os_release.return_value = OS_RELEASE_EUROLINUX_7 + m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists + dist = util.get_linux_distro() + self.assertEqual(('eurolinux', '7.9', 'Minsk'), dist) + + @mock.patch('cloudinit.util.load_file') + def test_get_linux_eurolinux8_rhrelease(self, m_os_release, m_path_exists): + """Verify eurolinux 8 read from redhat-release.""" + m_os_release.return_value = REDHAT_RELEASE_EUROLINUX_8 + m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists + dist = util.get_linux_distro() + self.assertEqual(('eurolinux', '8.4', 'Vaduz'), dist) + + @mock.patch('cloudinit.util.load_file') + def test_get_linux_eurolinux8_osrelease(self, m_os_release, m_path_exists): + """Verify eurolinux 8 read from os-release.""" + m_os_release.return_value = OS_RELEASE_EUROLINUX_8 + m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists + dist = util.get_linux_distro() + self.assertEqual(('eurolinux', '8.4', 'Vaduz'), dist) + + @mock.patch('cloudinit.util.load_file') + def test_get_linux_rocky8_rhrelease(self, m_os_release, m_path_exists): + """Verify rocky linux 8 read from redhat-release.""" + m_os_release.return_value = REDHAT_RELEASE_ROCKY_8 + m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists + dist = util.get_linux_distro() + self.assertEqual(('rocky', '8.3', 'Green Obsidian'), dist) + + @mock.patch('cloudinit.util.load_file') + def test_get_linux_rocky8_osrelease(self, m_os_release, m_path_exists): + """Verify rocky linux 8 read from os-release.""" + m_os_release.return_value = OS_RELEASE_ROCKY_8 + m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists + dist = util.get_linux_distro() + self.assertEqual(('rocky', '8.3', 'Green Obsidian'), dist) + + @mock.patch('cloudinit.util.load_file') + def test_get_linux_virtuozzo8_rhrelease(self, m_os_release, m_path_exists): + """Verify virtuozzo linux 8 read from redhat-release.""" + m_os_release.return_value = REDHAT_RELEASE_VIRTUOZZO_8 + m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists + dist = util.get_linux_distro() + self.assertEqual(('virtuozzo', '8', 'Virtuozzo Linux'), dist) + + @mock.patch('cloudinit.util.load_file') + def test_get_linux_virtuozzo8_osrelease(self, m_os_release, m_path_exists): + """Verify virtuozzo linux 8 read from os-release.""" + m_os_release.return_value = OS_RELEASE_VIRTUOZZO_8 + m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists + dist = util.get_linux_distro() + self.assertEqual(('virtuozzo', '8', 'Virtuozzo Linux'), dist) + + @mock.patch('cloudinit.util.load_file') + def test_get_linux_cloud8_rhrelease(self, m_os_release, m_path_exists): + """Verify cloudlinux 8 read from redhat-release.""" + m_os_release.return_value = REDHAT_RELEASE_CLOUDLINUX_8 + m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists + dist = util.get_linux_distro() + self.assertEqual(('cloudlinux', '8.4', 'Valery Rozhdestvensky'), dist) + + @mock.patch('cloudinit.util.load_file') + def test_get_linux_cloud8_osrelease(self, m_os_release, m_path_exists): + """Verify cloudlinux 8 read from os-release.""" + 
m_os_release.return_value = OS_RELEASE_CLOUDLINUX_8
+        m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+        dist = util.get_linux_distro()
+        self.assertEqual(('cloudlinux', '8.4', 'Valery Rozhdestvensky'), dist)
+
+    @mock.patch('cloudinit.util.load_file')
+    def test_get_linux_debian(self, m_os_release, m_path_exists):
+        """Verify we get the correct name and release name on Debian."""
+        m_os_release.return_value = OS_RELEASE_DEBIAN
+        m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+        dist = util.get_linux_distro()
+        self.assertEqual(('debian', '9', 'stretch'), dist)
+
+    @mock.patch('cloudinit.util.load_file')
+    def test_get_linux_openeuler(self, m_os_release, m_path_exists):
+        """Verify we get the correct name and release name on openEuler."""
+        m_os_release.return_value = OS_RELEASE_OPENEULER_20
+        m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+        dist = util.get_linux_distro()
+        self.assertEqual(('openEuler', '20.03', 'LTS-SP2'), dist)
+
+    @mock.patch('cloudinit.util.load_file')
+    def test_get_linux_opensuse(self, m_os_release, m_path_exists):
+        """Verify we get the correct name and machine arch on openSUSE
+        prior to openSUSE Leap 15.
+        """
+        m_os_release.return_value = OS_RELEASE_OPENSUSE
+        m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+        dist = util.get_linux_distro()
+        self.assertEqual(('opensuse', '42.3', platform.machine()), dist)
+
+    @mock.patch('cloudinit.util.load_file')
+    def test_get_linux_opensuse_l15(self, m_os_release, m_path_exists):
+        """Verify we get the correct name and machine arch on openSUSE
+        for openSUSE Leap 15.0 and later.
+        """
+        m_os_release.return_value = OS_RELEASE_OPENSUSE_L15
+        m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+        dist = util.get_linux_distro()
+        self.assertEqual(('opensuse-leap', '15.0', platform.machine()), dist)
+
+    @mock.patch('cloudinit.util.load_file')
+    def test_get_linux_opensuse_tw(self, m_os_release, m_path_exists):
+        """Verify we get the correct name and machine arch on openSUSE
+        for openSUSE Tumbleweed
+        """
+        m_os_release.return_value = OS_RELEASE_OPENSUSE_TW
+        m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+        dist = util.get_linux_distro()
+        self.assertEqual(
+            ('opensuse-tumbleweed', '20180920', platform.machine()), dist
+        )
+
+    @mock.patch('cloudinit.util.load_file')
+    def test_get_linux_photon_os_release(self, m_os_release, m_path_exists):
+        """Verify we get the correct name and machine arch on PhotonOS"""
+        m_os_release.return_value = OS_RELEASE_PHOTON
+        m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+        dist = util.get_linux_distro()
+        self.assertEqual(('photon', '4.0', 'VMware Photon OS/Linux'), dist)
+
+    @mock.patch('platform.system')
+    @mock.patch('platform.dist', create=True)
+    def test_get_linux_distro_no_data(
+        self, m_platform_dist, m_platform_system, m_path_exists
+    ):
+        """Verify we get no information if os-release does not exist"""
+        m_platform_dist.return_value = ('', '', '')
+        m_platform_system.return_value = "Linux"
+        m_path_exists.return_value = 0
+        dist = util.get_linux_distro()
+        self.assertEqual(('', '', ''), dist)
+
+    @mock.patch('platform.system')
+    @mock.patch('platform.dist', create=True)
+    def test_get_linux_distro_no_impl(
+        self, m_platform_dist, m_platform_system, m_path_exists
+    ):
+        """Verify we get an empty tuple when no information exists and
+        Exceptions are not propagated"""
+        m_platform_dist.side_effect = Exception()
+        m_platform_system.return_value = "Linux"
+
m_path_exists.return_value = 0 + dist = util.get_linux_distro() + self.assertEqual(('', '', ''), dist) + + @mock.patch('platform.system') + @mock.patch('platform.dist', create=True) + def test_get_linux_distro_plat_data( + self, m_platform_dist, m_platform_system, m_path_exists + ): + """Verify we get the correct platform information""" + m_platform_dist.return_value = ('foo', '1.1', 'aarch64') + m_platform_system.return_value = "Linux" + m_path_exists.return_value = 0 + dist = util.get_linux_distro() + self.assertEqual(('foo', '1.1', 'aarch64'), dist) + + +class TestGetVariant: + @pytest.mark.parametrize( + 'info, expected_variant', + [ + ({'system': 'Linux', 'dist': ('almalinux',)}, 'almalinux'), + ({'system': 'linux', 'dist': ('alpine',)}, 'alpine'), + ({'system': 'linux', 'dist': ('arch',)}, 'arch'), + ({'system': 'linux', 'dist': ('centos',)}, 'centos'), + ({'system': 'linux', 'dist': ('cloudlinux',)}, 'cloudlinux'), + ({'system': 'linux', 'dist': ('debian',)}, 'debian'), + ({'system': 'linux', 'dist': ('eurolinux',)}, 'eurolinux'), + ({'system': 'linux', 'dist': ('fedora',)}, 'fedora'), + ({'system': 'linux', 'dist': ('openEuler',)}, 'openeuler'), + ({'system': 'linux', 'dist': ('photon',)}, 'photon'), + ({'system': 'linux', 'dist': ('rhel',)}, 'rhel'), + ({'system': 'linux', 'dist': ('rocky',)}, 'rocky'), + ({'system': 'linux', 'dist': ('suse',)}, 'suse'), + ({'system': 'linux', 'dist': ('virtuozzo',)}, 'virtuozzo'), + ({'system': 'linux', 'dist': ('ubuntu',)}, 'ubuntu'), + ({'system': 'linux', 'dist': ('linuxmint',)}, 'ubuntu'), + ({'system': 'linux', 'dist': ('mint',)}, 'ubuntu'), + ({'system': 'linux', 'dist': ('redhat',)}, 'rhel'), + ({'system': 'linux', 'dist': ('opensuse',)}, 'suse'), + ({'system': 'linux', 'dist': ('opensuse-tumbleweed',)}, 'suse'), + ({'system': 'linux', 'dist': ('opensuse-leap',)}, 'suse'), + ({'system': 'linux', 'dist': ('sles',)}, 'suse'), + ({'system': 'linux', 'dist': ('sle_hpc',)}, 'suse'), + ({'system': 'linux', 'dist': ('my_distro',)}, 'linux'), + ({'system': 'Windows', 'dist': ('dontcare',)}, 'windows'), + ({'system': 'Darwin', 'dist': ('dontcare',)}, 'darwin'), + ({'system': 'Freebsd', 'dist': ('dontcare',)}, 'freebsd'), + ({'system': 'Netbsd', 'dist': ('dontcare',)}, 'netbsd'), + ({'system': 'Openbsd', 'dist': ('dontcare',)}, 'openbsd'), + ({'system': 'Dragonfly', 'dist': ('dontcare',)}, 'dragonfly'), + ], + ) + def test_get_variant(self, info, expected_variant): + """Verify we get the correct variant name""" + assert util._get_variant(info) == expected_variant + + +class TestJsonDumps(CiTestCase): + def test_is_str(self): + """json_dumps should return a string.""" + self.assertTrue(isinstance(util.json_dumps({'abc': '123'}), str)) + + def test_utf8(self): + smiley = '\\ud83d\\ude03' + self.assertEqual( + {'smiley': smiley}, json.loads(util.json_dumps({'smiley': smiley})) + ) + + def test_non_utf8(self): + blob = b'\xba\x03Qx-#y\xea' + self.assertEqual( + {'blob': 'ci-b64:' + base64.b64encode(blob).decode('utf-8')}, + json.loads(util.json_dumps({'blob': blob})), + ) + + +@mock.patch('os.path.exists') +class TestIsLXD(CiTestCase): + def test_is_lxd_true_on_sock_device(self, m_exists): + """When lxd's /dev/lxd/sock exists, is_lxd returns true.""" + m_exists.return_value = True + self.assertTrue(util.is_lxd()) + m_exists.assert_called_once_with('/dev/lxd/sock') + def test_is_lxd_false_when_sock_device_absent(self, m_exists): + """When lxd's /dev/lxd/sock is absent, is_lxd returns false.""" + m_exists.return_value = False + 
self.assertFalse(util.is_lxd()) + m_exists.assert_called_once_with('/dev/lxd/sock') + + +class TestReadCcFromCmdline: + @pytest.mark.parametrize( + "cmdline,expected_cfg", + [ + # Return None if cmdline has no cc:end_cc content. + (CiTestCase.random_string(), None), + # Return None if YAML content is empty string. + ('foo cc: end_cc bar', None), + # Return expected dictionary without trailing end_cc marker. + ('foo cc: ssh_pwauth: true', {'ssh_pwauth': True}), + # Return expected dictionary w escaped newline and no end_cc. + ('foo cc: ssh_pwauth: true\\n', {'ssh_pwauth': True}), + # Return expected dictionary of yaml between cc: and end_cc. + ('foo cc: ssh_pwauth: true end_cc bar', {'ssh_pwauth': True}), + # Return dict with list value w escaped newline, no end_cc. + ( + 'cc: ssh_import_id: [smoser, kirkland]\\n', + {'ssh_import_id': ['smoser', 'kirkland']}, + ), + # Parse urlencoded brackets in yaml content. + ( + 'cc: ssh_import_id: %5Bsmoser, kirkland%5D end_cc', + {'ssh_import_id': ['smoser', 'kirkland']}, + ), + # Parse complete urlencoded yaml content. + ( + 'cc: ssh_import_id%3A%20%5Buser1%2C%20user2%5D end_cc', + {'ssh_import_id': ['user1', 'user2']}, + ), + # Parse nested dictionary in yaml content. + ( + 'cc: ntp: {enabled: true, ntp_client: myclient} end_cc', + {'ntp': {'enabled': True, 'ntp_client': 'myclient'}}, + ), + # Parse single mapping value in yaml content. + ('cc: ssh_import_id: smoser end_cc', {'ssh_import_id': 'smoser'}), + # Parse multiline content with multiple mapping and nested lists. + ( + ( + 'cc: ssh_import_id: [smoser, bob]\\n' + 'runcmd: [ [ ls, -l ], echo hi ] end_cc' + ), + { + 'ssh_import_id': ['smoser', 'bob'], + 'runcmd': [['ls', '-l'], 'echo hi'], + }, + ), + # Parse multiline encoded content w/ mappings and nested lists. + ( + ( + 'cc: ssh_import_id: %5Bsmoser, bob%5D\\n' + 'runcmd: [ [ ls, -l ], echo hi ] end_cc' + ), + { + 'ssh_import_id': ['smoser', 'bob'], + 'runcmd': [['ls', '-l'], 'echo hi'], + }, + ), + # test encoded escaped newlines work. + # + # unquote(encoded_content) + # 'ssh_import_id: [smoser, bob]\\nruncmd: [ [ ls, -l ], echo hi ]' + ( + ( + 'cc: ' + + ( + 'ssh_import_id%3A%20%5Bsmoser%2C%20bob%5D%5Cn' + 'runcmd%3A%20%5B%20%5B%20ls%2C%20-l%20%5D%2C' + '%20echo%20hi%20%5D' + ) + + ' end_cc' + ), + { + 'ssh_import_id': ['smoser', 'bob'], + 'runcmd': [['ls', '-l'], 'echo hi'], + }, + ), + # test encoded newlines work. + # + # unquote(encoded_content) + # 'ssh_import_id: [smoser, bob]\nruncmd: [ [ ls, -l ], echo hi ]' + ( + ( + "cc: " + + ( + 'ssh_import_id%3A%20%5Bsmoser%2C%20bob%5D%0A' + 'runcmd%3A%20%5B%20%5B%20ls%2C%20-l%20%5D%2C' + '%20echo%20hi%20%5D' + ) + + ' end_cc' + ), + { + 'ssh_import_id': ['smoser', 'bob'], + 'runcmd': [['ls', '-l'], 'echo hi'], + }, + ), + # Parse and merge multiple yaml content sections. + ( + ( + 'cc:ssh_import_id: [smoser, bob] end_cc ' + 'cc: runcmd: [ [ ls, -l ] ] end_cc' + ), + {'ssh_import_id': ['smoser', 'bob'], 'runcmd': [['ls', '-l']]}, + ), + # Parse and merge multiple encoded yaml content sections. + ( + ( + 'cc:ssh_import_id%3A%20%5Bsmoser%5D end_cc ' + 'cc:runcmd%3A%20%5B%20%5B%20ls%2C%20-l%20%5D%20%5D end_cc' + ), + {'ssh_import_id': ['smoser'], 'runcmd': [['ls', '-l']]}, + ), + ], + ) + def test_read_conf_from_cmdline_config(self, expected_cfg, cmdline): + assert expected_cfg == util.read_conf_from_cmdline(cmdline=cmdline) + + +class TestMountCb: + """Tests for ``util.mount_cb``. 
+ + These tests consider the "unit" under test to be ``util.mount_cb`` and + ``util.unmounter``, which is only used by ``mount_cb``. + + TODO: Test default mtype determination + TODO: Test the if/else branch that actually performs the mounting operation + """ + + @pytest.yield_fixture + def already_mounted_device_and_mountdict(self): + """Mock an already-mounted device, and yield (device, mount dict)""" + device = "/dev/fake0" + mountpoint = "/mnt/fake" + with mock.patch("cloudinit.util.subp.subp"): + with mock.patch("cloudinit.util.mounts") as m_mounts: + mounts = {device: {"mountpoint": mountpoint}} + m_mounts.return_value = mounts + yield device, mounts[device] + + @pytest.fixture + def already_mounted_device(self, already_mounted_device_and_mountdict): + """already_mounted_device_and_mountdict, but return only the device""" + return already_mounted_device_and_mountdict[0] + + @pytest.mark.parametrize( + "mtype,expected", + [ + # While the filesystem is called iso9660, the mount type is cd9660 + ("iso9660", "cd9660"), + # vfat is generally called "msdos" on BSD + ("vfat", "msdos"), + # judging from man pages, only FreeBSD has this alias + ("msdosfs", "msdos"), + # Test happy path + ("ufs", "ufs"), + ], + ) + @mock.patch("cloudinit.util.is_Linux", autospec=True) + @mock.patch("cloudinit.util.is_BSD", autospec=True) + @mock.patch("cloudinit.util.subp.subp") + @mock.patch("cloudinit.temp_utils.tempdir", autospec=True) + def test_normalize_mtype_on_bsd( + self, m_tmpdir, m_subp, m_is_BSD, m_is_Linux, mtype, expected + ): + m_is_BSD.return_value = True + m_is_Linux.return_value = False + m_tmpdir.return_value.__enter__ = mock.Mock( + autospec=True, return_value="/tmp/fake" + ) + m_tmpdir.return_value.__exit__ = mock.Mock( + autospec=True, return_value=True + ) + callback = mock.Mock(autospec=True) + + util.mount_cb('/dev/fake0', callback, mtype=mtype) + assert ( + mock.call( + [ + "mount", + "-o", + "ro", + "-t", + expected, + "/dev/fake0", + "/tmp/fake", + ], + update_env=None, + ) + in m_subp.call_args_list + ) + + @pytest.mark.parametrize("invalid_mtype", [int(0), float(0.0), dict()]) + def test_typeerror_raised_for_invalid_mtype(self, invalid_mtype): + with pytest.raises(TypeError): + util.mount_cb(mock.Mock(), mock.Mock(), mtype=invalid_mtype) + + @mock.patch("cloudinit.util.subp.subp") + def test_already_mounted_does_not_mount_or_umount_anything( + self, m_subp, already_mounted_device + ): + util.mount_cb(already_mounted_device, mock.Mock()) + + assert 0 == m_subp.call_count + + @pytest.mark.parametrize("trailing_slash_in_mounts", ["/", ""]) + def test_already_mounted_calls_callback( + self, trailing_slash_in_mounts, already_mounted_device_and_mountdict + ): + device, mount_dict = already_mounted_device_and_mountdict + mountpoint = mount_dict["mountpoint"] + mount_dict["mountpoint"] += trailing_slash_in_mounts + + callback = mock.Mock() + util.mount_cb(device, callback) + + # The mountpoint passed to callback should always have a trailing + # slash, regardless of the input + assert [mock.call(mountpoint + "/")] == callback.call_args_list + + def test_already_mounted_calls_callback_with_data( + self, already_mounted_device + ): + callback = mock.Mock() + util.mount_cb( + already_mounted_device, callback, data=mock.sentinel.data + ) + + assert [ + mock.call(mock.ANY, mock.sentinel.data) + ] == callback.call_args_list + + +@mock.patch("cloudinit.util.write_file") +class TestEnsureFile: + """Tests for ``cloudinit.util.ensure_file``.""" + + def test_parameters_passed_through(self, 
m_write_file):
+        """Test the parameters in the signature are passed to write_file."""
+        util.ensure_file(
+            mock.sentinel.path,
+            mode=mock.sentinel.mode,
+            preserve_mode=mock.sentinel.preserve_mode,
+        )
+
+        assert 1 == m_write_file.call_count
+        args, kwargs = m_write_file.call_args
+        assert (mock.sentinel.path,) == args
+        assert mock.sentinel.mode == kwargs["mode"]
+        assert mock.sentinel.preserve_mode == kwargs["preserve_mode"]
+
+    @pytest.mark.parametrize(
+        "kwarg,expected",
+        [
+            # Files should be world-readable by default
+            ("mode", 0o644),
+            # The previous behaviour of not preserving mode should be retained
+            ("preserve_mode", False),
+        ],
+    )
+    def test_defaults(self, m_write_file, kwarg, expected):
+        """Test that ensure_file defaults appropriately."""
+        util.ensure_file(mock.sentinel.path)
+
+        assert 1 == m_write_file.call_count
+        _args, kwargs = m_write_file.call_args
+        assert expected == kwargs[kwarg]
+
+    def test_static_parameters_are_passed(self, m_write_file):
+        """Test that the static write_file parameters are passed correctly."""
+        util.ensure_file(mock.sentinel.path)
+
+        assert 1 == m_write_file.call_count
+        _args, kwargs = m_write_file.call_args
+        assert "" == kwargs["content"]
+        assert "ab" == kwargs["omode"]
+
+
+@mock.patch("cloudinit.util.grp.getgrnam")
+@mock.patch("cloudinit.util.os.setgid")
+@mock.patch("cloudinit.util.os.umask")
+class TestRedirectOutputPreexecFn:
+    """This tests specifically the preexec_fn used in redirect_output."""
+
+    @pytest.fixture(params=["outfmt", "errfmt"])
+    def preexec_fn(self, request):
+        """A fixture to gather the preexec_fn used by redirect_output.
+
+        This enables simpler direct testing of it, and parameterises any tests
+        using it to cover both the stdout and stderr code paths.
+        """
+        test_string = "| piped output to invoke subprocess"
+        if request.param == "outfmt":
+            args = (test_string, None)
+        elif request.param == "errfmt":
+            args = (None, test_string)
+        with mock.patch("cloudinit.util.subprocess.Popen") as m_popen:
+            util.redirect_output(*args)
+
+        assert 1 == m_popen.call_count
+        _args, kwargs = m_popen.call_args
+        assert "preexec_fn" in kwargs, "preexec_fn not passed to Popen"
+        return kwargs["preexec_fn"]
+
+    def test_preexec_fn_sets_umask(
+        self, m_os_umask, _m_setgid, _m_getgrnam, preexec_fn
+    ):
+        """preexec_fn should set a mask that avoids world-readable files."""
+        preexec_fn()
+
+        assert [mock.call(0o037)] == m_os_umask.call_args_list
+
+    def test_preexec_fn_sets_group_id_if_adm_group_present(
+        self, _m_os_umask, m_setgid, m_getgrnam, preexec_fn
+    ):
+        """We should setgid to adm if present, so files are owned by them."""
+        fake_group = mock.Mock(gr_gid=mock.sentinel.gr_gid)
+        m_getgrnam.return_value = fake_group
+
+        preexec_fn()
+
+        assert [mock.call("adm")] == m_getgrnam.call_args_list
+        assert [mock.call(mock.sentinel.gr_gid)] == m_setgid.call_args_list
+
+    def test_preexec_fn_handles_absent_adm_group_gracefully(
+        self, _m_os_umask, m_setgid, m_getgrnam, preexec_fn
+    ):
+        """We should handle an absent adm group gracefully."""
+        m_getgrnam.side_effect = KeyError("getgrnam(): name not found: 'adm'")
+
+        preexec_fn()
+
+        assert 0 == m_setgid.call_count
+
+
+class FakeSelinux(object):
     def __init__(self, match_what):
         self.match_what = match_what
         self.restored = []
@@ -175,8 +1463,9 @@ def test_restorecon_if_possible_is_called(self):
         fake_se = FakeSelinux(my_file)
 
-        with mock.patch.object(importer, 'import_module',
-                               return_value=fake_se) as mockobj:
+        with mock.patch.object(
+            importer, 'import_module',
return_value=fake_se + ) as mockobj: with util.SeLinuxGuard(my_file) as is_on: self.assertTrue(is_on) @@ -261,8 +1550,9 @@ def test_keyval_str_to_dict(self): class TestGetCmdline(helpers.TestCase): def test_cmdline_reads_debug_env(self): - with mock.patch.dict("os.environ", - values={'DEBUG_PROC_CMDLINE': 'abcd 123'}): + with mock.patch.dict( + "os.environ", values={'DEBUG_PROC_CMDLINE': 'abcd 123'} + ): ret = util.get_cmdline() self.assertEqual("abcd 123", ret) @@ -279,52 +1569,68 @@ def test_nonallowed_returns_default(self): '''Any unallowed types result in returning default; log the issue.''' # for now, anything not in the allowed list just returns the default. myyaml = yaml.dump({'1': "one"}) - self.assertEqual(util.load_yaml(blob=myyaml, - default=self.mydefault, - allowed=(str,)), - self.mydefault) + self.assertEqual( + util.load_yaml( + blob=myyaml, default=self.mydefault, allowed=(str,) + ), + self.mydefault, + ) regex = re.compile( r'Yaml load allows \(<(class|type) \'str\'>,\) root types, but' - r' got dict') - self.assertTrue(regex.search(self.logs.getvalue()), - msg='Missing expected yaml load error') + r' got dict' + ) + self.assertTrue( + regex.search(self.logs.getvalue()), + msg='Missing expected yaml load error', + ) def test_bogus_scan_error_returns_default(self): '''On Yaml scan error, load_yaml returns the default and logs issue.''' badyaml = "1\n 2:" - self.assertEqual(util.load_yaml(blob=badyaml, - default=self.mydefault), - self.mydefault) + self.assertEqual( + util.load_yaml(blob=badyaml, default=self.mydefault), + self.mydefault, + ) self.assertIn( 'Failed loading yaml blob. Invalid format at line 2 column 3:' ' "mapping values are not allowed here', - self.logs.getvalue()) + self.logs.getvalue(), + ) def test_bogus_parse_error_returns_default(self): '''On Yaml parse error, load_yaml returns default and logs issue.''' badyaml = "{}}" - self.assertEqual(util.load_yaml(blob=badyaml, - default=self.mydefault), - self.mydefault) + self.assertEqual( + util.load_yaml(blob=badyaml, default=self.mydefault), + self.mydefault, + ) self.assertIn( 'Failed loading yaml blob. 
Invalid format at line 1 column 3:' " \"expected \'\', but found \'}\'", - self.logs.getvalue()) + self.logs.getvalue(), + ) def test_unsafe_types(self): # should not load complex types - unsafe_yaml = yaml.dump((1, 2, 3,)) - self.assertEqual(util.load_yaml(blob=unsafe_yaml, - default=self.mydefault), - self.mydefault) + unsafe_yaml = yaml.dump( + ( + 1, + 2, + 3, + ) + ) + self.assertEqual( + util.load_yaml(blob=unsafe_yaml, default=self.mydefault), + self.mydefault, + ) def test_python_unicode(self): # complex type of python/unicode is explicitly allowed myobj = {'1': "FOOBAR"} safe_yaml = yaml.dump(myobj) - self.assertEqual(util.load_yaml(blob=safe_yaml, - default=self.mydefault), - myobj) + self.assertEqual( + util.load_yaml(blob=safe_yaml, default=self.mydefault), myobj + ) def test_none_returns_default(self): """If yaml.load returns None, then default should be returned.""" @@ -332,13 +1638,16 @@ def test_none_returns_default(self): mdef = self.mydefault self.assertEqual( [(b, self.mydefault) for b in blobs], - [(b, util.load_yaml(blob=b, default=mdef)) for b in blobs]) + [(b, util.load_yaml(blob=b, default=mdef)) for b in blobs], + ) class TestMountinfoParsing(helpers.ResourceUsingTestCase): def test_invalid_mountinfo(self): - line = ("20 1 252:1 / / rw,relatime - ext4 /dev/mapper/vg0-root" - "rw,errors=remount-ro,data=ordered") + line = ( + "20 1 252:1 / / rw,relatime - ext4 /dev/mapper/vg0-root" + "rw,errors=remount-ro,data=ordered" + ) elements = line.split() for i in range(len(elements) + 1): lines = [' '.join(elements[0:i])] @@ -398,7 +1707,8 @@ def test_get_device_info_from_zpool(self, zpool_output, m_os): m_os.path.exists.return_value = True # mock subp command from util.get_mount_info_fs_on_zpool zpool_output.return_value = ( - helpers.readResource('zpool_status_simple.txt'), '' + helpers.readResource('zpool_status_simple.txt'), + '', ) # save function return values and do asserts ret = util.get_device_info_from_zpool('vmzroot') @@ -431,7 +1741,8 @@ def test_get_device_info_from_zpool_on_error(self, zpool_output, m_os): m_os.path.exists.return_value = True # mock subp command from util.get_mount_info_fs_on_zpool zpool_output.return_value = ( - helpers.readResource('zpool_status_simple.txt'), 'error' + helpers.readResource('zpool_status_simple.txt'), + 'error', ) # save function return values and do asserts ret = util.get_device_info_from_zpool('vmzroot') @@ -440,7 +1751,9 @@ def test_get_device_info_from_zpool_on_error(self, zpool_output, m_os): @mock.patch('cloudinit.subp.subp') def test_parse_mount_with_ext(self, mount_out): mount_out.return_value = ( - helpers.readResource('mount_parse_ext.txt'), '') + helpers.readResource('mount_parse_ext.txt'), + '', + ) # this one is valid and exists in mount_parse_ext.txt ret = util.parse_mount('/var') self.assertEqual(('/dev/mapper/vg00-lv_var', 'ext4', '/var'), ret) @@ -457,7 +1770,9 @@ def test_parse_mount_with_ext(self, mount_out): @mock.patch('cloudinit.subp.subp') def test_parse_mount_with_zfs(self, mount_out): mount_out.return_value = ( - helpers.readResource('mount_parse_zfs.txt'), '') + helpers.readResource('mount_parse_zfs.txt'), + '', + ) # this one is valid and exists in mount_parse_zfs.txt ret = util.parse_mount('/var') self.assertEqual(('vmzroot/ROOT/freebsd/var', 'zfs', '/var'), ret) @@ -470,20 +1785,21 @@ def test_parse_mount_with_zfs(self, mount_out): class TestIsX86(helpers.CiTestCase): - def test_is_x86_matches_x86_types(self): """is_x86 returns True if CPU architecture matches.""" matched_arches = ['x86_64', 
'i386', 'i586', 'i686'] for arch in matched_arches: self.assertTrue( - util.is_x86(arch), 'Expected is_x86 for arch "%s"' % arch) + util.is_x86(arch), 'Expected is_x86 for arch "%s"' % arch + ) def test_is_x86_unmatched_types(self): """is_x86 returns Fale on non-intel x86 architectures.""" unmatched_arches = ['ia64', '9000/800', 'arm64v71'] for arch in unmatched_arches: self.assertFalse( - util.is_x86(arch), 'Expected not is_x86 for arch "%s"' % arch) + util.is_x86(arch), 'Expected not is_x86 for arch "%s"' % arch + ) @mock.patch('cloudinit.util.os.uname') def test_is_x86_calls_uname_for_architecture(self, m_uname): @@ -493,7 +1809,6 @@ def test_is_x86_calls_uname_for_architecture(self, m_uname): class TestGetConfigLogfiles(helpers.CiTestCase): - def test_empty_cfg_returns_empty_list(self): """An empty config passed to get_config_logfiles returns empty list.""" self.assertEqual([], util.get_config_logfiles(None)) @@ -502,36 +1817,53 @@ def test_empty_cfg_returns_empty_list(self): def test_default_log_file_present(self): """When default_log_file is set get_config_logfiles finds it.""" self.assertEqual( - ['/my.log'], - util.get_config_logfiles({'def_log_file': '/my.log'})) + ['/my.log'], util.get_config_logfiles({'def_log_file': '/my.log'}) + ) def test_output_logs_parsed_when_teeing_files(self): """When output configuration is parsed when teeing files.""" self.assertEqual( ['/himom.log', '/my.log'], - sorted(util.get_config_logfiles({ - 'def_log_file': '/my.log', - 'output': {'all': '|tee -a /himom.log'}}))) + sorted( + util.get_config_logfiles( + { + 'def_log_file': '/my.log', + 'output': {'all': '|tee -a /himom.log'}, + } + ) + ), + ) def test_output_logs_parsed_when_redirecting(self): """When output configuration is parsed when redirecting to a file.""" self.assertEqual( ['/my.log', '/test.log'], - sorted(util.get_config_logfiles({ - 'def_log_file': '/my.log', - 'output': {'all': '>/test.log'}}))) + sorted( + util.get_config_logfiles( + { + 'def_log_file': '/my.log', + 'output': {'all': '>/test.log'}, + } + ) + ), + ) def test_output_logs_parsed_when_appending(self): """When output configuration is parsed when appending to a file.""" self.assertEqual( ['/my.log', '/test.log'], - sorted(util.get_config_logfiles({ - 'def_log_file': '/my.log', - 'output': {'all': '>> /test.log'}}))) + sorted( + util.get_config_logfiles( + { + 'def_log_file': '/my.log', + 'output': {'all': '>> /test.log'}, + } + ) + ), + ) class TestMultiLog(helpers.FilesystemMockingTestCase): - def _createConsole(self, root): os.mkdir(os.path.join(root, 'dev')) open(os.path.join(root, 'dev', 'console'), 'a').close() @@ -580,8 +1912,9 @@ def test_logs_go_to_log_if_given(self): log = mock.MagicMock() logged_string = 'something very important' util.multi_log(logged_string, log=log) - self.assertEqual([((mock.ANY, logged_string), {})], - log.log.call_args_list) + self.assertEqual( + [((mock.ANY, logged_string), {})], log.log.call_args_list + ) def test_newlines_stripped_from_log_call(self): log = mock.MagicMock() @@ -602,7 +1935,6 @@ def test_given_log_level_used(self): class TestMessageFromString(helpers.TestCase): - def test_unicode_not_messed_up(self): roundtripped = util.message_from_string('\n').as_string() self.assertNotIn('\x00', roundtripped) @@ -618,8 +1950,9 @@ def test_unicode_not_messed_up(self): ud = b"userdatablob" vd = b"vendordatablob" helpers.populate_dir( - self.tmp, {'meta-data': "key1: val1", 'user-data': ud, - 'vendor-data': vd}) + self.tmp, + {'meta-data': "key1: val1", 'user-data': ud, 'vendor-data': 
vd}, + ) sdir = self.tmp + os.path.sep (found_md, found_ud, found_vd) = util.read_seeded(sdir) @@ -638,7 +1971,8 @@ def test_unicode_not_messed_up(self): ud = b"userdatablob" vd = None helpers.populate_dir( - self.tmp, {'meta-data': "key1: val1", 'user-data': ud}) + self.tmp, {'meta-data': "key1: val1", 'user-data': ud} + ) sdir = self.tmp + os.path.sep (found_md, found_ud, found_vd) = util.read_seeded(sdir) @@ -649,6 +1983,7 @@ def test_unicode_not_messed_up(self): class TestEncode(helpers.TestCase): """Test the encoding functions""" + def test_decode_binary_plain_text_with_hex(self): blob = 'BOOTABLE_FLAG=\x80init=/bin/systemd' text = util.decode_binary(blob) @@ -657,12 +1992,14 @@ def test_decode_binary_plain_text_with_hex(self): class TestProcessExecutionError(helpers.TestCase): - template = ('{description}\n' - 'Command: {cmd}\n' - 'Exit code: {exit_code}\n' - 'Reason: {reason}\n' - 'Stdout: {stdout}\n' - 'Stderr: {stderr}') + template = ( + '{description}\n' + 'Command: {cmd}\n' + 'Exit code: {exit_code}\n' + 'Reason: {reason}\n' + 'Stdout: {stdout}\n' + 'Stderr: {stderr}' + ) empty_attr = '-' empty_description = 'Unexpected error while running command.' @@ -671,23 +2008,37 @@ def test_pexec_error_indent_text(self): msg = 'abc\ndef' formatted = 'abc\n{0}def'.format(' ' * 4) self.assertEqual(error._indent_text(msg, indent_level=4), formatted) - self.assertEqual(error._indent_text(msg.encode(), indent_level=4), - formatted.encode()) + self.assertEqual( + error._indent_text(msg.encode(), indent_level=4), + formatted.encode(), + ) self.assertIsInstance( - error._indent_text(msg.encode()), type(msg.encode())) + error._indent_text(msg.encode()), type(msg.encode()) + ) def test_pexec_error_type(self): self.assertIsInstance(subp.ProcessExecutionError(), IOError) def test_pexec_error_empty_msgs(self): error = subp.ProcessExecutionError() - self.assertTrue(all(attr == self.empty_attr for attr in - (error.stderr, error.stdout, error.reason))) + self.assertTrue( + all( + attr == self.empty_attr + for attr in (error.stderr, error.stdout, error.reason) + ) + ) self.assertEqual(error.description, self.empty_description) - self.assertEqual(str(error), self.template.format( - description=self.empty_description, exit_code=self.empty_attr, - reason=self.empty_attr, stdout=self.empty_attr, - stderr=self.empty_attr, cmd=self.empty_attr)) + self.assertEqual( + str(error), + self.template.format( + description=self.empty_description, + exit_code=self.empty_attr, + reason=self.empty_attr, + stdout=self.empty_attr, + stderr=self.empty_attr, + cmd=self.empty_attr, + ), + ) def test_pexec_error_single_line_msgs(self): stdout_msg = 'out out' @@ -695,33 +2046,46 @@ def test_pexec_error_single_line_msgs(self): cmd = 'test command' exit_code = 3 error = subp.ProcessExecutionError( - stdout=stdout_msg, stderr=stderr_msg, exit_code=3, cmd=cmd) - self.assertEqual(str(error), self.template.format( - description=self.empty_description, stdout=stdout_msg, - stderr=stderr_msg, exit_code=str(exit_code), - reason=self.empty_attr, cmd=cmd)) + stdout=stdout_msg, stderr=stderr_msg, exit_code=3, cmd=cmd + ) + self.assertEqual( + str(error), + self.template.format( + description=self.empty_description, + stdout=stdout_msg, + stderr=stderr_msg, + exit_code=str(exit_code), + reason=self.empty_attr, + cmd=cmd, + ), + ) def test_pexec_error_multi_line_msgs(self): # make sure bytes is converted handled properly when formatting stdout_msg = 'multi\nline\noutput message'.encode() stderr_msg = 'multi\nline\nerror message\n\n\n' error = 
subp.ProcessExecutionError( - stdout=stdout_msg, stderr=stderr_msg) + stdout=stdout_msg, stderr=stderr_msg + ) self.assertEqual( str(error), - '\n'.join(( - '{description}', - 'Command: {empty_attr}', - 'Exit code: {empty_attr}', - 'Reason: {empty_attr}', - 'Stdout: multi', - ' line', - ' output message', - 'Stderr: multi', - ' line', - ' error message', - )).format(description=self.empty_description, - empty_attr=self.empty_attr)) + '\n'.join( + ( + '{description}', + 'Command: {empty_attr}', + 'Exit code: {empty_attr}', + 'Reason: {empty_attr}', + 'Stdout: multi', + ' line', + ' output message', + 'Stderr: multi', + ' line', + ' error message', + ) + ).format( + description=self.empty_description, empty_attr=self.empty_attr + ), + ) class TestSystemIsSnappy(helpers.FilesystemMockingTestCase): @@ -758,7 +2122,8 @@ def test_snap_core_in_cmdline_is_snappy(self, m_cmdline): "BOOT_IMAGE=(loop)/kernel.img root=LABEL=writable " "snap_core=core_x1.snap snap_kernel=pc-kernel_x1.snap ro " "net.ifnames=0 init=/lib/systemd/systemd console=tty1 " - "console=ttyS0 panic=-1") + "console=ttyS0 panic=-1" + ) m_cmdline.return_value = cmdline self.assertTrue(util.system_is_snappy()) self.assertTrue(m_cmdline.call_count > 0) @@ -777,8 +2142,7 @@ def test_channel_ini_with_snappy_is_snappy(self, m_cmdline): m_cmdline.return_value = 'root=/dev/sda' root_d = self.tmp_dir() content = '\n'.join(["[Foo]", "source = 'ubuntu-core'", ""]) - helpers.populate_dir( - root_d, {'etc/system-image/channel.ini': content}) + helpers.populate_dir(root_d, {'etc/system-image/channel.ini': content}) self.reRoot(root_d) self.assertTrue(util.system_is_snappy()) @@ -788,7 +2152,8 @@ def test_system_image_config_dir_is_snappy(self, m_cmdline): m_cmdline.return_value = 'root=/dev/sda' root_d = self.tmp_dir() helpers.populate_dir( - root_d, {'etc/system-image/config.d/my.file': "_unused"}) + root_d, {'etc/system-image/config.d/my.file': "_unused"} + ) self.reRoot(root_d) self.assertTrue(util.system_is_snappy()) @@ -798,18 +2163,24 @@ def test_comments_handled_correctly(self): """Shell comments should be allowed in the content.""" self.assertEqual( {'key1': 'val1', 'key2': 'val2', 'key3': 'val3 #tricky'}, - util.load_shell_content('\n'.join([ - "#top of file comment", - "key1=val1 #this is a comment", - "# second comment", - 'key2="val2" # inlin comment' - '#badkey=wark', - 'key3="val3 #tricky"', - '']))) + util.load_shell_content( + '\n'.join( + [ + "#top of file comment", + "key1=val1 #this is a comment", + "# second comment", + 'key2="val2" # inlin comment#badkey=wark', + 'key3="val3 #tricky"', + '', + ] + ) + ), + ) class TestGetProcEnv(helpers.TestCase): """test get_proc_env.""" + null = b'\x00' simple1 = b'HOME=/' simple2 = b'PATH=/bin:/sbin' @@ -824,14 +2195,19 @@ def _val_decoded(self, blob, encoding='utf-8', errors='replace'): def test_non_utf8_in_environment(self, m_load_file): """env may have non utf-8 decodable content.""" content = self.null.join( - (self.bootflag, self.simple1, self.simple2, self.mixed)) + (self.bootflag, self.simple1, self.simple2, self.mixed) + ) m_load_file.return_value = content self.assertEqual( - {'BOOTABLE_FLAG': self._val_decoded(self.bootflag), - 'HOME': '/', 'PATH': '/bin:/sbin', - 'MIXED': self._val_decoded(self.mixed)}, - util.get_proc_env(1)) + { + 'BOOTABLE_FLAG': self._val_decoded(self.bootflag), + 'HOME': '/', + 'PATH': '/bin:/sbin', + 'MIXED': self._val_decoded(self.mixed), + }, + util.get_proc_env(1), + ) self.assertEqual(1, m_load_file.call_count) @mock.patch("cloudinit.util.load_file") 
@@ -843,7 +2219,8 @@ def test_encoding_none_returns_bytes(self, m_load_file): self.assertEqual( dict([t.split(b'=') for t in lines]), - util.get_proc_env(1, encoding=None)) + util.get_proc_env(1, encoding=None), + ) self.assertEqual(1, m_load_file.call_count) @mock.patch("cloudinit.util.load_file") @@ -852,8 +2229,8 @@ def test_all_utf8_encoded(self, m_load_file): content = self.null.join((self.simple1, self.simple2)) m_load_file.return_value = content self.assertEqual( - {'HOME': '/', 'PATH': '/bin:/sbin'}, - util.get_proc_env(1)) + {'HOME': '/', 'PATH': '/bin:/sbin'}, util.get_proc_env(1) + ) self.assertEqual(1, m_load_file.call_count) @mock.patch("cloudinit.util.load_file") @@ -871,14 +2248,15 @@ def test_get_proc_ppid(self): self.assertEqual(my_ppid, util.get_proc_ppid(my_pid)) -class TestKernelVersion(): +class TestKernelVersion: """test kernel version function""" params = [ ('5.6.19-300.fc32.x86_64', (5, 6)), ('4.15.0-101-generic', (4, 15)), ('3.10.0-1062.12.1.vz7.131.10', (3, 10)), - ('4.18.0-144.el8.x86_64', (4, 18))] + ('4.18.0-144.el8.x86_64', (4, 18)), + ] @mock.patch('os.uname') @pytest.mark.parametrize("uname_release,expected", params) @@ -892,29 +2270,27 @@ class TestFindDevs: def test_find_devs_with(self, m_subp): m_subp.return_value = ( '/dev/sda1: UUID="some-uuid" TYPE="ext4" PARTUUID="some-partid"', - '' + '', ) devlist = util.find_devs_with() assert devlist == [ - '/dev/sda1: UUID="some-uuid" TYPE="ext4" PARTUUID="some-partid"'] + '/dev/sda1: UUID="some-uuid" TYPE="ext4" PARTUUID="some-partid"' + ] devlist = util.find_devs_with("LABEL_FATBOOT=A_LABEL") assert devlist == [ - '/dev/sda1: UUID="some-uuid" TYPE="ext4" PARTUUID="some-partid"'] + '/dev/sda1: UUID="some-uuid" TYPE="ext4" PARTUUID="some-partid"' + ] @mock.patch('cloudinit.subp.subp') def test_find_devs_with_openbsd(self, m_subp): - m_subp.return_value = ( - 'cd0:,sd0:630d98d32b5d3759,sd1:,fd0:', '' - ) + m_subp.return_value = ('cd0:,sd0:630d98d32b5d3759,sd1:,fd0:', '') devlist = util.find_devs_with_openbsd() assert devlist == ['/dev/cd0a', '/dev/sd1i'] @mock.patch('cloudinit.subp.subp') def test_find_devs_with_openbsd_with_criteria(self, m_subp): - m_subp.return_value = ( - 'cd0:,sd0:630d98d32b5d3759,sd1:,fd0:', '' - ) + m_subp.return_value = ('cd0:,sd0:630d98d32b5d3759,sd1:,fd0:', '') devlist = util.find_devs_with_openbsd(criteria="TYPE=iso9660") assert devlist == ['/dev/cd0a'] @@ -923,7 +2299,8 @@ def test_find_devs_with_openbsd_with_criteria(self, m_subp): assert devlist == ['/dev/cd0a', '/dev/sd1i'] @pytest.mark.parametrize( - 'criteria,expected_devlist', ( + 'criteria,expected_devlist', + ( (None, ['/dev/msdosfs/EFISYS', '/dev/iso9660/config-2']), ('TYPE=iso9660', ['/dev/iso9660/config-2']), ('TYPE=vfat', ['/dev/msdosfs/EFISYS']), @@ -940,19 +2317,23 @@ def fake_glob(pattern): elif pattern == "/dev/iso9660/*": return iso9660 raise Exception + m_glob.side_effect = fake_glob devlist = util.find_devs_with_freebsd(criteria=criteria) assert devlist == expected_devlist @pytest.mark.parametrize( - 'criteria,expected_devlist', ( + 'criteria,expected_devlist', + ( (None, ['/dev/ld0', '/dev/dk0', '/dev/dk1', '/dev/cd0']), ('TYPE=iso9660', ['/dev/cd0']), ('TYPE=vfat', ["/dev/ld0", "/dev/dk0", "/dev/dk1"]), - ('LABEL_FATBOOT=A_LABEL', # lp: #1841466 - ['/dev/ld0', '/dev/dk0', '/dev/dk1', '/dev/cd0']), - ) + ( + 'LABEL_FATBOOT=A_LABEL', # lp: #1841466 + ['/dev/ld0', '/dev/dk0', '/dev/dk1', '/dev/cd0'], + ), + ), ) @mock.patch("cloudinit.subp.subp") def test_find_devs_with_netbsd(self, m_subp, criteria, 
expected_devlist): @@ -1000,21 +2381,24 @@ def test_find_devs_with_netbsd(self, m_subp, criteria, expected_devlist): assert devlist == expected_devlist @pytest.mark.parametrize( - 'criteria,expected_devlist', ( + 'criteria,expected_devlist', + ( (None, ['/dev/vbd0', '/dev/cd0', '/dev/acd0']), ('TYPE=iso9660', ['/dev/cd0', '/dev/acd0']), ('TYPE=vfat', ['/dev/vbd0']), - ('LABEL_FATBOOT=A_LABEL', # lp: #1841466 - ['/dev/vbd0', '/dev/cd0', '/dev/acd0']), - ) + ( + 'LABEL_FATBOOT=A_LABEL', # lp: #1841466 + ['/dev/vbd0', '/dev/cd0', '/dev/acd0'], + ), + ), ) @mock.patch("cloudinit.subp.subp") - def test_find_devs_with_dragonflybsd(self, m_subp, criteria, - expected_devlist): - m_subp.return_value = ( - 'md2 md1 cd0 vbd0 acd0 vn3 vn2 vn1 vn0 md0', '' - ) + def test_find_devs_with_dragonflybsd( + self, m_subp, criteria, expected_devlist + ): + m_subp.return_value = ('md2 md1 cd0 vbd0 acd0 vn3 vn2 vn1 vn0 md0', '') devlist = util.find_devs_with_dragonflybsd(criteria=criteria) assert devlist == expected_devlist + # vi: ts=4 expandtab diff --git a/cloudinit/tests/test_version.py b/tests/unittests/test_version.py similarity index 95% rename from cloudinit/tests/test_version.py rename to tests/unittests/test_version.py index 778a762cd64..ed66b09fed8 100644 --- a/cloudinit/tests/test_version.py +++ b/tests/unittests/test_version.py @@ -2,7 +2,7 @@ from unittest import mock -from cloudinit.tests.helpers import CiTestCase +from tests.unittests.helpers import CiTestCase from cloudinit import version diff --git a/tests/unittests/test_vmware/__init__.py b/tests/unittests/test_vmware/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/tests/unittests/util.py b/tests/unittests/util.py index 383f5f5c59c..2204c28f7fb 100644 --- a/tests/unittests/util.py +++ b/tests/unittests/util.py @@ -15,7 +15,7 @@ def get_cloud(distro=None, paths=None, sys_cfg=None, metadata=None): """ paths = paths or helpers.Paths({}) sys_cfg = sys_cfg or {} - cls = distros.fetch(distro) if distro else TestingDistro + cls = distros.fetch(distro) if distro else MockDistro mydist = cls(distro, sys_cfg, paths) myds = DataSourceTesting(sys_cfg, mydist, paths) if metadata: @@ -49,14 +49,14 @@ def cloud_name(self): return 'testing' -class TestingDistro(distros.Distro): - # TestingDistro is here to test base Distro class implementations +class MockDistro(distros.Distro): + # MockDistro is here to test base Distro class implementations def __init__(self, name="testingdistro", cfg=None, paths=None): if not cfg: cfg = {} if not paths: paths = {} - super(TestingDistro, self).__init__(name, cfg, paths) + super(MockDistro, self).__init__(name, cfg, paths) def install_packages(self, pkglist): pass diff --git a/tox.ini b/tox.ini index 874d3f20b12..ff88826662e 100644 --- a/tox.ini +++ b/tox.ini @@ -3,7 +3,7 @@ envlist = py3, xenial-dev, flake8, pylint recreate = True [testenv] -commands = {envpython} -m pytest {posargs:tests/unittests cloudinit} +commands = {envpython} -m pytest {posargs:tests/unittests} setenv = LC_ALL = en_US.utf-8 passenv= @@ -37,7 +37,7 @@ deps = commands = {envpython} -m pytest \ --durations 10 \ {posargs:--cov=cloudinit --cov-branch \ - tests/unittests cloudinit} + tests/unittests} [testenv:py27] basepython = python2.7 @@ -86,7 +86,7 @@ deps = # [testenv:xenial-dev]. See the comment there for details. 
 commands =
     python ./tools/pipremove jsonschema
-    python -m pytest {posargs:tests/unittests cloudinit}
+    python -m pytest {posargs:tests/unittests}
 basepython = python3
 deps =
     # Refer to the comment in [xenial-shared-deps] for details
@@ -104,7 +104,7 @@ deps =
 # changes here are reflected in [testenv:xenial].
 commands =
     python ./tools/pipremove jsonschema
-    python -m pytest {posargs:tests/unittests cloudinit}
+    python -m pytest {posargs:tests/unittests}
 basepython = {[testenv:xenial]basepython}
 deps =
     # Refer to the comment in [xenial-shared-deps] for details
@@ -163,7 +163,7 @@ setenv =

[pytest]
# TODO: s/--strict/--strict-markers/ once xenial support is dropped
-testpaths = cloudinit tests/unittests
+testpaths = tests/unittests
 addopts = --strict
 log_format = %(asctime)s %(levelname)-9s %(name)s:%(filename)s:%(lineno)d %(message)s
 log_date_format = %Y-%m-%d %H:%M:%S

From 6e39613da5cee66f2162f53c53358f2516b904d0 Mon Sep 17 00:00:00 2001
From: James Falcon
Date: Mon, 6 Dec 2021 11:06:13 -0600
Subject: [PATCH 0009/2310] docs: pin mistune dependency (#1134)

---
 doc-requirements.txt | 1 +
 1 file changed, 1 insertion(+)

diff --git a/doc-requirements.txt b/doc-requirements.txt
index b8d6ba90240..5bcac862361 100644
--- a/doc-requirements.txt
+++ b/doc-requirements.txt
@@ -6,3 +6,4 @@ pyyaml

 # Indirect dependencies
 docutils<0.18
+mistune<2.0.0 # https://github.com/miyakogi/m2r/issues/66

From 0e25076b34fa995161b83996e866c0974cee431f Mon Sep 17 00:00:00 2001
From: Emanuele Giuseppe Esposito
Date: Mon, 6 Dec 2021 18:34:26 +0100
Subject: [PATCH 0010/2310] cloudinit/net: handle two different routes for the
 same ip (#1124)

If the DHCP server is configured to hand out a lease like this:

$ cat /var/tmp/cloud-init/cloud-init-dhcp-f0rie5tm/dhcp.leases
lease {
   ...
   option classless-static-routes 31.169.254.169.254 0.0.0.0,31.169.254.169.254 10.112.143.127,22.10.112.140 0.0.0.0,0 10.112.140.1;
   ...
}

cloud-init fails to configure the routes via 'ip route add' because
there are two different routes for 169.254.169.254:

$ ip -4 route add 192.168.1.1/32 via 0.0.0.0 dev eth0
$ ip -4 route add 192.168.1.1/32 via 10.112.140.248 dev eth0

But NetworkManager can handle such a scenario successfully, as it uses
"ip route append".
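For illustration, the failure is easy to reproduce by hand (a sketch using
the example route above; the exact error text can vary across iproute2
versions). The kernel accepts the first route but rejects a second 'add'
for the same prefix, even though the next hop differs:

$ ip -4 route add 192.168.1.1/32 via 0.0.0.0 dev eth0
$ ip -4 route add 192.168.1.1/32 via 10.112.140.248 dev eth0
RTNETLINK answers: File exists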
So change cloud-init to also use "ip route append" to fix the issue:

$ ip -4 route append 192.168.1.1/32 via 0.0.0.0 dev eth0
$ ip -4 route append 192.168.1.1/32 via 10.112.140.248 dev eth0

Signed-off-by: Emanuele Giuseppe Esposito
RHBZ: #2003231
---
 cloudinit/net/__init__.py        | 2 +-
 tests/unittests/net/test_init.py | 6 +++---
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py
index 7558745f2c8..f81f3a7b681 100644
--- a/cloudinit/net/__init__.py
+++ b/cloudinit/net/__init__.py
@@ -1157,7 +1157,7 @@ def _bringup_static_routes(self):
             if gateway != "0.0.0.0":
                 via_arg = ['via', gateway]
             subp.subp(
-                ['ip', '-4', 'route', 'add', net_address] + via_arg +
+                ['ip', '-4', 'route', 'append', net_address] + via_arg +
                 ['dev', self.interface], capture=True)
             self.cleanup_cmds.insert(
                 0, ['ip', '-4', 'route', 'del', net_address] + via_arg +
diff --git a/tests/unittests/net/test_init.py b/tests/unittests/net/test_init.py
index 666e8425961..82854ab322a 100644
--- a/tests/unittests/net/test_init.py
+++ b/tests/unittests/net/test_init.py
@@ -723,13 +723,13 @@ def test_ephemeral_ipv4_network_with_rfc3442_static_routes(self, m_subp):
                 ['ip', '-family', 'inet', 'link', 'set', 'dev', 'eth0', 'up'],
                 capture=True),
             mock.call(
-                ['ip', '-4', 'route', 'add', '192.168.2.1/32',
+                ['ip', '-4', 'route', 'append', '192.168.2.1/32',
                  'dev', 'eth0'], capture=True),
             mock.call(
-                ['ip', '-4', 'route', 'add', '169.254.169.254/32',
+                ['ip', '-4', 'route', 'append', '169.254.169.254/32',
                  'via', '192.168.2.1', 'dev', 'eth0'], capture=True),
             mock.call(
-                ['ip', '-4', 'route', 'add', '0.0.0.0/0',
+                ['ip', '-4', 'route', 'append', '0.0.0.0/0',
                  'via', '192.168.2.1', 'dev', 'eth0'], capture=True)]
         expected_teardown_calls = [
             mock.call(

From f428ed1611bdb685598832dd42495f0bcda40ec4 Mon Sep 17 00:00:00 2001
From: Brett Holman
Date: Mon, 6 Dec 2021 10:35:25 -0700
Subject: [PATCH 0011/2310] update dead link (#1133)

Fix dead link to "contributing" page in README
---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index e96541ef8ac..27098b1188b 100644
--- a/README.md
+++ b/README.md
@@ -43,7 +43,7 @@ get in contact with that distribution and send them our way!

 ## To start developing cloud-init

-Checkout the [hacking](https://cloudinit.readthedocs.io/en/latest/topics/hacking.html)
+Checkout the [contributing](https://cloudinit.readthedocs.io/en/latest/topics/contributing.html)
 document that outlines the steps necessary to develop, test, and submit code.

 ## Daily builds

From bedac77e9348e7a54c0ec364fb61df90cd893972 Mon Sep 17 00:00:00 2001
From: Brett Holman
Date: Mon, 6 Dec 2021 15:27:12 -0700
Subject: [PATCH 0012/2310] Add Strict Metaschema Validation (#1101)

Improve schema validation. This adds strict validation of config module
definitions at testing time, with plumbing included for future runtime
validation. This eliminates a class of bugs caused by incorrect schema
definitions that jsonschema would otherwise interpret as
"additionalProperties" and silently ignore.
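As an illustration of that failure class, here is a minimal standalone
sketch (not code from this patch; the misspelled "propertes" keyword is a
hypothetical example) showing how the default draft-04 metaschema lets a
typo'd schema pass, and how forbidding additional properties on the
metaschema, as this patch does, catches it:

    from copy import deepcopy

    from jsonschema import Draft4Validator, validate
    from jsonschema.exceptions import ValidationError

    # Typo: "propertes" should be "properties". The default draft-04
    # metaschema allows unknown keywords, so this schema "validates"
    # and the intended constraint on "mode" is silently never applied.
    bad_schema = {
        "type": "object",
        "propertes": {"mode": {"type": "string"}},
    }
    Draft4Validator.check_schema(bad_schema)  # passes without complaint

    # Strict metaschema: reject unknown keywords outright.
    strict_metaschema = deepcopy(Draft4Validator.META_SCHEMA)
    strict_metaschema["additionalProperties"] = False
    try:
        validate(bad_schema, strict_metaschema, cls=Draft4Validator)
    except ValidationError as err:
        print("schema bug caught:", err.message)  # 'propertes' unexpected

The changes in detail: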
- Add strict meta-schema for jsonschema unit test validation - Separate schema from module metadata structure - Improve type annotations for various functions and data types Cleanup: - Remove unused jsonschema "required" elements - Eliminate manual memoization in schema.py:get_schema(), reference module.__doc__ directly --- cloudinit/cmd/clean.py | 8 +- cloudinit/cmd/cloud_id.py | 6 +- cloudinit/config/cc_apk_configure.py | 11 +- cloudinit/config/cc_apt_configure.py | 11 +- cloudinit/config/cc_bootcmd.py | 11 +- cloudinit/config/cc_chef.py | 11 +- cloudinit/config/cc_install_hotplug.py | 9 +- cloudinit/config/cc_locale.py | 9 +- cloudinit/config/cc_ntp.py | 11 +- cloudinit/config/cc_resizefs.py | 10 +- cloudinit/config/cc_runcmd.py | 11 +- cloudinit/config/cc_snap.py | 11 +- cloudinit/config/cc_ubuntu_advantage.py | 10 +- cloudinit/config/cc_ubuntu_drivers.py | 10 +- cloudinit/config/cc_write_files.py | 10 +- cloudinit/config/cc_write_files_deferred.py | 41 ++- cloudinit/config/cc_zypper_add_repo.py | 10 +- cloudinit/config/schema.py | 288 ++++++++++++----- cloudinit/importer.py | 24 +- cloudinit/util.py | 17 +- doc/rtd/conf.py | 13 +- tests/unittests/cmd/test_clean.py | 2 +- tests/unittests/cmd/test_cloud_id.py | 4 +- tests/unittests/config/test_schema.py | 339 +++++++++++++++----- tests/unittests/test_cli.py | 105 +++++- 25 files changed, 701 insertions(+), 291 deletions(-) diff --git a/cloudinit/cmd/clean.py b/cloudinit/cmd/clean.py index 928a8eea379..3502dd56956 100644 --- a/cloudinit/cmd/clean.py +++ b/cloudinit/cmd/clean.py @@ -11,11 +11,9 @@ from cloudinit.stages import Init from cloudinit.subp import (ProcessExecutionError, subp) -from cloudinit.util import (del_dir, del_file, get_config_logfiles, is_link) - - -def error(msg): - sys.stderr.write("ERROR: " + msg + "\n") +from cloudinit.util import ( + del_dir, del_file, get_config_logfiles, is_link, error +) def get_parser(parser=None): diff --git a/cloudinit/cmd/cloud_id.py b/cloudinit/cmd/cloud_id.py index 97608921b57..0cdc96754c5 100755 --- a/cloudinit/cmd/cloud_id.py +++ b/cloudinit/cmd/cloud_id.py @@ -6,6 +6,7 @@ import json import sys +from cloudinit.util import error from cloudinit.sources import ( INSTANCE_JSON_FILE, METADATA_UNKNOWN, canonical_cloud_id) @@ -40,11 +41,6 @@ def get_parser(parser=None): return parser -def error(msg): - sys.stderr.write('ERROR: %s\n' % msg) - return 1 - - def handle_args(name, args): """Handle calls to 'cloud-id' cli. 
diff --git a/cloudinit/config/cc_apk_configure.py b/cloudinit/config/cc_apk_configure.py index 84d7a0b61b1..d227a58df00 100644 --- a/cloudinit/config/cc_apk_configure.py +++ b/cloudinit/config/cc_apk_configure.py @@ -12,8 +12,7 @@ from cloudinit import temp_utils from cloudinit import templater from cloudinit import util -from cloudinit.config.schema import ( - get_schema_doc, validate_cloudconfig_schema) +from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit.settings import PER_INSTANCE LOG = logging.getLogger(__name__) @@ -56,7 +55,7 @@ frequency = PER_INSTANCE distros = ['alpine'] -schema = { +meta = { 'id': 'cc_apk_configure', 'name': 'APK Configure', 'title': 'Configure apk repositories file', @@ -95,6 +94,9 @@ """), ], 'frequency': frequency, +} + +schema = { 'type': 'object', 'properties': { 'apk_repos': { @@ -171,14 +173,13 @@ """) } }, - 'required': [], 'minProperties': 1, # Either preserve_repositories or alpine_repo 'additionalProperties': False, } } } -__doc__ = get_schema_doc(schema) +__doc__ = get_meta_doc(meta, schema) def handle(name, cfg, cloud, log, _args): diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py index 86d0feae31c..2e844c2cb20 100644 --- a/cloudinit/config/cc_apt_configure.py +++ b/cloudinit/config/cc_apt_configure.py @@ -14,8 +14,7 @@ import pathlib from textwrap import dedent -from cloudinit.config.schema import ( - get_schema_doc, validate_cloudconfig_schema) +from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit import gpg from cloudinit import log as logging from cloudinit import subp @@ -75,7 +74,8 @@ } } } -schema = { + +meta = { 'id': 'cc_apt_configure', 'name': 'Apt Configure', 'title': 'Configure apt for the user', @@ -155,6 +155,9 @@ ------END PGP PUBLIC KEY BLOCK-------""")], 'frequency': frequency, +} + +schema = { 'type': 'object', 'properties': { 'apt': { @@ -398,7 +401,7 @@ } } -__doc__ = get_schema_doc(schema) +__doc__ = get_meta_doc(meta, schema) # place where apt stores cached repository data diff --git a/cloudinit/config/cc_bootcmd.py b/cloudinit/config/cc_bootcmd.py index 246e4497253..06f7a26e5f2 100644 --- a/cloudinit/config/cc_bootcmd.py +++ b/cloudinit/config/cc_bootcmd.py @@ -12,8 +12,7 @@ import os from textwrap import dedent -from cloudinit.config.schema import ( - get_schema_doc, validate_cloudconfig_schema) +from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit.settings import PER_ALWAYS from cloudinit import temp_utils from cloudinit import subp @@ -29,7 +28,7 @@ distros = ['all'] -schema = { +meta = { 'id': 'cc_bootcmd', 'name': 'Bootcmd', 'title': 'Run arbitrary commands early in the boot process', @@ -57,6 +56,9 @@ - [ cloud-init-per, once, mymkfs, mkfs, /dev/vdb ] """)], 'frequency': PER_ALWAYS, +} + +schema = { 'type': 'object', 'properties': { 'bootcmd': { @@ -69,12 +71,11 @@ 'additionalItems': False, # Reject items of non-string non-list 'additionalProperties': False, 'minItems': 1, - 'required': [], } } } -__doc__ = get_schema_doc(schema) # Supplement python help() +__doc__ = get_meta_doc(meta, schema) # Supplement python help() def handle(name, cfg, cloud, log, _args): diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py index 7b20222e5c1..ed734d1cdc0 100644 --- a/cloudinit/config/cc_chef.py +++ b/cloudinit/config/cc_chef.py @@ -14,8 +14,7 @@ from textwrap import dedent from cloudinit import subp -from cloudinit.config.schema import ( - get_schema_doc, 
validate_cloudconfig_schema) +from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit import templater from cloudinit import temp_utils from cloudinit import url_helper @@ -89,7 +88,8 @@ frequency = PER_ALWAYS distros = ["all"] -schema = { + +meta = { 'id': 'cc_chef', 'name': 'Chef', 'title': 'module that configures, starts and installs chef', @@ -126,6 +126,9 @@ ssl_verify_mode: :verify_peer validation_name: yourorg-validator""")], 'frequency': frequency, +} + +schema = { 'type': 'object', 'properties': { 'chef': { @@ -357,7 +360,7 @@ } } -__doc__ = get_schema_doc(schema) +__doc__ = get_meta_doc(meta, schema) def post_run_chef(chef_cfg, log): diff --git a/cloudinit/config/cc_install_hotplug.py b/cloudinit/config/cc_install_hotplug.py index da98c409dfe..9b4075cc24c 100644 --- a/cloudinit/config/cc_install_hotplug.py +++ b/cloudinit/config/cc_install_hotplug.py @@ -6,7 +6,7 @@ from cloudinit import util from cloudinit import subp from cloudinit import stages -from cloudinit.config.schema import get_schema_doc, validate_cloudconfig_schema +from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit.distros import ALL_DISTROS from cloudinit.event import EventType, EventScope from cloudinit.settings import PER_INSTANCE @@ -15,7 +15,7 @@ frequency = PER_INSTANCE distros = [ALL_DISTROS] -schema = { +meta = { "id": "cc_install_hotplug", "name": "Install Hotplug", "title": "Install hotplug if supported and enabled", @@ -49,6 +49,9 @@ """), ], "frequency": frequency, +} + +schema = { "type": "object", "properties": { "updates": { @@ -81,7 +84,7 @@ } } -__doc__ = get_schema_doc(schema) +__doc__ = get_meta_doc(meta, schema) HOTPLUG_UDEV_PATH = "/etc/udev/rules.d/10-cloud-init-hook-hotplug.rules" diff --git a/cloudinit/config/cc_locale.py b/cloudinit/config/cc_locale.py index 4f8b7bf642a..7fed9abd875 100644 --- a/cloudinit/config/cc_locale.py +++ b/cloudinit/config/cc_locale.py @@ -11,13 +11,13 @@ from textwrap import dedent from cloudinit import util -from cloudinit.config.schema import get_schema_doc, validate_cloudconfig_schema +from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit.settings import PER_INSTANCE frequency = PER_INSTANCE distros = ['all'] -schema = { +meta = { 'id': 'cc_locale', 'name': 'Locale', 'title': 'Set system locale', @@ -39,6 +39,9 @@ """), ], 'frequency': frequency, +} + +schema = { 'type': 'object', 'properties': { 'locale': { @@ -57,7 +60,7 @@ }, } -__doc__ = get_schema_doc(schema) # Supplement python help() +__doc__ = get_meta_doc(meta, schema) # Supplement python help() def handle(name, cfg, cloud, log, args): diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py index c3aee798487..9c085a04c68 100644 --- a/cloudinit/config/cc_ntp.py +++ b/cloudinit/config/cc_ntp.py @@ -16,7 +16,7 @@ from cloudinit import type_utils from cloudinit import subp from cloudinit import util -from cloudinit.config.schema import get_schema_doc, validate_cloudconfig_schema +from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit.settings import PER_INSTANCE LOG = logging.getLogger(__name__) @@ -140,7 +140,7 @@ # configuration options before actually attempting to deploy with said # configuration. 
-schema = { +meta = { 'id': 'cc_ntp', 'name': 'NTP', 'title': 'enable and configure ntp', @@ -190,6 +190,9 @@ - ntp.ubuntu.com - 192.168.23.2""")], 'frequency': PER_INSTANCE, +} + +schema = { 'type': 'object', 'properties': { 'ntp': { @@ -289,12 +292,10 @@ }, # Don't use REQUIRED_NTP_CONFIG_KEYS to allow for override # of builtin client values. - 'required': [], 'minProperties': 1, # If we have config, define something 'additionalProperties': False }, }, - 'required': [], 'additionalProperties': False } } @@ -303,7 +304,7 @@ 'check_exe', 'confpath', 'packages', 'service_name']) -__doc__ = get_schema_doc(schema) # Supplement python help() +__doc__ = get_meta_doc(meta, schema) # Supplement python help() def distro_ntp_client_configs(distro): diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py index 990a6939248..00bb7ae704a 100644 --- a/cloudinit/config/cc_resizefs.py +++ b/cloudinit/config/cc_resizefs.py @@ -13,8 +13,7 @@ import stat from textwrap import dedent -from cloudinit.config.schema import ( - get_schema_doc, validate_cloudconfig_schema) +from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit.settings import PER_ALWAYS from cloudinit import subp from cloudinit import util @@ -24,7 +23,7 @@ frequency = PER_ALWAYS distros = ['all'] -schema = { +meta = { 'id': 'cc_resizefs', 'name': 'Resizefs', 'title': 'Resize filesystem', @@ -42,6 +41,9 @@ 'examples': [ 'resize_rootfs: false # disable root filesystem resize operation'], 'frequency': PER_ALWAYS, +} + +schema = { 'type': 'object', 'properties': { 'resize_rootfs': { @@ -52,7 +54,7 @@ } } -__doc__ = get_schema_doc(schema) # Supplement python help() +__doc__ = get_meta_doc(meta, schema) # Supplement python help() def _resize_btrfs(mount_point, devpth): diff --git a/cloudinit/config/cc_runcmd.py b/cloudinit/config/cc_runcmd.py index 15960c7d3d6..2f5e02cb076 100644 --- a/cloudinit/config/cc_runcmd.py +++ b/cloudinit/config/cc_runcmd.py @@ -8,8 +8,7 @@ """Runcmd: run arbitrary commands at rc.local with output to the console""" -from cloudinit.config.schema import ( - get_schema_doc, validate_cloudconfig_schema) +from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit.distros import ALL_DISTROS from cloudinit.settings import PER_INSTANCE from cloudinit import util @@ -26,7 +25,7 @@ distros = [ALL_DISTROS] -schema = { +meta = { 'id': 'cc_runcmd', 'name': 'Runcmd', 'title': 'Run arbitrary commands', @@ -58,6 +57,9 @@ - [ wget, "http://example.org", -O, /tmp/index.html ] """)], 'frequency': PER_INSTANCE, +} + +schema = { 'type': 'object', 'properties': { 'runcmd': { @@ -71,12 +73,11 @@ 'additionalItems': False, # Reject items of non-string non-list 'additionalProperties': False, 'minItems': 1, - 'required': [], } } } -__doc__ = get_schema_doc(schema) # Supplement python help() +__doc__ = get_meta_doc(meta, schema) # Supplement python help() def handle(name, cfg, cloud, log, _args): diff --git a/cloudinit/config/cc_snap.py b/cloudinit/config/cc_snap.py index 20ed7d2f031..21f30b5739b 100644 --- a/cloudinit/config/cc_snap.py +++ b/cloudinit/config/cc_snap.py @@ -8,8 +8,7 @@ from textwrap import dedent from cloudinit import log as logging -from cloudinit.config.schema import ( - get_schema_doc, validate_cloudconfig_schema) +from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit.settings import PER_INSTANCE from cloudinit.subp import prepend_base_command from cloudinit import subp @@ -21,7 +20,7 @@ LOG = 
logging.getLogger(__name__) -schema = { +meta = { 'id': 'cc_snap', 'name': 'Snap', 'title': 'Install, configure and manage snapd and snap packages', @@ -103,6 +102,9 @@ signed_assertion_blob_here """)], 'frequency': PER_INSTANCE, +} + +schema = { 'type': 'object', 'properties': { 'snap': { @@ -139,13 +141,12 @@ } }, 'additionalProperties': False, # Reject keys not in schema - 'required': [], 'minProperties': 1 } } } -__doc__ = get_schema_doc(schema) # Supplement python help() +__doc__ = get_meta_doc(meta, schema) # Supplement python help() SNAP_CMD = "snap" ASSERTIONS_FILE = "/var/lib/cloud/instance/snapd.assertions" diff --git a/cloudinit/config/cc_ubuntu_advantage.py b/cloudinit/config/cc_ubuntu_advantage.py index d61dc65518a..831a92a2656 100644 --- a/cloudinit/config/cc_ubuntu_advantage.py +++ b/cloudinit/config/cc_ubuntu_advantage.py @@ -4,8 +4,7 @@ from textwrap import dedent -from cloudinit.config.schema import ( - get_schema_doc, validate_cloudconfig_schema) +from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit import log as logging from cloudinit.settings import PER_INSTANCE from cloudinit import subp @@ -16,7 +15,7 @@ distros = ['ubuntu'] -schema = { +meta = { 'id': 'cc_ubuntu_advantage', 'name': 'Ubuntu Advantage', 'title': 'Configure Ubuntu Advantage support services', @@ -61,6 +60,9 @@ - fips """)], 'frequency': PER_INSTANCE, +} + +schema = { 'type': 'object', 'properties': { 'ubuntu_advantage': { @@ -82,7 +84,7 @@ } } -__doc__ = get_schema_doc(schema) # Supplement python help() +__doc__ = get_meta_doc(meta, schema) # Supplement python help() LOG = logging.getLogger(__name__) diff --git a/cloudinit/config/cc_ubuntu_drivers.py b/cloudinit/config/cc_ubuntu_drivers.py index 2d1d2b321fc..7f617efe888 100644 --- a/cloudinit/config/cc_ubuntu_drivers.py +++ b/cloudinit/config/cc_ubuntu_drivers.py @@ -5,8 +5,7 @@ import os from textwrap import dedent -from cloudinit.config.schema import ( - get_schema_doc, validate_cloudconfig_schema) +from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit import log as logging from cloudinit.settings import PER_INSTANCE from cloudinit import subp @@ -18,7 +17,7 @@ frequency = PER_INSTANCE distros = ['ubuntu'] -schema = { +meta = { 'id': 'cc_ubuntu_drivers', 'name': 'Ubuntu Drivers', 'title': 'Interact with third party drivers in Ubuntu.', @@ -32,6 +31,9 @@ license-accepted: true """)], 'frequency': frequency, +} + +schema = { 'type': 'object', 'properties': { 'drivers': { @@ -64,7 +66,7 @@ OLD_UBUNTU_DRIVERS_STDERR_NEEDLE = ( "ubuntu-drivers: error: argument : invalid choice: 'install'") -__doc__ = get_schema_doc(schema) # Supplement python help() +__doc__ = get_meta_doc(meta, schema) # Supplement python help() # Use a debconf template to configure a global debconf variable diff --git a/cloudinit/config/cc_write_files.py b/cloudinit/config/cc_write_files.py index 41c75fa2de0..55f8c68468b 100644 --- a/cloudinit/config/cc_write_files.py +++ b/cloudinit/config/cc_write_files.py @@ -10,8 +10,7 @@ import os from textwrap import dedent -from cloudinit.config.schema import ( - get_schema_doc, validate_cloudconfig_schema) +from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit import log as logging from cloudinit.settings import PER_INSTANCE from cloudinit import util @@ -38,7 +37,7 @@ 'gz', 'gzip', 'gz+base64', 'gzip+base64', 'gz+b64', 'gzip+b64', 'b64', 'base64'] -schema = { +meta = { 'id': 'cc_write_files', 'name': 'Write Files', 'title': 
'write arbitrary files', @@ -111,6 +110,9 @@ defer: true """)], 'frequency': frequency, +} + +schema = { 'type': 'object', 'properties': { 'write_files': { @@ -187,7 +189,7 @@ } } -__doc__ = get_schema_doc(schema) # Supplement python help() +__doc__ = get_meta_doc(meta, schema) # Supplement python help() def handle(name, cfg, _cloud, log, _args): diff --git a/cloudinit/config/cc_write_files_deferred.py b/cloudinit/config/cc_write_files_deferred.py index 0c75aa22890..4fc8659cf15 100644 --- a/cloudinit/config/cc_write_files_deferred.py +++ b/cloudinit/config/cc_write_files_deferred.py @@ -4,34 +4,31 @@ """Defer writing certain files""" -from textwrap import dedent - from cloudinit.config.schema import validate_cloudconfig_schema from cloudinit import util from cloudinit.config.cc_write_files import ( schema as write_files_schema, write_files, DEFAULT_DEFER) +# meta is not used in this module, but it remains as code documentation +# +# id: cc_write_files_deferred' +# name: 'Write Deferred Files +# distros: ['all'], +# frequency: PER_INSTANCE, +# title: +# write certain files, whose creation as been deferred, during +# final stage +# description: +# This module is based on `'Write Files' `__, and +# will handle all files from the write_files list, that have been +# marked as deferred and thus are not being processed by the +# write-files module. +# +# *Please note that his module is not exposed to the user through +# its own dedicated top-level directive.* + +schema = write_files_schema -schema = util.mergemanydict([ - { - 'id': 'cc_write_files_deferred', - 'name': 'Write Deferred Files', - 'title': dedent("""\ - write certain files, whose creation as been deferred, during - final stage - """), - 'description': dedent("""\ - This module is based on `'Write Files' `__, and - will handle all files from the write_files list, that have been - marked as deferred and thus are not being processed by the - write-files module. 
- - *Please note that his module is not exposed to the user through - its own dedicated top-level directive.* - """) - }, - write_files_schema -]) # Not exposed, because related modules should document this behaviour __doc__ = None diff --git a/cloudinit/config/cc_zypper_add_repo.py b/cloudinit/config/cc_zypper_add_repo.py index 05855b0c580..bf1638fba77 100644 --- a/cloudinit/config/cc_zypper_add_repo.py +++ b/cloudinit/config/cc_zypper_add_repo.py @@ -9,14 +9,14 @@ import os from textwrap import dedent -from cloudinit.config.schema import get_schema_doc +from cloudinit.config.schema import get_meta_doc from cloudinit import log as logging from cloudinit.settings import PER_ALWAYS from cloudinit import util distros = ['opensuse', 'sles'] -schema = { +meta = { 'id': 'cc_zypper_add_repo', 'name': 'ZypperAddRepo', 'title': 'Configure zypper behavior and add zypper repositories', @@ -51,6 +51,9 @@ # any setting in /etc/zypp/zypp.conf """)], 'frequency': PER_ALWAYS, +} + +schema = { 'type': 'object', 'properties': { 'zypper': { @@ -86,14 +89,13 @@ /etc/zypp/zypp.conf'""") } }, - 'required': [], 'minProperties': 1, # Either config or repo must be provided 'additionalProperties': False, # only repos and config allowed } } } -__doc__ = get_schema_doc(schema) # Supplement python help() +__doc__ = get_meta_doc(meta, schema) # Supplement python help() LOG = logging.getLogger(__name__) diff --git a/cloudinit/config/schema.py b/cloudinit/config/schema.py index 456bab2c090..d32b7c01f8f 100644 --- a/cloudinit/config/schema.py +++ b/cloudinit/config/schema.py @@ -3,19 +3,22 @@ from cloudinit.cmd.devel import read_cfg_paths from cloudinit import importer -from cloudinit.util import find_modules, load_file +from cloudinit.importer import MetaSchema +from cloudinit.util import find_modules, load_file, error import argparse from collections import defaultdict from copy import deepcopy +from functools import partial import logging import os import re import sys import yaml +error = partial(error, sys_exit=True) + _YAML_MAP = {True: 'true', False: 'false', None: 'null'} -SCHEMA_UNDEFINED = b'UNDEFINED' CLOUD_CONFIG_HEADER = b'#cloud-config' SCHEMA_DOC_TMPL = """ {name} @@ -34,7 +37,7 @@ {property_doc} {examples} """ -SCHEMA_PROPERTY_TMPL = '{prefix}**{prop_name}:** ({type}) {description}' +SCHEMA_PROPERTY_TMPL = "{prefix}**{prop_name}:** ({prop_type}) {description}" SCHEMA_LIST_ITEM_TMPL = ( '{prefix}Each item in **{prop_name}** list supports the following keys:') SCHEMA_EXAMPLES_HEADER = '\n**Examples**::\n\n' @@ -72,45 +75,102 @@ def is_schema_byte_string(checker, instance): isinstance(instance, (bytes,))) -def validate_cloudconfig_schema(config, schema, strict=False): - """Validate provided config meets the schema definition. +def get_jsonschema_validator(): + """Get metaschema validator and format checker - @param config: Dict of cloud configuration settings validated against - schema. - @param schema: jsonschema dict describing the supported schema definition - for the cloud config module (config.cc_*). - @param strict: Boolean, when True raise SchemaValidationErrors instead of - logging warnings. + Older versions of jsonschema require some compatibility changes. - @raises: SchemaValidationError when provided config does not validate - against the provided schema. 
+ @returns: Tuple: (jsonschema.Validator, FormatChecker) + @raises: ImportError when jsonschema is not present """ - try: - from jsonschema import Draft4Validator, FormatChecker - from jsonschema.validators import create, extend - except ImportError: - logging.debug( - 'Ignoring schema validation. python-jsonschema is not present') - return + from jsonschema import Draft4Validator, FormatChecker + from jsonschema.validators import create # Allow for bytes to be presented as an acceptable valid value for string # type jsonschema attributes in cloud-init's schema. # This allows #cloud-config to provide valid yaml "content: !!binary | ..." + + strict_metaschema = deepcopy(Draft4Validator.META_SCHEMA) + strict_metaschema['additionalProperties'] = False if hasattr(Draft4Validator, 'TYPE_CHECKER'): # jsonschema 3.0+ type_checker = Draft4Validator.TYPE_CHECKER.redefine( 'string', is_schema_byte_string) - cloudinitValidator = extend(Draft4Validator, type_checker=type_checker) + cloudinitValidator = create( + meta_schema=strict_metaschema, + validators=Draft4Validator.VALIDATORS, + version="draft4", + type_checker=type_checker) else: # jsonschema 2.6 workaround types = Draft4Validator.DEFAULT_TYPES - # Allow bytes as well as string (and disable a spurious - # unsupported-assignment-operation pylint warning which appears because - # this code path isn't written against the latest jsonschema). + # Allow bytes as well as string (and disable a spurious unsupported + # assignment-operation pylint warning which appears because this + # code path isn't written against the latest jsonschema). types['string'] = (str, bytes) # pylint: disable=E1137 cloudinitValidator = create( - meta_schema=Draft4Validator.META_SCHEMA, + meta_schema=strict_metaschema, validators=Draft4Validator.VALIDATORS, version="draft4", default_types=types) + return (cloudinitValidator, FormatChecker) + + +def validate_cloudconfig_metaschema(validator, schema: dict, throw=True): + """Validate provided schema meets the metaschema definition. Return strict + Validator and FormatChecker for use in validation + @param validator: Draft4Validator instance used to validate the schema + @param schema: schema to validate + @param throw: Sometimes the validator and checker are required, even if + the schema is invalid. Toggle for whether to raise + SchemaValidationError or log warnings. + + @raises: ImportError when jsonschema is not present + @raises: SchemaValidationError when the schema is invalid + """ + + from jsonschema.exceptions import SchemaError + + try: + validator.check_schema(schema) + except SchemaError as err: + # Raise SchemaValidationError to avoid jsonschema imports at call + # sites + if throw: + raise SchemaValidationError( + schema_errors=( + ('.'.join([str(p) for p in err.path]), err.message), + ) + ) from err + logging.warning( + "Meta-schema validation failed, attempting to validate config " + "anyway: %s", err) + + +def validate_cloudconfig_schema( + config: dict, schema: dict, strict=False, strict_metaschema=False +): + """Validate provided config meets the schema definition. + + @param config: Dict of cloud configuration settings validated against + schema. Ignored if strict_metaschema=True + @param schema: jsonschema dict describing the supported schema definition + for the cloud config module (config.cc_*). + @param strict: Boolean, when True raise SchemaValidationErrors instead of + logging warnings. 
+ @param strict_metaschema: Boolean, when True validates schema using strict + metaschema definition at runtime (currently unused) + + @raises: SchemaValidationError when provided config does not validate + against the provided schema. + """ + try: + (cloudinitValidator, FormatChecker) = get_jsonschema_validator() + if strict_metaschema: + validate_cloudconfig_metaschema( + cloudinitValidator, schema, throw=False) + except ImportError: + logging.debug("Ignoring schema validation. jsonschema is not present") + return + validator = cloudinitValidator(schema, format_checker=FormatChecker()) errors = () for error in sorted(validator.iter_errors(config), key=lambda e: e.path): @@ -301,12 +361,15 @@ def _schemapath_for_cloudconfig(config, original_content): return schema_line_numbers -def _get_property_type(property_dict): - """Return a string representing a property type from a given jsonschema.""" - property_type = property_dict.get('type', SCHEMA_UNDEFINED) - if property_type == SCHEMA_UNDEFINED and property_dict.get('enum'): +def _get_property_type(property_dict: dict) -> str: + """Return a string representing a property type from a given + jsonschema. + """ + property_type = property_dict.get("type") + if property_type is None and property_dict.get("enum"): property_type = [ - str(_YAML_MAP.get(k, k)) for k in property_dict['enum']] + str(_YAML_MAP.get(k, k)) for k in property_dict["enum"] + ] if isinstance(property_type, list): property_type = '/'.join(property_type) items = property_dict.get('items', {}) @@ -317,12 +380,12 @@ def _get_property_type(property_dict): sub_property_type += '/' sub_property_type += '(' + _get_property_type(sub_item) + ')' if sub_property_type: - return '{0} of {1}'.format(property_type, sub_property_type) - return property_type + return "{0} of {1}".format(property_type, sub_property_type) + return property_type or "UNDEFINED" -def _parse_description(description, prefix): - """Parse description from the schema in a format that we can better +def _parse_description(description, prefix) -> str: + """Parse description from the meta in a format that we can better display in our docs. This parser does three things: - Guarantee that a paragraph will be in a single line @@ -330,7 +393,7 @@ def _parse_description(description, prefix): the first paragraph - Proper align lists of items - @param description: The original description in the schema. + @param description: The original description in the meta. 
@param prefix: The number of spaces used to align the current description """ list_paragraph = prefix * 3 @@ -343,20 +406,24 @@ def _parse_description(description, prefix): return description -def _get_property_doc(schema, prefix=' '): +def _get_property_doc(schema: dict, prefix=" ") -> str: """Return restructured text describing the supported schema properties.""" new_prefix = prefix + ' ' properties = [] for prop_key, prop_config in schema.get('properties', {}).items(): - # Define prop_name and dscription for SCHEMA_PROPERTY_TMPL + # Define prop_name and description for SCHEMA_PROPERTY_TMPL description = prop_config.get('description', '') - properties.append(SCHEMA_PROPERTY_TMPL.format( - prefix=prefix, - prop_name=prop_key, - type=_get_property_type(prop_config), - description=_parse_description(description, prefix))) - items = prop_config.get('items') + # Define prop_name and description for SCHEMA_PROPERTY_TMPL + properties.append( + SCHEMA_PROPERTY_TMPL.format( + prefix=prefix, + prop_name=prop_key, + description=_parse_description(description, prefix), + prop_type=_get_property_type(prop_config), + ) + ) + items = prop_config.get("items") if items: if isinstance(items, list): for item in items: @@ -373,9 +440,9 @@ def _get_property_doc(schema, prefix=' '): return '\n\n'.join(properties) -def _get_schema_examples(schema, prefix=''): - """Return restructured text describing the schema examples if present.""" - examples = schema.get('examples') +def _get_examples(meta: MetaSchema) -> str: + """Return restructured text describing the meta examples if present.""" + examples = meta.get("examples") if not examples: return '' rst_content = SCHEMA_EXAMPLES_HEADER @@ -390,48 +457,111 @@ def _get_schema_examples(schema, prefix=''): return rst_content -def get_schema_doc(schema): - """Return reStructured text rendering the provided jsonschema. +def get_meta_doc(meta: MetaSchema, schema: dict) -> str: + """Return reStructured text rendering the provided metadata. - @param schema: Dict of jsonschema to render. - @raise KeyError: If schema lacks an expected key. + @param meta: Dict of metadata to render. + @raise KeyError: If metadata lacks an expected key. 
""" - schema_copy = deepcopy(schema) - schema_copy['property_doc'] = _get_property_doc(schema) - schema_copy['examples'] = _get_schema_examples(schema) - schema_copy['distros'] = ', '.join(schema['distros']) + + if not meta or not schema: + raise ValueError("Expected meta and schema") + keys = set(meta.keys()) + expected = set( + { + "id", + "title", + "examples", + "frequency", + "distros", + "description", + "name", + } + ) + error_message = "" + if expected - keys: + error_message = "Missing expected keys in module meta: {}".format( + expected - keys + ) + elif keys - expected: + error_message = ( + "Additional unexpected keys found in module meta: {}".format( + keys - expected + ) + ) + if error_message: + raise KeyError(error_message) + + # cast away type annotation + meta_copy = dict(deepcopy(meta)) + meta_copy["property_doc"] = _get_property_doc(schema) + meta_copy["examples"] = _get_examples(meta) + meta_copy["distros"] = ", ".join(meta["distros"]) # Need an underbar of the same length as the name - schema_copy['title_underbar'] = re.sub(r'.', '-', schema['name']) - return SCHEMA_DOC_TMPL.format(**schema_copy) + meta_copy["title_underbar"] = re.sub(r".", "-", meta["name"]) + template = SCHEMA_DOC_TMPL.format(**meta_copy) + return template + + +def get_modules() -> dict: + configs_dir = os.path.dirname(os.path.abspath(__file__)) + return find_modules(configs_dir) + +def load_doc(requested_modules: list) -> str: + """Load module docstrings -FULL_SCHEMA = None + Docstrings are generated on module load. Reduce, reuse, recycle. + """ + docs = "" + all_modules = list(get_modules().values()) + ["all"] + invalid_docs = set(requested_modules).difference(set(all_modules)) + if invalid_docs: + error( + "Invalid --docs value {}. Must be one of: {}".format( + list(invalid_docs), ", ".join(all_modules), + ) + ) + for mod_name in all_modules: + if "all" in requested_modules or mod_name in requested_modules: + (mod_locs, _) = importer.find_module( + mod_name, ["cloudinit.config"], ["schema"] + ) + if mod_locs: + mod = importer.import_module(mod_locs[0]) + docs += mod.__doc__ or "" + return docs -def get_schema(): +def get_schema() -> dict: """Return jsonschema coalesced from all cc_* cloud-config module.""" - global FULL_SCHEMA - if FULL_SCHEMA: - return FULL_SCHEMA full_schema = { - '$schema': 'http://json-schema.org/draft-04/schema#', - 'id': 'cloud-config-schema', 'allOf': []} - - configs_dir = os.path.dirname(os.path.abspath(__file__)) - potential_handlers = find_modules(configs_dir) - for (_fname, mod_name) in potential_handlers.items(): - mod_locs, _looked_locs = importer.find_module( - mod_name, ['cloudinit.config'], ['schema']) + "$schema": "http://json-schema.org/draft-04/schema#", + "id": "cloud-config-schema", + "allOf": [], + } + + for (_, mod_name) in get_modules().items(): + (mod_locs, _) = importer.find_module( + mod_name, ["cloudinit.config"], ["schema"] + ) if mod_locs: mod = importer.import_module(mod_locs[0]) - full_schema['allOf'].append(mod.schema) - FULL_SCHEMA = full_schema + full_schema["allOf"].append(mod.schema) return full_schema -def error(message): - print(message, file=sys.stderr) - sys.exit(1) +def get_meta() -> dict: + """Return metadata coalesced from all cc_* cloud-config module.""" + full_meta = dict() + for (_, mod_name) in get_modules().items(): + mod_locs, _ = importer.find_module( + mod_name, ["cloudinit.config"], ["meta"] + ) + if mod_locs: + mod = importer.import_module(mod_locs[0]) + full_meta[mod.meta["id"]] = mod.meta + return full_meta def 
get_parser(parser=None): @@ -474,15 +604,7 @@ def handle_schema_args(name, args): cfg_name = args.config_file print("Valid cloud-config:", cfg_name) elif args.docs: - schema_ids = [subschema['id'] for subschema in full_schema['allOf']] - schema_ids += ['all'] - invalid_docs = set(args.docs).difference(set(schema_ids)) - if invalid_docs: - error('Invalid --docs value {0}. Must be one of: {1}'.format( - list(invalid_docs), ', '.join(schema_ids))) - for subschema in full_schema['allOf']: - if 'all' in args.docs or subschema['id'] in args.docs: - print(get_schema_doc(subschema)) + print(load_doc(args.docs)) def main(): diff --git a/cloudinit/importer.py b/cloudinit/importer.py index f1194fbec8b..4e677af36ac 100644 --- a/cloudinit/importer.py +++ b/cloudinit/importer.py @@ -9,6 +9,27 @@ # This file is part of cloud-init. See LICENSE file for license information. import sys +import typing + +# annotations add value for development, but don't break old versions +# pyver: 3.5 -> 3.8 +# pylint: disable=E1101 +if sys.version_info >= (3, 8) and hasattr(typing, "TypeDict"): + MetaSchema = typing.TypedDict( + "MetaSchema", + { + "name": str, + "id": str, + "title": str, + "description": str, + "distros": typing.List[str], + "examples": typing.List[str], + "frequency": str, + }, + ) +else: + MetaSchema = dict +# pylint: enable=E1101 def import_module(module_name): @@ -16,7 +37,8 @@ def import_module(module_name): return sys.modules[module_name] -def find_module(base_name, search_paths, required_attrs=None): +def find_module(base_name: str, search_paths, required_attrs=None) -> tuple: + """Finds and imports specified modules""" if not required_attrs: required_attrs = [] # NOTE(harlowja): translate the search paths to include the base name. diff --git a/cloudinit/util.py b/cloudinit/util.py index 2045a6abfdd..1b462a38334 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -347,7 +347,7 @@ def extract_usergroup(ug_pair): return (u, g) -def find_modules(root_dir): +def find_modules(root_dir) -> dict: entries = dict() for fname in glob.glob(os.path.join(root_dir, "*.py")): if not os.path.isfile(fname): @@ -2751,4 +2751,19 @@ def get_proc_ppid(pid): ppid = int(parts[3]) return ppid + +def error(msg, rc=1, fmt='Error:\n{}', sys_exit=False): + """ + Print error to stderr and return or exit + + @param msg: message to print + @param rc: return code (default: 1) + @param fmt: format string for putting message in (default: 'Error:\n {}') + @param sys_exit: exit when called (default: false) + """ + print(fmt.format(msg), file=sys.stderr) + if sys_exit: + sys.exit(rc) + return rc + # vi: ts=4 expandtab diff --git a/doc/rtd/conf.py b/doc/rtd/conf.py index 684822c2893..4316b5d9a9a 100644 --- a/doc/rtd/conf.py +++ b/doc/rtd/conf.py @@ -1,6 +1,8 @@ import os import sys +from cloudinit import version + # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. @@ -9,8 +11,6 @@ sys.path.insert(0, os.path.abspath('./')) sys.path.insert(0, os.path.abspath('.')) -from cloudinit import version -from cloudinit.config.schema import get_schema_doc # Supress warnings for docs that aren't used yet # unused_docs = [ @@ -66,12 +66,3 @@ # The name of an image file (relative to this directory) to place at the top # of the sidebar. 
html_logo = 'static/logo.png' - -def generate_docstring_from_schema(app, what, name, obj, options, lines): - """Override module docs from schema when present.""" - if what == 'module' and hasattr(obj, "schema"): - del lines[:] - lines.extend(get_schema_doc(obj.schema).split('\n')) - -def setup(app): - app.connect('autodoc-process-docstring', generate_docstring_from_schema) diff --git a/tests/unittests/cmd/test_clean.py b/tests/unittests/cmd/test_clean.py index 81fc930ee60..3bb0ee9b4d7 100644 --- a/tests/unittests/cmd/test_clean.py +++ b/tests/unittests/cmd/test_clean.py @@ -137,7 +137,7 @@ def test_remove_artifacts_returns_one_on_errors(self): clean.remove_artifacts, remove_logs=False) self.assertEqual(1, retcode) self.assertEqual( - 'ERROR: Could not remove %s/dir1: oops\n' % self.artifact_dir, + 'Error:\nCould not remove %s/dir1: oops\n' % self.artifact_dir, m_stderr.getvalue()) def test_handle_clean_args_reboots(self): diff --git a/tests/unittests/cmd/test_cloud_id.py b/tests/unittests/cmd/test_cloud_id.py index 12fc80e87c1..9a010402b17 100644 --- a/tests/unittests/cmd/test_cloud_id.py +++ b/tests/unittests/cmd/test_cloud_id.py @@ -51,7 +51,7 @@ def test_cloud_id_missing_instance_data_json(self): cloud_id.main() self.assertEqual(1, context_manager.exception.code) self.assertIn( - "ERROR: File not found '%s'" % self.instance_data, + "Error:\nFile not found '%s'" % self.instance_data, m_stderr.getvalue()) def test_cloud_id_non_json_instance_data(self): @@ -64,7 +64,7 @@ def test_cloud_id_non_json_instance_data(self): cloud_id.main() self.assertEqual(1, context_manager.exception.code) self.assertIn( - "ERROR: File '%s' is not valid json." % self.instance_data, + "Error:\nFile '%s' is not valid json." % self.instance_data, m_stderr.getvalue()) def test_cloud_id_from_cloud_name_in_instance_data(self): diff --git a/tests/unittests/config/test_schema.py b/tests/unittests/config/test_schema.py index b01f5eea52f..f90e0f62c5d 100644 --- a/tests/unittests/config/test_schema.py +++ b/tests/unittests/config/test_schema.py @@ -1,13 +1,10 @@ # This file is part of cloud-init. See LICENSE file for license information. 
-import cloudinit -from cloudinit.config.schema import ( - CLOUD_CONFIG_HEADER, SchemaValidationError, annotated_cloudconfig_file, - get_schema_doc, get_schema, validate_cloudconfig_file, - validate_cloudconfig_schema, main) -from cloudinit.util import write_file -from tests.unittests.helpers import CiTestCase, mock, skipUnlessJsonSchema +import importlib +import sys +import inspect +import logging from copy import copy import itertools import pytest @@ -15,6 +12,63 @@ from textwrap import dedent from yaml import safe_load +import cloudinit +from cloudinit.config.schema import ( + CLOUD_CONFIG_HEADER, + SchemaValidationError, + annotated_cloudconfig_file, + get_meta_doc, + get_schema, + get_jsonschema_validator, + validate_cloudconfig_file, + validate_cloudconfig_metaschema, + validate_cloudconfig_schema, + main, + MetaSchema, +) +from cloudinit.util import write_file +from tests.unittests.helpers import CiTestCase, mock, skipUnlessJsonSchema + + +def get_schemas() -> dict: + """Return all module schemas + + Assumes that module schemas have the variable name "schema" + """ + return get_module_variable("schema") + + +def get_metas() -> dict: + """Return all module metas + + Assumes that module schemas have the variable name "schema" + """ + return get_module_variable("meta") + + +def get_module_variable(var_name) -> dict: + """Inspect modules and get variable from module matching var_name""" + schemas = {} + + files = list(Path("../../cloudinit/config/").glob("cc_*.py")) + modules = [mod.stem for mod in files] + + for module in modules: + importlib.import_module("cloudinit.config.{}".format(module)) + + for k, v in sys.modules.items(): + path = Path(k) + + if "cloudinit.config" == path.stem and path.suffix[1:4] == "cc_": + module_name = path.suffix[1:] + members = inspect.getmembers(v) + schemas[module_name] = None + for name, value in members: + if name == var_name: + schemas[module_name] = value + break + return schemas + class GetSchemaTest(CiTestCase): @@ -34,25 +88,17 @@ def test_get_schema_coalesces_known_schema(self): 'cc_ubuntu_advantage', 'cc_ubuntu_drivers', 'cc_write_files', - 'cc_write_files_deferred', 'cc_zypper_add_repo', 'cc_chef', 'cc_install_hotplug', ], - [subschema['id'] for subschema in schema['allOf']]) - self.assertEqual('cloud-config-schema', schema['id']) + [meta["id"] for meta in get_metas().values() if meta is not None], + ) + self.assertEqual("cloud-config-schema", schema["id"]) self.assertEqual( - 'http://json-schema.org/draft-04/schema#', - schema['$schema']) - # FULL_SCHEMA is updated by the get_schema call - from cloudinit.config.schema import FULL_SCHEMA - self.assertCountEqual(['id', '$schema', 'allOf'], FULL_SCHEMA.keys()) - - def test_get_schema_returns_global_when_set(self): - """When FULL_SCHEMA global is already set, get_schema returns it.""" - m_schema_path = 'cloudinit.config.schema.FULL_SCHEMA' - with mock.patch(m_schema_path, {'here': 'iam'}): - self.assertEqual({'here': 'iam'}, get_schema()) + "http://json-schema.org/draft-04/schema#", schema["$schema"] + ) + self.assertCountEqual(["id", "$schema", "allOf"], get_schema().keys()) class SchemaValidationErrorTest(CiTestCase): @@ -93,8 +139,9 @@ def test_validateconfig_schema_emits_warning_on_missing_jsonschema(self): with mock.patch.dict('sys.modules', **{'jsonschema': ImportError()}): validate_cloudconfig_schema({'p1': -1}, schema, strict=True) self.assertIn( - 'Ignoring schema validation. python-jsonschema is not present', - self.logs.getvalue()) + "Ignoring schema validation. 
jsonschema is not present", + self.logs.getvalue(), + ) @skipUnlessJsonSchema() def test_validateconfig_schema_strict_raises_errors(self): @@ -117,14 +164,48 @@ def test_validateconfig_schema_honors_formats(self): "Cloud config schema errors: p1: '-1' is not a 'email'", str(context_mgr.exception)) + @skipUnlessJsonSchema() + def test_validateconfig_schema_honors_formats_strict_metaschema(self): + """With strict True and strict_metascheam True, ensure errors on format + """ + schema = {"properties": {"p1": {"type": "string", "format": "email"}}} + with self.assertRaises(SchemaValidationError) as context_mgr: + validate_cloudconfig_schema( + {"p1": "-1"}, schema, strict=True, strict_metaschema=True + ) + self.assertEqual( + "Cloud config schema errors: p1: '-1' is not a 'email'", + str(context_mgr.exception), + ) + + @skipUnlessJsonSchema() + def test_validateconfig_strict_metaschema_do_not_raise_exception(self): + """With strict_metaschema=True, do not raise exceptions. + + This flag is currently unused, but is intended for run-time validation. + This should warn, but not raise. + """ + schema = {"properties": {"p1": {"types": "string", "format": "email"}}} + validate_cloudconfig_schema( + {"p1": "-1"}, schema, strict_metaschema=True + ) + assert ( + "Meta-schema validation failed, attempting to validate config" + in self.logs.getvalue() + ) + class TestCloudConfigExamples: - schema = get_schema() + schema = get_schemas() + metas = get_metas() params = [ - (schema["id"], example) - for schema in schema["allOf"] for example in schema["examples"]] + (meta["id"], example) + for meta in metas.values() + if meta and meta.get("examples") + for example in meta.get("examples") + ] - @pytest.mark.parametrize("schema_id,example", params) + @pytest.mark.parametrize("schema_id, example", params) @skipUnlessJsonSchema() def test_validateconfig_schema_of_example(self, schema_id, example): """ For a given example in a config module we test if it is valid @@ -201,22 +282,42 @@ def test_validateconfig_file_sctrictly_validates_schema(self): class GetSchemaDocTest(CiTestCase): - """Tests for get_schema_doc.""" + """Tests for get_meta_doc.""" def setUp(self): super(GetSchemaDocTest, self).setUp() self.required_schema = { - 'title': 'title', 'description': 'description', 'id': 'id', - 'name': 'name', 'frequency': 'frequency', - 'distros': ['debian', 'rhel']} + "title": "title", + "description": "description", + "id": "id", + "name": "name", + "frequency": "frequency", + "distros": ["debian", "rhel"], + } + self.meta = MetaSchema( + { + "title": "title", + "description": "description", + "id": "id", + "name": "name", + "frequency": "frequency", + "distros": ["debian", "rhel"], + "examples": [ + 'ex1:\n [don\'t, expand, "this"]', + "ex2: true", + ], + } + ) - def test_get_schema_doc_returns_restructured_text(self): - """get_schema_doc returns restructured text for a cloudinit schema.""" + def test_get_meta_doc_returns_restructured_text(self): + """get_meta_doc returns restructured text for a cloudinit schema.""" full_schema = copy(self.required_schema) full_schema.update( {'properties': { 'prop1': {'type': 'array', 'description': 'prop-description', 'items': {'type': 'integer'}}}}) + + doc = get_meta_doc(self.meta, full_schema) self.assertEqual( dedent(""" name @@ -232,47 +333,51 @@ def test_get_schema_doc_returns_restructured_text(self): **Supported distros:** debian, rhel **Config schema**: - **prop1:** (array of integer) prop-description\n\n"""), - get_schema_doc(full_schema)) + **prop1:** (array of integer) 
prop-description - def test_get_schema_doc_handles_multiple_types(self): - """get_schema_doc delimits multiple property types with a '/'.""" - full_schema = copy(self.required_schema) - full_schema.update( - {'properties': { - 'prop1': {'type': ['string', 'integer'], - 'description': 'prop-description'}}}) + **Examples**:: + + ex1: + [don't, expand, "this"] + # --- Example2 --- + ex2: true + """), + doc, + ) + + def test_get_meta_doc_handles_multiple_types(self): + """get_meta_doc delimits multiple property types with a '/'.""" + schema = {"properties": {"prop1": {"type": ["string", "integer"]}}} self.assertIn( - '**prop1:** (string/integer) prop-description', - get_schema_doc(full_schema)) + "**prop1:** (string/integer)", get_meta_doc(self.meta, schema) + ) - def test_get_schema_doc_handles_enum_types(self): - """get_schema_doc converts enum types to yaml and delimits with '/'.""" - full_schema = copy(self.required_schema) - full_schema.update( - {'properties': { - 'prop1': {'enum': [True, False, 'stuff'], - 'description': 'prop-description'}}}) + def test_get_meta_doc_handles_enum_types(self): + """get_meta_doc converts enum types to yaml and delimits with '/'.""" + schema = {"properties": {"prop1": {"enum": [True, False, "stuff"]}}} self.assertIn( - '**prop1:** (true/false/stuff) prop-description', - get_schema_doc(full_schema)) + "**prop1:** (true/false/stuff)", get_meta_doc(self.meta, schema) + ) - def test_get_schema_doc_handles_nested_oneof_property_types(self): - """get_schema_doc describes array items oneOf declarations in type.""" - full_schema = copy(self.required_schema) - full_schema.update( - {'properties': { - 'prop1': {'type': 'array', - 'items': { - 'oneOf': [{'type': 'string'}, - {'type': 'integer'}]}, - 'description': 'prop-description'}}}) + def test_get_meta_doc_handles_nested_oneof_property_types(self): + """get_meta_doc describes array items oneOf declarations in type.""" + schema = { + "properties": { + "prop1": { + "type": "array", + "items": { + "oneOf": [{"type": "string"}, {"type": "integer"}] + }, + } + } + } self.assertIn( - '**prop1:** (array of (string)/(integer)) prop-description', - get_schema_doc(full_schema)) + "**prop1:** (array of (string)/(integer))", + get_meta_doc(self.meta, schema), + ) - def test_get_schema_doc_handles_string_examples(self): - """get_schema_doc properly indented examples as a list of strings.""" + def test_get_meta_doc_handles_string_examples(self): + """get_meta_doc properly indented examples as a list of strings.""" full_schema = copy(self.required_schema) full_schema.update( {'examples': ['ex1:\n [don\'t, expand, "this"]', 'ex2: true'], @@ -291,16 +396,17 @@ def test_get_schema_doc_handles_string_examples(self): # --- Example2 --- ex2: true """), - get_schema_doc(full_schema)) + get_meta_doc(self.meta, full_schema), + ) - def test_get_schema_doc_properly_parse_description(self): - """get_schema_doc description properly formatted""" - full_schema = copy(self.required_schema) - full_schema.update( - {'properties': { - 'p1': { - 'type': 'string', - 'description': dedent("""\ + def test_get_meta_doc_properly_parse_description(self): + """get_meta_doc description properly formatted""" + schema = { + "properties": { + "p1": { + "type": "string", + "description": dedent( + """\ This item has the following options: @@ -312,8 +418,8 @@ def test_get_schema_doc_properly_parse_description(self): The default value is option1""") } - }} - ) + } + } self.assertIn( dedent(""" @@ -325,16 +431,28 @@ def 
test_get_schema_doc_properly_parse_description(self): - option3 The default value is option1 + """), - get_schema_doc(full_schema)) + get_meta_doc(self.meta, schema), + ) - def test_get_schema_doc_raises_key_errors(self): - """get_schema_doc raises KeyErrors on missing keys.""" - for key in self.required_schema: - invalid_schema = copy(self.required_schema) - invalid_schema.pop(key) + def test_get_meta_doc_raises_key_errors(self): + """get_meta_doc raises KeyErrors on missing keys.""" + schema = { + "properties": { + "prop1": { + "type": "array", + "items": { + "oneOf": [{"type": "string"}, {"type": "integer"}] + }, + } + } + } + for key in self.meta: + invalid_meta = copy(self.meta) + invalid_meta.pop(key) with self.assertRaises(KeyError) as context_mgr: - get_schema_doc(invalid_schema) + get_meta_doc(invalid_meta, schema) self.assertIn(key, str(context_mgr.exception)) @@ -418,6 +536,7 @@ def test_main_exclusive_args(self, params, capsys): _out, err = capsys.readouterr() expected = ( + 'Error:\n' 'Expected one of --config-file, --system or --docs arguments\n' ) assert expected == err @@ -431,6 +550,7 @@ def test_main_missing_args(self, capsys): _out, err = capsys.readouterr() expected = ( + 'Error:\n' 'Expected one of --config-file, --system or --docs arguments\n' ) assert expected == err @@ -443,7 +563,7 @@ def test_main_absent_config_file(self, capsys): main() assert 1 == context_manager.value.code _out, err = capsys.readouterr() - assert 'Configfile NOT_A_FILE does not exist\n' == err + assert 'Error:\nConfigfile NOT_A_FILE does not exist\n' == err def test_main_prints_docs(self, capsys): """When --docs parameter is provided, main generates documentation.""" @@ -489,12 +609,13 @@ def test_main_system_userdata_requires_root(self, m_getuid, capsys, paths): assert 1 == context_manager.value.code _out, err = capsys.readouterr() expected = ( - 'Unable to read system userdata as non-root user. Try using sudo\n' + 'Error:\nUnable to read system userdata as non-root user. ' + 'Try using sudo\n' ) assert expected == err -def _get_schema_doc_examples(): +def _get_meta_doc_examples(): examples_dir = Path( cloudinit.__file__).parent.parent / 'doc' / 'examples' assert examples_dir.is_dir() @@ -507,9 +628,49 @@ def _get_schema_doc_examples(): class TestSchemaDocExamples: schema = get_schema() - @pytest.mark.parametrize("example_path", _get_schema_doc_examples()) + @pytest.mark.parametrize("example_path", _get_meta_doc_examples()) @skipUnlessJsonSchema() def test_schema_doc_examples(self, example_path): validate_cloudconfig_file(str(example_path), self.schema) + +class TestStrictMetaschema: + """Validate that schemas follow a stricter metaschema definition than + the default. This disallows arbitrary key/value pairs. 
+ """ + + @skipUnlessJsonSchema() + def test_modules(self): + """Validate all modules with a stricter metaschema""" + (validator, _) = get_jsonschema_validator() + for (name, value) in get_schemas().items(): + if value: + validate_cloudconfig_metaschema(validator, value) + else: + logging.warning("module %s has no schema definition", name) + + @skipUnlessJsonSchema() + def test_validate_bad_module(self): + """Throw exception by default, don't throw if throw=False + + item should be 'items' and is therefore interpreted as an additional + property which is invalid with a strict metaschema + """ + (validator, _) = get_jsonschema_validator() + schema = { + "type": "array", + "item": { + "type": "object", + }, + } + with pytest.raises( + SchemaValidationError, + match=(r"Additional properties are not allowed.*") + ): + + validate_cloudconfig_metaschema(validator, schema) + + validate_cloudconfig_metaschema(validator, schema, throw=False) + + # vi: ts=4 expandtab syntax=python diff --git a/tests/unittests/test_cli.py b/tests/unittests/test_cli.py index fd717f34a04..d016267332c 100644 --- a/tests/unittests/test_cli.py +++ b/tests/unittests/test_cli.py @@ -1,6 +1,7 @@ # This file is part of cloud-init. See LICENSE file for license information. import os +import contextlib import io from collections import namedtuple @@ -214,26 +215,106 @@ def test_wb_devel_schema_subcommand_parser(self): self.assertEqual(1, exit_code) # Known whitebox output from schema subcommand self.assertEqual( + 'Error:\n' 'Expected one of --config-file, --system or --docs arguments\n', self.stderr.getvalue()) - def test_wb_devel_schema_subcommand_doc_content(self): - """Validate that doc content is sane from known examples.""" + def test_wb_devel_schema_subcommand_doc_all_spot_check(self): + """Validate that doc content has correct values from known examples. + + Ensure that schema doc is returned + """ + + # Note: patchStdoutAndStderr() is convenient for reducing boilerplate, + # but inspecting the code for debugging is not ideal + # contextlib.redirect_stdout() provides similar behavior as a context + # manager stdout = io.StringIO() - self.patchStdoutAndStderr(stdout=stdout) - self._call_main(['cloud-init', 'devel', 'schema', '--docs', 'all']) - expected_doc_sections = [ - '**Supported distros:** all', - ('**Supported distros:** almalinux, alpine, centos, cloudlinux, ' - 'debian, eurolinux, fedora, openEuler, opensuse, photon, rhel, ' - 'rocky, sles, ubuntu, virtuozzo'), - '**Config schema**:\n **resize_rootfs:** (true/false/noblock)', - '**Examples**::\n\n runcmd:\n - [ ls, -l, / ]\n' - ] + with contextlib.redirect_stdout(stdout): + self._call_main(["cloud-init", "devel", "schema", "--docs", "all"]) + expected_doc_sections = [ + "**Supported distros:** all", + ( + "**Supported distros:** almalinux, alpine, centos, " + "cloudlinux, debian, eurolinux, fedora, openEuler, " + "opensuse, photon, rhel, rocky, sles, ubuntu, virtuozzo" + ), + "**Config schema**:\n **resize_rootfs:** " + "(true/false/noblock)", + "**Examples**::\n\n runcmd:\n - [ ls, -l, / ]\n", + ] + stdout = stdout.getvalue() + for expected in expected_doc_sections: + self.assertIn(expected, stdout) + + def test_wb_devel_schema_subcommand_single_spot_check(self): + """Validate that doc content has correct values from known example. 
+
+        Validate single arg
+        """
+
+        # Note: patchStdoutAndStderr() is convenient for reducing boilerplate,
+        # but inspecting the code for debugging is not ideal
+        # contextlib.redirect_stdout() provides similar behavior as a context
+        # manager
+        stdout = io.StringIO()
+        with contextlib.redirect_stdout(stdout):
+            self._call_main(
+                ["cloud-init", "devel", "schema", "--docs", "cc_runcmd"]
+            )
+        expected_doc_sections = [
+            "Runcmd\n------\n**Summary:** Run arbitrary commands"
+        ]
         stdout = stdout.getvalue()
         for expected in expected_doc_sections:
             self.assertIn(expected, stdout)
 
+    def test_wb_devel_schema_subcommand_multiple_spot_check(self):
+        """Validate that doc content has correct values from known examples.
+
+        Validate multiple args
+        """
+
+        stdout = io.StringIO()
+        with contextlib.redirect_stdout(stdout):
+            self._call_main(
+                [
+                    "cloud-init",
+                    "devel",
+                    "schema",
+                    "--docs",
+                    "cc_runcmd",
+                    "cc_resizefs",
+                ]
+            )
+        expected_doc_sections = [
+            "Runcmd\n------\n**Summary:** Run arbitrary commands",
+            "Resizefs\n--------\n**Summary:** Resize filesystem",
+        ]
+        stdout = stdout.getvalue()
+        for expected in expected_doc_sections:
+            self.assertIn(expected, stdout)
+
+    def test_wb_devel_schema_subcommand_bad_arg_fails(self):
+        """Validate that an invalid --docs value fails with a useful error.
+
+        Validate bad arg
+        """
+
+        # Note: patchStdoutAndStderr() is convenient for reducing boilerplate,
+        # but inspecting the code for debugging is not ideal
+        # contextlib.redirect_stderr() provides similar behavior as a context
+        # manager
+        stderr = io.StringIO()
+        with contextlib.redirect_stderr(stderr):
+            self._call_main(
+                ["cloud-init", "devel", "schema", "--docs", "garbage_value"]
+            )
+        expected_doc_sections = ["Invalid --docs value"]
+        stderr = stderr.getvalue()
+        for expected in expected_doc_sections:
+            self.assertIn(expected, stderr)
+
     @mock.patch('cloudinit.cmd.main.main_single')
     def test_single_subcommand(self, m_main_single):
         """The subcommand 'single' calls main_single with valid args."""

From cd40789a583c38423cea76355b375aa838893292 Mon Sep 17 00:00:00 2001
From: James Falcon
Date: Mon, 6 Dec 2021 17:17:49 -0600
Subject: [PATCH 0013/2310] docs: Make MACs lowercase in network config (#1135)

LP: #1876941
---
 doc/rtd/topics/network-config-format-v1.rst | 3 ++-
 doc/rtd/topics/network-config-format-v2.rst | 5 +++--
 2 files changed, 5 insertions(+), 3 deletions(-)

diff --git a/doc/rtd/topics/network-config-format-v1.rst b/doc/rtd/topics/network-config-format-v1.rst
index 3202163b7a8..4c013d925e2 100644
--- a/doc/rtd/topics/network-config-format-v1.rst
+++ b/doc/rtd/topics/network-config-format-v1.rst
@@ -62,7 +62,8 @@ structure.
 
 **mac_address**: *<MAC Address>*
 
 The MAC Address is a device unique identifier that most Ethernet-based network
-devices possess. Specifying a MAC Address is optional.
+devices possess. Specifying a MAC Address is optional.
+Letters must be lowercase.
 
 .. note::
 
diff --git a/doc/rtd/topics/network-config-format-v2.rst b/doc/rtd/topics/network-config-format-v2.rst
index af65a4ce77f..8b040ed2c4d 100644
--- a/doc/rtd/topics/network-config-format-v2.rst
+++ b/doc/rtd/topics/network-config-format-v2.rst
@@ -107,7 +107,8 @@ NetworkManager does not.
 
 **macaddress**: *<(scalar)>*
 
-Device's MAC address in the form XX:XX:XX:XX:XX:XX. Globs are not allowed.
+Device's MAC address in the form xx:xx:xx:xx:xx:xx. Globs are not allowed.
+Letters must be lowercase.
 
 .. note::
 
@@ -131,7 +132,7 @@ supported. Matching on driver is *only* supported with networkd.
# fixed MAC address match: - macaddress: 11:22:33:AA:BB:FF + macaddress: 11:22:33:aa:bb:ff # first card of driver ``ixgbe`` match: From 0ed00ad9cdebc2d4dabd8bd6d7c901584963def5 Mon Sep 17 00:00:00 2001 From: Haruki TSURUMOTO Date: Tue, 7 Dec 2021 23:23:45 +0900 Subject: [PATCH 0014/2310] Add miraclelinux support (#1128) --- README.md | 2 +- cloudinit/config/cc_ntp.py | 4 +-- cloudinit/config/cc_yum_add_repo.py | 2 +- cloudinit/distros/__init__.py | 3 ++- cloudinit/distros/miraclelinux.py | 8 ++++++ cloudinit/net/sysconfig.py | 3 ++- cloudinit/util.py | 4 +-- config/cloud.cfg.tmpl | 6 ++--- systemd/cloud-init-generator.tmpl | 2 +- systemd/cloud-init.service.tmpl | 2 +- tests/unittests/test_cli.py | 5 ++-- tests/unittests/test_util.py | 38 +++++++++++++++++++++++++++++ tools/.github-cla-signers | 1 + tools/read-dependencies | 4 +++ tools/render-cloudcfg | 2 +- 15 files changed, 70 insertions(+), 16 deletions(-) create mode 100644 cloudinit/distros/miraclelinux.py diff --git a/README.md b/README.md index 27098b1188b..f2a745f8707 100644 --- a/README.md +++ b/README.md @@ -39,7 +39,7 @@ get in contact with that distribution and send them our way! | Supported OSes | Supported Public Clouds | Supported Private Clouds | | --- | --- | --- | -| Alpine Linux
ArchLinux
Debian
DragonFlyBSD
Fedora
FreeBSD
Gentoo Linux
NetBSD
OpenBSD
openEuler
RHEL/CentOS/AlmaLinux/Rocky/PhotonOS/Virtuozzo/EuroLinux/CloudLinux
SLES/openSUSE
Ubuntu










| Amazon Web Services
Microsoft Azure
Google Cloud Platform
Oracle Cloud Infrastructure
Softlayer
Rackspace Public Cloud
IBM Cloud
DigitalOcean
Bigstep
Hetzner
Joyent
CloudSigma
Alibaba Cloud
OVH
OpenNebula
Exoscale
Scaleway
CloudStack
AltCloud
SmartOS
HyperOne
Vultr
Rootbox
| Bare metal installs
OpenStack
LXD
KVM
Metal-as-a-Service (MAAS)
VMware















| +| Alpine Linux
ArchLinux
Debian
DragonFlyBSD
Fedora
FreeBSD
Gentoo Linux
NetBSD
OpenBSD
openEuler
RHEL/CentOS/AlmaLinux/Rocky/PhotonOS/Virtuozzo/EuroLinux/CloudLinux/MIRACLE LINUX
SLES/openSUSE
Ubuntu










| Amazon Web Services
Microsoft Azure
Google Cloud Platform
Oracle Cloud Infrastructure
Softlayer
Rackspace Public Cloud
IBM Cloud
DigitalOcean
Bigstep
Hetzner
Joyent
CloudSigma
Alibaba Cloud
OVH
OpenNebula
Exoscale
Scaleway
CloudStack
AltCloud
SmartOS
HyperOne
Vultr
Rootbox
| Bare metal installs
OpenStack
LXD
KVM
Metal-as-a-Service (MAAS)
VMware















| ## To start developing cloud-init diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py index 9c085a04c68..c55d5d86606 100644 --- a/cloudinit/config/cc_ntp.py +++ b/cloudinit/config/cc_ntp.py @@ -25,8 +25,8 @@ NTP_CONF = '/etc/ntp.conf' NR_POOL_SERVERS = 4 distros = ['almalinux', 'alpine', 'centos', 'cloudlinux', 'debian', - 'eurolinux', 'fedora', 'openEuler', 'opensuse', 'photon', - 'rhel', 'rocky', 'sles', 'ubuntu', 'virtuozzo'] + 'eurolinux', 'fedora', 'miraclelinux', 'openEuler', 'opensuse', + 'photon', 'rhel', 'rocky', 'sles', 'ubuntu', 'virtuozzo'] NTP_CLIENT_CONFIG = { 'chrony': { diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py index d66d3ae4d95..046a2852852 100644 --- a/cloudinit/config/cc_yum_add_repo.py +++ b/cloudinit/config/cc_yum_add_repo.py @@ -19,7 +19,7 @@ **Module frequency:** always **Supported distros:** almalinux, centos, cloudlinux, eurolinux, fedora, - openEuler, photon, rhel, rocky, virtuozzo + miraclelinux, openEuler, photon, rhel, rocky, virtuozzo **Config keys**:: diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index fe44f20eee7..742804ea5f2 100755 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -50,7 +50,8 @@ 'freebsd': ['freebsd'], 'gentoo': ['gentoo'], 'redhat': ['almalinux', 'amazon', 'centos', 'cloudlinux', 'eurolinux', - 'fedora', 'openEuler', 'photon', 'rhel', 'rocky', 'virtuozzo'], + 'fedora', 'miraclelinux', 'openEuler', 'photon', 'rhel', + 'rocky', 'virtuozzo'], 'suse': ['opensuse', 'sles'], } diff --git a/cloudinit/distros/miraclelinux.py b/cloudinit/distros/miraclelinux.py new file mode 100644 index 00000000000..c7753387b00 --- /dev/null +++ b/cloudinit/distros/miraclelinux.py @@ -0,0 +1,8 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
+ +from cloudinit.distros import rhel + + +class Distro(rhel.Distro): + pass +# vi: ts=4 expandtab diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py index ef4543b4382..85342219a93 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py @@ -19,7 +19,8 @@ LOG = logging.getLogger(__name__) KNOWN_DISTROS = ['almalinux', 'centos', 'cloudlinux', 'eurolinux', 'fedora', - 'openEuler', 'rhel', 'rocky', 'suse', 'virtuozzo'] + 'miraclelinux', 'openEuler', 'rhel', 'rocky', 'suse', + 'virtuozzo'] NM_CFG_FILE = "/etc/NetworkManager/NetworkManager.conf" diff --git a/cloudinit/util.py b/cloudinit/util.py index 1b462a38334..cad087a1615 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -540,8 +540,8 @@ def _get_variant(info): linux_dist = info['dist'][0].lower() if linux_dist in ( 'almalinux', 'alpine', 'arch', 'centos', 'cloudlinux', - 'debian', 'eurolinux', 'fedora', 'openeuler', 'photon', - 'rhel', 'rocky', 'suse', 'virtuozzo'): + 'debian', 'eurolinux', 'fedora', 'miraclelinux', 'openeuler', + 'photon', 'rhel', 'rocky', 'suse', 'virtuozzo'): variant = linux_dist elif linux_dist in ('ubuntu', 'linuxmint', 'mint'): variant = 'ubuntu' diff --git a/config/cloud.cfg.tmpl b/config/cloud.cfg.tmpl index b66bbe60d7d..741b23d5248 100644 --- a/config/cloud.cfg.tmpl +++ b/config/cloud.cfg.tmpl @@ -33,7 +33,7 @@ disable_root: true {% endif %} {% if variant in ["almalinux", "alpine", "amazon", "centos", "cloudlinux", "eurolinux", - "fedora", "openEuler", "rhel", "rocky", "virtuozzo"] %} + "fedora", "miraclelinux", "openEuler", "rhel", "rocky", "virtuozzo"] %} mount_default_fields: [~, ~, 'auto', 'defaults,nofail', '0', '2'] {% if variant == "amazon" %} resize_rootfs: noblock @@ -176,7 +176,7 @@ cloud_final_modules: system_info: # This will affect which distro class gets used {% if variant in ["almalinux", "alpine", "amazon", "arch", "centos", "cloudlinux", "debian", - "eurolinux", "fedora", "freebsd", "netbsd", "openbsd", "openEuler", + "eurolinux", "fedora", "freebsd", "netbsd", "miraclelinux", "openbsd", "openEuler", "photon", "rhel", "rocky", "suse", "ubuntu", "virtuozzo"] %} distro: {{ variant }} {% elif variant in ["dragonfly"] %} @@ -231,7 +231,7 @@ system_info: security: http://ports.ubuntu.com/ubuntu-ports ssh_svcname: ssh {% elif variant in ["almalinux", "alpine", "amazon", "arch", "centos", "cloudlinux", "eurolinux", - "fedora", "openEuler", "rhel", "rocky", "suse", "virtuozzo"] %} + "fedora", "miraclelinux", "openEuler", "rhel", "rocky", "suse", "virtuozzo"] %} # Default user name + that default users groups (if added/used) default_user: {% if variant == "amazon" %} diff --git a/systemd/cloud-init-generator.tmpl b/systemd/cloud-init-generator.tmpl index 7d1e7256d6a..74d474283bf 100644 --- a/systemd/cloud-init-generator.tmpl +++ b/systemd/cloud-init-generator.tmpl @@ -84,7 +84,7 @@ default() { check_for_datasource() { local ds_rc="" {% if variant in ["almalinux", "centos", "cloudlinux", "eurolinux", "fedora", - "openEuler", "rhel", "rocky", "virtuozzo"] %} + "miraclelinux", "openEuler", "rhel", "rocky", "virtuozzo"] %} local dsidentify="/usr/libexec/cloud-init/ds-identify" {% else %} local dsidentify="/usr/lib/cloud-init/ds-identify" diff --git a/systemd/cloud-init.service.tmpl b/systemd/cloud-init.service.tmpl index de3f3d91f92..e71e5679065 100644 --- a/systemd/cloud-init.service.tmpl +++ b/systemd/cloud-init.service.tmpl @@ -13,7 +13,7 @@ After=systemd-networkd-wait-online.service After=networking.service {% endif %} {% if variant in ["almalinux", "centos", 
"cloudlinux", "eurolinux", "fedora", - "openEuler", "rhel", "rocky", "virtuozzo"] %} + "miraclelinux", "openEuler", "rhel", "rocky", "virtuozzo"] %} After=network.service After=NetworkManager.service {% endif %} diff --git a/tests/unittests/test_cli.py b/tests/unittests/test_cli.py index d016267332c..e30e89a7456 100644 --- a/tests/unittests/test_cli.py +++ b/tests/unittests/test_cli.py @@ -236,8 +236,9 @@ def test_wb_devel_schema_subcommand_doc_all_spot_check(self): "**Supported distros:** all", ( "**Supported distros:** almalinux, alpine, centos, " - "cloudlinux, debian, eurolinux, fedora, openEuler, " - "opensuse, photon, rhel, rocky, sles, ubuntu, virtuozzo" + "cloudlinux, debian, eurolinux, fedora, miraclelinux, " + "openEuler, opensuse, photon, rhel, rocky, sles, ubuntu, " + "virtuozzo" ), "**Config schema**:\n **resize_rootfs:** " "(true/false/noblock)", diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index 1290cbc69f1..3b76ead867b 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -185,6 +185,25 @@ """ ) +OS_RELEASE_MIRACLELINUX_8 = dedent( + """\ + NAME="MIRACLE LINUX" + VERSION="8.4 (Peony)" + ID="miraclelinux" + ID_LIKE="rhel fedora" + PLATFORM_ID="platform:el8" + VERSION_ID="8" + PRETTY_NAME="MIRACLE LINUX 8.4 (Peony)" + ANSI_COLOR="0;31" + CPE_NAME="cpe:/o:cybertrust_japan:miracle_linux:8" + HOME_URL="https://www.cybertrust.co.jp/miracle-linux/" + DOCUMENTATION_URL="https://www.miraclelinux.com/support/miraclelinux8" + BUG_REPORT_URL="https://bugzilla.asianux.com/" + MIRACLELINUX_SUPPORT_PRODUCT="MIRACLE LINUX" + MIRACLELINUX_SUPPORT_PRODUCT_VERSION="8" +""" +) + OS_RELEASE_ROCKY_8 = dedent( """\ NAME="Rocky Linux" @@ -255,6 +274,7 @@ REDHAT_RELEASE_ALMALINUX_8 = "AlmaLinux release 8.3 (Purple Manul)" REDHAT_RELEASE_EUROLINUX_7 = "EuroLinux release 7.9 (Minsk)" REDHAT_RELEASE_EUROLINUX_8 = "EuroLinux release 8.4 (Vaduz)" +REDHAT_RELEASE_MIRACLELINUX_8 = "MIRACLE LINUX release 8.4 (Peony)" REDHAT_RELEASE_ROCKY_8 = "Rocky Linux release 8.3 (Green Obsidian)" REDHAT_RELEASE_VIRTUOZZO_8 = "Virtuozzo Linux release 8" REDHAT_RELEASE_CLOUDLINUX_8 = "CloudLinux release 8.4 (Valery Rozhdestvensky)" @@ -754,6 +774,24 @@ def test_get_linux_eurolinux8_osrelease(self, m_os_release, m_path_exists): dist = util.get_linux_distro() self.assertEqual(('eurolinux', '8.4', 'Vaduz'), dist) + @mock.patch('cloudinit.util.load_file') + def test_get_linux_miraclelinux8_rhrelease(self, m_os_release, + m_path_exists): + """Verify miraclelinux 8 read from redhat-release.""" + m_os_release.return_value = REDHAT_RELEASE_MIRACLELINUX_8 + m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists + dist = util.get_linux_distro() + self.assertEqual(('miracle', '8.4', 'Peony'), dist) + + @mock.patch('cloudinit.util.load_file') + def test_get_linux_miraclelinux8_osrelease(self, m_os_release, + m_path_exists): + """Verify miraclelinux 8 read from os-release.""" + m_os_release.return_value = OS_RELEASE_MIRACLELINUX_8 + m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists + dist = util.get_linux_distro() + self.assertEqual(('miraclelinux', '8', 'Peony'), dist) + @mock.patch('cloudinit.util.load_file') def test_get_linux_rocky8_rhrelease(self, m_os_release, m_path_exists): """Verify rocky linux 8 read from redhat-release.""" diff --git a/tools/.github-cla-signers b/tools/.github-cla-signers index 492ed15e0f2..a2da8a625cc 100644 --- a/tools/.github-cla-signers +++ b/tools/.github-cla-signers @@ -73,6 +73,7 @@ timothegenzmer tnt-dev tomponline 
tsanghan
+tSU-RooT
 vteratipally
 Vultaire
 WebSpider
diff --git a/tools/read-dependencies b/tools/read-dependencies
index 810154e42ac..efa5879cccc 100755
--- a/tools/read-dependencies
+++ b/tools/read-dependencies
@@ -24,6 +24,7 @@ DEFAULT_REQUIREMENTS = 'requirements.txt'
 DISTRO_PKG_TYPE_MAP = {
     'centos': 'redhat',
     'eurolinux': 'redhat',
+    'miraclelinux': 'redhat',
     'rocky': 'redhat',
     'redhat': 'redhat',
     'debian': 'debian',
@@ -70,12 +71,14 @@ DRY_DISTRO_INSTALL_PKG_CMD = {
     'rocky': ['yum', 'install', '--assumeyes'],
     'centos': ['yum', 'install', '--assumeyes'],
     'eurolinux': ['yum', 'install', '--assumeyes'],
+    'miraclelinux': ['yum', 'install', '--assumeyes'],
     'redhat': ['yum', 'install', '--assumeyes'],
 }
 
 DISTRO_INSTALL_PKG_CMD = {
     'rocky': MAYBE_RELIABLE_YUM_INSTALL,
     'eurolinux': MAYBE_RELIABLE_YUM_INSTALL,
+    'miraclelinux': MAYBE_RELIABLE_YUM_INSTALL,
     'centos': MAYBE_RELIABLE_YUM_INSTALL,
     'redhat': MAYBE_RELIABLE_YUM_INSTALL,
     'debian': ['apt', 'install', '-y'],
@@ -89,6 +92,7 @@ DISTRO_INSTALL_PKG_CMD = {
 CI_SYSTEM_BASE_PKGS = {
     'common': ['make', 'sudo', 'tar'],
     'eurolinux': ['python3-tox'],
+    'miraclelinux': ['python3-tox'],
     'redhat': ['python3-tox'],
     'centos': ['python3-tox'],
     'ubuntu': ['devscripts', 'python3-dev', 'libssl-dev', 'tox', 'sbuild'],
diff --git a/tools/render-cloudcfg b/tools/render-cloudcfg
index 186d61b7479..6642bd5897c 100755
--- a/tools/render-cloudcfg
+++ b/tools/render-cloudcfg
@@ -5,7 +5,7 @@ import os
 import sys
 
 VARIANTS = ["almalinux", "alpine", "amazon", "arch", "centos", "cloudlinux", "debian",
-            "eurolinux", "fedora", "freebsd", "netbsd", "openbsd", "openEuler", "photon",
+            "eurolinux", "fedora", "freebsd", "miraclelinux", "netbsd", "openbsd", "openEuler", "photon",
             "rhel", "suse","rocky", "ubuntu", "unknown", "virtuozzo"]

From 2969ceaa6447774f921fe7dd06a39c175e87e45c Mon Sep 17 00:00:00 2001
From: Brett Holman
Date: Tue, 7 Dec 2021 11:14:13 -0700
Subject: [PATCH 0015/2310] mock sleep() in azure test (#1137)

---
 tests/unittests/sources/test_azure.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/tests/unittests/sources/test_azure.py b/tests/unittests/sources/test_azure.py
index b221a0d7311..9728a1e7af7 100644
--- a/tests/unittests/sources/test_azure.py
+++ b/tests/unittests/sources/test_azure.py
@@ -2981,7 +2981,8 @@ def is_up_mock(key):
 
         m_is_up.side_effect = is_up_mock
 
-        dsa.wait_for_link_up("eth0")
+        with mock.patch('cloudinit.sources.DataSourceAzure.sleep'):
+            dsa.wait_for_link_up("eth0")
 
         self.assertEqual(2, m_try_set_link_up.call_count)
         self.assertEqual(2, m_is_up.call_count)

From b21afb0a8ab64543715dffff490253db8ecefb9f Mon Sep 17 00:00:00 2001
From: Brett Holman
Date: Tue, 7 Dec 2021 13:36:23 -0700
Subject: [PATCH 0016/2310] testing: Add deterministic test id (#1138)

Parametrized pytest tests get named based on their parameters. If a
name has random characters, it can break test collection when using
pytest-xdist. Replace the random name with a deterministic name.
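
For illustration only (not part of this change), pinning a stable
collection id on a parametrized case looks like the sketch below; the
test name and values here are hypothetical:

    import pytest

    @pytest.mark.parametrize(
        "cmdline,expected_cfg",
        [
            # id=... fixes the collected test name, so every xdist worker
            # computes the same id regardless of the (random) value.
            pytest.param("some-random-kernel-cmdline", None,
                         id="random_string"),
        ],
    )
    def test_example(cmdline, expected_cfg):
        assert expected_cfg is None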
---
 tests/unittests/test_util.py | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py
index 3b76ead867b..eab374bc788 100644
--- a/tests/unittests/test_util.py
+++ b/tests/unittests/test_util.py
@@ -1010,11 +1010,17 @@ def test_is_lxd_false_when_sock_device_absent(self, m_exists):
 
 
 class TestReadCcFromCmdline:
+    if hasattr(pytest, "param"):
+        random_string = pytest.param(
+            CiTestCase.random_string(), None, id="random_string")
+    else:
+        random_string = (CiTestCase.random_string(), None)
+
     @pytest.mark.parametrize(
         "cmdline,expected_cfg",
         [
             # Return None if cmdline has no cc:end_cc content.
-            (CiTestCase.random_string(), None),
+            random_string,
             # Return None if YAML content is empty string.
             ('foo cc: end_cc bar', None),
             # Return expected dictionary without trailing end_cc marker.

From 65c2cfd7f21758746444c8c79444994a4638d563 Mon Sep 17 00:00:00 2001
From: Brett Holman
Date: Wed, 8 Dec 2021 14:27:37 -0700
Subject: [PATCH 0017/2310] factor out function for getting top level
 directory of cloudinit (#1136)

Add a test helper to get top level directory

Many tests need to get the location of files & dirs within the
cloud-init project directory. Tests implement this in various ways,
and often those ways depend on the current working directory of the
pytest invocation.

Create helper functions (and tests) that get the path of the top
directory or any sub directory under the top directory. This function
does not depend on the environment.
---
 tests/unittests/config/test_cc_chef.py        | 17 ++++++----
 tests/unittests/config/test_cc_resolv_conf.py |  2 +-
 .../config/test_cc_update_etc_hosts.py        |  5 ++-
 tests/unittests/config/test_schema.py         | 16 ++++++---
 tests/unittests/helpers.py                    | 22 ++++++++++++-
 .../sources/vmware/test_vmware_config_file.py | 10 ++++--
 tests/unittests/test_ds_identify.py           | 10 ++++--
 tests/unittests/test_helpers.py               | 33 +++++++++++++++++++
 tests/unittests/test_render_cloudcfg.py       |  6 ++--
 tests/unittests/test_subp.py                  | 20 ++++++-----
 10 files changed, 111 insertions(+), 30 deletions(-)

diff --git a/tests/unittests/config/test_cc_chef.py b/tests/unittests/config/test_cc_chef.py
index 060293c8e4c..1c90a4fc019 100644
--- a/tests/unittests/config/test_cc_chef.py
+++ b/tests/unittests/config/test_cc_chef.py
@@ -9,13 +9,18 @@
 from cloudinit import util
 
 from tests.unittests.helpers import (
-    HttprettyTestCase, FilesystemMockingTestCase, mock, skipIf)
+    HttprettyTestCase,
+    FilesystemMockingTestCase,
+    mock,
+    skipIf,
+    cloud_init_project_dir,
+)
 from tests.unittests.util import get_cloud
 
 LOG = logging.getLogger(__name__)
 
-CLIENT_TEMPL = os.path.sep.join(["templates", "chef_client.rb.tmpl"])
+CLIENT_TEMPL = cloud_init_project_dir("templates/chef_client.rb.tmpl")
 
 # This is adjusted to use http because using with https causes issue
 # in some openssl/httpretty combinations.
@@ -138,7 +143,7 @@ def test_basic_config(self): Chef::Log::Formatter.show_time = true encrypted_data_bag_secret "/etc/chef/encrypted_data_bag_secret" """ - tpl_file = util.load_file('templates/chef_client.rb.tmpl') + tpl_file = util.load_file(CLIENT_TEMPL) self.patchUtils(self.tmp) self.patchOS(self.tmp) @@ -200,7 +205,7 @@ def test_firstboot_json(self): @skipIf(not os.path.isfile(CLIENT_TEMPL), CLIENT_TEMPL + " is not available") def test_template_deletes(self): - tpl_file = util.load_file('templates/chef_client.rb.tmpl') + tpl_file = util.load_file(CLIENT_TEMPL) self.patchUtils(self.tmp) self.patchOS(self.tmp) @@ -222,7 +227,7 @@ def test_template_deletes(self): CLIENT_TEMPL + " is not available") def test_validation_cert_and_validation_key(self): # test validation_cert content is written to validation_key path - tpl_file = util.load_file('templates/chef_client.rb.tmpl') + tpl_file = util.load_file(CLIENT_TEMPL) self.patchUtils(self.tmp) self.patchOS(self.tmp) @@ -245,7 +250,7 @@ def test_validation_cert_and_validation_key(self): def test_validation_cert_with_system(self): # test validation_cert content is not written over system file - tpl_file = util.load_file('templates/chef_client.rb.tmpl') + tpl_file = util.load_file(CLIENT_TEMPL) self.patchUtils(self.tmp) self.patchOS(self.tmp) diff --git a/tests/unittests/config/test_cc_resolv_conf.py b/tests/unittests/config/test_cc_resolv_conf.py index 0aa90a236ca..ab2de17ab1e 100644 --- a/tests/unittests/config/test_cc_resolv_conf.py +++ b/tests/unittests/config/test_cc_resolv_conf.py @@ -114,7 +114,7 @@ def test_resolv_conf_invalid_resolve_conf_fn(self, m_render_to_file): class TestGenerateResolvConf: dist = MockDistro() - tmpl_fn = "templates/resolv.conf.tmpl" + tmpl_fn = t_help.cloud_init_project_dir("templates/resolv.conf.tmpl") @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file") def test_dist_resolv_conf_fn(self, m_render_to_file): diff --git a/tests/unittests/config/test_cc_update_etc_hosts.py b/tests/unittests/config/test_cc_update_etc_hosts.py index 77a7f78fedd..35ad6413bbc 100644 --- a/tests/unittests/config/test_cc_update_etc_hosts.py +++ b/tests/unittests/config/test_cc_update_etc_hosts.py @@ -55,7 +55,10 @@ def test_write_etc_hosts_suse_template(self): 'manage_etc_hosts': 'template', 'hostname': 'cloud-init.test.us' } - shutil.copytree('templates', '%s/etc/cloud/templates' % self.tmp) + shutil.copytree( + t_help.cloud_init_project_dir('templates'), + '%s/etc/cloud/templates' % self.tmp, + ) distro = self._fetch_distro('sles') paths = helpers.Paths({}) paths.template_tpl = '%s' % self.tmp + '/etc/cloud/templates/%s.tmpl' diff --git a/tests/unittests/config/test_schema.py b/tests/unittests/config/test_schema.py index f90e0f62c5d..ed7ab5279ce 100644 --- a/tests/unittests/config/test_schema.py +++ b/tests/unittests/config/test_schema.py @@ -12,7 +12,6 @@ from textwrap import dedent from yaml import safe_load -import cloudinit from cloudinit.config.schema import ( CLOUD_CONFIG_HEADER, SchemaValidationError, @@ -27,7 +26,12 @@ MetaSchema, ) from cloudinit.util import write_file -from tests.unittests.helpers import CiTestCase, mock, skipUnlessJsonSchema +from tests.unittests.helpers import ( + CiTestCase, + mock, + skipUnlessJsonSchema, + cloud_init_project_dir, +) def get_schemas() -> dict: @@ -50,7 +54,10 @@ def get_module_variable(var_name) -> dict: """Inspect modules and get variable from module matching var_name""" schemas = {} - files = list(Path("../../cloudinit/config/").glob("cc_*.py")) + files = list( + 
Path(cloud_init_project_dir("cloudinit/config/")).glob("cc_*.py")
+    )
+
     modules = [mod.stem for mod in files]
 
     for module in modules:
@@ -616,8 +623,7 @@ def test_main_system_userdata_requires_root(self, m_getuid, capsys, paths):
 
 
 def _get_meta_doc_examples():
-    examples_dir = Path(
-        cloudinit.__file__).parent.parent / 'doc' / 'examples'
+    examples_dir = Path(cloud_init_project_dir('doc/examples'))
     assert examples_dir.is_dir()
 
     all_text_files = (f for f in examples_dir.glob('cloud-config*.txt')
diff --git a/tests/unittests/helpers.py b/tests/unittests/helpers.py
index ccd567931ea..e9afbd36629 100644
--- a/tests/unittests/helpers.py
+++ b/tests/unittests/helpers.py
@@ -12,10 +12,12 @@ import tempfile
 import time
 import unittest
+from pathlib import Path
 from contextlib import ExitStack, contextmanager
 from unittest import mock
 from unittest.util import strclass
 
+import cloudinit
 from cloudinit.config.schema import (
     SchemaValidationError, validate_cloudconfig_schema)
 from cloudinit import cloud
@@ -462,7 +464,7 @@ def wrap_and_call(prefix, mocks, func, *args, **kwargs):
 
 
 def resourceLocation(subname=None):
-    path = os.path.join('tests', 'data')
+    path = cloud_init_project_dir('tests/data')
     if not subname:
         return path
     return os.path.join(path, subname)
@@ -504,4 +506,22 @@ def __mock_assert_not_called(mmock):
         raise AssertionError(msg)
     mock.Mock.assert_not_called = __mock_assert_not_called
 
+
+def get_top_level_dir() -> Path:
+    """Return the absolute path to the top cloudinit project directory
+
+    @return Path('/path/to/cloud-init')
+    """
+    return Path(cloudinit.__file__).parent.parent.resolve()
+
+
+def cloud_init_project_dir(sub_path: str) -> str:
+    """Get a path within the cloudinit project directory
+
+    @return str of the combined path
+
+    Example: cloud_init_project_dir("my/path") -> "/path/to/cloud-init/my/path"
+    """
+    return str(get_top_level_dir() / sub_path)
+
 # vi: ts=4 expandtab
diff --git a/tests/unittests/sources/vmware/test_vmware_config_file.py b/tests/unittests/sources/vmware/test_vmware_config_file.py
index 54de113eaec..1d66ab4a6da 100644
--- a/tests/unittests/sources/vmware/test_vmware_config_file.py
+++ b/tests/unittests/sources/vmware/test_vmware_config_file.py
@@ -16,15 +16,21 @@ from cloudinit.sources.DataSourceOVF import read_vmware_imc
 from cloudinit.sources.helpers.vmware.imc.boot_proto import BootProtoEnum
 from cloudinit.sources.helpers.vmware.imc.config import Config
-from cloudinit.sources.helpers.vmware.imc.config_file import ConfigFile
+from cloudinit.sources.helpers.vmware.imc.config_file import (
+    ConfigFile as WrappedConfigFile,
+)
 from cloudinit.sources.helpers.vmware.imc.config_nic import gen_subnet
 from cloudinit.sources.helpers.vmware.imc.config_nic import NicConfigurator
-from tests.unittests.helpers import CiTestCase
+from tests.unittests.helpers import CiTestCase, cloud_init_project_dir
 
 logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
 logger = logging.getLogger(__name__)
 
 
+def ConfigFile(path: str):
+    return WrappedConfigFile(cloud_init_project_dir(path))
+
+
 class TestVmwareConfigFile(CiTestCase):
 
     def test_utility_methods(self):
diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py
index 62c3e4031f9..eb8992d9bb1 100644
--- a/tests/unittests/test_ds_identify.py
+++ b/tests/unittests/test_ds_identify.py
@@ -9,8 +9,12 @@ from cloudinit import subp
 from cloudinit import util
 from tests.unittests.helpers import (
-    CiTestCase, dir2dict, populate_dir, populate_dir_with_ts)
-
+    CiTestCase,
+    dir2dict,
+    populate_dir,
+    
populate_dir_with_ts,
+    cloud_init_project_dir,
+)
 from cloudinit.sources import DataSourceIBMCloud as ds_ibm
 from cloudinit.sources import DataSourceSmartOS as ds_smartos
 from cloudinit.sources import DataSourceOracle as ds_oracle
@@ -92,7 +96,7 @@
 
 
 class DsIdentifyBase(CiTestCase):
-    dsid_path = os.path.realpath('tools/ds-identify')
+    dsid_path = cloud_init_project_dir('tools/ds-identify')
     allowed_subp = ['sh']
 
     def call(self, rootd=None, mocks=None, func="main", args=None, files=None,
diff --git a/tests/unittests/test_helpers.py b/tests/unittests/test_helpers.py
index c6f9b94a136..f491f8cdb5c 100644
--- a/tests/unittests/test_helpers.py
+++ b/tests/unittests/test_helpers.py
@@ -3,6 +3,7 @@
 """Tests of the built-in user data handlers."""
 
 import os
+from pathlib import Path
 
 from tests.unittests import helpers as test_helpers
 
@@ -34,4 +35,36 @@ def test_get_ipath_and_empty_instance_id_returns_none(self):
 
         self.assertIsNone(mypaths.get_ipath())
 
+
+class Testcloud_init_project_dir:
+    top_dir = test_helpers.get_top_level_dir()
+
+    @staticmethod
+    def _get_top_level_dir_alt_implementation():
+        """Alternative implementation for comparing against.
+
+        Note: Recursively searching for .git/ fails during build tests due to
+        .git not existing. This implementation assumes that ../../../ is the
+        relative path to the cloud-init project directory from this file.
+        """
+        out = Path(__file__).parent.parent.parent.resolve()
+        return out
+
+    def test_top_level_dir(self):
+        """Assert the location of the top project directory is correct"""
+        assert (self.top_dir ==
+                self._get_top_level_dir_alt_implementation())
+
+    def test_cloud_init_project_dir(self):
+        """Assert cloud_init_project_dir produces an expected location
+
+        Compare the returned value to an alternate (naive) implementation
+        """
+        assert (
+            str(Path(self.top_dir, "test"))
+            == test_helpers.cloud_init_project_dir("test")
+            == str(Path(self._get_top_level_dir_alt_implementation(), "test"))
+        )
+
+
 # vi: ts=4 expandtab
diff --git a/tests/unittests/test_render_cloudcfg.py b/tests/unittests/test_render_cloudcfg.py
index 00d50e668d9..b22227470de 100644
--- a/tests/unittests/test_render_cloudcfg.py
+++ b/tests/unittests/test_render_cloudcfg.py
@@ -1,12 +1,12 @@
 """Tests for tools/render-cloudcfg"""
 
-import os
 import sys
 
 import pytest
 
 from cloudinit import subp
 from cloudinit import util
+from tests.unittests.helpers import cloud_init_project_dir
 
 # TODO(Look to align with tools.render-cloudcfg or cloudinit.distos.OSFAMILIES)
 DISTRO_VARIANTS = ["amazon", "arch", "centos", "debian", "eurolinux", "fedora",
@@ -17,8 +17,8 @@
 
 @pytest.mark.allow_subp_for(sys.executable)
 class TestRenderCloudCfg:
-    cmd = [sys.executable, os.path.realpath('tools/render-cloudcfg')]
-    tmpl_path = os.path.realpath('config/cloud.cfg.tmpl')
+    cmd = [sys.executable, cloud_init_project_dir('tools/render-cloudcfg')]
+    tmpl_path = cloud_init_project_dir('config/cloud.cfg.tmpl')
 
     @pytest.mark.parametrize('variant', (DISTRO_VARIANTS))
     def test_variant_sets_distro_in_cloud_cfg(self, variant, tmpdir):
diff --git a/tests/unittests/test_subp.py b/tests/unittests/test_subp.py
index ec513d01242..572510d7a84 100644
--- a/tests/unittests/test_subp.py
+++ b/tests/unittests/test_subp.py
@@ -10,7 +10,7 @@
 from unittest import mock
 
 from cloudinit import subp, util
-from tests.unittests.helpers import CiTestCase
+from tests.unittests.helpers import CiTestCase, get_top_level_dir
 
 BASH = subp.which('bash')
 
@@ -232,13 +232,17 @@ def test_c_lang_can_take_utf8_args(self):
         the default encoding will be set to 
ascii. In such an environment Popen(['command', 'non-ascii-arg']) would cause a UnicodeDecodeError. """ - python_prog = '\n'.join([ - 'import json, sys', - 'from cloudinit.subp import subp', - 'data = sys.stdin.read()', - 'cmd = json.loads(data)', - 'subp(cmd, capture=False)', - '']) + python_prog = '\n'.join( + [ + 'import json, sys', + 'sys.path.insert(0, "{}")'.format(get_top_level_dir()), + 'from cloudinit.subp import subp', + 'data = sys.stdin.read()', + 'cmd = json.loads(data)', + 'subp(cmd, capture=False)', + '', + ] + ) cmd = [BASH, '-c', 'echo -n "$@"', '--', self.utf8_valid.decode("utf-8")] python_subp = [sys.executable, '-c', python_prog] From 6760bf1462d1686ee97d08a626cc0892cea588bf Mon Sep 17 00:00:00 2001 From: Brett Holman Date: Wed, 8 Dec 2021 17:31:33 -0700 Subject: [PATCH 0018/2310] travis - don't run integration tests if no deb (#1139) If building the *.deb fails, exit Currently integration tests will run and fail with a non-obvious message. This makes it so the last thing in the logs is whatever caused the build to fail. --- .travis.yml | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/.travis.yml b/.travis.yml index 1582e8294ac..9470cc31de8 100644 --- a/.travis.yml +++ b/.travis.yml @@ -119,9 +119,11 @@ matrix: sudo tar --sparse --xattrs --xattrs-include=* -cf "$TRAVIS_BUILD_DIR/chroots/xenial-amd64.tar" -C /var/lib/schroot/chroots/xenial-amd64 . fi # Use sudo to get a new shell where we're in the sbuild group - - sudo -E su $USER -c 'sbuild --nolog --no-run-lintian --verbose --dist=xenial cloud-init_*.dsc' - - ssh-keygen -P "" -q -f ~/.ssh/id_rsa - - sg lxd -c 'CLOUD_INIT_CLOUD_INIT_SOURCE="$(ls *.deb)" tox -e integration-tests-ci' + # Don't run integration tests when build fails + - | + sudo -E su $USER -c 'sbuild --nolog --no-run-lintian --verbose --dist=xenial cloud-init_*.dsc' && + ssh-keygen -P "" -q -f ~/.ssh/id_rsa && + sg lxd -c 'CLOUD_INIT_CLOUD_INIT_SOURCE="$(ls *.deb)" tox -e integration-tests-ci' - python: 3.5 env: TOXENV=xenial From 8df8f43a2769b8f743ce127f2bb1d2a892b19d68 Mon Sep 17 00:00:00 2001 From: Ksenija Stanojevic Date: Thu, 9 Dec 2021 12:30:45 -0800 Subject: [PATCH 0019/2310] add KsenijaS as a contributor (#1145) --- tools/.github-cla-signers | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/.github-cla-signers b/tools/.github-cla-signers index a2da8a625cc..aed7b4d0168 100644 --- a/tools/.github-cla-signers +++ b/tools/.github-cla-signers @@ -41,6 +41,7 @@ jqueuniet jsf9k jshen28 klausenbusk +KsenijaS landon912 lucasmoura lucendio From b591e9dba6c85f3934bc309032c3e436b8dcb3ac Mon Sep 17 00:00:00 2001 From: Ksenija Stanojevic Date: Thu, 9 Dec 2021 14:45:37 -0800 Subject: [PATCH 0020/2310] Improve error log message when mount failed (#1140) --- cloudinit/util.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/cloudinit/util.py b/cloudinit/util.py index cad087a1615..d7208f11219 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1752,8 +1752,10 @@ def mount_cb(device, callback, data=None, mtype=None, mountpoint = tmpd break except (IOError, OSError) as exc: - LOG.debug("Failed mount of '%s' as '%s': %s", - device, mtype, exc) + LOG.debug("Failed to mount device: '%s' with type: '%s' " + "using mount command: '%s', " + "which caused exception: %s", + device, mtype, ' '.join(mountcmd), exc) failure_reason = exc if not mountpoint: raise MountFailedError("Failed mounting %s to %s due to: %s" % From 24739592217e5ba91e09e8c28b852d31a2c0cc77 Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?Gon=C3=A9ri=20Le=20Bouder?=
Date: Thu, 9 Dec 2021 17:46:27 -0500
Subject: [PATCH 0021/2310] find_devs/openbsd: accept ISO on disk (#1132)

When the metadata is an ISO image and is exposed through a disk, the
device is called `/dev/sd?a` internally. For instance `/dev/sd1a`. It
can then be mounted with `mount_cd9660 /dev/sd1a /mnt`.

Metadata in the FAT32 format is exposed as `/dev/sd?i`.

With this change, we try to mount `/dev/sd?a` in addition to
`/dev/sd?i`.

Closes: https://github.com/ContainerCraft/kmi/issues/12
---
 cloudinit/util.py            | 5 +++--
 tests/unittests/test_util.py | 4 ++--
 2 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/cloudinit/util.py b/cloudinit/util.py
index d7208f11219..b9c584d1053 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -1217,8 +1217,9 @@ def find_devs_with_openbsd(criteria=None, oformat='device',
             continue
         if entry == 'fd0:':
             continue
-        part_id = 'a' if entry.startswith('cd') else 'i'
-        devlist.append(entry[:-1] + part_id)
+        devlist.append(entry[:-1] + 'a')
+        if not entry.startswith('cd'):
+            devlist.append(entry[:-1] + 'i')
     if criteria == "TYPE=iso9660":
         devlist = [i for i in devlist if i.startswith('cd')]
     elif criteria in ["LABEL=CONFIG-2", "TYPE=vfat"]:
diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py
index eab374bc788..c551835f573 100644
--- a/tests/unittests/test_util.py
+++ b/tests/unittests/test_util.py
@@ -2330,7 +2330,7 @@ def test_find_devs_with(self, m_subp):
     def test_find_devs_with_openbsd(self, m_subp):
         m_subp.return_value = ('cd0:,sd0:630d98d32b5d3759,sd1:,fd0:', '')
         devlist = util.find_devs_with_openbsd()
-        assert devlist == ['/dev/cd0a', '/dev/sd1i']
+        assert devlist == ['/dev/cd0a', '/dev/sd1a', '/dev/sd1i']
 
     @mock.patch('cloudinit.subp.subp')
     def test_find_devs_with_openbsd_with_criteria(self, m_subp):
@@ -2340,7 +2340,7 @@ def test_find_devs_with_openbsd_with_criteria(self, m_subp):
         # lp: #1841466
         devlist = util.find_devs_with_openbsd(criteria="LABEL_FATBOOT=A_LABEL")
-        assert devlist == ['/dev/cd0a', '/dev/sd1i']
+        assert devlist == ['/dev/cd0a', '/dev/sd1a', '/dev/sd1i']

From e9634266ea52bf184727fb0782d5dc35f9ed1468 Mon Sep 17 00:00:00 2001
From: Chris Patterson
Date: Fri, 10 Dec 2021 12:16:16 -0500
Subject: [PATCH 0022/2310] sources/azure: remove unnecessary hostname bounce
 (#1143)

Thanks to [1], the hostname is set prior to network bring-up.

The Azure data source has been bouncing the hostname during setup(),
occurring after the hostname has already been properly configured.

Note that this doesn't prevent leaking the image's hostname during
Azure's _get_data() when it brings up ephemeral DHCP. However, as we
are not guaranteed to have the hostname metadata available from a
truly "local" source, this behavior is to be expected unless we disable
`send host-name` from dhclient config.
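
For reference, the dhclient directive in question typically looks like
the following (illustrative only; the exact stanza varies by image):

    # /etc/dhcp/dhclient.conf
    send host-name = gethostname();

Removing or commenting out that line would stop dhclient from sending
the image's hostname during the ephemeral DHCP phase.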
[1]: https://github.com/canonical/cloud-init/commit/133ad2cb327ad17b7b81319fac8f9f14577c04df Signed-off-by: Chris Patterson --- cloudinit/sources/DataSourceAzure.py | 126 ----------- doc/examples/cloud-config-datasources.txt | 6 - doc/rtd/topics/datasources/azure.rst | 20 -- tests/unittests/sources/test_azure.py | 263 ---------------------- 4 files changed, 415 deletions(-) diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 6c1bc085cd7..eee98fa870e 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -6,7 +6,6 @@ import base64 from collections import namedtuple -import contextlib import crypt from functools import partial import os @@ -52,20 +51,10 @@ DS_NAME = 'Azure' DEFAULT_METADATA = {"instance-id": "iid-AZURE-NODE"} -BOUNCE_COMMAND_IFUP = [ - 'sh', '-xc', - "i=$interface; x=0; ifdown $i || x=$?; ifup $i || x=$?; exit $x" -] -BOUNCE_COMMAND_FREEBSD = [ - 'sh', '-xc', - ("i=$interface; x=0; ifconfig down $i || x=$?; " - "ifconfig up $i || x=$?; exit $x") -] # azure systems will always have a resource disk, and 66-azure-ephemeral.rules # ensures that it gets linked to this path. RESOURCE_DISK_PATH = '/dev/disk/cloud/azure_resource' -DEFAULT_PRIMARY_NIC = 'eth0' LEASE_FILE = '/var/lib/dhcp/dhclient.eth0.leases' DEFAULT_FS = 'ext4' # DMI chassis-asset-tag is set static for all azure instances @@ -247,7 +236,6 @@ def get_resource_disk_on_freebsd(port_id): # update the FreeBSD specific information if util.is_FreeBSD(): - DEFAULT_PRIMARY_NIC = 'hn0' LEASE_FILE = '/var/db/dhclient.leases.hn0' DEFAULT_FS = 'freebsd-ufs' res_disk = get_resource_disk_on_freebsd(1) @@ -261,13 +249,6 @@ def get_resource_disk_on_freebsd(port_id): BUILTIN_DS_CONFIG = { 'data_dir': AGENT_SEED_DIR, - 'set_hostname': True, - 'hostname_bounce': { - 'interface': DEFAULT_PRIMARY_NIC, - 'policy': True, - 'command': 'builtin', - 'hostname_command': 'hostname', - }, 'disk_aliases': {'ephemeral0': RESOURCE_DISK_PATH}, 'dhclient_lease_file': LEASE_FILE, 'apply_network_config': True, # Use IMDS published network configuration @@ -293,46 +274,6 @@ def get_resource_disk_on_freebsd(port_id): DEF_PASSWD_REDACTION = 'REDACTED' -def get_hostname(hostname_command='hostname'): - if not isinstance(hostname_command, (list, tuple)): - hostname_command = (hostname_command,) - return subp.subp(hostname_command, capture=True)[0].strip() - - -def set_hostname(hostname, hostname_command='hostname'): - subp.subp([hostname_command, hostname]) - - -@azure_ds_telemetry_reporter -@contextlib.contextmanager -def temporary_hostname(temp_hostname, cfg, hostname_command='hostname'): - """ - Set a temporary hostname, restoring the previous hostname on exit. - - Will have the value of the previous hostname when used as a context - manager, or None if the hostname was not changed. 
- """ - policy = cfg['hostname_bounce']['policy'] - previous_hostname = get_hostname(hostname_command) - if (not util.is_true(cfg.get('set_hostname')) or - util.is_false(policy) or - (previous_hostname == temp_hostname and policy != 'force')): - yield None - return - try: - set_hostname(temp_hostname, hostname_command) - except Exception as e: - report_diagnostic_event( - 'Failed setting temporary hostname: %s' % e, - logger_func=LOG.warning) - yield None - return - try: - yield previous_hostname - finally: - set_hostname(previous_hostname, hostname_command) - - class DataSourceAzure(sources.DataSource): dsname = 'Azure' @@ -369,34 +310,6 @@ def __str__(self): root = sources.DataSource.__str__(self) return "%s [seed=%s]" % (root, self.seed) - @azure_ds_telemetry_reporter - def bounce_network_with_azure_hostname(self): - # When using cloud-init to provision, we have to set the hostname from - # the metadata and "bounce" the network to force DDNS to update via - # dhclient - azure_hostname = self.metadata.get('local-hostname') - LOG.debug("Hostname in metadata is %s", azure_hostname) - hostname_command = self.ds_cfg['hostname_bounce']['hostname_command'] - - with temporary_hostname(azure_hostname, self.ds_cfg, - hostname_command=hostname_command) \ - as previous_hn: - if (previous_hn is not None and - util.is_true(self.ds_cfg.get('set_hostname'))): - cfg = self.ds_cfg['hostname_bounce'] - - # "Bouncing" the network - try: - return perform_hostname_bounce(hostname=azure_hostname, - cfg=cfg, - prev_hostname=previous_hn) - except Exception as e: - report_diagnostic_event( - "Failed publishing hostname: %s" % e, - logger_func=LOG.warning) - util.logexc(LOG, "handling set_hostname failed") - return False - def _get_subplatform(self): """Return the subplatform metadata source details.""" if self.seed.startswith('/dev'): @@ -1502,9 +1415,6 @@ def _negotiate(self): On success, returns a dictionary including 'public_keys'. On failure, returns False. """ - - self.bounce_network_with_azure_hostname() - pubkey_info = None ssh_keys_and_source = self._get_public_ssh_keys_and_source() @@ -1763,42 +1673,6 @@ def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, return -@azure_ds_telemetry_reporter -def perform_hostname_bounce(hostname, cfg, prev_hostname): - # set the hostname to 'hostname' if it is not already set to that. - # then, if policy is not off, bounce the interface using command - # Returns True if the network was bounced, False otherwise. - command = cfg['command'] - interface = cfg['interface'] - policy = cfg['policy'] - - msg = ("hostname=%s policy=%s interface=%s" % - (hostname, policy, interface)) - env = os.environ.copy() - env['interface'] = interface - env['hostname'] = hostname - env['old_hostname'] = prev_hostname - - if command == "builtin": - if util.is_FreeBSD(): - command = BOUNCE_COMMAND_FREEBSD - elif subp.which('ifup'): - command = BOUNCE_COMMAND_IFUP - else: - LOG.debug( - "Skipping network bounce: ifupdown utils aren't present.") - # Don't bounce as networkd handles hostname DDNS updates - return False - LOG.debug("pubhname: publishing hostname [%s]", msg) - shell = not isinstance(command, (list, tuple)) - # capture=False, see comments in bug 1202758 and bug 1206164. 
- util.log_time(logfunc=LOG.debug, msg="publishing hostname", - get_uptime=True, func=subp.subp, - kwargs={'args': command, 'shell': shell, 'capture': False, - 'env': env}) - return True - - @azure_ds_telemetry_reporter def write_files(datadir, files, dirmode=None): diff --git a/doc/examples/cloud-config-datasources.txt b/doc/examples/cloud-config-datasources.txt index d1a4d79eb7f..7a8c42842af 100644 --- a/doc/examples/cloud-config-datasources.txt +++ b/doc/examples/cloud-config-datasources.txt @@ -45,12 +45,6 @@ datasource: instance-id: i-87018aed local-hostname: myhost.internal - Azure: - set_hostname: True - hostname_bounce: - interface: eth0 - policy: on # [can be 'on', 'off' or 'force'] - SmartOS: # For KVM guests: # Smart OS datasource works over a serial console interacting with diff --git a/doc/rtd/topics/datasources/azure.rst b/doc/rtd/topics/datasources/azure.rst index ad9f22361bd..bc672486605 100644 --- a/doc/rtd/topics/datasources/azure.rst +++ b/doc/rtd/topics/datasources/azure.rst @@ -60,20 +60,6 @@ The settings that may be configured are: custom DHCP option 245 from Azure fabric. * **disk_aliases**: A dictionary defining which device paths should be interpreted as ephemeral images. See cc_disk_setup module for more info. - * **hostname_bounce**: A dictionary Azure hostname bounce behavior to react to - metadata changes. The '``hostname_bounce: command``' entry can be either - the literal string 'builtin' or a command to execute. The command will be - invoked after the hostname is set, and will have the 'interface' in its - environment. If ``set_hostname`` is not true, then ``hostname_bounce`` - will be ignored. An example might be: - - ``command: ["sh", "-c", "killall dhclient; dhclient $interface"]`` - - * **hostname_bounce**: A dictionary Azure hostname bounce behavior to react to - metadata changes. Azure will throttle ifup/down in some cases after metadata - has been updated to inform dhcp server about updated hostnames. - * **set_hostname**: Boolean set to True when we want Azure to set the hostname - based on metadata. Configuration for the datasource can also be read from a ``dscfg`` entry in the ``LinuxProvisioningConfigurationSet``. 
Content in @@ -91,12 +77,6 @@ An example configuration with the default values is provided below: dhclient_lease_file: /var/lib/dhcp/dhclient.eth0.leases disk_aliases: ephemeral0: /dev/disk/cloud/azure_resource - hostname_bounce: - interface: eth0 - command: builtin - policy: true - hostname_command: hostname - set_hostname: true Userdata diff --git a/tests/unittests/sources/test_azure.py b/tests/unittests/sources/test_azure.py index 9728a1e7af7..ad8be04b201 100644 --- a/tests/unittests/sources/test_azure.py +++ b/tests/unittests/sources/test_azure.py @@ -696,9 +696,6 @@ def _dmi_mocks(key): self.apply_patches([ (dsaz, 'list_possible_azure_ds', self.m_list_possible_azure_ds), - (dsaz, 'perform_hostname_bounce', mock.MagicMock()), - (dsaz, 'get_hostname', mock.MagicMock()), - (dsaz, 'set_hostname', mock.MagicMock()), (dsaz, '_is_platform_viable', self.m_is_platform_viable), (dsaz, 'get_metadata_from_fabric', @@ -1794,21 +1791,6 @@ def test_blacklist_through_distro( m_net_get_interfaces.assert_called_with( blacklist_drivers=dsaz.BLACKLIST_DRIVERS) - @mock.patch(MOCKPATH + 'subp.subp', autospec=True) - def test_get_hostname_with_no_args(self, m_subp): - dsaz.get_hostname() - m_subp.assert_called_once_with(("hostname",), capture=True) - - @mock.patch(MOCKPATH + 'subp.subp', autospec=True) - def test_get_hostname_with_string_arg(self, m_subp): - dsaz.get_hostname(hostname_command="hostname") - m_subp.assert_called_once_with(("hostname",), capture=True) - - @mock.patch(MOCKPATH + 'subp.subp', autospec=True) - def test_get_hostname_with_iterable_arg(self, m_subp): - dsaz.get_hostname(hostname_command=("hostname",)) - m_subp.assert_called_once_with(("hostname",), capture=True) - @mock.patch( 'cloudinit.sources.helpers.azure.OpenSSLManager.parse_certificates') def test_get_public_ssh_keys_with_imds(self, m_parse_certificates): @@ -2023,251 +2005,6 @@ def test_userdata_from_imds_with_customdata_from_OVF( self.assertEqual(dsrc.userdata_raw, userdataOVF.encode('utf-8')) -class TestAzureBounce(CiTestCase): - - with_logs = True - - def mock_out_azure_moving_parts(self): - - def _load_possible_azure_ds(seed_dir, cache_dir): - yield seed_dir - yield dsaz.DEFAULT_PROVISIONING_ISO_DEV - if cache_dir: - yield cache_dir - - self.patches.enter_context( - mock.patch.object(dsaz.util, 'wait_for_files')) - self.patches.enter_context( - mock.patch.object( - dsaz, 'list_possible_azure_ds', - mock.MagicMock(side_effect=_load_possible_azure_ds))) - self.patches.enter_context( - mock.patch.object(dsaz, 'get_metadata_from_fabric', - mock.MagicMock(return_value={}))) - self.patches.enter_context( - mock.patch.object(dsaz, 'get_metadata_from_imds', - mock.MagicMock(return_value={}))) - self.patches.enter_context( - mock.patch.object(dsaz.subp, 'which', lambda x: True)) - self.patches.enter_context(mock.patch.object( - dsaz, '_get_random_seed', return_value='wild')) - - def _dmi_mocks(key): - if key == 'system-uuid': - return 'D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8' - elif key == 'chassis-asset-tag': - return '7783-7084-3265-9085-8269-3286-77' - raise RuntimeError('should not get here') - - self.patches.enter_context( - mock.patch.object(dsaz.dmi, 'read_dmi_data', - mock.MagicMock(side_effect=_dmi_mocks))) - - def setUp(self): - super(TestAzureBounce, self).setUp() - self.tmp = self.tmp_dir() - self.waagent_d = os.path.join(self.tmp, 'var', 'lib', 'waagent') - self.paths = helpers.Paths( - {'cloud_dir': self.tmp, 'run_dir': self.tmp}) - dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d - self.patches = ExitStack() - 
self.mock_out_azure_moving_parts() - self.get_hostname = self.patches.enter_context( - mock.patch.object(dsaz, 'get_hostname')) - self.set_hostname = self.patches.enter_context( - mock.patch.object(dsaz, 'set_hostname')) - self.subp = self.patches.enter_context( - mock.patch(MOCKPATH + 'subp.subp')) - self.find_fallback_nic = self.patches.enter_context( - mock.patch('cloudinit.net.find_fallback_nic', return_value='eth9')) - - def tearDown(self): - self.patches.close() - super(TestAzureBounce, self).tearDown() - - def _get_ds(self, ovfcontent=None): - if ovfcontent is not None: - populate_dir(os.path.join(self.paths.seed_dir, "azure"), - {'ovf-env.xml': ovfcontent}) - dsrc = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths) - return dsrc - - def _get_and_setup(self, dsrc): - ret = dsrc.get_data() - if ret: - dsrc.setup(True) - return ret - - def get_ovf_env_with_dscfg(self, hostname, cfg): - odata = { - 'HostName': hostname, - 'dscfg': { - 'text': b64e(yaml.dump(cfg)), - 'encoding': 'base64' - } - } - return construct_valid_ovf_env(data=odata) - - def test_disabled_bounce_does_not_change_hostname(self): - cfg = {'hostname_bounce': {'policy': 'off'}} - ds = self._get_ds(self.get_ovf_env_with_dscfg('test-host', cfg)) - ds.get_data() - self.assertEqual(0, self.set_hostname.call_count) - - @mock.patch(MOCKPATH + 'perform_hostname_bounce') - def test_disabled_bounce_does_not_perform_bounce( - self, perform_hostname_bounce): - cfg = {'hostname_bounce': {'policy': 'off'}} - ds = self._get_ds(self.get_ovf_env_with_dscfg('test-host', cfg)) - ds.get_data() - self.assertEqual(0, perform_hostname_bounce.call_count) - - def test_same_hostname_does_not_change_hostname(self): - host_name = 'unchanged-host-name' - self.get_hostname.return_value = host_name - cfg = {'hostname_bounce': {'policy': 'yes'}} - ds = self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg)) - ds.get_data() - self.assertEqual(0, self.set_hostname.call_count) - - @mock.patch(MOCKPATH + 'perform_hostname_bounce') - def test_unchanged_hostname_does_not_perform_bounce( - self, perform_hostname_bounce): - host_name = 'unchanged-host-name' - self.get_hostname.return_value = host_name - cfg = {'hostname_bounce': {'policy': 'yes'}} - ds = self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg)) - ds.get_data() - self.assertEqual(0, perform_hostname_bounce.call_count) - - @mock.patch(MOCKPATH + 'perform_hostname_bounce') - def test_force_performs_bounce_regardless(self, perform_hostname_bounce): - host_name = 'unchanged-host-name' - self.get_hostname.return_value = host_name - cfg = {'hostname_bounce': {'policy': 'force'}} - dsrc = self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg)) - ret = self._get_and_setup(dsrc) - self.assertTrue(ret) - self.assertEqual(1, perform_hostname_bounce.call_count) - - def test_bounce_skipped_on_ifupdown_absent(self): - host_name = 'unchanged-host-name' - self.get_hostname.return_value = host_name - cfg = {'hostname_bounce': {'policy': 'force'}} - dsrc = self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg)) - patch_path = MOCKPATH + 'subp.which' - with mock.patch(patch_path) as m_which: - m_which.return_value = None - ret = self._get_and_setup(dsrc) - self.assertEqual([mock.call('ifup')], m_which.call_args_list) - self.assertTrue(ret) - self.assertIn( - "Skipping network bounce: ifupdown utils aren't present.", - self.logs.getvalue()) - - def test_different_hostnames_sets_hostname(self): - expected_hostname = 'azure-expected-host-name' - self.get_hostname.return_value = 
'default-host-name' - dsrc = self._get_ds( - self.get_ovf_env_with_dscfg(expected_hostname, {})) - ret = self._get_and_setup(dsrc) - self.assertTrue(ret) - self.assertEqual(expected_hostname, - self.set_hostname.call_args_list[0][0][0]) - - @mock.patch(MOCKPATH + 'perform_hostname_bounce') - def test_different_hostnames_performs_bounce( - self, perform_hostname_bounce): - expected_hostname = 'azure-expected-host-name' - self.get_hostname.return_value = 'default-host-name' - dsrc = self._get_ds( - self.get_ovf_env_with_dscfg(expected_hostname, {})) - ret = self._get_and_setup(dsrc) - self.assertTrue(ret) - self.assertEqual(1, perform_hostname_bounce.call_count) - - def test_different_hostnames_sets_hostname_back(self): - initial_host_name = 'default-host-name' - self.get_hostname.return_value = initial_host_name - dsrc = self._get_ds( - self.get_ovf_env_with_dscfg('some-host-name', {})) - ret = self._get_and_setup(dsrc) - self.assertTrue(ret) - self.assertEqual(initial_host_name, - self.set_hostname.call_args_list[-1][0][0]) - - @mock.patch(MOCKPATH + 'perform_hostname_bounce') - def test_failure_in_bounce_still_resets_host_name( - self, perform_hostname_bounce): - perform_hostname_bounce.side_effect = Exception - initial_host_name = 'default-host-name' - self.get_hostname.return_value = initial_host_name - dsrc = self._get_ds( - self.get_ovf_env_with_dscfg('some-host-name', {})) - ret = self._get_and_setup(dsrc) - self.assertTrue(ret) - self.assertEqual(initial_host_name, - self.set_hostname.call_args_list[-1][0][0]) - - @mock.patch.object(dsaz, 'get_boot_telemetry') - def test_environment_correct_for_bounce_command( - self, mock_get_boot_telemetry): - interface = 'int0' - hostname = 'my-new-host' - old_hostname = 'my-old-host' - self.get_hostname.return_value = old_hostname - cfg = {'hostname_bounce': {'interface': interface, 'policy': 'force'}} - data = self.get_ovf_env_with_dscfg(hostname, cfg) - dsrc = self._get_ds(data) - ret = self._get_and_setup(dsrc) - self.assertTrue(ret) - self.assertEqual(1, self.subp.call_count) - bounce_env = self.subp.call_args[1]['env'] - self.assertEqual(interface, bounce_env['interface']) - self.assertEqual(hostname, bounce_env['hostname']) - self.assertEqual(old_hostname, bounce_env['old_hostname']) - - @mock.patch.object(dsaz, 'get_boot_telemetry') - def test_default_bounce_command_ifup_used_by_default( - self, mock_get_boot_telemetry): - cfg = {'hostname_bounce': {'policy': 'force'}} - data = self.get_ovf_env_with_dscfg('some-hostname', cfg) - dsrc = self._get_ds(data) - ret = self._get_and_setup(dsrc) - self.assertTrue(ret) - self.assertEqual(1, self.subp.call_count) - bounce_args = self.subp.call_args[1]['args'] - self.assertEqual( - dsaz.BOUNCE_COMMAND_IFUP, bounce_args) - - @mock.patch(MOCKPATH + 'perform_hostname_bounce') - def test_set_hostname_option_can_disable_bounce( - self, perform_hostname_bounce): - cfg = {'set_hostname': False, 'hostname_bounce': {'policy': 'force'}} - data = self.get_ovf_env_with_dscfg('some-hostname', cfg) - self._get_ds(data).get_data() - - self.assertEqual(0, perform_hostname_bounce.call_count) - - def test_set_hostname_option_can_disable_hostname_set(self): - cfg = {'set_hostname': False, 'hostname_bounce': {'policy': 'force'}} - data = self.get_ovf_env_with_dscfg('some-hostname', cfg) - self._get_ds(data).get_data() - - self.assertEqual(0, self.set_hostname.call_count) - - @mock.patch(MOCKPATH + 'perform_hostname_bounce') - def test_set_hostname_failed_disable_bounce( - self, perform_hostname_bounce): - cfg = 
{'set_hostname': True, 'hostname_bounce': {'policy': 'force'}}
-        self.get_hostname.return_value = "old-hostname"
-        self.set_hostname.side_effect = Exception
-        data = self.get_ovf_env_with_dscfg('some-hostname', cfg)
-        self._get_ds(data).get_data()
-
-        self.assertEqual(0, perform_hostname_bounce.call_count)
-
-
 class TestLoadAzureDsDir(CiTestCase):
     """Tests for load_azure_ds_dir."""

From f4692c5d96323dc635fca26b742199d4c41f88d3 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Gon=C3=A9ri=20Le=20Bouder?=
Date: Mon, 13 Dec 2021 12:31:39 -0500
Subject: [PATCH 0023/2310] find_devs_with_openbsd: ensure we return the last
 entry (#1149)

`sysctl -n hw.disknames` returns a trailing `\n`. We need to clean this
up.

In addition, the criteria matching system is a source of problems
because:

- we don't have a way to look up the label of the partition
- we've got situations where an ISO image can be exposed through a
  virtio block device.

So we just totally ignore the value of `criteria`. We end up with a
slightly longer loop of mount-retry, but this way we're sure we don't
miss a configuration disk.

Tested on KubeVirt with the help of Brady Pratt @jbpratt.
---
 cloudinit/util.py            | 8 +-------
 tests/unittests/test_util.py | 2 +-
 2 files changed, 2 insertions(+), 8 deletions(-)

diff --git a/cloudinit/util.py b/cloudinit/util.py
index b9c584d1053..27821de57e5 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -1211,7 +1211,7 @@ def find_devs_with_openbsd(criteria=None, oformat='device',
                            tag=None, no_cache=False, path=None):
     out, _err = subp.subp(['sysctl', '-n', 'hw.disknames'], rcs=[0])
     devlist = []
-    for entry in out.split(','):
+    for entry in out.rstrip().split(','):
         if not entry.endswith(':'):
             # ffs partition with a serial, not a config-drive
             continue
@@ -1220,12 +1220,6 @@ def find_devs_with_openbsd(criteria=None, oformat='device',
             devlist.append(entry[:-1] + 'a')
         if not entry.startswith('cd'):
             devlist.append(entry[:-1] + 'i')
-    if criteria == "TYPE=iso9660":
-        devlist = [i for i in devlist if i.startswith('cd')]
-    elif criteria in ["LABEL=CONFIG-2", "TYPE=vfat"]:
-        devlist = [i for i in devlist if not i.startswith('cd')]
-    elif criteria:
-        LOG.debug("Unexpected criteria: %s", criteria)
     return ['/dev/' + i for i in devlist]

diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py
index c551835f573..61b9e303249 100644
--- a/tests/unittests/test_util.py
+++ b/tests/unittests/test_util.py
@@ -2336,7 +2336,7 @@ def test_find_devs_with_openbsd(self, m_subp):
     def test_find_devs_with_openbsd_with_criteria(self, m_subp):
         m_subp.return_value = ('cd0:,sd0:630d98d32b5d3759,sd1:,fd0:', '')
         devlist = util.find_devs_with_openbsd(criteria="TYPE=iso9660")
-        assert devlist == ['/dev/cd0a']
+        assert devlist == ['/dev/cd0a', '/dev/sd1a', '/dev/sd1i']

         # lp: #1841466
         devlist = util.find_devs_with_openbsd(criteria="LABEL_FATBOOT=A_LABEL")

From 3303b7041276a8bf7ef23550da5f06b48a4e054b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Gon=C3=A9ri=20Le=20Bouder?=
Date: Mon, 13 Dec 2021 12:32:27 -0500
Subject: [PATCH 0024/2310] netbsd: install new dep packages (#1151)

- netifaces
- jsonschema
---
 tools/build-on-netbsd | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/tools/build-on-netbsd b/tools/build-on-netbsd
index 328370586ae..0d4eb58be4d 100755
--- a/tools/build-on-netbsd
+++ b/tools/build-on-netbsd
@@ -19,7 +19,9 @@ pkgs="
    ${py_prefix}-oauthlib
    ${py_prefix}-requests
    ${py_prefix}-setuptools
+   ${py_prefix}-netifaces
    ${py_prefix}-yaml
+   ${py_prefix}-jsonschema
    sudo
 "
 [ -f "$depschecked" ] || pkg_add ${pkgs} || fail
"install packages" From 35711700e10c47dcfa16238ba0bd395813e5a511 Mon Sep 17 00:00:00 2001 From: Brett Holman Date: Mon, 13 Dec 2021 10:43:58 -0700 Subject: [PATCH 0025/2310] Add dependency workaround for impish in bddeb (#1148) dh_systemd is now included in the default helper, no need to specify it anymore for impish --- packages/bddeb | 2 ++ 1 file changed, 2 insertions(+) diff --git a/packages/bddeb b/packages/bddeb index a3fb884852e..b009021a039 100755 --- a/packages/bddeb +++ b/packages/bddeb @@ -94,6 +94,8 @@ def write_debian_folder(root, templ_data, cloud_util_deps): requires.extend(['python3'] + reqs + test_reqs) if templ_data['debian_release'] == 'xenial': requires.append('python3-pytest-catchlog') + elif templ_data['debian_release'] == 'impish': + requires.remove('dh-systemd') templater.render_to_file(util.abs_join(find_root(), 'packages', 'debian', 'control.in'), util.abs_join(deb_dir, 'control'), From 9a6e65a2a575055aadc1802004dbe3f343a54b89 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Mon, 13 Dec 2021 18:14:50 -0600 Subject: [PATCH 0026/2310] Schema processing changes (SC-676) (#1144) * Use proper logging * Add parsing for patternProperties * Add label to annotate patternProperties * Log warning if schema parsing fails during metaschema processing * Some schema test fixes --- cloudinit/config/schema.py | 97 ++++++++++++++++++--------- tests/unittests/config/test_schema.py | 55 +++++++++++++-- 2 files changed, 113 insertions(+), 39 deletions(-) diff --git a/cloudinit/config/schema.py b/cloudinit/config/schema.py index d32b7c01f8f..d772b4f9226 100644 --- a/cloudinit/config/schema.py +++ b/cloudinit/config/schema.py @@ -17,6 +17,7 @@ import yaml error = partial(error, sys_exit=True) +LOG = logging.getLogger(__name__) _YAML_MAP = {True: 'true', False: 'false', None: 'null'} CLOUD_CONFIG_HEADER = b'#cloud-config' @@ -91,7 +92,16 @@ def get_jsonschema_validator(): # This allows #cloud-config to provide valid yaml "content: !!binary | ..." strict_metaschema = deepcopy(Draft4Validator.META_SCHEMA) - strict_metaschema['additionalProperties'] = False + strict_metaschema["additionalProperties"] = False + + # This additional label allows us to specify a different name + # than the property key when generating docs. + # This is especially useful when using a "patternProperties" regex, + # otherwise the property label in the generated docs will be a + # regular expression. + # http://json-schema.org/understanding-json-schema/reference/object.html#pattern-properties + strict_metaschema["properties"]["label"] = {"type": "string"} + if hasattr(Draft4Validator, 'TYPE_CHECKER'): # jsonschema 3.0+ type_checker = Draft4Validator.TYPE_CHECKER.redefine( 'string', is_schema_byte_string) @@ -140,7 +150,7 @@ def validate_cloudconfig_metaschema(validator, schema: dict, throw=True): ('.'.join([str(p) for p in err.path]), err.message), ) ) from err - logging.warning( + LOG.warning( "Meta-schema validation failed, attempting to validate config " "anyway: %s", err) @@ -168,7 +178,7 @@ def validate_cloudconfig_schema( validate_cloudconfig_metaschema( cloudinitValidator, schema, throw=False) except ImportError: - logging.debug("Ignoring schema validation. jsonschema is not present") + LOG.debug("Ignoring schema validation. 
jsonschema is not present") return validator = cloudinitValidator(schema, format_checker=FormatChecker()) @@ -180,8 +190,8 @@ def validate_cloudconfig_schema( if strict: raise SchemaValidationError(errors) else: - messages = ['{0}: {1}'.format(k, msg) for k, msg in errors] - logging.warning('Invalid config:\n%s', '\n'.join(messages)) + messages = ["{0}: {1}".format(k, msg) for k, msg in errors] + LOG.warning("Invalid config:\n%s", "\n".join(messages)) def annotated_cloudconfig_file(cloudconfig, original_content, schema_errors): @@ -410,34 +420,53 @@ def _get_property_doc(schema: dict, prefix=" ") -> str: """Return restructured text describing the supported schema properties.""" new_prefix = prefix + ' ' properties = [] - for prop_key, prop_config in schema.get('properties', {}).items(): - # Define prop_name and description for SCHEMA_PROPERTY_TMPL - description = prop_config.get('description', '') - - # Define prop_name and description for SCHEMA_PROPERTY_TMPL - properties.append( - SCHEMA_PROPERTY_TMPL.format( - prefix=prefix, - prop_name=prop_key, - description=_parse_description(description, prefix), - prop_type=_get_property_type(prop_config), + property_keys = [ + schema.get("properties", {}), + schema.get("patternProperties", {}), + ] + + for props in property_keys: + for prop_key, prop_config in props.items(): + # Define prop_name and description for SCHEMA_PROPERTY_TMPL + description = prop_config.get("description", "") + + # Define prop_name and description for SCHEMA_PROPERTY_TMPL + label = prop_config.get("label", prop_key) + properties.append( + SCHEMA_PROPERTY_TMPL.format( + prefix=prefix, + prop_name=label, + description=_parse_description(description, prefix), + prop_type=_get_property_type(prop_config), + ) ) - ) - items = prop_config.get("items") - if items: - if isinstance(items, list): - for item in items: + items = prop_config.get("items") + if items: + if isinstance(items, list): + for item in items: + properties.append( + _get_property_doc(item, prefix=new_prefix) + ) + elif isinstance(items, dict) and ( + items.get("properties") or items.get("patternProperties") + ): properties.append( - _get_property_doc(item, prefix=new_prefix)) - elif isinstance(items, dict) and items.get('properties'): - properties.append(SCHEMA_LIST_ITEM_TMPL.format( - prefix=new_prefix, prop_name=prop_key)) - new_prefix += ' ' - properties.append(_get_property_doc(items, prefix=new_prefix)) - if 'properties' in prop_config: - properties.append( - _get_property_doc(prop_config, prefix=new_prefix)) - return '\n\n'.join(properties) + SCHEMA_LIST_ITEM_TMPL.format( + prefix=new_prefix, prop_name=label + ) + ) + new_prefix += " " + properties.append( + _get_property_doc(items, prefix=new_prefix) + ) + if ( + "properties" in prop_config + or "patternProperties" in prop_config + ): + properties.append( + _get_property_doc(prop_config, prefix=new_prefix) + ) + return "\n\n".join(properties) def _get_examples(meta: MetaSchema) -> str: @@ -494,7 +523,11 @@ def get_meta_doc(meta: MetaSchema, schema: dict) -> str: # cast away type annotation meta_copy = dict(deepcopy(meta)) - meta_copy["property_doc"] = _get_property_doc(schema) + try: + meta_copy["property_doc"] = _get_property_doc(schema) + except AttributeError: + LOG.warning("Unable to render property_doc due to invalid schema") + meta_copy["property_doc"] = "" meta_copy["examples"] = _get_examples(meta) meta_copy["distros"] = ", ".join(meta["distros"]) # Need an underbar of the same length as the name diff --git a/tests/unittests/config/test_schema.py 
b/tests/unittests/config/test_schema.py index ed7ab5279ce..40803cae2c6 100644 --- a/tests/unittests/config/test_schema.py +++ b/tests/unittests/config/test_schema.py @@ -55,7 +55,7 @@ def get_module_variable(var_name) -> dict: schemas = {} files = list( - Path(cloud_init_project_dir("../../cloudinit/config/")).glob("cc_*.py") + Path(cloud_init_project_dir("cloudinit/config/")).glob("cc_*.py") ) modules = [mod.stem for mod in files] @@ -215,12 +215,13 @@ class TestCloudConfigExamples: @pytest.mark.parametrize("schema_id, example", params) @skipUnlessJsonSchema() def test_validateconfig_schema_of_example(self, schema_id, example): - """ For a given example in a config module we test if it is valid + """For a given example in a config module we test if it is valid according to the unified schema of all config modules """ config_load = safe_load(example) validate_cloudconfig_schema( - config_load, self.schema, strict=True) + config_load, self.schema[schema_id], strict=True + ) class ValidateCloudConfigFileTest(CiTestCase): @@ -462,6 +463,44 @@ def test_get_meta_doc_raises_key_errors(self): get_meta_doc(invalid_meta, schema) self.assertIn(key, str(context_mgr.exception)) + def test_label_overrides_property_name(self): + """get_meta_doc overrides property name with label.""" + schema = { + "properties": { + "prop1": { + "type": "string", + "label": "label1", + }, + "prop_no_label": { + "type": "string", + }, + "prop_array": { + "label": 'array_label', + "type": "array", + "items": { + "type": "object", + "properties": { + "some_prop": {"type": "number"}, + }, + }, + }, + }, + "patternProperties": { + "^.*$": { + "type": "string", + "label": "label2", + } + } + } + meta_doc = get_meta_doc(self.meta, schema) + assert "**label1:** (string)" in meta_doc + assert "**label2:** (string" in meta_doc + assert "**prop_no_label:** (string)" in meta_doc + assert "Each item in **array_label** list" in meta_doc + + assert "prop1" not in meta_doc + assert ".*" not in meta_doc + class AnnotatedCloudconfigFileTest(CiTestCase): maxDiff = None @@ -626,9 +665,11 @@ def _get_meta_doc_examples(): examples_dir = Path(cloud_init_project_dir('doc/examples')) assert examples_dir.is_dir() - all_text_files = (f for f in examples_dir.glob('cloud-config*.txt') - if not f.name.startswith('cloud-config-archive')) - return all_text_files + return ( + str(f) + for f in examples_dir.glob("cloud-config*.txt") + if not f.name.startswith("cloud-config-archive") + ) class TestSchemaDocExamples: @@ -637,7 +678,7 @@ class TestSchemaDocExamples: @pytest.mark.parametrize("example_path", _get_meta_doc_examples()) @skipUnlessJsonSchema() def test_schema_doc_examples(self, example_path): - validate_cloudconfig_file(str(example_path), self.schema) + validate_cloudconfig_file(example_path, self.schema) class TestStrictMetaschema: From 3da3bdae5e5b41bb3a9f6b23c8573c8fbc23e629 Mon Sep 17 00:00:00 2001 From: Chris Patterson Date: Tue, 14 Dec 2021 11:53:56 -0500 Subject: [PATCH 0027/2310] tests/cmd/query: fix test run as root and add coverage for defaults (#1156) test_handle_args_error_on_invalid_vaname_paths() would fail when run as root due to invocation of load_userdata() on the default user/vendor data locations under the instance link. - Mock load_userdata() for this test case to avoid loads. - Update _setup_paths() to configure cloud_dir in temporary location. - Add new test case to verify that the default locations are loaded when unspecified. 
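As a condensed sketch of the mocking pattern described above (the test
name and the `args`/`caplog` fixtures here are illustrative stand-ins for
the real test in the diff below):

    from unittest import mock

    from cloudinit.cmd import query

    def test_invalid_varname_skips_default_userdata_load(args, caplog):
        # Stub load_userdata so the default user/vendor-data files under
        # the instance link are never read, even when running as root.
        with mock.patch("cloudinit.cmd.query.load_userdata") as m_lud:
            m_lud.return_value = "ud"
            assert 1 == query.handle_args("anyname", args)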
LP: #1825027

Signed-off-by: Chris Patterson
---
 tests/unittests/cmd/test_query.py | 44 ++++++++++++++++++++++++++++---
 1 file changed, 41 insertions(+), 3 deletions(-)

diff --git a/tests/unittests/cmd/test_query.py b/tests/unittests/cmd/test_query.py
index b3f1d98dcfb..b7d02d13a9e 100644
--- a/tests/unittests/cmd/test_query.py
+++ b/tests/unittests/cmd/test_query.py
@@ -2,8 +2,9 @@

 import errno
 import gzip
-from io import BytesIO
 import json
+import os
+from io import BytesIO
 from textwrap import dedent

 import pytest
@@ -51,8 +52,14 @@ def _setup_paths(self, tmpdir, ud_val=None, vd_val=None):
             vendor_data = None
         run_dir = tmpdir.join('run_dir')
         run_dir.ensure_dir()
+
+        cloud_dir = tmpdir.join('cloud_dir')
+        cloud_dir.ensure_dir()
+
         return (
-            Paths({'run_dir': run_dir.strpath}),
+            Paths(
+                {'cloud_dir': cloud_dir.strpath, 'run_dir': run_dir.strpath}
+            ),
             run_dir,
             user_data,
             vendor_data
@@ -106,7 +113,9 @@ def test_handle_args_error_on_invalid_vaname_paths(
         with mock.patch(
             "cloudinit.cmd.query.addLogHandlerCLI", return_value=""
         ):
-            assert 1 == query.handle_args('anyname', args)
+            with mock.patch('cloudinit.cmd.query.load_userdata') as m_lud:
+                m_lud.return_value = "ud"
+                assert 1 == query.handle_args('anyname', args)
         assert expected_error in caplog.text

     def test_handle_args_error_on_missing_instance_data(self, caplog, tmpdir):
@@ -208,6 +217,35 @@ def test_handle_args_root_processes_user_data(
         assert ud_expected == cmd_output['userdata']
         assert vd_expected == cmd_output['vendordata']

+    def test_handle_args_user_vendor_data_defaults_to_instance_link(
+        self, capsys, tmpdir
+    ):
+        """When no instance_data argument, root uses sensitive json."""
+        paths, run_dir, _, _ = self._setup_paths(tmpdir)
+        sensitive_file = run_dir.join(INSTANCE_JSON_SENSITIVE_FILE)
+        sensitive_file.write('{"my-var": "it worked"}')
+
+        ud_path = os.path.join(paths.instance_link, "user-data.txt")
+        write_file(ud_path, "instance_link_ud")
+        vd_path = os.path.join(paths.instance_link, "vendor-data.txt")
+        write_file(vd_path, "instance_link_vd")
+
+        args = self.args(
+            debug=False, dump_all=True, format=None, instance_data=None,
+            list_keys=False, user_data=None,
+            vendor_data=None, varname=None)
+        with mock.patch('cloudinit.cmd.query.read_cfg_paths') as m_paths:
+            m_paths.return_value = paths
+            with mock.patch('os.getuid', return_value=0):
+                assert 0 == query.handle_args('anyname', args)
+        expected = (
+            '{\n "my-var": "it worked",\n '
+            '"userdata": "instance_link_ud",\n '
+            '"vendordata": "instance_link_vd"\n}\n'
+        )
+        out, _ = capsys.readouterr()
+        assert expected == out
+
     def test_handle_args_root_uses_instance_sensitive_data(
         self, capsys, tmpdir
     ):

From 2bcf4fa972fde686c2e3141c58e640640b44dd00 Mon Sep 17 00:00:00 2001
From: James Falcon
Date: Tue, 14 Dec 2021 21:26:20 -0600
Subject: [PATCH 0028/2310] Include dpkg frontend lock in APT_LOCK_FILES
 (#1153)

---
 cloudinit/distros/debian.py | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py
index f390147005a..b2af086698b 100644
--- a/cloudinit/distros/debian.py
+++ b/cloudinit/distros/debian.py
@@ -43,10 +43,17 @@
 NETWORK_CONF_FN = "/etc/network/interfaces.d/50-cloud-init"
 LOCALE_CONF_FN = "/etc/default/locale"

+# The frontend lock needs to be acquired first, followed by the order that
+# apt uses. /var/lib/apt/lists is locked independently of that install chain,
+# and is only locked during update, so you can acquire it in either order.
+# Also, update does not acquire the dpkg frontend lock.
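+# As an illustrative sketch (not apt's API), a holder of all of these
+# locks would take them in list order, e.g.:
+#   import fcntl
+#   handles = [open(path, "w") for path in APT_LOCK_FILES]
+#   for handle in handles:
+#       fcntl.flock(handle, fcntl.LOCK_EX)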
+# More context: +# https://github.com/canonical/cloud-init/pull/1034#issuecomment-986971376 APT_LOCK_FILES = [ + '/var/lib/dpkg/lock-frontend', '/var/lib/dpkg/lock', - '/var/lib/apt/lists/lock', '/var/cache/apt/archives/lock', + '/var/lib/apt/lists/lock', ] From bae9b11da9ed7dd0b16fe5adeaf4774b7cc628cf Mon Sep 17 00:00:00 2001 From: James Falcon Date: Wed, 15 Dec 2021 20:16:38 -0600 Subject: [PATCH 0029/2310] Adopt Black and isort (SC-700) (#1157) Applied Black and isort, fixed any linting issues, updated tox.ini and CI. --- .travis.yml | 4 + CONTRIBUTING.rst | 5 + cloudinit/analyze/__main__.py | 269 +- cloudinit/analyze/dump.py | 71 +- cloudinit/analyze/show.py | 192 +- cloudinit/apport.py | 153 +- cloudinit/atomic_helper.py | 25 +- cloudinit/cloud.py | 14 +- cloudinit/cmd/clean.py | 59 +- cloudinit/cmd/cloud_id.py | 68 +- cloudinit/cmd/devel/__init__.py | 3 +- cloudinit/cmd/devel/hotplug_hook.py | 138 +- cloudinit/cmd/devel/logs.py | 120 +- cloudinit/cmd/devel/make_mime.py | 76 +- cloudinit/cmd/devel/net_convert.py | 145 +- cloudinit/cmd/devel/parser.py | 48 +- cloudinit/cmd/devel/render.py | 54 +- cloudinit/cmd/main.py | 595 +- cloudinit/cmd/query.py | 170 +- cloudinit/cmd/status.py | 101 +- cloudinit/config/__init__.py | 20 +- cloudinit/config/cc_apk_configure.py | 195 +- cloudinit/config/cc_apt_configure.py | 618 ++- cloudinit/config/cc_apt_pipelining.py | 13 +- cloudinit/config/cc_bootcmd.py | 65 +- cloudinit/config/cc_byobu.py | 27 +- cloudinit/config/cc_ca_certs.py | 84 +- cloudinit/config/cc_chef.py | 659 ++- cloudinit/config/cc_debug.py | 21 +- cloudinit/config/cc_disable_ec2_metadata.py | 25 +- cloudinit/config/cc_disk_setup.py | 334 +- cloudinit/config/cc_emit_upstart.py | 24 +- cloudinit/config/cc_fan.py | 34 +- cloudinit/config/cc_final_message.py | 24 +- cloudinit/config/cc_foo.py | 1 + cloudinit/config/cc_growpart.py | 134 +- cloudinit/config/cc_grub_dpkg.py | 54 +- cloudinit/config/cc_install_hotplug.py | 48 +- cloudinit/config/cc_keys_to_console.py | 36 +- cloudinit/config/cc_landscape.py | 24 +- cloudinit/config/cc_locale.py | 51 +- cloudinit/config/cc_lxd.py | 186 +- cloudinit/config/cc_mcollective.py | 50 +- cloudinit/config/cc_migrator.py | 25 +- cloudinit/config/cc_mounts.py | 172 +- cloudinit/config/cc_ntp.py | 546 +- .../cc_package_update_upgrade_install.py | 30 +- cloudinit/config/cc_phone_home.py | 98 +- cloudinit/config/cc_power_state_change.py | 58 +- cloudinit/config/cc_puppet.py | 194 +- .../config/cc_refresh_rmc_and_interface.py | 51 +- cloudinit/config/cc_reset_rmc.py | 43 +- cloudinit/config/cc_resizefs.py | 183 +- cloudinit/config/cc_resolv_conf.py | 41 +- cloudinit/config/cc_rh_subscription.py | 240 +- cloudinit/config/cc_rightscale_userdata.py | 31 +- cloudinit/config/cc_rsyslog.py | 86 +- cloudinit/config/cc_runcmd.py | 70 +- cloudinit/config/cc_salt_minion.py | 69 +- cloudinit/config/cc_scripts_per_boot.py | 14 +- cloudinit/config/cc_scripts_per_instance.py | 14 +- cloudinit/config/cc_scripts_per_once.py | 14 +- cloudinit/config/cc_scripts_user.py | 12 +- cloudinit/config/cc_scripts_vendor.py | 22 +- cloudinit/config/cc_seed_random.py | 41 +- cloudinit/config/cc_set_hostname.py | 30 +- cloudinit/config/cc_set_passwords.py | 65 +- cloudinit/config/cc_snap.py | 168 +- cloudinit/config/cc_spacewalk.py | 67 +- cloudinit/config/cc_ssh.py | 106 +- .../config/cc_ssh_authkey_fingerprints.py | 73 +- cloudinit/config/cc_ssh_import_id.py | 23 +- cloudinit/config/cc_timezone.py | 2 +- cloudinit/config/cc_ubuntu_advantage.py | 154 +- 
cloudinit/config/cc_ubuntu_drivers.py | 133 +- cloudinit/config/cc_update_etc_hosts.py | 42 +- cloudinit/config/cc_update_hostname.py | 25 +- cloudinit/config/cc_users_groups.py | 39 +- cloudinit/config/cc_write_files.py | 242 +- cloudinit/config/cc_write_files_deferred.py | 22 +- cloudinit/config/cc_yum_add_repo.py | 65 +- cloudinit/config/cc_zypper_add_repo.py | 159 +- cloudinit/config/schema.py | 239 +- cloudinit/cs_utils.py | 20 +- cloudinit/dhclient_hook.py | 21 +- cloudinit/distros/__init__.py | 420 +- cloudinit/distros/almalinux.py | 1 + cloudinit/distros/alpine.py | 45 +- cloudinit/distros/amazon.py | 1 - cloudinit/distros/arch.py | 147 +- cloudinit/distros/bsd.py | 66 +- cloudinit/distros/bsd_utils.py | 18 +- cloudinit/distros/centos.py | 1 + cloudinit/distros/cloudlinux.py | 1 + cloudinit/distros/debian.py | 168 +- cloudinit/distros/dragonflybsd.py | 2 +- cloudinit/distros/eurolinux.py | 1 + cloudinit/distros/fedora.py | 1 + cloudinit/distros/freebsd.py | 93 +- cloudinit/distros/gentoo.py | 140 +- cloudinit/distros/miraclelinux.py | 2 + cloudinit/distros/net_util.py | 68 +- cloudinit/distros/netbsd.py | 85 +- cloudinit/distros/networking.py | 13 +- cloudinit/distros/openEuler.py | 1 + cloudinit/distros/openbsd.py | 20 +- cloudinit/distros/opensuse.py | 119 +- cloudinit/distros/parsers/__init__.py | 3 +- cloudinit/distros/parsers/hostname.py | 24 +- cloudinit/distros/parsers/hosts.py | 24 +- .../distros/parsers/networkmanager_conf.py | 6 +- cloudinit/distros/parsers/resolv_conf.py | 73 +- cloudinit/distros/parsers/sys_conf.py | 38 +- cloudinit/distros/photon.py | 86 +- cloudinit/distros/rhel.py | 76 +- cloudinit/distros/rhel_util.py | 4 +- cloudinit/distros/rocky.py | 1 + cloudinit/distros/sles.py | 1 + cloudinit/distros/ubuntu.py | 33 +- cloudinit/distros/ug_util.py | 106 +- cloudinit/distros/virtuozzo.py | 1 + cloudinit/dmi.py | 68 +- cloudinit/ec2_utils.py | 165 +- cloudinit/event.py | 8 +- cloudinit/filters/launch_index.py | 12 +- cloudinit/gpg.py | 48 +- cloudinit/handlers/__init__.py | 152 +- cloudinit/handlers/boot_hook.py | 21 +- cloudinit/handlers/cloud_config.py | 29 +- cloudinit/handlers/jinja_template.py | 87 +- cloudinit/handlers/shell_script.py | 15 +- cloudinit/handlers/upstart_job.py | 22 +- cloudinit/helpers.py | 111 +- cloudinit/importer.py | 3 +- cloudinit/log.py | 21 +- cloudinit/mergers/__init__.py | 43 +- cloudinit/mergers/m_dict.py | 34 +- cloudinit/mergers/m_list.py | 37 +- cloudinit/mergers/m_str.py | 5 +- cloudinit/net/__init__.py | 579 +- cloudinit/net/activators.py | 87 +- cloudinit/net/bsd.py | 112 +- cloudinit/net/cmdline.py | 97 +- cloudinit/net/dhcp.py | 194 +- cloudinit/net/eni.py | 454 +- cloudinit/net/freebsd.py | 44 +- cloudinit/net/netbsd.py | 27 +- cloudinit/net/netplan.py | 313 +- cloudinit/net/network_state.py | 734 +-- cloudinit/net/networkd.py | 208 +- cloudinit/net/openbsd.py | 33 +- cloudinit/net/renderer.py | 31 +- cloudinit/net/renderers.py | 40 +- cloudinit/net/sysconfig.py | 886 +-- cloudinit/net/udev.py | 23 +- cloudinit/netinfo.py | 403 +- cloudinit/patcher.py | 9 +- cloudinit/registry.py | 4 +- cloudinit/reporting/__init__.py | 9 +- cloudinit/reporting/events.py | 97 +- cloudinit/reporting/handlers.py | 128 +- cloudinit/safeyaml.py | 25 +- cloudinit/serial.py | 25 +- cloudinit/settings.py | 82 +- cloudinit/signal_handler.py | 12 +- cloudinit/simpletable.py | 26 +- cloudinit/sources/DataSourceAliYun.py | 18 +- cloudinit/sources/DataSourceAltCloud.py | 113 +- cloudinit/sources/DataSourceAzure.py | 1350 +++-- 
cloudinit/sources/DataSourceBigstep.py | 9 +- cloudinit/sources/DataSourceCloudSigma.py | 39 +- cloudinit/sources/DataSourceCloudStack.py | 135 +- cloudinit/sources/DataSourceConfigDrive.py | 117 +- cloudinit/sources/DataSourceDigitalOcean.py | 65 +- cloudinit/sources/DataSourceEc2.py | 461 +- cloudinit/sources/DataSourceExoscale.py | 171 +- cloudinit/sources/DataSourceGCE.py | 221 +- cloudinit/sources/DataSourceHetzner.py | 74 +- cloudinit/sources/DataSourceIBMCloud.py | 128 +- cloudinit/sources/DataSourceLXD.py | 61 +- cloudinit/sources/DataSourceMAAS.py | 180 +- cloudinit/sources/DataSourceNoCloud.py | 154 +- cloudinit/sources/DataSourceNone.py | 15 +- cloudinit/sources/DataSourceOVF.py | 311 +- cloudinit/sources/DataSourceOpenNebula.py | 190 +- cloudinit/sources/DataSourceOpenStack.py | 129 +- cloudinit/sources/DataSourceOracle.py | 125 +- cloudinit/sources/DataSourceRbxCloud.py | 194 +- cloudinit/sources/DataSourceScaleway.py | 131 +- cloudinit/sources/DataSourceSmartOS.py | 555 +- cloudinit/sources/DataSourceUpCloud.py | 7 +- cloudinit/sources/DataSourceVMware.py | 13 +- cloudinit/sources/DataSourceVultr.py | 86 +- cloudinit/sources/__init__.py | 385 +- cloudinit/sources/helpers/azure.py | 693 ++- cloudinit/sources/helpers/digitalocean.py | 195 +- cloudinit/sources/helpers/hetzner.py | 15 +- cloudinit/sources/helpers/netlink.py | 187 +- cloudinit/sources/helpers/openstack.py | 438 +- cloudinit/sources/helpers/upcloud.py | 12 +- .../sources/helpers/vmware/imc/boot_proto.py | 5 +- .../sources/helpers/vmware/imc/config.py | 59 +- .../vmware/imc/config_custom_script.py | 45 +- .../sources/helpers/vmware/imc/config_file.py | 7 +- .../helpers/vmware/imc/config_namespace.py | 1 + .../sources/helpers/vmware/imc/config_nic.py | 84 +- .../helpers/vmware/imc/config_passwd.py | 38 +- .../helpers/vmware/imc/config_source.py | 1 + .../helpers/vmware/imc/guestcust_error.py | 1 + .../helpers/vmware/imc/guestcust_event.py | 1 + .../helpers/vmware/imc/guestcust_state.py | 1 + .../helpers/vmware/imc/guestcust_util.py | 46 +- .../sources/helpers/vmware/imc/ipv4_mode.py | 11 +- cloudinit/sources/helpers/vmware/imc/nic.py | 33 +- .../sources/helpers/vmware/imc/nic_base.py | 29 +- cloudinit/sources/helpers/vultr.py | 172 +- cloudinit/ssh_util.py | 172 +- cloudinit/stages.py | 649 ++- cloudinit/subp.py | 165 +- cloudinit/temp_utils.py | 20 +- cloudinit/templater.py | 96 +- cloudinit/type_utils.py | 4 +- cloudinit/url_helper.py | 273 +- cloudinit/user_data.py | 121 +- cloudinit/util.py | 873 +-- cloudinit/version.py | 9 +- cloudinit/warnings.py | 21 +- conftest.py | 5 +- doc/rtd/conf.py | 30 +- pyproject.toml | 8 + setup.py | 263 +- tests/integration_tests/__init__.py | 8 +- tests/integration_tests/bugs/test_gh570.py | 13 +- tests/integration_tests/bugs/test_gh626.py | 25 +- tests/integration_tests/bugs/test_gh632.py | 20 +- tests/integration_tests/bugs/test_gh668.py | 15 +- tests/integration_tests/bugs/test_gh671.py | 35 +- tests/integration_tests/bugs/test_gh868.py | 3 +- .../integration_tests/bugs/test_lp1813396.py | 3 +- .../integration_tests/bugs/test_lp1835584.py | 19 +- .../integration_tests/bugs/test_lp1886531.py | 2 - .../integration_tests/bugs/test_lp1897099.py | 13 +- .../integration_tests/bugs/test_lp1898997.py | 14 +- .../integration_tests/bugs/test_lp1900837.py | 2 +- .../integration_tests/bugs/test_lp1901011.py | 49 +- .../integration_tests/bugs/test_lp1910835.py | 1 - .../integration_tests/bugs/test_lp1912844.py | 4 +- tests/integration_tests/clouds.py | 163 +- 
tests/integration_tests/conftest.py | 130 +- .../datasources/test_lxd_discovery.py | 43 +- .../datasources/test_network_dependency.py | 17 +- tests/integration_tests/instances.py | 83 +- .../integration_tests/integration_settings.py | 13 +- tests/integration_tests/modules/test_apt.py | 88 +- .../modules/test_ca_certs.py | 1 - tests/integration_tests/modules/test_cli.py | 9 +- .../modules/test_combined.py | 155 +- .../modules/test_command_output.py | 5 +- .../modules/test_disk_setup.py | 76 +- .../modules/test_growpart.py | 38 +- .../integration_tests/modules/test_hotplug.py | 55 +- .../modules/test_jinja_templating.py | 11 +- .../modules/test_keys_to_console.py | 9 +- .../modules/test_lxd_bridge.py | 2 - .../modules/test_ntp_servers.py | 30 +- .../test_package_update_upgrade_install.py | 18 +- .../modules/test_persistence.py | 26 +- .../modules/test_power_state_change.py | 48 +- .../integration_tests/modules/test_puppet.py | 6 +- .../modules/test_set_hostname.py | 10 +- .../modules/test_set_password.py | 15 +- .../modules/test_ssh_auth_key_fingerprints.py | 13 +- .../modules/test_ssh_generate.py | 16 +- .../modules/test_ssh_keys_provided.py | 58 +- .../modules/test_ssh_keysfile.py | 159 +- .../modules/test_user_events.py | 50 +- .../modules/test_users_groups.py | 21 +- .../modules/test_version_change.py | 45 +- .../modules/test_write_files.py | 32 +- tests/integration_tests/test_upgrade.py | 120 +- tests/integration_tests/util.py | 39 +- tests/unittests/__init__.py | 1 + tests/unittests/analyze/test_boot.py | 135 +- tests/unittests/analyze/test_dump.py | 213 +- .../unittests/cmd/devel/test_hotplug_hook.py | 162 +- tests/unittests/cmd/devel/test_logs.py | 232 +- tests/unittests/cmd/devel/test_render.py | 152 +- tests/unittests/cmd/test_clean.py | 179 +- tests/unittests/cmd/test_cloud_id.py | 99 +- tests/unittests/cmd/test_main.py | 223 +- tests/unittests/cmd/test_query.py | 403 +- tests/unittests/cmd/test_status.py | 561 +- tests/unittests/config/test_apt_conf_v1.py | 68 +- .../test_apt_configure_sources_list_v1.py | 131 +- .../test_apt_configure_sources_list_v3.py | 158 +- tests/unittests/config/test_apt_key.py | 117 +- tests/unittests/config/test_apt_source_v1.py | 765 ++- tests/unittests/config/test_apt_source_v3.py | 1220 +++-- .../unittests/config/test_cc_apk_configure.py | 148 +- .../config/test_cc_apt_pipelining.py | 12 +- tests/unittests/config/test_cc_bootcmd.py | 100 +- tests/unittests/config/test_cc_ca_certs.py | 220 +- tests/unittests/config/test_cc_chef.py | 202 +- tests/unittests/config/test_cc_debug.py | 39 +- .../config/test_cc_disable_ec2_metadata.py | 44 +- tests/unittests/config/test_cc_disk_setup.py | 270 +- tests/unittests/config/test_cc_growpart.py | 232 +- tests/unittests/config/test_cc_grub_dpkg.py | 121 +- .../config/test_cc_install_hotplug.py | 58 +- .../config/test_cc_keys_to_console.py | 18 +- tests/unittests/config/test_cc_landscape.py | 178 +- tests/unittests/config/test_cc_locale.py | 99 +- tests/unittests/config/test_cc_lxd.py | 250 +- tests/unittests/config/test_cc_mcollective.py | 104 +- tests/unittests/config/test_cc_mounts.py | 449 +- tests/unittests/config/test_cc_ntp.py | 682 ++- .../config/test_cc_power_state_change.py | 74 +- tests/unittests/config/test_cc_puppet.py | 432 +- .../test_cc_refresh_rmc_and_interface.py | 162 +- tests/unittests/config/test_cc_resizefs.py | 436 +- tests/unittests/config/test_cc_resolv_conf.py | 76 +- .../config/test_cc_rh_subscription.py | 366 +- tests/unittests/config/test_cc_rsyslog.py | 112 +- 
tests/unittests/config/test_cc_runcmd.py | 74 +- tests/unittests/config/test_cc_seed_random.py | 158 +- .../unittests/config/test_cc_set_hostname.py | 185 +- .../unittests/config/test_cc_set_passwords.py | 111 +- tests/unittests/config/test_cc_snap.py | 445 +- tests/unittests/config/test_cc_spacewalk.py | 36 +- tests/unittests/config/test_cc_ssh.py | 356 +- tests/unittests/config/test_cc_timezone.py | 31 +- .../config/test_cc_ubuntu_advantage.py | 311 +- .../config/test_cc_ubuntu_drivers.py | 213 +- .../config/test_cc_update_etc_hosts.py | 63 +- .../unittests/config/test_cc_users_groups.py | 264 +- tests/unittests/config/test_cc_write_files.py | 148 +- .../config/test_cc_write_files_deferred.py | 62 +- .../unittests/config/test_cc_yum_add_repo.py | 105 +- .../config/test_cc_zypper_add_repo.py | 166 +- tests/unittests/config/test_schema.py | 301 +- tests/unittests/distros/__init__.py | 10 +- tests/unittests/distros/test_arch.py | 50 +- tests/unittests/distros/test_bsd_utils.py | 49 +- tests/unittests/distros/test_create_users.py | 252 +- tests/unittests/distros/test_debian.py | 155 +- tests/unittests/distros/test_freebsd.py | 28 +- tests/unittests/distros/test_generic.py | 300 +- tests/unittests/distros/test_gentoo.py | 11 +- tests/unittests/distros/test_hostname.py | 16 +- tests/unittests/distros/test_hosts.py | 36 +- tests/unittests/distros/test_init.py | 273 +- .../unittests/distros/test_manage_service.py | 33 +- tests/unittests/distros/test_netbsd.py | 11 +- tests/unittests/distros/test_netconfig.py | 605 ++- tests/unittests/distros/test_networking.py | 30 +- tests/unittests/distros/test_opensuse.py | 3 +- tests/unittests/distros/test_photon.py | 42 +- tests/unittests/distros/test_resolv.py | 55 +- tests/unittests/distros/test_sles.py | 3 +- tests/unittests/distros/test_sysconfig.py | 62 +- .../distros/test_user_data_normalize.py | 383 +- tests/unittests/filters/test_launch_index.py | 23 +- tests/unittests/helpers.py | 191 +- tests/unittests/net/test_dhcp.py | 678 ++- tests/unittests/net/test_init.py | 1368 +++-- tests/unittests/net/test_network_state.py | 82 +- tests/unittests/net/test_networkd.py | 2 +- tests/unittests/runs/test_merge_run.py | 49 +- tests/unittests/runs/test_simple_run.py | 132 +- .../unittests/sources/helpers/test_netlink.py | 357 +- .../sources/helpers/test_openstack.py | 51 +- tests/unittests/sources/test_aliyun.py | 217 +- tests/unittests/sources/test_altcloud.py | 311 +- tests/unittests/sources/test_azure.py | 3174 ++++++----- tests/unittests/sources/test_azure_helper.py | 1138 ++-- tests/unittests/sources/test_cloudsigma.py | 72 +- tests/unittests/sources/test_cloudstack.py | 121 +- tests/unittests/sources/test_common.py | 86 +- tests/unittests/sources/test_configdrive.py | 1100 ++-- tests/unittests/sources/test_digitalocean.py | 283 +- tests/unittests/sources/test_ec2.py | 851 +-- tests/unittests/sources/test_exoscale.py | 248 +- tests/unittests/sources/test_gce.py | 304 +- tests/unittests/sources/test_hetzner.py | 85 +- tests/unittests/sources/test_ibmcloud.py | 299 +- tests/unittests/sources/test_init.py | 879 +-- tests/unittests/sources/test_lxd.py | 134 +- tests/unittests/sources/test_maas.py | 147 +- tests/unittests/sources/test_nocloud.py | 320 +- tests/unittests/sources/test_opennebula.py | 888 +-- tests/unittests/sources/test_openstack.py | 652 ++- tests/unittests/sources/test_oracle.py | 412 +- tests/unittests/sources/test_ovf.py | 1053 ++-- tests/unittests/sources/test_rbx.py | 215 +- tests/unittests/sources/test_scaleway.py | 481 +- 
tests/unittests/sources/test_smartos.py | 956 ++-- tests/unittests/sources/test_upcloud.py | 161 +- tests/unittests/sources/test_vmware.py | 12 +- tests/unittests/sources/test_vultr.py | 375 +- .../sources/vmware/test_custom_script.py | 61 +- .../sources/vmware/test_guestcust_util.py | 79 +- .../sources/vmware/test_vmware_config_file.py | 430 +- tests/unittests/test__init__.py | 193 +- tests/unittests/test_atomic_helper.py | 4 +- tests/unittests/test_builtin_handlers.py | 405 +- tests/unittests/test_cli.py | 214 +- tests/unittests/test_conftest.py | 10 +- tests/unittests/test_cs_util.py | 39 +- tests/unittests/test_data.py | 526 +- tests/unittests/test_dhclient_hook.py | 89 +- tests/unittests/test_dmi.py | 90 +- tests/unittests/test_ds_identify.py | 1609 +++--- tests/unittests/test_ec2_util.py | 376 +- tests/unittests/test_event.py | 16 +- tests/unittests/test_features.py | 36 +- tests/unittests/test_gpg.py | 103 +- tests/unittests/test_helpers.py | 11 +- tests/unittests/test_log.py | 12 +- tests/unittests/test_merging.py | 123 +- tests/unittests/test_net.py | 4833 ++++++++++------- tests/unittests/test_net_activators.py | 154 +- tests/unittests/test_net_freebsd.py | 45 +- tests/unittests/test_netinfo.py | 193 +- tests/unittests/test_pathprefix2dict.py | 28 +- tests/unittests/test_registry.py | 21 +- tests/unittests/test_render_cloudcfg.py | 71 +- tests/unittests/test_reporting.py | 379 +- tests/unittests/test_reporting_hyperv.py | 193 +- tests/unittests/test_simpletable.py | 47 +- tests/unittests/test_sshutil.py | 817 +-- tests/unittests/test_stages.py | 444 +- tests/unittests/test_subp.py | 289 +- tests/unittests/test_temp_utils.py | 118 +- tests/unittests/test_templating.py | 103 +- tests/unittests/test_url_helper.py | 134 +- tests/unittests/test_util.py | 934 ++-- tests/unittests/test_version.py | 11 +- tests/unittests/util.py | 14 +- tools/mock-meta.py | 301 +- tools/validate-yaml.py | 4 +- tox.ini | 28 +- 441 files changed, 43425 insertions(+), 31496 deletions(-) create mode 100644 pyproject.toml diff --git a/.travis.yml b/.travis.yml index 9470cc31de8..c458db48aa6 100644 --- a/.travis.yml +++ b/.travis.yml @@ -133,6 +133,10 @@ matrix: env: TOXENV=flake8 - python: 3.6 env: TOXENV=pylint + - python: 3.6 + env: TOXENV=black + - python: 3.6 + env: TOXENV=isort - python: 3.7 env: TOXENV=doc # Test all supported Python versions (but at the end, so we schedule diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index 06b314970da..aa09c61e19c 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -19,6 +19,7 @@ Before any pull request can be accepted, you must do the following: `tools/.github-cla-signers`_ * Add or update any `unit tests`_ accordingly * Add or update any `integration tests`_ (if applicable) +* Format code (using black and isort) with `tox -e format` * Ensure unit tests and linting pass using `tox`_ * Submit a PR against the `main` branch of the `cloud-init` repository @@ -133,6 +134,10 @@ Do these things for each feature or bug git commit +* Apply black and isort formatting rules with `tox`_:: + + tox -e format + * Run unit tests and lint/formatting checks with `tox`_:: tox diff --git a/cloudinit/analyze/__main__.py b/cloudinit/analyze/__main__.py index 99e5c203cb7..36a5be78696 100644 --- a/cloudinit/analyze/__main__.py +++ b/cloudinit/analyze/__main__.py @@ -5,62 +5,111 @@ import argparse import re import sys +from datetime import datetime from cloudinit.util import json_dumps -from datetime import datetime -from . import dump -from . import show + +from . 
import dump, show def get_parser(parser=None): if not parser: parser = argparse.ArgumentParser( - prog='cloudinit-analyze', - description='Devel tool: Analyze cloud-init logs and data') - subparsers = parser.add_subparsers(title='Subcommands', dest='subcommand') + prog="cloudinit-analyze", + description="Devel tool: Analyze cloud-init logs and data", + ) + subparsers = parser.add_subparsers(title="Subcommands", dest="subcommand") subparsers.required = True parser_blame = subparsers.add_parser( - 'blame', help='Print list of executed stages ordered by time to init') + "blame", help="Print list of executed stages ordered by time to init" + ) parser_blame.add_argument( - '-i', '--infile', action='store', dest='infile', - default='/var/log/cloud-init.log', - help='specify where to read input.') + "-i", + "--infile", + action="store", + dest="infile", + default="/var/log/cloud-init.log", + help="specify where to read input.", + ) parser_blame.add_argument( - '-o', '--outfile', action='store', dest='outfile', default='-', - help='specify where to write output. ') - parser_blame.set_defaults(action=('blame', analyze_blame)) + "-o", + "--outfile", + action="store", + dest="outfile", + default="-", + help="specify where to write output. ", + ) + parser_blame.set_defaults(action=("blame", analyze_blame)) parser_show = subparsers.add_parser( - 'show', help='Print list of in-order events during execution') - parser_show.add_argument('-f', '--format', action='store', - dest='print_format', default='%I%D @%Es +%ds', - help='specify formatting of output.') - parser_show.add_argument('-i', '--infile', action='store', - dest='infile', default='/var/log/cloud-init.log', - help='specify where to read input.') - parser_show.add_argument('-o', '--outfile', action='store', - dest='outfile', default='-', - help='specify where to write output.') - parser_show.set_defaults(action=('show', analyze_show)) + "show", help="Print list of in-order events during execution" + ) + parser_show.add_argument( + "-f", + "--format", + action="store", + dest="print_format", + default="%I%D @%Es +%ds", + help="specify formatting of output.", + ) + parser_show.add_argument( + "-i", + "--infile", + action="store", + dest="infile", + default="/var/log/cloud-init.log", + help="specify where to read input.", + ) + parser_show.add_argument( + "-o", + "--outfile", + action="store", + dest="outfile", + default="-", + help="specify where to write output.", + ) + parser_show.set_defaults(action=("show", analyze_show)) parser_dump = subparsers.add_parser( - 'dump', help='Dump cloud-init events in JSON format') - parser_dump.add_argument('-i', '--infile', action='store', - dest='infile', default='/var/log/cloud-init.log', - help='specify where to read input. ') - parser_dump.add_argument('-o', '--outfile', action='store', - dest='outfile', default='-', - help='specify where to write output. ') - parser_dump.set_defaults(action=('dump', analyze_dump)) + "dump", help="Dump cloud-init events in JSON format" + ) + parser_dump.add_argument( + "-i", + "--infile", + action="store", + dest="infile", + default="/var/log/cloud-init.log", + help="specify where to read input. ", + ) + parser_dump.add_argument( + "-o", + "--outfile", + action="store", + dest="outfile", + default="-", + help="specify where to write output. 
", + ) + parser_dump.set_defaults(action=("dump", analyze_dump)) parser_boot = subparsers.add_parser( - 'boot', help='Print list of boot times for kernel and cloud-init') - parser_boot.add_argument('-i', '--infile', action='store', - dest='infile', default='/var/log/cloud-init.log', - help='specify where to read input. ') - parser_boot.add_argument('-o', '--outfile', action='store', - dest='outfile', default='-', - help='specify where to write output.') - parser_boot.set_defaults(action=('boot', analyze_boot)) + "boot", help="Print list of boot times for kernel and cloud-init" + ) + parser_boot.add_argument( + "-i", + "--infile", + action="store", + dest="infile", + default="/var/log/cloud-init.log", + help="specify where to read input. ", + ) + parser_boot.add_argument( + "-o", + "--outfile", + action="store", + dest="outfile", + default="-", + help="specify where to write output.", + ) + parser_boot.set_defaults(action=("boot", analyze_boot)) return parser @@ -78,61 +127,68 @@ def analyze_boot(name, args): """ infh, outfh = configure_io(args) kernel_info = show.dist_check_timestamp() - status_code, kernel_start, kernel_end, ci_sysd_start = \ - kernel_info + status_code, kernel_start, kernel_end, ci_sysd_start = kernel_info kernel_start_timestamp = datetime.utcfromtimestamp(kernel_start) kernel_end_timestamp = datetime.utcfromtimestamp(kernel_end) ci_sysd_start_timestamp = datetime.utcfromtimestamp(ci_sysd_start) try: - last_init_local = \ - [e for e in _get_events(infh) if e['name'] == 'init-local' and - 'starting search' in e['description']][-1] - ci_start = datetime.utcfromtimestamp(last_init_local['timestamp']) + last_init_local = [ + e + for e in _get_events(infh) + if e["name"] == "init-local" + and "starting search" in e["description"] + ][-1] + ci_start = datetime.utcfromtimestamp(last_init_local["timestamp"]) except IndexError: - ci_start = 'Could not find init-local log-line in cloud-init.log' + ci_start = "Could not find init-local log-line in cloud-init.log" status_code = show.FAIL_CODE - FAILURE_MSG = 'Your Linux distro or container does not support this ' \ - 'functionality.\n' \ - 'You must be running a Kernel Telemetry supported ' \ - 'distro.\nPlease check ' \ - 'https://cloudinit.readthedocs.io/en/latest' \ - '/topics/analyze.html for more ' \ - 'information on supported distros.\n' - - SUCCESS_MSG = '-- Most Recent Boot Record --\n' \ - ' Kernel Started at: {k_s_t}\n' \ - ' Kernel ended boot at: {k_e_t}\n' \ - ' Kernel time to boot (seconds): {k_r}\n' \ - ' Cloud-init activated by systemd at: {ci_sysd_t}\n' \ - ' Time between Kernel end boot and Cloud-init ' \ - 'activation (seconds): {bt_r}\n' \ - ' Cloud-init start: {ci_start}\n' - - CONTAINER_MSG = '-- Most Recent Container Boot Record --\n' \ - ' Container started at: {k_s_t}\n' \ - ' Cloud-init activated by systemd at: {ci_sysd_t}\n' \ - ' Cloud-init start: {ci_start}\n' \ - + FAILURE_MSG = ( + "Your Linux distro or container does not support this " + "functionality.\n" + "You must be running a Kernel Telemetry supported " + "distro.\nPlease check " + "https://cloudinit.readthedocs.io/en/latest" + "/topics/analyze.html for more " + "information on supported distros.\n" + ) + + SUCCESS_MSG = ( + "-- Most Recent Boot Record --\n" + " Kernel Started at: {k_s_t}\n" + " Kernel ended boot at: {k_e_t}\n" + " Kernel time to boot (seconds): {k_r}\n" + " Cloud-init activated by systemd at: {ci_sysd_t}\n" + " Time between Kernel end boot and Cloud-init " + "activation (seconds): {bt_r}\n" + " Cloud-init start: {ci_start}\n" + 
) + + CONTAINER_MSG = ( + "-- Most Recent Container Boot Record --\n" + " Container started at: {k_s_t}\n" + " Cloud-init activated by systemd at: {ci_sysd_t}\n" + " Cloud-init start: {ci_start}\n" + ) status_map = { show.FAIL_CODE: FAILURE_MSG, show.CONTAINER_CODE: CONTAINER_MSG, - show.SUCCESS_CODE: SUCCESS_MSG + show.SUCCESS_CODE: SUCCESS_MSG, } kernel_runtime = kernel_end - kernel_start between_process_runtime = ci_sysd_start - kernel_end kwargs = { - 'k_s_t': kernel_start_timestamp, - 'k_e_t': kernel_end_timestamp, - 'k_r': kernel_runtime, - 'bt_r': between_process_runtime, - 'k_e': kernel_end, - 'k_s': kernel_start, - 'ci_sysd': ci_sysd_start, - 'ci_sysd_t': ci_sysd_start_timestamp, - 'ci_start': ci_start + "k_s_t": kernel_start_timestamp, + "k_e_t": kernel_end_timestamp, + "k_r": kernel_runtime, + "bt_r": between_process_runtime, + "k_e": kernel_end, + "k_s": kernel_start, + "ci_sysd": ci_sysd_start, + "ci_sysd_t": ci_sysd_start_timestamp, + "ci_start": ci_start, } outfh.write(status_map[status_code].format(**kwargs)) @@ -152,15 +208,16 @@ def analyze_blame(name, args): and sorting by record data ('delta') """ (infh, outfh) = configure_io(args) - blame_format = ' %ds (%n)' - r = re.compile(r'(^\s+\d+\.\d+)', re.MULTILINE) - for idx, record in enumerate(show.show_events(_get_events(infh), - blame_format)): + blame_format = " %ds (%n)" + r = re.compile(r"(^\s+\d+\.\d+)", re.MULTILINE) + for idx, record in enumerate( + show.show_events(_get_events(infh), blame_format) + ): srecs = sorted(filter(r.match, record), reverse=True) - outfh.write('-- Boot Record %02d --\n' % (idx + 1)) - outfh.write('\n'.join(srecs) + '\n') - outfh.write('\n') - outfh.write('%d boot records analyzed\n' % (idx + 1)) + outfh.write("-- Boot Record %02d --\n" % (idx + 1)) + outfh.write("\n".join(srecs) + "\n") + outfh.write("\n") + outfh.write("%d boot records analyzed\n" % (idx + 1)) def analyze_show(name, args): @@ -184,21 +241,25 @@ def analyze_show(name, args): Finished stage: (modules-final) 0.NNN seconds """ (infh, outfh) = configure_io(args) - for idx, record in enumerate(show.show_events(_get_events(infh), - args.print_format)): - outfh.write('-- Boot Record %02d --\n' % (idx + 1)) - outfh.write('The total time elapsed since completing an event is' - ' printed after the "@" character.\n') - outfh.write('The time the event takes is printed after the "+" ' - 'character.\n\n') - outfh.write('\n'.join(record) + '\n') - outfh.write('%d boot records analyzed\n' % (idx + 1)) + for idx, record in enumerate( + show.show_events(_get_events(infh), args.print_format) + ): + outfh.write("-- Boot Record %02d --\n" % (idx + 1)) + outfh.write( + "The total time elapsed since completing an event is" + ' printed after the "@" character.\n' + ) + outfh.write( + 'The time the event takes is printed after the "+" character.\n\n' + ) + outfh.write("\n".join(record) + "\n") + outfh.write("%d boot records analyzed\n" % (idx + 1)) def analyze_dump(name, args): """Dump cloud-init events in json format""" (infh, outfh) = configure_io(args) - outfh.write(json_dumps(_get_events(infh)) + '\n') + outfh.write(json_dumps(_get_events(infh)) + "\n") def _get_events(infile): @@ -211,28 +272,28 @@ def _get_events(infile): def configure_io(args): """Common parsing and setup of input/output files""" - if args.infile == '-': + if args.infile == "-": infh = sys.stdin else: try: - infh = open(args.infile, 'r') + infh = open(args.infile, "r") except OSError: - sys.stderr.write('Cannot open file %s\n' % args.infile) + sys.stderr.write("Cannot open 
file %s\n" % args.infile) sys.exit(1) - if args.outfile == '-': + if args.outfile == "-": outfh = sys.stdout else: try: - outfh = open(args.outfile, 'w') + outfh = open(args.outfile, "w") except OSError: - sys.stderr.write('Cannot open file %s\n' % args.outfile) + sys.stderr.write("Cannot open file %s\n" % args.outfile) sys.exit(1) return (infh, outfh) -if __name__ == '__main__': +if __name__ == "__main__": parser = get_parser() args = parser.parse_args() (name, action_functor) = args.action diff --git a/cloudinit/analyze/dump.py b/cloudinit/analyze/dump.py index 62ad51fe267..8e6e3c6a74c 100644 --- a/cloudinit/analyze/dump.py +++ b/cloudinit/analyze/dump.py @@ -1,21 +1,20 @@ # This file is part of cloud-init. See LICENSE file for license information. import calendar -from datetime import datetime import sys +from datetime import datetime -from cloudinit import subp -from cloudinit import util +from cloudinit import subp, util stage_to_description = { - 'finished': 'finished running cloud-init', - 'init-local': 'starting search for local datasources', - 'init-network': 'searching for network datasources', - 'init': 'searching for network datasources', - 'modules-config': 'running config modules', - 'modules-final': 'finalizing modules', - 'modules': 'running modules for', - 'single': 'running single module ', + "finished": "finished running cloud-init", + "init-local": "starting search for local datasources", + "init-network": "searching for network datasources", + "init": "searching for network datasources", + "modules-config": "running config modules", + "modules-final": "finalizing modules", + "modules": "running modules for", + "single": "running single module ", } # logger's asctime format @@ -34,11 +33,11 @@ def parse_timestamp(timestampstr): if timestampstr.split()[0] in months: # Aug 29 22:55:26 FMT = DEFAULT_FMT - if '.' in timestampstr: + if "." in timestampstr: FMT = CLOUD_INIT_JOURNALCTL_FMT - dt = datetime.strptime(timestampstr + " " + - str(datetime.now().year), - FMT) + dt = datetime.strptime( + timestampstr + " " + str(datetime.now().year), FMT + ) timestamp = dt.strftime("%s.%f") elif "," in timestampstr: # 2016-09-12 14:39:20,839 @@ -52,7 +51,7 @@ def parse_timestamp(timestampstr): def parse_timestamp_from_date(timestampstr): - out, _ = subp.subp(['date', '+%s.%3N', '-d', timestampstr]) + out, _ = subp.subp(["date", "+%s.%3N", "-d", timestampstr]) timestamp = out.strip() return float(timestamp) @@ -79,8 +78,8 @@ def parse_ci_logline(line): # Apr 30 19:39:11 cloud-init[2673]: handlers.py[DEBUG]: start: \ # init-local/check-cache: attempting to read from cache [check] - amazon_linux_2_sep = ' cloud-init[' - separators = [' - ', ' [CLOUDINIT] ', amazon_linux_2_sep] + amazon_linux_2_sep = " cloud-init[" + separators = [" - ", " [CLOUDINIT] ", amazon_linux_2_sep] found = False for sep in separators: if sep in line: @@ -99,7 +98,7 @@ def parse_ci_logline(line): if "," in timehost: timestampstr, extra = timehost.split(",") timestampstr += ",%s" % extra.split()[0] - if ' ' in extra: + if " " in extra: hostname = extra.split()[-1] else: hostname = timehost.split()[-1] @@ -111,11 +110,11 @@ def parse_ci_logline(line): eventstr = eventstr.split(maxsplit=1)[1] else: timestampstr = timehost.split(hostname)[0].strip() - if 'Cloud-init v.' in eventstr: - event_type = 'start' - if 'running' in eventstr: - stage_and_timestamp = eventstr.split('running')[1].lstrip() - event_name, _ = stage_and_timestamp.split(' at ') + if "Cloud-init v." 
in eventstr: + event_type = "start" + if "running" in eventstr: + stage_and_timestamp = eventstr.split("running")[1].lstrip() + event_name, _ = stage_and_timestamp.split(" at ") event_name = event_name.replace("'", "").replace(":", "-") if event_name == "init": event_name = "init-network" @@ -128,17 +127,17 @@ def parse_ci_logline(line): event_description = eventstr.split(event_name)[1].strip() event = { - 'name': event_name.rstrip(":"), - 'description': event_description, - 'timestamp': parse_timestamp(timestampstr), - 'origin': 'cloudinit', - 'event_type': event_type.rstrip(":"), + "name": event_name.rstrip(":"), + "description": event_description, + "timestamp": parse_timestamp(timestampstr), + "origin": "cloudinit", + "event_type": event_type.rstrip(":"), } - if event['event_type'] == "finish": + if event["event_type"] == "finish": result = event_description.split(":")[0] - desc = event_description.split(result)[1].lstrip(':').strip() - event['result'] = result - event['description'] = desc.strip() + desc = event_description.split(result)[1].lstrip(":").strip() + event["result"] = result + event["description"] = desc.strip() return event @@ -146,10 +145,10 @@ def parse_ci_logline(line): def dump_events(cisource=None, rawdata=None): events = [] event = None - CI_EVENT_MATCHES = ['start:', 'finish:', 'Cloud-init v.'] + CI_EVENT_MATCHES = ["start:", "finish:", "Cloud-init v."] if not any([cisource, rawdata]): - raise ValueError('Either cisource or rawdata parameters are required') + raise ValueError("Either cisource or rawdata parameters are required") if rawdata: data = rawdata.splitlines() @@ -162,7 +161,7 @@ def dump_events(cisource=None, rawdata=None): try: event = parse_ci_logline(line) except ValueError: - sys.stderr.write('Skipping invalid entry\n') + sys.stderr.write("Skipping invalid entry\n") if event: events.append(event) diff --git a/cloudinit/analyze/show.py b/cloudinit/analyze/show.py index 01a4d3e5de8..5fd9cdfd5df 100644 --- a/cloudinit/analyze/show.py +++ b/cloudinit/analyze/show.py @@ -8,11 +8,10 @@ import datetime import json import os -import time import sys +import time -from cloudinit import subp -from cloudinit import util +from cloudinit import subp, util from cloudinit.distros import uses_systemd # Example events: @@ -35,24 +34,25 @@ # } format_key = { - '%d': 'delta', - '%D': 'description', - '%E': 'elapsed', - '%e': 'event_type', - '%I': 'indent', - '%l': 'level', - '%n': 'name', - '%o': 'origin', - '%r': 'result', - '%t': 'timestamp', - '%T': 'total_time', + "%d": "delta", + "%D": "description", + "%E": "elapsed", + "%e": "event_type", + "%I": "indent", + "%l": "level", + "%n": "name", + "%o": "origin", + "%r": "result", + "%t": "timestamp", + "%T": "total_time", } -formatting_help = " ".join(["{0}: {1}".format(k.replace('%', '%%'), v) - for k, v in format_key.items()]) -SUCCESS_CODE = 'successful' -FAIL_CODE = 'failure' -CONTAINER_CODE = 'container' +formatting_help = " ".join( + ["{0}: {1}".format(k.replace("%", "%%"), v) for k, v in format_key.items()] +) +SUCCESS_CODE = "successful" +FAIL_CODE = "failure" +CONTAINER_CODE = "container" TIMESTAMP_UNKNOWN = (FAIL_CODE, -1, -1, -1) @@ -60,7 +60,7 @@ def format_record(msg, event): for i, j in format_key.items(): if i in msg: # ensure consistent formatting of time values - if j in ['delta', 'elapsed', 'timestamp']: + if j in ["delta", "elapsed", "timestamp"]: msg = msg.replace(i, "{%s:08.5f}" % j) else: msg = msg.replace(i, "{%s}" % j) @@ -68,13 +68,13 @@ def format_record(msg, event): def dump_event_files(event): 
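# --- Illustrative aside (not part of the upstream patch): how format_record()
# above expands a print format. Keys like "%n" and "%d" come from format_key;
# time-valued fields (delta, elapsed, timestamp) are given a zero-padded
# "{field:08.5f}" spec while everything else becomes a plain "{field}". The
# event values below are made-up samples, not captured cloud-init data.
fmt = "(%n) %d seconds"
msg = fmt.replace("%n", "{name}").replace("%d", "{delta:08.5f}")
print(msg.format(name="init-network", delta=1.25))
# -> (init-network) 01.25000 seconds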
- content = dict((k, v) for k, v in event.items() if k not in ['content']) - files = content['files'] + content = dict((k, v) for k, v in event.items() if k not in ["content"]) + files = content["files"] saved = [] for f in files: - fname = f['path'] + fname = f["path"] fn_local = os.path.basename(fname) - fcontent = base64.b64decode(f['content']).decode('ascii') + fcontent = base64.b64decode(f["content"]).decode("ascii") util.write_file(fn_local, fcontent) saved.append(fn_local) @@ -83,13 +83,13 @@ def dump_event_files(event): def event_name(event): if event: - return event.get('name') + return event.get("name") return None def event_type(event): if event: - return event.get('event_type') + return event.get("event_type") return None @@ -100,7 +100,7 @@ def event_parent(event): def event_timestamp(event): - return float(event.get('timestamp')) + return float(event.get("timestamp")) def event_datetime(event): @@ -117,41 +117,44 @@ def event_duration(start, finish): def event_record(start_time, start, finish): record = finish.copy() - record.update({ - 'delta': event_duration(start, finish), - 'elapsed': delta_seconds(start_time, event_datetime(start)), - 'indent': '|' + ' ' * (event_name(start).count('/') - 1) + '`->', - }) + record.update( + { + "delta": event_duration(start, finish), + "elapsed": delta_seconds(start_time, event_datetime(start)), + "indent": "|" + " " * (event_name(start).count("/") - 1) + "`->", + } + ) return record def total_time_record(total_time): - return 'Total Time: %3.5f seconds\n' % total_time + return "Total Time: %3.5f seconds\n" % total_time class SystemctlReader(object): - ''' + """ Class for dealing with all systemctl subp calls in a consistent manner. - ''' + """ + def __init__(self, property, parameter=None): self.epoch = None - self.args = ['/bin/systemctl', 'show'] + self.args = ["/bin/systemctl", "show"] if parameter: self.args.append(parameter) - self.args.extend(['-p', property]) + self.args.extend(["-p", property]) # Don't want the init of our object to break. Instead of throwing # an exception, set an error code that gets checked when data is # requested from the object self.failure = self.subp() def subp(self): - ''' + """ Make a subp call based on set args and handle errors by setting failure code :return: whether the subp call failed or not - ''' + """ try: value, err = subp.subp(self.args, capture=True) if err: @@ -162,41 +165,41 @@ def subp(self): return systemctl_fail def parse_epoch_as_float(self): - ''' + """ If subp call succeeded, return the timestamp from subp as a float. :return: timestamp as a float - ''' + """ # subp has 2 ways to fail: it either fails and throws an exception, # or returns an error code. Raise an exception here in order to make # sure both scenarios throw exceptions if self.failure: - raise RuntimeError('Subprocess call to systemctl has failed, ' - 'returning error code ({})' - .format(self.failure)) + raise RuntimeError( + "Subprocess call to systemctl has failed, " + "returning error code ({})".format(self.failure) + ) # Output from systemctl show has the format Property=Value. # For example, UserspaceMonotonic=1929304 - timestamp = self.epoch.split('=')[1] + timestamp = self.epoch.split("=")[1] # Timestamps reported by systemctl are in microseconds, converting return float(timestamp) / 1000000 def dist_check_timestamp(): - ''' + """ Determine which init system a particular linux distro is using. Each init system (systemd, upstart, etc) has a different way of providing timestamps. 
:return: timestamps of kernelboot, kernelendboot, and cloud-initstart or TIMESTAMP_UNKNOWN if the timestamps cannot be retrieved. - ''' + """ if uses_systemd(): return gather_timestamps_using_systemd() # Use dmesg to get timestamps if the distro does not have systemd - if util.is_FreeBSD() or 'gentoo' in \ - util.system_info()['system'].lower(): + if util.is_FreeBSD() or "gentoo" in util.system_info()["system"].lower(): return gather_timestamps_using_dmesg() # this distro doesn't fit anything that is supported by cloud-init. just @@ -205,20 +208,20 @@ def dist_check_timestamp(): def gather_timestamps_using_dmesg(): - ''' + """ Gather timestamps that corresponds to kernel begin initialization, kernel finish initialization using dmesg as opposed to systemctl :return: the two timestamps plus a dummy timestamp to keep consistency with gather_timestamps_using_systemd - ''' + """ try: - data, _ = subp.subp(['dmesg'], capture=True) + data, _ = subp.subp(["dmesg"], capture=True) split_entries = data[0].splitlines() for i in split_entries: - if i.decode('UTF-8').find('user') != -1: - splitup = i.decode('UTF-8').split() - stripped = splitup[1].strip(']') + if i.decode("UTF-8").find("user") != -1: + splitup = i.decode("UTF-8").split() + stripped = splitup[1].strip("]") # kernel timestamp from dmesg is equal to 0, # with the userspace timestamp relative to it. @@ -228,8 +231,7 @@ def gather_timestamps_using_dmesg(): # systemd wont start cloud-init in this case, # so we cannot get that timestamp - return SUCCESS_CODE, kernel_start, kernel_end, \ - kernel_end + return SUCCESS_CODE, kernel_start, kernel_end, kernel_end except Exception: pass @@ -237,18 +239,20 @@ def gather_timestamps_using_dmesg(): def gather_timestamps_using_systemd(): - ''' + """ Gather timestamps that corresponds to kernel begin initialization, kernel finish initialization. and cloud-init systemd unit activation :return: the three timestamps - ''' + """ kernel_start = float(time.time()) - float(util.uptime()) try: - delta_k_end = SystemctlReader('UserspaceTimestampMonotonic')\ - .parse_epoch_as_float() - delta_ci_s = SystemctlReader('InactiveExitTimestampMonotonic', - 'cloud-init-local').parse_epoch_as_float() + delta_k_end = SystemctlReader( + "UserspaceTimestampMonotonic" + ).parse_epoch_as_float() + delta_ci_s = SystemctlReader( + "InactiveExitTimestampMonotonic", "cloud-init-local" + ).parse_epoch_as_float() base_time = kernel_start status = SUCCESS_CODE # lxc based containers do not set their monotonic zero point to be when @@ -262,12 +266,13 @@ def gather_timestamps_using_systemd(): # in containers when https://github.com/lxc/lxcfs/issues/292 # is fixed, util.uptime() should be used instead of stat on try: - file_stat = os.stat('/proc/1/cmdline') + file_stat = os.stat("/proc/1/cmdline") kernel_start = file_stat.st_atime except OSError as err: - raise RuntimeError('Could not determine container boot ' - 'time from /proc/1/cmdline. ({})' - .format(err)) from err + raise RuntimeError( + "Could not determine container boot " + "time from /proc/1/cmdline. 
({})".format(err) + ) from err status = CONTAINER_CODE else: status = FAIL_CODE @@ -283,10 +288,14 @@ def gather_timestamps_using_systemd(): return status, kernel_start, kernel_end, cloudinit_sysd -def generate_records(events, blame_sort=False, - print_format="(%n) %d seconds in %I%D", - dump_files=False, log_datafiles=False): - ''' +def generate_records( + events, + blame_sort=False, + print_format="(%n) %d seconds in %I%D", + dump_files=False, + log_datafiles=False, +): + """ Take in raw events and create parent-child dependencies between events in order to order events in chronological order. @@ -298,9 +307,9 @@ def generate_records(events, blame_sort=False, :param log_datafiles: whether or not to log events generated :return: boot records ordered chronologically - ''' + """ - sorted_events = sorted(events, key=lambda x: x['timestamp']) + sorted_events = sorted(events, key=lambda x: x["timestamp"]) records = [] start_time = None total_time = 0.0 @@ -316,8 +325,8 @@ def generate_records(events, blame_sort=False, except IndexError: next_evt = None - if event_type(event) == 'start': - if event.get('name') in stages_seen: + if event_type(event) == "start": + if event.get("name") in stages_seen: records.append(total_time_record(total_time)) boot_records.append(records) records = [] @@ -331,25 +340,28 @@ def generate_records(events, blame_sort=False, # see if we have a pair if event_name(event) == event_name(next_evt): - if event_type(next_evt) == 'finish': - records.append(format_record(print_format, - event_record(start_time, - event, - next_evt))) + if event_type(next_evt) == "finish": + records.append( + format_record( + print_format, + event_record(start_time, event, next_evt), + ) + ) else: # This is a parent event - records.append("Starting stage: %s" % event.get('name')) + records.append("Starting stage: %s" % event.get("name")) unprocessed.append(event) - stages_seen.append(event.get('name')) + stages_seen.append(event.get("name")) continue else: prev_evt = unprocessed.pop() if event_name(event) == event_name(prev_evt): record = event_record(start_time, prev_evt, event) - records.append(format_record("Finished stage: " - "(%n) %d seconds", - record) + "\n") - total_time += record.get('delta') + records.append( + format_record("Finished stage: (%n) %d seconds", record) + + "\n" + ) + total_time += record.get("delta") else: # not a match, put it back unprocessed.append(prev_evt) @@ -360,7 +372,7 @@ def generate_records(events, blame_sort=False, def show_events(events, print_format): - ''' + """ A passthrough method that makes it easier to call generate_records() :param events: JSONs from dump that represents events taken from logs @@ -368,18 +380,18 @@ def show_events(events, print_format): and time taken by the event in one line :return: boot records ordered chronologically - ''' + """ return generate_records(events, print_format=print_format) def load_events_infile(infile): - ''' + """ Takes in a log file, read it, and convert to json. :param infile: The Log file to be read :return: json version of logfile, raw file - ''' + """ data = infile.read() try: return json.loads(data), data diff --git a/cloudinit/apport.py b/cloudinit/apport.py index aadc638fb87..92068aa9b49 100644 --- a/cloudinit/apport.py +++ b/cloudinit/apport.py @@ -2,127 +2,143 @@ # # This file is part of cloud-init. See LICENSE file for license information. 
-'''Cloud-init apport interface''' +"""Cloud-init apport interface""" try: from apport.hookutils import ( - attach_file, attach_root_command_outputs, root_command_output) + attach_file, + attach_root_command_outputs, + root_command_output, + ) + has_apport = True except ImportError: has_apport = False KNOWN_CLOUD_NAMES = [ - 'AliYun', - 'AltCloud', - 'Amazon - Ec2', - 'Azure', - 'Bigstep', - 'Brightbox', - 'CloudSigma', - 'CloudStack', - 'DigitalOcean', - 'E24Cloud', - 'GCE - Google Compute Engine', - 'Exoscale', - 'Hetzner Cloud', - 'IBM - (aka SoftLayer or BlueMix)', - 'LXD', - 'MAAS', - 'NoCloud', - 'OpenNebula', - 'OpenStack', - 'Oracle', - 'OVF', - 'RbxCloud - (HyperOne, Rootbox, Rubikon)', - 'OpenTelekomCloud', - 'SAP Converged Cloud', - 'Scaleway', - 'SmartOS', - 'UpCloud', - 'VMware', - 'Vultr', - 'ZStack', - 'Other' + "AliYun", + "AltCloud", + "Amazon - Ec2", + "Azure", + "Bigstep", + "Brightbox", + "CloudSigma", + "CloudStack", + "DigitalOcean", + "E24Cloud", + "GCE - Google Compute Engine", + "Exoscale", + "Hetzner Cloud", + "IBM - (aka SoftLayer or BlueMix)", + "LXD", + "MAAS", + "NoCloud", + "OpenNebula", + "OpenStack", + "Oracle", + "OVF", + "RbxCloud - (HyperOne, Rootbox, Rubikon)", + "OpenTelekomCloud", + "SAP Converged Cloud", + "Scaleway", + "SmartOS", + "UpCloud", + "VMware", + "Vultr", + "ZStack", + "Other", ] # Potentially clear text collected logs -CLOUDINIT_LOG = '/var/log/cloud-init.log' -CLOUDINIT_OUTPUT_LOG = '/var/log/cloud-init-output.log' -USER_DATA_FILE = '/var/lib/cloud/instance/user-data.txt' # Optional +CLOUDINIT_LOG = "/var/log/cloud-init.log" +CLOUDINIT_OUTPUT_LOG = "/var/log/cloud-init-output.log" +USER_DATA_FILE = "/var/lib/cloud/instance/user-data.txt" # Optional def attach_cloud_init_logs(report, ui=None): - '''Attach cloud-init logs and tarfile from 'cloud-init collect-logs'.''' - attach_root_command_outputs(report, { - 'cloud-init-log-warnings': - 'egrep -i "warn|error" /var/log/cloud-init.log', - 'cloud-init-output.log.txt': 'cat /var/log/cloud-init-output.log'}) + """Attach cloud-init logs and tarfile from 'cloud-init collect-logs'.""" + attach_root_command_outputs( + report, + { + "cloud-init-log-warnings": ( + 'egrep -i "warn|error" /var/log/cloud-init.log' + ), + "cloud-init-output.log.txt": "cat /var/log/cloud-init-output.log", + }, + ) root_command_output( - ['cloud-init', 'collect-logs', '-t', '/tmp/cloud-init-logs.tgz']) - attach_file(report, '/tmp/cloud-init-logs.tgz', 'logs.tgz') + ["cloud-init", "collect-logs", "-t", "/tmp/cloud-init-logs.tgz"] + ) + attach_file(report, "/tmp/cloud-init-logs.tgz", "logs.tgz") def attach_hwinfo(report, ui=None): - '''Optionally attach hardware info from lshw.''' + """Optionally attach hardware info from lshw.""" prompt = ( - 'Your device details (lshw) may be useful to developers when' - ' addressing this bug, but gathering it requires admin privileges.' - ' Would you like to include this info?') + "Your device details (lshw) may be useful to developers when" + " addressing this bug, but gathering it requires admin privileges." + " Would you like to include this info?" + ) if ui and ui.yesno(prompt): - attach_root_command_outputs(report, {'lshw.txt': 'lshw'}) + attach_root_command_outputs(report, {"lshw.txt": "lshw"}) def attach_cloud_info(report, ui=None): - '''Prompt for cloud details if available.''' + """Prompt for cloud details if available.""" if ui: - prompt = 'Is this machine running in a cloud environment?' + prompt = "Is this machine running in a cloud environment?" 
response = ui.yesno(prompt) if response is None: raise StopIteration # User cancelled if response: - prompt = ('Please select the cloud vendor or environment in which' - ' this instance is running') + prompt = ( + "Please select the cloud vendor or environment in which" + " this instance is running" + ) response = ui.choice(prompt, KNOWN_CLOUD_NAMES) if response: - report['CloudName'] = KNOWN_CLOUD_NAMES[response[0]] + report["CloudName"] = KNOWN_CLOUD_NAMES[response[0]] else: - report['CloudName'] = 'None' + report["CloudName"] = "None" def attach_user_data(report, ui=None): - '''Optionally provide user-data if desired.''' + """Optionally provide user-data if desired.""" if ui: prompt = ( - 'Your user-data or cloud-config file can optionally be provided' - ' from {0} and could be useful to developers when addressing this' - ' bug. Do you wish to attach user-data to this bug?'.format( - USER_DATA_FILE)) + "Your user-data or cloud-config file can optionally be provided" + " from {0} and could be useful to developers when addressing this" + " bug. Do you wish to attach user-data to this bug?".format( + USER_DATA_FILE + ) + ) response = ui.yesno(prompt) if response is None: raise StopIteration # User cancelled if response: - attach_file(report, USER_DATA_FILE, 'user_data.txt') + attach_file(report, USER_DATA_FILE, "user_data.txt") def add_bug_tags(report): - '''Add any appropriate tags to the bug.''' - if 'JournalErrors' in report.keys(): - errors = report['JournalErrors'] - if 'Breaking ordering cycle' in errors: - report['Tags'] = 'systemd-ordering' + """Add any appropriate tags to the bug.""" + if "JournalErrors" in report.keys(): + errors = report["JournalErrors"] + if "Breaking ordering cycle" in errors: + report["Tags"] = "systemd-ordering" def add_info(report, ui): - '''This is an entry point to run cloud-init's apport functionality. + """This is an entry point to run cloud-init's apport functionality. Distros which want apport support will have a cloud-init package-hook at /usr/share/apport/package-hooks/cloud-init.py which defines an add_info function and returns the result of cloudinit.apport.add_info(report, ui). - ''' + """ if not has_apport: raise RuntimeError( - 'No apport imports discovered. Apport functionality disabled') + "No apport imports discovered. 
Apport functionality disabled" + ) attach_cloud_init_logs(report, ui) attach_hwinfo(report, ui) attach_cloud_info(report, ui) @@ -130,4 +146,5 @@ def add_info(report, ui): add_bug_tags(report) return True + # vi: ts=4 expandtab diff --git a/cloudinit/atomic_helper.py b/cloudinit/atomic_helper.py index 485ff92fb94..ae117fad5a2 100644 --- a/cloudinit/atomic_helper.py +++ b/cloudinit/atomic_helper.py @@ -10,8 +10,9 @@ LOG = logging.getLogger(__name__) -def write_file(filename, content, mode=_DEF_PERMS, - omode="wb", preserve_mode=False): +def write_file( + filename, content, mode=_DEF_PERMS, omode="wb", preserve_mode=False +): # open filename in mode 'omode', write content, set permissions to 'mode' if preserve_mode: @@ -23,12 +24,18 @@ def write_file(filename, content, mode=_DEF_PERMS, tf = None try: - tf = tempfile.NamedTemporaryFile(dir=os.path.dirname(filename), - delete=False, mode=omode) + tf = tempfile.NamedTemporaryFile( + dir=os.path.dirname(filename), delete=False, mode=omode + ) LOG.debug( "Atomically writing to file %s (via temporary file %s) - %s: [%o]" " %d bytes/chars", - filename, tf.name, omode, mode, len(content)) + filename, + tf.name, + omode, + mode, + len(content), + ) tf.write(content) tf.close() os.chmod(tf.name, mode) @@ -42,7 +49,11 @@ def write_file(filename, content, mode=_DEF_PERMS, def write_json(filename, data, mode=_DEF_PERMS): # dump json representation of data to file filename. return write_file( - filename, json.dumps(data, indent=1, sort_keys=True) + "\n", - omode="w", mode=mode) + filename, + json.dumps(data, indent=1, sort_keys=True) + "\n", + omode="w", + mode=mode, + ) + # vi: ts=4 expandtab diff --git a/cloudinit/cloud.py b/cloudinit/cloud.py index 7ae98e1cb92..91e48103e94 100644 --- a/cloudinit/cloud.py +++ b/cloudinit/cloud.py @@ -35,7 +35,8 @@ def __init__(self, datasource, paths, cfg, distro, runners, reporter=None): reporter = events.ReportEventStack( name="unnamed-cloud-reporter", description="unnamed-cloud-reporter", - reporting_enabled=False) + reporting_enabled=False, + ) self.reporter = reporter # If a 'user' manipulates logging or logging services @@ -56,8 +57,11 @@ def run(self, name, functor, args, freq=None, clear_on_fail=False): def get_template_filename(self, name): fn = self.paths.template_tpl % (name) if not os.path.isfile(fn): - LOG.warning("No template found in %s for template named %s", - os.path.dirname(fn), name) + LOG.warning( + "No template found in %s for template named %s", + os.path.dirname(fn), + name, + ) return None return fn @@ -80,7 +84,8 @@ def get_locale(self): def get_hostname(self, fqdn=False, metadata_only=False): return self.datasource.get_hostname( - fqdn=fqdn, metadata_only=metadata_only) + fqdn=fqdn, metadata_only=metadata_only + ) def device_name_to_device(self, name): return self.datasource.device_name_to_device(name) @@ -94,4 +99,5 @@ def get_cpath(self, name=None): def get_ipath(self, name=None): return self.paths.get_ipath(name) + # vi: ts=4 expandtab diff --git a/cloudinit/cmd/clean.py b/cloudinit/cmd/clean.py index 3502dd56956..0e1db1180b1 100644 --- a/cloudinit/cmd/clean.py +++ b/cloudinit/cmd/clean.py @@ -10,9 +10,13 @@ import sys from cloudinit.stages import Init -from cloudinit.subp import (ProcessExecutionError, subp) +from cloudinit.subp import ProcessExecutionError, subp from cloudinit.util import ( - del_dir, del_file, get_config_logfiles, is_link, error + del_dir, + del_file, + error, + get_config_logfiles, + is_link, ) @@ -27,18 +31,35 @@ def get_parser(parser=None): """ if not parser: parser = 
argparse.ArgumentParser( - prog='clean', - description=('Remove logs and artifacts so cloud-init re-runs on ' - 'a clean system')) + prog="clean", + description=( + "Remove logs and artifacts so cloud-init re-runs on " + "a clean system" + ), + ) parser.add_argument( - '-l', '--logs', action='store_true', default=False, dest='remove_logs', - help='Remove cloud-init logs.') + "-l", + "--logs", + action="store_true", + default=False, + dest="remove_logs", + help="Remove cloud-init logs.", + ) parser.add_argument( - '-r', '--reboot', action='store_true', default=False, - help='Reboot system after logs are cleaned so cloud-init re-runs.') + "-r", + "--reboot", + action="store_true", + default=False, + help="Reboot system after logs are cleaned so cloud-init re-runs.", + ) parser.add_argument( - '-s', '--seed', action='store_true', default=False, dest='remove_seed', - help='Remove cloud-init seed directory /var/lib/cloud/seed.') + "-s", + "--seed", + action="store_true", + default=False, + dest="remove_seed", + help="Remove cloud-init seed directory /var/lib/cloud/seed.", + ) return parser @@ -59,8 +80,8 @@ def remove_artifacts(remove_logs, remove_seed=False): if not os.path.isdir(init.paths.cloud_dir): return 0 # Artifacts dir already cleaned - seed_path = os.path.join(init.paths.cloud_dir, 'seed') - for path in glob.glob('%s/*' % init.paths.cloud_dir): + seed_path = os.path.join(init.paths.cloud_dir, "seed") + for path in glob.glob("%s/*" % init.paths.cloud_dir): if path == seed_path and not remove_seed: continue try: @@ -69,7 +90,7 @@ def remove_artifacts(remove_logs, remove_seed=False): else: del_file(path) except OSError as e: - error('Could not remove {0}: {1}'.format(path, str(e))) + error("Could not remove {0}: {1}".format(path, str(e))) return 1 return 0 @@ -78,13 +99,15 @@ def handle_clean_args(name, args): """Handle calls to 'cloud-init clean' as a subcommand.""" exit_code = remove_artifacts(args.remove_logs, args.remove_seed) if exit_code == 0 and args.reboot: - cmd = ['shutdown', '-r', 'now'] + cmd = ["shutdown", "-r", "now"] try: subp(cmd, capture=False) except ProcessExecutionError as e: error( 'Could not reboot this system using "{0}": {1}'.format( - cmd, str(e))) + cmd, str(e) + ) + ) exit_code = 1 return exit_code @@ -92,10 +115,10 @@ def handle_clean_args(name, args): def main(): """Tool to collect and tar all cloud-init related logs.""" parser = get_parser() - sys.exit(handle_clean_args('clean', parser.parse_args())) + sys.exit(handle_clean_args("clean", parser.parse_args())) -if __name__ == '__main__': +if __name__ == "__main__": main() # vi: ts=4 expandtab diff --git a/cloudinit/cmd/cloud_id.py b/cloudinit/cmd/cloud_id.py index 0cdc96754c5..b92b03a893d 100755 --- a/cloudinit/cmd/cloud_id.py +++ b/cloudinit/cmd/cloud_id.py @@ -6,13 +6,16 @@ import json import sys -from cloudinit.util import error from cloudinit.sources import ( - INSTANCE_JSON_FILE, METADATA_UNKNOWN, canonical_cloud_id) + INSTANCE_JSON_FILE, + METADATA_UNKNOWN, + canonical_cloud_id, +) +from cloudinit.util import error -DEFAULT_INSTANCE_JSON = '/run/cloud-init/%s' % INSTANCE_JSON_FILE +DEFAULT_INSTANCE_JSON = "/run/cloud-init/%s" % INSTANCE_JSON_FILE -NAME = 'cloud-id' +NAME = "cloud-id" def get_parser(parser=None): @@ -27,17 +30,30 @@ def get_parser(parser=None): if not parser: parser = argparse.ArgumentParser( prog=NAME, - description='Report the canonical cloud-id for this instance') + description="Report the canonical cloud-id for this instance", + ) parser.add_argument( - '-j', '--json', 
action='store_true', default=False, - help='Report all standardized cloud-id information as json.') + "-j", + "--json", + action="store_true", + default=False, + help="Report all standardized cloud-id information as json.", + ) parser.add_argument( - '-l', '--long', action='store_true', default=False, - help='Report extended cloud-id information as tab-delimited string.') + "-l", + "--long", + action="store_true", + default=False, + help="Report extended cloud-id information as tab-delimited string.", + ) parser.add_argument( - '-i', '--instance-data', type=str, default=DEFAULT_INSTANCE_JSON, - help=('Path to instance-data.json file. Default is %s' % - DEFAULT_INSTANCE_JSON)) + "-i", + "--instance-data", + type=str, + default=DEFAULT_INSTANCE_JSON, + help="Path to instance-data.json file. Default is %s" + % DEFAULT_INSTANCE_JSON, + ) return parser @@ -53,24 +69,28 @@ def handle_args(name, args): except IOError: return error( "File not found '%s'. Provide a path to instance data json file" - ' using --instance-data' % args.instance_data) + " using --instance-data" % args.instance_data + ) except ValueError as e: return error( - "File '%s' is not valid json. %s" % (args.instance_data, e)) - v1 = instance_data.get('v1', {}) + "File '%s' is not valid json. %s" % (args.instance_data, e) + ) + v1 = instance_data.get("v1", {}) cloud_id = canonical_cloud_id( - v1.get('cloud_name', METADATA_UNKNOWN), - v1.get('region', METADATA_UNKNOWN), - v1.get('platform', METADATA_UNKNOWN)) + v1.get("cloud_name", METADATA_UNKNOWN), + v1.get("region", METADATA_UNKNOWN), + v1.get("platform", METADATA_UNKNOWN), + ) if args.json: - v1['cloud_id'] = cloud_id - response = json.dumps( # Pretty, sorted json - v1, indent=1, sort_keys=True, separators=(',', ': ')) + v1["cloud_id"] = cloud_id + response = json.dumps( # Pretty, sorted json + v1, indent=1, sort_keys=True, separators=(",", ": ") + ) elif args.long: - response = '%s\t%s' % (cloud_id, v1.get('region', METADATA_UNKNOWN)) + response = "%s\t%s" % (cloud_id, v1.get("region", METADATA_UNKNOWN)) else: response = cloud_id - sys.stdout.write('%s\n' % response) + sys.stdout.write("%s\n" % response) return 0 @@ -80,7 +100,7 @@ def main(): sys.exit(handle_args(NAME, parser.parse_args())) -if __name__ == '__main__': +if __name__ == "__main__": main() # vi: ts=4 expandtab diff --git a/cloudinit/cmd/devel/__init__.py b/cloudinit/cmd/devel/__init__.py index 3ae28b69c92..ead5f7a9152 100644 --- a/cloudinit/cmd/devel/__init__.py +++ b/cloudinit/cmd/devel/__init__.py @@ -11,7 +11,7 @@ def addLogHandlerCLI(logger, log_level): """Add a commandline logging handler to emit messages to stderr.""" - formatter = logging.Formatter('%(levelname)s: %(message)s') + formatter = logging.Formatter("%(levelname)s: %(message)s") log.setupBasicLogging(log_level, formatter=formatter) return logger @@ -22,4 +22,5 @@ def read_cfg_paths(): init.read_cfg() return init.paths + # vi: ts=4 expandtab diff --git a/cloudinit/cmd/devel/hotplug_hook.py b/cloudinit/cmd/devel/hotplug_hook.py index f6f36a007cd..a9be037911d 100644 --- a/cloudinit/cmd/devel/hotplug_hook.py +++ b/cloudinit/cmd/devel/hotplug_hook.py @@ -6,20 +6,17 @@ import sys import time -from cloudinit import log -from cloudinit import reporting -from cloudinit import stages +from cloudinit import log, reporting, stages from cloudinit.event import EventScope, EventType from cloudinit.net import activators, read_sys_net_safe from cloudinit.net.network_state import parse_net_config_data from cloudinit.reporting import events -from cloudinit.stages 
import Init from cloudinit.sources import DataSource # noqa: F401 from cloudinit.sources import DataSourceNotFoundException - +from cloudinit.stages import Init LOG = log.getLogger(__name__) -NAME = 'hotplug-hook' +NAME = "hotplug-hook" def get_parser(parser=None): @@ -35,33 +32,38 @@ def get_parser(parser=None): parser.description = __doc__ parser.add_argument( - "-s", "--subsystem", required=True, + "-s", + "--subsystem", + required=True, help="subsystem to act on", - choices=['net'] + choices=["net"], ) subparsers = parser.add_subparsers( - title='Hotplug Action', - dest='hotplug_action' + title="Hotplug Action", dest="hotplug_action" ) subparsers.required = True subparsers.add_parser( - 'query', - help='query if hotplug is enabled for given subsystem' + "query", help="query if hotplug is enabled for given subsystem" ) parser_handle = subparsers.add_parser( - 'handle', help='handle the hotplug event') + "handle", help="handle the hotplug event" + ) parser_handle.add_argument( - "-d", "--devpath", required=True, + "-d", + "--devpath", + required=True, metavar="PATH", - help="sysfs path to hotplugged device" + help="sysfs path to hotplugged device", ) parser_handle.add_argument( - "-u", "--udevaction", required=True, + "-u", + "--udevaction", + required=True, help="action to take", - choices=['add', 'remove'] + choices=["add", "remove"], ) return parser @@ -90,27 +92,29 @@ def device_detected(self) -> bool: def detect_hotplugged_device(self): detect_presence = None - if self.action == 'add': + if self.action == "add": detect_presence = True - elif self.action == 'remove': + elif self.action == "remove": detect_presence = False else: - raise ValueError('Unknown action: %s' % self.action) + raise ValueError("Unknown action: %s" % self.action) if detect_presence != self.device_detected(): raise RuntimeError( - 'Failed to detect %s in updated metadata' % self.id) + "Failed to detect %s in updated metadata" % self.id + ) def success(self): return self.success_fn() def update_metadata(self): - result = self.datasource.update_metadata_if_supported([ - EventType.HOTPLUG]) + result = self.datasource.update_metadata_if_supported( + [EventType.HOTPLUG] + ) if not result: raise RuntimeError( - 'Datasource %s not updated for ' - 'event %s' % (self.datasource, EventType.HOTPLUG) + "Datasource %s not updated for event %s" + % (self.datasource, EventType.HOTPLUG) ) return result @@ -118,7 +122,7 @@ def update_metadata(self): class NetHandler(UeventHandler): def __init__(self, datasource, devpath, action, success_fn): # convert devpath to mac address - id = read_sys_net_safe(os.path.basename(devpath), 'address') + id = read_sys_net_safe(os.path.basename(devpath), "address") super().__init__(id, datasource, devpath, action, success_fn) def apply(self): @@ -128,14 +132,16 @@ def apply(self): ) interface_name = os.path.basename(self.devpath) activator = activators.select_activator() - if self.action == 'add': + if self.action == "add": if not activator.bring_up_interface(interface_name): raise RuntimeError( - 'Failed to bring up device: {}'.format(self.devpath)) - elif self.action == 'remove': + "Failed to bring up device: {}".format(self.devpath) + ) + elif self.action == "remove": if not activator.bring_down_interface(interface_name): raise RuntimeError( - 'Failed to bring down device: {}'.format(self.devpath)) + "Failed to bring down device: {}".format(self.devpath) + ) @property def config(self): @@ -144,15 +150,16 @@ def config(self): def device_detected(self) -> bool: netstate = 
parse_net_config_data(self.config) found = [ - iface for iface in netstate.iter_interfaces() - if iface.get('mac_address') == self.id + iface + for iface in netstate.iter_interfaces() + if iface.get("mac_address") == self.id ] - LOG.debug('Ifaces with ID=%s : %s', self.id, found) + LOG.debug("Ifaces with ID=%s : %s", self.id, found) return len(found) > 0 SUBSYSTEM_PROPERTES_MAP = { - 'net': (NetHandler, EventScope.NETWORK), + "net": (NetHandler, EventScope.NETWORK), } @@ -161,66 +168,65 @@ def is_enabled(hotplug_init, subsystem): scope = SUBSYSTEM_PROPERTES_MAP[subsystem][1] except KeyError as e: raise Exception( - 'hotplug-hook: cannot handle events for subsystem: {}'.format( - subsystem) + "hotplug-hook: cannot handle events for subsystem: {}".format( + subsystem + ) ) from e return stages.update_event_enabled( datasource=hotplug_init.datasource, cfg=hotplug_init.cfg, event_source_type=EventType.HOTPLUG, - scope=scope + scope=scope, ) def initialize_datasource(hotplug_init, subsystem): - LOG.debug('Fetching datasource') + LOG.debug("Fetching datasource") datasource = hotplug_init.fetch(existing="trust") if not datasource.get_supported_events([EventType.HOTPLUG]): - LOG.debug('hotplug not supported for event of type %s', subsystem) + LOG.debug("hotplug not supported for event of type %s", subsystem) return if not is_enabled(hotplug_init, subsystem): - LOG.debug('hotplug not enabled for event of type %s', subsystem) + LOG.debug("hotplug not enabled for event of type %s", subsystem) return return datasource -def handle_hotplug( - hotplug_init: Init, devpath, subsystem, udevaction -): +def handle_hotplug(hotplug_init: Init, devpath, subsystem, udevaction): datasource = initialize_datasource(hotplug_init, subsystem) if not datasource: return handler_cls = SUBSYSTEM_PROPERTES_MAP[subsystem][0] - LOG.debug('Creating %s event handler', subsystem) + LOG.debug("Creating %s event handler", subsystem) event_handler = handler_cls( datasource=datasource, devpath=devpath, action=udevaction, - success_fn=hotplug_init._write_to_cache + success_fn=hotplug_init._write_to_cache, ) # type: UeventHandler wait_times = [1, 3, 5, 10, 30] for attempt, wait in enumerate(wait_times): LOG.debug( - 'subsystem=%s update attempt %s/%s', + "subsystem=%s update attempt %s/%s", subsystem, attempt, - len(wait_times) + len(wait_times), ) try: - LOG.debug('Refreshing metadata') + LOG.debug("Refreshing metadata") event_handler.update_metadata() - LOG.debug('Detecting device in updated metadata') + LOG.debug("Detecting device in updated metadata") event_handler.detect_hotplugged_device() - LOG.debug('Applying config change') + LOG.debug("Applying config change") event_handler.apply() - LOG.debug('Updating cache') + LOG.debug("Updating cache") event_handler.success() break except Exception as e: - LOG.debug('Exception while processing hotplug event. %s', e) + LOG.debug("Exception while processing hotplug event. 
%s", e) time.sleep(wait) last_exception = e else: @@ -238,31 +244,33 @@ def handle_args(name, args): hotplug_init.read_cfg() log.setupLogging(hotplug_init.cfg) - if 'reporting' in hotplug_init.cfg: - reporting.update_configuration(hotplug_init.cfg.get('reporting')) + if "reporting" in hotplug_init.cfg: + reporting.update_configuration(hotplug_init.cfg.get("reporting")) # Logging isn't going to be setup until now LOG.debug( - '%s called with the following arguments: {' - 'hotplug_action: %s, subsystem: %s, udevaction: %s, devpath: %s}', + "%s called with the following arguments: {" + "hotplug_action: %s, subsystem: %s, udevaction: %s, devpath: %s}", name, args.hotplug_action, args.subsystem, - args.udevaction if 'udevaction' in args else None, - args.devpath if 'devpath' in args else None, + args.udevaction if "udevaction" in args else None, + args.devpath if "devpath" in args else None, ) with hotplug_reporter: try: - if args.hotplug_action == 'query': + if args.hotplug_action == "query": try: datasource = initialize_datasource( - hotplug_init, args.subsystem) + hotplug_init, args.subsystem + ) except DataSourceNotFoundException: print( "Unable to determine hotplug state. No datasource " - "detected") + "detected" + ) sys.exit(1) - print('enabled' if datasource else 'disabled') + print("enabled" if datasource else "disabled") else: handle_hotplug( hotplug_init=hotplug_init, @@ -271,13 +279,13 @@ def handle_args(name, args): udevaction=args.udevaction, ) except Exception: - LOG.exception('Received fatal exception handling hotplug!') + LOG.exception("Received fatal exception handling hotplug!") raise - LOG.debug('Exiting hotplug handler') + LOG.debug("Exiting hotplug handler") reporting.flush_events() -if __name__ == '__main__': +if __name__ == "__main__": args = get_parser().parse_args() handle_args(NAME, args) diff --git a/cloudinit/cmd/devel/logs.py b/cloudinit/cmd/devel/logs.py index 31ade73d65c..d54b809acea 100644 --- a/cloudinit/cmd/devel/logs.py +++ b/cloudinit/cmd/devel/logs.py @@ -5,20 +5,19 @@ """Define 'collect-logs' utility and handler to include in cloud-init cmd.""" import argparse -from datetime import datetime import os import shutil import sys +from datetime import datetime from cloudinit.sources import INSTANCE_JSON_SENSITIVE_FILE +from cloudinit.subp import ProcessExecutionError, subp from cloudinit.temp_utils import tempdir -from cloudinit.subp import (ProcessExecutionError, subp) -from cloudinit.util import (chdir, copy, ensure_dir, write_file) +from cloudinit.util import chdir, copy, ensure_dir, write_file - -CLOUDINIT_LOGS = ['/var/log/cloud-init.log', '/var/log/cloud-init-output.log'] -CLOUDINIT_RUN_DIR = '/run/cloud-init' -USER_DATA_FILE = '/var/lib/cloud/instance/user-data.txt' # Optional +CLOUDINIT_LOGS = ["/var/log/cloud-init.log", "/var/log/cloud-init-output.log"] +CLOUDINIT_RUN_DIR = "/run/cloud-init" +USER_DATA_FILE = "/var/lib/cloud/instance/user-data.txt" # Optional def get_parser(parser=None): @@ -32,26 +31,44 @@ def get_parser(parser=None): """ if not parser: parser = argparse.ArgumentParser( - prog='collect-logs', - description='Collect and tar all cloud-init debug info') - parser.add_argument('--verbose', '-v', action='count', default=0, - dest='verbosity', help="Be more verbose.") + prog="collect-logs", + description="Collect and tar all cloud-init debug info", + ) + parser.add_argument( + "--verbose", + "-v", + action="count", + default=0, + dest="verbosity", + help="Be more verbose.", + ) parser.add_argument( - "--tarfile", '-t', 
default='cloud-init.tar.gz', - help=('The tarfile to create containing all collected logs.' - ' Default: cloud-init.tar.gz')) + "--tarfile", + "-t", + default="cloud-init.tar.gz", + help=( + "The tarfile to create containing all collected logs." + " Default: cloud-init.tar.gz" + ), + ) parser.add_argument( - "--include-userdata", '-u', default=False, action='store_true', - dest='userdata', help=( - 'Optionally include user-data from {0} which could contain' - ' sensitive information.'.format(USER_DATA_FILE))) + "--include-userdata", + "-u", + default=False, + action="store_true", + dest="userdata", + help=( + "Optionally include user-data from {0} which could contain" + " sensitive information.".format(USER_DATA_FILE) + ), + ) return parser def _copytree_rundir_ignore_files(curdir, files): """Return a list of files to ignore for /run/cloud-init directory""" ignored_files = [ - 'hook-hotplug-cmd', # named pipe for hotplug + "hook-hotplug-cmd", # named pipe for hotplug ] if os.getuid() != 0: # Ignore root-permissioned files @@ -94,52 +111,67 @@ def collect_logs(tarfile, include_userdata, verbosity=0): if include_userdata and os.getuid() != 0: sys.stderr.write( "To include userdata, root user is required." - " Try sudo cloud-init collect-logs\n") + " Try sudo cloud-init collect-logs\n" + ) return 1 tarfile = os.path.abspath(tarfile) - date = datetime.utcnow().date().strftime('%Y-%m-%d') - log_dir = 'cloud-init-logs-{0}'.format(date) - with tempdir(dir='/tmp') as tmp_dir: + date = datetime.utcnow().date().strftime("%Y-%m-%d") + log_dir = "cloud-init-logs-{0}".format(date) + with tempdir(dir="/tmp") as tmp_dir: log_dir = os.path.join(tmp_dir, log_dir) version = _write_command_output_to_file( - ['cloud-init', '--version'], - os.path.join(log_dir, 'version'), - "cloud-init --version", verbosity) + ["cloud-init", "--version"], + os.path.join(log_dir, "version"), + "cloud-init --version", + verbosity, + ) dpkg_ver = _write_command_output_to_file( - ['dpkg-query', '--show', "-f=${Version}\n", 'cloud-init'], - os.path.join(log_dir, 'dpkg-version'), - "dpkg version", verbosity) + ["dpkg-query", "--show", "-f=${Version}\n", "cloud-init"], + os.path.join(log_dir, "dpkg-version"), + "dpkg version", + verbosity, + ) if not version: version = dpkg_ver if dpkg_ver else "not-available" _debug("collected cloud-init version: %s\n" % version, 1, verbosity) _write_command_output_to_file( - ['dmesg'], os.path.join(log_dir, 'dmesg.txt'), - "dmesg output", verbosity) + ["dmesg"], + os.path.join(log_dir, "dmesg.txt"), + "dmesg output", + verbosity, + ) _write_command_output_to_file( - ['journalctl', '--boot=0', '-o', 'short-precise'], - os.path.join(log_dir, 'journal.txt'), - "systemd journal of current boot", verbosity) + ["journalctl", "--boot=0", "-o", "short-precise"], + os.path.join(log_dir, "journal.txt"), + "systemd journal of current boot", + verbosity, + ) for log in CLOUDINIT_LOGS: _collect_file(log, log_dir, verbosity) if include_userdata: _collect_file(USER_DATA_FILE, log_dir, verbosity) - run_dir = os.path.join(log_dir, 'run') + run_dir = os.path.join(log_dir, "run") ensure_dir(run_dir) if os.path.exists(CLOUDINIT_RUN_DIR): try: - shutil.copytree(CLOUDINIT_RUN_DIR, - os.path.join(run_dir, 'cloud-init'), - ignore=_copytree_rundir_ignore_files) + shutil.copytree( + CLOUDINIT_RUN_DIR, + os.path.join(run_dir, "cloud-init"), + ignore=_copytree_rundir_ignore_files, + ) except shutil.Error as e: sys.stderr.write("Failed collecting file(s) due to error:\n") - sys.stderr.write(str(e) + '\n') + 
sys.stderr.write(str(e) + "\n") _debug("collected dir %s\n" % CLOUDINIT_RUN_DIR, 1, verbosity) else: - _debug("directory '%s' did not exist\n" % CLOUDINIT_RUN_DIR, 1, - verbosity) + _debug( + "directory '%s' did not exist\n" % CLOUDINIT_RUN_DIR, + 1, + verbosity, + ) with chdir(tmp_dir): - subp(['tar', 'czvf', tarfile, log_dir.replace(tmp_dir + '/', '')]) + subp(["tar", "czvf", tarfile, log_dir.replace(tmp_dir + "/", "")]) sys.stderr.write("Wrote %s\n" % tarfile) return 0 @@ -152,10 +184,10 @@ def handle_collect_logs_args(name, args): def main(): """Tool to collect and tar all cloud-init related logs.""" parser = get_parser() - return handle_collect_logs_args('collect-logs', parser.parse_args()) + return handle_collect_logs_args("collect-logs", parser.parse_args()) -if __name__ == '__main__': +if __name__ == "__main__": sys.exit(main()) # vi: ts=4 expandtab diff --git a/cloudinit/cmd/devel/make_mime.py b/cloudinit/cmd/devel/make_mime.py index 4e6a57786ac..a7493c74fac 100755 --- a/cloudinit/cmd/devel/make_mime.py +++ b/cloudinit/cmd/devel/make_mime.py @@ -9,19 +9,22 @@ from cloudinit import log from cloudinit.handlers import INCLUSION_TYPES_MAP + from . import addLogHandlerCLI -NAME = 'make-mime' +NAME = "make-mime" LOG = log.getLogger(NAME) -EPILOG = ("Example: make-mime -a config.yaml:cloud-config " - "-a script.sh:x-shellscript > user-data") +EPILOG = ( + "Example: make-mime -a config.yaml:cloud-config " + "-a script.sh:x-shellscript > user-data" +) def file_content_type(text): - """ Return file content type by reading the first line of the input. """ + """Return file content type by reading the first line of the input.""" try: filename, content_type = text.split(":", 1) - return (open(filename, 'r'), filename, content_type.strip()) + return (open(filename, "r"), filename, content_type.strip()) except ValueError as e: raise argparse.ArgumentError( text, "Invalid value for %r" % (text) @@ -41,26 +44,43 @@ def get_parser(parser=None): # update the parser's doc and add an epilog to show an example parser.description = __doc__ parser.epilog = EPILOG - parser.add_argument("-a", "--attach", dest="files", type=file_content_type, - action='append', default=[], - metavar=":", - help=("attach the given file as the specified " - "content-type")) - parser.add_argument('-l', '--list-types', action='store_true', - default=False, - help='List support cloud-init content types.') - parser.add_argument('-f', '--force', action='store_true', - default=False, - help='Ignore unknown content-type warnings') + parser.add_argument( + "-a", + "--attach", + dest="files", + type=file_content_type, + action="append", + default=[], + metavar=":", + help="attach the given file as the specified content-type", + ) + parser.add_argument( + "-l", + "--list-types", + action="store_true", + default=False, + help="List support cloud-init content types.", + ) + parser.add_argument( + "-f", + "--force", + action="store_true", + default=False, + help="Ignore unknown content-type warnings", + ) return parser def get_content_types(strip_prefix=False): - """ Return a list of cloud-init supported content types. Optionally - strip out the leading 'text/' of the type if strip_prefix=True. + """Return a list of cloud-init supported content types. Optionally + strip out the leading 'text/' of the type if strip_prefix=True. 
""" - return sorted([ctype.replace("text/", "") if strip_prefix else ctype - for ctype in INCLUSION_TYPES_MAP.values()]) + return sorted( + [ + ctype.replace("text/", "") if strip_prefix else ctype + for ctype in INCLUSION_TYPES_MAP.values() + ] + ) def handle_args(name, args): @@ -82,14 +102,16 @@ def handle_args(name, args): for i, (fh, filename, format_type) in enumerate(args.files): contents = fh.read() sub_message = MIMEText(contents, format_type, sys.getdefaultencoding()) - sub_message.add_header('Content-Disposition', - 'attachment; filename="%s"' % (filename)) + sub_message.add_header( + "Content-Disposition", 'attachment; filename="%s"' % (filename) + ) content_type = sub_message.get_content_type().lower() if content_type not in get_content_types(): level = "WARNING" if args.force else "ERROR" - msg = (level + ": content type %r for attachment %s " - "may be incorrect!") % (content_type, i + 1) - sys.stderr.write(msg + '\n') + msg = ( + level + ": content type %r for attachment %s may be incorrect!" + ) % (content_type, i + 1) + sys.stderr.write(msg + "\n") errors.append(msg) sub_messages.append(sub_message) if len(errors) and not args.force: @@ -104,10 +126,10 @@ def handle_args(name, args): def main(): args = get_parser().parse_args() - return(handle_args(NAME, args)) + return handle_args(NAME, args) -if __name__ == '__main__': +if __name__ == "__main__": sys.exit(main()) diff --git a/cloudinit/cmd/devel/net_convert.py b/cloudinit/cmd/devel/net_convert.py index f4a98e5ebd8..18b1e7ff78f 100755 --- a/cloudinit/cmd/devel/net_convert.py +++ b/cloudinit/cmd/devel/net_convert.py @@ -6,15 +6,13 @@ import os import sys -from cloudinit.sources.helpers import openstack +from cloudinit import distros, log, safeyaml +from cloudinit.net import eni, netplan, network_state, networkd, sysconfig from cloudinit.sources import DataSourceAzure as azure from cloudinit.sources import DataSourceOVF as ovf +from cloudinit.sources.helpers import openstack -from cloudinit import distros, safeyaml -from cloudinit.net import eni, netplan, networkd, network_state, sysconfig -from cloudinit import log - -NAME = 'net-convert' +NAME = "net-convert" def get_parser(parser=None): @@ -27,33 +25,59 @@ def get_parser(parser=None): """ if not parser: parser = argparse.ArgumentParser(prog=NAME, description=__doc__) - parser.add_argument("-p", "--network-data", type=open, - metavar="PATH", required=True, - help="The network configuration to read") - parser.add_argument("-k", "--kind", - choices=['eni', 'network_data.json', 'yaml', - 'azure-imds', 'vmware-imc'], - required=True, - help="The format of the given network config") - parser.add_argument("-d", "--directory", - metavar="PATH", - help="directory to place output in", - required=True) - parser.add_argument("-D", "--distro", - choices=[item for sublist in - distros.OSFAMILIES.values() - for item in sublist], - required=True) - parser.add_argument("-m", "--mac", - metavar="name,mac", - action='append', - help="interface name to mac mapping") - parser.add_argument("--debug", action='store_true', - help='enable debug logging to stderr.') - parser.add_argument("-O", "--output-kind", - choices=['eni', 'netplan', 'networkd', 'sysconfig'], - required=True, - help="The network config format to emit") + parser.add_argument( + "-p", + "--network-data", + type=open, + metavar="PATH", + required=True, + help="The network configuration to read", + ) + parser.add_argument( + "-k", + "--kind", + choices=[ + "eni", + "network_data.json", + "yaml", + "azure-imds", + "vmware-imc", 
+ ], + required=True, + help="The format of the given network config", + ) + parser.add_argument( + "-d", + "--directory", + metavar="PATH", + help="directory to place output in", + required=True, + ) + parser.add_argument( + "-D", + "--distro", + choices=[ + item for sublist in distros.OSFAMILIES.values() for item in sublist + ], + required=True, + ) + parser.add_argument( + "-m", + "--mac", + metavar="name,mac", + action="append", + help="interface name to mac mapping", + ) + parser.add_argument( + "--debug", action="store_true", help="enable debug logging to stderr." + ) + parser.add_argument( + "-O", + "--output-kind", + choices=["eni", "netplan", "networkd", "sysconfig"], + required=True, + help="The network config format to emit", + ) return parser @@ -81,59 +105,68 @@ def handle_args(name, args): pre_ns = eni.convert_eni_data(net_data) elif args.kind == "yaml": pre_ns = safeyaml.load(net_data) - if 'network' in pre_ns: - pre_ns = pre_ns.get('network') + if "network" in pre_ns: + pre_ns = pre_ns.get("network") if args.debug: - sys.stderr.write('\n'.join( - ["Input YAML", safeyaml.dumps(pre_ns), ""])) - elif args.kind == 'network_data.json': + sys.stderr.write( + "\n".join(["Input YAML", safeyaml.dumps(pre_ns), ""]) + ) + elif args.kind == "network_data.json": pre_ns = openstack.convert_net_json( - json.loads(net_data), known_macs=known_macs) - elif args.kind == 'azure-imds': + json.loads(net_data), known_macs=known_macs + ) + elif args.kind == "azure-imds": pre_ns = azure.parse_network_config(json.loads(net_data)) - elif args.kind == 'vmware-imc': + elif args.kind == "vmware-imc": config = ovf.Config(ovf.ConfigFile(args.network_data.name)) pre_ns = ovf.get_network_config_from_conf(config, False) ns = network_state.parse_net_config_data(pre_ns) if args.debug: - sys.stderr.write('\n'.join( - ["", "Internal State", safeyaml.dumps(ns), ""])) + sys.stderr.write( + "\n".join(["", "Internal State", safeyaml.dumps(ns), ""]) + ) distro_cls = distros.fetch(args.distro) distro = distro_cls(args.distro, {}, None) config = {} if args.output_kind == "eni": r_cls = eni.Renderer - config = distro.renderer_configs.get('eni') + config = distro.renderer_configs.get("eni") elif args.output_kind == "netplan": r_cls = netplan.Renderer - config = distro.renderer_configs.get('netplan') + config = distro.renderer_configs.get("netplan") # don't run netplan generate/apply - config['postcmds'] = False + config["postcmds"] = False # trim leading slash - config['netplan_path'] = config['netplan_path'][1:] + config["netplan_path"] = config["netplan_path"][1:] # enable some netplan features - config['features'] = ['dhcp-use-domains', 'ipv6-mtu'] + config["features"] = ["dhcp-use-domains", "ipv6-mtu"] elif args.output_kind == "networkd": r_cls = networkd.Renderer - config = distro.renderer_configs.get('networkd') + config = distro.renderer_configs.get("networkd") elif args.output_kind == "sysconfig": r_cls = sysconfig.Renderer - config = distro.renderer_configs.get('sysconfig') + config = distro.renderer_configs.get("sysconfig") else: raise RuntimeError("Invalid output_kind") r = r_cls(config=config) - sys.stderr.write(''.join([ - "Read input format '%s' from '%s'.\n" % ( - args.kind, args.network_data.name), - "Wrote output format '%s' to '%s'\n" % ( - args.output_kind, args.directory)]) + "\n") + sys.stderr.write( + "".join( + [ + "Read input format '%s' from '%s'.\n" + % (args.kind, args.network_data.name), + "Wrote output format '%s' to '%s'\n" + % (args.output_kind, args.directory), + ] + ) + + "\n" + ) 
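# --- Illustrative aside (not part of the upstream patch): the if/elif ladder
# above that picks a renderer for --output-kind is equivalent to a small
# lookup table. Plain strings stand in for the real Renderer classes so this
# sketch stays self-contained; the key set mirrors the argparse choices.
RENDERER_KINDS = {
    "eni": "eni.Renderer",
    "netplan": "netplan.Renderer",
    "networkd": "networkd.Renderer",
    "sysconfig": "sysconfig.Renderer",
}

def pick_renderer(output_kind):
    try:
        return RENDERER_KINDS[output_kind]
    except KeyError:
        # mirrors the RuntimeError("Invalid output_kind") branch above
        raise RuntimeError("Invalid output_kind") from None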
r.render_network_state(network_state=ns, target=args.directory) -if __name__ == '__main__': +if __name__ == "__main__": args = get_parser().parse_args() handle_args(NAME, args) diff --git a/cloudinit/cmd/devel/parser.py b/cloudinit/cmd/devel/parser.py index be304630f31..76b16c2eac7 100644 --- a/cloudinit/cmd/devel/parser.py +++ b/cloudinit/cmd/devel/parser.py @@ -5,33 +5,47 @@ """Define 'devel' subcommand argument parsers to include in cloud-init cmd.""" import argparse + from cloudinit.config import schema -from . import hotplug_hook -from . import net_convert -from . import render -from . import make_mime +from . import hotplug_hook, make_mime, net_convert, render def get_parser(parser=None): if not parser: parser = argparse.ArgumentParser( - prog='cloudinit-devel', - description='Run development cloud-init tools') - subparsers = parser.add_subparsers(title='Subcommands', dest='subcommand') + prog="cloudinit-devel", + description="Run development cloud-init tools", + ) + subparsers = parser.add_subparsers(title="Subcommands", dest="subcommand") subparsers.required = True subcmds = [ - (hotplug_hook.NAME, hotplug_hook.__doc__, - hotplug_hook.get_parser, hotplug_hook.handle_args), - ('schema', 'Validate cloud-config files for document schema', - schema.get_parser, schema.handle_schema_args), - (net_convert.NAME, net_convert.__doc__, - net_convert.get_parser, net_convert.handle_args), - (render.NAME, render.__doc__, - render.get_parser, render.handle_args), - (make_mime.NAME, make_mime.__doc__, - make_mime.get_parser, make_mime.handle_args), + ( + hotplug_hook.NAME, + hotplug_hook.__doc__, + hotplug_hook.get_parser, + hotplug_hook.handle_args, + ), + ( + "schema", + "Validate cloud-config files for document schema", + schema.get_parser, + schema.handle_schema_args, + ), + ( + net_convert.NAME, + net_convert.__doc__, + net_convert.get_parser, + net_convert.handle_args, + ), + (render.NAME, render.__doc__, render.get_parser, render.handle_args), + ( + make_mime.NAME, + make_mime.__doc__, + make_mime.get_parser, + make_mime.handle_args, + ), ] for (subcmd, helpmsg, get_parser, handler) in subcmds: parser = subparsers.add_parser(subcmd, help=helpmsg) diff --git a/cloudinit/cmd/devel/render.py b/cloudinit/cmd/devel/render.py index 1090aa16045..2f9a22a8b80 100755 --- a/cloudinit/cmd/devel/render.py +++ b/cloudinit/cmd/devel/render.py @@ -6,12 +6,13 @@ import os import sys -from cloudinit.handlers.jinja_template import render_jinja_payload_from_file from cloudinit import log +from cloudinit.handlers.jinja_template import render_jinja_payload_from_file from cloudinit.sources import INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE + from . import addLogHandlerCLI, read_cfg_paths -NAME = 'render' +NAME = "render" LOG = log.getLogger(NAME) @@ -27,13 +28,24 @@ def get_parser(parser=None): if not parser: parser = argparse.ArgumentParser(prog=NAME, description=__doc__) parser.add_argument( - 'user_data', type=str, help='Path to the user-data file to render') + "user_data", type=str, help="Path to the user-data file to render" + ) + parser.add_argument( + "-i", + "--instance-data", + type=str, + help=( + "Optional path to instance-data.json file. Defaults to" + " /run/cloud-init/instance-data.json" + ), + ) parser.add_argument( - '-i', '--instance-data', type=str, - help=('Optional path to instance-data.json file. 
Defaults to' - ' /run/cloud-init/instance-data.json')) - parser.add_argument('-d', '--debug', action='store_true', default=False, - help='Add verbose messages during template render') + "-d", + "--debug", + action="store_true", + default=False, + help="Add verbose messages during template render", + ) return parser @@ -54,34 +66,38 @@ def handle_args(name, args): redacted_data_fn = os.path.join(paths.run_dir, INSTANCE_JSON_FILE) if uid == 0: instance_data_fn = os.path.join( - paths.run_dir, INSTANCE_JSON_SENSITIVE_FILE) + paths.run_dir, INSTANCE_JSON_SENSITIVE_FILE + ) if not os.path.exists(instance_data_fn): LOG.warning( - 'Missing root-readable %s. Using redacted %s instead.', - instance_data_fn, redacted_data_fn + "Missing root-readable %s. Using redacted %s instead.", + instance_data_fn, + redacted_data_fn, ) instance_data_fn = redacted_data_fn else: instance_data_fn = redacted_data_fn if not os.path.exists(instance_data_fn): - LOG.error('Missing instance-data.json file: %s', instance_data_fn) + LOG.error("Missing instance-data.json file: %s", instance_data_fn) return 1 try: with open(args.user_data) as stream: user_data = stream.read() except IOError: - LOG.error('Missing user-data file: %s', args.user_data) + LOG.error("Missing user-data file: %s", args.user_data) return 1 try: rendered_payload = render_jinja_payload_from_file( - payload=user_data, payload_fn=args.user_data, + payload=user_data, + payload_fn=args.user_data, instance_data_file=instance_data_fn, - debug=True if args.debug else False) + debug=True if args.debug else False, + ) except RuntimeError as e: - LOG.error('Cannot render from instance data: %s', str(e)) + LOG.error("Cannot render from instance data: %s", str(e)) return 1 if not rendered_payload: - LOG.error('Unable to render user-data file: %s', args.user_data) + LOG.error("Unable to render user-data file: %s", args.user_data) return 1 sys.stdout.write(rendered_payload) return 0 @@ -89,10 +105,10 @@ def handle_args(name, args): def main(): args = get_parser().parse_args() - return(handle_args(NAME, args)) + return handle_args(NAME, args) -if __name__ == '__main__': +if __name__ == "__main__": sys.exit(main()) diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py index 63186d341b8..e67edbc35c7 100644 --- a/cloudinit/cmd/main.py +++ b/cloudinit/cmd/main.py @@ -19,6 +19,7 @@ import traceback from cloudinit import patcher + patcher.patch_logging() from cloudinit import log as logging @@ -34,8 +35,7 @@ from cloudinit import reporting from cloudinit.reporting import events -from cloudinit.settings import (PER_INSTANCE, PER_ALWAYS, PER_ONCE, - CLOUD_CONFIG) +from cloudinit.settings import PER_INSTANCE, PER_ALWAYS, PER_ONCE, CLOUD_CONFIG from cloudinit import atomic_helper @@ -44,8 +44,10 @@ # Welcome message template -WELCOME_MSG_TPL = ("Cloud-init v. {version} running '{action}' at " - "{timestamp}. Up {uptime} seconds.") +WELCOME_MSG_TPL = ( + "Cloud-init v. {version} running '{action}' at " + "{timestamp}. Up {uptime} seconds." +) # Module section template MOD_SECTION_TPL = "cloud_%s_modules" @@ -53,9 +55,9 @@ # Frequency shortname to full name # (so users don't have to remember the full name...) FREQ_SHORT_NAMES = { - 'instance': PER_INSTANCE, - 'always': PER_ALWAYS, - 'once': PER_ONCE, + "instance": PER_INSTANCE, + "always": PER_ALWAYS, + "once": PER_ONCE, } LOG = logging.getLogger() @@ -63,21 +65,20 @@ # Used for when a logger may not be active # and we still want to print exceptions... 
-def print_exc(msg=''): +def print_exc(msg=""): if msg: sys.stderr.write("%s\n" % (msg)) - sys.stderr.write('-' * 60) + sys.stderr.write("-" * 60) sys.stderr.write("\n") traceback.print_exc(file=sys.stderr) - sys.stderr.write('-' * 60) + sys.stderr.write("-" * 60) sys.stderr.write("\n") def welcome(action, msg=None): if not msg: msg = welcome_format(action) - util.multi_log("%s\n" % (msg), - console=False, stderr=True, log=LOG) + util.multi_log("%s\n" % (msg), console=False, stderr=True, log=LOG) return msg @@ -86,7 +87,8 @@ def welcome_format(action): version=version.version_string(), uptime=util.uptime(), timestamp=util.time_rfc2822(), - action=action) + action=action, + ) def extract_fns(args): @@ -107,29 +109,31 @@ def run_module_section(mods, action_name, section): (which_ran, failures) = mods.run_section(full_section_name) total_attempted = len(which_ran) + len(failures) if total_attempted == 0: - msg = ("No '%s' modules to run" - " under section '%s'") % (action_name, full_section_name) + msg = "No '%s' modules to run under section '%s'" % ( + action_name, + full_section_name, + ) sys.stderr.write("%s\n" % (msg)) LOG.debug(msg) return [] else: - LOG.debug("Ran %s modules with %s failures", - len(which_ran), len(failures)) + LOG.debug( + "Ran %s modules with %s failures", len(which_ran), len(failures) + ) return failures def apply_reporting_cfg(cfg): - if cfg.get('reporting'): - reporting.update_configuration(cfg.get('reporting')) + if cfg.get("reporting"): + reporting.update_configuration(cfg.get("reporting")) -def parse_cmdline_url(cmdline, names=('cloud-config-url', 'url')): +def parse_cmdline_url(cmdline, names=("cloud-config-url", "url")): data = util.keyval_str_to_dict(cmdline) for key in names: if key in data: return key, data[key] - raise KeyError("No keys (%s) found in string '%s'" % - (cmdline, names)) + raise KeyError("No keys (%s) found in string '%s'" % (cmdline, names)) def attempt_cmdline_url(path, network=True, cmdline=None): @@ -163,51 +167,60 @@ def attempt_cmdline_url(path, network=True, cmdline=None): if path_is_local and os.path.exists(path): if network: - m = ("file '%s' existed, possibly from local stage download" - " of command line url '%s'. Not re-writing." % (path, url)) + m = ( + "file '%s' existed, possibly from local stage download" + " of command line url '%s'. Not re-writing." % (path, url) + ) level = logging.INFO if path_is_local: level = logging.DEBUG else: - m = ("file '%s' existed, possibly from previous boot download" - " of command line url '%s'. Not re-writing." % (path, url)) + m = ( + "file '%s' existed, possibly from previous boot download" + " of command line url '%s'. Not re-writing." 
% (path, url) + ) level = logging.WARN return (level, m) - kwargs = {'url': url, 'timeout': 10, 'retries': 2} + kwargs = {"url": url, "timeout": 10, "retries": 2} if network or path_is_local: level = logging.WARN - kwargs['sec_between'] = 1 + kwargs["sec_between"] = 1 else: level = logging.DEBUG - kwargs['sec_between'] = .1 + kwargs["sec_between"] = 0.1 data = None - header = b'#cloud-config' + header = b"#cloud-config" try: resp = url_helper.read_file_or_url(**kwargs) if resp.ok(): data = resp.contents if not resp.contents.startswith(header): - if cmdline_name == 'cloud-config-url': + if cmdline_name == "cloud-config-url": level = logging.WARN else: level = logging.INFO return ( level, - "contents of '%s' did not start with %s" % (url, header)) + "contents of '%s' did not start with %s" % (url, header), + ) else: - return (level, - "url '%s' returned code %s. Ignoring." % (url, resp.code)) + return ( + level, + "url '%s' returned code %s. Ignoring." % (url, resp.code), + ) except url_helper.UrlError as e: return (level, "retrieving url '%s' failed: %s" % (url, e)) util.write_file(path, data, mode=0o600) - return (logging.INFO, - "wrote cloud-config data from %s='%s' to %s" % - (cmdline_name, url, path)) + return ( + logging.INFO, + "wrote cloud-config data from %s='%s' to %s" + % (cmdline_name, url, path), + ) def purge_cache_on_python_version_change(init): @@ -216,31 +229,32 @@ def purge_cache_on_python_version_change(init): There could be changes not represented in our cache (obj.pkl) after we upgrade to a new version of python, so at that point clear the cache """ - current_python_version = '%d.%d' % ( - sys.version_info.major, sys.version_info.minor + current_python_version = "%d.%d" % ( + sys.version_info.major, + sys.version_info.minor, ) python_version_path = os.path.join( - init.paths.get_cpath('data'), 'python-version' + init.paths.get_cpath("data"), "python-version" ) if os.path.exists(python_version_path): cached_python_version = open(python_version_path).read() # The Python version has changed out from under us, anything that was # pickled previously is likely useless due to API changes. if cached_python_version != current_python_version: - LOG.debug('Python version change detected. Purging cache') + LOG.debug("Python version change detected. Purging cache") init.purge_cache(True) util.write_file(python_version_path, current_python_version) else: - if os.path.exists(init.paths.get_ipath_cur('obj_pkl')): + if os.path.exists(init.paths.get_ipath_cur("obj_pkl")): LOG.info( - 'Writing python-version file. ' - 'Cache compatibility status is currently unknown.' + "Writing python-version file. " + "Cache compatibility status is currently unknown." ) util.write_file(python_version_path, current_python_version) def _should_bring_up_interfaces(init, args): - if util.get_cfg_option_bool(init.cfg, 'disable_network_activation'): + if util.get_cfg_option_bool(init.cfg, "disable_network_activation"): return False return not args.local @@ -250,10 +264,14 @@ def main_init(name, args): if args.local: deps = [sources.DEP_FILESYSTEM] - early_logs = [attempt_cmdline_url( - path=os.path.join("%s.d" % CLOUD_CONFIG, - "91_kernel_cmdline_url.cfg"), - network=not args.local)] + early_logs = [ + attempt_cmdline_url( + path=os.path.join( + "%s.d" % CLOUD_CONFIG, "91_kernel_cmdline_url.cfg" + ), + network=not args.local, + ) + ] # Cloud-init 'init' stage is broken up into the following sub-stages # 1. 
Ensure that the init object fetches its config without errors @@ -289,8 +307,9 @@ def main_init(name, args): early_logs.append((logging.WARN, msg)) if args.debug: # Reset so that all the debug handlers are closed out - LOG.debug(("Logging being reset, this logger may no" - " longer be active shortly")) + LOG.debug( + "Logging being reset, this logger may no longer be active shortly" + ) logging.resetLogging() logging.setupLogging(init.cfg) apply_reporting_cfg(init.cfg) @@ -317,9 +336,11 @@ def main_init(name, args): if mode == sources.DSMODE_NETWORK: existing = "trust" sys.stderr.write("%s\n" % (netinfo.debug_info())) - LOG.debug(("Checking to see if files that we need already" - " exist from a previous run that would allow us" - " to stop early.")) + LOG.debug( + "Checking to see if files that we need already" + " exist from a previous run that would allow us" + " to stop early." + ) # no-net is written by upstart cloud-init-nonet when network failed # to come up stop_files = [ @@ -331,15 +352,18 @@ def main_init(name, args): existing_files.append(fn) if existing_files: - LOG.debug("[%s] Exiting. stop file %s existed", - mode, existing_files) + LOG.debug( + "[%s] Exiting. stop file %s existed", mode, existing_files + ) return (None, []) else: - LOG.debug("Execution continuing, no previous run detected that" - " would allow us to stop early.") + LOG.debug( + "Execution continuing, no previous run detected that" + " would allow us to stop early." + ) else: existing = "check" - mcfg = util.get_cfg_option_bool(init.cfg, 'manual_cache_clean', False) + mcfg = util.get_cfg_option_bool(init.cfg, "manual_cache_clean", False) if mcfg: LOG.debug("manual cache clean set from config") existing = "trust" @@ -360,8 +384,11 @@ def main_init(name, args): # if in network mode, and the datasource is local # then work was done at that stage. if mode == sources.DSMODE_NETWORK and init.datasource.dsmode != mode: - LOG.debug("[%s] Exiting. datasource %s in local mode", - mode, init.datasource) + LOG.debug( + "[%s] Exiting. datasource %s in local mode", + mode, + init.datasource, + ) return (None, []) except sources.DataSourceNotFoundException: # In the case of 'cloud-init init' without '--local' it is a bit @@ -371,8 +398,9 @@ def main_init(name, args): if mode == sources.DSMODE_LOCAL: LOG.debug("No local datasource found") else: - util.logexc(LOG, ("No instance datasource found!" - " Likely bad things to come!")) + util.logexc( + LOG, "No instance datasource found! Likely bad things to come!" + ) if not args.force: init.apply_network_config(bring_up=bring_up_interfaces) LOG.debug("[%s] Exiting without datasource", mode) @@ -381,46 +409,60 @@ def main_init(name, args): else: return (None, ["No instance datasource found."]) else: - LOG.debug("[%s] barreling on in force mode without datasource", - mode) + LOG.debug( + "[%s] barreling on in force mode without datasource", mode + ) _maybe_persist_instance_data(init) # Stage 6 iid = init.instancify() - LOG.debug("[%s] %s will now be targeting instance id: %s. new=%s", - mode, name, iid, init.is_new_instance()) + LOG.debug( + "[%s] %s will now be targeting instance id: %s. new=%s", + mode, + name, + iid, + init.is_new_instance(), + ) if mode == sources.DSMODE_LOCAL: # Before network comes up, set any configured hostname to allow # dhcp clients to advertize this hostname to any DDNS services # LP: #1746455. 
- _maybe_set_hostname(init, stage='local', retry_stage='network') + _maybe_set_hostname(init, stage="local", retry_stage="network") init.apply_network_config(bring_up=bring_up_interfaces) if mode == sources.DSMODE_LOCAL: if init.datasource.dsmode != mode: - LOG.debug("[%s] Exiting. datasource %s not in local mode.", - mode, init.datasource) + LOG.debug( + "[%s] Exiting. datasource %s not in local mode.", + mode, + init.datasource, + ) return (init.datasource, []) else: - LOG.debug("[%s] %s is in local mode, will apply init modules now.", - mode, init.datasource) + LOG.debug( + "[%s] %s is in local mode, will apply init modules now.", + mode, + init.datasource, + ) # Give the datasource a chance to use network resources. # This is used on Azure to communicate with the fabric over network. init.setup_datasource() # update fully realizes user-data (pulling in #include if necessary) init.update() - _maybe_set_hostname(init, stage='init-net', retry_stage='modules:config') + _maybe_set_hostname(init, stage="init-net", retry_stage="modules:config") # Stage 7 try: # Attempt to consume the data per instance. # This may run user-data handlers and/or perform # url downloads and such as needed. - (ran, _results) = init.cloudify().run('consume_data', - init.consume_data, - args=[PER_INSTANCE], - freq=PER_INSTANCE) + (ran, _results) = init.cloudify().run( + "consume_data", + init.consume_data, + args=[PER_INSTANCE], + freq=PER_INSTANCE, + ) if not ran: # Just consume anything that is set to run per-always # if nothing ran in the per-instance code @@ -442,8 +484,7 @@ def main_init(name, args): errfmt_orig = errfmt (outfmt, errfmt) = util.get_output_cfg(mods.cfg, name) if outfmt_orig != outfmt or errfmt_orig != errfmt: - LOG.warning("Stdout, stderr changing to (%s, %s)", - outfmt, errfmt) + LOG.warning("Stdout, stderr changing to (%s, %s)", outfmt, errfmt) (outfmt, errfmt) = util.fixup_output(mods.cfg, name) except Exception: util.logexc(LOG, "Failed to re-adjust output redirection!") @@ -459,11 +500,11 @@ def main_init(name, args): def di_report_warn(datasource, cfg): - if 'di_report' not in cfg: + if "di_report" not in cfg: LOG.debug("no di_report found in config.") return - dicfg = cfg['di_report'] + dicfg = cfg["di_report"] if dicfg is None: # ds-identify may write 'di_report:\n #comment\n' # which reads as {'di_report': None} @@ -474,7 +515,7 @@ def di_report_warn(datasource, cfg): LOG.warning("di_report config not a dictionary: %s", dicfg) return - dslist = dicfg.get('datasource_list') + dslist = dicfg.get("datasource_list") if dslist is None: LOG.warning("no 'datasource_list' found in di_report.") return @@ -486,18 +527,26 @@ def di_report_warn(datasource, cfg): # where Name is the thing that shows up in datasource_list. 
modname = datasource.__module__.rpartition(".")[2]
     if modname.startswith(sources.DS_PREFIX):
-        modname = modname[len(sources.DS_PREFIX):]
+        modname = modname[len(sources.DS_PREFIX) :]
     else:
-        LOG.warning("Datasource '%s' came from unexpected module '%s'.",
-                    datasource, modname)
+        LOG.warning(
+            "Datasource '%s' came from unexpected module '%s'.",
+            datasource,
+            modname,
+        )
 
     if modname in dslist:
-        LOG.debug("used datasource '%s' from '%s' was in di_report's list: %s",
-                  datasource, modname, dslist)
+        LOG.debug(
+            "used datasource '%s' from '%s' was in di_report's list: %s",
+            datasource,
+            modname,
+            dslist,
+        )
         return
 
-    warnings.show_warning('dsid_missing_source', cfg,
-                          source=modname, dslist=str(dslist))
+    warnings.show_warning(
+        "dsid_missing_source", cfg, source=modname, dslist=str(dslist)
+    )
 
 
 def main_modules(action_name, args):
@@ -521,8 +570,10 @@ def main_modules(action_name, args):
         init.fetch(existing="trust")
     except sources.DataSourceNotFoundException:
         # There was no datasource found, there's nothing to do
-        msg = ('Can not apply stage %s, no datasource found! Likely bad '
-               'things to come!' % name)
+        msg = (
+            "Can not apply stage %s, no datasource found! Likely bad "
+            "things to come!" % name
+        )
         util.logexc(LOG, msg)
         print_exc(msg)
         if not args.force:
@@ -539,8 +590,9 @@ def main_modules(action_name, args):
         util.logexc(LOG, "Failed to setup output redirection!")
     if args.debug:
         # Reset so that all the debug handlers are closed out
-        LOG.debug(("Logging being reset, this logger may no"
-                   " longer be active shortly"))
+        LOG.debug(
+            "Logging being reset, this logger may no longer be active shortly"
+        )
         logging.resetLogging()
     logging.setupLogging(mods.cfg)
     apply_reporting_cfg(init.cfg)
@@ -573,10 +625,12 @@ def main_single(name, args):
         # There was no datasource found,
         # that might be bad (or ok) depending on
         # the module being run (so continue on)
-        util.logexc(LOG, ("Failed to fetch your datasource,"
-                          " likely bad things to come!"))
-        print_exc(("Failed to fetch your datasource,"
-                   " likely bad things to come!"))
+        util.logexc(
+            LOG, "Failed to fetch your datasource, likely bad things to come!"
+        )
+        print_exc(
+            "Failed to fetch your datasource, likely bad things to come!"
+ ) if not args.force: return 1 _maybe_persist_instance_data(init) @@ -598,8 +652,9 @@ def main_single(name, args): util.logexc(LOG, "Failed to setup output redirection!") if args.debug: # Reset so that all the debug handlers are closed out - LOG.debug(("Logging being reset, this logger may no" - " longer be active shortly")) + LOG.debug( + "Logging being reset, this logger may no longer be active shortly" + ) logging.resetLogging() logging.setupLogging(mods.cfg) apply_reporting_cfg(init.cfg) @@ -608,9 +663,7 @@ def main_single(name, args): welcome(name, msg=w_msg) # Stage 5 - (which_ran, failures) = mods.run_single(mod_name, - mod_args, - mod_freq) + (which_ran, failures) = mods.run_single(mod_name, mod_args, mod_freq) if failures: LOG.warning("Ran %s but it failed!", mod_name) return 1 @@ -633,7 +686,12 @@ def status_wrapper(name, args, data_d=None, link_d=None): result_path = os.path.join(data_d, "result.json") result_link = os.path.join(link_d, "result.json") - util.ensure_dirs((data_d, link_d,)) + util.ensure_dirs( + ( + data_d, + link_d, + ) + ) (_name, functor) = args.action @@ -647,14 +705,20 @@ def status_wrapper(name, args, data_d=None, link_d=None): else: raise ValueError("unknown name: %s" % name) - modes = ('init', 'init-local', 'modules-init', 'modules-config', - 'modules-final') + modes = ( + "init", + "init-local", + "modules-init", + "modules-config", + "modules-final", + ) if mode not in modes: raise ValueError( - "Invalid cloud init mode specified '{0}'".format(mode)) + "Invalid cloud init mode specified '{0}'".format(mode) + ) status = None - if mode == 'init-local': + if mode == "init-local": for f in (status_link, result_link, status_path, result_path): util.del_file(f) else: @@ -664,45 +728,46 @@ def status_wrapper(name, args, data_d=None, link_d=None): pass nullstatus = { - 'errors': [], - 'start': None, - 'finished': None, + "errors": [], + "start": None, + "finished": None, } if status is None: - status = {'v1': {}} - status['v1']['datasource'] = None + status = {"v1": {}} + status["v1"]["datasource"] = None for m in modes: - if m not in status['v1']: - status['v1'][m] = nullstatus.copy() + if m not in status["v1"]: + status["v1"][m] = nullstatus.copy() - v1 = status['v1'] - v1['stage'] = mode - v1[mode]['start'] = time.time() + v1 = status["v1"] + v1["stage"] = mode + v1[mode]["start"] = time.time() atomic_helper.write_json(status_path, status) - util.sym_link(os.path.relpath(status_path, link_d), status_link, - force=True) + util.sym_link( + os.path.relpath(status_path, link_d), status_link, force=True + ) try: ret = functor(name, args) - if mode in ('init', 'init-local'): + if mode in ("init", "init-local"): (datasource, errors) = ret if datasource is not None: - v1['datasource'] = str(datasource) + v1["datasource"] = str(datasource) else: errors = ret - v1[mode]['errors'] = [str(e) for e in errors] + v1[mode]["errors"] = [str(e) for e in errors] except Exception as e: util.logexc(LOG, "failed stage %s", mode) print_exc("failed run of stage %s" % mode) - v1[mode]['errors'] = [str(e)] + v1[mode]["errors"] = [str(e)] - v1[mode]['finished'] = time.time() - v1['stage'] = None + v1[mode]["finished"] = time.time() + v1["stage"] = None atomic_helper.write_json(status_path, status) @@ -710,23 +775,26 @@ def status_wrapper(name, args, data_d=None, link_d=None): # write the 'finished' file errors = [] for m in modes: - if v1[m]['errors']: - errors.extend(v1[m].get('errors', [])) + if v1[m]["errors"]: + errors.extend(v1[m].get("errors", [])) atomic_helper.write_json( - 
result_path, {'v1': {'datasource': v1['datasource'], - 'errors': errors}}) - util.sym_link(os.path.relpath(result_path, link_d), result_link, - force=True) + result_path, + {"v1": {"datasource": v1["datasource"], "errors": errors}}, + ) + util.sym_link( + os.path.relpath(result_path, link_d), result_link, force=True + ) - return len(v1[mode]['errors']) + return len(v1[mode]["errors"]) def _maybe_persist_instance_data(init): """Write instance-data.json file if absent and datasource is restored.""" if init.ds_restored: instance_data_file = os.path.join( - init.paths.run_dir, sources.INSTANCE_JSON_FILE) + init.paths.run_dir, sources.INSTANCE_JSON_FILE + ) if not os.path.exists(instance_data_file): init.datasource.persist_instance_data() @@ -739,18 +807,23 @@ def _maybe_set_hostname(init, stage, retry_stage): """ cloud = init.cloudify() (hostname, _fqdn) = util.get_hostname_fqdn( - init.cfg, cloud, metadata_only=True) + init.cfg, cloud, metadata_only=True + ) if hostname: # meta-data or user-data hostname content try: - cc_set_hostname.handle('set-hostname', init.cfg, cloud, LOG, None) + cc_set_hostname.handle("set-hostname", init.cfg, cloud, LOG, None) except cc_set_hostname.SetHostnameError as e: LOG.debug( - 'Failed setting hostname in %s stage. Will' - ' retry in %s stage. Error: %s.', stage, retry_stage, str(e)) + "Failed setting hostname in %s stage. Will" + " retry in %s stage. Error: %s.", + stage, + retry_stage, + str(e), + ) def main_features(name, args): - sys.stdout.write('\n'.join(sorted(version.FEATURES)) + '\n') + sys.stdout.write("\n".join(sorted(version.FEATURES)) + "\n") def main(sysv_args=None): @@ -760,129 +833,182 @@ def main(sysv_args=None): sysv_args = sysv_args[1:] # Top level args - parser.add_argument('--version', '-v', action='version', - version='%(prog)s ' + (version.version_string())) - parser.add_argument('--file', '-f', action='append', - dest='files', - help=('additional yaml configuration' - ' files to use'), - type=argparse.FileType('rb')) - parser.add_argument('--debug', '-d', action='store_true', - help=('show additional pre-action' - ' logging (default: %(default)s)'), - default=False) - parser.add_argument('--force', action='store_true', - help=('force running even if no datasource is' - ' found (use at your own risk)'), - dest='force', - default=False) + parser.add_argument( + "--version", + "-v", + action="version", + version="%(prog)s " + (version.version_string()), + ) + parser.add_argument( + "--file", + "-f", + action="append", + dest="files", + help="additional yaml configuration files to use", + type=argparse.FileType("rb"), + ) + parser.add_argument( + "--debug", + "-d", + action="store_true", + help="show additional pre-action logging (default: %(default)s)", + default=False, + ) + parser.add_argument( + "--force", + action="store_true", + help=( + "force running even if no datasource is" + " found (use at your own risk)" + ), + dest="force", + default=False, + ) parser.set_defaults(reporter=None) - subparsers = parser.add_subparsers(title='Subcommands', dest='subcommand') + subparsers = parser.add_subparsers(title="Subcommands", dest="subcommand") subparsers.required = True # Each action and its sub-options (if any) - parser_init = subparsers.add_parser('init', - help=('initializes cloud-init and' - ' performs initial modules')) - parser_init.add_argument("--local", '-l', action='store_true', - help="start in local mode (default: %(default)s)", - default=False) + parser_init = subparsers.add_parser( + "init", help="initializes cloud-init and 
performs initial modules" + ) + parser_init.add_argument( + "--local", + "-l", + action="store_true", + help="start in local mode (default: %(default)s)", + default=False, + ) # This is used so that we can know which action is selected + # the functor to use to run this subcommand - parser_init.set_defaults(action=('init', main_init)) + parser_init.set_defaults(action=("init", main_init)) # These settings are used for the 'config' and 'final' stages - parser_mod = subparsers.add_parser('modules', - help=('activates modules using ' - 'a given configuration key')) - parser_mod.add_argument("--mode", '-m', action='store', - help=("module configuration name " - "to use (default: %(default)s)"), - default='config', - choices=('init', 'config', 'final')) - parser_mod.set_defaults(action=('modules', main_modules)) + parser_mod = subparsers.add_parser( + "modules", help="activates modules using a given configuration key" + ) + parser_mod.add_argument( + "--mode", + "-m", + action="store", + help="module configuration name to use (default: %(default)s)", + default="config", + choices=("init", "config", "final"), + ) + parser_mod.set_defaults(action=("modules", main_modules)) # This subcommand allows you to run a single module - parser_single = subparsers.add_parser('single', - help=('run a single module ')) - parser_single.add_argument("--name", '-n', action="store", - help="module name to run", - required=True) - parser_single.add_argument("--frequency", action="store", - help=("frequency of the module"), - required=False, - choices=list(FREQ_SHORT_NAMES.keys())) - parser_single.add_argument("--report", action="store_true", - help="enable reporting", - required=False) - parser_single.add_argument("module_args", nargs="*", - metavar='argument', - help=('any additional arguments to' - ' pass to this module')) - parser_single.set_defaults(action=('single', main_single)) + parser_single = subparsers.add_parser( + "single", help="run a single module " + ) + parser_single.add_argument( + "--name", + "-n", + action="store", + help="module name to run", + required=True, + ) + parser_single.add_argument( + "--frequency", + action="store", + help="frequency of the module", + required=False, + choices=list(FREQ_SHORT_NAMES.keys()), + ) + parser_single.add_argument( + "--report", + action="store_true", + help="enable reporting", + required=False, + ) + parser_single.add_argument( + "module_args", + nargs="*", + metavar="argument", + help="any additional arguments to pass to this module", + ) + parser_single.set_defaults(action=("single", main_single)) parser_query = subparsers.add_parser( - 'query', - help='Query standardized instance metadata from the command line.') + "query", + help="Query standardized instance metadata from the command line.", + ) parser_dhclient = subparsers.add_parser( - dhclient_hook.NAME, help=dhclient_hook.__doc__) + dhclient_hook.NAME, help=dhclient_hook.__doc__ + ) dhclient_hook.get_parser(parser_dhclient) - parser_features = subparsers.add_parser('features', - help=('list defined features')) - parser_features.set_defaults(action=('features', main_features)) + parser_features = subparsers.add_parser( + "features", help="list defined features" + ) + parser_features.set_defaults(action=("features", main_features)) parser_analyze = subparsers.add_parser( - 'analyze', help='Devel tool: Analyze cloud-init logs and data') + "analyze", help="Devel tool: Analyze cloud-init logs and data" + ) - parser_devel = subparsers.add_parser( - 'devel', help='Run development tools') + parser_devel = 
subparsers.add_parser("devel", help="Run development tools") parser_collect_logs = subparsers.add_parser( - 'collect-logs', help='Collect and tar all cloud-init debug info') + "collect-logs", help="Collect and tar all cloud-init debug info" + ) parser_clean = subparsers.add_parser( - 'clean', help='Remove logs and artifacts so cloud-init can re-run.') + "clean", help="Remove logs and artifacts so cloud-init can re-run." + ) parser_status = subparsers.add_parser( - 'status', help='Report cloud-init status or wait on completion.') + "status", help="Report cloud-init status or wait on completion." + ) if sysv_args: # Only load subparsers if subcommand is specified to avoid load cost - if sysv_args[0] == 'analyze': + if sysv_args[0] == "analyze": from cloudinit.analyze.__main__ import get_parser as analyze_parser + # Construct analyze subcommand parser analyze_parser(parser_analyze) - elif sysv_args[0] == 'devel': + elif sysv_args[0] == "devel": from cloudinit.cmd.devel.parser import get_parser as devel_parser + # Construct devel subcommand parser devel_parser(parser_devel) - elif sysv_args[0] == 'collect-logs': + elif sysv_args[0] == "collect-logs": from cloudinit.cmd.devel.logs import ( - get_parser as logs_parser, handle_collect_logs_args) + get_parser as logs_parser, + handle_collect_logs_args, + ) + logs_parser(parser_collect_logs) parser_collect_logs.set_defaults( - action=('collect-logs', handle_collect_logs_args)) - elif sysv_args[0] == 'clean': + action=("collect-logs", handle_collect_logs_args) + ) + elif sysv_args[0] == "clean": from cloudinit.cmd.clean import ( - get_parser as clean_parser, handle_clean_args) + get_parser as clean_parser, + handle_clean_args, + ) + clean_parser(parser_clean) - parser_clean.set_defaults( - action=('clean', handle_clean_args)) - elif sysv_args[0] == 'query': + parser_clean.set_defaults(action=("clean", handle_clean_args)) + elif sysv_args[0] == "query": from cloudinit.cmd.query import ( - get_parser as query_parser, handle_args as handle_query_args) + get_parser as query_parser, + handle_args as handle_query_args, + ) + query_parser(parser_query) - parser_query.set_defaults( - action=('render', handle_query_args)) - elif sysv_args[0] == 'status': + parser_query.set_defaults(action=("render", handle_query_args)) + elif sysv_args[0] == "status": from cloudinit.cmd.status import ( - get_parser as status_parser, handle_status_args) + get_parser as status_parser, + handle_status_args, + ) + status_parser(parser_status) - parser_status.set_defaults( - action=('status', handle_status_args)) + parser_status.set_defaults(action=("status", handle_status_args)) args = parser.parse_args(args=sysv_args) @@ -906,14 +1032,20 @@ def main(sysv_args=None): if args.local: rname, rdesc = ("init-local", "searching for local datasources") else: - rname, rdesc = ("init-network", - "searching for network datasources") + rname, rdesc = ( + "init-network", + "searching for network datasources", + ) elif name == "modules": - rname, rdesc = ("modules-%s" % args.mode, - "running modules for %s" % args.mode) + rname, rdesc = ( + "modules-%s" % args.mode, + "running modules for %s" % args.mode, + ) elif name == "single": - rname, rdesc = ("single/%s" % args.name, - "running single module %s" % args.name) + rname, rdesc = ( + "single/%s" % args.name, + "running single module %s" % args.name, + ) report_on = args.report else: rname = name @@ -921,19 +1053,24 @@ def main(sysv_args=None): report_on = False args.reporter = events.ReportEventStack( - rname, rdesc, 
reporting_enabled=report_on)
+        rname, rdesc, reporting_enabled=report_on
+    )
 
     with args.reporter:
         retval = util.log_time(
-            logfunc=LOG.debug, msg="cloud-init mode '%s'" % name,
-            get_uptime=True, func=functor, args=(name, args))
+            logfunc=LOG.debug,
+            msg="cloud-init mode '%s'" % name,
+            get_uptime=True,
+            func=functor,
+            args=(name, args),
+        )
         reporting.flush_events()
 
     return retval
 
 
-if __name__ == '__main__':
-    if 'TZ' not in os.environ:
-        os.environ['TZ'] = ":/etc/localtime"
+if __name__ == "__main__":
+    if "TZ" not in os.environ:
+        os.environ["TZ"] = ":/etc/localtime"
     return_value = main(sys.argv)
     if return_value:
         sys.exit(return_value)
 
diff --git a/cloudinit/cmd/query.py b/cloudinit/cmd/query.py
index e53cd855d23..46f176994f9 100644
--- a/cloudinit/cmd/query.py
+++ b/cloudinit/cmd/query.py
@@ -14,22 +14,24 @@
 """
 import argparse
-from errno import EACCES
 import os
 import sys
+from errno import EACCES
 
+from cloudinit import log, util
+from cloudinit.cmd.devel import addLogHandlerCLI, read_cfg_paths
 from cloudinit.handlers.jinja_template import (
     convert_jinja_instance_data,
     get_jinja_variable_alias,
-    render_jinja_payload
+    render_jinja_payload,
 )
-from cloudinit.cmd.devel import addLogHandlerCLI, read_cfg_paths
-from cloudinit import log
 from cloudinit.sources import (
-    INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE, REDACT_SENSITIVE_VALUE)
-from cloudinit import util
+    INSTANCE_JSON_FILE,
+    INSTANCE_JSON_SENSITIVE_FILE,
+    REDACT_SENSITIVE_VALUE,
+)
 
-NAME = 'query'
+NAME = "query"
 LOG = log.getLogger(NAME)
 
 
@@ -43,41 +45,79 @@ def get_parser(parser=None):
     @returns: ArgumentParser with proper argument configuration.
     """
     if not parser:
-        parser = argparse.ArgumentParser(
-            prog=NAME, description=__doc__)
+        parser = argparse.ArgumentParser(prog=NAME, description=__doc__)
     parser.add_argument(
-        '-d', '--debug', action='store_true', default=False,
-        help='Add verbose messages during template render')
+        "-d",
+        "--debug",
+        action="store_true",
+        default=False,
+        help="Add verbose messages during template render",
+    )
     parser.add_argument(
-        '-i', '--instance-data', type=str,
-        help=('Path to instance-data.json file. Default is /run/cloud-init/%s'
-              % INSTANCE_JSON_FILE))
+        "-i",
+        "--instance-data",
+        type=str,
+        help="Path to instance-data.json file. Default is /run/cloud-init/%s"
+        % INSTANCE_JSON_FILE,
+    )
     parser.add_argument(
-        '-l', '--list-keys', action='store_true', default=False,
-        help=('List query keys available at the provided instance-data'
-              ' <varname>.'))
+        "-l",
+        "--list-keys",
+        action="store_true",
+        default=False,
+        help=(
+            "List query keys available at the provided instance-data"
+            " <varname>."
+        ),
+    )
     parser.add_argument(
-        '-u', '--user-data', type=str,
-        help=('Path to user-data file. Default is'
-              ' /var/lib/cloud/instance/user-data.txt'))
+        "-u",
+        "--user-data",
+        type=str,
+        help=(
+            "Path to user-data file. Default is"
+            " /var/lib/cloud/instance/user-data.txt"
+        ),
+    )
     parser.add_argument(
-        '-v', '--vendor-data', type=str,
-        help=('Path to vendor-data file. Default is'
-              ' /var/lib/cloud/instance/vendor-data.txt'))
+        "-v",
+        "--vendor-data",
+        type=str,
+        help=(
+            "Path to vendor-data file. Default is"
+            " /var/lib/cloud/instance/vendor-data.txt"
+        ),
+    )
     parser.add_argument(
-        'varname', type=str, nargs='?',
-        help=('A dot-delimited specific variable to query from'
-              ' instance-data. For example: v1.local_hostname. If the'
-              ' value is not JSON serializable, it will be base64-encoded and'
-              ' will contain the prefix "ci-b64:". 
')) + "varname", + type=str, + nargs="?", + help=( + "A dot-delimited specific variable to query from" + " instance-data. For example: v1.local_hostname. If the" + " value is not JSON serializable, it will be base64-encoded and" + ' will contain the prefix "ci-b64:". ' + ), + ) parser.add_argument( - '-a', '--all', action='store_true', default=False, dest='dump_all', - help='Dump all available instance-data') + "-a", + "--all", + action="store_true", + default=False, + dest="dump_all", + help="Dump all available instance-data", + ) parser.add_argument( - '-f', '--format', type=str, dest='format', - help=('Optionally specify a custom output format string. Any' - ' instance-data variable can be specified between double-curly' - ' braces. For example -f "{{ v2.cloud_name }}"')) + "-f", + "--format", + type=str, + dest="format", + help=( + "Optionally specify a custom output format string. Any" + " instance-data variable can be specified between double-curly" + ' braces. For example -f "{{ v2.cloud_name }}"' + ), + ) return parser @@ -91,7 +131,7 @@ def load_userdata(ud_file_path): """ bdata = util.load_file(ud_file_path, decode=False) try: - return bdata.decode('utf-8') + return bdata.decode("utf-8") except UnicodeDecodeError: return util.decomp_gzip(bdata, quiet=False, decode=True) @@ -118,13 +158,15 @@ def _read_instance_data(instance_data, user_data, vendor_data) -> dict: redacted_data_fn = os.path.join(paths.run_dir, INSTANCE_JSON_FILE) if uid == 0: sensitive_data_fn = os.path.join( - paths.run_dir, INSTANCE_JSON_SENSITIVE_FILE) + paths.run_dir, INSTANCE_JSON_SENSITIVE_FILE + ) if os.path.exists(sensitive_data_fn): instance_data_fn = sensitive_data_fn else: LOG.warning( - 'Missing root-readable %s. Using redacted %s instead.', - sensitive_data_fn, redacted_data_fn + "Missing root-readable %s. Using redacted %s instead.", + sensitive_data_fn, + redacted_data_fn, ) instance_data_fn = redacted_data_fn else: @@ -132,11 +174,11 @@ def _read_instance_data(instance_data, user_data, vendor_data) -> dict: if user_data: user_data_fn = user_data else: - user_data_fn = os.path.join(paths.instance_link, 'user-data.txt') + user_data_fn = os.path.join(paths.instance_link, "user-data.txt") if vendor_data: vendor_data_fn = vendor_data else: - vendor_data_fn = os.path.join(paths.instance_link, 'vendor-data.txt') + vendor_data_fn = os.path.join(paths.instance_link, "vendor-data.txt") try: instance_json = util.load_file(instance_data_fn) @@ -144,24 +186,30 @@ def _read_instance_data(instance_data, user_data, vendor_data) -> dict: if e.errno == EACCES: LOG.error("No read permission on '%s'. 
Try sudo", instance_data_fn) else: - LOG.error('Missing instance-data file: %s', instance_data_fn) + LOG.error("Missing instance-data file: %s", instance_data_fn) raise instance_data = util.load_json(instance_json) if uid != 0: - instance_data['userdata'] = ( - '<%s> file:%s' % (REDACT_SENSITIVE_VALUE, user_data_fn)) - instance_data['vendordata'] = ( - '<%s> file:%s' % (REDACT_SENSITIVE_VALUE, vendor_data_fn)) + instance_data["userdata"] = "<%s> file:%s" % ( + REDACT_SENSITIVE_VALUE, + user_data_fn, + ) + instance_data["vendordata"] = "<%s> file:%s" % ( + REDACT_SENSITIVE_VALUE, + vendor_data_fn, + ) else: - instance_data['userdata'] = load_userdata(user_data_fn) - instance_data['vendordata'] = load_userdata(vendor_data_fn) + instance_data["userdata"] = load_userdata(user_data_fn) + instance_data["vendordata"] = load_userdata(vendor_data_fn) return instance_data def _find_instance_data_leaf_by_varname_path( - jinja_vars_without_aliases: dict, jinja_vars_with_aliases: dict, - varname: str, list_keys: bool + jinja_vars_without_aliases: dict, + jinja_vars_with_aliases: dict, + varname: str, + list_keys: bool, ): """Return the value of the dot-delimited varname path in instance-data @@ -174,7 +222,7 @@ def _find_instance_data_leaf_by_varname_path( """ walked_key_path = "" response = jinja_vars_without_aliases - for key_path_part in varname.split('.'): + for key_path_part in varname.split("."): try: # Walk key path using complete aliases dict, yet response # should only contain jinja_without_aliases @@ -205,8 +253,9 @@ def handle_args(name, args): addLogHandlerCLI(LOG, log.DEBUG if args.debug else log.WARNING) if not any([args.list_keys, args.varname, args.format, args.dump_all]): LOG.error( - 'Expected one of the options: --all, --format,' - ' --list-keys or varname') + "Expected one of the options: --all, --format," + " --list-keys or varname" + ) get_parser().print_help() return 1 try: @@ -216,11 +265,13 @@ def handle_args(name, args): except (IOError, OSError): return 1 if args.format: - payload = '## template: jinja\n{fmt}'.format(fmt=args.format) + payload = "## template: jinja\n{fmt}".format(fmt=args.format) rendered_payload = render_jinja_payload( - payload=payload, payload_fn='query commandline', + payload=payload, + payload_fn="query commandline", instance_data=instance_data, - debug=True if args.debug else False) + debug=True if args.debug else False, + ) if rendered_payload: print(rendered_payload) return 0 @@ -240,7 +291,7 @@ def handle_args(name, args): jinja_vars_without_aliases=response, jinja_vars_with_aliases=jinja_vars_with_aliases, varname=args.varname, - list_keys=args.list_keys + list_keys=args.list_keys, ) except (KeyError, ValueError) as e: LOG.error(e) @@ -248,11 +299,10 @@ def handle_args(name, args): if args.list_keys: if not isinstance(response, dict): LOG.error( - "--list-keys provided but '%s' is not a dict", - args.varname + "--list-keys provided but '%s' is not a dict", args.varname ) return 1 - response = '\n'.join(sorted(response.keys())) + response = "\n".join(sorted(response.keys())) if not isinstance(response, str): response = util.json_dumps(response) print(response) @@ -265,7 +315,7 @@ def main(): sys.exit(handle_args(NAME, parser.parse_args())) -if __name__ == '__main__': +if __name__ == "__main__": main() # vi: ts=4 expandtab diff --git a/cloudinit/cmd/status.py b/cloudinit/cmd/status.py index ea79a85b122..cff16c344fd 100644 --- a/cloudinit/cmd/status.py +++ b/cloudinit/cmd/status.py @@ -7,20 +7,20 @@ import argparse import os import sys -from time 
import gmtime, strftime, sleep +from time import gmtime, sleep, strftime from cloudinit.distros import uses_systemd from cloudinit.stages import Init from cloudinit.util import get_cmdline, load_file, load_json -CLOUDINIT_DISABLED_FILE = '/etc/cloud/cloud-init.disabled' +CLOUDINIT_DISABLED_FILE = "/etc/cloud/cloud-init.disabled" # customer visible status messages -STATUS_ENABLED_NOT_RUN = 'not run' -STATUS_RUNNING = 'running' -STATUS_DONE = 'done' -STATUS_ERROR = 'error' -STATUS_DISABLED = 'disabled' +STATUS_ENABLED_NOT_RUN = "not run" +STATUS_RUNNING = "running" +STATUS_DONE = "done" +STATUS_ERROR = "error" +STATUS_DISABLED = "disabled" def get_parser(parser=None): @@ -34,15 +34,25 @@ def get_parser(parser=None): """ if not parser: parser = argparse.ArgumentParser( - prog='status', - description='Report run status of cloud init') + prog="status", description="Report run status of cloud init" + ) parser.add_argument( - '-l', '--long', action='store_true', default=False, - help=('Report long format of statuses including run stage name and' - ' error messages')) + "-l", + "--long", + action="store_true", + default=False, + help=( + "Report long format of statuses including run stage name and" + " error messages" + ), + ) parser.add_argument( - '-w', '--wait', action='store_true', default=False, - help='Block waiting on cloud-init to complete') + "-w", + "--wait", + action="store_true", + default=False, + help="Block waiting on cloud-init to complete", + ) return parser @@ -55,18 +65,18 @@ def handle_status_args(name, args): status, status_detail, time = _get_status_details(init.paths) if args.wait: while status in (STATUS_ENABLED_NOT_RUN, STATUS_RUNNING): - sys.stdout.write('.') + sys.stdout.write(".") sys.stdout.flush() status, status_detail, time = _get_status_details(init.paths) sleep(0.25) - sys.stdout.write('\n') + sys.stdout.write("\n") if args.long: - print('status: {0}'.format(status)) + print("status: {0}".format(status)) if time: - print('time: {0}'.format(time)) - print('detail:\n{0}'.format(status_detail)) + print("time: {0}".format(time)) + print("detail:\n{0}".format(status_detail)) else: - print('status: {0}'.format(status)) + print("status: {0}".format(status)) return 1 if status == STATUS_ERROR else 0 @@ -81,20 +91,20 @@ def _is_cloudinit_disabled(disable_file, paths): is_disabled = False cmdline_parts = get_cmdline().split() if not uses_systemd(): - reason = 'Cloud-init enabled on sysvinit' - elif 'cloud-init=enabled' in cmdline_parts: - reason = 'Cloud-init enabled by kernel command line cloud-init=enabled' + reason = "Cloud-init enabled on sysvinit" + elif "cloud-init=enabled" in cmdline_parts: + reason = "Cloud-init enabled by kernel command line cloud-init=enabled" elif os.path.exists(disable_file): is_disabled = True - reason = 'Cloud-init disabled by {0}'.format(disable_file) - elif 'cloud-init=disabled' in cmdline_parts: + reason = "Cloud-init disabled by {0}".format(disable_file) + elif "cloud-init=disabled" in cmdline_parts: is_disabled = True - reason = 'Cloud-init disabled by kernel parameter cloud-init=disabled' - elif not os.path.exists(os.path.join(paths.run_dir, 'enabled')): + reason = "Cloud-init disabled by kernel parameter cloud-init=disabled" + elif not os.path.exists(os.path.join(paths.run_dir, "enabled")): is_disabled = True - reason = 'Cloud-init disabled by cloud-init-generator' + reason = "Cloud-init disabled by cloud-init-generator" else: - reason = 'Cloud-init enabled by systemd cloud-init-generator' + reason = "Cloud-init enabled by systemd 
cloud-init-generator" return (is_disabled, reason) @@ -106,34 +116,35 @@ def _get_status_details(paths): Values are obtained from parsing paths.run_dir/status.json. """ status = STATUS_ENABLED_NOT_RUN - status_detail = '' + status_detail = "" status_v1 = {} - status_file = os.path.join(paths.run_dir, 'status.json') - result_file = os.path.join(paths.run_dir, 'result.json') + status_file = os.path.join(paths.run_dir, "status.json") + result_file = os.path.join(paths.run_dir, "result.json") (is_disabled, reason) = _is_cloudinit_disabled( - CLOUDINIT_DISABLED_FILE, paths) + CLOUDINIT_DISABLED_FILE, paths + ) if is_disabled: status = STATUS_DISABLED status_detail = reason if os.path.exists(status_file): if not os.path.exists(result_file): status = STATUS_RUNNING - status_v1 = load_json(load_file(status_file)).get('v1', {}) + status_v1 = load_json(load_file(status_file)).get("v1", {}) errors = [] latest_event = 0 for key, value in sorted(status_v1.items()): - if key == 'stage': + if key == "stage": if value: status = STATUS_RUNNING - status_detail = 'Running in stage: {0}'.format(value) - elif key == 'datasource': + status_detail = "Running in stage: {0}".format(value) + elif key == "datasource": status_detail = value elif isinstance(value, dict): - errors.extend(value.get('errors', [])) - start = value.get('start') or 0 - finished = value.get('finished') or 0 + errors.extend(value.get("errors", [])) + start = value.get("start") or 0 + finished = value.get("finished") or 0 if finished == 0 and start != 0: status = STATUS_RUNNING event_time = max(start, finished) @@ -141,23 +152,23 @@ def _get_status_details(paths): latest_event = event_time if errors: status = STATUS_ERROR - status_detail = '\n'.join(errors) + status_detail = "\n".join(errors) elif status == STATUS_ENABLED_NOT_RUN and latest_event > 0: status = STATUS_DONE if latest_event: - time = strftime('%a, %d %b %Y %H:%M:%S %z', gmtime(latest_event)) + time = strftime("%a, %d %b %Y %H:%M:%S %z", gmtime(latest_event)) else: - time = '' + time = "" return status, status_detail, time def main(): """Tool to report status of cloud-init.""" parser = get_parser() - sys.exit(handle_status_args('status', parser.parse_args())) + sys.exit(handle_status_args("status", parser.parse_args())) -if __name__ == '__main__': +if __name__ == "__main__": main() # vi: ts=4 expandtab diff --git a/cloudinit/config/__init__.py b/cloudinit/config/__init__.py index 0ef9a7483be..ed124180579 100644 --- a/cloudinit/config/__init__.py +++ b/cloudinit/config/__init__.py @@ -6,9 +6,8 @@ # # This file is part of cloud-init. See LICENSE file for license information. 
-from cloudinit.settings import (PER_INSTANCE, FREQUENCIES) - from cloudinit import log as logging +from cloudinit.settings import FREQUENCIES, PER_INSTANCE LOG = logging.getLogger(__name__) @@ -22,26 +21,27 @@ def form_module_name(name): canon_name = name.replace("-", "_") if canon_name.lower().endswith(".py"): - canon_name = canon_name[0:(len(canon_name) - 3)] + canon_name = canon_name[0 : (len(canon_name) - 3)] canon_name = canon_name.strip() if not canon_name: return None if not canon_name.startswith(MOD_PREFIX): - canon_name = '%s%s' % (MOD_PREFIX, canon_name) + canon_name = "%s%s" % (MOD_PREFIX, canon_name) return canon_name def fixup_module(mod, def_freq=PER_INSTANCE): - if not hasattr(mod, 'frequency'): - setattr(mod, 'frequency', def_freq) + if not hasattr(mod, "frequency"): + setattr(mod, "frequency", def_freq) else: freq = mod.frequency if freq and freq not in FREQUENCIES: LOG.warning("Module %s has an unknown frequency %s", mod, freq) - if not hasattr(mod, 'distros'): - setattr(mod, 'distros', []) - if not hasattr(mod, 'osfamilies'): - setattr(mod, 'osfamilies', []) + if not hasattr(mod, "distros"): + setattr(mod, "distros", []) + if not hasattr(mod, "osfamilies"): + setattr(mod, "osfamilies", []) return mod + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_apk_configure.py b/cloudinit/config/cc_apk_configure.py index d227a58df00..a615c814b58 100644 --- a/cloudinit/config/cc_apk_configure.py +++ b/cloudinit/config/cc_apk_configure.py @@ -9,9 +9,7 @@ from textwrap import dedent from cloudinit import log as logging -from cloudinit import temp_utils -from cloudinit import templater -from cloudinit import util +from cloudinit import temp_utils, templater, util from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit.settings import PER_INSTANCE @@ -54,34 +52,41 @@ frequency = PER_INSTANCE -distros = ['alpine'] +distros = ["alpine"] meta = { - 'id': 'cc_apk_configure', - 'name': 'APK Configure', - 'title': 'Configure apk repositories file', - 'description': dedent("""\ + "id": "cc_apk_configure", + "name": "APK Configure", + "title": "Configure apk repositories file", + "description": dedent( + """\ This module handles configuration of the /etc/apk/repositories file. .. note:: To ensure that apk configuration is valid yaml, any strings containing special characters, especially ``:`` should be quoted. - """), - 'distros': distros, - 'examples': [ - dedent("""\ + """ + ), + "distros": distros, + "examples": [ + dedent( + """\ # Keep the existing /etc/apk/repositories file unaltered. apk_repos: preserve_repositories: true - """), - dedent("""\ + """ + ), + dedent( + """\ # Create repositories file for Alpine v3.12 main and community # using default mirror site. apk_repos: alpine_repo: community_enabled: true version: 'v3.12' - """), - dedent("""\ + """ + ), + dedent( + """\ # Create repositories file for Alpine Edge main, community, and # testing using a specified mirror site and also a local repo. 
apk_repos: @@ -91,21 +96,23 @@ testing_enabled: true version: 'edge' local_repo_base_url: 'https://my-local-server/local-alpine' - """), + """ + ), ], - 'frequency': frequency, + "frequency": frequency, } schema = { - 'type': 'object', - 'properties': { - 'apk_repos': { - 'type': 'object', - 'properties': { - 'preserve_repositories': { - 'type': 'boolean', - 'default': False, - 'description': dedent("""\ + "type": "object", + "properties": { + "apk_repos": { + "type": "object", + "properties": { + "preserve_repositories": { + "type": "boolean", + "default": False, + "description": dedent( + """\ By default, cloud-init will generate a new repositories file ``/etc/apk/repositories`` based on any valid configuration settings specified within a apk_repos @@ -116,33 +123,41 @@ The ``preserve_repositories`` option overrides all other config keys that would alter ``/etc/apk/repositories``. - """) + """ + ), }, - 'alpine_repo': { - 'type': ['object', 'null'], - 'properties': { - 'base_url': { - 'type': 'string', - 'default': DEFAULT_MIRROR, - 'description': dedent("""\ + "alpine_repo": { + "type": ["object", "null"], + "properties": { + "base_url": { + "type": "string", + "default": DEFAULT_MIRROR, + "description": dedent( + """\ The base URL of an Alpine repository, or mirror, to download official packages from. If not specified then it defaults to ``{}`` - """.format(DEFAULT_MIRROR)) + """.format( + DEFAULT_MIRROR + ) + ), }, - 'community_enabled': { - 'type': 'boolean', - 'default': False, - 'description': dedent("""\ + "community_enabled": { + "type": "boolean", + "default": False, + "description": dedent( + """\ Whether to add the Community repo to the repositories file. By default the Community repo is not included. - """) + """ + ), }, - 'testing_enabled': { - 'type': 'boolean', - 'default': False, - 'description': dedent("""\ + "testing_enabled": { + "type": "boolean", + "default": False, + "description": dedent( + """\ Whether to add the Testing repo to the repositories file. By default the Testing repo is not included. It is only recommended @@ -151,32 +166,37 @@ installed from Testing may have dependancies that conflict with those in non-Edge Main or Community repos." - """) + """ + ), }, - 'version': { - 'type': 'string', - 'description': dedent("""\ + "version": { + "type": "string", + "description": dedent( + """\ The Alpine version to use (e.g. ``v3.12`` or ``edge``) - """) + """ + ), }, }, - 'required': ['version'], - 'minProperties': 1, - 'additionalProperties': False, + "required": ["version"], + "minProperties": 1, + "additionalProperties": False, }, - 'local_repo_base_url': { - 'type': 'string', - 'description': dedent("""\ + "local_repo_base_url": { + "type": "string", + "description": dedent( + """\ The base URL of an Alpine repository containing unofficial packages - """) - } + """ + ), + }, }, - 'minProperties': 1, # Either preserve_repositories or alpine_repo - 'additionalProperties': False, + "minProperties": 1, # Either preserve_repositories or alpine_repo + "additionalProperties": False, } - } + }, } __doc__ = get_meta_doc(meta, schema) @@ -195,38 +215,44 @@ def handle(name, cfg, cloud, log, _args): # If there is no "apk_repos" section in the configuration # then do nothing. 
- apk_section = cfg.get('apk_repos') + apk_section = cfg.get("apk_repos") if not apk_section: - LOG.debug(("Skipping module named %s," - " no 'apk_repos' section found"), name) + LOG.debug( + "Skipping module named %s, no 'apk_repos' section found", name + ) return validate_cloudconfig_schema(cfg, schema) # If "preserve_repositories" is explicitly set to True in # the configuration do nothing. - if util.get_cfg_option_bool(apk_section, 'preserve_repositories', False): - LOG.debug(("Skipping module named %s," - " 'preserve_repositories' is set"), name) + if util.get_cfg_option_bool(apk_section, "preserve_repositories", False): + LOG.debug( + "Skipping module named %s, 'preserve_repositories' is set", name + ) return # If there is no "alpine_repo" subsection of "apk_repos" present in the # configuration then do nothing, as at least "version" is required to # create valid repositories entries. - alpine_repo = apk_section.get('alpine_repo') + alpine_repo = apk_section.get("alpine_repo") if not alpine_repo: - LOG.debug(("Skipping module named %s," - " no 'alpine_repo' configuration found"), name) + LOG.debug( + "Skipping module named %s, no 'alpine_repo' configuration found", + name, + ) return # If there is no "version" value present in configuration then do nothing. - alpine_version = alpine_repo.get('version') + alpine_version = alpine_repo.get("version") if not alpine_version: - LOG.debug(("Skipping module named %s," - " 'version' not specified in alpine_repo"), name) + LOG.debug( + "Skipping module named %s, 'version' not specified in alpine_repo", + name, + ) return - local_repo = apk_section.get('local_repo_base_url', '') + local_repo = apk_section.get("local_repo_base_url", "") _write_repositories_file(alpine_repo, alpine_version, local_repo) @@ -240,22 +266,23 @@ def _write_repositories_file(alpine_repo, alpine_version, local_repo): @param local_repo: A string containing the base URL of a local repo. 
""" - repo_file = '/etc/apk/repositories' + repo_file = "/etc/apk/repositories" - alpine_baseurl = alpine_repo.get('base_url', DEFAULT_MIRROR) + alpine_baseurl = alpine_repo.get("base_url", DEFAULT_MIRROR) - params = {'alpine_baseurl': alpine_baseurl, - 'alpine_version': alpine_version, - 'community_enabled': alpine_repo.get('community_enabled'), - 'testing_enabled': alpine_repo.get('testing_enabled'), - 'local_repo': local_repo} + params = { + "alpine_baseurl": alpine_baseurl, + "alpine_version": alpine_version, + "community_enabled": alpine_repo.get("community_enabled"), + "testing_enabled": alpine_repo.get("testing_enabled"), + "local_repo": local_repo, + } - tfile = temp_utils.mkstemp(prefix='template_name-', suffix=".tmpl") + tfile = temp_utils.mkstemp(prefix="template_name-", suffix=".tmpl") template_fn = tfile[1] # Filepath is second item in tuple util.write_file(template_fn, content=REPOSITORIES_TEMPLATE) - LOG.debug('Generating Alpine repository configuration file: %s', - repo_file) + LOG.debug("Generating Alpine repository configuration file: %s", repo_file) templater.render_to_file(template_fn, repo_file, params) # Clean up temporary template util.del_file(template_fn) diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py index 2e844c2cb20..b07285174f6 100644 --- a/cloudinit/config/cc_apt_configure.py +++ b/cloudinit/config/cc_apt_configure.py @@ -10,16 +10,14 @@ import glob import os -import re import pathlib +import re from textwrap import dedent -from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit import gpg from cloudinit import log as logging -from cloudinit import subp -from cloudinit import templater -from cloudinit import util +from cloudinit import subp, templater, util +from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit.settings import PER_INSTANCE LOG = logging.getLogger(__name__) @@ -27,59 +25,46 @@ # this will match 'XXX:YYY' (ie, 'cloud-archive:foo' or 'ppa:bar') ADD_APT_REPO_MATCH = r"^[\w-]+:\w" -APT_LOCAL_KEYS = '/etc/apt/trusted.gpg' -APT_TRUSTED_GPG_DIR = '/etc/apt/trusted.gpg.d/' -CLOUD_INIT_GPG_DIR = '/etc/apt/cloud-init.gpg.d/' +APT_LOCAL_KEYS = "/etc/apt/trusted.gpg" +APT_TRUSTED_GPG_DIR = "/etc/apt/trusted.gpg.d/" +CLOUD_INIT_GPG_DIR = "/etc/apt/cloud-init.gpg.d/" frequency = PER_INSTANCE distros = ["ubuntu", "debian"] mirror_property = { - 'type': 'array', - 'items': { - 'type': 'object', - 'additionalProperties': False, - 'required': ['arches'], - 'properties': { - 'arches': { - 'type': 'array', - 'items': { - 'type': 'string' - }, - 'minItems': 1 - }, - 'uri': { - 'type': 'string', - 'format': 'uri' - }, - 'search': { - 'type': 'array', - 'items': { - 'type': 'string', - 'format': 'uri' - }, - 'minItems': 1 - }, - 'search_dns': { - 'type': 'boolean', + "type": "array", + "items": { + "type": "object", + "additionalProperties": False, + "required": ["arches"], + "properties": { + "arches": { + "type": "array", + "items": {"type": "string"}, + "minItems": 1, }, - 'keyid': { - 'type': 'string' + "uri": {"type": "string", "format": "uri"}, + "search": { + "type": "array", + "items": {"type": "string", "format": "uri"}, + "minItems": 1, }, - 'key': { - 'type': 'string' + "search_dns": { + "type": "boolean", }, - 'keyserver': { - 'type': 'string' - } - } - } + "keyid": {"type": "string"}, + "key": {"type": "string"}, + "keyserver": {"type": "string"}, + }, + }, } meta = { - 'id': 'cc_apt_configure', - 'name': 'Apt Configure', - 'title': 
'Configure apt for the user', - 'description': dedent("""\ + "id": "cc_apt_configure", + "name": "Apt Configure", + "title": "Configure apt for the user", + "description": dedent( + """\ This module handles both configuration of apt options and adding source lists. There are configuration options such as ``apt_get_wrapper`` and ``apt_get_command`` that control how @@ -94,9 +79,12 @@ .. note:: For more information about apt configuration, see the - ``Additional apt configuration`` example."""), - 'distros': distros, - 'examples': [dedent("""\ + ``Additional apt configuration`` example.""" + ), + "distros": distros, + "examples": [ + dedent( + """\ apt: preserve_sources_list: false disable_suites: @@ -153,21 +141,24 @@ key: | ------BEGIN PGP PUBLIC KEY BLOCK------- - ------END PGP PUBLIC KEY BLOCK-------""")], - 'frequency': frequency, + ------END PGP PUBLIC KEY BLOCK-------""" + ) + ], + "frequency": frequency, } schema = { - 'type': 'object', - 'properties': { - 'apt': { - 'type': 'object', - 'additionalProperties': False, - 'properties': { - 'preserve_sources_list': { - 'type': 'boolean', - 'default': False, - 'description': dedent("""\ + "type": "object", + "properties": { + "apt": { + "type": "object", + "additionalProperties": False, + "properties": { + "preserve_sources_list": { + "type": "boolean", + "default": False, + "description": dedent( + """\ By default, cloud-init will generate a new sources list in ``/etc/apt/sources.list.d`` based on any changes specified in cloud config. To disable this @@ -179,15 +170,15 @@ all other config keys that would alter ``sources.list`` or ``sources.list.d``, **except** for additional sources to be added - to ``sources.list.d``.""") + to ``sources.list.d``.""" + ), }, - 'disable_suites': { - 'type': 'array', - 'items': { - 'type': 'string' - }, - 'uniqueItems': True, - 'description': dedent("""\ + "disable_suites": { + "type": "array", + "items": {"type": "string"}, + "uniqueItems": True, + "description": dedent( + """\ Entries in the sources list can be disabled using ``disable_suites``, which takes a list of suites to be disabled. If the string ``$RELEASE`` is @@ -206,11 +197,13 @@ When a suite is disabled using ``disable_suites``, its entry in ``sources.list`` is not deleted; it - is just commented out.""") + is just commented out.""" + ), }, - 'primary': { + "primary": { **mirror_property, - 'description': dedent("""\ + "description": dedent( + """\ The primary and security archive mirrors can be specified using the ``primary`` and ``security`` keys, respectively. Both the @@ -264,27 +257,35 @@ ``http://archive.ubuntu.com/ubuntu``. - ``security`` => \ ``http://security.ubuntu.com/ubuntu`` - """) + """ + ), }, - 'security': { + "security": { **mirror_property, - 'description': dedent("""\ - Please refer to the primary config documentation""") + "description": dedent( + """\ + Please refer to the primary config documentation""" + ), }, - 'add_apt_repo_match': { - 'type': 'string', - 'default': ADD_APT_REPO_MATCH, - 'description': dedent("""\ + "add_apt_repo_match": { + "type": "string", + "default": ADD_APT_REPO_MATCH, + "description": dedent( + """\ All source entries in ``apt-sources`` that match regex in ``add_apt_repo_match`` will be added to the system using ``add-apt-repository``. 
If ``add_apt_repo_match`` is not specified, it - defaults to ``{}``""".format(ADD_APT_REPO_MATCH)) + defaults to ``{}``""".format( + ADD_APT_REPO_MATCH + ) + ), }, - 'debconf_selections': { - 'type': 'object', - 'items': {'type': 'string'}, - 'description': dedent("""\ + "debconf_selections": { + "type": "object", + "items": {"type": "string"}, + "description": dedent( + """\ Debconf additional configurations can be specified as a dictionary under the ``debconf_selections`` config key, with each key in the dict representing a @@ -308,11 +309,13 @@ For example: \ ``ippackage ippackage/ip string 127.0.01`` - """) + """ + ), }, - 'sources_list': { - 'type': 'string', - 'description': dedent("""\ + "sources_list": { + "type": "string", + "description": dedent( + """\ Specifies a custom template for rendering ``sources.list`` . If no ``sources_list`` template is given, cloud-init will use sane default. Within @@ -323,45 +326,55 @@ - ``$RELEASE`` - ``$PRIMARY`` - ``$SECURITY`` - - ``$KEY_FILE``""") + - ``$KEY_FILE``""" + ), }, - 'conf': { - 'type': 'string', - 'description': dedent("""\ + "conf": { + "type": "string", + "description": dedent( + """\ Specify configuration for apt, such as proxy configuration. This configuration is specified as a string. For multiline apt configuration, make sure - to follow yaml syntax.""") + to follow yaml syntax.""" + ), }, - 'https_proxy': { - 'type': 'string', - 'description': dedent("""\ + "https_proxy": { + "type": "string", + "description": dedent( + """\ More convenient way to specify https apt proxy. https proxy url is specified in the format - ``https://[[user][:pass]@]host[:port]/``.""") + ``https://[[user][:pass]@]host[:port]/``.""" + ), }, - 'http_proxy': { - 'type': 'string', - 'description': dedent("""\ + "http_proxy": { + "type": "string", + "description": dedent( + """\ More convenient way to specify http apt proxy. http proxy url is specified in the format - ``http://[[user][:pass]@]host[:port]/``.""") + ``http://[[user][:pass]@]host[:port]/``.""" + ), }, - 'proxy': { - 'type': 'string', - 'description': 'Alias for defining a http apt proxy.' + "proxy": { + "type": "string", + "description": "Alias for defining a http apt proxy.", }, - 'ftp_proxy': { - 'type': 'string', - 'description': dedent("""\ + "ftp_proxy": { + "type": "string", + "description": dedent( + """\ More convenient way to specify ftp apt proxy. 
ftp proxy url is specified in the format - ``ftp://[[user][:pass]@]host[:port]/``.""") + ``ftp://[[user][:pass]@]host[:port]/``.""" + ), }, - 'sources': { - 'type': 'object', - 'items': {'type': 'string'}, - 'description': dedent("""\ + "sources": { + "type": "object", + "items": {"type": "string"}, + "description": dedent( + """\ Source list entries can be specified as a dictionary under the ``sources`` config key, with each key in the dict representing a different source @@ -394,11 +407,12 @@ - ``$PRIMARY`` - ``$SECURITY`` - ``$RELEASE`` - - ``$KEY_FILE``""") - } - } + - ``$KEY_FILE``""" + ), + }, + }, } - } + }, } __doc__ = get_meta_doc(meta, schema) @@ -415,18 +429,22 @@ DEFAULT_KEYSERVER = "keyserver.ubuntu.com" # Default archive mirrors -PRIMARY_ARCH_MIRRORS = {"PRIMARY": "http://archive.ubuntu.com/ubuntu/", - "SECURITY": "http://security.ubuntu.com/ubuntu/"} -PORTS_MIRRORS = {"PRIMARY": "http://ports.ubuntu.com/ubuntu-ports", - "SECURITY": "http://ports.ubuntu.com/ubuntu-ports"} -PRIMARY_ARCHES = ['amd64', 'i386'] -PORTS_ARCHES = ['s390x', 'arm64', 'armhf', 'powerpc', 'ppc64el', 'riscv64'] +PRIMARY_ARCH_MIRRORS = { + "PRIMARY": "http://archive.ubuntu.com/ubuntu/", + "SECURITY": "http://security.ubuntu.com/ubuntu/", +} +PORTS_MIRRORS = { + "PRIMARY": "http://ports.ubuntu.com/ubuntu-ports", + "SECURITY": "http://ports.ubuntu.com/ubuntu-ports", +} +PRIMARY_ARCHES = ["amd64", "i386"] +PORTS_ARCHES = ["s390x", "arm64", "armhf", "powerpc", "ppc64el", "riscv64"] def get_default_mirrors(arch=None, target=None): """returns the default mirrors for the target. These depend on the - architecture, for more see: - https://wiki.ubuntu.com/UbuntuDevelopment/PackageArchive#Ports""" + architecture, for more see: + https://wiki.ubuntu.com/UbuntuDevelopment/PackageArchive#Ports""" if arch is None: arch = util.get_dpkg_architecture(target) if arch in PRIMARY_ARCHES: @@ -438,8 +456,8 @@ def get_default_mirrors(arch=None, target=None): def handle(name, ocfg, cloud, log, _): """process the config for apt_config. This can be called from - curthooks if a global apt config was provided or via the "apt" - standalone command.""" + curthooks if a global apt config was provided or via the "apt" + standalone command.""" # keeping code close to curtin codebase via entry handler target = None if log is not None: @@ -447,12 +465,14 @@ def handle(name, ocfg, cloud, log, _): LOG = log # feed back converted config, but only work on the subset under 'apt' ocfg = convert_to_v3_apt_format(ocfg) - cfg = ocfg.get('apt', {}) + cfg = ocfg.get("apt", {}) if not isinstance(cfg, dict): raise ValueError( "Expected dictionary for 'apt' config, found {config_type}".format( - config_type=type(cfg))) + config_type=type(cfg) + ) + ) validate_cloudconfig_schema(cfg, schema) apply_debconf_selections(cfg, target) @@ -463,7 +483,7 @@ def _should_configure_on_empty_apt(): # if no config was provided, should apt configuration be done? if util.system_is_snappy(): return False, "system is snappy." - if not (subp.which('apt-get') or subp.which('apt')): + if not (subp.which("apt-get") or subp.which("apt")): return False, "no apt commands." return True, "Apt is available." 
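As an aside on the reflowed mirror constants above: ``get_default_mirrors`` picks a mirror set purely by dpkg architecture, with amd64/i386 on the main archive and every other architecture on ports. A minimal, self-contained sketch of that selection, assuming only the constants shown in the hunk (``default_mirrors`` is an illustrative stand-in that collapses the explicit ``PORTS_ARCHES`` membership check into a simple else, and skips the ``util.get_dpkg_architecture`` lookup the real function performs):

    PRIMARY_ARCH_MIRRORS = {
        "PRIMARY": "http://archive.ubuntu.com/ubuntu/",
        "SECURITY": "http://security.ubuntu.com/ubuntu/",
    }
    PORTS_MIRRORS = {
        "PRIMARY": "http://ports.ubuntu.com/ubuntu-ports",
        "SECURITY": "http://ports.ubuntu.com/ubuntu-ports",
    }
    PRIMARY_ARCHES = ["amd64", "i386"]

    def default_mirrors(arch):
        # amd64/i386 live on archive.ubuntu.com; other arches use ports.
        return PRIMARY_ARCH_MIRRORS if arch in PRIMARY_ARCHES else PORTS_MIRRORS

    assert default_mirrors("amd64") is PRIMARY_ARCH_MIRRORS
    assert default_mirrors("riscv64") is PORTS_MIRRORS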
@@ -478,12 +498,12 @@ def apply_apt(cfg, cloud, target): LOG.debug("handling apt config: %s", cfg) - release = util.lsb_release(target=target)['codename'] + release = util.lsb_release(target=target)["codename"] arch = util.get_dpkg_architecture(target) mirrors = find_apt_mirror_info(cfg, cloud, arch=arch) LOG.debug("Apt Mirror info: %s", mirrors) - if util.is_false(cfg.get('preserve_sources_list', False)): + if util.is_false(cfg.get("preserve_sources_list", False)): add_mirror_keys(cfg, target) generate_sources_list(cfg, release, mirrors, cloud) rename_apt_lists(mirrors, target, arch) @@ -494,25 +514,34 @@ def apply_apt(cfg, cloud, target): LOG.exception("Failed to apply proxy or apt config info:") # Process 'apt_source -> sources {dict}' - if 'sources' in cfg: + if "sources" in cfg: params = mirrors - params['RELEASE'] = release - params['MIRROR'] = mirrors["MIRROR"] + params["RELEASE"] = release + params["MIRROR"] = mirrors["MIRROR"] matcher = None - matchcfg = cfg.get('add_apt_repo_match', ADD_APT_REPO_MATCH) + matchcfg = cfg.get("add_apt_repo_match", ADD_APT_REPO_MATCH) if matchcfg: matcher = re.compile(matchcfg).search - add_apt_sources(cfg['sources'], cloud, target=target, - template_params=params, aa_repo_match=matcher) + add_apt_sources( + cfg["sources"], + cloud, + target=target, + template_params=params, + aa_repo_match=matcher, + ) def debconf_set_selections(selections, target=None): - if not selections.endswith(b'\n'): - selections += b'\n' - subp.subp(['debconf-set-selections'], data=selections, target=target, - capture=True) + if not selections.endswith(b"\n"): + selections += b"\n" + subp.subp( + ["debconf-set-selections"], + data=selections, + target=target, + capture=True, + ) def dpkg_reconfigure(packages, target=None): @@ -532,12 +561,20 @@ def dpkg_reconfigure(packages, target=None): unhandled.append(pkg) if len(unhandled): - LOG.warning("The following packages were installed and preseeded, " - "but cannot be unconfigured: %s", unhandled) + LOG.warning( + "The following packages were installed and preseeded, " + "but cannot be unconfigured: %s", + unhandled, + ) if len(to_config): - subp.subp(['dpkg-reconfigure', '--frontend=noninteractive'] + - list(to_config), data=None, target=target, capture=True) + subp.subp( + ["dpkg-reconfigure", "--frontend=noninteractive"] + + list(to_config), + data=None, + target=target, + capture=True, + ) def apply_debconf_selections(cfg, target=None): @@ -546,13 +583,12 @@ def apply_debconf_selections(cfg, target=None): # set1: | # cloud-init cloud-init/datasources multiselect MAAS # set2: pkg pkg/value string bar - selsets = cfg.get('debconf_selections') + selsets = cfg.get("debconf_selections") if not selsets: LOG.debug("debconf_selections was not set in config") return - selections = '\n'.join( - [selsets[key] for key in sorted(selsets.keys())]) + selections = "\n".join([selsets[key] for key in sorted(selsets.keys())]) debconf_set_selections(selections.encode(), target=target) # get a complete list of packages listed in input @@ -579,7 +615,8 @@ def apply_debconf_selections(cfg, target=None): def clean_cloud_init(target): """clean out any local cloud-init config""" flist = glob.glob( - subp.target_path(target, "/etc/cloud/cloud.cfg.d/*dpkg*")) + subp.target_path(target, "/etc/cloud/cloud.cfg.d/*dpkg*") + ) LOG.debug("cleaning cloud-init config from: %s", flist) for dpkg_cfg in flist: @@ -588,18 +625,18 @@ def clean_cloud_init(target): def mirrorurl_to_apt_fileprefix(mirror): """mirrorurl_to_apt_fileprefix - Convert a mirror url to the 
file prefix used by apt on disk to
-       store cache information for that mirror.
-       To do so do:
-       - take off ???://
-       - drop tailing /
-       - convert in string / to _"""
+    Convert a mirror url to the file prefix used by apt on disk to
+    store cache information for that mirror.
+    To do so do:
+    - take off ???://
+    - drop trailing /
+    - convert in string / to _"""
     string = mirror
     if string.endswith("/"):
         string = string[0:-1]
     pos = string.find("://")
     if pos >= 0:
-        string = string[pos + 3:]
+        string = string[pos + 3 :]
     string = string.replace("/", "_")
     return string
@@ -631,8 +668,8 @@ def rename_apt_lists(new_mirrors, target, arch):
 def mirror_to_placeholder(tmpl, mirror, placeholder):
     """mirror_to_placeholder
-       replace the specified mirror in a template with a placeholder string
-       Checks for existance of the expected mirror and warns if not found"""
+    replace the specified mirror in a template with a placeholder string
+    Checks for existence of the expected mirror and warns if not found"""
     if mirror not in tmpl:
         LOG.warning("Expected mirror '%s' not found in: %s", mirror, tmpl)
     return tmpl.replace(mirror, placeholder)
@@ -640,13 +677,15 @@ def mirror_to_placeholder(tmpl, mirror, placeholder):
 
 def map_known_suites(suite):
     """there are a few default names which will be auto-extended.
-       This comes at the inability to use those names literally as suites,
-       but on the other hand increases readability of the cfg quite a lot"""
-    mapping = {'updates': '$RELEASE-updates',
-               'backports': '$RELEASE-backports',
-               'security': '$RELEASE-security',
-               'proposed': '$RELEASE-proposed',
-               'release': '$RELEASE'}
+    This comes at the inability to use those names literally as suites,
+    but on the other hand increases readability of the cfg quite a lot"""
+    mapping = {
+        "updates": "$RELEASE-updates",
+        "backports": "$RELEASE-backports",
+        "security": "$RELEASE-security",
+        "proposed": "$RELEASE-proposed",
+        "release": "$RELEASE",
+    }
     try:
         retsuite = mapping[suite]
     except KeyError:
@@ -656,14 +695,14 @@ def map_known_suites(suite):
 
 def disable_suites(disabled, src, release):
     """reads the config for suites to be disabled and removes those
-       from the template"""
+    from the template"""
     if not disabled:
         return src
 
     retsrc = src
     for suite in disabled:
         suite = map_known_suites(suite)
-        releasesuite = templater.render_string(suite, {'RELEASE': release})
+        releasesuite = templater.render_string(suite, {"RELEASE": release})
         LOG.debug("Disabling suite %s as %s", suite, releasesuite)
 
         newsrc = ""
@@ -685,7 +724,7 @@ def disable_suites(disabled, src, release):
                     break
 
             if cols[pcol] == releasesuite:
-                line = '# suite disabled by cloud-init: %s' % line
+                line = "# suite disabled by cloud-init: %s" % line
             newsrc += line
         retsrc = newsrc
 
@@ -694,36 +733,38 @@ def disable_suites(disabled, src, release):
 
 def add_mirror_keys(cfg, target):
     """Adds any keys included in the primary/security mirror clauses"""
-    for key in ('primary', 'security'):
+    for key in ("primary", "security"):
         for mirror in cfg.get(key, []):
             add_apt_key(mirror, target, file_name=key)
 
 
 def generate_sources_list(cfg, release, mirrors, cloud):
     """generate_sources_list
-       create a source.list file based on a custom or default template
-       by replacing mirrors and release in the template"""
+    create a source.list file based on a custom or default template
+    by replacing mirrors and release in the template"""
     aptsrc = "/etc/apt/sources.list"
-    params = {'RELEASE': release, 'codename': release}
+    params = {"RELEASE": release, "codename": release}
     for k in mirrors:
         params[k] =
mirrors[k] params[k.lower()] = mirrors[k] - tmpl = cfg.get('sources_list', None) + tmpl = cfg.get("sources_list", None) if tmpl is None: LOG.info("No custom template provided, fall back to builtin") - template_fn = cloud.get_template_filename('sources.list.%s' % - (cloud.distro.name)) + template_fn = cloud.get_template_filename( + "sources.list.%s" % (cloud.distro.name) + ) if not template_fn: - template_fn = cloud.get_template_filename('sources.list') + template_fn = cloud.get_template_filename("sources.list") if not template_fn: - LOG.warning("No template found, " - "not rendering /etc/apt/sources.list") + LOG.warning( + "No template found, not rendering /etc/apt/sources.list" + ) return tmpl = util.load_file(template_fn) rendered = templater.render_string(tmpl, params) - disabled = disable_suites(cfg.get('disable_suites'), rendered, release) + disabled = disable_suites(cfg.get("disable_suites"), rendered, release) util.write_file(aptsrc, disabled, mode=0o644) @@ -735,7 +776,7 @@ def add_apt_key_raw(key, file_name, hardened=False, target=None): LOG.debug("Adding key:\n'%s'", key) try: name = pathlib.Path(file_name).stem - return apt_key('add', output_file=name, data=key, hardened=hardened) + return apt_key("add", output_file=name, data=key, hardened=hardened) except subp.ProcessExecutionError: LOG.exception("failed to add apt GPG Key to apt keyring") raise @@ -747,26 +788,26 @@ def add_apt_key(ent, target=None, hardened=False, file_name=None): Supports raw keys or keyid's The latter will as a first step fetched to get the raw key """ - if 'keyid' in ent and 'key' not in ent: + if "keyid" in ent and "key" not in ent: keyserver = DEFAULT_KEYSERVER - if 'keyserver' in ent: - keyserver = ent['keyserver'] + if "keyserver" in ent: + keyserver = ent["keyserver"] - ent['key'] = gpg.getkeybyid(ent['keyid'], keyserver) + ent["key"] = gpg.getkeybyid(ent["keyid"], keyserver) - if 'key' in ent: + if "key" in ent: return add_apt_key_raw( - ent['key'], - file_name or ent['filename'], - hardened=hardened) + ent["key"], file_name or ent["filename"], hardened=hardened + ) def update_packages(cloud): cloud.distro.update_package_sources() -def add_apt_sources(srcdict, cloud, target=None, template_params=None, - aa_repo_match=None): +def add_apt_sources( + srcdict, cloud, target=None, template_params=None, aa_repo_match=None +): """ install keys and repo source .list files defined in 'sources' @@ -795,33 +836,34 @@ def add_apt_sources(srcdict, cloud, target=None, template_params=None, template_params = {} if aa_repo_match is None: - raise ValueError('did not get a valid repo matcher') + raise ValueError("did not get a valid repo matcher") if not isinstance(srcdict, dict): - raise TypeError('unknown apt format: %s' % (srcdict)) + raise TypeError("unknown apt format: %s" % (srcdict)) for filename in srcdict: ent = srcdict[filename] LOG.debug("adding source/key '%s'", ent) - if 'filename' not in ent: - ent['filename'] = filename + if "filename" not in ent: + ent["filename"] = filename - if 'source' in ent and '$KEY_FILE' in ent['source']: + if "source" in ent and "$KEY_FILE" in ent["source"]: key_file = add_apt_key(ent, target, hardened=True) - template_params['KEY_FILE'] = key_file + template_params["KEY_FILE"] = key_file else: key_file = add_apt_key(ent, target) - if 'source' not in ent: + if "source" not in ent: continue - source = ent['source'] + source = ent["source"] source = templater.render_string(source, template_params) - if not ent['filename'].startswith("/"): - ent['filename'] = 
os.path.join("/etc/apt/sources.list.d/", - ent['filename']) - if not ent['filename'].endswith(".list"): - ent['filename'] += ".list" + if not ent["filename"].startswith("/"): + ent["filename"] = os.path.join( + "/etc/apt/sources.list.d/", ent["filename"] + ) + if not ent["filename"].endswith(".list"): + ent["filename"] += ".list" if aa_repo_match(source): try: @@ -831,7 +873,7 @@ def add_apt_sources(srcdict, cloud, target=None, template_params=None, raise continue - sourcefn = subp.target_path(target, ent['filename']) + sourcefn = subp.target_path(target, ent["filename"]) try: contents = "%s\n" % (source) util.write_file(sourcefn, contents, omode="a") @@ -850,14 +892,14 @@ def convert_v1_to_v2_apt_format(srclist): if isinstance(srclist, list): LOG.debug("apt config: convert V1 to V2 format (source list to dict)") for srcent in srclist: - if 'filename' not in srcent: + if "filename" not in srcent: # file collides for multiple !filename cases for compatibility # yet we need them all processed, so not same dictionary key - srcent['filename'] = "cloud_config_sources.list" + srcent["filename"] = "cloud_config_sources.list" key = util.rand_dict_key(srcdict, "cloud_config_sources.list") else: # all with filename use that as key (matching new format) - key = srcent['filename'] + key = srcent["filename"] srcdict[key] = srcent elif isinstance(srclist, dict): srcdict = srclist @@ -869,7 +911,7 @@ def convert_v1_to_v2_apt_format(srclist): def convert_key(oldcfg, aptcfg, oldkey, newkey): """convert an old key to the new one if the old one exists - returns true if a key was found and converted""" + returns true if a key was found and converted""" if oldcfg.get(oldkey, None) is not None: aptcfg[newkey] = oldcfg.get(oldkey) del oldcfg[oldkey] @@ -879,33 +921,37 @@ def convert_key(oldcfg, aptcfg, oldkey, newkey): def convert_mirror(oldcfg, aptcfg): """convert old apt_mirror keys into the new more advanced mirror spec""" - keymap = [('apt_mirror', 'uri'), - ('apt_mirror_search', 'search'), - ('apt_mirror_search_dns', 'search_dns')] + keymap = [ + ("apt_mirror", "uri"), + ("apt_mirror_search", "search"), + ("apt_mirror_search_dns", "search_dns"), + ] converted = False - newmcfg = {'arches': ['default']} + newmcfg = {"arches": ["default"]} for oldkey, newkey in keymap: if convert_key(oldcfg, newmcfg, oldkey, newkey): converted = True # only insert new style config if anything was converted if converted: - aptcfg['primary'] = [newmcfg] + aptcfg["primary"] = [newmcfg] def convert_v2_to_v3_apt_format(oldcfg): """convert old to new keys and adapt restructured mirror spec""" - mapoldkeys = {'apt_sources': 'sources', - 'apt_mirror': None, - 'apt_mirror_search': None, - 'apt_mirror_search_dns': None, - 'apt_proxy': 'proxy', - 'apt_http_proxy': 'http_proxy', - 'apt_ftp_proxy': 'https_proxy', - 'apt_https_proxy': 'ftp_proxy', - 'apt_preserve_sources_list': 'preserve_sources_list', - 'apt_custom_sources_list': 'sources_list', - 'add_apt_repo_match': 'add_apt_repo_match'} + mapoldkeys = { + "apt_sources": "sources", + "apt_mirror": None, + "apt_mirror_search": None, + "apt_mirror_search_dns": None, + "apt_proxy": "proxy", + "apt_http_proxy": "http_proxy", + "apt_ftp_proxy": "https_proxy", + "apt_https_proxy": "ftp_proxy", + "apt_preserve_sources_list": "preserve_sources_list", + "apt_custom_sources_list": "sources_list", + "add_apt_repo_match": "add_apt_repo_match", + } needtoconvert = [] for oldkey in mapoldkeys: if oldkey in oldcfg: @@ -917,11 +963,13 @@ def convert_v2_to_v3_apt_format(oldcfg): # no old config, so no 
new one to be created if not needtoconvert: return oldcfg - LOG.debug("apt config: convert V2 to V3 format for keys '%s'", - ", ".join(needtoconvert)) + LOG.debug( + "apt config: convert V2 to V3 format for keys '%s'", + ", ".join(needtoconvert), + ) # if old AND new config are provided, prefer the new one (LP #1616831) - newaptcfg = oldcfg.get('apt', None) + newaptcfg = oldcfg.get("apt", None) if newaptcfg is not None: LOG.debug("apt config: V1/2 and V3 format specified, preferring V3") for oldkey in needtoconvert: @@ -932,10 +980,11 @@ def convert_v2_to_v3_apt_format(oldcfg): # no simple mapping or no collision on this particular key continue if verify != newaptcfg[newkey]: - raise ValueError("Old and New apt format defined with unequal " - "values %s vs %s @ %s" % (verify, - newaptcfg[newkey], - oldkey)) + raise ValueError( + "Old and New apt format defined with unequal " + "values %s vs %s @ %s" + % (verify, newaptcfg[newkey], oldkey) + ) # return conf after clearing conflicting V1/2 keys return oldcfg @@ -955,17 +1004,17 @@ def convert_v2_to_v3_apt_format(oldcfg): raise ValueError("old apt key '%s' left after conversion" % oldkey) # insert new format into config and return full cfg with only v3 content - oldcfg['apt'] = aptcfg + oldcfg["apt"] = aptcfg return oldcfg def convert_to_v3_apt_format(cfg): """convert the old list based format to the new dict based one. After that - convert the old dict keys/format to v3 a.k.a 'new apt config'""" + convert the old dict keys/format to v3 a.k.a 'new apt config'""" # V1 -> V2, the apt_sources entry from list to dict - apt_sources = cfg.get('apt_sources', None) + apt_sources = cfg.get("apt_sources", None) if apt_sources is not None: - cfg['apt_sources'] = convert_v1_to_v2_apt_format(apt_sources) + cfg["apt_sources"] = convert_v1_to_v2_apt_format(apt_sources) # V2 -> V3, move all former globals under the "apt" key # Restructure into new key names and mirror hierarchy @@ -997,7 +1046,12 @@ def search_for_mirror_dns(configured, mirrortype, cfg, cloud): if mydom: doms.append(".%s" % mydom) - doms.extend((".localdomain", "",)) + doms.extend( + ( + ".localdomain", + "", + ) + ) mirror_list = [] distro = cloud.distro.name @@ -1012,12 +1066,11 @@ def search_for_mirror_dns(configured, mirrortype, cfg, cloud): def update_mirror_info(pmirror, smirror, arch, cloud): """sets security mirror to primary if not defined. - returns defaults if no mirrors are defined""" + returns defaults if no mirrors are defined""" if pmirror is not None: if smirror is None: smirror = pmirror - return {'PRIMARY': pmirror, - 'SECURITY': smirror} + return {"PRIMARY": pmirror, "SECURITY": smirror} # None specified at all, get default mirrors from cloud mirror_info = cloud.datasource.get_package_mirror_info() @@ -1026,8 +1079,8 @@ def update_mirror_info(pmirror, smirror, arch, cloud): # arbitrary key/value pairs including 'primary' and 'security' keys. # caller expects dict with PRIMARY and SECURITY. 
     m = mirror_info.copy()
-    m['PRIMARY'] = m['primary']
-    m['SECURITY'] = m['security']
+    m["PRIMARY"] = m["primary"]
+    m["SECURITY"] = m["security"]
 
     return m
 
@@ -1037,7 +1090,7 @@ def update_mirror_info(pmirror, smirror, arch, cloud):
 
 def get_arch_mirrorconfig(cfg, mirrortype, arch):
     """out of a list of potential mirror configurations select
-       and return the one matching the architecture (or default)"""
+    and return the one matching the architecture (or default)"""
     # select the mirror specification (if-any)
     mirror_cfg_list = cfg.get(mirrortype, None)
     if mirror_cfg_list is None:
@@ -1056,8 +1109,8 @@ def get_arch_mirrorconfig(cfg, mirrortype, arch):
 
 def get_mirror(cfg, mirrortype, arch, cloud):
     """pass the three potential stages of mirror specification
-       returns None is neither of them found anything otherwise the first
-       hit is returned"""
+    returns None if neither of them found anything, otherwise the first
+    hit is returned"""
     mcfg = get_arch_mirrorconfig(cfg, mirrortype, arch)
     if mcfg is None:
         return None
@@ -1073,18 +1126,19 @@ def get_mirror(cfg, mirrortype, arch, cloud):
     # fallback to search_dns if specified
     if mirror is None:
         # list of mirrors to try to resolve
-        mirror = search_for_mirror_dns(mcfg.get("search_dns", None),
-                                       mirrortype, cfg, cloud)
+        mirror = search_for_mirror_dns(
+            mcfg.get("search_dns", None), mirrortype, cfg, cloud
+        )
 
     return mirror
 
 
 def find_apt_mirror_info(cfg, cloud, arch=None):
     """find_apt_mirror_info
-       find an apt_mirror given the cfg provided.
-       It can check for separate config of primary and security mirrors
-       If only primary is given security is assumed to be equal to primary
-       If the generic apt_mirror is given that is defining for both
+    find an apt_mirror given the cfg provided.
+    It can check for separate config of primary and security mirrors
+    If only primary is given security is assumed to be equal to primary
+    If the generic apt_mirror is given that is defining for both
     """
 
     if arch is None:
@@ -1105,32 +1159,35 @@ def find_apt_mirror_info(cfg, cloud, arch=None):
 
 def apply_apt_config(cfg, proxy_fname, config_fname):
     """apply_apt_config
-       Applies any apt*proxy config from if specified
+    Applies any apt*proxy config from cfg if specified
     """
     # Set up any apt proxy
-    cfgs = (('proxy', 'Acquire::http::Proxy "%s";'),
-            ('http_proxy', 'Acquire::http::Proxy "%s";'),
-            ('ftp_proxy', 'Acquire::ftp::Proxy "%s";'),
-            ('https_proxy', 'Acquire::https::Proxy "%s";'))
+    cfgs = (
+        ("proxy", 'Acquire::http::Proxy "%s";'),
+        ("http_proxy", 'Acquire::http::Proxy "%s";'),
+        ("ftp_proxy", 'Acquire::ftp::Proxy "%s";'),
+        ("https_proxy", 'Acquire::https::Proxy "%s";'),
+    )
 
     proxies = [fmt % cfg.get(name) for (name, fmt) in cfgs if cfg.get(name)]
     if len(proxies):
         LOG.debug("write apt proxy info to %s", proxy_fname)
-        util.write_file(proxy_fname, '\n'.join(proxies) + '\n')
+        util.write_file(proxy_fname, "\n".join(proxies) + "\n")
     elif os.path.isfile(proxy_fname):
         util.del_file(proxy_fname)
         LOG.debug("no apt proxy configured, removed %s", proxy_fname)
 
-    if cfg.get('conf', None):
+    if cfg.get("conf", None):
         LOG.debug("write apt config info to %s", config_fname)
-        util.write_file(config_fname, cfg.get('conf'))
+        util.write_file(config_fname, cfg.get("conf"))
     elif os.path.isfile(config_fname):
         util.del_file(config_fname)
         LOG.debug("no apt config configured, removed %s", config_fname)
 
 
-def apt_key(command, output_file=None, data=None, hardened=False,
-            human_output=True):
+def apt_key(
+    command, output_file=None, data=None, hardened=False, human_output=True
+):
     """apt-key replacement
commands implemented: 'add', 'list', 'finger' @@ -1153,32 +1210,36 @@ def _get_key_files(): key_files = [APT_LOCAL_KEYS] if os.path.isfile(APT_LOCAL_KEYS) else [] for file in os.listdir(APT_TRUSTED_GPG_DIR): - if file.endswith('.gpg') or file.endswith('.asc'): + if file.endswith(".gpg") or file.endswith(".asc"): key_files.append(APT_TRUSTED_GPG_DIR + file) - return key_files if key_files else '' + return key_files if key_files else "" def apt_key_add(): """apt-key add returns filepath to new keyring, or '/dev/null' when an error occurs """ - file_name = '/dev/null' + file_name = "/dev/null" if not output_file: util.logexc( - LOG, 'Unknown filename, failed to add key: "{}"'.format(data)) + LOG, 'Unknown filename, failed to add key: "{}"'.format(data) + ) else: try: - key_dir = \ + key_dir = ( CLOUD_INIT_GPG_DIR if hardened else APT_TRUSTED_GPG_DIR + ) stdout = gpg.dearmor(data) - file_name = '{}{}.gpg'.format(key_dir, output_file) + file_name = "{}{}.gpg".format(key_dir, output_file) util.write_file(file_name, stdout) except subp.ProcessExecutionError: - util.logexc(LOG, 'Gpg error, failed to add key: {}'.format( - data)) + util.logexc( + LOG, "Gpg error, failed to add key: {}".format(data) + ) except UnicodeDecodeError: - util.logexc(LOG, 'Decode error, failed to add key: {}'.format( - data)) + util.logexc( + LOG, "Decode error, failed to add key: {}".format(data) + ) return file_name def apt_key_list(): @@ -1193,19 +1254,20 @@ def apt_key_list(): key_list.append(gpg.list(key_file, human_output=human_output)) except subp.ProcessExecutionError as error: LOG.warning('Failed to list key "%s": %s', key_file, error) - return '\n'.join(key_list) + return "\n".join(key_list) - if command == 'add': + if command == "add": return apt_key_add() - elif command == 'finger' or command == 'list': + elif command == "finger" or command == "list": return apt_key_list() else: raise ValueError( - 'apt_key() commands add, list, and finger are currently supported') + "apt_key() commands add, list, and finger are currently supported" + ) CONFIG_CLEANERS = { - 'cloud-init': clean_cloud_init, + "cloud-init": clean_cloud_init, } # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_apt_pipelining.py b/cloudinit/config/cc_apt_pipelining.py index aa186ce227b..569849d1333 100644 --- a/cloudinit/config/cc_apt_pipelining.py +++ b/cloudinit/config/cc_apt_pipelining.py @@ -29,17 +29,19 @@ apt_pipelining: """ -from cloudinit.settings import PER_INSTANCE from cloudinit import util +from cloudinit.settings import PER_INSTANCE frequency = PER_INSTANCE -distros = ['ubuntu', 'debian'] +distros = ["ubuntu", "debian"] DEFAULT_FILE = "/etc/apt/apt.conf.d/90cloud-init-pipelining" -APT_PIPE_TPL = ("//Written by cloud-init per 'apt_pipelining'\n" - 'Acquire::http::Pipeline-Depth "%s";\n') +APT_PIPE_TPL = ( + "//Written by cloud-init per 'apt_pipelining'\n" + 'Acquire::http::Pipeline-Depth "%s";\n' +) # Acquire::http::Pipeline-Depth can be a value # from 0 to 5 indicating how many outstanding requests APT should send. 
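For reference, the rewrapped ``APT_PIPE_TPL`` above renders exactly the same two-line snippet as before; a quick sketch of what ends up in ``/etc/apt/apt.conf.d/90cloud-init-pipelining`` (template and path copied from the hunk; the depth value ``3`` is purely illustrative):

    APT_PIPE_TPL = (
        "//Written by cloud-init per 'apt_pipelining'\n"
        'Acquire::http::Pipeline-Depth "%s";\n'
    )

    # Prints:
    #   //Written by cloud-init per 'apt_pipelining'
    #   Acquire::http::Pipeline-Depth "3";
    print(APT_PIPE_TPL % 3, end="")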
@@ -49,7 +51,7 @@ def handle(_name, cfg, _cloud, log, _args): - apt_pipe_value = util.get_cfg_option_str(cfg, "apt_pipelining", 'os') + apt_pipe_value = util.get_cfg_option_str(cfg, "apt_pipelining", "os") apt_pipe_value_s = str(apt_pipe_value).lower().strip() if apt_pipe_value_s == "false": @@ -69,4 +71,5 @@ def write_apt_snippet(setting, log, f_name): util.write_file(f_name, file_contents) log.debug("Wrote %s with apt pipeline depth setting %s", f_name, setting) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_bootcmd.py b/cloudinit/config/cc_bootcmd.py index 06f7a26e5f2..bff11a247d1 100644 --- a/cloudinit/config/cc_bootcmd.py +++ b/cloudinit/config/cc_bootcmd.py @@ -12,11 +12,9 @@ import os from textwrap import dedent +from cloudinit import subp, temp_utils, util from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit.settings import PER_ALWAYS -from cloudinit import temp_utils -from cloudinit import subp -from cloudinit import util frequency = PER_ALWAYS @@ -26,13 +24,14 @@ # configuration options before actually attempting to deploy with said # configuration. -distros = ['all'] +distros = ["all"] meta = { - 'id': 'cc_bootcmd', - 'name': 'Bootcmd', - 'title': 'Run arbitrary commands early in the boot process', - 'description': dedent("""\ + "id": "cc_bootcmd", + "name": "Bootcmd", + "title": "Run arbitrary commands early in the boot process", + "description": dedent( + """\ This module runs arbitrary commands very early in the boot process, only slightly after a boothook would run. This is very similar to a boothook, but more user friendly. The environment variable @@ -48,31 +47,37 @@ when writing files, do not use /tmp dir as it races with systemd-tmpfiles-clean LP: #1707222. Use /run/somedir instead. 
- """), - 'distros': distros, - 'examples': [dedent("""\ + """ + ), + "distros": distros, + "examples": [ + dedent( + """\ bootcmd: - echo 192.168.1.130 us.archive.ubuntu.com > /etc/hosts - [ cloud-init-per, once, mymkfs, mkfs, /dev/vdb ] - """)], - 'frequency': PER_ALWAYS, + """ + ) + ], + "frequency": PER_ALWAYS, } schema = { - 'type': 'object', - 'properties': { - 'bootcmd': { - 'type': 'array', - 'items': { - 'oneOf': [ - {'type': 'array', 'items': {'type': 'string'}}, - {'type': 'string'}] + "type": "object", + "properties": { + "bootcmd": { + "type": "array", + "items": { + "oneOf": [ + {"type": "array", "items": {"type": "string"}}, + {"type": "string"}, + ] }, - 'additionalItems': False, # Reject items of non-string non-list - 'additionalProperties': False, - 'minItems': 1, + "additionalItems": False, # Reject items of non-string non-list + "additionalProperties": False, + "minItems": 1, } - } + }, } __doc__ = get_meta_doc(meta, schema) # Supplement python help() @@ -81,8 +86,9 @@ def handle(name, cfg, cloud, log, _args): if "bootcmd" not in cfg: - log.debug(("Skipping module named %s," - " no 'bootcmd' key in configuration"), name) + log.debug( + "Skipping module named %s, no 'bootcmd' key in configuration", name + ) return validate_cloudconfig_schema(cfg, schema) @@ -99,11 +105,12 @@ def handle(name, cfg, cloud, log, _args): env = os.environ.copy() iid = cloud.get_instance_id() if iid: - env['INSTANCE_ID'] = str(iid) - cmd = ['/bin/sh', tmpf.name] + env["INSTANCE_ID"] = str(iid) + cmd = ["/bin/sh", tmpf.name] subp.subp(cmd, env=env, capture=False) except Exception: util.logexc(log, "Failed to run bootcmd module %s", name) raise + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_byobu.py b/cloudinit/config/cc_byobu.py index 9fdaeba18e9..53b6d0c8a20 100755 --- a/cloudinit/config/cc_byobu.py +++ b/cloudinit/config/cc_byobu.py @@ -38,11 +38,10 @@ byobu_by_default: """ +from cloudinit import subp, util from cloudinit.distros import ug_util -from cloudinit import subp -from cloudinit import util -distros = ['ubuntu', 'debian'] +distros = ["ubuntu", "debian"] def handle(name, cfg, cloud, log, args): @@ -58,8 +57,14 @@ def handle(name, cfg, cloud, log, args): if value == "user" or value == "system": value = "enable-%s" % value - valid = ("enable-user", "enable-system", "enable", - "disable-user", "disable-system", "disable") + valid = ( + "enable-user", + "enable-system", + "enable", + "disable-user", + "disable-system", + "disable", + ) if value not in valid: log.warning("Unknown value %s for byobu_by_default", value) @@ -81,13 +86,16 @@ def handle(name, cfg, cloud, log, args): (users, _groups) = ug_util.normalize_users_groups(cfg, cloud.distro) (user, _user_config) = ug_util.extract_default(users) if not user: - log.warning(("No default byobu user provided, " - "can not launch %s for the default user"), bl_inst) + log.warning( + "No default byobu user provided, " + "can not launch %s for the default user", + bl_inst, + ) else: - shcmd += " sudo -Hu \"%s\" byobu-launcher-%s" % (user, bl_inst) + shcmd += ' sudo -Hu "%s" byobu-launcher-%s' % (user, bl_inst) shcmd += " || X=$(($X+1)); " if mod_sys: - shcmd += "echo \"%s\" | debconf-set-selections" % dc_val + shcmd += 'echo "%s" | debconf-set-selections' % dc_val shcmd += " && dpkg-reconfigure byobu --frontend=noninteractive" shcmd += " || X=$(($X+1)); " @@ -96,4 +104,5 @@ def handle(name, cfg, cloud, log, args): log.debug("Setting byobu to %s", value) subp.subp(cmd, capture=False) + # vi: ts=4 expandtab diff --git 
a/cloudinit/config/cc_ca_certs.py b/cloudinit/config/cc_ca_certs.py index bd7bead9410..9de065ab0fb 100644 --- a/cloudinit/config/cc_ca_certs.py +++ b/cloudinit/config/cc_ca_certs.py @@ -41,28 +41,27 @@ import os -from cloudinit import subp -from cloudinit import util +from cloudinit import subp, util DEFAULT_CONFIG = { - 'ca_cert_path': '/usr/share/ca-certificates/', - 'ca_cert_filename': 'cloud-init-ca-certs.crt', - 'ca_cert_config': '/etc/ca-certificates.conf', - 'ca_cert_system_path': '/etc/ssl/certs/', - 'ca_cert_update_cmd': ['update-ca-certificates'] + "ca_cert_path": "/usr/share/ca-certificates/", + "ca_cert_filename": "cloud-init-ca-certs.crt", + "ca_cert_config": "/etc/ca-certificates.conf", + "ca_cert_system_path": "/etc/ssl/certs/", + "ca_cert_update_cmd": ["update-ca-certificates"], } DISTRO_OVERRIDES = { - 'rhel': { - 'ca_cert_path': '/usr/share/pki/ca-trust-source/', - 'ca_cert_filename': 'anchors/cloud-init-ca-certs.crt', - 'ca_cert_config': None, - 'ca_cert_system_path': '/etc/pki/ca-trust/', - 'ca_cert_update_cmd': ['update-ca-trust'] + "rhel": { + "ca_cert_path": "/usr/share/pki/ca-trust-source/", + "ca_cert_filename": "anchors/cloud-init-ca-certs.crt", + "ca_cert_config": None, + "ca_cert_system_path": "/etc/pki/ca-trust/", + "ca_cert_update_cmd": ["update-ca-trust"], } } -distros = ['alpine', 'debian', 'ubuntu', 'rhel'] +distros = ["alpine", "debian", "ubuntu", "rhel"] def _distro_ca_certs_configs(distro_name): @@ -72,8 +71,9 @@ def _distro_ca_certs_configs(distro_name): @returns: Dict of distro configurations for ca-cert. """ cfg = DISTRO_OVERRIDES.get(distro_name, DEFAULT_CONFIG) - cfg['ca_cert_full_path'] = os.path.join(cfg['ca_cert_path'], - cfg['ca_cert_filename']) + cfg["ca_cert_full_path"] = os.path.join( + cfg["ca_cert_path"], cfg["ca_cert_filename"] + ) return cfg @@ -83,7 +83,7 @@ def update_ca_certs(distro_cfg): @param distro_cfg: A hash providing _distro_ca_certs_configs function. """ - subp.subp(distro_cfg['ca_cert_update_cmd'], capture=False) + subp.subp(distro_cfg["ca_cert_update_cmd"], capture=False) def add_ca_certs(distro_cfg, certs): @@ -98,9 +98,9 @@ def add_ca_certs(distro_cfg, certs): return # First ensure they are strings... cert_file_contents = "\n".join([str(c) for c in certs]) - util.write_file(distro_cfg['ca_cert_full_path'], - cert_file_contents, - mode=0o644) + util.write_file( + distro_cfg["ca_cert_full_path"], cert_file_contents, mode=0o644 + ) update_cert_config(distro_cfg) @@ -110,23 +110,27 @@ def update_cert_config(distro_cfg): @param distro_cfg: A hash providing _distro_ca_certs_configs function. """ - if distro_cfg['ca_cert_config'] is None: + if distro_cfg["ca_cert_config"] is None: return - if os.stat(distro_cfg['ca_cert_config']).st_size == 0: + if os.stat(distro_cfg["ca_cert_config"]).st_size == 0: # If the CA_CERT_CONFIG file is empty (i.e. all existing # CA certs have been deleted) then simply output a single # line with the cloud-init cert filename. - out = "%s\n" % distro_cfg['ca_cert_filename'] + out = "%s\n" % distro_cfg["ca_cert_filename"] else: # Append cert filename to CA_CERT_CONFIG file. # We have to strip the content because blank lines in the file # causes subsequent entries to be ignored. 
(LP: #1077020) - orig = util.load_file(distro_cfg['ca_cert_config']) - cr_cont = '\n'.join([line for line in orig.splitlines() - if line != distro_cfg['ca_cert_filename']]) - out = "%s\n%s\n" % (cr_cont.rstrip(), - distro_cfg['ca_cert_filename']) - util.write_file(distro_cfg['ca_cert_config'], out, omode="wb") + orig = util.load_file(distro_cfg["ca_cert_config"]) + cr_cont = "\n".join( + [ + line + for line in orig.splitlines() + if line != distro_cfg["ca_cert_filename"] + ] + ) + out = "%s\n%s\n" % (cr_cont.rstrip(), distro_cfg["ca_cert_filename"]) + util.write_file(distro_cfg["ca_cert_config"], out, omode="wb") def remove_default_ca_certs(distro_name, distro_cfg): @@ -137,14 +141,15 @@ def remove_default_ca_certs(distro_name, distro_cfg): @param distro_name: String providing the distro class name. @param distro_cfg: A hash providing _distro_ca_certs_configs function. """ - util.delete_dir_contents(distro_cfg['ca_cert_path']) - util.delete_dir_contents(distro_cfg['ca_cert_system_path']) - util.write_file(distro_cfg['ca_cert_config'], "", mode=0o644) + util.delete_dir_contents(distro_cfg["ca_cert_path"]) + util.delete_dir_contents(distro_cfg["ca_cert_system_path"]) + util.write_file(distro_cfg["ca_cert_config"], "", mode=0o644) - if distro_name in ['debian', 'ubuntu']: + if distro_name in ["debian", "ubuntu"]: debconf_sel = ( - "ca-certificates ca-certificates/trust_new_crts " + "select no") - subp.subp(('debconf-set-selections', '-'), debconf_sel) + "ca-certificates ca-certificates/trust_new_crts " + "select no" + ) + subp.subp(("debconf-set-selections", "-"), debconf_sel) def handle(name, cfg, cloud, log, _args): @@ -159,11 +164,13 @@ def handle(name, cfg, cloud, log, _args): """ # If there isn't a ca-certs section in the configuration don't do anything if "ca-certs" not in cfg: - log.debug(("Skipping module named %s," - " no 'ca-certs' key in configuration"), name) + log.debug( + "Skipping module named %s, no 'ca-certs' key in configuration", + name, + ) return - ca_cert_cfg = cfg['ca-certs'] + ca_cert_cfg = cfg["ca-certs"] distro_cfg = _distro_ca_certs_configs(cloud.distro.name) # If there is a remove-defaults option set to true, remove the system @@ -183,4 +190,5 @@ def handle(name, cfg, cloud, log, _args): log.debug("Updating certificates") update_ca_certs(distro_cfg) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py index ed734d1cdc0..67889683373 100644 --- a/cloudinit/config/cc_chef.py +++ b/cloudinit/config/cc_chef.py @@ -13,87 +13,91 @@ import os from textwrap import dedent -from cloudinit import subp +from cloudinit import subp, temp_utils, templater, url_helper, util from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema -from cloudinit import templater -from cloudinit import temp_utils -from cloudinit import url_helper -from cloudinit import util from cloudinit.settings import PER_ALWAYS - RUBY_VERSION_DEFAULT = "1.8" -CHEF_DIRS = tuple([ - '/etc/chef', - '/var/log/chef', - '/var/lib/chef', - '/var/cache/chef', - '/var/backups/chef', - '/var/run/chef', -]) -REQUIRED_CHEF_DIRS = tuple([ - '/etc/chef', -]) +CHEF_DIRS = tuple( + [ + "/etc/chef", + "/var/log/chef", + "/var/lib/chef", + "/var/cache/chef", + "/var/backups/chef", + "/var/run/chef", + ] +) +REQUIRED_CHEF_DIRS = tuple( + [ + "/etc/chef", + ] +) # Used if fetching chef from a omnibus style package OMNIBUS_URL = "https://www.chef.io/chef/install.sh" OMNIBUS_URL_RETRIES = 5 -CHEF_VALIDATION_PEM_PATH = '/etc/chef/validation.pem' 
-CHEF_ENCRYPTED_DATA_BAG_PATH = '/etc/chef/encrypted_data_bag_secret' -CHEF_ENVIRONMENT = '_default' -CHEF_FB_PATH = '/etc/chef/firstboot.json' +CHEF_VALIDATION_PEM_PATH = "/etc/chef/validation.pem" +CHEF_ENCRYPTED_DATA_BAG_PATH = "/etc/chef/encrypted_data_bag_secret" +CHEF_ENVIRONMENT = "_default" +CHEF_FB_PATH = "/etc/chef/firstboot.json" CHEF_RB_TPL_DEFAULTS = { # These are ruby symbols... - 'ssl_verify_mode': ':verify_none', - 'log_level': ':info', + "ssl_verify_mode": ":verify_none", + "log_level": ":info", # These are not symbols... - 'log_location': '/var/log/chef/client.log', - 'validation_key': CHEF_VALIDATION_PEM_PATH, - 'validation_cert': None, - 'client_key': '/etc/chef/client.pem', - 'json_attribs': CHEF_FB_PATH, - 'file_cache_path': '/var/cache/chef', - 'file_backup_path': '/var/backups/chef', - 'pid_file': '/var/run/chef/client.pid', - 'show_time': True, - 'encrypted_data_bag_secret': None, + "log_location": "/var/log/chef/client.log", + "validation_key": CHEF_VALIDATION_PEM_PATH, + "validation_cert": None, + "client_key": "/etc/chef/client.pem", + "json_attribs": CHEF_FB_PATH, + "file_cache_path": "/var/cache/chef", + "file_backup_path": "/var/backups/chef", + "pid_file": "/var/run/chef/client.pid", + "show_time": True, + "encrypted_data_bag_secret": None, } -CHEF_RB_TPL_BOOL_KEYS = frozenset(['show_time']) -CHEF_RB_TPL_PATH_KEYS = frozenset([ - 'log_location', - 'validation_key', - 'client_key', - 'file_cache_path', - 'json_attribs', - 'pid_file', - 'encrypted_data_bag_secret', -]) +CHEF_RB_TPL_BOOL_KEYS = frozenset(["show_time"]) +CHEF_RB_TPL_PATH_KEYS = frozenset( + [ + "log_location", + "validation_key", + "client_key", + "file_cache_path", + "json_attribs", + "pid_file", + "encrypted_data_bag_secret", + ] +) CHEF_RB_TPL_KEYS = list(CHEF_RB_TPL_DEFAULTS.keys()) CHEF_RB_TPL_KEYS.extend(CHEF_RB_TPL_BOOL_KEYS) CHEF_RB_TPL_KEYS.extend(CHEF_RB_TPL_PATH_KEYS) -CHEF_RB_TPL_KEYS.extend([ - 'server_url', - 'node_name', - 'environment', - 'validation_name', - 'chef_license', -]) +CHEF_RB_TPL_KEYS.extend( + [ + "server_url", + "node_name", + "environment", + "validation_name", + "chef_license", + ] +) CHEF_RB_TPL_KEYS = frozenset(CHEF_RB_TPL_KEYS) -CHEF_RB_PATH = '/etc/chef/client.rb' -CHEF_EXEC_PATH = '/usr/bin/chef-client' -CHEF_EXEC_DEF_ARGS = tuple(['-d', '-i', '1800', '-s', '20']) +CHEF_RB_PATH = "/etc/chef/client.rb" +CHEF_EXEC_PATH = "/usr/bin/chef-client" +CHEF_EXEC_DEF_ARGS = tuple(["-d", "-i", "1800", "-s", "20"]) frequency = PER_ALWAYS distros = ["all"] meta = { - 'id': 'cc_chef', - 'name': 'Chef', - 'title': 'module that configures, starts and installs chef', - 'description': dedent("""\ + "id": "cc_chef", + "name": "Chef", + "title": "module that configures, starts and installs chef", + "description": dedent( + """\ This module enables chef to be installed (from packages, gems, or from omnibus). Before this occurs, chef configuration is written to disk (validation.pem, client.pem, firstboot.json, @@ -101,9 +105,12 @@ /var/log/chef and so-on). If configured, chef will be installed and started in either daemon or non-daemon mode. 
If run in non-daemon mode, post run actions are executed to do - finishing activities such as removing validation.pem."""), - 'distros': distros, - 'examples': [dedent(""" + finishing activities such as removing validation.pem.""" + ), + "distros": distros, + "examples": [ + dedent( + """ chef: directories: - /etc/chef @@ -124,180 +131,237 @@ omnibus_url_retries: 2 server_url: https://chef.yourorg.com:4000 ssl_verify_mode: :verify_peer - validation_name: yourorg-validator""")], - 'frequency': frequency, + validation_name: yourorg-validator""" + ) + ], + "frequency": frequency, } schema = { - 'type': 'object', - 'properties': { - 'chef': { - 'type': 'object', - 'additionalProperties': False, - 'properties': { - 'directories': { - 'type': 'array', - 'items': { - 'type': 'string' - }, - 'uniqueItems': True, - 'description': dedent("""\ + "type": "object", + "properties": { + "chef": { + "type": "object", + "additionalProperties": False, + "properties": { + "directories": { + "type": "array", + "items": {"type": "string"}, + "uniqueItems": True, + "description": dedent( + """\ Create the necessary directories for chef to run. By default, it creates the following directories: - {chef_dirs}""").format( + {chef_dirs}""" + ).format( chef_dirs="\n".join( [" - ``{}``".format(d) for d in CHEF_DIRS] ) - ) + ), }, - 'validation_cert': { - 'type': 'string', - 'description': dedent("""\ + "validation_cert": { + "type": "string", + "description": dedent( + """\ Optional string to be written to file validation_key. Special value ``system`` means set use existing file. - """) + """ + ), }, - 'validation_key': { - 'type': 'string', - 'default': CHEF_VALIDATION_PEM_PATH, - 'description': dedent("""\ + "validation_key": { + "type": "string", + "default": CHEF_VALIDATION_PEM_PATH, + "description": dedent( + """\ Optional path for validation_cert. default to - ``{}``.""".format(CHEF_VALIDATION_PEM_PATH)) + ``{}``.""".format( + CHEF_VALIDATION_PEM_PATH + ) + ), }, - 'firstboot_path': { - 'type': 'string', - 'default': CHEF_FB_PATH, - 'description': dedent("""\ + "firstboot_path": { + "type": "string", + "default": CHEF_FB_PATH, + "description": dedent( + """\ Path to write run_list and initial_attributes keys that should also be present in this configuration, defaults - to ``{}``.""".format(CHEF_FB_PATH)) + to ``{}``.""".format( + CHEF_FB_PATH + ) + ), }, - 'exec': { - 'type': 'boolean', - 'default': False, - 'description': dedent("""\ + "exec": { + "type": "boolean", + "default": False, + "description": dedent( + """\ define if we should run or not run chef (defaults to false, unless a gem installed is requested where this - will then default to true).""") + will then default to true).""" + ), }, - 'client_key': { - 'type': 'string', - 'default': CHEF_RB_TPL_DEFAULTS['client_key'], - 'description': dedent("""\ + "client_key": { + "type": "string", + "default": CHEF_RB_TPL_DEFAULTS["client_key"], + "description": dedent( + """\ Optional path for client_cert. default to - ``{}``.""".format(CHEF_RB_TPL_DEFAULTS['client_key'])) + ``{}``.""".format( + CHEF_RB_TPL_DEFAULTS["client_key"] + ) + ), }, - 'encrypted_data_bag_secret': { - 'type': 'string', - 'default': None, - 'description': dedent("""\ + "encrypted_data_bag_secret": { + "type": "string", + "default": None, + "description": dedent( + """\ Specifies the location of the secret key used by chef to encrypt data items. By default, this path is set to None, meaning that chef will have to look at the path ``{}`` for it. 
- """.format(CHEF_ENCRYPTED_DATA_BAG_PATH)) + """.format( + CHEF_ENCRYPTED_DATA_BAG_PATH + ) + ), }, - 'environment': { - 'type': 'string', - 'default': CHEF_ENVIRONMENT, - 'description': dedent("""\ + "environment": { + "type": "string", + "default": CHEF_ENVIRONMENT, + "description": dedent( + """\ Specifies which environment chef will use. By default, it will use the ``{}`` configuration. - """.format(CHEF_ENVIRONMENT)) + """.format( + CHEF_ENVIRONMENT + ) + ), }, - 'file_backup_path': { - 'type': 'string', - 'default': CHEF_RB_TPL_DEFAULTS['file_backup_path'], - 'description': dedent("""\ + "file_backup_path": { + "type": "string", + "default": CHEF_RB_TPL_DEFAULTS["file_backup_path"], + "description": dedent( + """\ Specifies the location in which backup files are stored. By default, it uses the ``{}`` location.""".format( - CHEF_RB_TPL_DEFAULTS['file_backup_path'])) + CHEF_RB_TPL_DEFAULTS["file_backup_path"] + ) + ), }, - 'file_cache_path': { - 'type': 'string', - 'default': CHEF_RB_TPL_DEFAULTS['file_cache_path'], - 'description': dedent("""\ + "file_cache_path": { + "type": "string", + "default": CHEF_RB_TPL_DEFAULTS["file_cache_path"], + "description": dedent( + """\ Specifies the location in which chef cache files will be saved. By default, it uses the ``{}`` location.""".format( - CHEF_RB_TPL_DEFAULTS['file_cache_path'])) + CHEF_RB_TPL_DEFAULTS["file_cache_path"] + ) + ), }, - 'json_attribs': { - 'type': 'string', - 'default': CHEF_FB_PATH, - 'description': dedent("""\ + "json_attribs": { + "type": "string", + "default": CHEF_FB_PATH, + "description": dedent( + """\ Specifies the location in which some chef json data is stored. By default, it uses the - ``{}`` location.""".format(CHEF_FB_PATH)) + ``{}`` location.""".format( + CHEF_FB_PATH + ) + ), }, - 'log_level': { - 'type': 'string', - 'default': CHEF_RB_TPL_DEFAULTS['log_level'], - 'description': dedent("""\ + "log_level": { + "type": "string", + "default": CHEF_RB_TPL_DEFAULTS["log_level"], + "description": dedent( + """\ Defines the level of logging to be stored in the log file. By default this value is set to ``{}``. - """.format(CHEF_RB_TPL_DEFAULTS['log_level'])) + """.format( + CHEF_RB_TPL_DEFAULTS["log_level"] + ) + ), }, - 'log_location': { - 'type': 'string', - 'default': CHEF_RB_TPL_DEFAULTS['log_location'], - 'description': dedent("""\ + "log_location": { + "type": "string", + "default": CHEF_RB_TPL_DEFAULTS["log_location"], + "description": dedent( + """\ Specifies the location of the chef lof file. By default, the location is specified at ``{}``.""".format( - CHEF_RB_TPL_DEFAULTS['log_location'])) + CHEF_RB_TPL_DEFAULTS["log_location"] + ) + ), }, - 'node_name': { - 'type': 'string', - 'description': dedent("""\ + "node_name": { + "type": "string", + "description": dedent( + """\ The name of the node to run. By default, we will - use th instance id as the node name.""") + use th instance id as the node name.""" + ), }, - 'omnibus_url': { - 'type': 'string', - 'default': OMNIBUS_URL, - 'description': dedent("""\ + "omnibus_url": { + "type": "string", + "default": OMNIBUS_URL, + "description": dedent( + """\ Omnibus URL if chef should be installed through Omnibus. 
By default, it uses the - ``{}``.""".format(OMNIBUS_URL)) + ``{}``.""".format( + OMNIBUS_URL + ) + ), }, - 'omnibus_url_retries': { - 'type': 'integer', - 'default': OMNIBUS_URL_RETRIES, - 'description': dedent("""\ + "omnibus_url_retries": { + "type": "integer", + "default": OMNIBUS_URL_RETRIES, + "description": dedent( + """\ The number of retries that will be attempted to reach - the Omnibus URL""") + the Omnibus URL""" + ), }, - 'omnibus_version': { - 'type': 'string', - 'description': dedent("""\ + "omnibus_version": { + "type": "string", + "description": dedent( + """\ Optional version string to require for omnibus - install.""") + install.""" + ), }, - 'pid_file': { - 'type': 'string', - 'default': CHEF_RB_TPL_DEFAULTS['pid_file'], - 'description': dedent("""\ + "pid_file": { + "type": "string", + "default": CHEF_RB_TPL_DEFAULTS["pid_file"], + "description": dedent( + """\ The location in which a process identification number (pid) is saved. By default, it saves in the ``{}`` location.""".format( - CHEF_RB_TPL_DEFAULTS['pid_file'])) + CHEF_RB_TPL_DEFAULTS["pid_file"] + ) + ), }, - 'server_url': { - 'type': 'string', - 'description': 'The URL for the chef server' + "server_url": { + "type": "string", + "description": "The URL for the chef server", }, - 'show_time': { - 'type': 'boolean', - 'default': True, - 'description': 'Show time in chef logs' + "show_time": { + "type": "boolean", + "default": True, + "description": "Show time in chef logs", }, - 'ssl_verify_mode': { - 'type': 'string', - 'default': CHEF_RB_TPL_DEFAULTS['ssl_verify_mode'], - 'description': dedent("""\ + "ssl_verify_mode": { + "type": "string", + "default": CHEF_RB_TPL_DEFAULTS["ssl_verify_mode"], + "description": dedent( + """\ Set the verify mode for HTTPS requests. We can have two possible values for this parameter: @@ -306,67 +370,76 @@ - ``:verify_peer``: Validate all SSL certificates. By default, the parameter is set as ``{}``. - """.format(CHEF_RB_TPL_DEFAULTS['ssl_verify_mode'])) + """.format( + CHEF_RB_TPL_DEFAULTS["ssl_verify_mode"] + ) + ), }, - 'validation_name': { - 'type': 'string', - 'description': dedent("""\ + "validation_name": { + "type": "string", + "description": dedent( + """\ The name of the chef-validator key that Chef Infra Client uses to access the Chef Infra Server during - the initial Chef Infra Client run.""") + the initial Chef Infra Client run.""" + ), }, - 'force_install': { - 'type': 'boolean', - 'default': False, - 'description': dedent("""\ + "force_install": { + "type": "boolean", + "default": False, + "description": dedent( + """\ If set to ``True``, forces chef installation, even - if it is already installed.""") + if it is already installed.""" + ), }, - 'initial_attributes': { - 'type': 'object', - 'items': { - 'type': 'string' - }, - 'description': dedent("""\ + "initial_attributes": { + "type": "object", + "items": {"type": "string"}, + "description": dedent( + """\ Specify a list of initial attributes used by the - cookbooks.""") + cookbooks.""" + ), }, - 'install_type': { - 'type': 'string', - 'default': 'packages', - 'description': dedent("""\ + "install_type": { + "type": "string", + "default": "packages", + "description": dedent( + """\ The type of installation for chef. It can be one of the following values: - ``packages`` - ``gems`` - - ``omnibus``""") + - ``omnibus``""" + ), }, - 'run_list': { - 'type': 'array', - 'items': { - 'type': 'string' - }, - 'description': 'A run list for a first boot json.' 
+ "run_list": { + "type": "array", + "items": {"type": "string"}, + "description": "A run list for a first boot json.", }, "chef_license": { - 'type': 'string', - 'description': dedent("""\ + "type": "string", + "description": dedent( + """\ string that indicates if user accepts or not license - related to some of chef products""") - } - } + related to some of chef products""" + ), + }, + }, } - } + }, } __doc__ = get_meta_doc(meta, schema) def post_run_chef(chef_cfg, log): - delete_pem = util.get_cfg_option_bool(chef_cfg, - 'delete_validation_post_exec', - default=False) + delete_pem = util.get_cfg_option_bool( + chef_cfg, "delete_validation_post_exec", default=False + ) if delete_pem and os.path.isfile(CHEF_VALIDATION_PEM_PATH): os.unlink(CHEF_VALIDATION_PEM_PATH) @@ -389,16 +462,20 @@ def get_template_params(iid, chef_cfg, log): else: params[k] = util.get_cfg_option_str(chef_cfg, k) # These ones are overwritten to be exact values... - params.update({ - 'generated_by': util.make_header(), - 'node_name': util.get_cfg_option_str(chef_cfg, 'node_name', - default=iid), - 'environment': util.get_cfg_option_str(chef_cfg, 'environment', - default='_default'), - # These two are mandatory... - 'server_url': chef_cfg['server_url'], - 'validation_name': chef_cfg['validation_name'], - }) + params.update( + { + "generated_by": util.make_header(), + "node_name": util.get_cfg_option_str( + chef_cfg, "node_name", default=iid + ), + "environment": util.get_cfg_option_str( + chef_cfg, "environment", default="_default" + ), + # These two are mandatory... + "server_url": chef_cfg["server_url"], + "validation_name": chef_cfg["validation_name"], + } + ) return params @@ -406,35 +483,38 @@ def handle(name, cfg, cloud, log, _args): """Handler method activated by cloud-init.""" # If there isn't a chef key in the configuration don't do anything - if 'chef' not in cfg: - log.debug(("Skipping module named %s," - " no 'chef' key in configuration"), name) + if "chef" not in cfg: + log.debug( + "Skipping module named %s, no 'chef' key in configuration", name + ) return validate_cloudconfig_schema(cfg, schema) - chef_cfg = cfg['chef'] + chef_cfg = cfg["chef"] # Ensure the chef directories we use exist - chef_dirs = util.get_cfg_option_list(chef_cfg, 'directories') + chef_dirs = util.get_cfg_option_list(chef_cfg, "directories") if not chef_dirs: chef_dirs = list(CHEF_DIRS) for d in itertools.chain(chef_dirs, REQUIRED_CHEF_DIRS): util.ensure_dir(d) - vkey_path = chef_cfg.get('validation_key', CHEF_VALIDATION_PEM_PATH) - vcert = chef_cfg.get('validation_cert') + vkey_path = chef_cfg.get("validation_key", CHEF_VALIDATION_PEM_PATH) + vcert = chef_cfg.get("validation_cert") # special value 'system' means do not overwrite the file # but still render the template to contain 'validation_key' if vcert: if vcert != "system": util.write_file(vkey_path, vcert) elif not os.path.isfile(vkey_path): - log.warning("chef validation_cert provided as 'system', but " - "validation_key path '%s' does not exist.", - vkey_path) + log.warning( + "chef validation_cert provided as 'system', but " + "validation_key path '%s' does not exist.", + vkey_path, + ) # Create the chef config from template - template_fn = cloud.get_template_filename('chef_client.rb') + template_fn = cloud.get_template_filename("chef_client.rb") if template_fn: iid = str(cloud.datasource.get_instance_id()) params = get_template_params(iid, chef_cfg, log) @@ -448,32 +528,33 @@ def handle(name, cfg, cloud, log, _args): util.ensure_dirs(param_paths) 
templater.render_to_file(template_fn, CHEF_RB_PATH, params) else: - log.warning("No template found, not rendering to %s", - CHEF_RB_PATH) + log.warning("No template found, not rendering to %s", CHEF_RB_PATH) # Set the firstboot json - fb_filename = util.get_cfg_option_str(chef_cfg, 'firstboot_path', - default=CHEF_FB_PATH) + fb_filename = util.get_cfg_option_str( + chef_cfg, "firstboot_path", default=CHEF_FB_PATH + ) if not fb_filename: log.info("First boot path empty, not writing first boot json file") else: initial_json = {} - if 'run_list' in chef_cfg: - initial_json['run_list'] = chef_cfg['run_list'] - if 'initial_attributes' in chef_cfg: - initial_attributes = chef_cfg['initial_attributes'] + if "run_list" in chef_cfg: + initial_json["run_list"] = chef_cfg["run_list"] + if "initial_attributes" in chef_cfg: + initial_attributes = chef_cfg["initial_attributes"] for k in list(initial_attributes.keys()): initial_json[k] = initial_attributes[k] util.write_file(fb_filename, json.dumps(initial_json)) # Try to install chef, if its not already installed... - force_install = util.get_cfg_option_bool(chef_cfg, - 'force_install', default=False) + force_install = util.get_cfg_option_bool( + chef_cfg, "force_install", default=False + ) installed = subp.is_exe(CHEF_EXEC_PATH) if not installed or force_install: run = install_chef(cloud, chef_cfg, log) elif installed: - run = util.get_cfg_option_bool(chef_cfg, 'exec', default=False) + run = util.get_cfg_option_bool(chef_cfg, "exec", default=False) else: run = False if run: @@ -482,18 +563,21 @@ def handle(name, cfg, cloud, log, _args): def run_chef(chef_cfg, log): - log.debug('Running chef-client') + log.debug("Running chef-client") cmd = [CHEF_EXEC_PATH] - if 'exec_arguments' in chef_cfg: - cmd_args = chef_cfg['exec_arguments'] + if "exec_arguments" in chef_cfg: + cmd_args = chef_cfg["exec_arguments"] if isinstance(cmd_args, (list, tuple)): cmd.extend(cmd_args) elif isinstance(cmd_args, str): cmd.append(cmd_args) else: - log.warning("Unknown type %s provided for chef" - " 'exec_arguments' expected list, tuple," - " or string", type(cmd_args)) + log.warning( + "Unknown type %s provided for chef" + " 'exec_arguments' expected list, tuple," + " or string", + type(cmd_args), + ) cmd.extend(CHEF_EXEC_DEF_ARGS) else: cmd.extend(CHEF_EXEC_DEF_ARGS) @@ -507,16 +591,16 @@ def subp_blob_in_tempfile(blob, *args, **kwargs): The 'args' argument to subp will be updated with the full path to the filename as the first argument. 
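
    For illustration, a hypothetical call (mirroring how
    install_chef_from_omnibus() below uses this helper; the version
    string is made up):

        subp_blob_in_tempfile(
            blob=installer_script,
            args=["-v", "17.0.42"],
            basename="chef-omnibus-install",
            capture=False,
        )

    writes the blob to <tmpdir>/chef-omnibus-install and then executes
    ['<tmpdir>/chef-omnibus-install', '-v', '17.0.42'].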
""" - basename = kwargs.pop('basename', "subp_blob") + basename = kwargs.pop("basename", "subp_blob") - if len(args) == 0 and 'args' not in kwargs: + if len(args) == 0 and "args" not in kwargs: args = [tuple()] # Use tmpdir over tmpfile to avoid 'text file busy' on execute with temp_utils.tempdir(needs_exe=True) as tmpd: tmpf = os.path.join(tmpd, basename) - if 'args' in kwargs: - kwargs['args'] = [tmpf] + list(kwargs['args']) + if "args" in kwargs: + kwargs["args"] = [tmpf] + list(kwargs["args"]) else: args = list(args) args[0] = [tmpf] + args[0] @@ -543,36 +627,39 @@ def install_chef_from_omnibus(url=None, retries=None, omnibus_version=None): if omnibus_version is None: args = [] else: - args = ['-v', omnibus_version] + args = ["-v", omnibus_version] content = url_helper.readurl(url=url, retries=retries).contents return subp_blob_in_tempfile( - blob=content, args=args, - basename='chef-omnibus-install', capture=False) + blob=content, args=args, basename="chef-omnibus-install", capture=False + ) def install_chef(cloud, chef_cfg, log): # If chef is not installed, we install chef based on 'install_type' - install_type = util.get_cfg_option_str(chef_cfg, 'install_type', - 'packages') - run = util.get_cfg_option_bool(chef_cfg, 'exec', default=False) + install_type = util.get_cfg_option_str( + chef_cfg, "install_type", "packages" + ) + run = util.get_cfg_option_bool(chef_cfg, "exec", default=False) if install_type == "gems": # This will install and run the chef-client from gems - chef_version = util.get_cfg_option_str(chef_cfg, 'version', None) - ruby_version = util.get_cfg_option_str(chef_cfg, 'ruby_version', - RUBY_VERSION_DEFAULT) + chef_version = util.get_cfg_option_str(chef_cfg, "version", None) + ruby_version = util.get_cfg_option_str( + chef_cfg, "ruby_version", RUBY_VERSION_DEFAULT + ) install_chef_from_gems(ruby_version, chef_version, cloud.distro) # Retain backwards compat, by preferring True instead of False # when not provided/overriden... 
- run = util.get_cfg_option_bool(chef_cfg, 'exec', default=True) - elif install_type == 'packages': + run = util.get_cfg_option_bool(chef_cfg, "exec", default=True) + elif install_type == "packages": # This will install and run the chef-client from packages - cloud.distro.install_packages(('chef',)) - elif install_type == 'omnibus': + cloud.distro.install_packages(("chef",)) + elif install_type == "omnibus": omnibus_version = util.get_cfg_option_str(chef_cfg, "omnibus_version") install_chef_from_omnibus( url=util.get_cfg_option_str(chef_cfg, "omnibus_url"), retries=util.get_cfg_option_int(chef_cfg, "omnibus_url_retries"), - omnibus_version=omnibus_version) + omnibus_version=omnibus_version, + ) else: log.warning("Unknown chef install type '%s'", install_type) run = False @@ -581,25 +668,47 @@ def install_chef(cloud, chef_cfg, log): def get_ruby_packages(version): # return a list of packages needed to install ruby at version - pkgs = ['ruby%s' % version, 'ruby%s-dev' % version] + pkgs = ["ruby%s" % version, "ruby%s-dev" % version] if version == "1.8": - pkgs.extend(('libopenssl-ruby1.8', 'rubygems1.8')) + pkgs.extend(("libopenssl-ruby1.8", "rubygems1.8")) return pkgs def install_chef_from_gems(ruby_version, chef_version, distro): distro.install_packages(get_ruby_packages(ruby_version)) - if not os.path.exists('/usr/bin/gem'): - util.sym_link('/usr/bin/gem%s' % ruby_version, '/usr/bin/gem') - if not os.path.exists('/usr/bin/ruby'): - util.sym_link('/usr/bin/ruby%s' % ruby_version, '/usr/bin/ruby') + if not os.path.exists("/usr/bin/gem"): + util.sym_link("/usr/bin/gem%s" % ruby_version, "/usr/bin/gem") + if not os.path.exists("/usr/bin/ruby"): + util.sym_link("/usr/bin/ruby%s" % ruby_version, "/usr/bin/ruby") if chef_version: - subp.subp(['/usr/bin/gem', 'install', 'chef', - '-v %s' % chef_version, '--no-ri', - '--no-rdoc', '--bindir', '/usr/bin', '-q'], capture=False) + subp.subp( + [ + "/usr/bin/gem", + "install", + "chef", + "-v %s" % chef_version, + "--no-ri", + "--no-rdoc", + "--bindir", + "/usr/bin", + "-q", + ], + capture=False, + ) else: - subp.subp(['/usr/bin/gem', 'install', 'chef', - '--no-ri', '--no-rdoc', '--bindir', - '/usr/bin', '-q'], capture=False) + subp.subp( + [ + "/usr/bin/gem", + "install", + "chef", + "--no-ri", + "--no-rdoc", + "--bindir", + "/usr/bin", + "-q", + ], + capture=False, + ) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_debug.py b/cloudinit/config/cc_debug.py index 4d5a6aa27d7..d09fc129a1d 100644 --- a/cloudinit/config/cc_debug.py +++ b/cloudinit/config/cc_debug.py @@ -30,18 +30,16 @@ import copy from io import StringIO -from cloudinit import type_utils -from cloudinit import util -from cloudinit import safeyaml +from cloudinit import safeyaml, type_utils, util -SKIP_KEYS = frozenset(['log_cfgs']) +SKIP_KEYS = frozenset(["log_cfgs"]) def _make_header(text): header = StringIO() header.write("-" * 80) header.write("\n") - header.write(text.center(80, ' ')) + header.write(text.center(80, " ")) header.write("\n") header.write("-" * 80) header.write("\n") @@ -56,17 +54,16 @@ def _dumps(obj): def handle(name, cfg, cloud, log, args): """Handler method activated by cloud-init.""" - verbose = util.get_cfg_by_path(cfg, ('debug', 'verbose'), default=True) + verbose = util.get_cfg_by_path(cfg, ("debug", "verbose"), default=True) if args: # if args are provided (from cmdline) then explicitly set verbose out_file = args[0] verbose = True else: - out_file = util.get_cfg_by_path(cfg, ('debug', 'output')) + out_file = util.get_cfg_by_path(cfg, ("debug", "output")) 
if not verbose: - log.debug(("Skipping module named %s," - " verbose printing disabled"), name) + log.debug("Skipping module named %s, verbose printing disabled", name) return # Clean out some keys that we just don't care about showing... dump_cfg = copy.deepcopy(cfg) @@ -85,8 +82,9 @@ def handle(name, cfg, cloud, log, args): to_print.write(_dumps(cloud.datasource.metadata)) to_print.write("\n") to_print.write(_make_header("Misc")) - to_print.write("Datasource: %s\n" % - (type_utils.obj_name(cloud.datasource))) + to_print.write( + "Datasource: %s\n" % (type_utils.obj_name(cloud.datasource)) + ) to_print.write("Distro: %s\n" % (type_utils.obj_name(cloud.distro))) to_print.write("Hostname: %s\n" % (cloud.get_hostname(True))) to_print.write("Instance ID: %s\n" % (cloud.get_instance_id())) @@ -102,4 +100,5 @@ def handle(name, cfg, cloud, log, args): else: util.multi_log("".join(content_to_file), console=True, stderr=False) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_disable_ec2_metadata.py b/cloudinit/config/cc_disable_ec2_metadata.py index 61c769b3f51..5e528e810f0 100644 --- a/cloudinit/config/cc_disable_ec2_metadata.py +++ b/cloudinit/config/cc_disable_ec2_metadata.py @@ -26,32 +26,35 @@ disable_ec2_metadata: """ -from cloudinit import subp -from cloudinit import util - +from cloudinit import subp, util from cloudinit.settings import PER_ALWAYS frequency = PER_ALWAYS -REJECT_CMD_IF = ['route', 'add', '-host', '169.254.169.254', 'reject'] -REJECT_CMD_IP = ['ip', 'route', 'add', 'prohibit', '169.254.169.254'] +REJECT_CMD_IF = ["route", "add", "-host", "169.254.169.254", "reject"] +REJECT_CMD_IP = ["ip", "route", "add", "prohibit", "169.254.169.254"] def handle(name, cfg, _cloud, log, _args): disabled = util.get_cfg_option_bool(cfg, "disable_ec2_metadata", False) if disabled: reject_cmd = None - if subp.which('ip'): + if subp.which("ip"): reject_cmd = REJECT_CMD_IP - elif subp.which('ifconfig'): + elif subp.which("ifconfig"): reject_cmd = REJECT_CMD_IF else: - log.error(('Neither "route" nor "ip" command found, unable to ' - 'manipulate routing table')) + log.error( + 'Neither "route" nor "ip" command found, unable to ' + "manipulate routing table" + ) return subp.subp(reject_cmd, capture=False) else: - log.debug(("Skipping module named %s," - " disabling the ec2 route not enabled"), name) + log.debug( + "Skipping module named %s, disabling the ec2 route not enabled", + name, + ) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py index 440f05f176a..4d527c7a5b7 100644 --- a/cloudinit/config/cc_disk_setup.py +++ b/cloudinit/config/cc_disk_setup.py @@ -100,13 +100,13 @@ replace_fs: """ -from cloudinit.settings import PER_INSTANCE -from cloudinit import util -from cloudinit import subp import logging import os import shlex +from cloudinit import subp, util +from cloudinit.settings import PER_INSTANCE + frequency = PER_INSTANCE # Define the commands to use @@ -118,7 +118,7 @@ PARTPROBE_CMD = subp.which("partprobe") WIPEFS_CMD = subp.which("wipefs") -LANG_C_ENV = {'LANG': 'C'} +LANG_C_ENV = {"LANG": "C"} LOG = logging.getLogger(__name__) @@ -145,9 +145,12 @@ def alias_to_device(cand): try: log.debug("Creating new partition table/disk") - util.log_time(logfunc=LOG.debug, - msg="Creating partition on %s" % disk, - func=mkpart, args=(disk, definition)) + util.log_time( + logfunc=LOG.debug, + msg="Creating partition on %s" % disk, + func=mkpart, + args=(disk, definition), + ) except Exception as e: util.logexc(LOG, "Failed partitioning 
operation\n%s" % e) @@ -162,10 +165,13 @@ def alias_to_device(cand): try: log.debug("Creating new filesystem.") - device = definition.get('device') - util.log_time(logfunc=LOG.debug, - msg="Creating fs for %s" % device, - func=mkfs, args=(definition,)) + device = definition.get("device") + util.log_time( + logfunc=LOG.debug, + msg="Creating fs for %s" % device, + func=mkfs, + args=(definition,), + ) except Exception as e: util.logexc(LOG, "Failed during filesystem operation\n%s" % e) @@ -178,16 +184,22 @@ def update_disk_setup_devices(disk_setup, tformer): if transformed is None or transformed == origname: continue if transformed in disk_setup: - LOG.info("Replacing %s in disk_setup for translation of %s", - origname, transformed) + LOG.info( + "Replacing %s in disk_setup for translation of %s", + origname, + transformed, + ) del disk_setup[transformed] disk_setup[transformed] = disk_setup[origname] if isinstance(disk_setup[transformed], dict): - disk_setup[transformed]['_origname'] = origname + disk_setup[transformed]["_origname"] = origname del disk_setup[origname] - LOG.debug("updated disk_setup device entry '%s' to '%s'", - origname, transformed) + LOG.debug( + "updated disk_setup device entry '%s' to '%s'", + origname, + transformed, + ) def update_fs_setup_devices(disk_setup, tformer): @@ -198,7 +210,7 @@ def update_fs_setup_devices(disk_setup, tformer): LOG.warning("entry in disk_setup not a dict: %s", definition) continue - origname = definition.get('device') + origname = definition.get("device") if origname is None: continue @@ -208,19 +220,24 @@ def update_fs_setup_devices(disk_setup, tformer): tformed = tformer(dev) if tformed is not None: dev = tformed - LOG.debug("%s is mapped to disk=%s part=%s", - origname, tformed, part) - definition['_origname'] = origname - definition['device'] = tformed + LOG.debug( + "%s is mapped to disk=%s part=%s", origname, tformed, part + ) + definition["_origname"] = origname + definition["device"] = tformed if part: # In origname with .N, N overrides 'partition' key. - if 'partition' in definition: - LOG.warning("Partition '%s' from dotted device name '%s' " - "overrides 'partition' key in %s", part, origname, - definition) - definition['_partition'] = definition['partition'] - definition['partition'] = part + if "partition" in definition: + LOG.warning( + "Partition '%s' from dotted device name '%s' " + "overrides 'partition' key in %s", + part, + origname, + definition, + ) + definition["_partition"] = definition["partition"] + definition["partition"] = part def value_splitter(values, start=None): @@ -232,7 +249,7 @@ def value_splitter(values, start=None): if start: _values = _values[start:] - for key, value in [x.split('=') for x in _values]: + for key, value in [x.split("=") for x in _values]: yield key, value @@ -251,11 +268,16 @@ def enumerate_disk(device, nodeps=False): name: the device name, i.e. 
sda """ - lsblk_cmd = [LSBLK_CMD, '--pairs', '--output', 'NAME,TYPE,FSTYPE,LABEL', - device] + lsblk_cmd = [ + LSBLK_CMD, + "--pairs", + "--output", + "NAME,TYPE,FSTYPE,LABEL", + device, + ] if nodeps: - lsblk_cmd.append('--nodeps') + lsblk_cmd.append("--nodeps") info = None try: @@ -269,10 +291,10 @@ def enumerate_disk(device, nodeps=False): for part in parts: d = { - 'name': None, - 'type': None, - 'fstype': None, - 'label': None, + "name": None, + "type": None, + "fstype": None, + "label": None, } for key, value in value_splitter(part): @@ -303,9 +325,9 @@ def is_device_valid(name, partition=False): LOG.warning("Query against device %s failed", name) return False - if partition and d_type == 'part': + if partition and d_type == "part": return True - elif not partition and d_type == 'disk': + elif not partition and d_type == "disk": return True return False @@ -321,7 +343,7 @@ def check_fs(device): """ out, label, fs_type, uuid = None, None, None, None - blkid_cmd = [BLKID_CMD, '-c', '/dev/null', device] + blkid_cmd = [BLKID_CMD, "-c", "/dev/null", device] try: out, _err = subp.subp(blkid_cmd, rcs=[0, 2]) except Exception as e: @@ -332,11 +354,11 @@ def check_fs(device): if out: if len(out.splitlines()) == 1: for key, value in value_splitter(out, start=1): - if key.lower() == 'label': + if key.lower() == "label": label = value - elif key.lower() == 'type': + elif key.lower() == "type": fs_type = value - elif key.lower() == 'uuid': + elif key.lower() == "uuid": uuid = value return label, fs_type, uuid @@ -350,8 +372,14 @@ def is_filesystem(device): return fs_type -def find_device_node(device, fs_type=None, label=None, valid_targets=None, - label_match=True, replace_fs=None): +def find_device_node( + device, + fs_type=None, + label=None, + valid_targets=None, + label_match=True, + replace_fs=None, +): """ Find a device that is either matches the spec, or the first @@ -366,31 +394,32 @@ def find_device_node(device, fs_type=None, label=None, valid_targets=None, label = "" if not valid_targets: - valid_targets = ['disk', 'part'] + valid_targets = ["disk", "part"] raw_device_used = False for d in enumerate_disk(device): - if d['fstype'] == replace_fs and label_match is False: + if d["fstype"] == replace_fs and label_match is False: # We found a device where we want to replace the FS - return ('/dev/%s' % d['name'], False) + return ("/dev/%s" % d["name"], False) - if (d['fstype'] == fs_type and - ((label_match and d['label'] == label) or not label_match)): + if d["fstype"] == fs_type and ( + (label_match and d["label"] == label) or not label_match + ): # If we find a matching device, we return that - return ('/dev/%s' % d['name'], True) + return ("/dev/%s" % d["name"], True) - if d['type'] in valid_targets: + if d["type"] in valid_targets: - if d['type'] != 'disk' or d['fstype']: + if d["type"] != "disk" or d["fstype"]: raw_device_used = True - if d['type'] == 'disk': + if d["type"] == "disk": # Skip the raw disk, its the default pass - elif not d['fstype']: - return ('/dev/%s' % d['name'], False) + elif not d["fstype"]: + return ("/dev/%s" % d["name"], False) if not raw_device_used: return (device, False) @@ -433,7 +462,7 @@ def get_dyn_func(*args): if len(args) < 2: raise Exception("Unable to determine dynamic funcation name") - func_name = (args[0] % args[1]) + func_name = args[0] % args[1] func_args = args[2:] try: @@ -448,8 +477,8 @@ def get_dyn_func(*args): def get_hdd_size(device): try: - size_in_bytes, _ = subp.subp([BLKDEV_CMD, '--getsize64', device]) - sector_size, _ = 
subp.subp([BLKDEV_CMD, '--getss', device]) + size_in_bytes, _ = subp.subp([BLKDEV_CMD, "--getsize64", device]) + sector_size, _ = subp.subp([BLKDEV_CMD, "--getss", device]) except Exception as e: raise Exception("Failed to get %s size\n%s" % (device, e)) from e @@ -481,13 +510,13 @@ def check_partition_mbr_layout(device, layout): if device in _line[0]: # We don't understand extended partitions yet - if _line[-1].lower() in ['extended', 'empty']: + if _line[-1].lower() in ["extended", "empty"]: continue # Find the partition types type_label = None for x in sorted(range(1, len(_line)), reverse=True): - if _line[x].isdigit() and _line[x] != '/': + if _line[x].isdigit() and _line[x] != "/": type_label = _line[x] break @@ -496,7 +525,7 @@ def check_partition_mbr_layout(device, layout): def check_partition_gpt_layout(device, layout): - prt_cmd = [SGDISK_CMD, '-p', device] + prt_cmd = [SGDISK_CMD, "-p", device] try: out, _err = subp.subp(prt_cmd, update_env=LANG_C_ENV) except Exception as e: @@ -522,7 +551,7 @@ def check_partition_gpt_layout(device, layout): # Number Start (sector) End (sector) Size Code Name # 1 2048 206847 100.0 MiB 0700 Microsoft basic data for line in out_lines: - if line.strip().startswith('Number'): + if line.strip().startswith("Number"): break codes = [line.strip().split()[5] for line in out_lines] @@ -545,10 +574,16 @@ def check_partition_layout(table_type, device, layout): function called check_partition_%s_layout """ found_layout = get_dyn_func( - "check_partition_%s_layout", table_type, device, layout) - - LOG.debug("called check_partition_%s_layout(%s, %s), returned: %s", - table_type, device, layout, found_layout) + "check_partition_%s_layout", table_type, device, layout + ) + + LOG.debug( + "called check_partition_%s_layout(%s, %s), returned: %s", + table_type, + device, + layout, + found_layout, + ) if isinstance(layout, bool): # if we are using auto partitioning, or "True" be happy # if a single partition exists. @@ -559,10 +594,12 @@ def check_partition_layout(table_type, device, layout): elif len(found_layout) == len(layout): # This just makes sure that the number of requested # partitions and the type labels are right - layout_types = [str(x[1]) if isinstance(x, (tuple, list)) else None - for x in layout] - LOG.debug("Layout types=%s. Found types=%s", - layout_types, found_layout) + layout_types = [ + str(x[1]) if isinstance(x, (tuple, list)) else None for x in layout + ] + LOG.debug( + "Layout types=%s. 
Found types=%s", layout_types, found_layout + ) for itype, ftype in zip(layout_types, found_layout): if itype is not None and str(ftype) != str(itype): return False @@ -588,8 +625,9 @@ def get_partition_mbr_layout(size, layout): # Create a single partition return "0," - if ((len(layout) == 0 and isinstance(layout, list)) or - not isinstance(layout, list)): + if (len(layout) == 0 and isinstance(layout, list)) or not isinstance( + layout, list + ): raise Exception("Partition layout is invalid") last_part_num = len(layout) @@ -617,8 +655,10 @@ def get_partition_mbr_layout(size, layout): sfdisk_definition = "\n".join(part_definition) if len(part_definition) > 4: - raise Exception("Calculated partition definition is too big\n%s" % - sfdisk_definition) + raise Exception( + "Calculated partition definition is too big\n%s" + % sfdisk_definition + ) return sfdisk_definition @@ -632,14 +672,15 @@ def get_partition_gpt_layout(size, layout): if isinstance(partition, list): if len(partition) != 2: raise Exception( - "Partition was incorrectly defined: %s" % partition) + "Partition was incorrectly defined: %s" % partition + ) percent, partition_type = partition else: percent = partition partition_type = None part_size = int(float(size) * (float(percent) / 100)) - partition_specs.append((partition_type, [0, '+{}'.format(part_size)])) + partition_specs.append((partition_type, [0, "+{}".format(part_size)])) # The last partition should use up all remaining space partition_specs[-1][-1][-1] = 0 @@ -649,7 +690,7 @@ def get_partition_gpt_layout(size, layout): def purge_disk_ptable(device): # wipe the first and last megabyte of a disk (or file) # gpt stores partition table both at front and at end. - null = '\0' + null = "\0" start_len = 1024 * 1024 end_len = 1024 * 1024 with open(device, "rb+") as fp: @@ -668,14 +709,14 @@ def purge_disk(device): # wipe any file systems first for d in enumerate_disk(device): - if d['type'] not in ["disk", "crypt"]: - wipefs_cmd = [WIPEFS_CMD, "--all", "/dev/%s" % d['name']] + if d["type"] not in ["disk", "crypt"]: + wipefs_cmd = [WIPEFS_CMD, "--all", "/dev/%s" % d["name"]] try: - LOG.info("Purging filesystem on /dev/%s", d['name']) + LOG.info("Purging filesystem on /dev/%s", d["name"]) subp.subp(wipefs_cmd) except Exception as e: raise Exception( - "Failed FS purge of /dev/%s" % d['name'] + "Failed FS purge of /dev/%s" % d["name"] ) from e purge_disk_ptable(device) @@ -701,7 +742,7 @@ def read_parttbl(device): if PARTPROBE_CMD is not None: probe_cmd = [PARTPROBE_CMD, device] else: - probe_cmd = [BLKDEV_CMD, '--rereadpt', device] + probe_cmd = [BLKDEV_CMD, "--rereadpt", device] util.udevadm_settle() try: subp.subp(probe_cmd) @@ -730,17 +771,24 @@ def exec_mkpart_mbr(device, layout): def exec_mkpart_gpt(device, layout): try: - subp.subp([SGDISK_CMD, '-Z', device]) + subp.subp([SGDISK_CMD, "-Z", device]) for index, (partition_type, (start, end)) in enumerate(layout): index += 1 - subp.subp([SGDISK_CMD, - '-n', '{}:{}:{}'.format(index, start, end), device]) + subp.subp( + [ + SGDISK_CMD, + "-n", + "{}:{}:{}".format(index, start, end), + device, + ] + ) if partition_type is not None: # convert to a 4 char (or more) string right padded with 0 # 82 -> 8200. 
'Linux' -> 'Linux' pinput = str(partition_type).ljust(4, "0") subp.subp( - [SGDISK_CMD, '-t', '{}:{}'.format(index, pinput), device]) + [SGDISK_CMD, "-t", "{}:{}".format(index, pinput), device] + ) except Exception: LOG.warning("Failed to partition device %s", device) raise @@ -766,8 +814,10 @@ def assert_and_settle_device(device): if not os.path.exists(device): util.udevadm_settle() if not os.path.exists(device): - raise RuntimeError("Device %s did not exist and was not created " - "with a udevadm settle." % device) + raise RuntimeError( + "Device %s did not exist and was not created " + "with a udevadm settle." % device + ) # Whether or not the device existed above, it is possible that udev # events that would populate udev database (for reading by lsdname) have @@ -794,9 +844,9 @@ def mkpart(device, definition): device = os.path.realpath(device) LOG.debug("Checking values for %s definition", device) - overwrite = definition.get('overwrite', False) - layout = definition.get('layout', False) - table_type = definition.get('table_type', 'mbr') + overwrite = definition.get("overwrite", False) + layout = definition.get("layout", False) + table_type = definition.get("table_type", "mbr") # Check if the default device is a partition or not LOG.debug("Checking against default devices") @@ -809,7 +859,8 @@ def mkpart(device, definition): LOG.debug("Checking if device %s is a valid device", device) if not is_device_valid(device): raise Exception( - 'Device {device} is not a disk device!'.format(device=device)) + "Device {device} is not a disk device!".format(device=device) + ) # Remove the partition table entries if isinstance(layout, str) and layout.lower() == "remove": @@ -845,21 +896,21 @@ def lookup_force_flag(fs): A force flag might be -F or -F, this look it up """ flags = { - 'ext': '-F', - 'btrfs': '-f', - 'xfs': '-f', - 'reiserfs': '-f', - 'swap': '-f', + "ext": "-F", + "btrfs": "-f", + "xfs": "-f", + "reiserfs": "-f", + "swap": "-f", } - if 'ext' in fs.lower(): - fs = 'ext' + if "ext" in fs.lower(): + fs = "ext" if fs.lower() in flags: return flags[fs] LOG.warning("Force flag for %s is unknown.", fs) - return '' + return "" def mkfs(fs_cfg): @@ -883,14 +934,14 @@ def mkfs(fs_cfg): When 'cmd' is provided then no other parameter is required. 
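
    For illustration, a hypothetical fs_setup entry (device and label
    are made up):

        {
            "label": "data",
            "filesystem": "ext4",
            "device": "/dev/xvdb",
            "partition": "any",
            "overwrite": False,
        }

    With partition set to 'any', the first partition on /dev/xvdb whose
    filesystem type already matches is left as-is (since overwrite is
    False); otherwise the first unformatted partition is formatted as
    ext4 and labelled 'data'.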
""" - label = fs_cfg.get('label') - device = fs_cfg.get('device') - partition = str(fs_cfg.get('partition', 'any')) - fs_type = fs_cfg.get('filesystem') - fs_cmd = fs_cfg.get('cmd', []) - fs_opts = fs_cfg.get('extra_opts', []) - fs_replace = fs_cfg.get('replace_fs', False) - overwrite = fs_cfg.get('overwrite', False) + label = fs_cfg.get("label") + device = fs_cfg.get("device") + partition = str(fs_cfg.get("partition", "any")) + fs_type = fs_cfg.get("filesystem") + fs_cmd = fs_cfg.get("cmd", []) + fs_opts = fs_cfg.get("extra_opts", []) + fs_replace = fs_cfg.get("replace_fs", False) + overwrite = fs_cfg.get("overwrite", False) # ensure that we get a real device rather than a symbolic link assert_and_settle_device(device) @@ -903,14 +954,19 @@ def mkfs(fs_cfg): # Handle manual definition of partition if partition.isdigit(): device = "%s%s" % (device, partition) - LOG.debug("Manual request of partition %s for %s", - partition, device) + LOG.debug( + "Manual request of partition %s for %s", partition, device + ) # Check to see if the fs already exists LOG.debug("Checking device %s", device) check_label, check_fstype, _ = check_fs(device) - LOG.debug("Device '%s' has check_label='%s' check_fstype=%s", - device, check_label, check_fstype) + LOG.debug( + "Device '%s' has check_label='%s' check_fstype=%s", + device, + check_label, + check_fstype, + ) if check_label == label and check_fstype == fs_type: LOG.debug("Existing file system found at %s", device) @@ -924,19 +980,23 @@ def mkfs(fs_cfg): else: LOG.debug("Device %s is cleared for formating", device) - elif partition and str(partition).lower() in ('auto', 'any'): + elif partition and str(partition).lower() in ("auto", "any"): # For auto devices, we match if the filesystem does exist odevice = device LOG.debug("Identifying device to create %s filesytem on", label) # any mean pick the first match on the device with matching fs_type label_match = True - if partition.lower() == 'any': + if partition.lower() == "any": label_match = False - device, reuse = find_device_node(device, fs_type=fs_type, label=label, - label_match=label_match, - replace_fs=fs_replace) + device, reuse = find_device_node( + device, + fs_type=fs_type, + label=label, + label_match=label_match, + replace_fs=fs_replace, + ) LOG.debug("Automatic device for %s identified as %s", odevice, device) if reuse: @@ -947,18 +1007,25 @@ def mkfs(fs_cfg): LOG.debug("Replacing file system on %s as instructed.", device) if not device: - LOG.debug("No device aviable that matches request. " - "Skipping fs creation for %s", fs_cfg) + LOG.debug( + "No device aviable that matches request. " + "Skipping fs creation for %s", + fs_cfg, + ) return - elif not partition or str(partition).lower() == 'none': + elif not partition or str(partition).lower() == "none": LOG.debug("Using the raw device to place filesystem %s on", label) else: LOG.debug("Error in device identification handling.") return - LOG.debug("File system type '%s' with label '%s' will be created on %s", - fs_type, label, device) + LOG.debug( + "File system type '%s' with label '%s' will be created on %s", + fs_type, + label, + device, + ) # Make sure the device is defined if not device: @@ -969,26 +1036,29 @@ def mkfs(fs_cfg): if not (fs_type or fs_cmd): raise Exception( "No way to create filesystem '{label}'. 
fs_type or fs_cmd " - "must be set.".format(label=label)) + "must be set.".format(label=label) + ) # Create the commands shell = False if fs_cmd: - fs_cmd = fs_cfg['cmd'] % { - 'label': label, - 'filesystem': fs_type, - 'device': device, + fs_cmd = fs_cfg["cmd"] % { + "label": label, + "filesystem": fs_type, + "device": device, } shell = True if overwrite: LOG.warning( "fs_setup:overwrite ignored because cmd was specified: %s", - fs_cmd) + fs_cmd, + ) if fs_opts: LOG.warning( "fs_setup:extra_opts ignored because cmd was specified: %s", - fs_cmd) + fs_cmd, + ) else: # Find the mkfs command mkfs_cmd = subp.which("mkfs.%s" % fs_type) @@ -996,8 +1066,11 @@ def mkfs(fs_cfg): mkfs_cmd = subp.which("mk%s" % fs_type) if not mkfs_cmd: - LOG.warning("Cannot create fstype '%s'. No mkfs.%s command", - fs_type, fs_type) + LOG.warning( + "Cannot create fstype '%s'. No mkfs.%s command", + fs_type, + fs_type, + ) return fs_cmd = [mkfs_cmd, device] @@ -1022,4 +1095,5 @@ def mkfs(fs_cfg): except Exception as e: raise Exception("Failed to exec of '%s':\n%s" % (fs_cmd, e)) from e + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_emit_upstart.py b/cloudinit/config/cc_emit_upstart.py index 40eee052d4c..a928082bcc3 100644 --- a/cloudinit/config/cc_emit_upstart.py +++ b/cloudinit/config/cc_emit_upstart.py @@ -24,12 +24,12 @@ import os from cloudinit import log as logging -from cloudinit.settings import PER_ALWAYS from cloudinit import subp +from cloudinit.settings import PER_ALWAYS frequency = PER_ALWAYS -distros = ['ubuntu', 'debian'] +distros = ["ubuntu", "debian"] LOG = logging.getLogger(__name__) @@ -39,15 +39,18 @@ def is_upstart_system(): return False myenv = os.environ.copy() - if 'UPSTART_SESSION' in myenv: - del myenv['UPSTART_SESSION'] - check_cmd = ['initctl', 'version'] + if "UPSTART_SESSION" in myenv: + del myenv["UPSTART_SESSION"] + check_cmd = ["initctl", "version"] try: (out, _err) = subp.subp(check_cmd, env=myenv) - return 'upstart' in out + return "upstart" in out except subp.ProcessExecutionError as e: - LOG.debug("'%s' returned '%s', not using upstart", - ' '.join(check_cmd), e.exit_code) + LOG.debug( + "'%s' returned '%s', not using upstart", + " ".join(check_cmd), + e.exit_code, + ) return False @@ -56,7 +59,7 @@ def handle(name, _cfg, cloud, log, args): if not event_names: # Default to the 'cloud-config' # event for backwards compat. - event_names = ['cloud-config'] + event_names = ["cloud-config"] if not is_upstart_system(): log.debug("not upstart system, '%s' disabled", name) @@ -64,11 +67,12 @@ def handle(name, _cfg, cloud, log, args): cfgpath = cloud.paths.get_ipath_cur("cloud_config") for n in event_names: - cmd = ['initctl', 'emit', str(n), 'CLOUD_CFG=%s' % cfgpath] + cmd = ["initctl", "emit", str(n), "CLOUD_CFG=%s" % cfgpath] try: subp.subp(cmd) except Exception as e: # TODO(harlowja), use log exception from utils?? 
log.warning("Emission of upstart event %s failed due to: %s", n, e) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_fan.py b/cloudinit/config/cc_fan.py index 91f50e2218f..50a817440be 100644 --- a/cloudinit/config/cc_fan.py +++ b/cloudinit/config/cc_fan.py @@ -38,60 +38,62 @@ """ from cloudinit import log as logging +from cloudinit import subp, util from cloudinit.settings import PER_INSTANCE -from cloudinit import subp -from cloudinit import util LOG = logging.getLogger(__name__) frequency = PER_INSTANCE BUILTIN_CFG = { - 'config': None, - 'config_path': '/etc/network/fan', + "config": None, + "config_path": "/etc/network/fan", } def stop_update_start(distro, service, config_file, content): try: - distro.manage_service('stop', service) + distro.manage_service("stop", service) stop_failed = False except subp.ProcessExecutionError as e: stop_failed = True LOG.warning("failed to stop %s: %s", service, e) - if not content.endswith('\n'): - content += '\n' + if not content.endswith("\n"): + content += "\n" util.write_file(config_file, content, omode="w") try: - distro.manage_service('start', service) + distro.manage_service("start", service) if stop_failed: LOG.warning("success: %s started", service) except subp.ProcessExecutionError as e: LOG.warning("failed to start %s: %s", service, e) - distro.manage_service('enable', service) + distro.manage_service("enable", service) def handle(name, cfg, cloud, log, args): - cfgin = cfg.get('fan') + cfgin = cfg.get("fan") if not cfgin: cfgin = {} mycfg = util.mergemanydict([cfgin, BUILTIN_CFG]) - if not mycfg.get('config'): + if not mycfg.get("config"): LOG.debug("%s: no 'fan' config entry. disabling", name) return - util.write_file(mycfg.get('config_path'), mycfg.get('config'), omode="w") + util.write_file(mycfg.get("config_path"), mycfg.get("config"), omode="w") distro = cloud.distro - if not subp.which('fanctl'): - distro.install_packages(['ubuntu-fan']) + if not subp.which("fanctl"): + distro.install_packages(["ubuntu-fan"]) stop_update_start( distro, - service='ubuntu-fan', config_file=mycfg.get('config_path'), - content=mycfg.get('config')) + service="ubuntu-fan", + config_file=mycfg.get("config_path"), + content=mycfg.get("config"), + ) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_final_message.py b/cloudinit/config/cc_final_message.py index 4fa5297e43d..f443ccd85e0 100644 --- a/cloudinit/config/cc_final_message.py +++ b/cloudinit/config/cc_final_message.py @@ -31,10 +31,7 @@ """ -from cloudinit import templater -from cloudinit import util -from cloudinit import version - +from cloudinit import templater, util, version from cloudinit.settings import PER_ALWAYS frequency = PER_ALWAYS @@ -49,7 +46,7 @@ def handle(_name, cfg, cloud, log, args): - msg_in = '' + msg_in = "" if len(args) != 0: msg_in = str(args[0]) else: @@ -64,14 +61,18 @@ def handle(_name, cfg, cloud, log, args): cver = version.version_string() try: subs = { - 'uptime': uptime, - 'timestamp': ts, - 'version': cver, - 'datasource': str(cloud.datasource), + "uptime": uptime, + "timestamp": ts, + "version": cver, + "datasource": str(cloud.datasource), } subs.update(dict([(k.upper(), v) for k, v in subs.items()])) - util.multi_log("%s\n" % (templater.render_string(msg_in, subs)), - console=False, stderr=True, log=log) + util.multi_log( + "%s\n" % (templater.render_string(msg_in, subs)), + console=False, + stderr=True, + log=log, + ) except Exception: util.logexc(log, "Failed to render final message template") @@ -85,4 +86,5 @@ def handle(_name, cfg, cloud, log, args): 
if cloud.datasource.is_disconnected: log.warning("Used fallback datasource") + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_foo.py b/cloudinit/config/cc_foo.py index 924b967ce0b..3c307153fad 100644 --- a/cloudinit/config/cc_foo.py +++ b/cloudinit/config/cc_foo.py @@ -53,4 +53,5 @@ def handle(name, _cfg, _cloud, log, _args): log.debug("Hi from module %s", name) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py index 1ddc9dc7080..43334caa96e 100644 --- a/cloudinit/config/cc_growpart.py +++ b/cloudinit/config/cc_growpart.py @@ -70,17 +70,15 @@ import stat from cloudinit import log as logging +from cloudinit import subp, temp_utils, util from cloudinit.settings import PER_ALWAYS -from cloudinit import subp -from cloudinit import temp_utils -from cloudinit import util frequency = PER_ALWAYS DEFAULT_CONFIG = { - 'mode': 'auto', - 'devices': ['/'], - 'ignore_growroot_disabled': False, + "mode": "auto", + "devices": ["/"], + "ignore_growroot_disabled": False, } @@ -131,7 +129,7 @@ class ResizeFailedException(Exception): class ResizeGrowPart(object): def available(self): myenv = os.environ.copy() - myenv['LANG'] = 'C' + myenv["LANG"] = "C" try: (out, _err) = subp.subp(["growpart", "--help"], env=myenv) @@ -144,7 +142,7 @@ def available(self): def resize(self, diskdev, partnum, partdev): myenv = os.environ.copy() - myenv['LANG'] = 'C' + myenv["LANG"] = "C" before = get_size(partdev) # growpart uses tmp dir to store intermediate states @@ -153,14 +151,19 @@ def resize(self, diskdev, partnum, partdev): growpart_tmp = os.path.join(tmpd, "growpart") if not os.path.exists(growpart_tmp): os.mkdir(growpart_tmp, 0o700) - myenv['TMPDIR'] = growpart_tmp + myenv["TMPDIR"] = growpart_tmp try: - subp.subp(["growpart", '--dry-run', diskdev, partnum], - env=myenv) + subp.subp( + ["growpart", "--dry-run", diskdev, partnum], env=myenv + ) except subp.ProcessExecutionError as e: if e.exit_code != 1: - util.logexc(LOG, "Failed growpart --dry-run for (%s, %s)", - diskdev, partnum) + util.logexc( + LOG, + "Failed growpart --dry-run for (%s, %s)", + diskdev, + partnum, + ) raise ResizeFailedException(e) from e return (before, before) @@ -176,7 +179,7 @@ def resize(self, diskdev, partnum, partdev): class ResizeGpart(object): def available(self): myenv = os.environ.copy() - myenv['LANG'] = 'C' + myenv["LANG"] = "C" try: (_out, err) = subp.subp(["gpart", "help"], env=myenv, rcs=[0, 1]) @@ -234,11 +237,11 @@ def device_part_info(devpath): # the device, like /dev/vtbd0p2. if util.is_FreeBSD(): freebsd_part = "/dev/" + util.find_freebsd_part(devpath) - m = re.search('^(/dev/.+)p([0-9])$', freebsd_part) + m = re.search("^(/dev/.+)p([0-9])$", freebsd_part) return (m.group(1), m.group(2)) elif util.is_DragonFlyBSD(): dragonflybsd_part = "/dev/" + util.find_dragonflybsd_part(devpath) - m = re.search('^(/dev/.+)s([0-9])$', dragonflybsd_part) + m = re.search("^(/dev/.+)s([0-9])$", dragonflybsd_part) return (m.group(1), m.group(2)) if not os.path.exists(syspath): @@ -275,7 +278,7 @@ def devent2dev(devent): container = util.is_container() # Ensure the path is a block device. 
- if (dev == "/dev/root" and not container): + if dev == "/dev/root" and not container: dev = util.rootdev_from_cmdline(util.get_cmdline()) if dev is None: if os.path.exists(dev): @@ -293,65 +296,102 @@ def resize_devices(resizer, devices): try: blockdev = devent2dev(devent) except ValueError as e: - info.append((devent, RESIZE.SKIPPED, - "unable to convert to device: %s" % e,)) + info.append( + ( + devent, + RESIZE.SKIPPED, + "unable to convert to device: %s" % e, + ) + ) continue try: statret = os.stat(blockdev) except OSError as e: - info.append((devent, RESIZE.SKIPPED, - "stat of '%s' failed: %s" % (blockdev, e),)) + info.append( + ( + devent, + RESIZE.SKIPPED, + "stat of '%s' failed: %s" % (blockdev, e), + ) + ) continue - if (not stat.S_ISBLK(statret.st_mode) and - not stat.S_ISCHR(statret.st_mode)): - info.append((devent, RESIZE.SKIPPED, - "device '%s' not a block device" % blockdev,)) + if not stat.S_ISBLK(statret.st_mode) and not stat.S_ISCHR( + statret.st_mode + ): + info.append( + ( + devent, + RESIZE.SKIPPED, + "device '%s' not a block device" % blockdev, + ) + ) continue try: (disk, ptnum) = device_part_info(blockdev) except (TypeError, ValueError) as e: - info.append((devent, RESIZE.SKIPPED, - "device_part_info(%s) failed: %s" % (blockdev, e),)) + info.append( + ( + devent, + RESIZE.SKIPPED, + "device_part_info(%s) failed: %s" % (blockdev, e), + ) + ) continue try: (old, new) = resizer.resize(disk, ptnum, blockdev) if old == new: - info.append((devent, RESIZE.NOCHANGE, - "no change necessary (%s, %s)" % (disk, ptnum),)) + info.append( + ( + devent, + RESIZE.NOCHANGE, + "no change necessary (%s, %s)" % (disk, ptnum), + ) + ) else: - info.append((devent, RESIZE.CHANGED, - "changed (%s, %s) from %s to %s" % - (disk, ptnum, old, new),)) + info.append( + ( + devent, + RESIZE.CHANGED, + "changed (%s, %s) from %s to %s" + % (disk, ptnum, old, new), + ) + ) except ResizeFailedException as e: - info.append((devent, RESIZE.FAILED, - "failed to resize: disk=%s, ptnum=%s: %s" % - (disk, ptnum, e),)) + info.append( + ( + devent, + RESIZE.FAILED, + "failed to resize: disk=%s, ptnum=%s: %s" + % (disk, ptnum, e), + ) + ) return info def handle(_name, cfg, _cloud, log, _args): - if 'growpart' not in cfg: - log.debug("No 'growpart' entry in cfg. Using default: %s" % - DEFAULT_CONFIG) - cfg['growpart'] = DEFAULT_CONFIG + if "growpart" not in cfg: + log.debug( + "No 'growpart' entry in cfg. 
Using default: %s" % DEFAULT_CONFIG + ) + cfg["growpart"] = DEFAULT_CONFIG - mycfg = cfg.get('growpart') + mycfg = cfg.get("growpart") if not isinstance(mycfg, dict): log.warning("'growpart' in config was not a dict") return - mode = mycfg.get('mode', "auto") + mode = mycfg.get("mode", "auto") if util.is_false(mode): log.debug("growpart disabled: mode=%s" % mode) return - if util.is_false(mycfg.get('ignore_growroot_disabled', False)): + if util.is_false(mycfg.get("ignore_growroot_disabled", False)): if os.path.isfile("/etc/growroot-disabled"): log.debug("growpart disabled: /etc/growroot-disabled exists") log.debug("use ignore_growroot_disabled to ignore") @@ -370,8 +410,12 @@ def handle(_name, cfg, _cloud, log, _args): raise e return - resized = util.log_time(logfunc=log.debug, msg="resize_devices", - func=resize_devices, args=(resizer, devices)) + resized = util.log_time( + logfunc=log.debug, + msg="resize_devices", + func=resize_devices, + args=(resizer, devices), + ) for (entry, action, msg) in resized: if action == RESIZE.CHANGED: log.info("'%s' resized: %s" % (entry, msg)) @@ -379,6 +423,6 @@ def handle(_name, cfg, _cloud, log, _args): log.debug("'%s' %s: %s" % (entry, action, msg)) -RESIZERS = (('growpart', ResizeGrowPart), ('gpart', ResizeGpart)) +RESIZERS = (("growpart", ResizeGrowPart), ("gpart", ResizeGpart)) # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_grub_dpkg.py b/cloudinit/config/cc_grub_dpkg.py index eb03c664ee0..ad7243d9b5c 100644 --- a/cloudinit/config/cc_grub_dpkg.py +++ b/cloudinit/config/cc_grub_dpkg.py @@ -43,11 +43,10 @@ import os -from cloudinit import subp -from cloudinit import util +from cloudinit import subp, util from cloudinit.subp import ProcessExecutionError -distros = ['ubuntu', 'debian'] +distros = ["ubuntu", "debian"] def fetch_idevs(log): @@ -60,8 +59,9 @@ def fetch_idevs(log): try: # get the root disk where the /boot directory resides. 
- disk = subp.subp(['grub-probe', '-t', 'disk', '/boot'], - capture=True)[0].strip() + disk = subp.subp(["grub-probe", "-t", "disk", "/boot"], capture=True)[ + 0 + ].strip() except ProcessExecutionError as e: # grub-common may not be installed, especially on containers # FileNotFoundError is a nested exception of ProcessExecutionError @@ -81,26 +81,30 @@ def fetch_idevs(log): if not disk or not os.path.exists(disk): # If we failed to detect a disk, we can return early - return '' + return "" try: # check if disk exists and use udevadm to fetch symlinks - devices = subp.subp( - ['udevadm', 'info', '--root', '--query=symlink', disk], - capture=True - )[0].strip().split() + devices = ( + subp.subp( + ["udevadm", "info", "--root", "--query=symlink", disk], + capture=True, + )[0] + .strip() + .split() + ) except Exception: util.logexc( log, "udevadm DEVLINKS symlink query failed for disk='%s'", disk ) - log.debug('considering these device symlinks: %s', ','.join(devices)) + log.debug("considering these device symlinks: %s", ",".join(devices)) # filter symlinks for /dev/disk/by-id entries - devices = [dev for dev in devices if 'disk/by-id' in dev] - log.debug('filtered to these disk/by-id symlinks: %s', ','.join(devices)) + devices = [dev for dev in devices if "disk/by-id" in dev] + log.debug("filtered to these disk/by-id symlinks: %s", ",".join(devices)) # select first device if there is one, else fall back to plain name idevs = sorted(devices)[0] if devices else disk - log.debug('selected %s', idevs) + log.debug("selected %s", idevs) return idevs @@ -111,14 +115,15 @@ def handle(name, cfg, _cloud, log, _args): if not mycfg: mycfg = {} - enabled = mycfg.get('enabled', True) + enabled = mycfg.get("enabled", True) if util.is_false(enabled): log.debug("%s disabled by config grub_dpkg/enabled=%s", name, enabled) return idevs = util.get_cfg_option_str(mycfg, "grub-pc/install_devices", None) idevs_empty = util.get_cfg_option_str( - mycfg, "grub-pc/install_devices_empty", None) + mycfg, "grub-pc/install_devices_empty", None + ) if idevs is None: idevs = fetch_idevs(log) @@ -128,16 +133,21 @@ def handle(name, cfg, _cloud, log, _args): # now idevs and idevs_empty are set to determined values # or, those set by user - dconf_sel = (("grub-pc grub-pc/install_devices string %s\n" - "grub-pc grub-pc/install_devices_empty boolean %s\n") % - (idevs, idevs_empty)) + dconf_sel = ( + "grub-pc grub-pc/install_devices string %s\n" + "grub-pc grub-pc/install_devices_empty boolean %s\n" + % (idevs, idevs_empty) + ) - log.debug("Setting grub debconf-set-selections with '%s','%s'" % - (idevs, idevs_empty)) + log.debug( + "Setting grub debconf-set-selections with '%s','%s'" + % (idevs, idevs_empty) + ) try: - subp.subp(['debconf-set-selections'], dconf_sel) + subp.subp(["debconf-set-selections"], dconf_sel) except Exception: util.logexc(log, "Failed to run debconf-set-selections for grub-dpkg") + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_install_hotplug.py b/cloudinit/config/cc_install_hotplug.py index 9b4075cc24c..952d9f13ca6 100644 --- a/cloudinit/config/cc_install_hotplug.py +++ b/cloudinit/config/cc_install_hotplug.py @@ -3,15 +3,12 @@ import os from textwrap import dedent -from cloudinit import util -from cloudinit import subp -from cloudinit import stages +from cloudinit import stages, subp, util from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit.distros import ALL_DISTROS -from cloudinit.event import EventType, EventScope +from cloudinit.event import 
EventScope, EventType from cloudinit.settings import PER_INSTANCE - frequency = PER_INSTANCE distros = [ALL_DISTROS] @@ -19,7 +16,8 @@ "id": "cc_install_hotplug", "name": "Install Hotplug", "title": "Install hotplug if supported and enabled", - "description": dedent("""\ + "description": dedent( + """\ This module will install the udev rules to enable hotplug if supported by the datasource and enabled in the userdata. The udev rules will be installed as @@ -32,21 +30,26 @@ network configuration. Currently supported datasources: Openstack, EC2 - """), + """ + ), "distros": distros, "examples": [ - dedent("""\ + dedent( + """\ # Enable hotplug of network devices updates: network: when: ["hotplug"] - """), - dedent("""\ + """ + ), + dedent( + """\ # Enable network hotplug alongside boot event updates: network: when: ["boot", "hotplug"] - """), + """ + ), ], "frequency": frequency, } @@ -74,14 +77,14 @@ "boot-legacy", "boot", "hotplug", - ] - } + ], + }, } - } + }, } - } + }, } - } + }, } __doc__ = get_meta_doc(meta, schema) @@ -100,14 +103,15 @@ def handle(_name, cfg, cloud, log, _args): validate_cloudconfig_schema(cfg, schema) network_hotplug_enabled = ( - 'updates' in cfg and - 'network' in cfg['updates'] and - 'when' in cfg['updates']['network'] and - 'hotplug' in cfg['updates']['network']['when'] + "updates" in cfg + and "network" in cfg["updates"] + and "when" in cfg["updates"]["network"] + and "hotplug" in cfg["updates"]["network"]["when"] ) hotplug_supported = EventType.HOTPLUG in ( - cloud.datasource.get_supported_events( - [EventType.HOTPLUG]).get(EventScope.NETWORK, set()) + cloud.datasource.get_supported_events([EventType.HOTPLUG]).get( + EventScope.NETWORK, set() + ) ) hotplug_enabled = stages.update_event_enabled( datasource=cloud.datasource, diff --git a/cloudinit/config/cc_keys_to_console.py b/cloudinit/config/cc_keys_to_console.py index d72b5244919..ab35e136ef6 100644 --- a/cloudinit/config/cc_keys_to_console.py +++ b/cloudinit/config/cc_keys_to_console.py @@ -38,49 +38,53 @@ import os +from cloudinit import subp, util from cloudinit.settings import PER_INSTANCE -from cloudinit import subp -from cloudinit import util frequency = PER_INSTANCE # This is a tool that cloud init provides -HELPER_TOOL_TPL = '%s/cloud-init/write-ssh-key-fingerprints' +HELPER_TOOL_TPL = "%s/cloud-init/write-ssh-key-fingerprints" def _get_helper_tool_path(distro): try: base_lib = distro.usr_lib_exec except AttributeError: - base_lib = '/usr/lib' + base_lib = "/usr/lib" return HELPER_TOOL_TPL % base_lib def handle(name, cfg, cloud, log, _args): if util.is_false(cfg.get("ssh", {}).get("emit_keys_to_console", True)): - log.debug(("Skipping module named %s, " - "logging of SSH host keys disabled"), name) + log.debug( + "Skipping module named %s, logging of SSH host keys disabled", name + ) return helper_path = _get_helper_tool_path(cloud.distro) if not os.path.exists(helper_path): - log.warning(("Unable to activate module %s," - " helper tool not found at %s"), name, helper_path) + log.warning( + "Unable to activate module %s, helper tool not found at %s", + name, + helper_path, + ) return - fp_blacklist = util.get_cfg_option_list(cfg, - "ssh_fp_console_blacklist", []) - key_blacklist = util.get_cfg_option_list(cfg, - "ssh_key_console_blacklist", - ["ssh-dss"]) + fp_blacklist = util.get_cfg_option_list( + cfg, "ssh_fp_console_blacklist", [] + ) + key_blacklist = util.get_cfg_option_list( + cfg, "ssh_key_console_blacklist", ["ssh-dss"] + ) try: - cmd = [helper_path, ','.join(fp_blacklist), 
','.join(key_blacklist)] + cmd = [helper_path, ",".join(fp_blacklist), ",".join(key_blacklist)] (stdout, _stderr) = subp.subp(cmd) - util.multi_log("%s\n" % (stdout.strip()), - stderr=False, console=True) + util.multi_log("%s\n" % (stdout.strip()), stderr=False, console=True) except Exception: log.warning("Writing keys to the system console failed!") raise + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_landscape.py b/cloudinit/config/cc_landscape.py index 299c4d01c82..03ebf411409 100644 --- a/cloudinit/config/cc_landscape.py +++ b/cloudinit/config/cc_landscape.py @@ -60,10 +60,7 @@ from configobj import ConfigObj -from cloudinit import type_utils -from cloudinit import subp -from cloudinit import util - +from cloudinit import subp, type_utils, util from cloudinit.settings import PER_INSTANCE frequency = PER_INSTANCE @@ -71,15 +68,15 @@ LSC_CLIENT_CFG_FILE = "/etc/landscape/client.conf" LS_DEFAULT_FILE = "/etc/default/landscape-client" -distros = ['ubuntu'] +distros = ["ubuntu"] # defaults taken from stock client.conf in landscape-client 11.07.1.1-0ubuntu2 LSC_BUILTIN_CFG = { - 'client': { - 'log_level': "info", - 'url': "https://landscape.canonical.com/message-system", - 'ping_url': "http://landscape.canonical.com/ping", - 'data_path': "/var/lib/landscape/client", + "client": { + "log_level": "info", + "url": "https://landscape.canonical.com/message-system", + "ping_url": "http://landscape.canonical.com/ping", + "data_path": "/var/lib/landscape/client", } } @@ -97,11 +94,13 @@ def handle(_name, cfg, cloud, log, _args): raise RuntimeError( "'landscape' key existed in config, but not a dictionary type," " is a {_type} instead".format( - _type=type_utils.obj_name(ls_cloudcfg))) + _type=type_utils.obj_name(ls_cloudcfg) + ) + ) if not ls_cloudcfg: return - cloud.distro.install_packages(('landscape-client',)) + cloud.distro.install_packages(("landscape-client",)) merge_data = [ LSC_BUILTIN_CFG, @@ -135,4 +134,5 @@ def merge_together(objs): cfg.merge(ConfigObj(obj)) return cfg + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_locale.py b/cloudinit/config/cc_locale.py index 7fed9abd875..487f58f7cb0 100644 --- a/cloudinit/config/cc_locale.py +++ b/cloudinit/config/cc_locale.py @@ -14,45 +14,48 @@ from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit.settings import PER_INSTANCE - frequency = PER_INSTANCE -distros = ['all'] +distros = ["all"] meta = { - 'id': 'cc_locale', - 'name': 'Locale', - 'title': 'Set system locale', - 'description': dedent( + "id": "cc_locale", + "name": "Locale", + "title": "Set system locale", + "description": dedent( """\ Configure the system locale and apply it system wide. By default use the locale specified by the datasource.""" ), - 'distros': distros, - 'examples': [ - dedent("""\ + "distros": distros, + "examples": [ + dedent( + """\ # Set the locale to ar_AE locale: ar_AE - """), - dedent("""\ + """ + ), + dedent( + """\ # Set the locale to fr_CA in /etc/alternate_path/locale locale: fr_CA locale_configfile: /etc/alternate_path/locale - """), + """ + ), ], - 'frequency': frequency, + "frequency": frequency, } schema = { - 'type': 'object', - 'properties': { - 'locale': { - 'type': 'string', - 'description': ( + "type": "object", + "properties": { + "locale": { + "type": "string", + "description": ( "The locale to set as the system's locale (e.g. 
ar_PS)" ), }, - 'locale_configfile': { - 'type': 'string', - 'description': ( + "locale_configfile": { + "type": "string", + "description": ( "The file in which to write the locale configuration (defaults" " to the distro's default location)" ), @@ -70,8 +73,9 @@ def handle(name, cfg, cloud, log, args): locale = util.get_cfg_option_str(cfg, "locale", cloud.get_locale()) if util.is_false(locale): - log.debug("Skipping module named %s, disabled by config: %s", - name, locale) + log.debug( + "Skipping module named %s, disabled by config: %s", name, locale + ) return validate_cloudconfig_schema(cfg, schema) @@ -80,4 +84,5 @@ def handle(name, cfg, cloud, log, args): locale_cfgfile = util.get_cfg_option_str(cfg, "locale_configfile") cloud.distro.apply_locale(locale, locale_cfgfile) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_lxd.py b/cloudinit/config/cc_lxd.py index 486037d90e3..13ddcbe930a 100644 --- a/cloudinit/config/cc_lxd.py +++ b/cloudinit/config/cc_lxd.py @@ -47,12 +47,12 @@ domain: """ -from cloudinit import log as logging -from cloudinit import subp -from cloudinit import util import os -distros = ['ubuntu'] +from cloudinit import log as logging +from cloudinit import subp, util + +distros = ["ubuntu"] LOG = logging.getLogger(__name__) @@ -61,36 +61,42 @@ def handle(name, cfg, cloud, log, args): # Get config - lxd_cfg = cfg.get('lxd') + lxd_cfg = cfg.get("lxd") if not lxd_cfg: - log.debug("Skipping module named %s, not present or disabled by cfg", - name) + log.debug( + "Skipping module named %s, not present or disabled by cfg", name + ) return if not isinstance(lxd_cfg, dict): - log.warning("lxd config must be a dictionary. found a '%s'", - type(lxd_cfg)) + log.warning( + "lxd config must be a dictionary. found a '%s'", type(lxd_cfg) + ) return # Grab the configuration - init_cfg = lxd_cfg.get('init') + init_cfg = lxd_cfg.get("init") if not isinstance(init_cfg, dict): - log.warning("lxd/init config must be a dictionary. found a '%s'", - type(init_cfg)) + log.warning( + "lxd/init config must be a dictionary. found a '%s'", + type(init_cfg), + ) init_cfg = {} - bridge_cfg = lxd_cfg.get('bridge', {}) + bridge_cfg = lxd_cfg.get("bridge", {}) if not isinstance(bridge_cfg, dict): - log.warning("lxd/bridge config must be a dictionary. found a '%s'", - type(bridge_cfg)) + log.warning( + "lxd/bridge config must be a dictionary. 
found a '%s'", + type(bridge_cfg), + ) bridge_cfg = {} # Install the needed packages packages = [] if not subp.which("lxd"): - packages.append('lxd') + packages.append("lxd") - if init_cfg.get("storage_backend") == "zfs" and not subp.which('zfs'): - packages.append('zfsutils-linux') + if init_cfg.get("storage_backend") == "zfs" and not subp.which("zfs"): + packages.append("zfsutils-linux") if len(packages): try: @@ -102,23 +108,30 @@ def handle(name, cfg, cloud, log, args): # Set up lxd if init config is given if init_cfg: init_keys = ( - 'network_address', 'network_port', 'storage_backend', - 'storage_create_device', 'storage_create_loop', - 'storage_pool', 'trust_password') - subp.subp(['lxd', 'waitready', '--timeout=300']) - cmd = ['lxd', 'init', '--auto'] + "network_address", + "network_port", + "storage_backend", + "storage_create_device", + "storage_create_loop", + "storage_pool", + "trust_password", + ) + subp.subp(["lxd", "waitready", "--timeout=300"]) + cmd = ["lxd", "init", "--auto"] for k in init_keys: if init_cfg.get(k): - cmd.extend(["--%s=%s" % - (k.replace('_', '-'), str(init_cfg[k]))]) + cmd.extend( + ["--%s=%s" % (k.replace("_", "-"), str(init_cfg[k]))] + ) subp.subp(cmd) # Set up lxd-bridge if bridge config is given dconf_comm = "debconf-communicate" if bridge_cfg: net_name = bridge_cfg.get("name", _DEFAULT_NETWORK_NAME) - if os.path.exists("/etc/default/lxd-bridge") \ - and subp.which(dconf_comm): + if os.path.exists("/etc/default/lxd-bridge") and subp.which( + dconf_comm + ): # Bridge configured through packaging debconf = bridge_to_debconf(bridge_cfg) @@ -126,39 +139,47 @@ def handle(name, cfg, cloud, log, args): # Update debconf database try: log.debug("Setting lxd debconf via " + dconf_comm) - data = "\n".join(["set %s %s" % (k, v) - for k, v in debconf.items()]) + "\n" - subp.subp(['debconf-communicate'], data) + data = ( + "\n".join( + ["set %s %s" % (k, v) for k, v in debconf.items()] + ) + + "\n" + ) + subp.subp(["debconf-communicate"], data) except Exception: - util.logexc(log, "Failed to run '%s' for lxd with" % - dconf_comm) + util.logexc( + log, "Failed to run '%s' for lxd with" % dconf_comm + ) # Remove the existing configuration file (forces re-generation) util.del_file("/etc/default/lxd-bridge") # Run reconfigure log.debug("Running dpkg-reconfigure for lxd") - subp.subp(['dpkg-reconfigure', 'lxd', - '--frontend=noninteractive']) + subp.subp(["dpkg-reconfigure", "lxd", "--frontend=noninteractive"]) else: # Built-in LXD bridge support cmd_create, cmd_attach = bridge_to_cmd(bridge_cfg) maybe_cleanup_default( - net_name=net_name, did_init=bool(init_cfg), - create=bool(cmd_create), attach=bool(cmd_attach)) + net_name=net_name, + did_init=bool(init_cfg), + create=bool(cmd_create), + attach=bool(cmd_attach), + ) if cmd_create: - log.debug("Creating lxd bridge: %s" % - " ".join(cmd_create)) + log.debug("Creating lxd bridge: %s" % " ".join(cmd_create)) _lxc(cmd_create) if cmd_attach: - log.debug("Setting up default lxd bridge: %s" % - " ".join(cmd_attach)) + log.debug( + "Setting up default lxd bridge: %s" % " ".join(cmd_attach) + ) _lxc(cmd_attach) elif bridge_cfg: raise RuntimeError( - "Unable to configure lxd bridge without %s." + dconf_comm) + "Unable to configure lxd bridge without %s." 
+ dconf_comm + ) def bridge_to_debconf(bridge_cfg): @@ -180,33 +201,32 @@ def bridge_to_debconf(bridge_cfg): if bridge_cfg.get("ipv4_address"): debconf["lxd/bridge-ipv4"] = "true" - debconf["lxd/bridge-ipv4-address"] = \ - bridge_cfg.get("ipv4_address") - debconf["lxd/bridge-ipv4-netmask"] = \ - bridge_cfg.get("ipv4_netmask") - debconf["lxd/bridge-ipv4-dhcp-first"] = \ - bridge_cfg.get("ipv4_dhcp_first") - debconf["lxd/bridge-ipv4-dhcp-last"] = \ - bridge_cfg.get("ipv4_dhcp_last") - debconf["lxd/bridge-ipv4-dhcp-leases"] = \ - bridge_cfg.get("ipv4_dhcp_leases") - debconf["lxd/bridge-ipv4-nat"] = \ - bridge_cfg.get("ipv4_nat", "true") + debconf["lxd/bridge-ipv4-address"] = bridge_cfg.get("ipv4_address") + debconf["lxd/bridge-ipv4-netmask"] = bridge_cfg.get("ipv4_netmask") + debconf["lxd/bridge-ipv4-dhcp-first"] = bridge_cfg.get( + "ipv4_dhcp_first" + ) + debconf["lxd/bridge-ipv4-dhcp-last"] = bridge_cfg.get( + "ipv4_dhcp_last" + ) + debconf["lxd/bridge-ipv4-dhcp-leases"] = bridge_cfg.get( + "ipv4_dhcp_leases" + ) + debconf["lxd/bridge-ipv4-nat"] = bridge_cfg.get("ipv4_nat", "true") if bridge_cfg.get("ipv6_address"): debconf["lxd/bridge-ipv6"] = "true" - debconf["lxd/bridge-ipv6-address"] = \ - bridge_cfg.get("ipv6_address") - debconf["lxd/bridge-ipv6-netmask"] = \ - bridge_cfg.get("ipv6_netmask") - debconf["lxd/bridge-ipv6-nat"] = \ - bridge_cfg.get("ipv6_nat", "false") + debconf["lxd/bridge-ipv6-address"] = bridge_cfg.get("ipv6_address") + debconf["lxd/bridge-ipv6-netmask"] = bridge_cfg.get("ipv6_netmask") + debconf["lxd/bridge-ipv6-nat"] = bridge_cfg.get( + "ipv6_nat", "false" + ) if bridge_cfg.get("domain"): debconf["lxd/bridge-domain"] = bridge_cfg.get("domain") else: - raise Exception("invalid bridge mode \"%s\"" % bridge_cfg.get("mode")) + raise Exception('invalid bridge mode "%s"' % bridge_cfg.get("mode")) return debconf @@ -217,37 +237,41 @@ def bridge_to_cmd(bridge_cfg): bridge_name = bridge_cfg.get("name", _DEFAULT_NETWORK_NAME) cmd_create = [] - cmd_attach = ["network", "attach-profile", bridge_name, - "default", "eth0"] + cmd_attach = ["network", "attach-profile", bridge_name, "default", "eth0"] if bridge_cfg.get("mode") == "existing": return None, cmd_attach if bridge_cfg.get("mode") != "new": - raise Exception("invalid bridge mode \"%s\"" % bridge_cfg.get("mode")) + raise Exception('invalid bridge mode "%s"' % bridge_cfg.get("mode")) cmd_create = ["network", "create", bridge_name] if bridge_cfg.get("ipv4_address") and bridge_cfg.get("ipv4_netmask"): - cmd_create.append("ipv4.address=%s/%s" % - (bridge_cfg.get("ipv4_address"), - bridge_cfg.get("ipv4_netmask"))) + cmd_create.append( + "ipv4.address=%s/%s" + % (bridge_cfg.get("ipv4_address"), bridge_cfg.get("ipv4_netmask")) + ) if bridge_cfg.get("ipv4_nat", "true") == "true": cmd_create.append("ipv4.nat=true") - if bridge_cfg.get("ipv4_dhcp_first") and \ - bridge_cfg.get("ipv4_dhcp_last"): - dhcp_range = "%s-%s" % (bridge_cfg.get("ipv4_dhcp_first"), - bridge_cfg.get("ipv4_dhcp_last")) + if bridge_cfg.get("ipv4_dhcp_first") and bridge_cfg.get( + "ipv4_dhcp_last" + ): + dhcp_range = "%s-%s" % ( + bridge_cfg.get("ipv4_dhcp_first"), + bridge_cfg.get("ipv4_dhcp_last"), + ) cmd_create.append("ipv4.dhcp.ranges=%s" % dhcp_range) else: cmd_create.append("ipv4.address=none") if bridge_cfg.get("ipv6_address") and bridge_cfg.get("ipv6_netmask"): - cmd_create.append("ipv6.address=%s/%s" % - (bridge_cfg.get("ipv6_address"), - bridge_cfg.get("ipv6_netmask"))) + cmd_create.append( + "ipv6.address=%s/%s" + % (bridge_cfg.get("ipv6_address"), 
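bridge_to_cmd, reformatted above, maps a bridge config onto `lxc network` invocations. A trimmed sketch of the ipv4 path, assuming "lxdbr0" as the default network name and omitting the ipv6, DHCP-range, domain, and invalid-mode branches:

# Reduced bridge_to_cmd: returns (create_cmd, attach_cmd) like the hunk above.
def bridge_to_cmd(bridge_cfg: dict):
    name = bridge_cfg.get("name", "lxdbr0")  # assumed default network name
    attach = ["network", "attach-profile", name, "default", "eth0"]
    if bridge_cfg.get("mode") == "existing":
        return None, attach
    create = ["network", "create", name]
    if bridge_cfg.get("ipv4_address") and bridge_cfg.get("ipv4_netmask"):
        create.append("ipv4.address=%s/%s" % (
            bridge_cfg["ipv4_address"], bridge_cfg["ipv4_netmask"]))
        if bridge_cfg.get("ipv4_nat", "true") == "true":
            create.append("ipv4.nat=true")
    else:
        create.append("ipv4.address=none")
    return create, attach


print(bridge_to_cmd({"mode": "new", "ipv4_address": "10.0.8.1",
                     "ipv4_netmask": "24"}))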
bridge_cfg.get("ipv6_netmask")) + ) if bridge_cfg.get("ipv6_nat", "false") == "true": cmd_create.append("ipv6.nat=true") @@ -262,14 +286,17 @@ def bridge_to_cmd(bridge_cfg): def _lxc(cmd): - env = {'LC_ALL': 'C', - 'HOME': os.environ.get('HOME', '/root'), - 'USER': os.environ.get('USER', 'root')} - subp.subp(['lxc'] + list(cmd) + ["--force-local"], update_env=env) + env = { + "LC_ALL": "C", + "HOME": os.environ.get("HOME", "/root"), + "USER": os.environ.get("USER", "root"), + } + subp.subp(["lxc"] + list(cmd) + ["--force-local"], update_env=env) -def maybe_cleanup_default(net_name, did_init, create, attach, - profile="default", nic_name="eth0"): +def maybe_cleanup_default( + net_name, did_init, create, attach, profile="default", nic_name="eth0" +): """Newer versions of lxc (3.0.1+) create a lxdbr0 network when 'lxd init --auto' is run. Older versions did not. @@ -306,4 +333,5 @@ def maybe_cleanup_default(net_name, did_init, create, attach, raise e LOG.debug(msg, nic_name, profile, fail_assume_enoent) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_mcollective.py b/cloudinit/config/cc_mcollective.py index 41ea4fc98d9..1b0158ec959 100644 --- a/cloudinit/config/cc_mcollective.py +++ b/cloudinit/config/cc_mcollective.py @@ -56,18 +56,21 @@ from configobj import ConfigObj from cloudinit import log as logging -from cloudinit import subp -from cloudinit import util +from cloudinit import subp, util PUBCERT_FILE = "/etc/mcollective/ssl/server-public.pem" PRICERT_FILE = "/etc/mcollective/ssl/server-private.pem" -SERVER_CFG = '/etc/mcollective/server.cfg' +SERVER_CFG = "/etc/mcollective/server.cfg" LOG = logging.getLogger(__name__) -def configure(config, server_cfg=SERVER_CFG, - pubcert_file=PUBCERT_FILE, pricert_file=PRICERT_FILE): +def configure( + config, + server_cfg=SERVER_CFG, + pubcert_file=PUBCERT_FILE, + pricert_file=PRICERT_FILE, +): # Read server.cfg (if it exists) values from the # original file in order to be able to mix the rest up. 
try: @@ -77,20 +80,20 @@ def configure(config, server_cfg=SERVER_CFG, if e.errno != errno.ENOENT: raise else: - LOG.debug("Did not find file %s (starting with an empty" - " config)", server_cfg) + LOG.debug( + "Did not find file %s (starting with an empty config)", + server_cfg, + ) mcollective_config = ConfigObj() for (cfg_name, cfg) in config.items(): - if cfg_name == 'public-cert': + if cfg_name == "public-cert": util.write_file(pubcert_file, cfg, mode=0o644) - mcollective_config[ - 'plugin.ssl_server_public'] = pubcert_file - mcollective_config['securityprovider'] = 'ssl' - elif cfg_name == 'private-cert': + mcollective_config["plugin.ssl_server_public"] = pubcert_file + mcollective_config["securityprovider"] = "ssl" + elif cfg_name == "private-cert": util.write_file(pricert_file, cfg, mode=0o600) - mcollective_config[ - 'plugin.ssl_server_private'] = pricert_file - mcollective_config['securityprovider'] = 'ssl' + mcollective_config["plugin.ssl_server_private"] = pricert_file + mcollective_config["securityprovider"] = "ssl" else: if isinstance(cfg, str): # Just set it in the 'main' section @@ -126,21 +129,24 @@ def configure(config, server_cfg=SERVER_CFG, def handle(name, cfg, cloud, log, _args): # If there isn't a mcollective key in the configuration don't do anything - if 'mcollective' not in cfg: - log.debug(("Skipping module named %s, " - "no 'mcollective' key in configuration"), name) + if "mcollective" not in cfg: + log.debug( + "Skipping module named %s, no 'mcollective' key in configuration", + name, + ) return - mcollective_cfg = cfg['mcollective'] + mcollective_cfg = cfg["mcollective"] # Start by installing the mcollective package ... cloud.distro.install_packages(("mcollective",)) # ... and then update the mcollective configuration - if 'conf' in mcollective_cfg: - configure(config=mcollective_cfg['conf']) + if "conf" in mcollective_cfg: + configure(config=mcollective_cfg["conf"]) # restart mcollective to handle updated config - subp.subp(['service', 'mcollective', 'restart'], capture=False) + subp.subp(["service", "mcollective", "restart"], capture=False) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_migrator.py b/cloudinit/config/cc_migrator.py index 79bcc27d0ce..4fafb4af582 100644 --- a/cloudinit/config/cc_migrator.py +++ b/cloudinit/config/cc_migrator.py @@ -29,16 +29,14 @@ import os import shutil -from cloudinit import helpers -from cloudinit import util - +from cloudinit import helpers, util from cloudinit.settings import PER_ALWAYS frequency = PER_ALWAYS def _migrate_canon_sems(cloud): - paths = (cloud.paths.get_ipath('sem'), cloud.paths.get_cpath('sem')) + paths = (cloud.paths.get_ipath("sem"), cloud.paths.get_cpath("sem")) am_adjusted = 0 for sem_path in paths: if not sem_path or not os.path.exists(sem_path): @@ -57,12 +55,12 @@ def _migrate_canon_sems(cloud): def _migrate_legacy_sems(cloud, log): legacy_adjust = { - 'apt-update-upgrade': [ - 'apt-configure', - 'package-update-upgrade-install', + "apt-update-upgrade": [ + "apt-configure", + "package-update-upgrade-install", ], } - paths = (cloud.paths.get_ipath('sem'), cloud.paths.get_cpath('sem')) + paths = (cloud.paths.get_ipath("sem"), cloud.paths.get_cpath("sem")) for sem_path in paths: if not sem_path or not os.path.exists(sem_path): continue @@ -78,8 +76,9 @@ def _migrate_legacy_sems(cloud, log): util.del_file(os.path.join(sem_path, p)) (_name, freq) = os.path.splitext(p) for m in migrate_to: - log.debug("Migrating %s => %s with the same frequency", - p, m) + log.debug( + "Migrating %s => %s with the 
same frequency", p, m + ) with sem_helper.lock(m, freq): pass @@ -90,8 +89,10 @@ def handle(name, cfg, cloud, log, _args): log.debug("Skipping module named %s, migration disabled", name) return sems_moved = _migrate_canon_sems(cloud) - log.debug("Migrated %s semaphore files to there canonicalized names", - sems_moved) + log.debug( + "Migrated %s semaphore files to there canonicalized names", sems_moved + ) _migrate_legacy_sems(cloud, log) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py index eeb008d2b0d..ec2e46ff202 100644 --- a/cloudinit/config/cc_mounts.py +++ b/cloudinit/config/cc_mounts.py @@ -62,15 +62,12 @@ maxsize: """ -from string import whitespace - import logging import os import re +from string import whitespace -from cloudinit import type_utils -from cloudinit import subp -from cloudinit import util +from cloudinit import subp, type_utils, util # Shortname matches 'sda', 'sda1', 'xvda', 'hda', 'sdb', xvdb, vda, vdd1, sr0 DEVICE_NAME_FILTER = r"^([x]{0,1}[shv]d[a-z][0-9]*|sr[0-9]+)$" @@ -105,21 +102,25 @@ def is_network_device(name): def _get_nth_partition_for_device(device_path, partition_number): - potential_suffixes = [str(partition_number), 'p%s' % (partition_number,), - '-part%s' % (partition_number,)] + potential_suffixes = [ + str(partition_number), + "p%s" % (partition_number,), + "-part%s" % (partition_number,), + ] for suffix in potential_suffixes: - potential_partition_device = '%s%s' % (device_path, suffix) + potential_partition_device = "%s%s" % (device_path, suffix) if os.path.exists(potential_partition_device): return potential_partition_device return None def _is_block_device(device_path, partition_path=None): - device_name = os.path.realpath(device_path).split('/')[-1] - sys_path = os.path.join('/sys/block/', device_name) + device_name = os.path.realpath(device_path).split("/")[-1] + sys_path = os.path.join("/sys/block/", device_name) if partition_path is not None: sys_path = os.path.join( - sys_path, os.path.realpath(partition_path).split('/')[-1]) + sys_path, os.path.realpath(partition_path).split("/")[-1] + ) return os.path.exists(sys_path) @@ -159,8 +160,9 @@ def sanitize_devname(startname, transformer, log, aliases=None): if partition_number is None: partition_path = _get_nth_partition_for_device(device_path, 1) else: - partition_path = _get_nth_partition_for_device(device_path, - partition_number) + partition_path = _get_nth_partition_for_device( + device_path, partition_number + ) if partition_path is None: return None @@ -174,12 +176,12 @@ def sanitize_devname(startname, transformer, log, aliases=None): def suggested_swapsize(memsize=None, maxsize=None, fsys=None): # make a suggestion on the size of swap for this system. 
if memsize is None: - memsize = util.read_meminfo()['total'] + memsize = util.read_meminfo()["total"] GB = 2 ** 30 sugg_max = 8 * GB - info = {'avail': 'na', 'max_in': maxsize, 'mem': memsize} + info = {"avail": "na", "max_in": maxsize, "mem": memsize} if fsys is None and maxsize is None: # set max to 8GB default if no filesystem given @@ -187,18 +189,18 @@ def suggested_swapsize(memsize=None, maxsize=None, fsys=None): elif fsys: statvfs = os.statvfs(fsys) avail = statvfs.f_frsize * statvfs.f_bfree - info['avail'] = avail + info["avail"] = avail if maxsize is None: # set to 25% of filesystem space maxsize = min(int(avail / 4), sugg_max) - elif maxsize > ((avail * .9)): + elif maxsize > ((avail * 0.9)): # set to 90% of available disk space - maxsize = int(avail * .9) + maxsize = int(avail * 0.9) elif maxsize is None: maxsize = sugg_max - info['max'] = maxsize + info["max"] = maxsize formulas = [ # < 1G: swap = double memory @@ -226,7 +228,7 @@ def suggested_swapsize(memsize=None, maxsize=None, fsys=None): if size is not None: size = maxsize - info['size'] = size + info["size"] = size MB = 2 ** 20 pinfo = {} @@ -236,9 +238,14 @@ def suggested_swapsize(memsize=None, maxsize=None, fsys=None): else: pinfo[k] = v - LOG.debug("suggest %s swap for %s memory with '%s'" - " disk given max=%s [max=%s]'", pinfo['size'], pinfo['mem'], - pinfo['avail'], pinfo['max_in'], pinfo['max']) + LOG.debug( + "suggest %s swap for %s memory with '%s' disk given max=%s [max=%s]'", + pinfo["size"], + pinfo["mem"], + pinfo["avail"], + pinfo["max_in"], + pinfo["max"], + ) return size @@ -248,14 +255,23 @@ def create_swapfile(fname: str, size: str) -> None: errmsg = "Failed to create swapfile '%s' of size %sMB via %s: %s" def create_swap(fname, size, method): - LOG.debug("Creating swapfile in '%s' on fstype '%s' using '%s'", - fname, fstype, method) + LOG.debug( + "Creating swapfile in '%s' on fstype '%s' using '%s'", + fname, + fstype, + method, + ) if method == "fallocate": - cmd = ['fallocate', '-l', '%sM' % size, fname] + cmd = ["fallocate", "-l", "%sM" % size, fname] elif method == "dd": - cmd = ['dd', 'if=/dev/zero', 'of=%s' % fname, 'bs=1M', - 'count=%s' % size] + cmd = [ + "dd", + "if=/dev/zero", + "of=%s" % fname, + "bs=1M", + "count=%s" % size, + ] try: subp.subp(cmd, capture=True) @@ -269,8 +285,9 @@ def create_swap(fname, size, method): fstype = util.get_mount_info(swap_dir)[1] - if (fstype == "xfs" and - util.kernel_version() < (4, 18)) or fstype == "btrfs": + if ( + fstype == "xfs" and util.kernel_version() < (4, 18) + ) or fstype == "btrfs": create_swap(fname, size, "dd") else: try: @@ -282,7 +299,7 @@ def create_swap(fname, size, method): if os.path.exists(fname): util.chmod(fname, 0o600) try: - subp.subp(['mkswap', fname]) + subp.subp(["mkswap", fname]) except subp.ProcessExecutionError: util.del_file(fname) raise @@ -297,37 +314,42 @@ def setup_swapfile(fname, size=None, maxsize=None): swap_dir = os.path.dirname(fname) if str(size).lower() == "auto": try: - memsize = util.read_meminfo()['total'] + memsize = util.read_meminfo()["total"] except IOError: LOG.debug("Not creating swap: failed to read meminfo") return util.ensure_dir(swap_dir) - size = suggested_swapsize(fsys=swap_dir, maxsize=maxsize, - memsize=memsize) + size = suggested_swapsize( + fsys=swap_dir, maxsize=maxsize, memsize=memsize + ) mibsize = str(int(size / (2 ** 20))) if not size: LOG.debug("Not creating swap: suggested size was 0") return - util.log_time(LOG.debug, msg="Setting up swap file", func=create_swapfile, - args=[fname, 
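suggested_swapsize, being reformatted above, tiers swap by memory and caps it by disk. This deliberately simplified model keeps only the 8GB ceiling, the 25%-of-free-space cap, and the lowest tier (swap = double memory); the other tiers from the formulas table are dropped:

GB = 2 ** 30


def simple_swapsize(memsize, avail=None, maxsize=None):
    # Cap at 8GB, or 25% of free filesystem space when that is known.
    sugg_max = 8 * GB
    if maxsize is None:
        maxsize = min(int(avail / 4), sugg_max) if avail else sugg_max
    return min(2 * memsize, maxsize)  # "< 1G: swap = double memory" tier


print(simple_swapsize(memsize=1 * GB, avail=40 * GB) / GB)  # -> 2.0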
mibsize]) + util.log_time( + LOG.debug, + msg="Setting up swap file", + func=create_swapfile, + args=[fname, mibsize], + ) return fname def handle_swapcfg(swapcfg): """handle the swap config, calling setup_swap if necessary. - return None or (filename, size) + return None or (filename, size) """ if not isinstance(swapcfg, dict): LOG.warning("input for swap config was not a dict.") return None - fname = swapcfg.get('filename', '/swap.img') - size = swapcfg.get('size', 0) - maxsize = swapcfg.get('maxsize', None) + fname = swapcfg.get("filename", "/swap.img") + size = swapcfg.get("size", 0) + maxsize = swapcfg.get("maxsize", None) if not (size and fname): LOG.debug("no need to setup swap") @@ -335,8 +357,10 @@ def handle_swapcfg(swapcfg): if os.path.exists(fname): if not os.path.exists("/proc/swaps"): - LOG.debug("swap file %s exists, but no /proc/swaps exists, " - "being safe", fname) + LOG.debug( + "swap file %s exists, but no /proc/swaps exists, being safe", + fname, + ) return fname try: for line in util.load_file("/proc/swaps").splitlines(): @@ -345,8 +369,9 @@ def handle_swapcfg(swapcfg): return fname LOG.debug("swap file %s exists, but not in /proc/swaps", fname) except Exception: - LOG.warning("swap file %s exists. Error reading /proc/swaps", - fname) + LOG.warning( + "swap file %s exists. Error reading /proc/swaps", fname + ) return fname try: @@ -373,8 +398,10 @@ def handle(_name, cfg, cloud, log, _args): defvals = cfg.get("mount_default_fields", defvals) # these are our default set of mounts - defmnts = [["ephemeral0", "/mnt", "auto", defvals[3], "0", "2"], - ["swap", "none", "swap", "sw", "0", "0"]] + defmnts = [ + ["ephemeral0", "/mnt", "auto", defvals[3], "0", "2"], + ["swap", "none", "swap", "sw", "0", "0"], + ] cfgmnt = [] if "mounts" in cfg: @@ -404,13 +431,17 @@ def handle(_name, cfg, cloud, log, _args): for i in range(len(cfgmnt)): # skip something that wasn't a list if not isinstance(cfgmnt[i], list): - log.warning("Mount option %s not a list, got a %s instead", - (i + 1), type_utils.obj_name(cfgmnt[i])) + log.warning( + "Mount option %s not a list, got a %s instead", + (i + 1), + type_utils.obj_name(cfgmnt[i]), + ) continue start = str(cfgmnt[i][0]) - sanitized = sanitize_devname(start, cloud.device_name_to_device, log, - aliases=device_aliases) + sanitized = sanitize_devname( + start, cloud.device_name_to_device, log, aliases=device_aliases + ) if sanitized != start: log.debug("changed %s => %s" % (start, sanitized)) @@ -418,8 +449,11 @@ def handle(_name, cfg, cloud, log, _args): log.debug("Ignoring nonexistent named mount %s", start) continue elif sanitized in fstab_devs: - log.info("Device %s already defined in fstab: %s", - sanitized, fstab_devs[sanitized]) + log.info( + "Device %s already defined in fstab: %s", + sanitized, + fstab_devs[sanitized], + ) continue cfgmnt[i][0] = sanitized @@ -452,8 +486,9 @@ def handle(_name, cfg, cloud, log, _args): # entry has the same device name for defmnt in defmnts: start = defmnt[0] - sanitized = sanitize_devname(start, cloud.device_name_to_device, log, - aliases=device_aliases) + sanitized = sanitize_devname( + start, cloud.device_name_to_device, log, aliases=device_aliases + ) if sanitized != start: log.debug("changed default device %s => %s" % (start, sanitized)) @@ -461,8 +496,11 @@ def handle(_name, cfg, cloud, log, _args): log.debug("Ignoring nonexistent default named mount %s", start) continue elif sanitized in fstab_devs: - log.debug("Device %s already defined in fstab: %s", - sanitized, fstab_devs[sanitized]) + 
log.debug( + "Device %s already defined in fstab: %s", + sanitized, + fstab_devs[sanitized], + ) continue defmnt[0] = sanitized @@ -474,8 +512,7 @@ def handle(_name, cfg, cloud, log, _args): break if cfgmnt_has: - log.debug(("Not including %s, already" - " previously included"), start) + log.debug("Not including %s, already previously included", start) continue cfgmnt.append(defmnt) @@ -488,7 +525,7 @@ def handle(_name, cfg, cloud, log, _args): else: actlist.append(x) - swapret = handle_swapcfg(cfg.get('swap', {})) + swapret = handle_swapcfg(cfg.get("swap", {})) if swapret: actlist.append([swapret, "none", "swap", "sw", "0", "0"]) @@ -507,10 +544,11 @@ def handle(_name, cfg, cloud, log, _args): needswap = True if line[1].startswith("/"): dirs.append(line[1]) - cc_lines.append('\t'.join(line)) + cc_lines.append("\t".join(line)) - mount_points = [v['mountpoint'] for k, v in util.mounts().items() - if 'mountpoint' in v] + mount_points = [ + v["mountpoint"] for k, v in util.mounts().items() if "mountpoint" in v + ] for d in dirs: try: util.ensure_dir(d) @@ -525,11 +563,12 @@ def handle(_name, cfg, cloud, log, _args): sadds = [WS.sub(" ", n) for n in cc_lines] sdrops = [WS.sub(" ", n) for n in fstab_removed] - sops = (["- " + drop for drop in sdrops if drop not in sadds] + - ["+ " + add for add in sadds if add not in sdrops]) + sops = ["- " + drop for drop in sdrops if drop not in sadds] + [ + "+ " + add for add in sadds if add not in sdrops + ] fstab_lines.extend(cc_lines) - contents = "%s\n" % ('\n'.join(fstab_lines)) + contents = "%s\n" % "\n".join(fstab_lines) util.write_file(FSTAB_PATH, contents) activate_cmds = [] @@ -549,7 +588,7 @@ def handle(_name, cfg, cloud, log, _args): fmt = "Activating swap and mounts with: %s" for cmd in activate_cmds: - fmt = "Activate mounts: %s:" + ' '.join(cmd) + fmt = "Activate mounts: %s:" + " ".join(cmd) try: subp.subp(cmd) log.debug(fmt, "PASS") @@ -557,4 +596,5 @@ def handle(_name, cfg, cloud, log, _args): log.warning(fmt, "FAIL") util.logexc(log, fmt, "FAIL") + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py index c55d5d86606..a31da9bb5f1 100644 --- a/cloudinit/config/cc_ntp.py +++ b/cloudinit/config/cc_ntp.py @@ -11,124 +11,132 @@ from textwrap import dedent from cloudinit import log as logging -from cloudinit import temp_utils -from cloudinit import templater -from cloudinit import type_utils -from cloudinit import subp -from cloudinit import util +from cloudinit import subp, temp_utils, templater, type_utils, util from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit.settings import PER_INSTANCE LOG = logging.getLogger(__name__) frequency = PER_INSTANCE -NTP_CONF = '/etc/ntp.conf' +NTP_CONF = "/etc/ntp.conf" NR_POOL_SERVERS = 4 -distros = ['almalinux', 'alpine', 'centos', 'cloudlinux', 'debian', - 'eurolinux', 'fedora', 'miraclelinux', 'openEuler', 'opensuse', - 'photon', 'rhel', 'rocky', 'sles', 'ubuntu', 'virtuozzo'] +distros = [ + "almalinux", + "alpine", + "centos", + "cloudlinux", + "debian", + "eurolinux", + "fedora", + "miraclelinux", + "openEuler", + "opensuse", + "photon", + "rhel", + "rocky", + "sles", + "ubuntu", + "virtuozzo", +] NTP_CLIENT_CONFIG = { - 'chrony': { - 'check_exe': 'chronyd', - 'confpath': '/etc/chrony.conf', - 'packages': ['chrony'], - 'service_name': 'chrony', - 'template_name': 'chrony.conf.{distro}', - 'template': None, + "chrony": { + "check_exe": "chronyd", + "confpath": "/etc/chrony.conf", + "packages": ["chrony"], + "service_name": 
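The fstab reconciliation earlier in this hunk normalizes whitespace before diffing the removed and added entries. A standalone version with sample rows:

import re

# string.whitespace characters, as in the cc_mounts import above.
WS = re.compile("[%s]+" % re.escape(" \t\n\r\x0b\x0c"))


def fstab_ops(removed_lines, new_lines):
    sdrops = [WS.sub(" ", n) for n in removed_lines]
    sadds = [WS.sub(" ", n) for n in new_lines]
    return (["- " + d for d in sdrops if d not in sadds]
            + ["+ " + a for a in sadds if a not in sdrops])


print(fstab_ops(["/dev/sdb1  /mnt auto defaults 0 2"],
                ["/dev/sdb1 /mnt auto defaults,nofail 0 2"]))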
"chrony", + "template_name": "chrony.conf.{distro}", + "template": None, }, - 'ntp': { - 'check_exe': 'ntpd', - 'confpath': NTP_CONF, - 'packages': ['ntp'], - 'service_name': 'ntp', - 'template_name': 'ntp.conf.{distro}', - 'template': None, + "ntp": { + "check_exe": "ntpd", + "confpath": NTP_CONF, + "packages": ["ntp"], + "service_name": "ntp", + "template_name": "ntp.conf.{distro}", + "template": None, }, - 'ntpdate': { - 'check_exe': 'ntpdate', - 'confpath': NTP_CONF, - 'packages': ['ntpdate'], - 'service_name': 'ntpdate', - 'template_name': 'ntp.conf.{distro}', - 'template': None, + "ntpdate": { + "check_exe": "ntpdate", + "confpath": NTP_CONF, + "packages": ["ntpdate"], + "service_name": "ntpdate", + "template_name": "ntp.conf.{distro}", + "template": None, }, - 'systemd-timesyncd': { - 'check_exe': '/lib/systemd/systemd-timesyncd', - 'confpath': '/etc/systemd/timesyncd.conf.d/cloud-init.conf', - 'packages': [], - 'service_name': 'systemd-timesyncd', - 'template_name': 'timesyncd.conf', - 'template': None, + "systemd-timesyncd": { + "check_exe": "/lib/systemd/systemd-timesyncd", + "confpath": "/etc/systemd/timesyncd.conf.d/cloud-init.conf", + "packages": [], + "service_name": "systemd-timesyncd", + "template_name": "timesyncd.conf", + "template": None, }, } # This is Distro-specific configuration overrides of the base config DISTRO_CLIENT_CONFIG = { - 'alpine': { - 'chrony': { - 'confpath': '/etc/chrony/chrony.conf', - 'service_name': 'chronyd', + "alpine": { + "chrony": { + "confpath": "/etc/chrony/chrony.conf", + "service_name": "chronyd", }, - 'ntp': { - 'confpath': '/etc/ntp.conf', - 'packages': [], - 'service_name': 'ntpd', + "ntp": { + "confpath": "/etc/ntp.conf", + "packages": [], + "service_name": "ntpd", }, }, - 'debian': { - 'chrony': { - 'confpath': '/etc/chrony/chrony.conf', + "debian": { + "chrony": { + "confpath": "/etc/chrony/chrony.conf", }, }, - 'opensuse': { - 'chrony': { - 'service_name': 'chronyd', + "opensuse": { + "chrony": { + "service_name": "chronyd", }, - 'ntp': { - 'confpath': '/etc/ntp.conf', - 'service_name': 'ntpd', + "ntp": { + "confpath": "/etc/ntp.conf", + "service_name": "ntpd", }, - 'systemd-timesyncd': { - 'check_exe': '/usr/lib/systemd/systemd-timesyncd', + "systemd-timesyncd": { + "check_exe": "/usr/lib/systemd/systemd-timesyncd", }, }, - 'photon': { - 'chrony': { - 'service_name': 'chronyd', + "photon": { + "chrony": { + "service_name": "chronyd", }, - 'ntp': { - 'service_name': 'ntpd', - 'confpath': '/etc/ntp.conf' - }, - 'systemd-timesyncd': { - 'check_exe': '/usr/lib/systemd/systemd-timesyncd', - 'confpath': '/etc/systemd/timesyncd.conf', + "ntp": {"service_name": "ntpd", "confpath": "/etc/ntp.conf"}, + "systemd-timesyncd": { + "check_exe": "/usr/lib/systemd/systemd-timesyncd", + "confpath": "/etc/systemd/timesyncd.conf", }, }, - 'rhel': { - 'ntp': { - 'service_name': 'ntpd', + "rhel": { + "ntp": { + "service_name": "ntpd", }, - 'chrony': { - 'service_name': 'chronyd', + "chrony": { + "service_name": "chronyd", }, }, - 'sles': { - 'chrony': { - 'service_name': 'chronyd', + "sles": { + "chrony": { + "service_name": "chronyd", }, - 'ntp': { - 'confpath': '/etc/ntp.conf', - 'service_name': 'ntpd', + "ntp": { + "confpath": "/etc/ntp.conf", + "service_name": "ntpd", }, - 'systemd-timesyncd': { - 'check_exe': '/usr/lib/systemd/systemd-timesyncd', + "systemd-timesyncd": { + "check_exe": "/usr/lib/systemd/systemd-timesyncd", }, }, - 'ubuntu': { - 'chrony': { - 'confpath': '/etc/chrony/chrony.conf', + "ubuntu": { + "chrony": { + "confpath": 
"/etc/chrony/chrony.conf", }, }, } @@ -141,10 +149,11 @@ # configuration. meta = { - 'id': 'cc_ntp', - 'name': 'NTP', - 'title': 'enable and configure ntp', - 'description': dedent("""\ + "id": "cc_ntp", + "name": "NTP", + "title": "enable and configure ntp", + "description": dedent( + """\ Handle ntp configuration. If ntp is not installed on the system and ntp configuration is specified, ntp will be installed. If there is a default ntp config file in the image or one is present in the @@ -152,16 +161,20 @@ appended to the filename before any changes are made. A list of ntp pools and ntp servers can be provided under the ``ntp`` config key. If no ntp ``servers`` or ``pools`` are provided, 4 pools will be used - in the format ``{0-3}.{distro}.pool.ntp.org``."""), - 'distros': distros, - 'examples': [ - dedent("""\ + in the format ``{0-3}.{distro}.pool.ntp.org``.""" + ), + "distros": distros, + "examples": [ + dedent( + """\ # Override ntp with chrony configuration on Ubuntu ntp: enabled: true ntp_client: chrony # Uses cloud-init default chrony configuration - """), - dedent("""\ + """ + ), + dedent( + """\ # Provide a custom ntp client configuration ntp: enabled: true @@ -188,120 +201,137 @@ servers: - ntp.server.local - ntp.ubuntu.com - - 192.168.23.2""")], - 'frequency': PER_INSTANCE, + - 192.168.23.2""" + ), + ], + "frequency": PER_INSTANCE, } schema = { - 'type': 'object', - 'properties': { - 'ntp': { - 'type': ['object', 'null'], - 'properties': { - 'pools': { - 'type': 'array', - 'items': { - 'type': 'string', - 'format': 'hostname' - }, - 'uniqueItems': True, - 'description': dedent("""\ + "type": "object", + "properties": { + "ntp": { + "type": ["object", "null"], + "properties": { + "pools": { + "type": "array", + "items": {"type": "string", "format": "hostname"}, + "uniqueItems": True, + "description": dedent( + """\ List of ntp pools. If both pools and servers are empty, 4 default pool servers will be provided of the format ``{0-3}.{distro}.pool.ntp.org``. NOTE: for Alpine Linux when using the Busybox NTP client this setting will be ignored due to the limited - functionality of Busybox's ntpd.""") + functionality of Busybox's ntpd.""" + ), }, - 'servers': { - 'type': 'array', - 'items': { - 'type': 'string', - 'format': 'hostname' - }, - 'uniqueItems': True, - 'description': dedent("""\ + "servers": { + "type": "array", + "items": {"type": "string", "format": "hostname"}, + "uniqueItems": True, + "description": dedent( + """\ List of ntp servers. If both pools and servers are empty, 4 default pool servers will be provided with - the format ``{0-3}.{distro}.pool.ntp.org``.""") + the format ``{0-3}.{distro}.pool.ntp.org``.""" + ), }, - 'ntp_client': { - 'type': 'string', - 'default': 'auto', - 'description': dedent("""\ + "ntp_client": { + "type": "string", + "default": "auto", + "description": dedent( + """\ Name of an NTP client to use to configure system NTP. When unprovided or 'auto' the default client preferred by the distribution will be used. The following built-in client names can be used to override existing configuration defaults: chrony, ntp, ntpdate, - systemd-timesyncd."""), + systemd-timesyncd.""" + ), }, - 'enabled': { - 'type': 'boolean', - 'default': True, - 'description': dedent("""\ + "enabled": { + "type": "boolean", + "default": True, + "description": dedent( + """\ Attempt to enable ntp clients if set to True. 
If set to False, ntp client will not be configured or - installed"""), + installed""" + ), }, - 'config': { - 'description': dedent("""\ + "config": { + "description": dedent( + """\ Configuration settings or overrides for the - ``ntp_client`` specified."""), - 'type': ['object'], - 'properties': { - 'confpath': { - 'type': 'string', - 'description': dedent("""\ + ``ntp_client`` specified.""" + ), + "type": ["object"], + "properties": { + "confpath": { + "type": "string", + "description": dedent( + """\ The path to where the ``ntp_client`` - configuration is written."""), + configuration is written.""" + ), }, - 'check_exe': { - 'type': 'string', - 'description': dedent("""\ + "check_exe": { + "type": "string", + "description": dedent( + """\ The executable name for the ``ntp_client``. For example, ntp service ``check_exe`` is - 'ntpd' because it runs the ntpd binary."""), + 'ntpd' because it runs the ntpd binary.""" + ), }, - 'packages': { - 'type': 'array', - 'items': { - 'type': 'string', + "packages": { + "type": "array", + "items": { + "type": "string", }, - 'uniqueItems': True, - 'description': dedent("""\ + "uniqueItems": True, + "description": dedent( + """\ List of packages needed to be installed for the - selected ``ntp_client``."""), + selected ``ntp_client``.""" + ), }, - 'service_name': { - 'type': 'string', - 'description': dedent("""\ + "service_name": { + "type": "string", + "description": dedent( + """\ The systemd or sysvinit service name used to start and stop the ``ntp_client`` - service."""), + service.""" + ), }, - 'template': { - 'type': 'string', - 'description': dedent("""\ + "template": { + "type": "string", + "description": dedent( + """\ Inline template allowing users to define their own ``ntp_client`` configuration template. The value must start with '## template:jinja' to enable use of templating support. - """), + """ + ), }, }, # Don't use REQUIRED_NTP_CONFIG_KEYS to allow for override # of builtin client values. 
- 'minProperties': 1, # If we have config, define something - 'additionalProperties': False + "minProperties": 1, # If we have config, define something + "additionalProperties": False, }, }, - 'additionalProperties': False + "additionalProperties": False, } - } + }, } -REQUIRED_NTP_CONFIG_KEYS = frozenset([ - 'check_exe', 'confpath', 'packages', 'service_name']) +REQUIRED_NTP_CONFIG_KEYS = frozenset( + ["check_exe", "confpath", "packages", "service_name"] +) __doc__ = get_meta_doc(meta, schema) # Supplement python help() @@ -334,21 +364,23 @@ def select_ntp_client(ntp_client, distro): distro_cfg = distro_ntp_client_configs(distro.name) # user specified client, return its config - if ntp_client and ntp_client != 'auto': - LOG.debug('Selected NTP client "%s" via user-data configuration', - ntp_client) + if ntp_client and ntp_client != "auto": + LOG.debug( + 'Selected NTP client "%s" via user-data configuration', ntp_client + ) return distro_cfg.get(ntp_client, {}) # default to auto if unset in distro - distro_ntp_client = distro.get_option('ntp_client', 'auto') + distro_ntp_client = distro.get_option("ntp_client", "auto") clientcfg = {} if distro_ntp_client == "auto": for client in distro.preferred_ntp_clients: cfg = distro_cfg.get(client) - if subp.which(cfg.get('check_exe')): - LOG.debug('Selected NTP client "%s", already installed', - client) + if subp.which(cfg.get("check_exe")): + LOG.debug( + 'Selected NTP client "%s", already installed', client + ) clientcfg = cfg break @@ -356,11 +388,14 @@ def select_ntp_client(ntp_client, distro): client = distro.preferred_ntp_clients[0] LOG.debug( 'Selected distro preferred NTP client "%s", not yet installed', - client) + client, + ) clientcfg = distro_cfg.get(client) else: - LOG.debug('Selected NTP client "%s" via distro system config', - distro_ntp_client) + LOG.debug( + 'Selected NTP client "%s" via distro system config', + distro_ntp_client, + ) clientcfg = distro_cfg.get(distro_ntp_client, {}) return clientcfg @@ -378,7 +413,7 @@ def install_ntp_client(install_func, packages=None, check_exe="ntpd"): if subp.which(check_exe): return if packages is None: - packages = ['ntp'] + packages = ["ntp"] install_func(packages) @@ -403,25 +438,34 @@ def generate_server_names(distro): names = [] pool_distro = distro - if distro == 'sles': + if distro == "sles": # For legal reasons x.pool.sles.ntp.org does not exist, # use the opensuse pool - pool_distro = 'opensuse' - elif distro == 'alpine' or distro == 'eurolinux': + pool_distro = "opensuse" + elif distro == "alpine" or distro == "eurolinux": # Alpine-specific pool (i.e. x.alpine.pool.ntp.org) does not exist # so use general x.pool.ntp.org instead. The same applies to EuroLinux - pool_distro = '' + pool_distro = "" for x in range(0, NR_POOL_SERVERS): - names.append(".".join( - [n for n in [str(x)] + [pool_distro] + ['pool.ntp.org'] if n])) + names.append( + ".".join( + [n for n in [str(x)] + [pool_distro] + ["pool.ntp.org"] if n] + ) + ) return names -def write_ntp_config_template(distro_name, service_name=None, servers=None, - pools=None, path=None, template_fn=None, - template=None): +def write_ntp_config_template( + distro_name, + service_name=None, + servers=None, + pools=None, + path=None, + template_fn=None, + template=None, +): """Render a ntp client configuration for the specified client. @param distro_name: string. The distro class name. 
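generate_server_names, shown above with NR_POOL_SERVERS = 4, special-cases sles (legal reasons) and alpine/eurolinux (no distro pool). An illustrative reimplementation with assertions:

# Four pool hosts per distro, with the exceptions from the hunk above.
def pool_names(distro, count=4):
    pool_distro = {"sles": "opensuse", "alpine": "", "eurolinux": ""}.get(
        distro, distro)
    return [".".join(p for p in (str(i), pool_distro, "pool.ntp.org") if p)
            for i in range(count)]


assert pool_names("ubuntu")[0] == "0.ubuntu.pool.ntp.org"
assert pool_names("sles")[1] == "1.opensuse.pool.ntp.org"
assert pool_names("alpine")[2] == "2.pool.ntp.org"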
@@ -444,27 +488,30 @@ def write_ntp_config_template(distro_name, service_name=None, servers=None, if not pools: pools = [] - if (len(servers) == 0 and distro_name == 'alpine' and - service_name == 'ntpd'): + if ( + len(servers) == 0 + and distro_name == "alpine" + and service_name == "ntpd" + ): # Alpine's Busybox ntpd only understands "servers" configuration # and not "pool" configuration. servers = generate_server_names(distro_name) - LOG.debug( - 'Adding distro default ntp servers: %s', ','.join(servers)) + LOG.debug("Adding distro default ntp servers: %s", ",".join(servers)) elif len(servers) == 0 and len(pools) == 0: pools = generate_server_names(distro_name) LOG.debug( - 'Adding distro default ntp pool servers: %s', ','.join(pools)) + "Adding distro default ntp pool servers: %s", ",".join(pools) + ) if not path: - raise ValueError('Invalid value for path parameter') + raise ValueError("Invalid value for path parameter") if not template_fn and not template: - raise ValueError('Not template_fn or template provided') + raise ValueError("Not template_fn or template provided") - params = {'servers': servers, 'pools': pools} + params = {"servers": servers, "pools": pools} if template: - tfile = temp_utils.mkstemp(prefix='template_name-', suffix=".tmpl") + tfile = temp_utils.mkstemp(prefix="template_name-", suffix=".tmpl") template_fn = tfile[1] # filepath is second item in tuple util.write_file(template_fn, content=template) @@ -487,50 +534,62 @@ def supplemental_schema_validation(ntp_config): errors = [] missing = REQUIRED_NTP_CONFIG_KEYS.difference(set(ntp_config.keys())) if missing: - keys = ', '.join(sorted(missing)) + keys = ", ".join(sorted(missing)) errors.append( - 'Missing required ntp:config keys: {keys}'.format(keys=keys)) - elif not any([ntp_config.get('template'), - ntp_config.get('template_name')]): + "Missing required ntp:config keys: {keys}".format(keys=keys) + ) + elif not any( + [ntp_config.get("template"), ntp_config.get("template_name")] + ): errors.append( - 'Either ntp:config:template or ntp:config:template_name values' - ' are required') + "Either ntp:config:template or ntp:config:template_name values" + " are required" + ) for key, value in sorted(ntp_config.items()): - keypath = 'ntp:config:' + key - if key == 'confpath': + keypath = "ntp:config:" + key + if key == "confpath": if not all([value, isinstance(value, str)]): errors.append( - 'Expected a config file path {keypath}.' - ' Found ({value})'.format(keypath=keypath, value=value)) - elif key == 'packages': + "Expected a config file path {keypath}." + " Found ({value})".format(keypath=keypath, value=value) + ) + elif key == "packages": if not isinstance(value, list): errors.append( - 'Expected a list of required package names for {keypath}.' - ' Found ({value})'.format(keypath=keypath, value=value)) - elif key in ('template', 'template_name'): + "Expected a list of required package names for {keypath}." + " Found ({value})".format(keypath=keypath, value=value) + ) + elif key in ("template", "template_name"): if value is None: # Either template or template_name can be none continue if not isinstance(value, str): errors.append( - 'Expected a string type for {keypath}.' - ' Found ({value})'.format(keypath=keypath, value=value)) + "Expected a string type for {keypath}." + " Found ({value})".format(keypath=keypath, value=value) + ) elif not isinstance(value, str): errors.append( - 'Expected a string type for {keypath}.' 
- ' Found ({value})'.format(keypath=keypath, value=value)) + "Expected a string type for {keypath}. Found ({value})".format( + keypath=keypath, value=value + ) + ) if errors: - raise ValueError(r'Invalid ntp configuration:\n{errors}'.format( - errors='\n'.join(errors))) + raise ValueError( + r"Invalid ntp configuration:\n{errors}".format( + errors="\n".join(errors) + ) + ) def handle(name, cfg, cloud, log, _args): """Enable and configure ntp.""" - if 'ntp' not in cfg: + if "ntp" not in cfg: LOG.debug( - "Skipping module named %s, not present or disabled by cfg", name) + "Skipping module named %s, not present or disabled by cfg", name + ) return - ntp_cfg = cfg['ntp'] + ntp_cfg = cfg["ntp"] if ntp_cfg is None: ntp_cfg = {} # Allow empty config which will install the package @@ -538,52 +597,61 @@ def handle(name, cfg, cloud, log, _args): if not isinstance(ntp_cfg, (dict)): raise RuntimeError( "'ntp' key existed in config, but not a dictionary type," - " is a {_type} instead".format(_type=type_utils.obj_name(ntp_cfg))) + " is a {_type} instead".format(_type=type_utils.obj_name(ntp_cfg)) + ) validate_cloudconfig_schema(cfg, schema) # Allow users to explicitly enable/disable - enabled = ntp_cfg.get('enabled', True) + enabled = ntp_cfg.get("enabled", True) if util.is_false(enabled): LOG.debug("Skipping module named %s, disabled by cfg", name) return # Select which client is going to be used and get the configuration - ntp_client_config = select_ntp_client(ntp_cfg.get('ntp_client'), - cloud.distro) + ntp_client_config = select_ntp_client( + ntp_cfg.get("ntp_client"), cloud.distro + ) # Allow user ntp config to override distro configurations ntp_client_config = util.mergemanydict( - [ntp_client_config, ntp_cfg.get('config', {})], reverse=True) + [ntp_client_config, ntp_cfg.get("config", {})], reverse=True + ) supplemental_schema_validation(ntp_client_config) - rename_ntp_conf(confpath=ntp_client_config.get('confpath')) + rename_ntp_conf(confpath=ntp_client_config.get("confpath")) template_fn = None - if not ntp_client_config.get('template'): - template_name = ( - ntp_client_config.get('template_name').replace('{distro}', - cloud.distro.name)) + if not ntp_client_config.get("template"): + template_name = ntp_client_config.get("template_name").replace( + "{distro}", cloud.distro.name + ) template_fn = cloud.get_template_filename(template_name) if not template_fn: - msg = ('No template found, not rendering %s' % - ntp_client_config.get('template_name')) + msg = ( + "No template found, not rendering %s" + % ntp_client_config.get("template_name") + ) raise RuntimeError(msg) - write_ntp_config_template(cloud.distro.name, - service_name=ntp_client_config.get( - 'service_name'), - servers=ntp_cfg.get('servers', []), - pools=ntp_cfg.get('pools', []), - path=ntp_client_config.get('confpath'), - template_fn=template_fn, - template=ntp_client_config.get('template')) - - install_ntp_client(cloud.distro.install_packages, - packages=ntp_client_config['packages'], - check_exe=ntp_client_config['check_exe']) + write_ntp_config_template( + cloud.distro.name, + service_name=ntp_client_config.get("service_name"), + servers=ntp_cfg.get("servers", []), + pools=ntp_cfg.get("pools", []), + path=ntp_client_config.get("confpath"), + template_fn=template_fn, + template=ntp_client_config.get("template"), + ) + + install_ntp_client( + cloud.distro.install_packages, + packages=ntp_client_config["packages"], + check_exe=ntp_client_config["check_exe"], + ) try: - cloud.distro.manage_service('reload', - 
-                                    ntp_client_config.get('service_name'))
+        cloud.distro.manage_service(
+            "reload", ntp_client_config.get("service_name")
+        )
     except subp.ProcessExecutionError as e:
         LOG.exception("Failed to reload/start ntp service: %s", e)
         raise
 
diff --git a/cloudinit/config/cc_package_update_upgrade_install.py b/cloudinit/config/cc_package_update_upgrade_install.py
index 036baf85ee9..14cdfab8ab5 100644
--- a/cloudinit/config/cc_package_update_upgrade_install.py
+++ b/cloudinit/config/cc_package_update_upgrade_install.py
@@ -43,8 +43,7 @@
 import time
 
 from cloudinit import log as logging
-from cloudinit import subp
-from cloudinit import util
+from cloudinit import subp, util
 
 REBOOT_FILE = "/var/run/reboot-required"
 REBOOT_CMD = ["/sbin/reboot"]
@@ -68,17 +67,19 @@ def _fire_reboot(log, wait_attempts=6, initial_sleep=1, backoff=2):
         log.debug("Rebooted, but still running after %s seconds", int(elapsed))
     # If we got here, not good
     elapsed = time.time() - start
-    raise RuntimeError(("Reboot did not happen"
-                        " after %s seconds!") % (int(elapsed)))
+    raise RuntimeError(
+        "Reboot did not happen after %s seconds!" % (int(elapsed))
+    )
 
 
 def handle(_name, cfg, cloud, log, _args):
     # Handle the old style + new config names
-    update = _multi_cfg_bool_get(cfg, 'apt_update', 'package_update')
-    upgrade = _multi_cfg_bool_get(cfg, 'package_upgrade', 'apt_upgrade')
-    reboot_if_required = _multi_cfg_bool_get(cfg, 'apt_reboot_if_required',
-                                             'package_reboot_if_required')
-    pkglist = util.get_cfg_option_list(cfg, 'packages', [])
+    update = _multi_cfg_bool_get(cfg, "apt_update", "package_update")
+    upgrade = _multi_cfg_bool_get(cfg, "package_upgrade", "apt_upgrade")
+    reboot_if_required = _multi_cfg_bool_get(
+        cfg, "apt_reboot_if_required", "package_reboot_if_required"
+    )
+    pkglist = util.get_cfg_option_list(cfg, "packages", [])
 
     errors = []
     if update or len(pkglist) or upgrade:
@@ -109,8 +110,9 @@ def handle(_name, cfg, cloud, log, _args):
     reboot_fn_exists = os.path.isfile(REBOOT_FILE)
     if (upgrade or pkglist) and reboot_if_required and reboot_fn_exists:
         try:
-            log.warning("Rebooting after upgrade or install per "
-                        "%s", REBOOT_FILE)
+            log.warning(
+                "Rebooting after upgrade or install per %s", REBOOT_FILE
+            )
             # Flush the above warning + anything else out...
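_fire_reboot's wait loop above backs off exponentially before giving up. A dry-run model of just the waiting; the real module issues REBOOT_CMD first, which is omitted here:

import time


def wait_for_reboot(wait_attempts=6, initial_sleep=1, backoff=2):
    start = time.time()
    nap = initial_sleep
    for _ in range(wait_attempts):
        time.sleep(nap)  # worst case total: 1+2+4+8+16+32 = 63 seconds
        nap *= backoff
    raise RuntimeError(
        "Reboot did not happen after %s seconds!" % int(time.time() - start))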
logging.flushLoggers(log) _fire_reboot(log) @@ -119,8 +121,10 @@ def handle(_name, cfg, cloud, log, _args): errors.append(e) if len(errors): - log.warning("%s failed with exceptions, re-raising the last one", - len(errors)) + log.warning( + "%s failed with exceptions, re-raising the last one", len(errors) + ) raise errors[-1] + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_phone_home.py b/cloudinit/config/cc_phone_home.py index 733c391086f..cc1fe53e8e6 100644 --- a/cloudinit/config/cc_phone_home.py +++ b/cloudinit/config/cc_phone_home.py @@ -41,22 +41,19 @@ tries: 10 """ -from cloudinit import templater -from cloudinit import url_helper -from cloudinit import util - +from cloudinit import templater, url_helper, util from cloudinit.settings import PER_INSTANCE frequency = PER_INSTANCE POST_LIST_ALL = [ - 'pub_key_dsa', - 'pub_key_rsa', - 'pub_key_ecdsa', - 'pub_key_ed25519', - 'instance_id', - 'hostname', - 'fqdn' + "pub_key_dsa", + "pub_key_rsa", + "pub_key_ecdsa", + "pub_key_ed25519", + "instance_id", + "hostname", + "fqdn", ] @@ -74,48 +71,58 @@ def handle(name, cfg, cloud, log, args): if len(args) != 0: ph_cfg = util.read_conf(args[0]) else: - if 'phone_home' not in cfg: - log.debug(("Skipping module named %s, " - "no 'phone_home' configuration found"), name) + if "phone_home" not in cfg: + log.debug( + "Skipping module named %s, " + "no 'phone_home' configuration found", + name, + ) return - ph_cfg = cfg['phone_home'] - - if 'url' not in ph_cfg: - log.warning(("Skipping module named %s, " - "no 'url' found in 'phone_home' configuration"), name) + ph_cfg = cfg["phone_home"] + + if "url" not in ph_cfg: + log.warning( + "Skipping module named %s, " + "no 'url' found in 'phone_home' configuration", + name, + ) return - url = ph_cfg['url'] - post_list = ph_cfg.get('post', 'all') - tries = ph_cfg.get('tries') + url = ph_cfg["url"] + post_list = ph_cfg.get("post", "all") + tries = ph_cfg.get("tries") try: tries = int(tries) except Exception: tries = 10 - util.logexc(log, "Configuration entry 'tries' is not an integer, " - "using %s instead", tries) + util.logexc( + log, + "Configuration entry 'tries' is not an integer, using %s instead", + tries, + ) if post_list == "all": post_list = POST_LIST_ALL all_keys = {} - all_keys['instance_id'] = cloud.get_instance_id() - all_keys['hostname'] = cloud.get_hostname() - all_keys['fqdn'] = cloud.get_hostname(fqdn=True) + all_keys["instance_id"] = cloud.get_instance_id() + all_keys["hostname"] = cloud.get_hostname() + all_keys["fqdn"] = cloud.get_hostname(fqdn=True) pubkeys = { - 'pub_key_dsa': '/etc/ssh/ssh_host_dsa_key.pub', - 'pub_key_rsa': '/etc/ssh/ssh_host_rsa_key.pub', - 'pub_key_ecdsa': '/etc/ssh/ssh_host_ecdsa_key.pub', - 'pub_key_ed25519': '/etc/ssh/ssh_host_ed25519_key.pub', + "pub_key_dsa": "/etc/ssh/ssh_host_dsa_key.pub", + "pub_key_rsa": "/etc/ssh/ssh_host_rsa_key.pub", + "pub_key_ecdsa": "/etc/ssh/ssh_host_ecdsa_key.pub", + "pub_key_ed25519": "/etc/ssh/ssh_host_ed25519_key.pub", } for (n, path) in pubkeys.items(): try: all_keys[n] = util.load_file(path) except Exception: - util.logexc(log, "%s: failed to open, can not phone home that " - "data!", path) + util.logexc( + log, "%s: failed to open, can not phone home that data!", path + ) submit_keys = {} for k in post_list: @@ -123,28 +130,37 @@ def handle(name, cfg, cloud, log, args): submit_keys[k] = all_keys[k] else: submit_keys[k] = None - log.warning(("Requested key %s from 'post'" - " configuration list not available"), k) + log.warning( + "Requested key %s from 'post'" + " 
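The phone-home flow above substitutes "N/A" for unreadable keys and expands an INSTANCE_ID placeholder in the URL. A compact sketch with made-up values; str.replace stands in for templater.render_string, whose placeholder syntax is assumed here:

# all_keys/post_list are sample data shaped like the hunk above.
all_keys = {"instance_id": "i-0123", "hostname": "myhost", "fqdn": None}
post_list = ["instance_id", "fqdn"]

submit = {k: ("N/A" if all_keys.get(k) is None else str(all_keys[k]))
          for k in post_list}
url = "http://example.com/$INSTANCE_ID/".replace(
    "$INSTANCE_ID", submit["instance_id"])  # stand-in for the templater call
print(url, submit)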
configuration list not available", + k, + ) # Get them read to be posted real_submit_keys = {} for (k, v) in submit_keys.items(): if v is None: - real_submit_keys[k] = 'N/A' + real_submit_keys[k] = "N/A" else: real_submit_keys[k] = str(v) # Incase the url is parameterized url_params = { - 'INSTANCE_ID': all_keys['instance_id'], + "INSTANCE_ID": all_keys["instance_id"], } url = templater.render_string(url, url_params) try: url_helper.read_file_or_url( - url, data=real_submit_keys, retries=tries, sec_between=3, - ssl_details=util.fetch_ssl_details(cloud.paths)) + url, + data=real_submit_keys, + retries=tries, + sec_between=3, + ssl_details=util.fetch_ssl_details(cloud.paths), + ) except Exception: - util.logexc(log, "Failed to post phone home data to %s in %s tries", - url, tries) + util.logexc( + log, "Failed to post phone home data to %s in %s tries", url, tries + ) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py index 5780a7e9dc2..d4eb68c0014 100644 --- a/cloudinit/config/cc_power_state_change.py +++ b/cloudinit/config/cc_power_state_change.py @@ -58,9 +58,8 @@ import subprocess import time +from cloudinit import subp, util from cloudinit.settings import PER_INSTANCE -from cloudinit import subp -from cloudinit import util frequency = PER_INSTANCE @@ -75,9 +74,9 @@ def givecmdline(pid): # PID COMM ARGS # 1 init /bin/init -- if util.is_FreeBSD(): - (output, _err) = subp.subp(['procstat', '-c', str(pid)]) + (output, _err) = subp.subp(["procstat", "-c", str(pid)]) line = output.splitlines()[1] - m = re.search(r'\d+ (\w|\.|-)+\s+(/\w.+)', line) + m = re.search(r"\d+ (\w|\.|-)+\s+(/\w.+)", line) return m.group(2) else: return util.load_file("/proc/%s/cmdline" % pid) @@ -106,8 +105,9 @@ def check_condition(cond, log=None): return False else: if log: - log.warning(pre + "unexpected exit %s. " % ret + - "do not apply change.") + log.warning( + pre + "unexpected exit %s. " % ret + "do not apply change." + ) return False except Exception as e: if log: @@ -138,16 +138,24 @@ def handle(_name, cfg, cloud, log, _args): devnull_fp = open(os.devnull, "w") - log.debug("After pid %s ends, will execute: %s" % (mypid, ' '.join(args))) + log.debug("After pid %s ends, will execute: %s" % (mypid, " ".join(args))) - util.fork_cb(run_after_pid_gone, mypid, cmdline, timeout, log, - condition, execmd, [args, devnull_fp]) + util.fork_cb( + run_after_pid_gone, + mypid, + cmdline, + timeout, + log, + condition, + execmd, + [args, devnull_fp], + ) def load_power_state(cfg, distro): # returns a tuple of shutdown_command, timeout # shutdown_command is None if no config found - pstate = cfg.get('power_state') + pstate = cfg.get("power_state") if pstate is None: return (None, None, None) @@ -155,22 +163,25 @@ def load_power_state(cfg, distro): if not isinstance(pstate, dict): raise TypeError("power_state is not a dict.") - modes_ok = ['halt', 'poweroff', 'reboot'] + modes_ok = ["halt", "poweroff", "reboot"] mode = pstate.get("mode") if mode not in distro.shutdown_options_map: raise TypeError( - "power_state[mode] required, must be one of: %s. found: '%s'." % - (','.join(modes_ok), mode)) + "power_state[mode] required, must be one of: %s. found: '%s'." 
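check_condition above accepts either a boolean or a command. A condensed version mapping exit 0/1 to True/False and refusing to act on anything else, as the hunk's warning path does:

import subprocess
import sys


def check_condition(cond):
    if isinstance(cond, bool):
        return cond
    ret = subprocess.call(cond)
    if ret in (0, 1):
        return ret == 0
    return False  # unexpected exit: do not apply the power state change


print(check_condition(True))
print(check_condition([sys.executable, "-c", "raise SystemExit(1)"]))  # False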
+ % (",".join(modes_ok), mode) + ) - args = distro.shutdown_command(mode=mode, - delay=pstate.get("delay", "now"), - message=pstate.get("message")) + args = distro.shutdown_command( + mode=mode, + delay=pstate.get("delay", "now"), + message=pstate.get("message"), + ) try: - timeout = float(pstate.get('timeout', 30.0)) + timeout = float(pstate.get("timeout", 30.0)) except ValueError as e: raise ValueError( - "failed to convert timeout '%s' to float." % pstate['timeout'] + "failed to convert timeout '%s' to float." % pstate["timeout"] ) from e condition = pstate.get("condition", True) @@ -186,8 +197,12 @@ def doexit(sysexit): def execmd(exe_args, output=None, data_in=None): ret = 1 try: - proc = subprocess.Popen(exe_args, stdin=subprocess.PIPE, - stdout=output, stderr=subprocess.STDOUT) + proc = subprocess.Popen( + exe_args, + stdin=subprocess.PIPE, + stdout=output, + stderr=subprocess.STDOUT, + ) proc.communicate(data_in) ret = proc.returncode except Exception: @@ -230,7 +245,7 @@ def fatal(msg): except Exception as e: fatal("Unexpected Exception: %s" % e) - time.sleep(.25) + time.sleep(0.25) if not msg: fatal("Unexpected error in run_after_pid_gone") @@ -246,4 +261,5 @@ def fatal(msg): func(*args) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py index dc20fc44e5e..f51f49bcc21 100644 --- a/cloudinit/config/cc_puppet.py +++ b/cloudinit/config/cc_puppet.py @@ -108,23 +108,20 @@ import os import socket -import yaml from io import StringIO -from cloudinit import helpers -from cloudinit import subp -from cloudinit import temp_utils -from cloudinit import util -from cloudinit import url_helper +import yaml -AIO_INSTALL_URL = 'https://raw.githubusercontent.com/puppetlabs/install-puppet/main/install.sh' # noqa: E501 -PUPPET_AGENT_DEFAULT_ARGS = ['--test'] +from cloudinit import helpers, subp, temp_utils, url_helper, util +AIO_INSTALL_URL = "https://raw.githubusercontent.com/puppetlabs/install-puppet/main/install.sh" # noqa: E501 +PUPPET_AGENT_DEFAULT_ARGS = ["--test"] -class PuppetConstants(object): - def __init__(self, puppet_conf_file, puppet_ssl_dir, - csr_attributes_path, log): +class PuppetConstants(object): + def __init__( + self, puppet_conf_file, puppet_ssl_dir, csr_attributes_path, log + ): self.conf_path = puppet_conf_file self.ssl_dir = puppet_ssl_dir self.ssl_cert_dir = os.path.join(puppet_ssl_dir, "certs") @@ -134,18 +131,27 @@ def __init__(self, puppet_conf_file, puppet_ssl_dir, def _autostart_puppet(log): # Set puppet to automatically start - if os.path.exists('/etc/default/puppet'): - subp.subp(['sed', '-i', - '-e', 's/^START=.*/START=yes/', - '/etc/default/puppet'], capture=False) - elif os.path.exists('/bin/systemctl'): - subp.subp(['/bin/systemctl', 'enable', 'puppet.service'], - capture=False) - elif os.path.exists('/sbin/chkconfig'): - subp.subp(['/sbin/chkconfig', 'puppet', 'on'], capture=False) + if os.path.exists("/etc/default/puppet"): + subp.subp( + [ + "sed", + "-i", + "-e", + "s/^START=.*/START=yes/", + "/etc/default/puppet", + ], + capture=False, + ) + elif os.path.exists("/bin/systemctl"): + subp.subp( + ["/bin/systemctl", "enable", "puppet.service"], capture=False + ) + elif os.path.exists("/sbin/chkconfig"): + subp.subp(["/sbin/chkconfig", "puppet", "on"], capture=False) else: - log.warning(("Sorry we do not know how to enable" - " puppet services on this system")) + log.warning( + "Sorry we do not know how to enable puppet services on this system" + ) def get_config_value(puppet_bin, setting): @@ -153,12 +159,13 @@ 
def get_config_value(puppet_bin, setting): :param puppet_bin: path to puppet binary :param setting: setting to query """ - out, _ = subp.subp([puppet_bin, 'config', 'print', setting]) + out, _ = subp.subp([puppet_bin, "config", "print", setting]) return out.rstrip() -def install_puppet_aio(url=AIO_INSTALL_URL, version=None, - collection=None, cleanup=True): +def install_puppet_aio( + url=AIO_INSTALL_URL, version=None, collection=None, cleanup=True +): """Install puppet-agent from the puppetlabs repositories using the one-shot shell script @@ -169,62 +176,70 @@ def install_puppet_aio(url=AIO_INSTALL_URL, version=None, """ args = [] if version is not None: - args = ['-v', version] + args = ["-v", version] if collection is not None: - args += ['-c', collection] + args += ["-c", collection] # Purge puppetlabs repos after installation if cleanup: - args += ['--cleanup'] + args += ["--cleanup"] content = url_helper.readurl(url=url, retries=5).contents # Use tmpdir over tmpfile to avoid 'text file busy' on execute with temp_utils.tempdir(needs_exe=True) as tmpd: - tmpf = os.path.join(tmpd, 'puppet-install') + tmpf = os.path.join(tmpd, "puppet-install") util.write_file(tmpf, content, mode=0o700) return subp.subp([tmpf] + args, capture=False) def handle(name, cfg, cloud, log, _args): # If there isn't a puppet key in the configuration don't do anything - if 'puppet' not in cfg: - log.debug(("Skipping module named %s," - " no 'puppet' configuration found"), name) + if "puppet" not in cfg: + log.debug( + "Skipping module named %s, no 'puppet' configuration found", name + ) return - puppet_cfg = cfg['puppet'] + puppet_cfg = cfg["puppet"] # Start by installing the puppet package if necessary... - install = util.get_cfg_option_bool(puppet_cfg, 'install', True) - version = util.get_cfg_option_str(puppet_cfg, 'version', None) - collection = util.get_cfg_option_str(puppet_cfg, 'collection', None) + install = util.get_cfg_option_bool(puppet_cfg, "install", True) + version = util.get_cfg_option_str(puppet_cfg, "version", None) + collection = util.get_cfg_option_str(puppet_cfg, "collection", None) install_type = util.get_cfg_option_str( - puppet_cfg, 'install_type', 'packages') - cleanup = util.get_cfg_option_bool(puppet_cfg, 'cleanup', True) - run = util.get_cfg_option_bool(puppet_cfg, 'exec', default=False) - start_puppetd = util.get_cfg_option_bool(puppet_cfg, - 'start_service', - default=True) + puppet_cfg, "install_type", "packages" + ) + cleanup = util.get_cfg_option_bool(puppet_cfg, "cleanup", True) + run = util.get_cfg_option_bool(puppet_cfg, "exec", default=False) + start_puppetd = util.get_cfg_option_bool( + puppet_cfg, "start_service", default=True + ) aio_install_url = util.get_cfg_option_str( - puppet_cfg, 'aio_install_url', default=AIO_INSTALL_URL) + puppet_cfg, "aio_install_url", default=AIO_INSTALL_URL + ) # AIO and distro packages use different paths - if install_type == 'aio': - puppet_user = 'root' - puppet_bin = '/opt/puppetlabs/bin/puppet' - puppet_package = 'puppet-agent' + if install_type == "aio": + puppet_user = "root" + puppet_bin = "/opt/puppetlabs/bin/puppet" + puppet_package = "puppet-agent" else: # default to 'packages' - puppet_user = 'puppet' - puppet_bin = 'puppet' - puppet_package = 'puppet' + puppet_user = "puppet" + puppet_bin = "puppet" + puppet_package = "puppet" package_name = util.get_cfg_option_str( - puppet_cfg, 'package_name', puppet_package) + puppet_cfg, "package_name", puppet_package + ) if not install and version: - log.warning(("Puppet install set to false but 
version supplied," - " doing nothing.")) + log.warning( + "Puppet install set to false but version supplied, doing nothing." + ) elif install: - log.debug(("Attempting to install puppet %s from %s"), - version if version else 'latest', install_type) + log.debug( + "Attempting to install puppet %s from %s", + version if version else "latest", + install_type, + ) if install_type == "packages": cloud.distro.install_packages((package_name, version)) @@ -235,17 +250,21 @@ def handle(name, cfg, cloud, log, _args): run = False conf_file = util.get_cfg_option_str( - puppet_cfg, 'conf_file', get_config_value(puppet_bin, 'config')) + puppet_cfg, "conf_file", get_config_value(puppet_bin, "config") + ) ssl_dir = util.get_cfg_option_str( - puppet_cfg, 'ssl_dir', get_config_value(puppet_bin, 'ssldir')) + puppet_cfg, "ssl_dir", get_config_value(puppet_bin, "ssldir") + ) csr_attributes_path = util.get_cfg_option_str( - puppet_cfg, 'csr_attributes_path', - get_config_value(puppet_bin, 'csr_attributes')) + puppet_cfg, + "csr_attributes_path", + get_config_value(puppet_bin, "csr_attributes"), + ) p_constants = PuppetConstants(conf_file, ssl_dir, csr_attributes_path, log) # ... and then update the puppet configuration - if 'conf' in puppet_cfg: + if "conf" in puppet_cfg: # Add all sections from the conf object to puppet.conf contents = util.load_file(p_constants.conf_path) # Create object for reading puppet.conf values @@ -254,30 +273,31 @@ def handle(name, cfg, cloud, log, _args): # mix the rest up. First clean them up # (TODO(harlowja) is this really needed??) cleaned_lines = [i.lstrip() for i in contents.splitlines()] - cleaned_contents = '\n'.join(cleaned_lines) + cleaned_contents = "\n".join(cleaned_lines) # Move to puppet_config.read_file when dropping py2.7 puppet_config.read_file( - StringIO(cleaned_contents), - source=p_constants.conf_path) - for (cfg_name, cfg) in puppet_cfg['conf'].items(): + StringIO(cleaned_contents), source=p_constants.conf_path + ) + for (cfg_name, cfg) in puppet_cfg["conf"].items(): # Cert configuration is a special case # Dump the puppetserver ca certificate in the correct place - if cfg_name == 'ca_cert': + if cfg_name == "ca_cert": # Puppet ssl sub-directory isn't created yet # Create it with the proper permissions and ownership util.ensure_dir(p_constants.ssl_dir, 0o771) - util.chownbyname(p_constants.ssl_dir, puppet_user, 'root') + util.chownbyname(p_constants.ssl_dir, puppet_user, "root") util.ensure_dir(p_constants.ssl_cert_dir) - util.chownbyname(p_constants.ssl_cert_dir, puppet_user, 'root') + util.chownbyname(p_constants.ssl_cert_dir, puppet_user, "root") util.write_file(p_constants.ssl_cert_path, cfg) - util.chownbyname(p_constants.ssl_cert_path, - puppet_user, 'root') + util.chownbyname( + p_constants.ssl_cert_path, puppet_user, "root" + ) else: # Iterate through the config items, we'll use ConfigParser.set # to overwrite or create new items as needed for (o, v) in cfg.items(): - if o == 'certname': + if o == "certname": # Expand %f as the fqdn # TODO(harlowja) should this use the cloud fqdn?? 
v = v.replace("%f", socket.getfqdn()) @@ -288,14 +308,16 @@ def handle(name, cfg, cloud, log, _args): puppet_config.set(cfg_name, o, v) # We got all our config as wanted we'll rename # the previous puppet.conf and create our new one - util.rename(p_constants.conf_path, "%s.old" - % (p_constants.conf_path)) + util.rename( + p_constants.conf_path, "%s.old" % (p_constants.conf_path) + ) util.write_file(p_constants.conf_path, puppet_config.stringify()) - if 'csr_attributes' in puppet_cfg: - util.write_file(p_constants.csr_attributes_path, - yaml.dump(puppet_cfg['csr_attributes'], - default_flow_style=False)) + if "csr_attributes" in puppet_cfg: + util.write_file( + p_constants.csr_attributes_path, + yaml.dump(puppet_cfg["csr_attributes"], default_flow_style=False), + ) # Set it up so it autostarts if start_puppetd: @@ -303,18 +325,21 @@ def handle(name, cfg, cloud, log, _args): # Run the agent if needed if run: - log.debug('Running puppet-agent') - cmd = [puppet_bin, 'agent'] - if 'exec_args' in puppet_cfg: - cmd_args = puppet_cfg['exec_args'] + log.debug("Running puppet-agent") + cmd = [puppet_bin, "agent"] + if "exec_args" in puppet_cfg: + cmd_args = puppet_cfg["exec_args"] if isinstance(cmd_args, (list, tuple)): cmd.extend(cmd_args) elif isinstance(cmd_args, str): cmd.extend(cmd_args.split()) else: - log.warning("Unknown type %s provided for puppet" - " 'exec_args' expected list, tuple," - " or string", type(cmd_args)) + log.warning( + "Unknown type %s provided for puppet" + " 'exec_args' expected list, tuple," + " or string", + type(cmd_args), + ) cmd.extend(PUPPET_AGENT_DEFAULT_ARGS) else: cmd.extend(PUPPET_AGENT_DEFAULT_ARGS) @@ -322,6 +347,7 @@ def handle(name, cfg, cloud, log, _args): if start_puppetd: # Start puppetd - subp.subp(['service', 'puppet', 'start'], capture=False) + subp.subp(["service", "puppet", "start"], capture=False) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_refresh_rmc_and_interface.py b/cloudinit/config/cc_refresh_rmc_and_interface.py index d5e0ecb22b6..87be534890e 100644 --- a/cloudinit/config/cc_refresh_rmc_and_interface.py +++ b/cloudinit/config/cc_refresh_rmc_and_interface.py @@ -34,20 +34,18 @@ """ +import errno + from cloudinit import log as logging +from cloudinit import netinfo, subp, util from cloudinit.settings import PER_ALWAYS -from cloudinit import util -from cloudinit import subp -from cloudinit import netinfo - -import errno frequency = PER_ALWAYS LOG = logging.getLogger(__name__) # Ensure that /opt/rsct/bin has been added to standard PATH of the # distro. The symlink to rmcctrl is /usr/sbin/rsct/bin/rmcctrl . -RMCCTRL = 'rmcctrl' +RMCCTRL = "rmcctrl" def handle(name, _cfg, _cloud, _log, _args): @@ -56,10 +54,11 @@ def handle(name, _cfg, _cloud, _log, _args): return LOG.debug( - 'Making the IPv6 up explicitly. ' - 'Ensuring IPv6 interface is not being handled by NetworkManager ' - 'and it is restarted to re-establish the communication with ' - 'the hypervisor') + "Making the IPv6 up explicitly. 
" + "Ensuring IPv6 interface is not being handled by NetworkManager " + "and it is restarted to re-establish the communication with " + "the hypervisor" + ) ifaces = find_ipv6_ifaces() @@ -80,7 +79,7 @@ def find_ipv6_ifaces(): ifaces = [] for iface, data in info.items(): if iface == "lo": - LOG.debug('Skipping localhost interface') + LOG.debug("Skipping localhost interface") if len(data.get("ipv4", [])) != 0: # skip this interface, as it has ipv4 addrs continue @@ -92,16 +91,16 @@ def refresh_ipv6(interface): # IPv6 interface is explicitly brought up, subsequent to which the # RMC services are restarted to re-establish the communication with # the hypervisor. - subp.subp(['ip', 'link', 'set', interface, 'down']) - subp.subp(['ip', 'link', 'set', interface, 'up']) + subp.subp(["ip", "link", "set", interface, "down"]) + subp.subp(["ip", "link", "set", interface, "up"]) def sysconfig_path(iface): - return '/etc/sysconfig/network-scripts/ifcfg-' + iface + return "/etc/sysconfig/network-scripts/ifcfg-" + iface def restart_network_manager(): - subp.subp(['systemctl', 'restart', 'NetworkManager']) + subp.subp(["systemctl", "restart", "NetworkManager"]) def disable_ipv6(iface_file): @@ -113,12 +112,11 @@ def disable_ipv6(iface_file): contents = util.load_file(iface_file) except IOError as e: if e.errno == errno.ENOENT: - LOG.debug("IPv6 interface file %s does not exist\n", - iface_file) + LOG.debug("IPv6 interface file %s does not exist\n", iface_file) else: raise e - if 'IPV6INIT' not in contents: + if "IPV6INIT" not in contents: LOG.debug("Interface file %s did not have IPV6INIT", iface_file) return @@ -135,11 +133,12 @@ def disable_ipv6(iface_file): def search(contents): # Search for any NM_CONTROLLED or IPV6 lines in IPv6 interface file. - return( - contents.startswith("IPV6ADDR") or - contents.startswith("IPADDR6") or - contents.startswith("IPV6INIT") or - contents.startswith("NM_CONTROLLED")) + return ( + contents.startswith("IPV6ADDR") + or contents.startswith("IPADDR6") + or contents.startswith("IPV6INIT") + or contents.startswith("NM_CONTROLLED") + ) def refresh_rmc(): @@ -152,8 +151,8 @@ def refresh_rmc(): # until the subsystem and all resource managers are stopped. # -s : start Resource Monitoring & Control subsystem. try: - subp.subp([RMCCTRL, '-z']) - subp.subp([RMCCTRL, '-s']) + subp.subp([RMCCTRL, "-z"]) + subp.subp([RMCCTRL, "-s"]) except Exception: - util.logexc(LOG, 'Failed to refresh the RMC subsystem.') + util.logexc(LOG, "Failed to refresh the RMC subsystem.") raise diff --git a/cloudinit/config/cc_reset_rmc.py b/cloudinit/config/cc_reset_rmc.py index 1cd72774bf2..3b9299031d9 100644 --- a/cloudinit/config/cc_reset_rmc.py +++ b/cloudinit/config/cc_reset_rmc.py @@ -39,9 +39,8 @@ import os from cloudinit import log as logging +from cloudinit import subp, util from cloudinit.settings import PER_INSTANCE -from cloudinit import util -from cloudinit import subp frequency = PER_INSTANCE @@ -49,34 +48,34 @@ # The symlink for RMCCTRL and RECFGCT are # /usr/sbin/rsct/bin/rmcctrl and # /usr/sbin/rsct/install/bin/recfgct respectively. 
-RSCT_PATH = '/opt/rsct/install/bin'
-RMCCTRL = 'rmcctrl'
-RECFGCT = 'recfgct'
+RSCT_PATH = "/opt/rsct/install/bin"
+RMCCTRL = "rmcctrl"
+RECFGCT = "recfgct"

 LOG = logging.getLogger(__name__)

-NODE_ID_FILE = '/etc/ct_node_id'
+NODE_ID_FILE = "/etc/ct_node_id"


 def handle(name, _cfg, cloud, _log, _args):
     # Ensure the node id is generated only once, during first boot
-    if cloud.datasource.platform_type == 'none':
-        LOG.debug('Skipping creation of new ct_node_id node')
+    if cloud.datasource.platform_type == "none":
+        LOG.debug("Skipping creation of new ct_node_id node")
         return

     if not os.path.isdir(RSCT_PATH):
         LOG.debug("module disabled, RSCT_PATH not present")
         return

-    orig_path = os.environ.get('PATH')
+    orig_path = os.environ.get("PATH")
+
     try:
         add_path(orig_path)
         reset_rmc()
     finally:
         if orig_path:
-            os.environ['PATH'] = orig_path
+            os.environ["PATH"] = orig_path
         else:
-            del os.environ['PATH']
+            del os.environ["PATH"]
@@ -88,17 +87,17 @@ def reconfigure_rsct_subsystems():
         LOG.debug(out.strip())
         return out
     except subp.ProcessExecutionError:
-        util.logexc(LOG, 'Failed to reconfigure the RSCT subsystems.')
+        util.logexc(LOG, "Failed to reconfigure the RSCT subsystems.")
         raise


 def get_node_id():
     try:
         fp = util.load_file(NODE_ID_FILE)
-        node_id = fp.split('\n')[0]
+        node_id = fp.split("\n")[0]
         return node_id
     except Exception:
-        util.logexc(LOG, 'Failed to get node ID from file %s.' % NODE_ID_FILE)
+        util.logexc(LOG, "Failed to get node ID from file %s." % NODE_ID_FILE)
         raise


@@ -107,25 +106,25 @@ def add_path(orig_path):
     # So that cloud-init automatically finds and
     # runs RECFGCT to create a new node_id.
     suff = ":" + orig_path if orig_path else ""
-    os.environ['PATH'] = RSCT_PATH + suff
-    return os.environ['PATH']
+    os.environ["PATH"] = RSCT_PATH + suff
+    return os.environ["PATH"]


 def rmcctrl():
     # Stop the RMC subsystem and all resource managers so that we can make
     # some changes to it
     try:
-        return subp.subp([RMCCTRL, '-z'])
+        return subp.subp([RMCCTRL, "-z"])
     except Exception:
-        util.logexc(LOG, 'Failed to stop the RMC subsystem.')
+        util.logexc(LOG, "Failed to stop the RMC subsystem.")
         raise


 def reset_rmc():
-    LOG.debug('Attempting to reset RMC.')
+    LOG.debug("Attempting to reset RMC.")

     node_id_before = get_node_id()
-    LOG.debug('Node ID at beginning of module: %s', node_id_before)
+    LOG.debug("Node ID at beginning of module: %s", node_id_before)

     # Stop the RMC subsystem and all resource managers so that we can make
     # some changes to it
@@ -133,11 +132,11 @@ def reset_rmc():
     reconfigure_rsct_subsystems()

     node_id_after = get_node_id()
-    LOG.debug('Node ID at end of module: %s', node_id_after)
+    LOG.debug("Node ID at end of module: %s", node_id_after)

     # Check whether a new node ID was generated
     # by comparing the old and new node IDs
     if node_id_after == node_id_before:
-        msg = 'New node ID did not get generated.'
+        msg = "New node ID did not get generated."
        LOG.error(msg)
         raise Exception(msg)
diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py
index 00bb7ae704a..b009c392c22 100644
--- a/cloudinit/config/cc_resizefs.py
+++ b/cloudinit/config/cc_resizefs.py
@@ -13,21 +13,21 @@
 import stat
 from textwrap import dedent

+from cloudinit import subp, util
 from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema
 from cloudinit.settings import PER_ALWAYS
-from cloudinit import subp
-from cloudinit import util

 NOBLOCK = "noblock"

 frequency = PER_ALWAYS
-distros = ['all']
+distros = ["all"]

 meta = {
-    'id': 'cc_resizefs',
-    'name': 'Resizefs',
-    'title': 'Resize filesystem',
-    'description': dedent("""\
+    "id": "cc_resizefs",
+    "name": "Resizefs",
+    "title": "Resize filesystem",
+    "description": dedent(
+        """\
        Resize a filesystem to use all available space on partition. This
        module is useful along with ``cc_growpart`` and will ensure that if
        the root partition has been resized the root filesystem will be
        resized
@@ -36,22 +36,26 @@
        running. Optionally, the resize operation can be performed in the
        background while cloud-init continues running modules. This can be
        enabled by setting ``resize_rootfs`` to ``true``. This module can be
-        disabled altogether by setting ``resize_rootfs`` to ``false``."""),
-    'distros': distros,
-    'examples': [
-        'resize_rootfs: false # disable root filesystem resize operation'],
-    'frequency': PER_ALWAYS,
+        disabled altogether by setting ``resize_rootfs`` to ``false``."""
+    ),
+    "distros": distros,
+    "examples": [
+        "resize_rootfs: false # disable root filesystem resize operation"
+    ],
+    "frequency": PER_ALWAYS,
 }

 schema = {
-    'type': 'object',
-    'properties': {
-        'resize_rootfs': {
-            'enum': [True, False, NOBLOCK],
-            'description': dedent("""\
-                Whether to resize the root partition. Default: 'true'""")
+    "type": "object",
+    "properties": {
+        "resize_rootfs": {
+            "enum": [True, False, NOBLOCK],
+            "description": dedent(
+                """\
+                Whether to resize the root partition. Default: 'true'"""
+            ),
         }
-    }
+    },
 }

 __doc__ = get_meta_doc(meta, schema)  # Supplement python help()
@@ -63,32 +67,38 @@ def _resize_btrfs(mount_point, devpth):
     # Use a subvolume that is not ro to trick the resize operation to do the
     # "right" thing. The use of ".snapshots" is specific to "snapper"; a
     # generic solution would be to walk the subvolumes and find a rw subvolume.
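     # For example (mount points hypothetical): a read-only "/" that has a
     # /.snapshots directory resizes via
     #     ('btrfs', 'filesystem', 'resize', 'max', '/.snapshots')
     # while a writable mount point resizes via
     #     ('btrfs', 'filesystem', 'resize', 'max', '/')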
- if (not util.mount_is_read_write(mount_point) and - os.path.isdir("%s/.snapshots" % mount_point)): - return ('btrfs', 'filesystem', 'resize', 'max', - '%s/.snapshots' % mount_point) + if not util.mount_is_read_write(mount_point) and os.path.isdir( + "%s/.snapshots" % mount_point + ): + return ( + "btrfs", + "filesystem", + "resize", + "max", + "%s/.snapshots" % mount_point, + ) else: - return ('btrfs', 'filesystem', 'resize', 'max', mount_point) + return ("btrfs", "filesystem", "resize", "max", mount_point) def _resize_ext(mount_point, devpth): - return ('resize2fs', devpth) + return ("resize2fs", devpth) def _resize_xfs(mount_point, devpth): - return ('xfs_growfs', mount_point) + return ("xfs_growfs", mount_point) def _resize_ufs(mount_point, devpth): - return ('growfs', '-y', mount_point) + return ("growfs", "-y", mount_point) def _resize_zfs(mount_point, devpth): - return ('zpool', 'online', '-e', mount_point, devpth) + return ("zpool", "online", "-e", mount_point, devpth) def _resize_hammer2(mount_point, devpth): - return ('hammer2', 'growfs', mount_point) + return ("hammer2", "growfs", mount_point) def _can_skip_resize_ufs(mount_point, devpth): @@ -100,7 +110,7 @@ def _can_skip_resize_ufs(mount_point, devpth): # growfs exits with 1 for almost all cases up to this one. # This means we can't just use rcs=[0, 1] as subp parameter: try: - subp.subp(['growfs', '-N', devpth]) + subp.subp(["growfs", "-N", devpth]) except subp.ProcessExecutionError as e: if e.stderr.startswith(skip_start) and skip_contain in e.stderr: # This FS is already at the desired size @@ -114,17 +124,15 @@ def _can_skip_resize_ufs(mount_point, devpth): # for multiple filesystem types if possible, e.g. one command for # ext2, ext3 and ext4. RESIZE_FS_PREFIXES_CMDS = [ - ('btrfs', _resize_btrfs), - ('ext', _resize_ext), - ('xfs', _resize_xfs), - ('ufs', _resize_ufs), - ('zfs', _resize_zfs), - ('hammer2', _resize_hammer2), + ("btrfs", _resize_btrfs), + ("ext", _resize_ext), + ("xfs", _resize_xfs), + ("ufs", _resize_ufs), + ("zfs", _resize_zfs), + ("hammer2", _resize_hammer2), ] -RESIZE_FS_PRECHECK_CMDS = { - 'ufs': _can_skip_resize_ufs -} +RESIZE_FS_PRECHECK_CMDS = {"ufs": _can_skip_resize_ufs} def can_skip_resize(fs_type, resize_what, devpth): @@ -148,52 +156,66 @@ def maybe_get_writable_device_path(devpath, info, log): container = util.is_container() # Ensure the path is a block device. - if (devpath == "/dev/root" and not os.path.exists(devpath) and - not container): + if ( + devpath == "/dev/root" + and not os.path.exists(devpath) + and not container + ): devpath = util.rootdev_from_cmdline(util.get_cmdline()) if devpath is None: log.warning("Unable to find device '/dev/root'") return None log.debug("Converted /dev/root to '%s' per kernel cmdline", devpath) - if devpath == 'overlayroot': + if devpath == "overlayroot": log.debug("Not attempting to resize devpath '%s': %s", devpath, info) return None # FreeBSD zpool can also just use gpt/