diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py index e4408a4..0ad6b7f 100644 --- a/cloudinit/config/cc_chef.py +++ b/cloudinit/config/cc_chef.py @@ -33,7 +33,7 @@ file). chef: directories: (defaulting to /etc/chef, /var/log/chef, /var/lib/chef, - /var/cache/chef, /var/backups/chef, /run/chef) + /var/cache/chef, /var/backups/chef, /var/run/chef) validation_cert: (optional string to be written to file validation_key) special value 'system' means set use existing file validation_key: (optional the path for validation_cert. default @@ -89,7 +89,7 @@ CHEF_DIRS = tuple([ '/var/lib/chef', '/var/cache/chef', '/var/backups/chef', - '/run/chef', + '/var/run/chef', ]) REQUIRED_CHEF_DIRS = tuple([ '/etc/chef', @@ -113,7 +113,7 @@ CHEF_RB_TPL_DEFAULTS = { 'json_attribs': CHEF_FB_PATH, 'file_cache_path': "/var/cache/chef", 'file_backup_path': "/var/backups/chef", - 'pid_file': "/run/chef/client.pid", + 'pid_file': "/var/run/chef/client.pid", 'show_time': True, 'encrypted_data_bag_secret': None, } diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py index 55b6770..c741c74 100644 --- a/cloudinit/config/cc_mounts.py +++ b/cloudinit/config/cc_mounts.py @@ -65,7 +65,7 @@ swap file is created. from string import whitespace import logging -import os +import os.path import re from cloudinit import type_utils @@ -223,59 +223,13 @@ def suggested_swapsize(memsize=None, maxsize=None, fsys=None): return size -def create_swapfile(fname, size): - """Size is in MiB.""" - - errmsg = "Failed to create swapfile '%s' of size %sMB via %s: %s" - - def create_swap(fname, size, method): - LOG.debug("Creating swapfile in '%s' on fstype '%s' using '%s'", - fname, fstype, method) - - if method == "fallocate": - cmd = ['fallocate', '-l', '%sM' % size, fname] - elif method == "dd": - cmd = ['dd', 'if=/dev/zero', 'of=%s' % fname, 'bs=1M', - 'count=%s' % size] - - try: - util.subp(cmd, capture=True) - except util.ProcessExecutionError as e: - LOG.warning(errmsg, fname, size, method, e) - util.del_file(fname) - - swap_dir = os.path.dirname(fname) - util.ensure_dir(swap_dir) - - fstype = util.get_mount_info(swap_dir)[1] - - if (fstype == "xfs" and - util.kernel_version() < (4, 18)) or fstype == "btrfs": - create_swap(fname, size, "dd") - else: - try: - create_swap(fname, size, "fallocate") - except util.ProcessExecutionError as e: - LOG.warning(errmsg, fname, size, "dd", e) - LOG.warning("Will attempt with dd.") - create_swap(fname, size, "dd") - - if os.path.exists(fname): - util.chmod(fname, 0o600) - try: - util.subp(['mkswap', fname]) - except util.ProcessExecutionError: - util.del_file(fname) - raise - - def setup_swapfile(fname, size=None, maxsize=None): """ fname: full path string of filename to setup size: the size to create. 
set to "auto" for recommended maxsize: the maximum size """ - swap_dir = os.path.dirname(fname) + tdir = os.path.dirname(fname) if str(size).lower() == "auto": try: memsize = util.read_meminfo()['total'] @@ -283,17 +237,28 @@ def setup_swapfile(fname, size=None, maxsize=None): LOG.debug("Not creating swap: failed to read meminfo") return - util.ensure_dir(swap_dir) - size = suggested_swapsize(fsys=swap_dir, maxsize=maxsize, + util.ensure_dir(tdir) + size = suggested_swapsize(fsys=tdir, maxsize=maxsize, memsize=memsize) - mibsize = str(int(size / (2 ** 20))) if not size: LOG.debug("Not creating swap: suggested size was 0") return - util.log_time(LOG.debug, msg="Setting up swap file", func=create_swapfile, - args=[fname, mibsize]) + mbsize = str(int(size / (2 ** 20))) + msg = "creating swap file '%s' of %sMB" % (fname, mbsize) + try: + util.ensure_dir(tdir) + util.log_time(LOG.debug, msg, func=util.subp, + args=[['sh', '-c', + ('rm -f "$1" && umask 0066 && ' + '{ fallocate -l "${2}M" "$1" || ' + 'dd if=/dev/zero "of=$1" bs=1M "count=$2"; } && ' + 'mkswap "$1" || { r=$?; rm -f "$1"; exit $r; }'), + 'setup_swap', fname, mbsize]]) + + except Exception as e: + raise IOError("Failed %s: %s" % (msg, e)) return fname diff --git a/cloudinit/config/cc_refresh_rmc_and_interface.py b/cloudinit/config/cc_refresh_rmc_and_interface.py deleted file mode 100644 index 07050c4..0000000 --- a/cloudinit/config/cc_refresh_rmc_and_interface.py +++ /dev/null @@ -1,158 +0,0 @@ -# (c) Copyright IBM Corp. 2020 All Rights Reserved -# -# Author: Aman Kumar Sinha -# -# This file is part of cloud-init. See LICENSE file for license information. - -""" -Refresh IPv6 interface and RMC ------------------------------- -**Summary:** Ensure Network Manager is not managing IPv6 interface - -This module is IBM PowerVM Hypervisor specific - -Reliable Scalable Cluster Technology (RSCT) is a set of software components -that together provide a comprehensive clustering environment(RAS features) -for IBM PowerVM based virtual machines. RSCT includes the Resource -Monitoring and Control (RMC) subsystem. RMC is a generalized framework used -for managing, monitoring, and manipulating resources. RMC runs as a daemon -process on individual machines and needs creation of unique node id and -restarts during VM boot. -More details refer -https://www.ibm.com/support/knowledgecenter/en/SGVKBA_3.2/admin/bl503_ovrv.htm - -This module handles -- Refreshing RMC -- Disabling NetworkManager from handling IPv6 interface, as IPv6 interface - is used for communication between RMC daemon and PowerVM hypervisor. - -**Internal name:** ``cc_refresh_rmc_and_interface`` - -**Module frequency:** per always - -**Supported distros:** RHEL - -""" - -from cloudinit import log as logging -from cloudinit.settings import PER_ALWAYS -from cloudinit import util -from cloudinit import netinfo - -import errno - -frequency = PER_ALWAYS - -LOG = logging.getLogger(__name__) -# Ensure that /opt/rsct/bin has been added to standard PATH of the -# distro. The symlink to rmcctrl is /usr/sbin/rsct/bin/rmcctrl . -RMCCTRL = 'rmcctrl' - - -def handle(name, _cfg, _cloud, _log, _args): - if not util.which(RMCCTRL): - LOG.debug("No '%s' in path, disabled", RMCCTRL) - return - - LOG.debug( - 'Making the IPv6 up explicitly. 
' - 'Ensuring IPv6 interface is not being handled by NetworkManager ' - 'and it is restarted to re-establish the communication with ' - 'the hypervisor') - - ifaces = find_ipv6_ifaces() - - # Setting NM_CONTROLLED=no for IPv6 interface - # making it down and up - - if len(ifaces) == 0: - LOG.debug("Did not find any interfaces with ipv6 addresses.") - else: - for iface in ifaces: - refresh_ipv6(iface) - disable_ipv6(sysconfig_path(iface)) - restart_network_manager() - - -def find_ipv6_ifaces(): - info = netinfo.netdev_info() - ifaces = [] - for iface, data in info.items(): - if iface == "lo": - LOG.debug('Skipping localhost interface') - if len(data.get("ipv4", [])) != 0: - # skip this interface, as it has ipv4 addrs - continue - ifaces.append(iface) - return ifaces - - -def refresh_ipv6(interface): - # IPv6 interface is explicitly brought up, subsequent to which the - # RMC services are restarted to re-establish the communication with - # the hypervisor. - util.subp(['ip', 'link', 'set', interface, 'down']) - util.subp(['ip', 'link', 'set', interface, 'up']) - - -def sysconfig_path(iface): - return '/etc/sysconfig/network-scripts/ifcfg-' + iface - - -def restart_network_manager(): - util.subp(['systemctl', 'restart', 'NetworkManager']) - - -def disable_ipv6(iface_file): - # Ensuring that the communication b/w the hypervisor and VM is not - # interrupted due to NetworkManager. For this purpose, as part of - # this function, the NM_CONTROLLED is explicitly set to No for IPV6 - # interface and NetworkManager is restarted. - try: - contents = util.load_file(iface_file) - except IOError as e: - if e.errno == errno.ENOENT: - LOG.debug("IPv6 interface file %s does not exist\n", - iface_file) - else: - raise e - - if 'IPV6INIT' not in contents: - LOG.debug("Interface file %s did not have IPV6INIT", iface_file) - return - - LOG.debug("Editing interface file %s ", iface_file) - - # Dropping any NM_CONTROLLED or IPV6 lines from IPv6 interface file. - lines = contents.splitlines() - lines = [line for line in lines if not search(line)] - lines.append("NM_CONTROLLED=no") - - with open(iface_file, "w") as fp: - fp.write("\n".join(lines) + "\n") - - -def search(contents): - # Search for any NM_CONTROLLED or IPV6 lines in IPv6 interface file. - return( - contents.startswith("IPV6ADDR") or - contents.startswith("IPADDR6") or - contents.startswith("IPV6INIT") or - contents.startswith("NM_CONTROLLED")) - - -def refresh_rmc(): - # To make a healthy connection between RMC daemon and hypervisor we - # refresh RMC. With refreshing RMC we are ensuring that making IPv6 - # down and up shouldn't impact communication between RMC daemon and - # hypervisor. - # -z : stop Resource Monitoring & Control subsystem and all resource - # managers, but the command does not return control to the user - # until the subsystem and all resource managers are stopped. - # -s : start Resource Monitoring & Control subsystem. - try: - util.subp([RMCCTRL, '-z']) - util.subp([RMCCTRL, '-s']) - except Exception: - util.logexc(LOG, 'Failed to refresh the RMC subsystem.') - raise diff --git a/cloudinit/config/cc_reset_rmc.py b/cloudinit/config/cc_reset_rmc.py deleted file mode 100644 index 68373ad..0000000 --- a/cloudinit/config/cc_reset_rmc.py +++ /dev/null @@ -1,142 +0,0 @@ -# (c) Copyright IBM Corp. 2020 All Rights Reserved -# -# Author: Aman Kumar Sinha -# -# This file is part of cloud-init. See LICENSE file for license information. 
- - -""" -Reset RMC ------------- -**Summary:** reset rsct node id - -Reset RMC module is IBM PowerVM Hypervisor specific - -Reliable Scalable Cluster Technology (RSCT) is a set of software components, -that together provide a comprehensive clustering environment (RAS features) -for IBM PowerVM based virtual machines. RSCT includes the Resource monitoring -and control (RMC) subsystem. RMC is a generalized framework used for managing, -monitoring, and manipulating resources. RMC runs as a daemon process on -individual machines and needs creation of unique node id and restarts -during VM boot. -More details refer -https://www.ibm.com/support/knowledgecenter/en/SGVKBA_3.2/admin/bl503_ovrv.htm - -This module handles -- creation of the unique RSCT node id to every instance/virtual machine - and ensure once set, it isn't changed subsequently by cloud-init. - In order to do so, it restarts RSCT service. - -Prerequisite of using this module is to install RSCT packages. - -**Internal name:** ``cc_reset_rmc`` - -**Module frequency:** per instance - -**Supported distros:** rhel, sles and ubuntu - -""" -import os - -from cloudinit import log as logging -from cloudinit.settings import PER_INSTANCE -from cloudinit import util - -frequency = PER_INSTANCE - -# RMCCTRL is expected to be in system PATH (/opt/rsct/bin) -# The symlink for RMCCTRL and RECFGCT are -# /usr/sbin/rsct/bin/rmcctrl and -# /usr/sbin/rsct/install/bin/recfgct respectively. -RSCT_PATH = '/opt/rsct/install/bin' -RMCCTRL = 'rmcctrl' -RECFGCT = 'recfgct' - -LOG = logging.getLogger(__name__) - -NODE_ID_FILE = '/etc/ct_node_id' - - -def handle(name, _cfg, cloud, _log, _args): - # Ensuring node id has to be generated only once during first boot - if cloud.datasource.platform_type == 'none': - LOG.debug('Skipping creation of new ct_node_id node') - return - - if not os.path.isdir(RSCT_PATH): - LOG.debug("module disabled, RSCT_PATH not present") - return - - orig_path = os.environ.get('PATH') - try: - add_path(orig_path) - reset_rmc() - finally: - if orig_path: - os.environ['PATH'] = orig_path - else: - del os.environ['PATH'] - - -def reconfigure_rsct_subsystems(): - # Reconfigure the RSCT subsystems, which includes removing all RSCT data - # under the /var/ct directory, generating a new node ID, and making it - # appear as if the RSCT components were just installed - try: - out = util.subp([RECFGCT])[0] - LOG.debug(out.strip()) - return out - except util.ProcessExecutionError: - util.logexc(LOG, 'Failed to reconfigure the RSCT subsystems.') - raise - - -def get_node_id(): - try: - fp = util.load_file(NODE_ID_FILE) - node_id = fp.split('\n')[0] - return node_id - except Exception: - util.logexc(LOG, 'Failed to get node ID from file %s.' % NODE_ID_FILE) - raise - - -def add_path(orig_path): - # Adding the RSCT_PATH to env standard path - # So thet cloud init automatically find and - # run RECFGCT to create new node_id. 
- suff = ":" + orig_path if orig_path else "" - os.environ['PATH'] = RSCT_PATH + suff - return os.environ['PATH'] - - -def rmcctrl(): - # Stop the RMC subsystem and all resource managers so that we can make - # some changes to it - try: - return util.subp([RMCCTRL, '-z']) - except Exception: - util.logexc(LOG, 'Failed to stop the RMC subsystem.') - raise - - -def reset_rmc(): - LOG.debug('Attempting to reset RMC.') - - node_id_before = get_node_id() - LOG.debug('Node ID at beginning of module: %s', node_id_before) - - # Stop the RMC subsystem and all resource managers so that we can make - # some changes to it - rmcctrl() - reconfigure_rsct_subsystems() - - node_id_after = get_node_id() - LOG.debug('Node ID at end of module: %s', node_id_after) - - # Check if new node ID is generated or not - # by comparing old and new node ID - if node_id_after == node_id_before: - msg = 'New node ID did not get generated.' - LOG.error(msg) - raise Exception(msg) diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py index 0742234..c3c5b0f 100755 --- a/cloudinit/config/cc_set_passwords.py +++ b/cloudinit/config/cc_set_passwords.py @@ -236,7 +236,7 @@ def handle(_name, cfg, cloud, log, args): raise errors[-1] -def rand_user_password(pwlen=20): +def rand_user_password(pwlen=9): return util.rand_str(pwlen, select_from=PW_SET) diff --git a/cloudinit/config/tests/test_mounts.py b/cloudinit/config/tests/test_mounts.py deleted file mode 100644 index c7dad61..0000000 --- a/cloudinit/config/tests/test_mounts.py +++ /dev/null @@ -1,22 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. -from unittest import mock - -from cloudinit.config.cc_mounts import create_swapfile - - -M_PATH = 'cloudinit.config.cc_mounts.' 
- - -class TestCreateSwapfile: - - @mock.patch(M_PATH + 'util.subp') - def test_happy_path(self, m_subp, tmpdir): - swap_file = tmpdir.join("swap-file") - fname = str(swap_file) - - # Some of the calls to util.subp should create the swap file; this - # roughly approximates that - m_subp.side_effect = lambda *args, **kwargs: swap_file.write('') - - create_swapfile(fname, '') - assert mock.call(['mkswap', fname]) in m_subp.call_args_list diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py index 34acfe8..57708c1 100644 --- a/cloudinit/ec2_utils.py +++ b/cloudinit/ec2_utils.py @@ -142,8 +142,7 @@ def skip_retry_on_codes(status_codes, _request_args, cause): def get_instance_userdata(api_version='latest', metadata_address='http://169.254.169.254', ssl_details=None, timeout=5, retries=5, - headers_cb=None, headers_redact=None, - exception_cb=None): + headers_cb=None, exception_cb=None): ud_url = url_helper.combine_url(metadata_address, api_version) ud_url = url_helper.combine_url(ud_url, 'user-data') user_data = '' @@ -156,8 +155,7 @@ def get_instance_userdata(api_version='latest', SKIP_USERDATA_CODES) response = url_helper.read_file_or_url( ud_url, ssl_details=ssl_details, timeout=timeout, - retries=retries, exception_cb=exception_cb, headers_cb=headers_cb, - headers_redact=headers_redact) + retries=retries, exception_cb=exception_cb, headers_cb=headers_cb) user_data = response.contents except url_helper.UrlError as e: if e.code not in SKIP_USERDATA_CODES: @@ -171,13 +169,11 @@ def _get_instance_metadata(tree, api_version='latest', metadata_address='http://169.254.169.254', ssl_details=None, timeout=5, retries=5, leaf_decoder=None, headers_cb=None, - headers_redact=None, exception_cb=None): md_url = url_helper.combine_url(metadata_address, api_version, tree) caller = functools.partial( url_helper.read_file_or_url, ssl_details=ssl_details, timeout=timeout, retries=retries, headers_cb=headers_cb, - headers_redact=headers_redact, exception_cb=exception_cb) def mcaller(url): @@ -201,7 +197,6 @@ def get_instance_metadata(api_version='latest', metadata_address='http://169.254.169.254', ssl_details=None, timeout=5, retries=5, leaf_decoder=None, headers_cb=None, - headers_redact=None, exception_cb=None): # Note, 'meta-data' explicitly has trailing /. 
# this is required for CloudStack (LP: #1356855) @@ -209,7 +204,6 @@ def get_instance_metadata(api_version='latest', metadata_address=metadata_address, ssl_details=ssl_details, timeout=timeout, retries=retries, leaf_decoder=leaf_decoder, - headers_redact=headers_redact, headers_cb=headers_cb, exception_cb=exception_cb) @@ -218,14 +212,12 @@ def get_instance_identity(api_version='latest', metadata_address='http://169.254.169.254', ssl_details=None, timeout=5, retries=5, leaf_decoder=None, headers_cb=None, - headers_redact=None, exception_cb=None): return _get_instance_metadata(tree='dynamic/instance-identity', api_version=api_version, metadata_address=metadata_address, ssl_details=ssl_details, timeout=timeout, retries=retries, leaf_decoder=leaf_decoder, - headers_redact=headers_redact, headers_cb=headers_cb, exception_cb=exception_cb) # vi: ts=4 expandtab diff --git a/cloudinit/net/dhcp.py b/cloudinit/net/dhcp.py index 841e72e..c033cc8 100644 --- a/cloudinit/net/dhcp.py +++ b/cloudinit/net/dhcp.py @@ -215,12 +215,6 @@ def dhcp_discovery(dhclient_cmd_path, interface, cleandir): pid_file = os.path.join(cleandir, 'dhclient.pid') lease_file = os.path.join(cleandir, 'dhcp.leases') - # In some cases files in /var/tmp may not be executable, launching dhclient - # from there will certainly raise 'Permission denied' error. Try launching - # the original dhclient instead. - if not os.access(sandbox_dhclient_cmd, os.X_OK): - sandbox_dhclient_cmd = dhclient_cmd_path - # ISC dhclient needs the interface up to send initial discovery packets. # Generally dhclient relies on dhclient-script PREINIT action to bring the # link up before attempting discovery. Since we are using -sf /bin/true, diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py index 2f71456..7077106 100644 --- a/cloudinit/net/eni.py +++ b/cloudinit/net/eni.py @@ -429,9 +429,7 @@ class Renderer(renderer.Renderer): iface['mode'] = 'auto' # Use stateless DHCPv6 (0=off, 1=on) iface['dhcp'] = '0' - elif subnet_is_ipv6(subnet): - # mode might be static6, eni uses 'static' - iface['mode'] = 'static' + elif subnet_is_ipv6(subnet) and subnet['type'] == 'static': if accept_ra is not None: # Accept router advertisements (0=off, 1=on) iface['accept_ra'] = '1' if accept_ra else '0' diff --git a/cloudinit/net/netplan.py b/cloudinit/net/netplan.py index 8985527..14d3999 100644 --- a/cloudinit/net/netplan.py +++ b/cloudinit/net/netplan.py @@ -98,7 +98,7 @@ def _extract_addresses(config, entry, ifname, features=None): entry.update({sn_type: True}) elif sn_type in IPV6_DYNAMIC_TYPES: entry.update({'dhcp6': True}) - elif sn_type in ['static', 'static6']: + elif sn_type in ['static']: addr = "%s" % subnet.get('address') if 'prefix' in subnet: addr += "/%d" % subnet.get('prefix') diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py index 48e5b6e..f3e8e25 100644 --- a/cloudinit/net/network_state.py +++ b/cloudinit/net/network_state.py @@ -822,8 +822,7 @@ def _normalize_subnet(subnet): if subnet.get('type') in ('static', 'static6'): normal_subnet.update( - _normalize_net_keys(normal_subnet, address_keys=( - 'address', 'ip_address',))) + _normalize_net_keys(normal_subnet, address_keys=('address',))) normal_subnet['routes'] = [_normalize_route(r) for r in subnet.get('routes', [])] @@ -942,7 +941,7 @@ def subnet_is_ipv6(subnet): # 'static6', 'dhcp6', 'ipv6_dhcpv6-stateful', 'ipv6_dhcpv6-stateless' or # 'ipv6_slaac' if subnet['type'].endswith('6') or subnet['type'] in IPV6_DYNAMIC_TYPES: - # This is a request either static6 type or DHCPv6. 
+ # This is a request for DHCPv6. return True elif subnet['type'] == 'static' and is_ipv6_addr(subnet.get('address')): return True diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py index 1989d01..310cdf0 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py @@ -95,10 +95,6 @@ class ConfigMap(object): def __len__(self): return len(self._conf) - def skip_key_value(self, key, val): - """Skip the pair key, value if it matches a certain rule.""" - return False - def to_string(self): buf = six.StringIO() buf.write(_make_header()) @@ -106,8 +102,6 @@ class ConfigMap(object): buf.write("\n") for key in sorted(self._conf.keys()): value = self._conf[key] - if self.skip_key_value(key, value): - continue if isinstance(value, bool): value = self._bool_map[value] if not isinstance(value, six.string_types): @@ -213,7 +207,6 @@ class NetInterface(ConfigMap): 'bond': 'Bond', 'bridge': 'Bridge', 'infiniband': 'InfiniBand', - 'vlan': 'Vlan', } def __init__(self, iface_name, base_sysconf_dir, templates, @@ -267,11 +260,6 @@ class NetInterface(ConfigMap): c.routes = self.routes.copy() return c - def skip_key_value(self, key, val): - if key == 'TYPE' and val == 'Vlan': - return True - return False - class Renderer(renderer.Renderer): """Renders network information in a /etc/sysconfig format.""" @@ -284,6 +272,7 @@ class Renderer(renderer.Renderer): iface_defaults = tuple([ ('ONBOOT', True), ('USERCTL', False), + ('NM_CONTROLLED', False), ('BOOTPROTO', 'none'), ('STARTMODE', 'auto'), ]) @@ -378,7 +367,7 @@ class Renderer(renderer.Renderer): iface_cfg['IPV6_AUTOCONF'] = True elif subnet_type in ['dhcp4', 'dhcp']: iface_cfg['BOOTPROTO'] = 'dhcp' - elif subnet_type in ['static', 'static6']: + elif subnet_type == 'static': # grep BOOTPROTO sysconfig.txt -A2 | head -3 # BOOTPROTO=none|bootp|dhcp # 'bootp' or 'dhcp' cause a DHCP client @@ -401,10 +390,6 @@ class Renderer(renderer.Renderer): ' because ipv4 subnet-level mtu:%s provided.', iface_cfg.name, iface_cfg[mtu_key], subnet['mtu']) iface_cfg[mtu_key] = subnet['mtu'] - - if subnet_is_ipv6(subnet): - iface_cfg['IPV6_FORCE_ACCEPT_RA'] = False - iface_cfg['IPV6_AUTOCONF'] = False elif subnet_type == 'manual': # If the subnet has an MTU setting, then ONBOOT=True # to apply the setting @@ -434,7 +419,7 @@ class Renderer(renderer.Renderer): continue elif subnet_type in IPV6_DYNAMIC_TYPES: continue - elif subnet_type in ['static', 'static6']: + elif subnet_type == 'static': if subnet_is_ipv6(subnet): ipv6_index = ipv6_index + 1 ipv6_cidr = "%s/%s" % (subnet['address'], subnet['prefix']) @@ -615,16 +600,7 @@ class Renderer(renderer.Renderer): iface_name = iface['name'] iface_cfg = iface_contents[iface_name] iface_cfg['VLAN'] = True - iface_cfg.kind = 'vlan' - - rdev = iface['vlan-raw-device'] - supported = _supported_vlan_names(rdev, iface['vlan_id']) - if iface_name not in supported: - LOG.info( - "Name '%s' for vlan '%s' is not officially supported" - "by RHEL. Supported: %s", - iface_name, rdev, ' '.join(supported)) - iface_cfg['PHYSDEV'] = rdev + iface_cfg['PHYSDEV'] = iface_name[:iface_name.rfind('.')] iface_subnets = iface.get("subnets", []) route_cfg = iface_cfg.routes @@ -779,16 +755,7 @@ class Renderer(renderer.Renderer): # Distros configuring /etc/sysconfig/network as a file e.g. 
Centos if sysconfig_path.endswith('network'): util.ensure_dir(os.path.dirname(sysconfig_path)) - netcfg = [] - for line in util.load_file(sysconfig_path, quiet=True).split('\n'): - if 'cloud-init' in line: - break - if not line.startswith(('NETWORKING=', - 'IPV6_AUTOCONF=', - 'NETWORKING_IPV6=')): - netcfg.append(line) - # Now generate the cloud-init portion of sysconfig/network - netcfg.extend([_make_header(), 'NETWORKING=yes']) + netcfg = [_make_header(), 'NETWORKING=yes'] if network_state.use_ipv6: netcfg.append('NETWORKING_IPV6=yes') netcfg.append('IPV6_AUTOCONF=no') @@ -796,15 +763,6 @@ class Renderer(renderer.Renderer): "\n".join(netcfg) + "\n", file_mode) -def _supported_vlan_names(rdev, vid): - """Return list of supported names for vlan devices per RHEL doc - 11.5. Naming Scheme for VLAN Interfaces.""" - return [ - v.format(rdev=rdev, vid=int(vid)) - for v in ("{rdev}{vid:04}", "{rdev}{vid}", - "{rdev}.{vid:04}", "{rdev}.{vid}")] - - def available(target=None): sysconfig = available_sysconfig(target=target) nm = available_nm(target=target) diff --git a/cloudinit/net/tests/test_dhcp.py b/cloudinit/net/tests/test_dhcp.py index 08e2cfb..c3fa1e0 100644 --- a/cloudinit/net/tests/test_dhcp.py +++ b/cloudinit/net/tests/test_dhcp.py @@ -406,52 +406,6 @@ class TestDHCPDiscoveryClean(CiTestCase): 'eth9', '-sf', '/bin/true'], capture=True)]) m_kill.assert_has_calls([mock.call(my_pid, signal.SIGKILL)]) - @mock.patch('cloudinit.net.dhcp.util.get_proc_ppid') - @mock.patch('cloudinit.net.dhcp.os.kill') - @mock.patch('cloudinit.net.dhcp.subp.subp') - def test_dhcp_discovery_outside_sandbox(self, m_subp, m_kill, m_getppid): - """dhcp_discovery brings up the interface and runs dhclient. - - It also returns the parsed dhcp.leases file generated in the sandbox. 
- """ - m_subp.return_value = ('', '') - tmpdir = self.tmp_dir() - dhclient_script = os.path.join(tmpdir, 'dhclient.orig') - script_content = '#!/bin/bash\necho fake-dhclient' - write_file(dhclient_script, script_content, mode=0o755) - lease_content = dedent(""" - lease { - interface "eth9"; - fixed-address 192.168.2.74; - option subnet-mask 255.255.255.0; - option routers 192.168.2.1; - } - """) - lease_file = os.path.join(tmpdir, 'dhcp.leases') - write_file(lease_file, lease_content) - pid_file = os.path.join(tmpdir, 'dhclient.pid') - my_pid = 1 - write_file(pid_file, "%d\n" % my_pid) - m_getppid.return_value = 1 # Indicate that dhclient has daemonized - - with mock.patch('os.access', return_value=False): - self.assertCountEqual( - [{'interface': 'eth9', 'fixed-address': '192.168.2.74', - 'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1'}], - dhcp_discovery(dhclient_script, 'eth9', tmpdir)) - # dhclient script got copied - with open(os.path.join(tmpdir, 'dhclient.orig')) as stream: - self.assertEqual(script_content, stream.read()) - # Interface was brought up before dhclient called from sandbox - m_subp.assert_has_calls([ - mock.call( - ['ip', 'link', 'set', 'dev', 'eth9', 'up'], capture=True), - mock.call( - [os.path.join(tmpdir, 'dhclient.orig'), '-1', '-v', '-lf', - lease_file, '-pf', os.path.join(tmpdir, 'dhclient.pid'), - 'eth9', '-sf', '/bin/true'], capture=True)]) - m_kill.assert_has_calls([mock.call(my_pid, signal.SIGKILL)]) - class TestSystemdParseLeases(CiTestCase): diff --git a/cloudinit/settings.py b/cloudinit/settings.py index 439eee0..ca4ffa8 100644 --- a/cloudinit/settings.py +++ b/cloudinit/settings.py @@ -45,18 +45,14 @@ CFG_BUILTIN = { 'None', ], 'def_log_file': '/var/log/cloud-init.log', - 'def_log_file_mode': 0o600, 'log_cfgs': [], - 'mount_default_fields': [None, None, 'auto', 'defaults,nofail', '0', '2'], - 'ssh_deletekeys': False, - 'ssh_genkeytypes': [], - 'syslog_fix_perms': [], + 'syslog_fix_perms': ['syslog:adm', 'root:adm', 'root:wheel', 'root:root'], 'system_info': { 'paths': { 'cloud_dir': '/var/lib/cloud', 'templates_dir': '/etc/cloud/templates/', }, - 'distro': 'rhel', + 'distro': 'ubuntu', 'network': {'renderers': None}, }, 'vendor_data': {'enabled': True, 'prefix': []}, diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 69454c4..24f448c 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -256,7 +256,7 @@ def get_hostname(hostname_command='hostname'): def set_hostname(hostname, hostname_command='hostname'): - util.subp(['hostnamectl', 'set-hostname', str(hostname)]) + util.subp([hostname_command, hostname]) @azure_ds_telemetry_reporter @@ -1206,7 +1206,7 @@ def read_azure_ovf(contents): if password: defuser['lock_passwd'] = False if DEF_PASSWD_REDACTION != password: - defuser['passwd'] = cfg['password'] = encrypt_pass(password) + defuser['passwd'] = encrypt_pass(password) if defuser: cfg['system_info'] = {'default_user': defuser} diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index 0f2bfef..b9f346a 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -31,9 +31,6 @@ STRICT_ID_DEFAULT = "warn" API_TOKEN_ROUTE = 'latest/api/token' API_TOKEN_DISABLED = '_ec2_disable_api_token' AWS_TOKEN_TTL_SECONDS = '21600' -AWS_TOKEN_PUT_HEADER = 'X-aws-ec2-metadata-token' -AWS_TOKEN_REQ_HEADER = AWS_TOKEN_PUT_HEADER + '-ttl-seconds' -AWS_TOKEN_REDACT = [AWS_TOKEN_PUT_HEADER, AWS_TOKEN_REQ_HEADER] class 
CloudNames(object): @@ -161,8 +158,7 @@ class DataSourceEc2(sources.DataSource): for api_ver in self.extended_metadata_versions: url = url_tmpl.format(self.metadata_address, api_ver) try: - resp = uhelp.readurl(url=url, headers=headers, - headers_redact=AWS_TOKEN_REDACT) + resp = uhelp.readurl(url=url, headers=headers) except uhelp.UrlError as e: LOG.debug('url %s raised exception %s', url, e) else: @@ -184,7 +180,6 @@ class DataSourceEc2(sources.DataSource): self.identity = ec2.get_instance_identity( api_version, self.metadata_address, headers_cb=self._get_headers, - headers_redact=AWS_TOKEN_REDACT, exception_cb=self._refresh_stale_aws_token_cb).get( 'document', {}) return self.identity.get( @@ -210,8 +205,7 @@ class DataSourceEc2(sources.DataSource): LOG.debug('Fetching Ec2 IMDSv2 API Token') url, response = uhelp.wait_for_url( urls=urls, max_wait=1, timeout=1, status_cb=self._status_cb, - headers_cb=self._get_headers, request_method=request_method, - headers_redact=AWS_TOKEN_REDACT) + headers_cb=self._get_headers, request_method=request_method) if url and response: self._api_token = response @@ -258,8 +252,7 @@ class DataSourceEc2(sources.DataSource): url, _ = uhelp.wait_for_url( urls=urls, max_wait=url_params.max_wait_seconds, timeout=url_params.timeout_seconds, status_cb=LOG.warning, - headers_redact=AWS_TOKEN_REDACT, headers_cb=self._get_headers, - request_method=request_method) + headers_cb=self._get_headers, request_method=request_method) if url: metadata_address = url2base[url] @@ -427,7 +420,6 @@ class DataSourceEc2(sources.DataSource): if not self.wait_for_metadata_service(): return {} api_version = self.get_metadata_api_version() - redact = AWS_TOKEN_REDACT crawled_metadata = {} if self.cloud_name == CloudNames.AWS: exc_cb = self._refresh_stale_aws_token_cb @@ -437,17 +429,14 @@ class DataSourceEc2(sources.DataSource): try: crawled_metadata['user-data'] = ec2.get_instance_userdata( api_version, self.metadata_address, - headers_cb=self._get_headers, headers_redact=redact, - exception_cb=exc_cb_ud) + headers_cb=self._get_headers, exception_cb=exc_cb_ud) crawled_metadata['meta-data'] = ec2.get_instance_metadata( api_version, self.metadata_address, - headers_cb=self._get_headers, headers_redact=redact, - exception_cb=exc_cb) + headers_cb=self._get_headers, exception_cb=exc_cb) if self.cloud_name == CloudNames.AWS: identity = ec2.get_instance_identity( api_version, self.metadata_address, - headers_cb=self._get_headers, headers_redact=redact, - exception_cb=exc_cb) + headers_cb=self._get_headers, exception_cb=exc_cb) crawled_metadata['dynamic'] = {'instance-identity': identity} except Exception: util.logexc( @@ -466,12 +455,11 @@ class DataSourceEc2(sources.DataSource): if self.cloud_name != CloudNames.AWS: return None LOG.debug("Refreshing Ec2 metadata API token") - request_header = {AWS_TOKEN_REQ_HEADER: seconds} + request_header = {'X-aws-ec2-metadata-token-ttl-seconds': seconds} token_url = '{}/{}'.format(self.metadata_address, API_TOKEN_ROUTE) try: - response = uhelp.readurl(token_url, headers=request_header, - headers_redact=AWS_TOKEN_REDACT, - request_method="PUT") + response = uhelp.readurl( + token_url, headers=request_header, request_method="PUT") except uhelp.UrlError as e: LOG.warning( 'Unable to get API token: %s raised exception %s', @@ -512,7 +500,8 @@ class DataSourceEc2(sources.DataSource): API_TOKEN_DISABLED): return {} # Request a 6 hour token if URL is API_TOKEN_ROUTE - request_token_header = {AWS_TOKEN_REQ_HEADER: AWS_TOKEN_TTL_SECONDS} + request_token_header = { 
+ 'X-aws-ec2-metadata-token-ttl-seconds': AWS_TOKEN_TTL_SECONDS} if API_TOKEN_ROUTE in url: return request_token_header if not self._api_token: @@ -522,7 +511,7 @@ class DataSourceEc2(sources.DataSource): self._api_token = self._refresh_api_token() if not self._api_token: return {} - return {AWS_TOKEN_PUT_HEADER: self._api_token} + return {'X-aws-ec2-metadata-token': self._api_token} class DataSourceEc2Local(DataSourceEc2): diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py index 6ef4f90..0778f45 100644 --- a/cloudinit/sources/helpers/openstack.py +++ b/cloudinit/sources/helpers/openstack.py @@ -592,17 +592,11 @@ def convert_net_json(network_json=None, known_macs=None): elif network['type'] in ['ipv6_slaac', 'ipv6_dhcpv6-stateless', 'ipv6_dhcpv6-stateful']: subnet.update({'type': network['type']}) - elif network['type'] in ['ipv4', 'static']: + elif network['type'] in ['ipv4', 'ipv6']: subnet.update({ 'type': 'static', 'address': network.get('ip_address'), }) - elif network['type'] in ['ipv6', 'static6']: - cfg.update({'accept-ra': False}) - subnet.update({ - 'type': 'static6', - 'address': network.get('ip_address'), - }) # Enable accept_ra for stateful and legacy ipv6_dhcp types if network['type'] in ['ipv6_dhcpv6-stateful', 'ipv6_dhcp']: diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py index 816f52e..3d369d0 100644 --- a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py +++ b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py @@ -133,30 +133,23 @@ def get_tools_config(section, key, defaultVal): 'vmware-toolbox-cmd not installed, returning default value') return defaultVal + retValue = defaultVal cmd = ['vmware-toolbox-cmd', 'config', 'get', section, key] try: (outText, _) = util.subp(cmd) - except util.ProcessExecutionError as e: - if e.exit_code == 69: - logger.debug( - "vmware-toolbox-cmd returned 69 (unavailable) for cmd: %s." 
- " Return default value: %s", " ".join(cmd), defaultVal) + m = re.match(r'([^=]+)=(.*)', outText) + if m: + retValue = m.group(2).strip() + logger.debug("Get tools config: [%s] %s = %s", + section, key, retValue) else: - logger.error("Failed running %s[%s]", cmd, e.exit_code) - logger.exception(e) - return defaultVal - - retValue = defaultVal - m = re.match(r'([^=]+)=(.*)', outText) - if m: - retValue = m.group(2).strip() - logger.debug("Get tools config: [%s] %s = %s", - section, key, retValue) - else: - logger.debug( - "Tools config: [%s] %s is not found, return default value: %s", - section, key, retValue) + logger.debug( + "Tools config: [%s] %s is not found, return default value: %s", + section, key, retValue) + except util.ProcessExecutionError as e: + logger.error("Failed running %s[%s]", cmd, e.exit_code) + logger.exception(e) return retValue diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py index 8ff61a2..bcb23a5 100644 --- a/cloudinit/ssh_util.py +++ b/cloudinit/ssh_util.py @@ -40,13 +40,11 @@ VALID_KEY_TYPES = ( "ssh-rsa-cert-v01@openssh.com", ) -_DISABLE_USER_SSH_EXIT = 142 DISABLE_USER_OPTS = ( "no-port-forwarding,no-agent-forwarding," "no-X11-forwarding,command=\"echo \'Please login as the user \\\"$USER\\\"" - " rather than the user \\\"$DISABLE_USER\\\".\';echo;sleep 10;" - "exit " + str(_DISABLE_USER_SSH_EXIT) + "\"") + " rather than the user \\\"$DISABLE_USER\\\".\';echo;sleep 10\"") class AuthKeyLine(object): diff --git a/cloudinit/stages.py b/cloudinit/stages.py index 68b83af..71f3a49 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -149,9 +149,8 @@ class Init(object): def _initialize_filesystem(self): util.ensure_dirs(self._initial_subdirs()) log_file = util.get_cfg_option_str(self.cfg, 'def_log_file') - log_file_mode = util.get_cfg_option_int(self.cfg, 'def_log_file_mode') if log_file: - util.ensure_file(log_file, mode=log_file_mode) + util.ensure_file(log_file) perms = self.cfg.get('syslog_fix_perms') if not perms: perms = {} diff --git a/cloudinit/tests/test_url_helper.py b/cloudinit/tests/test_url_helper.py index 29b3937..1674120 100644 --- a/cloudinit/tests/test_url_helper.py +++ b/cloudinit/tests/test_url_helper.py @@ -1,8 +1,7 @@ # This file is part of cloud-init. See LICENSE file for license information. from cloudinit.url_helper import ( - NOT_FOUND, UrlError, REDACTED, oauth_headers, read_file_or_url, - retry_on_url_exc) + NOT_FOUND, UrlError, oauth_headers, read_file_or_url, retry_on_url_exc) from cloudinit.tests.helpers import CiTestCase, mock, skipIf from cloudinit import util from cloudinit import version @@ -51,9 +50,6 @@ class TestOAuthHeaders(CiTestCase): class TestReadFileOrUrl(CiTestCase): - - with_logs = True - def test_read_file_or_url_str_from_file(self): """Test that str(result.contents) on file is text version of contents. 
It should not be "b'data'", but just "'data'" """ @@ -75,34 +71,6 @@ class TestReadFileOrUrl(CiTestCase): self.assertEqual(result.contents, data) self.assertEqual(str(result), data.decode('utf-8')) - @httpretty.activate - def test_read_file_or_url_str_from_url_redacting_headers_from_logs(self): - """Headers are redacted from logs but unredacted in requests.""" - url = 'http://hostname/path' - headers = {'sensitive': 'sekret', 'server': 'blah'} - httpretty.register_uri(httpretty.GET, url) - - read_file_or_url(url, headers=headers, headers_redact=['sensitive']) - logs = self.logs.getvalue() - for k in headers.keys(): - self.assertEqual(headers[k], httpretty.last_request().headers[k]) - self.assertIn(REDACTED, logs) - self.assertNotIn('sekret', logs) - - @httpretty.activate - def test_read_file_or_url_str_from_url_redacts_noheaders(self): - """When no headers_redact, header values are in logs and requests.""" - url = 'http://hostname/path' - headers = {'sensitive': 'sekret', 'server': 'blah'} - httpretty.register_uri(httpretty.GET, url) - - read_file_or_url(url, headers=headers) - for k in headers.keys(): - self.assertEqual(headers[k], httpretty.last_request().headers[k]) - logs = self.logs.getvalue() - self.assertNotIn(REDACTED, logs) - self.assertIn('sekret', logs) - @mock.patch(M_PATH + 'readurl') def test_read_file_or_url_passes_params_to_readurl(self, m_readurl): """read_file_or_url passes all params through to readurl.""" diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py index e6188ea..1496a47 100644 --- a/cloudinit/url_helper.py +++ b/cloudinit/url_helper.py @@ -8,7 +8,6 @@ # # This file is part of cloud-init. See LICENSE file for license information. -import copy import json import os import requests @@ -42,7 +41,6 @@ else: SSL_ENABLED = False CONFIG_ENABLED = False # This was added in 0.7 (but taken out in >=1.0) _REQ_VER = None -REDACTED = 'REDACTED' try: from distutils.version import LooseVersion import pkg_resources @@ -201,9 +199,9 @@ def _get_ssl_args(url, ssl_details): def readurl(url, data=None, timeout=None, retries=0, sec_between=1, - headers=None, headers_cb=None, headers_redact=None, - ssl_details=None, check_status=True, allow_redirects=True, - exception_cb=None, session=None, infinite=False, log_req_resp=True, + headers=None, headers_cb=None, ssl_details=None, + check_status=True, allow_redirects=True, exception_cb=None, + session=None, infinite=False, log_req_resp=True, request_method=None): """Wrapper around requests.Session to read the url and retry if necessary @@ -219,7 +217,6 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, :param headers: Optional dict of headers to send during request :param headers_cb: Optional callable returning a dict of values to send as headers during request - :param headers_redact: Optional list of header names to redact from the log :param ssl_details: Optional dict providing key_file, ca_certs, and cert_file keys for use on in ssl connections. :param check_status: Optional boolean set True to raise when HTTPError @@ -246,8 +243,6 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, req_args['method'] = request_method if timeout is not None: req_args['timeout'] = max(float(timeout), 0) - if headers_redact is None: - headers_redact = [] # It doesn't seem like config # was added in older library versions (or newer ones either), thus we # need to manually do the retries if it wasn't... 
@@ -291,14 +286,7 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, for (k, v) in req_args.items(): if k == 'data': continue - if k == 'headers' and headers_redact: - matched_headers = [k for k in headers_redact if v.get(k)] - if matched_headers: - filtered_req_args[k] = copy.deepcopy(v) - for key in matched_headers: - filtered_req_args[k][key] = REDACTED - else: - filtered_req_args[k] = v + filtered_req_args[k] = v try: if log_req_resp: @@ -351,8 +339,8 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, return None # Should throw before this... -def wait_for_url(urls, max_wait=None, timeout=None, status_cb=None, - headers_cb=None, headers_redact=None, sleep_time=1, +def wait_for_url(urls, max_wait=None, timeout=None, + status_cb=None, headers_cb=None, sleep_time=1, exception_cb=None, sleep_time_cb=None, request_method=None): """ urls: a list of urls to try @@ -364,7 +352,6 @@ def wait_for_url(urls, max_wait=None, timeout=None, status_cb=None, status_cb: call method with string message when a url is not available headers_cb: call method with single argument of url to get headers for request. - headers_redact: a list of header names to redact from the log exception_cb: call method with 2 arguments 'msg' (per status_cb) and 'exception', the exception that occurred. sleep_time_cb: call method with 2 arguments (response, loop_n) that @@ -428,9 +415,8 @@ def wait_for_url(urls, max_wait=None, timeout=None, status_cb=None, headers = {} response = readurl( - url, headers=headers, headers_redact=headers_redact, - timeout=timeout, check_status=False, - request_method=request_method) + url, headers=headers, timeout=timeout, + check_status=False, request_method=request_method) if not response.contents: reason = "empty response [%s]" % (response.code) url_exc = UrlError(ValueError(reason), code=response.code, diff --git a/cloudinit/util.py b/cloudinit/util.py index ad89376..9d9d5c7 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -79,10 +79,6 @@ CONTAINER_TESTS = (['systemd-detect-virt', '--quiet', '--container'], ['lxc-is-container']) -def kernel_version(): - return tuple(map(int, os.uname().release.split('.')[:2])) - - @lru_cache() def get_architecture(target=None): out, _ = subp(['dpkg', '--print-architecture'], capture=True, @@ -405,10 +401,9 @@ def translate_bool(val, addons=None): def rand_str(strlen=32, select_from=None): - r = random.SystemRandom() if not select_from: select_from = string.ascii_letters + string.digits - return "".join([r.choice(select_from) for _x in range(0, strlen)]) + return "".join([random.choice(select_from) for _x in range(0, strlen)]) def rand_dict_key(dictionary, postfix=None): diff --git a/config/cloud.cfg.tmpl b/config/cloud.cfg.tmpl index 52a259c..87c37ba 100644 --- a/config/cloud.cfg.tmpl +++ b/config/cloud.cfg.tmpl @@ -121,8 +121,6 @@ cloud_final_modules: - mcollective {% endif %} - salt-minion - - reset_rmc - - refresh_rmc_and_interface - rightscale_userdata - scripts-vendor - scripts-per-once diff --git a/doc/examples/cloud-config.txt b/doc/examples/cloud-config.txt index f00db68..eb84dcf 100644 --- a/doc/examples/cloud-config.txt +++ b/doc/examples/cloud-config.txt @@ -235,7 +235,7 @@ disable_root: false # The string '$USER' will be replaced with the username of the default user. # The string '$DISABLE_USER' will be replaced with the username to disable. 
# -# disable_root_opts: no-port-forwarding,no-agent-forwarding,no-X11-forwarding,command="echo 'Please login as the user \"$USER\" rather than the user \"$DISABLE_USER\".';echo;sleep 10;exit 142" +# disable_root_opts: no-port-forwarding,no-agent-forwarding,no-X11-forwarding,command="echo 'Please login as the user \"$USER\" rather than the user \"$DISABLE_USER\".';echo;sleep 10" # disable ssh access for non-root-users # To disable ssh access for non-root users, ssh_redirect_user: true can be @@ -413,14 +413,10 @@ timezone: US/Eastern # if syslog_fix_perms is a list, it will iterate through and use the # first pair that does not raise error. # -# 'def_log_file' will be created with mode 'def_log_file_mode', which -# is specified as a numeric value and defaults to 0600. -# # the default values are '/var/log/cloud-init.log' and 'syslog:adm' # the value of 'def_log_file' should match what is configured in logging # if either is empty, then no change of ownership will be done def_log_file: /var/log/my-logging-file.log -def_log_file_mode: 0600 syslog_fix_perms: syslog:root # you can set passwords for a user or multiple users diff --git a/rhel/README.rhel b/rhel/README.rhel deleted file mode 100644 index aa29630..0000000 --- a/rhel/README.rhel +++ /dev/null @@ -1,5 +0,0 @@ -The following cloud-init modules are currently unsupported on this OS: - - apt_update_upgrade ('apt_update', 'apt_upgrade', 'apt_mirror', 'apt_preserve_sources_list', 'apt_old_mirror', 'apt_sources', 'debconf_selections', 'packages' options) - - byobu ('byobu_by_default' option) - - chef - - grub_dpkg diff --git a/rhel/cloud-init-tmpfiles.conf b/rhel/cloud-init-tmpfiles.conf deleted file mode 100644 index 0c6d2a3..0000000 --- a/rhel/cloud-init-tmpfiles.conf +++ /dev/null @@ -1 +0,0 @@ -d /run/cloud-init 0700 root root - - diff --git a/rhel/cloud.cfg b/rhel/cloud.cfg deleted file mode 100644 index 9ecba21..0000000 --- a/rhel/cloud.cfg +++ /dev/null @@ -1,69 +0,0 @@ -users: - - default - -disable_root: 1 -ssh_pwauth: 0 - -mount_default_fields: [~, ~, 'auto', 'defaults,nofail,x-systemd.requires=cloud-init.service', '0', '2'] -resize_rootfs_tmp: /dev -ssh_deletekeys: 1 -ssh_genkeytypes: ~ -syslog_fix_perms: ~ -disable_vmware_customization: false - -cloud_init_modules: - - disk_setup - - migrator - - bootcmd - - write-files - - growpart - - resizefs - - set_hostname - - update_hostname - - update_etc_hosts - - rsyslog - - users-groups - - ssh - -cloud_config_modules: - - mounts - - locale - - set-passwords - - rh_subscription - - yum-add-repo - - package-update-upgrade-install - - timezone - - puppet - - chef - - salt-minion - - mcollective - - disable-ec2-metadata - - runcmd - -cloud_final_modules: - - rightscale_userdata - - scripts-per-once - - scripts-per-boot - - scripts-per-instance - - scripts-user - - ssh-authkey-fingerprints - - keys-to-console - - phone-home - - final-message - - power-state-change - -system_info: - default_user: - name: cloud-user - lock_passwd: true - gecos: Cloud User - groups: [adm, systemd-journal] - sudo: ["ALL=(ALL) NOPASSWD:ALL"] - shell: /bin/bash - distro: rhel - paths: - cloud_dir: /var/lib/cloud - templates_dir: /etc/cloud/templates - ssh_svcname: sshd - -# vim:syntax=yaml diff --git a/rhel/systemd/cloud-config.service b/rhel/systemd/cloud-config.service deleted file mode 100644 index f3dcd4b..0000000 --- a/rhel/systemd/cloud-config.service +++ /dev/null @@ -1,18 +0,0 @@ -[Unit] -Description=Apply the settings specified in cloud-config -After=network-online.target cloud-config.target 
-Wants=network-online.target cloud-config.target -ConditionPathExists=!/etc/cloud/cloud-init.disabled -ConditionKernelCommandLine=!cloud-init=disabled - -[Service] -Type=oneshot -ExecStart=/usr/bin/cloud-init modules --mode=config -RemainAfterExit=yes -TimeoutSec=0 - -# Output needs to appear in instance console output -StandardOutput=journal+console - -[Install] -WantedBy=cloud-init.target diff --git a/rhel/systemd/cloud-config.target b/rhel/systemd/cloud-config.target deleted file mode 100644 index ae9b7d0..0000000 --- a/rhel/systemd/cloud-config.target +++ /dev/null @@ -1,11 +0,0 @@ -# cloud-init normally emits a "cloud-config" upstart event to inform third -# parties that cloud-config is available, which does us no good when we're -# using systemd. cloud-config.target serves as this synchronization point -# instead. Services that would "start on cloud-config" with upstart can -# instead use "After=cloud-config.target" and "Wants=cloud-config.target" -# as appropriate. - -[Unit] -Description=Cloud-config availability -Wants=cloud-init-local.service cloud-init.service -After=cloud-init-local.service cloud-init.service diff --git a/rhel/systemd/cloud-final.service b/rhel/systemd/cloud-final.service deleted file mode 100644 index 05add07..0000000 --- a/rhel/systemd/cloud-final.service +++ /dev/null @@ -1,21 +0,0 @@ -[Unit] -Description=Execute cloud user/final scripts -After=network-online.target cloud-config.service rc-local.service -Wants=network-online.target cloud-config.service -ConditionPathExists=!/etc/cloud/cloud-init.disabled -ConditionKernelCommandLine=!cloud-init=disabled - -[Service] -Type=oneshot -ExecStart=/usr/bin/cloud-init modules --mode=final -RemainAfterExit=yes -TimeoutSec=0 -KillMode=process -ExecStartPost=/bin/echo "trying to reload or restart NetworkManager.service" -ExecStartPost=/usr/bin/systemctl try-reload-or-restart NetworkManager.service - -# Output needs to appear in instance console output -StandardOutput=journal+console - -[Install] -WantedBy=cloud-init.target diff --git a/rhel/systemd/cloud-init-local.service b/rhel/systemd/cloud-init-local.service deleted file mode 100644 index 8f9f6c9..0000000 --- a/rhel/systemd/cloud-init-local.service +++ /dev/null @@ -1,31 +0,0 @@ -[Unit] -Description=Initial cloud-init job (pre-networking) -DefaultDependencies=no -Wants=network-pre.target -After=systemd-remount-fs.service -Requires=dbus.socket -After=dbus.socket -Before=NetworkManager.service network.service -Before=network-pre.target -Before=shutdown.target -Before=firewalld.target -Conflicts=shutdown.target -RequiresMountsFor=/var/lib/cloud -ConditionPathExists=!/etc/cloud/cloud-init.disabled -ConditionKernelCommandLine=!cloud-init=disabled - -[Service] -Type=oneshot -ExecStartPre=/bin/mkdir -p /run/cloud-init -ExecStartPre=/sbin/restorecon /run/cloud-init -ExecStartPre=/usr/bin/touch /run/cloud-init/enabled -ExecStart=/usr/bin/cloud-init init --local -ExecStart=/bin/touch /run/cloud-init/network-config-ready -RemainAfterExit=yes -TimeoutSec=0 - -# Output needs to appear in instance console output -StandardOutput=journal+console - -[Install] -WantedBy=cloud-init.target diff --git a/rhel/systemd/cloud-init.service b/rhel/systemd/cloud-init.service deleted file mode 100644 index 0b3d796..0000000 --- a/rhel/systemd/cloud-init.service +++ /dev/null @@ -1,26 +0,0 @@ -[Unit] -Description=Initial cloud-init job (metadata service crawler) -Wants=cloud-init-local.service -Wants=sshd-keygen.service -Wants=sshd.service -After=cloud-init-local.service 
-After=NetworkManager.service network.service -After=NetworkManager-wait-online.service -Before=network-online.target -Before=sshd-keygen.service -Before=sshd.service -Before=systemd-user-sessions.service -ConditionPathExists=!/etc/cloud/cloud-init.disabled -ConditionKernelCommandLine=!cloud-init=disabled - -[Service] -Type=oneshot -ExecStart=/usr/bin/cloud-init init -RemainAfterExit=yes -TimeoutSec=0 - -# Output needs to appear in instance console output -StandardOutput=journal+console - -[Install] -WantedBy=cloud-init.target diff --git a/rhel/systemd/cloud-init.target b/rhel/systemd/cloud-init.target deleted file mode 100644 index 083c3b6..0000000 --- a/rhel/systemd/cloud-init.target +++ /dev/null @@ -1,7 +0,0 @@ -# cloud-init target is enabled by cloud-init-generator -# To disable it you can either: -# a.) boot with kernel cmdline of 'cloud-init=disabled' -# b.) touch a file /etc/cloud/cloud-init.disabled -[Unit] -Description=Cloud-init target -After=multi-user.target diff --git a/setup.py b/setup.py index b2ac9bb..01a67b9 100755 --- a/setup.py +++ b/setup.py @@ -139,6 +139,14 @@ INITSYS_FILES = { 'sysvinit_deb': [f for f in glob('sysvinit/debian/*') if is_f(f)], 'sysvinit_openrc': [f for f in glob('sysvinit/gentoo/*') if is_f(f)], 'sysvinit_suse': [f for f in glob('sysvinit/suse/*') if is_f(f)], + 'systemd': [render_tmpl(f) + for f in (glob('systemd/*.tmpl') + + glob('systemd/*.service') + + glob('systemd/*.target')) + if (is_f(f) and not is_generator(f))], + 'systemd.generators': [ + render_tmpl(f, mode=0o755) + for f in glob('systemd/*') if is_f(f) and is_generator(f)], 'upstart': [f for f in glob('upstart/*') if is_f(f)], } INITSYS_ROOTS = { @@ -147,6 +155,9 @@ INITSYS_ROOTS = { 'sysvinit_deb': 'etc/init.d', 'sysvinit_openrc': 'etc/init.d', 'sysvinit_suse': 'etc/init.d', + 'systemd': pkg_config_read('systemd', 'systemdsystemunitdir'), + 'systemd.generators': pkg_config_read('systemd', + 'systemdsystemgeneratordir'), 'upstart': 'etc/init/', } INITSYS_TYPES = sorted([f.partition(".")[0] for f in INITSYS_ROOTS.keys()]) @@ -197,6 +208,47 @@ class MyEggInfo(egg_info): return ret +# TODO: Is there a better way to do this?? +class InitsysInstallData(install): + init_system = None + user_options = install.user_options + [ + # This will magically show up in member variable 'init_sys' + ('init-system=', None, + ('init system(s) to configure (%s) [default: None]' % + (", ".join(INITSYS_TYPES)))), + ] + + def initialize_options(self): + install.initialize_options(self) + self.init_system = "" + + def finalize_options(self): + install.finalize_options(self) + + if self.init_system and isinstance(self.init_system, str): + self.init_system = self.init_system.split(",") + + if len(self.init_system) == 0: + self.init_system = ['systemd'] + + bad = [f for f in self.init_system if f not in INITSYS_TYPES] + if len(bad) != 0: + raise DistutilsArgError( + "Invalid --init-system: %s" % (','.join(bad))) + + for system in self.init_system: + # add data files for anything that starts with '.' 
+ datakeys = [k for k in INITSYS_ROOTS + if k.partition(".")[0] == system] + for k in datakeys: + if not INITSYS_FILES[k]: + continue + self.distribution.data_files.append( + (INITSYS_ROOTS[k], INITSYS_FILES[k])) + # Force that command to reinitalize (with new file list) + self.distribution.reinitialize_command('install_data', True) + + if not in_virtualenv(): USR = "/" + USR ETC = "/" + ETC @@ -206,11 +258,14 @@ if not in_virtualenv(): INITSYS_ROOTS[k] = "/" + INITSYS_ROOTS[k] data_files = [ - (ETC + '/bash_completion.d', ['bash_completion/cloud-init']), + (ETC + '/cloud', [render_tmpl("config/cloud.cfg.tmpl")]), (ETC + '/cloud/cloud.cfg.d', glob('config/cloud.cfg.d/*')), (ETC + '/cloud/templates', glob('templates/*')), - (USR_LIB_EXEC + '/cloud-init', ['tools/uncloud-init', + (USR_LIB_EXEC + '/cloud-init', ['tools/ds-identify', + 'tools/uncloud-init', 'tools/write-ssh-key-fingerprints']), + (USR + '/share/bash-completion/completions', + ['bash_completion/cloud-init']), (USR + '/share/doc/cloud-init', [f for f in glob('doc/*') if is_f(f)]), (USR + '/share/doc/cloud-init/examples', [f for f in glob('doc/examples/*') if is_f(f)]), @@ -221,8 +276,15 @@ if os.uname()[0] != 'FreeBSD': data_files.extend([ (ETC + '/NetworkManager/dispatcher.d/', ['tools/hook-network-manager']), - ('/usr/lib/udev/rules.d', [f for f in glob('udev/*.rules')]) + (ETC + '/dhcp/dhclient-exit-hooks.d/', ['tools/hook-dhclient']), + (LIB + '/udev/rules.d', [f for f in glob('udev/*.rules')]) ]) +# Use a subclass for install that handles +# adding on the right init system configuration files +cmdclass = { + 'install': InitsysInstallData, + 'egg_info': MyEggInfo, +} requirements = read_requires() @@ -237,6 +299,8 @@ setuptools.setup( scripts=['tools/cloud-init-per'], license='Dual-licensed under GPLv3 or Apache 2.0', data_files=data_files, + install_requires=requirements, + cmdclass=cmdclass, entry_points={ 'console_scripts': [ 'cloud-init = cloudinit.cmd.main:main', diff --git a/systemd/cloud-init-generator.tmpl b/systemd/cloud-init-generator.tmpl index 0773356..45efa24 100755 --- a/systemd/cloud-init-generator.tmpl +++ b/systemd/cloud-init-generator.tmpl @@ -83,7 +83,7 @@ default() { check_for_datasource() { local ds_rc="" -{% if variant in ["rhel", "fedora", "centos"] %} +{% if variant in ["redhat", "fedora", "centos"] %} local dsidentify="/usr/libexec/cloud-init/ds-identify" {% else %} local dsidentify="/usr/lib/cloud-init/ds-identify" diff --git a/systemd/cloud-init.service.tmpl b/systemd/cloud-init.service.tmpl index af6d9a8..9ad3574 100644 --- a/systemd/cloud-init.service.tmpl +++ b/systemd/cloud-init.service.tmpl @@ -10,7 +10,7 @@ After=systemd-networkd-wait-online.service {% if variant in ["ubuntu", "unknown", "debian"] %} After=networking.service {% endif %} -{% if variant in ["centos", "fedora", "rhel"] %} +{% if variant in ["centos", "fedora", "redhat"] %} After=network.service After=NetworkManager.service {% endif %} diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index f141dc6..a809fd8 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -899,9 +899,6 @@ scbus-1 on xpt0 bus 0 crypt.crypt(odata['UserPassword'], defuser['passwd'][0:pos])) - # the same hashed value should also be present in cfg['password'] - self.assertEqual(defuser['passwd'], dsrc.cfg['password']) - def test_user_not_locked_if_password_redacted(self): odata = {'HostName': "myhost", 'UserName': "myuser", 'UserPassword': 
dsaz.DEF_PASSWD_REDACTION} diff --git a/tests/unittests/test_datasource/test_ec2.py b/tests/unittests/test_datasource/test_ec2.py index bd5bd4c..34a089f 100644 --- a/tests/unittests/test_datasource/test_ec2.py +++ b/tests/unittests/test_datasource/test_ec2.py @@ -429,23 +429,6 @@ class TestEc2(test_helpers.HttprettyTestCase): self.assertTrue(ds.get_data()) self.assertFalse(ds.is_classic_instance()) - def test_aws_token_redacted(self): - """Verify that aws tokens are redacted when logged.""" - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, - md={'md': DEFAULT_METADATA}) - self.assertTrue(ds.get_data()) - all_logs = self.logs.getvalue().splitlines() - REDACT_TTL = "'X-aws-ec2-metadata-token-ttl-seconds': 'REDACTED'" - REDACT_TOK = "'X-aws-ec2-metadata-token': 'REDACTED'" - logs_with_redacted_ttl = [log for log in all_logs if REDACT_TTL in log] - logs_with_redacted = [log for log in all_logs if REDACT_TOK in log] - logs_with_token = [log for log in all_logs if 'API-TOKEN' in log] - self.assertEqual(1, len(logs_with_redacted_ttl)) - self.assertEqual(79, len(logs_with_redacted)) - self.assertEqual(0, len(logs_with_token)) - @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') def test_valid_platform_with_strict_true(self, m_dhcp): """Valid platform data should return true with strict_id true.""" diff --git a/tests/unittests/test_distros/test_netconfig.py b/tests/unittests/test_distros/test_netconfig.py index e277bca..6720995 100644 --- a/tests/unittests/test_distros/test_netconfig.py +++ b/tests/unittests/test_distros/test_netconfig.py @@ -109,31 +109,13 @@ auto eth1 iface eth1 inet dhcp """ -V1_NET_CFG_IPV6_OUTPUT = """\ -# This file is generated from information provided by the datasource. Changes -# to it will not persist across an instance reboot. To disable cloud-init's -# network configuration capabilities, write a file -# /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg with the following: -# network: {config: disabled} -auto lo -iface lo inet loopback - -auto eth0 -iface eth0 inet6 static - address 2607:f0d0:1002:0011::2/64 - gateway 2607:f0d0:1002:0011::1 - -auto eth1 -iface eth1 inet dhcp -""" - V1_NET_CFG_IPV6 = {'config': [{'name': 'eth0', 'subnets': [{'address': '2607:f0d0:1002:0011::2', 'gateway': '2607:f0d0:1002:0011::1', 'netmask': '64', - 'type': 'static6'}], + 'type': 'static'}], 'type': 'physical'}, {'name': 'eth1', 'subnets': [{'control': 'auto', @@ -159,23 +141,6 @@ network: dhcp4: true """ -V1_TO_V2_NET_CFG_IPV6_OUTPUT = """\ -# This file is generated from information provided by the datasource. Changes -# to it will not persist across an instance reboot. 
To disable cloud-init's -# network configuration capabilities, write a file -# /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg with the following: -# network: {config: disabled} -network: - version: 2 - ethernets: - eth0: - addresses: - - 2607:f0d0:1002:0011::2/64 - gateway6: 2607:f0d0:1002:0011::1 - eth1: - dhcp4: true -""" - V2_NET_CFG = { 'ethernets': { 'eth7': { @@ -411,14 +376,6 @@ class TestNetCfgDistroUbuntuEni(TestNetCfgDistroBase): V1_NET_CFG, expected_cfgs=expected_cfgs.copy()) - def test_apply_network_config_ipv6_ub(self): - expected_cfgs = { - self.eni_path(): V1_NET_CFG_IPV6_OUTPUT - } - self._apply_and_verify_eni(self.distro.apply_network_config, - V1_NET_CFG_IPV6, - expected_cfgs=expected_cfgs.copy()) - class TestNetCfgDistroUbuntuNetplan(TestNetCfgDistroBase): def setUp(self): @@ -462,16 +419,6 @@ class TestNetCfgDistroUbuntuNetplan(TestNetCfgDistroBase): V1_NET_CFG, expected_cfgs=expected_cfgs.copy()) - def test_apply_network_config_v1_ipv6_to_netplan_ub(self): - expected_cfgs = { - self.netplan_path(): V1_TO_V2_NET_CFG_IPV6_OUTPUT, - } - - # ub_distro.apply_network_config(V1_NET_CFG_IPV6, False) - self._apply_and_verify_netplan(self.distro.apply_network_config, - V1_NET_CFG_IPV6, - expected_cfgs=expected_cfgs.copy()) - def test_apply_network_config_v2_passthrough_ub(self): expected_cfgs = { self.netplan_path(): V2_TO_V2_NET_CFG_OUTPUT, @@ -579,87 +526,6 @@ class TestNetCfgDistroRedhat(TestNetCfgDistroBase): V1_NET_CFG_IPV6, expected_cfgs=expected_cfgs.copy()) - def test_vlan_render_unsupported(self): - """Render officially unsupported vlan names.""" - cfg = { - 'version': 2, - 'ethernets': { - 'eth0': {'addresses': ["192.10.1.2/24"], - 'match': {'macaddress': "00:16:3e:60:7c:df"}}}, - 'vlans': { - 'infra0': {'addresses': ["10.0.1.2/16"], - 'id': 1001, 'link': 'eth0'}}, - } - expected_cfgs = { - self.ifcfg_path('eth0'): dedent("""\ - BOOTPROTO=none - DEVICE=eth0 - HWADDR=00:16:3e:60:7c:df - IPADDR=192.10.1.2 - NETMASK=255.255.255.0 - NM_CONTROLLED=no - ONBOOT=yes - TYPE=Ethernet - USERCTL=no - """), - self.ifcfg_path('infra0'): dedent("""\ - BOOTPROTO=none - DEVICE=infra0 - IPADDR=10.0.1.2 - NETMASK=255.255.0.0 - NM_CONTROLLED=no - ONBOOT=yes - PHYSDEV=eth0 - USERCTL=no - VLAN=yes - """), - self.control_path(): dedent("""\ - NETWORKING=yes - """), - } - self._apply_and_verify( - self.distro.apply_network_config, cfg, - expected_cfgs=expected_cfgs) - - def test_vlan_render(self): - cfg = { - 'version': 2, - 'ethernets': { - 'eth0': {'addresses': ["192.10.1.2/24"]}}, - 'vlans': { - 'eth0.1001': {'addresses': ["10.0.1.2/16"], - 'id': 1001, 'link': 'eth0'}}, - } - expected_cfgs = { - self.ifcfg_path('eth0'): dedent("""\ - BOOTPROTO=none - DEVICE=eth0 - IPADDR=192.10.1.2 - NETMASK=255.255.255.0 - NM_CONTROLLED=no - ONBOOT=yes - TYPE=Ethernet - USERCTL=no - """), - self.ifcfg_path('eth0.1001'): dedent("""\ - BOOTPROTO=none - DEVICE=eth0.1001 - IPADDR=10.0.1.2 - NETMASK=255.255.0.0 - NM_CONTROLLED=no - ONBOOT=yes - PHYSDEV=eth0 - USERCTL=no - VLAN=yes - """), - self.control_path(): dedent("""\ - NETWORKING=yes - """), - } - self._apply_and_verify( - self.distro.apply_network_config, cfg, - expected_cfgs=expected_cfgs) - class TestNetCfgDistroOpensuse(TestNetCfgDistroBase): @@ -726,9 +592,7 @@ class TestNetCfgDistroOpensuse(TestNetCfgDistroBase): IPADDR6=2607:f0d0:1002:0011::2/64 IPV6ADDR=2607:f0d0:1002:0011::2/64 IPV6INIT=yes - IPV6_AUTOCONF=no IPV6_DEFAULTGW=2607:f0d0:1002:0011::1 - IPV6_FORCE_ACCEPT_RA=no NM_CONTROLLED=no ONBOOT=yes STARTMODE=auto diff --git 
a/tests/unittests/test_handler/test_handler_mounts.py b/tests/unittests/test_handler/test_handler_mounts.py index 27bcc6f..0fb160b 100644 --- a/tests/unittests/test_handler/test_handler_mounts.py +++ b/tests/unittests/test_handler/test_handler_mounts.py @@ -132,113 +132,6 @@ class TestSanitizeDevname(test_helpers.FilesystemMockingTestCase): 'ephemeral0.1', lambda x: disk_path, mock.Mock())) -class TestSwapFileCreation(test_helpers.FilesystemMockingTestCase): - - def setUp(self): - super(TestSwapFileCreation, self).setUp() - self.new_root = self.tmp_dir() - self.patchOS(self.new_root) - - self.fstab_path = os.path.join(self.new_root, 'etc/fstab') - self.swap_path = os.path.join(self.new_root, 'swap.img') - self._makedirs('/etc') - - self.add_patch('cloudinit.config.cc_mounts.FSTAB_PATH', - 'mock_fstab_path', - self.fstab_path, - autospec=False) - - self.add_patch('cloudinit.config.cc_mounts.subp.subp', - 'm_subp_subp') - - self.add_patch('cloudinit.config.cc_mounts.util.mounts', - 'mock_util_mounts', - return_value={ - '/dev/sda1': {'fstype': 'ext4', - 'mountpoint': '/', - 'opts': 'rw,relatime,discard' - }}) - - self.mock_cloud = mock.Mock() - self.mock_log = mock.Mock() - self.mock_cloud.device_name_to_device = self.device_name_to_device - - self.cc = { - 'swap': { - 'filename': self.swap_path, - 'size': '512', - 'maxsize': '512'}} - - def _makedirs(self, directory): - directory = os.path.join(self.new_root, directory.lstrip('/')) - if not os.path.exists(directory): - os.makedirs(directory) - - def device_name_to_device(self, path): - if path == 'swap': - return self.swap_path - else: - dev = None - - return dev - - @mock.patch('cloudinit.util.get_mount_info') - @mock.patch('cloudinit.util.kernel_version') - def test_swap_creation_method_fallocate_on_xfs(self, m_kernel_version, - m_get_mount_info): - m_kernel_version.return_value = (4, 20) - m_get_mount_info.return_value = ["", "xfs"] - - cc_mounts.handle(None, self.cc, self.mock_cloud, self.mock_log, []) - self.m_subp_subp.assert_has_calls([ - mock.call(['fallocate', '-l', '0M', self.swap_path], capture=True), - mock.call(['mkswap', self.swap_path]), - mock.call(['swapon', '-a'])]) - - @mock.patch('cloudinit.util.get_mount_info') - @mock.patch('cloudinit.util.kernel_version') - def test_swap_creation_method_xfs(self, m_kernel_version, - m_get_mount_info): - m_kernel_version.return_value = (3, 18) - m_get_mount_info.return_value = ["", "xfs"] - - cc_mounts.handle(None, self.cc, self.mock_cloud, self.mock_log, []) - self.m_subp_subp.assert_has_calls([ - mock.call(['dd', 'if=/dev/zero', - 'of=' + self.swap_path, - 'bs=1M', 'count=0'], capture=True), - mock.call(['mkswap', self.swap_path]), - mock.call(['swapon', '-a'])]) - - @mock.patch('cloudinit.util.get_mount_info') - @mock.patch('cloudinit.util.kernel_version') - def test_swap_creation_method_btrfs(self, m_kernel_version, - m_get_mount_info): - m_kernel_version.return_value = (4, 20) - m_get_mount_info.return_value = ["", "btrfs"] - - cc_mounts.handle(None, self.cc, self.mock_cloud, self.mock_log, []) - self.m_subp_subp.assert_has_calls([ - mock.call(['dd', 'if=/dev/zero', - 'of=' + self.swap_path, - 'bs=1M', 'count=0'], capture=True), - mock.call(['mkswap', self.swap_path]), - mock.call(['swapon', '-a'])]) - - @mock.patch('cloudinit.util.get_mount_info') - @mock.patch('cloudinit.util.kernel_version') - def test_swap_creation_method_ext4(self, m_kernel_version, - m_get_mount_info): - m_kernel_version.return_value = (5, 14) - m_get_mount_info.return_value = ["", "ext4"] - - 
cc_mounts.handle(None, self.cc, self.mock_cloud, self.mock_log, []) - self.m_subp_subp.assert_has_calls([ - mock.call(['fallocate', '-l', '0M', self.swap_path], capture=True), - mock.call(['mkswap', self.swap_path]), - mock.call(['swapon', '-a'])]) - - class TestFstabHandling(test_helpers.FilesystemMockingTestCase): swap_path = '/dev/sdb1' @@ -288,18 +181,6 @@ class TestFstabHandling(test_helpers.FilesystemMockingTestCase): return dev - def test_swap_integrity(self): - '''Ensure that the swap file is correctly created and can - swapon successfully. Fixing the corner case of: - kernel: swapon: swapfile has holes''' - - fstab = '/swap.img swap swap defaults 0 0\n' - - with open(cc_mounts.FSTAB_PATH, 'w') as fd: - fd.write(fstab) - cc = {'swap': ['filename: /swap.img', 'size: 512', 'maxsize: 512']} - cc_mounts.handle(None, cc, self.mock_cloud, self.mock_log, []) - def test_fstab_no_swap_device(self): '''Ensure that cloud-init adds a discovered swap partition to /etc/fstab.''' diff --git a/tests/unittests/test_handler/test_handler_refresh_rmc_and_interface.py b/tests/unittests/test_handler/test_handler_refresh_rmc_and_interface.py deleted file mode 100644 index 0c35710..0000000 --- a/tests/unittests/test_handler/test_handler_refresh_rmc_and_interface.py +++ /dev/null @@ -1,109 +0,0 @@ -from cloudinit.config import cc_refresh_rmc_and_interface as ccrmci - -from cloudinit import util - -from cloudinit.tests import helpers as t_help -from cloudinit.tests.helpers import mock - -from textwrap import dedent -import logging - -LOG = logging.getLogger(__name__) -MPATH = "cloudinit.config.cc_refresh_rmc_and_interface" -NET_INFO = { - 'lo': {'ipv4': [{'ip': '127.0.0.1', - 'bcast': '', 'mask': '255.0.0.0', - 'scope': 'host'}], - 'ipv6': [{'ip': '::1/128', - 'scope6': 'host'}], 'hwaddr': '', - 'up': 'True'}, - 'env2': {'ipv4': [{'ip': '8.0.0.19', - 'bcast': '8.0.0.255', 'mask': '255.255.255.0', - 'scope': 'global'}], - 'ipv6': [{'ip': 'fe80::f896:c2ff:fe81:8220/64', - 'scope6': 'link'}], 'hwaddr': 'fa:96:c2:81:82:20', - 'up': 'True'}, - 'env3': {'ipv4': [{'ip': '90.0.0.14', - 'bcast': '90.0.0.255', 'mask': '255.255.255.0', - 'scope': 'global'}], - 'ipv6': [{'ip': 'fe80::f896:c2ff:fe81:8221/64', - 'scope6': 'link'}], 'hwaddr': 'fa:96:c2:81:82:21', - 'up': 'True'}, - 'env4': {'ipv4': [{'ip': '9.114.23.7', - 'bcast': '9.114.23.255', 'mask': '255.255.255.0', - 'scope': 'global'}], - 'ipv6': [{'ip': 'fe80::f896:c2ff:fe81:8222/64', - 'scope6': 'link'}], 'hwaddr': 'fa:96:c2:81:82:22', - 'up': 'True'}, - 'env5': {'ipv4': [], - 'ipv6': [{'ip': 'fe80::9c26:c3ff:fea4:62c8/64', - 'scope6': 'link'}], 'hwaddr': '42:20:86:df:fa:4c', - 'up': 'True'}} - - -class TestRsctNodeFile(t_help.CiTestCase): - def test_disable_ipv6_interface(self): - """test parsing of iface files.""" - fname = self.tmp_path("iface-eth5") - util.write_file(fname, dedent("""\ - BOOTPROTO=static - DEVICE=eth5 - HWADDR=42:20:86:df:fa:4c - IPV6INIT=yes - IPADDR6=fe80::9c26:c3ff:fea4:62c8/64 - IPV6ADDR=fe80::9c26:c3ff:fea4:62c8/64 - NM_CONTROLLED=yes - ONBOOT=yes - STARTMODE=auto - TYPE=Ethernet - USERCTL=no - """)) - - ccrmci.disable_ipv6(fname) - self.assertEqual(dedent("""\ - BOOTPROTO=static - DEVICE=eth5 - HWADDR=42:20:86:df:fa:4c - ONBOOT=yes - STARTMODE=auto - TYPE=Ethernet - USERCTL=no - NM_CONTROLLED=no - """), util.load_file(fname)) - - @mock.patch(MPATH + '.refresh_rmc') - @mock.patch(MPATH + '.restart_network_manager') - @mock.patch(MPATH + '.disable_ipv6') - @mock.patch(MPATH + '.refresh_ipv6') - @mock.patch(MPATH + '.netinfo.netdev_info') 
- @mock.patch(MPATH + '.util.which') - def test_handle(self, m_refresh_rmc, - m_netdev_info, m_refresh_ipv6, m_disable_ipv6, - m_restart_nm, m_which): - """Basic test of handle.""" - m_netdev_info.return_value = NET_INFO - m_which.return_value = '/opt/rsct/bin/rmcctrl' - ccrmci.handle( - "refresh_rmc_and_interface", None, None, None, None) - self.assertEqual(1, m_netdev_info.call_count) - m_refresh_ipv6.assert_called_with('env5') - m_disable_ipv6.assert_called_with( - '/etc/sysconfig/network-scripts/ifcfg-env5') - self.assertEqual(1, m_restart_nm.call_count) - self.assertEqual(1, m_refresh_rmc.call_count) - - @mock.patch(MPATH + '.netinfo.netdev_info') - def test_find_ipv6(self, m_netdev_info): - """find_ipv6_ifaces parses netdev_info returning those with ipv6""" - m_netdev_info.return_value = NET_INFO - found = ccrmci.find_ipv6_ifaces() - self.assertEqual(['env5'], found) - - @mock.patch(MPATH + '.util.subp') - def test_refresh_ipv6(self, m_subp): - """refresh_ipv6 should ip down and up the interface.""" - iface = "myeth0" - ccrmci.refresh_ipv6(iface) - m_subp.assert_has_calls([ - mock.call(['ip', 'link', 'set', iface, 'down']), - mock.call(['ip', 'link', 'set', iface, 'up'])]) diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index b2b7c4b..01119e0 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -530,6 +530,7 @@ GATEWAY=172.19.3.254 HWADDR=fa:16:3e:ed:9a:59 IPADDR=172.19.1.34 NETMASK=255.255.252.0 +NM_CONTROLLED=no ONBOOT=yes STARTMODE=auto TYPE=Ethernet @@ -635,6 +636,7 @@ IPADDR=172.19.1.34 IPADDR1=10.0.0.10 NETMASK=255.255.252.0 NETMASK1=255.255.255.0 +NM_CONTROLLED=no ONBOOT=yes STARTMODE=auto TYPE=Ethernet @@ -768,10 +770,9 @@ IPADDR6_2=2001:DB10::10/64 IPV6ADDR=2001:DB8::10/64 IPV6ADDR_SECONDARIES="2001:DB9::10/64 2001:DB10::10/64" IPV6INIT=yes -IPV6_AUTOCONF=no IPV6_DEFAULTGW=2001:DB8::1 -IPV6_FORCE_ACCEPT_RA=no NETMASK=255.255.252.0 +NM_CONTROLLED=no ONBOOT=yes STARTMODE=auto TYPE=Ethernet @@ -888,6 +889,7 @@ NETWORK_CONFIGS = { BOOTPROTO=none DEVICE=eth1 HWADDR=cf:d6:af:48:e8:80 + NM_CONTROLLED=no ONBOOT=yes STARTMODE=auto TYPE=Ethernet @@ -905,6 +907,7 @@ NETWORK_CONFIGS = { IPADDR=192.168.21.3 NETMASK=255.255.255.0 METRIC=10000 + NM_CONTROLLED=no ONBOOT=yes STARTMODE=auto TYPE=Ethernet @@ -1018,9 +1021,8 @@ NETWORK_CONFIGS = { IPADDR6=2001:1::1/64 IPV6ADDR=2001:1::1/64 IPV6INIT=yes - IPV6_AUTOCONF=no - IPV6_FORCE_ACCEPT_RA=no NETMASK=255.255.255.0 + NM_CONTROLLED=no ONBOOT=yes STARTMODE=auto TYPE=Ethernet @@ -1205,33 +1207,6 @@ NETWORK_CONFIGS = { """), }, }, - 'static6': { - 'yaml': textwrap.dedent("""\ - version: 1 - config: - - type: 'physical' - name: 'iface0' - accept-ra: 'no' - subnets: - - type: 'static6' - address: 2001:1::1/64 - """).rstrip(' '), - 'expected_sysconfig_rhel': { - 'ifcfg-iface0': textwrap.dedent("""\ - BOOTPROTO=none - DEVICE=iface0 - IPV6ADDR=2001:1::1/64 - IPV6INIT=yes - IPV6_AUTOCONF=no - IPV6_FORCE_ACCEPT_RA=no - DEVICE=iface0 - NM_CONTROLLED=no - ONBOOT=yes - TYPE=Ethernet - USERCTL=no - """), - }, - }, 'dhcpv6_stateless': { 'expected_eni': textwrap.dedent("""\ auto lo @@ -1516,6 +1491,7 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true DHCPV6C=yes IPV6INIT=yes MACADDR=aa:bb:cc:dd:ee:ff + NM_CONTROLLED=no ONBOOT=yes STARTMODE=auto TYPE=Bond @@ -1524,9 +1500,11 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true BOOTPROTO=dhcp DEVICE=bond0.200 DHCLIENT_SET_DEFAULT_ROUTE=no + NM_CONTROLLED=no ONBOOT=yes PHYSDEV=bond0 STARTMODE=auto + TYPE=Ethernet USERCTL=no 
VLAN=yes"""), 'ifcfg-br0': textwrap.dedent("""\ @@ -1538,11 +1516,10 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true IPADDR6=2001:1::1/64 IPV6ADDR=2001:1::1/64 IPV6INIT=yes - IPV6_AUTOCONF=no - IPV6_FORCE_ACCEPT_RA=no IPV6_DEFAULTGW=2001:4800:78ff:1b::1 MACADDR=bb:bb:bb:bb:bb:aa NETMASK=255.255.255.0 + NM_CONTROLLED=no ONBOOT=yes PRIO=22 STARTMODE=auto @@ -1553,6 +1530,7 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true BOOTPROTO=none DEVICE=eth0 HWADDR=c0:d6:9f:2c:e8:80 + NM_CONTROLLED=no ONBOOT=yes STARTMODE=auto TYPE=Ethernet @@ -1570,9 +1548,11 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true MTU=1500 NETMASK=255.255.255.0 NETMASK1=255.255.255.0 + NM_CONTROLLED=no ONBOOT=yes PHYSDEV=eth0 STARTMODE=auto + TYPE=Ethernet USERCTL=no VLAN=yes"""), 'ifcfg-eth1': textwrap.dedent("""\ @@ -1580,6 +1560,7 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true DEVICE=eth1 HWADDR=aa:d6:9f:2c:e8:80 MASTER=bond0 + NM_CONTROLLED=no ONBOOT=yes STARTMODE=auto SLAVE=yes @@ -1590,6 +1571,7 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true DEVICE=eth2 HWADDR=c0:bb:9f:2c:e8:80 MASTER=bond0 + NM_CONTROLLED=no ONBOOT=yes STARTMODE=auto SLAVE=yes @@ -1600,6 +1582,7 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true BRIDGE=br0 DEVICE=eth3 HWADDR=66:bb:9f:2c:e8:80 + NM_CONTROLLED=no ONBOOT=yes STARTMODE=auto TYPE=Ethernet @@ -1609,6 +1592,7 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true BRIDGE=br0 DEVICE=eth4 HWADDR=98:bb:9f:2c:e8:80 + NM_CONTROLLED=no ONBOOT=yes STARTMODE=auto TYPE=Ethernet @@ -1618,6 +1602,7 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true DEVICE=eth5 DHCLIENT_SET_DEFAULT_ROUTE=no HWADDR=98:bb:9f:2c:e8:8a + NM_CONTROLLED=no ONBOOT=no STARTMODE=manual TYPE=Ethernet @@ -2100,11 +2085,10 @@ iface bond0 inet6 static IPADDR6=2001:1::1/92 IPV6ADDR=2001:1::1/92 IPV6INIT=yes - IPV6_AUTOCONF=no - IPV6_FORCE_ACCEPT_RA=no MTU=9000 NETMASK=255.255.255.0 NETMASK1=255.255.255.0 + NM_CONTROLLED=no ONBOOT=yes STARTMODE=auto TYPE=Bond @@ -2115,6 +2099,7 @@ iface bond0 inet6 static DEVICE=bond0s0 HWADDR=aa:bb:cc:dd:e8:00 MASTER=bond0 + NM_CONTROLLED=no ONBOOT=yes SLAVE=yes STARTMODE=auto @@ -2137,6 +2122,7 @@ iface bond0 inet6 static DEVICE=bond0s1 HWADDR=aa:bb:cc:dd:e8:01 MASTER=bond0 + NM_CONTROLLED=no ONBOOT=yes SLAVE=yes STARTMODE=auto @@ -2175,6 +2161,7 @@ iface bond0 inet6 static BOOTPROTO=none DEVICE=en0 HWADDR=aa:bb:cc:dd:e8:00 + NM_CONTROLLED=no ONBOOT=yes STARTMODE=auto TYPE=Ethernet @@ -2189,15 +2176,15 @@ iface bond0 inet6 static IPADDR6=2001:1::bbbb/96 IPV6ADDR=2001:1::bbbb/96 IPV6INIT=yes - IPV6_AUTOCONF=no - IPV6_FORCE_ACCEPT_RA=no IPV6_DEFAULTGW=2001:1::1 MTU=2222 NETMASK=255.255.255.0 NETMASK1=255.255.255.0 + NM_CONTROLLED=no ONBOOT=yes PHYSDEV=en0 STARTMODE=auto + TYPE=Ethernet USERCTL=no VLAN=yes"""), }, @@ -2235,6 +2222,7 @@ iface bond0 inet6 static DEVICE=br0 IPADDR=192.168.2.2 NETMASK=255.255.255.0 + NM_CONTROLLED=no ONBOOT=yes PRIO=22 STARTMODE=auto @@ -2250,8 +2238,6 @@ iface bond0 inet6 static IPADDR6=2001:1::100/96 IPV6ADDR=2001:1::100/96 IPV6INIT=yes - IPV6_AUTOCONF=no - IPV6_FORCE_ACCEPT_RA=no NM_CONTROLLED=no ONBOOT=yes STARTMODE=auto @@ -2266,8 +2252,6 @@ iface bond0 inet6 static IPADDR6=2001:1::101/96 IPV6ADDR=2001:1::101/96 IPV6INIT=yes - IPV6_AUTOCONF=no - IPV6_FORCE_ACCEPT_RA=no NM_CONTROLLED=no ONBOOT=yes STARTMODE=auto @@ -2343,6 +2327,7 @@ iface bond0 inet6 static HWADDR=52:54:00:12:34:00 IPADDR=192.168.1.2 NETMASK=255.255.255.0 + 
NM_CONTROLLED=no ONBOOT=no STARTMODE=manual TYPE=Ethernet @@ -2353,6 +2338,7 @@ iface bond0 inet6 static DEVICE=eth1 HWADDR=52:54:00:12:34:aa MTU=1480 + NM_CONTROLLED=no ONBOOT=yes STARTMODE=auto TYPE=Ethernet @@ -2362,6 +2348,7 @@ iface bond0 inet6 static BOOTPROTO=none DEVICE=eth2 HWADDR=52:54:00:12:34:ff + NM_CONTROLLED=no ONBOOT=no STARTMODE=manual TYPE=Ethernet @@ -2779,6 +2766,7 @@ class TestRhelSysConfigRendering(CiTestCase): BOOTPROTO=dhcp DEVICE=eth1000 HWADDR=07-1c-c6-75-a4-be +NM_CONTROLLED=no ONBOOT=yes STARTMODE=auto TYPE=Ethernet @@ -2900,6 +2888,7 @@ GATEWAY=10.0.2.2 HWADDR=52:54:00:12:34:00 IPADDR=10.0.2.15 NETMASK=255.255.255.0 +NM_CONTROLLED=no ONBOOT=yes STARTMODE=auto TYPE=Ethernet @@ -2972,6 +2961,7 @@ USERCTL=no # BOOTPROTO=dhcp DEVICE=eth0 +NM_CONTROLLED=no ONBOOT=yes STARTMODE=auto TYPE=Ethernet @@ -3058,61 +3048,6 @@ USERCTL=no self._compare_files_to_expected(entry[self.expected_name], found) self._assert_headers(found) - def test_stattic6_from_json(self): - net_json = { - "services": [{"type": "dns", "address": "172.19.0.12"}], - "networks": [{ - "network_id": "dacd568d-5be6-4786-91fe-750c374b78b4", - "type": "ipv4", "netmask": "255.255.252.0", - "link": "tap1a81968a-79", - "routes": [{ - "netmask": "0.0.0.0", - "network": "0.0.0.0", - "gateway": "172.19.3.254", - }, { - "netmask": "0.0.0.0", # A second default gateway - "network": "0.0.0.0", - "gateway": "172.20.3.254", - }], - "ip_address": "172.19.1.34", "id": "network0" - }, { - "network_id": "mgmt", - "netmask": "ffff:ffff:ffff:ffff::", - "link": "interface1", - "mode": "link-local", - "routes": [], - "ip_address": "fe80::c096:67ff:fe5c:6e84", - "type": "static6", - "id": "network1", - "services": [], - "accept-ra": "false" - }], - "links": [ - { - "ethernet_mac_address": "fa:16:3e:ed:9a:59", - "mtu": None, "type": "bridge", "id": - "tap1a81968a-79", - "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f" - }, - ], - } - macs = {'fa:16:3e:ed:9a:59': 'eth0'} - render_dir = self.tmp_dir() - network_cfg = openstack.convert_net_json(net_json, known_macs=macs) - ns = network_state.parse_net_config_data(network_cfg, - skip_broken=False) - renderer = self._get_renderer() - with self.assertRaises(ValueError): - renderer.render_network_state(ns, target=render_dir) - self.assertEqual([], os.listdir(render_dir)) - - def test_static6_from_yaml(self): - entry = NETWORK_CONFIGS['static6'] - found = self._render_and_read(network_config=yaml.load( - entry['yaml'])) - self._compare_files_to_expected(entry[self.expected_name], found) - self._assert_headers(found) - def test_dhcpv6_reject_ra_config_v2(self): entry = NETWORK_CONFIGS['dhcpv6_reject_ra'] found = self._render_and_read(network_config=yaml.load( @@ -3231,8 +3166,6 @@ USERCTL=no IPADDR6=2001:db8::100/32 IPV6ADDR=2001:db8::100/32 IPV6INIT=yes - IPV6_AUTOCONF=no - IPV6_FORCE_ACCEPT_RA=no IPV6_DEFAULTGW=2001:db8::1 NETMASK=255.255.255.0 NM_CONTROLLED=no @@ -3277,6 +3210,7 @@ USERCTL=no ONBOOT=yes PHYSDEV=eno1 STARTMODE=auto + TYPE=Ethernet USERCTL=no VLAN=yes """) diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index 87dc8dd..0e71db8 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -1177,19 +1177,4 @@ class TestGetProcEnv(helpers.TestCase): my_ppid = os.getppid() self.assertEqual(my_ppid, util.get_proc_ppid(my_pid)) - -class TestKernelVersion(): - """test kernel version function""" - - params = [ - ('5.6.19-300.fc32.x86_64', (5, 6)), - ('4.15.0-101-generic', (4, 15)), - ('3.10.0-1062.12.1.vz7.131.10', (3, 10)), - 
('4.18.0-144.el8.x86_64', (4, 18))] - - @mock.patch('os.uname') - @pytest.mark.parametrize("uname_release,expected", params) - def test_kernel_version(self, m_uname, uname_release, expected): - m_uname.return_value.release = uname_release - assert expected == util.kernel_version() # vi: ts=4 expandtab diff --git a/tools/ccfg-merge-debug b/tools/ccfg-merge-debug index dd5873a..1f08e0c 100755 --- a/tools/ccfg-merge-debug +++ b/tools/ccfg-merge-debug @@ -1,4 +1,4 @@ -#!/usr/bin/python3 +#!/usr/bin/python from cloudinit import handlers from cloudinit.handlers import cloud_config as cc_part diff --git a/tools/cloudconfig-schema b/tools/cloudconfig-schema index abba445..32f0d61 100755 --- a/tools/cloudconfig-schema +++ b/tools/cloudconfig-schema @@ -1,4 +1,4 @@ -#!/usr/bin/env python3 +#!/usr/bin/env python # This file is part of cloud-init. See LICENSE file for license information. """cloudconfig-schema diff --git a/tools/make-mime.py b/tools/make-mime.py index 7fc60c4..d321479 100755 --- a/tools/make-mime.py +++ b/tools/make-mime.py @@ -1,4 +1,4 @@ -#!/usr/bin/python3 +#!/usr/bin/python import argparse import sys diff --git a/tools/migrate-lp-user-to-github b/tools/migrate-lp-user-to-github index 9f7ea61..1c05818 100755 --- a/tools/migrate-lp-user-to-github +++ b/tools/migrate-lp-user-to-github @@ -1,4 +1,4 @@ -#!/usr/bin/env python3 +#!/usr/bin/env python """Link your Launchpad user to github, proposing branches to LP and Github""" from argparse import ArgumentParser diff --git a/tools/mock-meta.py b/tools/mock-meta.py index c82e8be..724f7fc 100755 --- a/tools/mock-meta.py +++ b/tools/mock-meta.py @@ -1,4 +1,4 @@ -#!/usr/bin/python3 +#!/usr/bin/python # Provides a somewhat random, somewhat compat, somewhat useful mock version of # http://docs.amazonwebservices.com diff --git a/tools/pipremove b/tools/pipremove index 620ca85..f8f4ff1 100755 --- a/tools/pipremove +++ b/tools/pipremove @@ -1,4 +1,4 @@ -#!/usr/bin/python3 +#!/usr/bin/python import subprocess import sys diff --git a/tools/read-dependencies b/tools/read-dependencies index 75d08a3..b4656e6 100755 --- a/tools/read-dependencies +++ b/tools/read-dependencies @@ -1,4 +1,4 @@ -#!/usr/bin/env python3 +#!/usr/bin/env python """List pip dependencies or system package dependencies for cloud-init.""" # You might be tempted to rewrite this as a shell script, but you diff --git a/tools/read-version b/tools/read-version index 4f444e6..6dca659 100755 --- a/tools/read-version +++ b/tools/read-version @@ -1,4 +1,4 @@ -#!/usr/bin/env python3 +#!/usr/bin/env python import os import json @@ -65,8 +65,32 @@ output_json = '--json' in sys.argv src_version = ci_version.version_string() version_long = None -version = src_version -version_long = None +if is_gitdir(_tdir) and which("git"): + flags = [] + if use_tags: + flags = ['--tags'] + cmd = ['git', 'describe', '--abbrev=8', '--match=[0-9]*'] + flags + + try: + version = tiny_p(cmd).strip() + except RuntimeError: + version = None + + if version is None or not version.startswith(src_version): + sys.stderr.write("git describe version (%s) differs from " + "cloudinit.version (%s)\n" % (version, src_version)) + sys.stderr.write( + "Please get the latest upstream tags.\n" + "As an example, this can be done with the following:\n" + "$ git remote add upstream https://git.launchpad.net/cloud-init\n" + "$ git fetch upstream --tags\n" + ) + sys.exit(1) + + version_long = tiny_p(cmd + ["--long"]).strip() +else: + version = src_version + version_long = None # version is X.Y.Z[+xxx.gHASH] # version_long is None
or X.Y.Z-xxx-gHASH diff --git a/tools/render-cloudcfg b/tools/render-cloudcfg index eaf8c2b..a441f4f 100755 --- a/tools/render-cloudcfg +++ b/tools/render-cloudcfg @@ -1,4 +1,4 @@ -#!/usr/bin/env python3 +#!/usr/bin/env python import argparse import os diff --git a/tools/validate-yaml.py b/tools/validate-yaml.py index d8bbcfc..a57ea84 100755 --- a/tools/validate-yaml.py +++ b/tools/validate-yaml.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python3 +#!/usr/bin/env python """Try to read a YAML file and report any errors. """